[misc] remove devops

This commit is contained in:
Klesh Wong 2021-01-21 01:03:29 +08:00
parent 8d7743b7b8
commit 8e4c3a030f
18 changed files with 1 addition and 942 deletions

116
bin/kc
View File

@ -1,116 +0,0 @@
#!/usr/bin/env python3
"""kc: merge a kubeconfig snippet from the clipboard into ~/.kube/config.

Reads a YAML kubeconfig fragment from the clipboard, prompts for context
and cluster names, then merges the clusters/contexts/users entries into
the local kube config (see the merge logic further below).
"""
import json
import os
import sys

try:
    import yaml
    import clipboard
except ImportError:  # narrowed from a bare except: only missing deps matter here
    print("please install PyYAML/clipboard first", file=sys.stderr)
    print("sudo pip install PyYAML clipboard", file=sys.stderr)
    sys.exit(-1)

# The new cluster configuration is expected to be on the clipboard.
new_config_data = clipboard.paste()
if not new_config_data:
    print("clipboard is empty", file=sys.stderr)
    sys.exit(-1)
try:
    # NOTE(review): yaml.Loader can construct arbitrary Python objects and the
    # clipboard is user-supplied; yaml.safe_load would be safer, but the
    # original loader is kept to avoid changing accepted inputs.
    new_config = yaml.load(new_config_data, Loader=yaml.Loader)
except yaml.YAMLError:
    print("illegal yaml format", file=sys.stderr)
    # bug fix: the original fell through here and then hit a NameError on
    # new_config below; abort explicitly instead.
    sys.exit(-1)

new_clusters = new_config.get('clusters')
new_contexts = new_config.get('contexts')
new_users = new_config.get('users')
if not new_clusters or not new_contexts or not new_users:
    print("configuration yaml must have clusers/contexts/users", file=sys.stderr)
    sys.exit(-1)

# Ask the user how the imported entries should be named locally.
new_context_name = input("Enter context name:")
if not new_context_name:
    print("aborted!", file=sys.stderr)
    sys.exit(-1)
new_cluster_name = input("Enter cluster name:")
if not new_cluster_name:
    print("aborted!", file=sys.stderr)
    sys.exit(-1)
new_user_name = f'{new_cluster_name}-user'

# load config file (tolerate a missing/empty document)
cfg_path = os.path.expanduser('~/.kube/config')
with open(cfg_path) as f:
    config = yaml.load(f, Loader=yaml.Loader) or {}
config['apiVersion'] = config.get('apiVersion', 'v1')
config['kind'] = config.get('kind', 'Config')
config['clusters'] = config.get('clusters', [])
config['contexts'] = config.get('contexts', [])
config['users'] = config.get('users', [])
def append_or_replace(array, elem, cond):
    """Insert ``elem`` into ``array``, replacing an existing match.

    The first item satisfying ``cond`` is overwritten in place and the
    previous value is returned; when nothing matches, ``elem`` is appended
    and the function returns None (implicitly).
    """
    for pos, candidate in enumerate(array):
        if cond(candidate):
            replaced = array[pos]
            array[pos] = elem
            return replaced
    array.append(elem)
def update_context_ref(old, new, ref_key):
    """Repoint contexts in the module-level ``config`` after a rename.

    When ``old`` was replaced (truthy) and its name differs from ``new``'s,
    every context whose ``ref_key`` field referenced the old name is updated
    to the new name. Otherwise this is a no-op.
    """
    if not old or old['name'] == new['name']:
        return
    for ctx in config['contexts']:
        if ctx[ref_key] == old['name']:
            ctx[ref_key] = new['name']
# Rename the incoming entries to the user-chosen names, then merge each of
# them into the existing kube config, fixing up context references when an
# existing cluster/user got renamed by the replacement.
new_context = new_contexts[0]
new_cluster = new_clusters[0]
new_user = new_users[0]
new_context['name'] = new_context_name
new_context['context']['cluster'] = new_cluster_name
new_context['context']['user'] = new_user_name
new_cluster['name'] = new_cluster_name
new_user['name'] = new_user_name

old_cluster = append_or_replace(
    config['clusters'],
    new_cluster,
    lambda c: c['name'] == new_cluster_name or c['cluster'] == new_cluster['cluster'],
)
update_context_ref(old_cluster, new_cluster, 'cluster')

old_user = append_or_replace(
    config['users'],
    new_user,
    lambda u: u['name'] == new_user['name'] or u['user'] == new_user['user'],
)
update_context_ref(old_user, new_user, 'user')

append_or_replace(
    config['contexts'],
    new_context,
    lambda c: c['name'] == new_context_name,
)

# save config file and make the imported context the active one
config['current-context'] = new_context_name
with open(cfg_path, 'w') as f:
    f.write(yaml.dump(config))

16
bin/kp
View File

@ -1,16 +0,0 @@
#!/bin/sh
# kp: tiny kubectl pod helper.
#   kp sh <pod-prefix> [container]  - open a shell inside the pod
#   kp d  <pod-prefix>              - describe the pod
#   kp e  <pod-prefix>              - watch events involving the pod
POD=$(sudo kubectl get pod | grep -P "^$2" | awk '{print $1}')
CTN=$3
case $1 in
sh)
# bug fix: ${CTN:+"-c $CTN"} expanded to the SINGLE word "-c <name>",
# which kubectl rejects as an unknown flag; split the flag from its value.
kubectl exec -it $POD ${CTN:+-c "$CTN"} -- /bin/sh
;;
d) # describe pod
kubectl describe pod $POD
;;
e) # watch event of pod
kubectl get event -w --field-selector=involvedObject.name=$POD
esac

View File

@ -53,7 +53,7 @@ if status is-interactive
alias dt='date "+%Y%m%d-%H%M%S"'
# === PATH and file sourcing
append_paths ~/.yarn/bin ~/dotfiles/bin ~/dotfiles/devops/bin
append_paths ~/.local/bin ~/.yarn/bin ~/dotfiles/bin ~/dotfiles/devops/bin
source_files /usr/share/autojump/autojump.fish /usr/local/share/autojump/autojump.fish \
~/.profile.fish

View File

@ -1,100 +0,0 @@
#!/bin/sh
# Provision a single-node k3s cluster on a remote host reachable over ssh,
# using CN mirrors (the target host sits behind the GFW), then install
# traefik v2 with HTTPS and print follow-up instructions (worker join
# command, kubeconfig, private-registry secrets).
if [ "$#" -lt 3 ] || [ "$#" -eq 4 ]; then
echo "This script will try to setup k3s on a remote server which unfortunately located in YOUR COUNTRY!"
echo " Usage: $0 <user@host> <external-ip> <email> [cloudflare-api-email] [cloudflare-api-key]"
exit 0
fi
# Resolve this script's directory so sibling files (env.sh, k3s/*.yaml) load.
DIR=$(dirname "$(readlink -f "$0")")
. "$DIR/../env.sh"
SSH=$1
IP=$2
EMAIL=$3
CF_API_EMAIL=$4
CF_API_KEY=$5
# install k3s (skipped when the k3s binary already exists on the host);
# traefik v1 bundled with k3s is disabled - v2 is deployed separately below
ssh "$SSH" '
if ! command -v k3s >/dev/null ; then
export K3S_KUBECONFIG_MODE="644"
export INSTALL_K3S_MIRROR=cn
export INSTALL_K3S_VERSION=v1.20.0-k3s2
export INSTALL_K3S_EXEC="--tls-san '"$IP"' --node-external-ip '"$IP"' --disable traefik --default-local-storage-path /data"
curl -sfL http://rancher-mirror.cnrancher.com/k3s/k3s-install.sh | sh -
fi
'
# setup mirror: append docker.io registry mirrors to containerd's config once
# it appears, then restart k3s so the .tmpl copy takes effect
ssh "$SSH" '
CFG_DIR=/var/lib/rancher/k3s/agent/etc/containerd
while ! sudo stat $CFG_DIR/config.toml >/dev/null 2>&1; do
echo waiting k3s to startup $CFG_DIR/config.toml
sleep 3
done
if ! sudo grep -qF "mirrors" $CFG_DIR/config.toml; then
echo "[plugins.cri.registry.mirrors]" | sudo tee -a $CFG_DIR/config.toml
echo " [plugins.cri.registry.mirrors.\"docker.io\"]" | sudo tee -a $CFG_DIR/config.toml
echo " endpoint = [" | sudo tee -a $CFG_DIR/config.toml
echo " \"https://1nj0zren.mirror.aliyuncs.com\"," | sudo tee -a $CFG_DIR/config.toml
echo " \"https://docker.mirrors.ustc.edu.cn\"," | sudo tee -a $CFG_DIR/config.toml
echo " \"http://f1361db2.m.daocloud.io\"]" | sudo tee -a $CFG_DIR/config.toml
fi
sudo cp $CFG_DIR/config.toml $CFG_DIR/config.toml.tmpl
sudo systemctl restart k3s
'
# setup https traefik: ship the CRDs plus a deployment manifest - the
# Cloudflare DNS-challenge variant when API credentials were supplied,
# otherwise the TLS-challenge variant
scp "$DIR/k3s/traefik-crd.yaml" "$SSH:"
if [ -n "$CF_API_EMAIL" ] ; then
scp "$DIR/k3s/traefik-dpy-cf.yaml" "$SSH:traefik-dpy.yaml"
else
scp "$DIR/k3s/traefik-dpy.yaml" "$SSH:traefik-dpy.yaml"
fi
# substitute the {PLACEHOLDER} tokens in the manifest, apply, and wait for
# the deployment to become available
ssh "$SSH" '
sudo kubectl apply -f traefik-crd.yaml
sed -i "
s/{EMAIL}/'"$EMAIL"'/g;
s/{CF_API_EMAIL}/'"$CF_API_EMAIL"'/g;
s/{CF_API_KEY}/'"$CF_API_KEY"'/g
" traefik-dpy.yaml
sudo kubectl apply -f traefik-dpy.yaml
sudo kubectl wait --for=condition=available --timeout=600s deployment/traefik -n default
'
# add more workers: print the join command with the server's node token
echo
echo "add more workers with following command:"
echo " sudo k3s agent --server https://$IP:6443 --token $(ssh "$SSH" 'sudo cat /var/lib/rancher/k3s/server/node-token')"
# fetch the remote kubeconfig, rewrite its server address to the external
# IP, and copy it to the local clipboard (requires xsel)
KUBECONFIG=$(
ssh "$SSH" '
sudo sed "s|server:.*|server: https://'"$IP"':6443|" /etc/rancher/k3s/k3s.yaml
')
echo "$KUBECONFIG" | xsel -b
echo "kube config has been copy to clipboard, you can set it as your only k8s cluster with:"
echo "$KUBECONFIG"
echo " xsel -ob > ~/.kube/config"
# add private registry: print (do not run) the secret-creation commands
echo
echo "import private registry credentials to your k3s:"
echo " kubectl create secret generic regcred \\"
echo " --from-file=.dockerconfigjson=\$HOME/.docker/config.json \\"
echo " --type=kubernetes.io/dockerconfigjson"
echo
echo "add private registry manually:"
echo " kubectl create secret docker-registry regcred \\"
echo " --docker-server=<your-registry-server> \\"
echo " --docker-username=<your-name> \\"
echo " --docker-password=<your-pword> --docker-email=<your-email>"

View File

@ -1,164 +0,0 @@
# traefik-crd.yaml: traefik v2 CustomResourceDefinitions (IngressRoute,
# Middleware, IngressRouteTCP/UDP, TLSOption, TLSStore, TraefikService)
# followed by the RBAC ClusterRole/ClusterRoleBinding for the ingress
# controller's service account in the default namespace.
# NOTE(review): nested indentation appears to have been stripped in this
# view; the mappings below must be re-indented before `kubectl apply`.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ingressroutes.traefik.containo.us
spec:
group: traefik.containo.us
version: v1alpha1
names:
kind: IngressRoute
plural: ingressroutes
singular: ingressroute
scope: Namespaced
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: middlewares.traefik.containo.us
spec:
group: traefik.containo.us
version: v1alpha1
names:
kind: Middleware
plural: middlewares
singular: middleware
scope: Namespaced
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ingressroutetcps.traefik.containo.us
spec:
group: traefik.containo.us
version: v1alpha1
names:
kind: IngressRouteTCP
plural: ingressroutetcps
singular: ingressroutetcp
scope: Namespaced
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ingressrouteudps.traefik.containo.us
spec:
group: traefik.containo.us
version: v1alpha1
names:
kind: IngressRouteUDP
plural: ingressrouteudps
singular: ingressrouteudp
scope: Namespaced
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: tlsoptions.traefik.containo.us
spec:
group: traefik.containo.us
version: v1alpha1
names:
kind: TLSOption
plural: tlsoptions
singular: tlsoption
scope: Namespaced
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: tlsstores.traefik.containo.us
spec:
group: traefik.containo.us
version: v1alpha1
names:
kind: TLSStore
plural: tlsstores
singular: tlsstore
scope: Namespaced
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: traefikservices.traefik.containo.us
spec:
group: traefik.containo.us
version: v1alpha1
names:
kind: TraefikService
plural: traefikservices
singular: traefikservice
scope: Namespaced
---
# RBAC: read access to core/extensions resources plus the traefik CRDs above
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- traefik.containo.us
resources:
- middlewares
- ingressroutes
- traefikservices
- ingressroutetcps
- ingressrouteudps
- tlsoptions
- tlsstores
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: default

View File

@ -1,71 +0,0 @@
# traefik-dpy-cf.yaml: traefik v2 deployment using Cloudflare DNS-01 ACME.
# {EMAIL}/{CF_API_EMAIL}/{CF_API_KEY} are substituted by the setup script's
# sed before `kubectl apply`.
# NOTE(review): nested indentation appears to have been stripped in this
# view; re-indent before use. The websecure service port is 8443 here
# (vs 443 in the tlschallenge variant) - presumably intentional, confirm.
apiVersion: v1
kind: Service
metadata:
name: traefik
spec:
ports:
# - protocol: TCP
# name: web
# port: 80
#- protocol: TCP
#name: admin
#port: 8080
- protocol: TCP
name: websecure
port: 8443
targetPort: 443
type: LoadBalancer
selector:
app: traefik
---
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: default
name: traefik-ingress-controller
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: default
name: traefik
labels:
app: traefik
spec:
replicas: 1
selector:
matchLabels:
app: traefik
template:
metadata:
labels:
app: traefik
spec:
serviceAccountName: traefik-ingress-controller
containers:
- name: traefik
image: traefik:v2.3.6
args:
- --api.insecure
- --accesslog
- --entrypoints.web.Address=:80
- --entrypoints.websecure.Address=:443
- --providers.kubernetescrd
- --certificatesresolvers.myresolver.acme.dnschallenge=true
- --certificatesresolvers.myresolver.acme.dnschallenge.provider=cloudflare
- --certificatesresolvers.myresolver.acme.email={EMAIL}
- --certificatesresolvers.myresolver.acme.storage=acme.json
env:
- name: CLOUDFLARE_EMAIL
value: {CF_API_EMAIL}
- name: CLOUDFLARE_API_KEY
value: {CF_API_KEY}
ports:
- name: web
containerPort: 80
- name: websecure
containerPort: 443
- name: admin
containerPort: 8080

View File

@ -1,72 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: traefik
spec:
ports:
- protocol: TCP
name: web
port: 80
#- protocol: TCP
#name: admin
#port: 8080
- protocol: TCP
name: websecure
port: 443
type: LoadBalancer
selector:
app: traefik
---
apiVersion: v1
kind: ServiceAccount
metadata:
namespace: default
name: traefik-ingress-controller
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: default
name: traefik
labels:
app: traefik
spec:
replicas: 1
selector:
matchLabels:
app: traefik
template:
metadata:
labels:
app: traefik
spec:
serviceAccountName: traefik-ingress-controller
containers:
- name: traefik
image: traefik:v2.3.6
args:
- --api.insecure
- --accesslog
- --entrypoints.web.Address=:80
- --entrypoints.websecure.Address=:443
- --providers.kubernetescrd
- --certificatesresolvers.myresolver.acme.tlschallenge
- --certificatesresolvers.myresolver.acme.email=EMAIL
- --certificatesresolvers.myresolver.acme.storage=/data/acme.json
ports:
- name: web
containerPort: 80
- name: websecure
containerPort: 443
- name: admin
containerPort: 8080
volumeMounts:
- name: data
mountPath: /data
volumes:
- name: data
hostPath:
path: /data/traefik
type: DirectoryOrCreate

View File

@ -1,55 +0,0 @@
# HTTPS routing example: route Host(`example.com`) on the websecure
# entrypoint to the whoami demo service, with certificates issued by the
# "myresolver" ACME resolver defined in the traefik deployment.
# NOTE(review): the IngressRoute is named "gitea" but targets the whoami
# service - looks copied from another manifest; confirm the name.
# NOTE(review): nested indentation appears stripped in this view; re-indent
# before use.
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: gitea
namespace: default
spec:
entryPoints:
- websecure
# - web
routes:
- match: Host(`example.com`)
kind: Rule
services:
- name: whoami
port: 80
tls:
certResolver: myresolver
---
apiVersion: v1
kind: Service
metadata:
name: whoami
spec:
ports:
- protocol: TCP
name: web
port: 80
selector:
app: whoami
---
kind: Deployment
apiVersion: apps/v1
metadata:
namespace: default
name: whoami
labels:
app: whoami
spec:
replicas: 2
selector:
matchLabels:
app: whoami
template:
metadata:
labels:
app: whoami
spec:
containers:
- name: whoami
image: traefik/whoami
ports:
- name: web
containerPort: 80

View File

@ -1,52 +0,0 @@
# Plain-HTTP routing example: route Host(`172.16.0.10`) on the web
# entrypoint to the whoami demo service (two replicas, no TLS).
# NOTE(review): nested indentation appears stripped in this view; re-indent
# before use.
apiVersion: traefik.containo.us/v1alpha1
kind: IngressRoute
metadata:
name: simpleingressroute
namespace: default
spec:
entryPoints:
- web
routes:
- match: Host(`172.16.0.10`)
kind: Rule
services:
- name: whoami
port: 80
---
apiVersion: v1
kind: Service
metadata:
name: whoami
spec:
ports:
- protocol: TCP
name: web
port: 80
selector:
app: whoami
---
kind: Deployment
apiVersion: apps/v1
metadata:
namespace: default
name: whoami
labels:
app: whoami
spec:
replicas: 2
selector:
matchLabels:
app: whoami
template:
metadata:
labels:
app: whoami
spec:
containers:
- name: whoami
image: traefik/whoami
ports:
- name: web
containerPort: 80

View File

@ -1 +0,0 @@
k8s/

View File

@ -1,10 +0,0 @@
# Minimal kubectl + curl image used to run the devops helper scripts as
# CronJob containers: the bin/ scripts are copied onto PATH so a script
# name can be passed directly as the container args (ENTRYPOINT is env).
FROM lachlanevenson/k8s-kubectl:v1.20.1
# Use the Aliyun apk mirror (faster from mainland China).
RUN sed -i 's|dl-cdn.alpinelinux.org|mirrors.aliyun.com|g' /etc/apk/repositories
RUN apk update --no-cache \
&& apk add --update --no-cache curl \
&& rm -rf /var/cache/apk/*
WORKDIR /opt/devops
# Helper scripts become directly invocable by name.
ADD bin .
ENV PATH=/opt/devops:$PATH
ENTRYPOINT ["/usr/bin/env"]
CMD ["ls"]

View File

@ -1,31 +0,0 @@
#!/bin/sh
# Rotating directory backup: tar.gz each <src-dir> into
# <backup-dir>/<YYYYMMDD>/, then prune date-named entries older than
# <days> via rm-ndays-ago.sh.
set -e
if [ "$#" -lt 2 ] ;then
echo "Keep N days backup of a directory"
echo
echo " Usage $0 <days> <backup-dir> <src-dir ...>"
exit 1
fi
DIR=$(dirname "$(readlink -f "$0")")
DAY=$1
# strip one trailing slash so the prune path is well-formed
BKD=${2%/}
PTH=$BKD/$(date +%Y%m%d)
shift
shift
echo "start backing up on $(date)"
# bug fix: quote expansions so backup paths containing spaces work
mkdir -p "$PTH"
while [ "$#" -gt 0 ] ;do
SRC=$1
echo " backing up $SRC"
tar -zcf "$PTH/$(basename "$SRC").tar.gz" "$SRC"
shift
done
echo "start removing older archives"
"$DIR/rm-ndays-ago.sh" "$BKD" "$DAY"
echo "done!"

View File

@ -1,22 +0,0 @@
#!/bin/sh
# Rotating postgres backup: dump all databases from a k8s pod into a
# date-stamped subdirectory of <backup-dir>, then prune date-named entries
# older than <days> via rm-ndays-ago.sh.
set -e
if [ "$#" -lt 3 ] ;then
echo "Keep recent N-days backup of postgres on k8s container"
echo
echo " Usage $0 <days> <path/to/backup/directory> <pod-app-label> [container-name] [DBN]"
exit 1
fi
DIR=$(dirname "$(readlink -f "$0")")
DAY=$1
BKD=$2
shift
shift
echo "start backing up on $(date)"
# remaining "$@" is: <pod-app-label> [container-name] [DBN]
"$DIR/k8s-pgdball.sh" backup "$BKD/$(date +%Y%m%d)" "$@"
echo "start removing older archives"
"$DIR/rm-ndays-ago.sh" "$BKD" "$DAY"
echo "done!"

View File

@ -1,44 +0,0 @@
#!/bin/sh
# Backup or restore a single postgres database living inside a k8s pod,
# streaming through kubectl exec: backup pipes pg_dump|gzip out of the
# pod; restore drops/recreates the database and feeds the gunzipped dump
# back in. The database name defaults to the .sql.gz basename.
set -e
if [ "$#" -lt 3 ] ;then
echo "Backup/Restore postgres database on k8s"
echo
echo " Usage $0 <backup|restore> <path/to/dbname.sql.gz> <pod-app-label> [container] [dbname]"
exit 1
fi
GZP=$2
APP=$3
CTN=$4
DBN=${5-"$(basename "$GZP" .sql.gz)"}
# Resolve the pod with the given app label (header row skipped).
POD=$(kubectl get pod --selector "app=$APP" | tail +2 | head -n 1 | awk '{print $1}')
if [ -z "$POD" ] ; then
echo not pod found!
exit 1
elif [ "$(echo "$POD" | wc -w)" -gt 1 ] ;then
# NOTE(review): `head -n 1` above already keeps a single line, so this
# multi-pod branch looks unreachable - confirm intent.
echo more than one pod!
exit 1
fi
backup () {
kubectl exec "$POD" -c "$CTN" -- sh -c "pg_dump -U postgres --no-owner $DBN | gzip" > "$GZP"
}
restore () {
# destructive: the target database is dropped and recreated before import
kubectl exec "$POD" -c "$CTN" -- psql postgres postgres -c "DROP DATABASE IF EXISTS $DBN"
kubectl exec "$POD" -c "$CTN" -- psql postgres postgres -c "CREATE DATABASE $DBN"
kubectl exec "$POD" -c "$CTN" -i -- sh -c "gunzip -c | psql $DBN postgres" < "$GZP"
}
case $1 in
backup)
backup
;;
restore)
restore
;;
esac

View File

@ -1,58 +0,0 @@
#!/bin/sh
# Backup or restore EVERY non-template postgres database found in a k8s pod,
# delegating the per-database work to k8s-pgdb.sh.
set -e
if [ "$#" -lt 3 ] ;then
echo "Backup/Restore all postgres databases on k8s container with pg_dump and gzip"
echo
echo " Usage $0 <backup|restore> <path/to/backup/directory> <pod-app-label> [container-name] [DBN]"
exit 1
fi
DIR=$(dirname "$(readlink -f "$0")")
BKD=$2
APP=$3
CTN=$4
# Resolve the single pod carrying the given app label (header row skipped).
POD=$(kubectl get pod --selector "app=$APP" | tail +2 | awk '{print $1}')
if [ -z "$POD" ] ; then
echo not pod found!
exit 1
elif [ "$(echo "$POD" | wc -w)" -gt 1 ] ;then
echo more than one pod!
exit 1
fi
# Dump each user database to <backup-dir>/<dbname>.sql.gz.
backup () {
mkdir -p "$BKD"
for DBN in $(
kubectl exec "$POD" -c "$CTN" -- \
psql postgres postgres -c "SELECT datname FROM pg_database" | \
tail +3 | head -n -2
); do
case $DBN in
postgres|template0|template1)
# skip system/template databases
;;
*)
echo "backing up $DBN to $BKD/$DBN.sql.gz"
"$DIR/k8s-pgdb.sh" backup "$BKD/$DBN.sql.gz" "$APP" "$CTN" "$DBN"
;;
esac
done
}
# Restore every .sql.gz archive in <backup-dir>.
restore () {
for BACKUPGZ in "$BKD"/*.sql.gz ;do
# bug fix: this used to call "$DIR/pgdb.sh" (no such sibling script)
# and omitted the pod label/container arguments k8s-pgdb.sh requires
"$DIR/k8s-pgdb.sh" restore "$BACKUPGZ" "$APP" "$CTN"
done
}
case $1 in
backup)
backup
;;
restore)
restore
;;
esac

View File

@ -1,25 +0,0 @@
#!/bin/sh
# Dynamic-DNS style updater: discover the current public IP and PUT it into
# a Cloudflare A record. /tmp/myip caches the last pushed IP so unchanged
# addresses skip the API call (run from a CronJob with /tmp host-mounted).
if [ "$#" -lt 5 ] ;then
echo "Renew Cloudflare DNS Record"
echo
echo " Usage: $0 <email> <app-key> <zone-id> <record-id> <domain>"
exit
fi
EMAIL=$1
API_KEY=$2
ZONE_ID=$3
RECORD_ID=$4
DOMAIN=$5
# NOTE(review): `awk -F''` carries an empty field separator - this looks
# like a multi-byte (Chinese) delimiter from myip.ipip.net's output that
# was lost in transit; confirm against the service's actual response.
IP=$(curl -s https://myip.ipip.net | awk -F'' '{print $2}' | awk -F' ' '{print $1}')
grep -qF "$IP" /tmp/myip && echo "Unchanged since last renewal, do nothing" && exit
curl -X PUT "https://api.cloudflare.com/client/v4/zones/$ZONE_ID/dns_records/$RECORD_ID" \
-H "X-Auth-Email: $EMAIL" \
-H "X-Auth-Key: $API_KEY" \
-H "Content-Type: application/json" \
--data '{"type": "A", "name": "'"$DOMAIN"'", "content": "'"$IP"'"}'
echo "$IP" > /tmp/myip

View File

@ -1,22 +0,0 @@
#!/bin/sh
# Delete entries of <dir> whose names embed an 8-digit date (YYYYMMDD)
# older than <days> days ago; used to rotate date-named backup directories.
# NOTE(review): `date -D "%S" -d <epoch>` is BusyBox date syntax - confirm
# the runtime image ships BusyBox.
set -e
if [ "$#" -lt 2 ] ;then
echo "Remove files/directories which name contains DatePart and before n days ago"
echo
echo " Usage $0 <dir> <days>"
exit 1
fi
TARGET_DIR=${1%/}
# Cutoff date: now minus <days> days, formatted as YYYYMMDD.
THRES_DATE=$(date -D "%S" -d $(( $(date +%s) - $2 * 24 * 3600 ))"" +%Y%m%d)
echo date prior to $THRES_DATE will be deleted
for entry in $(ls "$TARGET_DIR") ;do
DATE=$(echo "$entry" | grep -oE '[0-9]{8}' || true)
# bug fix: the guard tested the literal string "DATE" (always non-empty),
# so entries without a date reached the numeric comparison with an empty
# $DATE; also `|| true` above keeps `set -e` from aborting on non-matches.
if [ -n "$DATE" ] && [ "$DATE" -lt "$THRES_DATE" ] ;then
echo removeing $TARGET_DIR/$entry
rm -rf "$TARGET_DIR/$entry"
fi
done

View File

@ -1,82 +0,0 @@
# CronJobs running the devops helper image; {BRACE} placeholders must be
# substituted before `kubectl apply`.
# NOTE(review): nested indentation appears stripped in this view; re-indent
# before use.
# update cloudflare dns record if ip changed on minutely basis
# (/tmp is host-mounted so renew-cf-dns-record.sh's IP cache survives runs)
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: ddns
spec:
schedule: "*/1 * * * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: renew-cf-dns-record
image: devops
imagePullPolicy: IfNotPresent
args:
- renew-cf-dns-record.sh
- {EMAIL}
- {APP_KEY}
- {ZONE_ID}
- {RECORD_ID}
- {DOMAIN}
volumeMounts:
- name: tmp
mountPath: /tmp
restartPolicy: OnFailure
volumes:
- name: tmp
hostPath:
path: /tmp
---
# backup local data and remote postgres database at 6:00am everyday
# NOTE(review): backup-local-dir expects <days> <backup-dir> <src-dir...>
# but receives {BLD_ROTATE} {BLD_SRC_DIR} {BLD_DEST_DIR}, and
# backup-k8s-pgdbs expects <days> <backup-dir> <pod-app-label> [container]
# but receives {BKP_ROTATE} {POD_APP_LABEL} {CONTAINER_NAME} {BKP_DEST_DIR}
# - the argument order looks inconsistent with the scripts; confirm.
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: backup
spec:
schedule: "0 6 * * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: backup-local-dir
image: devops
imagePullPolicy: IfNotPresent
args:
- backup-local-dir
- {BLD_ROTATE}
- {BLD_SRC_DIR}
- {BLD_DEST_DIR}
volumeMounts:
- name: bldsrcdir
mountPath: {BLD_SRC_DIR}
- name: blddestdir
mountPath: {BLD_DEST_DIR}
- name: backup-k8s-pgdbs
image: devops
imagePullPolicy: IfNotPresent
args:
- backup-k8s-pgdbs
- {BKP_ROTATE}
- {POD_APP_LABEL}
- {CONTAINER_NAME}
- {BKP_DEST_DIR}
volumeMounts:
- name: bkpdestdir
mountPath: {BKP_DEST_DIR}
restartPolicy: OnFailure
volumes:
- name: bldsrcdir
hostPath:
path: {BLD_SRC_DIR}
type: Directory
- name: blddestdir
hostPath:
path: {BLD_DEST_DIR}
type: DirectoryOrCreate
- name: bkpdestdir
hostPath:
path: {BKP_DEST_DIR}
type: DirectoryOrCreate