Deploying a Highly Available (HA) Kubernetes Cluster with kubeadm

https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/

System requirements
2 CPUs / 2 GB RAM or more per machine

Install the Aliyun YUM repos

## https://developer.aliyun.com/mirror/

### base
mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup
curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo

### epel
mv /etc/yum.repos.d/epel.repo /etc/yum.repos.d/epel.repo.backup
mv /etc/yum.repos.d/epel-testing.repo /etc/yum.repos.d/epel-testing.repo.backup
wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo


# Install required packages
### https://github.com/opsnull/follow-me-install-kubernetes-cluster/blob/master/01.%E5%88%9D%E5%A7%8B%E5%8C%96%E7%B3%BB%E7%BB%9F%E5%92%8C%E5%85%A8%E5%B1%80%E5%8F%98%E9%87%8F.md
yum install -y chrony conntrack ipvsadm ipset jq iptables curl sysstat libseccomp wget socat git

Tune kernel parameters

cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-iptables=1
net.bridge.bridge-nf-call-ip6tables=1
net.ipv4.ip_forward=1
net.ipv4.tcp_tw_recycle=0
net.ipv4.tcp_tw_reuse=1
net.ipv4.tcp_timestamps=1
net.ipv4.neigh.default.gc_thresh1=1024
net.ipv4.neigh.default.gc_thresh2=2048
net.ipv4.neigh.default.gc_thresh3=4096
vm.swappiness=0
vm.overcommit_memory=1
vm.panic_on_oom=0
vm.max_map_count=262144
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
sysctl --system
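
You can spot-check that the settings took effect. Note that the net.bridge.* keys only exist once the br_netfilter module is loaded (also done in the IPVS section below):

modprobe br_netfilter   # load now so the bridge sysctl keys exist
sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables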

Load the IPVS kernel modules

## https://github.com/kubernetes/kubernetes/blob/master/pkg/proxy/ipvs/README.md

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules
bash /etc/sysconfig/modules/ipvs.modules
lsmod | grep -e ip_vs -e nf_conntrack_ipv4 # nf_conntrack_ipv4 is for kernels < 4.19 (CentOS 7); use nf_conntrack on newer kernels

modprobe br_netfilter

SELinux

setenforce 0
sed -i 's/^SELINUX=.*/SELINUX=disabled/' /etc/selinux/config

Disable the swap partition

swapoff -a
sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
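
A quick check that swap is really off (the summary lists no devices once swap is disabled):

swapon -s
free -h | grep -i swap # the Swap line should read all zeros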

Set the default iptables FORWARD policy

systemctl stop firewalld
systemctl disable firewalld
iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat
iptables -P FORWARD ACCEPT

Disable unneeded services

systemctl stop postfix && systemctl disable postfix

Install the Aliyun Kubernetes YUM repo

## https://developer.aliyun.com/mirror/
### k8s
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

Install kube* & Docker

yum install -y kubelet kubeadm kubectl # note: the package version must match the image version in kubeadm-config.yaml
systemctl enable kubelet && systemctl start kubelet # kubelet will crash-loop until kubeadm init runs; this is expected

yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

# https://docs.docker.com/engine/install/centos/

yum remove docker \
  docker-client \
  docker-client-latest \
  docker-common \
  docker-latest \
  docker-latest-logrotate \
  docker-logrotate \
  docker-engine

yum update -y && yum install -y \
  containerd.io-1.2.13 \
  docker-ce-19.03.8 \
  docker-ce-cli-19.03.8
mkdir /etc/docker
# registry mirror (accelerator): https://cr.console.aliyun.com/cn-beijing/instances/mirrors
cat > /etc/docker/daemon.json <<EOF
{
  "registry-mirrors": ["https://890une7x.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m"
  },
  "storage-driver": "overlay2",
  "storage-opts": [
    "overlay2.override_kernel_check=true"
  ]
}
EOF
mkdir -p /etc/systemd/system/docker.service.d
systemctl start docker.service
systemctl enable docker.service
systemctl status docker.service
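
To confirm Docker picked up the systemd cgroup driver (kubeadm warns when kubelet and Docker disagree on this):

docker info --format '{{.CgroupDriver}}' # expect: systemd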

kubectl autocompletion

https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion
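
The linked docs boil down to sourcing kubectl's completion script; a minimal sketch for bash (requires the bash-completion package):

yum install -y bash-completion
source <(kubectl completion bash)
echo 'source <(kubectl completion bash)' >> ~/.bashrc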

kubeadm config

vim kubeadm-config.yaml

# kubeadm init --config kubeadm-config.yaml --upload-certs
# kubeadm config print init-defaults
# https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/high-availability/
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
# https://storage.googleapis.com/kubernetes-release/release/stable.txt
# kubernetesVersion: stable
kubernetesVersion: v1.18.3
controlPlaneEndpoint: <your-lb-ip>:<port>
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
apiServer:
  timeoutForControlPlane: 4m0s
controllerManager: {}
scheduler: {}
imageRepository: registry.aliyuncs.com/google_containers
networking:
  dnsDomain: cluster.local
  podSubnet: 172.30.0.0/16
  serviceSubnet: 10.96.0.0/12
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs

## Custom CA
# https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/
# "This means you can, for example, copy an existing CA into /etc/kubernetes/pki/ca.crt and /etc/kubernetes/pki/ca.key, and kubeadm will use this CA for signing the rest of the certificates."
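
A hedged sketch of pre-seeding your own CA before kubeadm init (the file names are the defaults kubeadm looks for; the openssl subject and validity are arbitrary):

mkdir -p /etc/kubernetes/pki
openssl genrsa -out /etc/kubernetes/pki/ca.key 2048
openssl req -x509 -new -nodes -key /etc/kubernetes/pki/ca.key \
  -subj "/CN=kubernetes" -days 3650 -out /etc/kubernetes/pki/ca.crt
# kubeadm init will then sign all remaining certificates with this CA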

kubeadm deploy

kubeadm config images pull --config kubeadm-config.yaml # pull the images first
kubeadm init --config kubeadm-config.yaml --upload-certs
curl -LO https://docs.projectcalico.org/v3.14/manifests/calico.yaml
# change CALICO_IPV4POOL_CIDR to match podSubnet: 172.30.0.0/16
#   - name: CALICO_IPV4POOL_CIDR
#     value: "172.30.0.0/16"
# to switch to BGP mode, see https://docs.projectcalico.org/reference/node/configuration
#   # Enable IPIP
#   - name: CALICO_IPV4POOL_IPIP
#     value: "Never"
kubectl apply -f calico.yaml

# once the CNI is running, the CoreDNS pods are no longer Pending

# to tear a node down and start over:
kubeadm reset
kubectl delete node <node-name>
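
After a successful init, kubeadm prints the follow-up steps itself; the usual kubeconfig setup (paths are kubeadm's defaults) looks like:

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config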

Rebuilding kube-proxy in IPVS mode

# if the cluster was originally installed in iptables mode
kubectl -n kube-system edit cm kube-proxy
# set mode: "ipvs"
kubectl get pod -n kube-system | grep kube-proxy | awk '{system("kubectl delete pod "$1" -n kube-system")}' # recreate the kube-proxy pods
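
To confirm the switch took effect, check a kube-proxy pod's logs or dump the IPVS tables (the pod name below is a placeholder):

kubectl -n kube-system logs kube-proxy-xxxxx | grep -i ipvs # placeholder pod name; expect "Using ipvs Proxier"
ipvsadm -Ln # one virtual server per Service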

Rebuilding Calico in BGP mode

# https://docs.projectcalico.org/archive/v3.14/getting-started/kubernetes/installation/config-options
curl -LO https://docs.projectcalico.org/v3.14/manifests/calico.yaml
# change CALICO_IPV4POOL_CIDR to match podSubnet: 172.30.0.0/16
#   - name: CALICO_IPV4POOL_CIDR
#     value: "172.30.0.0/16"
# switch to BGP mode: https://docs.projectcalico.org/reference/node/configuration
#   # Enable IPIP (set to "Never" for pure BGP)
#   - name: CALICO_IPV4POOL_IPIP
#     value: "Never"
kubectl apply -f calico.yaml

Operations

## Switch the default namespace
kubectl config set-context $(kubectl config current-context) --namespace=<insert-namespace-name-here>
# Validate it
kubectl config view | grep namespace

# create registry secret
kubectl create secret docker-registry boer-harbor --docker-server=harbor.boer.xyz --docker-username=admin --docker-password=Admin@123 --docker-email=boer0924@gmail.com --namespace=boer-public

# join node
kubeadm token list
kubeadm token create --print-join-command

kubectl drain $NODENAME --ignore-daemonsets # evict workloads before maintenance
kubectl uncordon $NODENAME # make the node schedulable again

# https://docs.docker.com/engine/reference/commandline/ps/#filtering
docker rm $(docker ps -a -f status=exited -q)
docker ps --format "{{.ID}}\t{{.Command}}\t{{.Status}}\t{{.Ports}}"
docker ps --filter "status=exited"

etcdctl

# https://jimmysong.io/kubernetes-handbook/guide/using-etcdctl-to-access-kubernetes-data.html
curl -LO https://github.com/etcd-io/etcd/releases/download/v3.4.3/etcd-v3.4.3-linux-amd64.tar.gz
tar xzf etcd-v3.4.3-linux-amd64.tar.gz && cp etcd-v3.4.3-linux-amd64/etcdctl /usr/local/bin/
alias etcdctl='etcdctl --cacert=/etc/kubernetes/pki/etcd/ca.crt --cert=/etc/kubernetes/pki/etcd/peer.crt --key=/etc/kubernetes/pki/etcd/peer.key'
etcdctl get /registry/namespaces/kube-system -w=json | jq .
etcdctl member list
etcdctl help
# backup / restore (each takes a snapshot file argument)
etcdctl snapshot save <file>
etcdctl snapshot status <file>
etcdctl snapshot restore <file>
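
A minimal backup run, assuming the alias above on a control-plane node (the backup path is arbitrary):

etcdctl --endpoints=https://127.0.0.1:2379 snapshot save /var/backups/etcd-$(date +%F).db
etcdctl snapshot status /var/backups/etcd-$(date +%F).db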

Calicoctl

# https://docs.projectcalico.org/archive/v3.14/getting-started/clis/calicoctl/
curl -O -L https://github.com/projectcalico/calicoctl/releases/download/v3.14.1/calicoctl
mv calicoctl /usr/local/bin/
chmod a+x /usr/local/bin/calicoctl

vim /etc/calico/calicoctl.cfg
apiVersion: projectcalico.org/v3
kind: CalicoAPIConfig
metadata:
spec:
  datastoreType: "kubernetes"
  kubeconfig: "/root/.kube/config"

calicoctl get nodes
calicoctl node status # shows Calico's running mode and BGP peers
calicoctl get ipPool -o yaml
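
To flip an existing cluster's pool from IPIP to pure BGP after the fact, one hedged approach is to export the pool, edit it, and re-apply (default-ipv4-ippool is Calico's default pool name):

calicoctl get ipPool default-ipv4-ippool -o yaml > pool.yaml
# edit pool.yaml: set spec.ipipMode to Never
calicoctl apply -f pool.yaml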

Install Helm v2

# https://qhh.me/2019/08/08/Helm-%E5%AE%89%E8%A3%85%E4%BD%BF%E7%94%A8/
curl -LO https://get.helm.sh/helm-v2.16.6-linux-amd64.tar.gz

vim rbac-config.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: tiller
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: tiller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
  - kind: ServiceAccount
    name: tiller
    namespace: kube-system

kubectl apply -f rbac-config.yaml
helm init --service-account tiller -i registry.aliyuncs.com/google_containers/tiller:v2.16.6

helm repo list
helm repo add stable https://mirror.azure.cn/kubernetes/charts
helm repo add incubator https://mirror.azure.cn/kubernetes/charts-incubator

helm repo update
helm fetch stable/mysql # downloads the chart tarball (xxx.tgz) into the current directory
helm install stable/mysql
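
Helm v2 releases can also be named explicitly and configured via --set; a hedged example (the release name and password are made up; mysqlRootPassword is a stable/mysql chart value):

helm install --name my-mysql --set mysqlRootPassword=S3cret stable/mysql
helm ls
helm delete --purge my-mysql # remove the release entirely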

MetalLB

# MetalLB: so that private-cloud/bare-metal users are not second-class citizens in the Kubernetes world
# https://metallb.universe.tf/installation/

# usage demo
apiVersion: v1
kind: Service
metadata:
  name: theapp-service
  annotations:
    metallb.universe.tf/address-pool: default
  labels:
    app: theapp
spec:
  type: LoadBalancer
  # type: NodePort
  # type: ClusterIP
  ports:
    - port: 5000
      targetPort: 5000
      # nodePort: 31090
  selector:
    app: theapp

kubectl get svc # curl -v EXTERNAL-IP
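
The "default" address pool referenced above has to exist first. With the ConfigMap-based configuration from the installation docs, a minimal layer2 pool looks like this (the address range is an example; use free IPs from your LAN):

apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - 192.168.1.240-192.168.1.250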

MetalLB (first class) vs. NodePort (economy class)

[figures: metallb / nodeport traffic-path diagrams]

Ingress-Nginx L7

# https://kubernetes.github.io/ingress-nginx/deploy/#bare-metal
# change the controller Service to type: LoadBalancer (NodePort by default)
# and add the MetalLB annotation:
metadata:
  annotations:
    metallb.universe.tf/address-pool: default
spec:
  type: LoadBalancer

# usage demo
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
  name: theapp-ingress
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
    nginx.ingress.kubernetes.io/load-balance: "ip_hash"
    nginx.ingress.kubernetes.io/upstream-hash-by: "$request_uri"
spec:
  rules:
    - host: theapp.boer.xyz
      http:
        paths:
          - path: /
            backend:
              serviceName: theapp-service
              servicePort: 5000
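
To test without DNS, point curl at the controller's EXTERNAL-IP and pass the Host header (the IP below is a placeholder from the MetalLB pool):

kubectl -n ingress-nginx get svc # note the controller's EXTERNAL-IP
curl -H "Host: theapp.boer.xyz" http://192.168.1.240/ # placeholder IP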

Uninstall Rancher

# https://blog.csdn.net/gui951753/article/details/106160427
kubectl proxy &
NAMESPACE=local
kubectl get namespace $NAMESPACE -o json |jq '.spec = {"finalizers":[]}' >temp.json
curl -k -H "Content-Type: application/json" -X PUT --data-binary @temp.json 127.0.0.1:8001/api/v1/namespaces/$NAMESPACE/finalize

# https://github.com/rancher/rancher/issues/14715#issuecomment-430194650
kubectl get customresourcedefinitions | grep cattle.io | awk '{print $1}' | xargs kubectl delete customresourcedefinitions

Architecture overview

[figure: k8s-boer cluster diagram]
