
Deploying a Kubernetes v1.11.x HA Cluster with kubeadm

This post explains how to deploy a highly available Kubernetes v1.11 cluster with kubeadm. The installation largely follows the official documentation on creating highly available clusters with kubeadm, and uses HAProxy combined with Keepalived to provide the control-plane load balancer and VIP.

IP            Hostname    Role
172.16.2.30   server01    master
172.16.2.31   server02    master
172.16.2.32   server03    master
172.16.2.33   server04    node

Environment Preparation (run on all nodes)

Configure system parameters

# Temporarily disable SELinux
# To disable it permanently, edit /etc/sysconfig/selinux
sed -i 's/SELINUX=permissive/SELINUX=disabled/' /etc/sysconfig/selinux
setenforce 0

# Temporarily disable swap
# To disable it permanently, comment out the swap lines in /etc/fstab
swapoff -a

# Enable forwarding
# Starting with version 1.13, Docker changed its default firewall rules
# and sets the FORWARD chain of the iptables filter table to DROP,
# which breaks cross-node Pod communication in a Kubernetes cluster.

iptables -P FORWARD ACCEPT

# Or simply stop the firewall
systemctl stop firewalld && systemctl disable firewalld

# Configure forwarding-related kernel parameters, otherwise errors may occur later
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
vm.swappiness=0
EOF
sysctl --system

# Load the IPVS kernel modules; skip this if you do not use the ipvs proxy mode
# These modules must be reloaded after a reboot
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack_ipv4
lsmod | grep ip_vs
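
Because these modules are lost after a reboot, it may help to load them automatically at boot. Below is a minimal sketch, assuming CentOS 7 and its /etc/sysconfig/modules hook; the ipvs.modules filename is just an example:

# Optional: load the IPVS modules on every boot (CentOS 7 convention, assumption)
cat <<'EOF' > /etc/sysconfig/modules/ipvs.modules
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules
# Run it once now and verify the modules are loaded
/etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4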

Install Docker

# Remove any existing docker-ce and install the specified version
yum remove -y docker-ce docker-ce-selinux container-selinux
yum install -y --setopt=obsoletes=0 \
  docker-ce-17.03.1.ce-1.el7.centos \
  docker-ce-selinux-17.03.1.ce-1.el7.centos

# Start Docker
systemctl enable docker && systemctl restart docker

Install kubeadm, kubelet, and kubectl

# Configure the yum repository
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

# Install
yum install -y kubelet-1.11.0 kubeadm-1.11.0 kubectl-1.11.0 ipvsadm
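
Enabling the kubelet at boot (kubeadm will start it during init) and confirming the installed versions is a small but useful check before continuing:

# Enable kubelet and verify the installed versions
systemctl enable kubelet
kubeadm version -o short
kubelet --version
kubectl version --client --short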

Configure hosts resolution

cat <<EOF >> /etc/hosts
172.16.2.30 server01
172.16.2.31 server02
172.16.2.32 server03
172.16.2.33 server04
EOF

Configure HAProxy and Keepalived

# Pull the haproxy image
docker pull haproxy:1.7.8-alpine
mkdir /etc/haproxy
cat >/etc/haproxy/haproxy.cfg<<EOF
global
  log 127.0.0.1 local0 err
  maxconn 50000
  uid 99
  gid 99
  #daemon
  nbproc 1
  pidfile haproxy.pid

defaults
  mode http
  log 127.0.0.1 local0 err
  maxconn 50000
  retries 3
  timeout connect 5s
  timeout client 30s
  timeout server 30s
  timeout check 2s

listen admin_stats
  mode http
  bind 0.0.0.0:1080
  log 127.0.0.1 local0 err
  stats refresh 30s
  stats uri /haproxy-status
  stats realm Haproxy\ Statistics
  stats auth will:will
  stats hide-version
  stats admin if TRUE

frontend k8s-https
  bind 0.0.0.0:8443
  mode tcp
  #maxconn 50000
  default_backend k8s-https

backend k8s-https
  mode tcp
  balance roundrobin
  server server01 172.16.2.30:6443 weight 1 maxconn 1000 check inter 2000 rise 2 fall 3
  server server02 172.16.2.31:6443 weight 1 maxconn 1000 check inter 2000 rise 2 fall 3
  server server03 172.16.2.32:6443 weight 1 maxconn 1000 check inter 2000 rise 2 fall 3
EOF

# Start haproxy
docker run -d --name my-haproxy \
  -v /etc/haproxy:/usr/local/etc/haproxy:ro \
  -p 8443:8443 \
  -p 1080:1080 \
  --restart always \
  haproxy:1.7.8-alpine

# Pull the keepalived image
docker pull osixia/keepalived:1.4.4

# Start keepalived
# Load the required kernel module first
lsmod | grep ip_vs
modprobe ip_vs

# ens160 is the NIC on the 172.16.2.0/24 network in this setup
docker run --net=host --cap-add=NET_ADMIN \
  -e KEEPALIVED_INTERFACE=ens160 \
  -e KEEPALIVED_VIRTUAL_IPS="#PYTHON2BASH:['172.16.2.29']" \
  -e KEEPALIVED_UNICAST_PEERS="#PYTHON2BASH:['172.16.2.30','172.16.2.31','172.16.2.32']" \
  -e KEEPALIVED_PASSWORD=hello \
  --name k8s-keepalived \
  --restart always \
  -d osixia/keepalived:1.4.4

# Check the logs
# You should see two nodes become BACKUP and one become MASTER
docker logs k8s-keepalived

# The VIP 172.16.2.29 is now bound to one of the machines
# Ping test
ping -c4 172.16.2.29

# If it fails, clean up and try again
docker rm -f k8s-keepalived
ip a del 172.16.2.29/32 dev ens160
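
Before initializing the cluster, it can be useful to confirm that HAProxy is serving and to see which node currently owns the VIP. A quick check using the stats endpoint and the will:will credentials defined in haproxy.cfg above; the backend servers will show as DOWN until the API servers exist:

# Which master currently holds the VIP? (run on each master)
ip addr show ens160 | grep 172.16.2.29

# Fetch the HAProxy stats page published on port 1080
curl -s -u will:will http://127.0.0.1:1080/haproxy-status | head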

Deploy Kubernetes HA

Master1

# Generate the configuration file
CP0_IP="172.16.2.30"
CP0_HOSTNAME="server01"
cat >kubeadm-config.yaml<<EOF
apiVersion: kubeadm.k8s.io/v1alpha2
kind: MasterConfiguration
kubernetesVersion: v1.11.0

apiServerCertSANs:
- "172.16.2.29"

api:
  controlPlaneEndpoint: "172.16.2.29:8443"

etcd:
  local:
    extraArgs:
      listen-client-urls: "https://127.0.0.1:2379,https://$CP0_IP:2379"
      advertise-client-urls: "https://$CP0_IP:2379"
      listen-peer-urls: "https://$CP0_IP:2380"
      initial-advertise-peer-urls: "https://$CP0_IP:2380"
      initial-cluster: "$CP0_HOSTNAME=https://$CP0_IP:2380"
    serverCertSANs:
      - $CP0_HOSTNAME
      - $CP0_IP
    peerCertSANs:
      - $CP0_HOSTNAME
      - $CP0_IP

networking:
  podSubnet: 10.244.0.0/16

EOF


# Initialize
# Be sure to save the kubeadm join command printed at the end
kubeadm init --config kubeadm-config.yaml

Run the following commands to use the kubeconfig:

mkdir -p $HOME/.kube
cp -rp /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
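
At this point the first control plane should be up. A quick check; the node will remain NotReady until the network plugin is deployed in a later step:

# Inspect the first master and its system pods
kubectl get nodes
kubectl get pods -n kube-system -o wide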

Once the above is done, copy the CA and certificates from master01 to the other master nodes:

export DIR=/etc/kubernetes/
for NODE in server02 server03; do
  echo "------ ${NODE} ------"
  ssh ${NODE} "mkdir -p ${DIR}/pki/etcd"
  scp ${DIR}/pki/ca.crt ${NODE}:${DIR}/pki/ca.crt
  scp ${DIR}/pki/ca.key ${NODE}:${DIR}/pki/ca.key
  scp ${DIR}/pki/sa.key ${NODE}:${DIR}/pki/sa.key
  scp ${DIR}/pki/sa.pub ${NODE}:${DIR}/pki/sa.pub
  scp ${DIR}/pki/front-proxy-ca.crt ${NODE}:${DIR}/pki/front-proxy-ca.crt
  scp ${DIR}/pki/front-proxy-ca.key ${NODE}:${DIR}/pki/front-proxy-ca.key
  scp ${DIR}/pki/etcd/ca.crt ${NODE}:${DIR}/pki/etcd/ca.crt
  scp ${DIR}/pki/etcd/ca.key ${NODE}:${DIR}/pki/etcd/ca.key
  scp ${DIR}/admin.conf ${NODE}:${DIR}/admin.conf
done
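
Optionally, a quick sanity check that the files actually arrived on the other masters, assuming the same shell where DIR was exported and the same SSH access as above:

# List the copied certificates and kubeconfig on each target master
for NODE in server02 server03; do
  echo "------ ${NODE} ------"
  ssh ${NODE} "ls ${DIR}/pki ${DIR}/pki/etcd ${DIR}/admin.conf"
done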

Master2

# Generate the configuration file
CP0_IP="172.16.2.30"
CP0_HOSTNAME="server01"
CP1_IP="172.16.2.31"
CP1_HOSTNAME="server02"
cat >kubeadm-config.yaml<<EOF
apiVersion: kubeadm.k8s.io/v1alpha2
kind: MasterConfiguration
kubernetesVersion: v1.11.0

apiServerCertSANs:
- "172.16.2.29"

api:
  controlPlaneEndpoint: "172.16.2.29:8443"

etcd:
  local:
    extraArgs:
      listen-client-urls: "https://127.0.0.1:2379,https://$CP1_IP:2379"
      advertise-client-urls: "https://$CP1_IP:2379"
      listen-peer-urls: "https://$CP1_IP:2380"
      initial-advertise-peer-urls: "https://$CP1_IP:2380"
      initial-cluster: "$CP0_HOSTNAME=https://$CP0_IP:2380,$CP1_HOSTNAME=https://$CP1_IP:2380"
      initial-cluster-state: existing
    serverCertSANs:
      - $CP1_HOSTNAME
      - $CP1_IP
    peerCertSANs:
      - $CP1_HOSTNAME
      - $CP1_IP

networking:
  podSubnet: 10.244.0.0/16

EOF

Start the kubelet on master2 via kubeadm alpha:

kubeadm alpha phase certs all --config kubeadm-config.yaml
kubeadm alpha phase kubelet config write-to-disk --config kubeadm-config.yaml
kubeadm alpha phase kubelet write-env-file --config kubeadm-config.yaml
kubeadm alpha phase kubeconfig kubelet --config kubeadm-config.yaml
systemctl restart kubelet

Add the node to the etcd cluster

export CP0_IP=172.16.2.30
export CP0_HOSTNAME=server01
export CP1_IP=172.16.2.31
export CP1_HOSTNAME=server02
export KUBECONFIG=/etc/kubernetes/admin.conf

kubectl exec -n kube-system etcd-${CP0_HOSTNAME} -- etcdctl \
  --ca-file /etc/kubernetes/pki/etcd/ca.crt \
  --cert-file /etc/kubernetes/pki/etcd/peer.crt \
  --key-file /etc/kubernetes/pki/etcd/peer.key \
  --endpoints=https://${CP0_IP}:2379 \
  member add ${CP1_HOSTNAME} https://${CP1_IP}:2380
kubeadm alpha phase etcd local --config kubeadm-config.yaml
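
To confirm that the new member was registered, the same etcdctl flags can be reused to list the members; the new member may show as unstarted until its local etcd pod is running:

# List the etcd cluster members through the first master's etcd pod
kubectl exec -n kube-system etcd-${CP0_HOSTNAME} -- etcdctl \
  --ca-file /etc/kubernetes/pki/etcd/ca.crt \
  --cert-file /etc/kubernetes/pki/etcd/peer.crt \
  --key-file /etc/kubernetes/pki/etcd/peer.key \
  --endpoints=https://${CP0_IP}:2379 \
  member list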

Deploy the control plane components and mark the node as a master

kubeadm alpha phase kubeconfig all --config kubeadm-config.yaml
kubeadm alpha phase controlplane all --config kubeadm-config.yaml
kubeadm alpha phase mark-master --config kubeadm-config.yaml

Run the following commands to use the kubeconfig:

mkdir -p $HOME/.kube
cp -rp /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config

Master3

# Generate the configuration file
CP0_IP="172.16.2.30"
CP0_HOSTNAME="server01"
CP1_IP="172.16.2.31"
CP1_HOSTNAME="server02"
CP2_IP="172.16.2.32"
CP2_HOSTNAME="server03"
cat >kubeadm-config.yaml<<EOF
apiVersion: kubeadm.k8s.io/v1alpha2
kind: MasterConfiguration
kubernetesVersion: v1.11.0

apiServerCertSANs:
- "172.16.2.29"

api:
  controlPlaneEndpoint: "172.16.2.29:8443"

etcd:
  local:
    extraArgs:
      listen-client-urls: "https://127.0.0.1:2379,https://$CP2_IP:2379"
      advertise-client-urls: "https://$CP2_IP:2379"
      listen-peer-urls: "https://$CP2_IP:2380"
      initial-advertise-peer-urls: "https://$CP2_IP:2380"
      initial-cluster: "$CP0_HOSTNAME=https://$CP0_IP:2380,$CP1_HOSTNAME=https://$CP1_IP:2380,$CP2_HOSTNAME=https://$CP2_IP:2380"
      initial-cluster-state: existing
    serverCertSANs:
      - $CP2_HOSTNAME
      - $CP2_IP
    peerCertSANs:
      - $CP2_HOSTNAME
      - $CP2_IP

networking:
  podSubnet: 10.244.0.0/16

EOF

Start the kubelet on master3 via kubeadm alpha:

kubeadm alpha phase certs all --config kubeadm-config.yaml
kubeadm alpha phase kubelet config write-to-disk --config kubeadm-config.yaml
kubeadm alpha phase kubelet write-env-file --config kubeadm-config.yaml
kubeadm alpha phase kubeconfig kubelet --config kubeadm-config.yaml
systemctl restart kubelet

Add the node to the etcd cluster

export CP0_IP=172.16.2.30
export CP0_HOSTNAME=server01
export CP2_IP=172.16.2.32
export CP2_HOSTNAME=server03
export KUBECONFIG=/etc/kubernetes/admin.conf

kubectl exec -n kube-system etcd-${CP0_HOSTNAME} -- etcdctl \
  --ca-file /etc/kubernetes/pki/etcd/ca.crt \
  --cert-file /etc/kubernetes/pki/etcd/peer.crt \
  --key-file /etc/kubernetes/pki/etcd/peer.key \
  --endpoints=https://${CP0_IP}:2379 \
  member add ${CP2_HOSTNAME} https://${CP2_IP}:2380
kubeadm alpha phase etcd local --config kubeadm-config.yaml
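
Once the local etcd on server03 has started, a cluster-health query against the first member should report three healthy members:

# Check the health of the three-member etcd cluster
kubectl exec -n kube-system etcd-${CP0_HOSTNAME} -- etcdctl \
  --ca-file /etc/kubernetes/pki/etcd/ca.crt \
  --cert-file /etc/kubernetes/pki/etcd/peer.crt \
  --key-file /etc/kubernetes/pki/etcd/peer.key \
  --endpoints=https://${CP0_IP}:2379 \
  cluster-health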

Deploy the control plane components and mark the node as a master

kubeadm alpha phase kubeconfig all --config kubeadm-config.yaml
kubeadm alpha phase controlplane all --config kubeadm-config.yaml
kubeadm alpha phase mark-master --config kubeadm-config.yaml

Run the following commands to use the kubeconfig:

mkdir -p $HOME/.kube
cp -rp /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
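
Before moving on to the network, all three masters should now be registered (still NotReady until the CNI plugin is deployed below), each with its own etcd, kube-apiserver, kube-controller-manager, and kube-scheduler pod:

# Verify the three control-plane nodes and their static pods
kubectl get nodes
kubectl get pods -n kube-system -o wide | grep -e etcd -e kube-apiserver -e kube-controller -e kube-scheduler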

Configure the network

Nodes only show a Ready status after the network plugin has been installed and configured.
Allow the masters to run application pods and take part in the workload; the remaining system components can then be deployed.

kubectl taint nodes --all node-role.kubernetes.io/master-

Download flannel

wget https://raw.githubusercontent.com/coreos/flannel/v0.10.0/Documentation/kube-flannel.yml

Modify the configuration

# Modify the configuration
# The network range here must match the podSubnet passed to kubeadm above
  net-conf.json: |
    {
      "Network": "10.244.0.0/16",
      "Backend": {
        "Type": "vxlan"
      }
    }

# If the nodes have more than one network interface, see kubernetes issue 39701:
# https://github.com/kubernetes/kubernetes/issues/39701
# You currently need to add the --iface parameter in kube-flannel.yml to name the
# NIC of the cluster's internal network; otherwise DNS resolution may fail and
# containers may be unable to communicate. Download kube-flannel.yml locally and
# append --iface=<iface-name> to the flanneld startup arguments:
      containers:
      - name: kube-flannel
        image: registry.cn-shanghai.aliyuncs.com/gcr-k8s/flannel:v0.10.0-amd64
        command:
        - /opt/bin/flanneld
        args:
        - --ip-masq
        - --kube-subnet-mgr
        - --iface=eth1

# Replace the image
        image: registry.cn-shenzhen.aliyuncs.com/hyman0603/flannel:v0.10.0-amd64

Start

kubectl apply -f kube-flannel.yml
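
After applying the manifest, the flannel DaemonSet pods should start on every node and the nodes should switch to Ready shortly afterwards:

# Verify the flannel pods and node status
kubectl get pods -n kube-system -o wide | grep flannel
kubectl get nodes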

Note

If the cluster does not come up, the configuration file may be wrong. Also remember to disable swap with swapoff -a; otherwise the next steps will fail.