
Deploying k8s 1.22 with containerd

Environment Preparation

[Run on all nodes]

Disable unneeded services

  • Disable SELinux

    # sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/sysconfig/selinux
    # sed -i 's#SELINUX=enforcing#SELINUX=disabled#g' /etc/selinux/config
  • Disable swap

    # sed -i "/swap/{s/^/#/g}" /etc/fstab
    # swapoff -a
  • Disable the firewall

    # systemctl stop firewalld
    # systemctl disable firewalld
  • Disable other unneeded services

    # systemctl disable auditd postfix irqbalance remote-fs tuned rhel-configure

Environment and networking

  • hostname

    # hostname xxxx
    # vim /etc/hostname
  • hosts

    # cat >> /etc/hosts << EOF
    10.0.0.1 master
    10.0.0.2 node-01
    EOF
  • Enable IP forwarding and bridge filtering

    # cat > /etc/sysctl.d/k8s.conf << EOF
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    net.ipv4.ip_forward = 1
    EOF
    # modprobe br_netfilter
    # sysctl -p /etc/sysctl.d/k8s.conf

    Note that modprobe only loads br_netfilter for the current boot; see the persistence sketch after this list.
  • ip_vs modules

    # cat > /etc/sysconfig/modules/ipvs.modules <<EOF
    #!/bin/bash
    modprobe -- ip_vs
    modprobe -- ip_vs_rr
    modprobe -- ip_vs_wrr
    modprobe -- ip_vs_sh
    modprobe -- nf_conntrack_ipv4
    EOF
    # chmod 755 /etc/sysconfig/modules/ipvs.modules
    # bash /etc/sysconfig/modules/ipvs.modules
    # lsmod | grep -e ip_vs -e nf_conntrack_ipv4

    (On kernels 4.19 and newer, nf_conntrack_ipv4 was merged into nf_conntrack; CentOS 7's stock 3.10 kernel still uses the name above.)
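Since modprobe does not persist across reboots, a missing br_netfilter will make kubeadm's preflight check fail later (see the troubleshooting note in the master initialization section). A minimal persistence sketch using systemd's modules-load.d directory:

    # Have systemd-modules-load load br_netfilter at every boot
    cat > /etc/modules-load.d/br_netfilter.conf << EOF
    br_netfilter
    EOF
    # Load it now and verify
    modprobe br_netfilter
    lsmod | grep br_netfilter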

Installing dependencies

  • yum CentOS base repo

    1. Back up the existing repo file:
    mv /etc/yum.repos.d/CentOS-Base.repo /etc/yum.repos.d/CentOS-Base.repo.backup

    2. Download the new CentOS-Base.repo into /etc/yum.repos.d/:
    wget -O /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-7.repo

    3. Run yum makecache to rebuild the cache.

    4. On hosts that are not Alibaba Cloud ECS instances, remove the internal mirror entries:
    sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo
  • yum EPEL repo

    1. Back up the existing repo files:
    mv /etc/yum.repos.d/epel.repo /etc/yum.repos.d/epel.repo.backup
    mv /etc/yum.repos.d/epel-testing.repo /etc/yum.repos.d/epel-testing.repo.backup

    2. Download the new repo file into /etc/yum.repos.d/:
    wget -O /etc/yum.repos.d/epel.repo http://mirrors.aliyun.com/repo/epel-7.repo
  • yum Docker CE repo

    # Step 1: install required system tools
    sudo yum install -y yum-utils device-mapper-persistent-data lvm2

    # Step 2: add the repo
    sudo yum-config-manager --add-repo https://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo

    # Step 3: point the repo at the Aliyun mirror
    sudo sed -i 's+download.docker.com+mirrors.aliyun.com/docker-ce+' /etc/yum.repos.d/docker-ce.repo

    # Step 4: refresh the cache
    sudo yum makecache fast

    # Note:
    # The official repo enables only the latest stable packages by default. You can
    # enable other channels (e.g. the test channel) by editing the repo file:
    # vim /etc/yum.repos.d/docker-ce.repo
    # and changing enabled=0 to enabled=1 under [docker-ce-test].
    #
    # Installing a specific Docker CE version:
    # Step 1: list the available versions:
    # yum list docker-ce.x86_64 --showduplicates | sort -r
    # Loading mirror speeds from cached hostfile
    # Loaded plugins: branch, fastestmirror, langpacks
    # docker-ce.x86_64 17.03.1.ce-1.el7.centos docker-ce-stable
    # docker-ce.x86_64 17.03.1.ce-1.el7.centos @docker-ce-stable
    # docker-ce.x86_64 17.03.0.ce-1.el7.centos docker-ce-stable
    # Available Packages
    # Step 2: install the chosen version (VERSION is e.g. 17.03.0.ce-1.el7.centos above):
    # sudo yum -y install docker-ce-[VERSION]
  • yum Kubernetes repo

    cat <<EOF > /etc/yum.repos.d/kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
    enabled=1
    gpgcheck=1
    repo_gpgcheck=1
    gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    EOF
    setenforce 0
    yum install -y kubelet kubeadm kubectl
    systemctl enable kubelet && systemctl start kubelet

    This installs the newest packages in the repo; to pin the 1.22 release this guide targets, see the sketch after this list.
  • Install dependencies and update the system

    # yum -y install vim-enhanced wget curl net-tools conntrack-tools bind-utils socat ipvsadm ipset
    # yum -y update
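Because the repos above track the newest packages, a plain yum install may pull a release newer than 1.22. A minimal pinning sketch (the 1.22.0 patch level is an assumption; check what the mirror actually carries):

    # See which 1.22 builds the mirror carries
    yum list kubelet --showduplicates | grep 1.22
    # Pin all three components to one version (1.22.0 assumed)
    yum install -y kubelet-1.22.0 kubeadm-1.22.0 kubectl-1.22.0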

System tuning

  • Kernel tuning

    # cat >>/etc/sysctl.conf <<EOF
    net.ipv4.ip_forward = 1
    vm.swappiness = 0
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    net.ipv4.tcp_max_syn_backlog = 65536
    net.core.netdev_max_backlog = 32768
    net.core.somaxconn = 32768
    net.core.wmem_default = 8388608
    net.core.rmem_default = 8388608
    net.core.rmem_max = 16777216
    net.core.wmem_max = 16777216
    net.ipv4.tcp_timestamps = 0
    net.ipv4.tcp_synack_retries = 2
    net.ipv4.tcp_syn_retries = 2
    net.ipv4.tcp_tw_recycle = 1
    net.ipv4.tcp_tw_reuse = 1
    net.ipv4.tcp_mem = 94500000 915000000 927000000
    net.ipv4.tcp_max_orphans = 3276800
    net.ipv4.ip_local_port_range = 1024 65535
    EOF

    # sysctl -p

    Note: net.ipv4.tcp_tw_recycle was removed in Linux 4.12; it applies on CentOS 7's 3.10 kernel but should be dropped on newer ones.
  • File descriptor limits

    ulimit -n 655350

    To make the limits permanent, change the following two files:
    # cat >>/etc/security/limits.conf <<EOF
    * soft memlock unlimited
    * hard memlock unlimited
    * soft nofile 655350
    * hard nofile 655350
    * soft nproc 655350
    * hard nproc 655350
    EOF

    vim /etc/systemd/system.conf
    DefaultLimitNOFILE=655350

    Applying the systemd limit without a reboot is covered in the sketch after this list.
  • Load kernel modules

    # cat <<EOF >/etc/sysconfig/modules/ipvs.modules
    #!/bin/bash
    ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack_ipv4"
    for kernel_module in \${ipvs_modules}; do
        /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
        if [ \$? -eq 0 ]; then
            /sbin/modprobe \${kernel_module}
        fi
    done
    EOF
    # chmod +x /etc/sysconfig/modules/ipvs.modules
    # bash /etc/sysconfig/modules/ipvs.modules
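To pick up the DefaultLimitNOFILE change from /etc/systemd/system.conf without rebooting, one option is to re-execute systemd (already-running services inherit the new default only after they are restarted):

    # Re-read /etc/systemd/system.conf without a reboot
    systemctl daemon-reexec
    # Confirm in a fresh login shell
    ulimit -n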

Installing containerd

[Install on all nodes]

Install

# yum install -y yum-utils device-mapper-persistent-data lvm2
# yum list | grep containerd
# yum install containerd.io -y

Edit the configuration

  • Generate the default config

    # mkdir -p /etc/containerd
    # containerd config default > /etc/containerd/config.toml
  • Switch to Aliyun mirrors

    # sed -i "s#k8s.gcr.io#registry.cn-hangzhou.aliyuncs.com/google_containers#g" /etc/containerd/config.toml
    # sed -i "s#https://registry-1.docker.io#https://registry.cn-hangzhou.aliyuncs.com#g" /etc/containerd/config.toml
  • Add SystemdCgroup = true

    # sed -i '/containerd.runtimes.runc.options/a\ \ \ \ \ \ \ \ \ \ \ \ SystemdCgroup = true' /etc/containerd/config.toml

    (A quick sanity check for these edits follows below.)
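A quick sanity check of the three sed edits above, before starting the service:

    # The runc options block should now contain SystemdCgroup = true
    grep -A1 'containerd.runtimes.runc.options' /etc/containerd/config.toml
    # No stock registry references should remain after the mirror rewrites
    grep -E 'k8s.gcr.io|registry-1.docker.io' /etc/containerd/config.toml || echo "mirrors rewritten"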

Start the service

# systemctl daemon-reload
# systemctl enable containerd
# systemctl restart containerd

Test

  • Pull an image

    ctr images pull docker.io/library/nginx:alpine
  • List downloaded images

    ctr images ls
  • Appendix: ctr commands

    id  containerd command                       docker command                          notes
    1   ctr image ls                             docker images                           list images
    2   ctr image pull nginx                     docker pull nginx                       pull the nginx image
    3   ctr image tag nginx nginx-test           docker tag nginx nginx-test             tag the nginx image
    4   ctr image push nginx-test                docker push nginx-test                  push the nginx-test image
    5   ctr image import nginx.tar               docker load < nginx.tar.gz              import a local image (ctr cannot read compressed archives)
    6   ctr run -d --env 111 nginx-test nginx    docker run -d --name=nginx nginx-test   run a container
    7   ctr task ls                              docker ps                               list running containers
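One caveat when comparing ctr with docker: containerd is namespaced, and ctr operates on the default namespace unless told otherwise, while the kubelet and crictl use the k8s.io namespace. For example:

    # Images pulled by the kubelet/crictl live in the k8s.io namespace
    ctr -n k8s.io images ls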

Installing the k8s cluster

Install services

[Run on all nodes]

  • Install the packages

    yum install -y kubelet kubeadm kubectl
  • Point crictl at the containerd runtime

    crictl config runtime-endpoint /run/containerd/containerd.sock

    (This persists the setting to /etc/crictl.yaml; see the sketch after this list.)
  • Start the service

    systemctl daemon-reload
    systemctl enable kubelet && systemctl start kubelet
  • Set a proxy for containerd (optional)

    mkdir /etc/systemd/system/containerd.service.d

    cat > /etc/systemd/system/containerd.service.d/http_proxy.conf << EOF
    [Service]
    Environment="HTTP_PROXY=http://10.0.0.3:808/"
    EOF
    # systemctl restart containerd

    If a proxy is set, consider also adding a NO_PROXY entry for registry and cluster-internal addresses so that local traffic bypasses it.
  • Test

    # crictl pull nginx:alpine
    # crictl rmi nginx:alpine
    # crictl images
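For reference, the crictl setting above is stored in /etc/crictl.yaml. A minimal hand-written equivalent (the image-endpoint and timeout values are assumptions, not requirements):

    cat > /etc/crictl.yaml << EOF
    runtime-endpoint: unix:///run/containerd/containerd.sock
    image-endpoint: unix:///run/containerd/containerd.sock
    timeout: 10
    EOF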

Master initialization file

[Run on the master]

  • Generate the init config

    # kubeadm config print init-defaults > kubeadm.yaml

    Then make the following changes:

    imageRepository: switch to a domestic mirror (e.g. Aliyun)
    criSocket: point at the containerd socket configured earlier
    serviceSubnet: 10.1.0.0/16, a dedicated subnet for Services
    podSubnet: 10.244.0.0/16, which must match the flannel network configured later, otherwise DNS will not start
    kube-proxy mode set to ipvs
    cgroupDriver set to systemd, since containerd is the runtime
  • The modified file (you can also take this file directly, adjust it, and initialize):

    apiVersion: kubeadm.k8s.io/v1beta2
    bootstrapTokens:
    - groups:
      - system:bootstrappers:kubeadm:default-node-token
      token: abcdef.0123456789abcdef
      ttl: 24h0m0s
      usages:
      - signing
      - authentication
    kind: InitConfiguration
    localAPIEndpoint:
      advertiseAddress: 10.0.0.1
      bindPort: 6443
    nodeRegistration:
      criSocket: /run/containerd/containerd.sock
      name: t-master
      taints:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
    ---
    apiServer:
      timeoutForControlPlane: 4m0s
    apiVersion: kubeadm.k8s.io/v1beta2
    certificatesDir: /etc/kubernetes/pki
    clusterName: kubernetes
    controllerManager: {}
    dns:
      type: CoreDNS
    etcd:
      local:
        dataDir: /var/lib/etcd
    imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
    kind: ClusterConfiguration
    kubernetesVersion: v1.22.0
    networking:
      dnsDomain: cluster.local
      serviceSubnet: 10.1.0.0/16
      podSubnet: 10.244.0.0/16
    scheduler: {}
    ---
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    kind: KubeProxyConfiguration
    mode: ipvs
    ---
    apiVersion: kubelet.config.k8s.io/v1beta1
    kind: KubeletConfiguration
    cgroupDriver: systemd
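Note: on 1.22, kubeadm config print init-defaults emits apiVersion kubeadm.k8s.io/v1beta3; the v1beta2 documents above are still accepted by kubeadm 1.22 (v1beta2 is deprecated there), so if your generated file says v1beta3, keep that apiVersion rather than downgrading it.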

Pre-pull images (optional)

  • Pull the images in advance

    # kubeadm config images pull --image-repository registry.aliyuncs.com/google_containers

    (To pull exactly what the init file specifies, see the variant after this list.)
  • Check the downloaded images

    # crictl images
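Since the pull command above hard-codes a repository, an alternative is to read the repository and version straight from the init file written earlier:

    # Pulls the images for the imageRepository/kubernetesVersion set in kubeadm.yaml
    kubeadm config images pull --config kubeadm.yaml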

Master initialization

# kubeadm init --config=kubeadm.yaml

Initialization may fail with the following error:

[root@t-master ~]# kubeadm init --config=kubeadm.yaml
[init] Using Kubernetes version: v1.20.5
[preflight] Running pre-flight checks
error execution phase preflight: [preflight] Some fatal errors occurred:
[ERROR FileContent--proc-sys-net-bridge-bridge-nf-call-iptables]: /proc/sys/net/bridge/bridge-nf-call-iptables does not exist
[preflight] If you know what you are doing, you can make a check non-fatal with `--ignore-preflight-errors=...`
To see the stack trace of this error execute with --v=5 or higher

[Fix]

# modprobe br_netfilter
# echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables

(To make this survive reboots, see the modules-load.d sketch in the environment section.)

Usage

  • Configure kubectl

    # mkdir -p $HOME/.kube
    # cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    # chown $(id -u):$(id -g) $HOME/.kube/config
  • Check the nodes

    kubectl get node
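Right after init, the master usually reports NotReady because no CNI plugin is installed yet; CoreDNS pods stay Pending for the same reason. The networking step below resolves both. To watch the system pods:

    # CoreDNS remains Pending until a CNI plugin is deployed
    kubectl get pods -n kube-system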

Joining worker nodes

[Run on each worker node]

kubeadm join 10.0.0.1:6443 --token abcdef.0123456789abcdef \
    --discovery-token-ca-cert-hash sha256:b0a2ca593b614fcd25801643f32706fd54cd7d7af7838e6c381c0ffafd4b89c0
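The bootstrap token above expires after 24 hours (the ttl set in kubeadm.yaml). If it has lapsed, or the join command was lost, regenerate it on the master:

    # Prints a fresh join command with a new token and the CA cert hash
    kubeadm token create --print-join-command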

Networking

[Run on the master]

  • calico

    kubectl create -f https://docs.projectcalico.org/manifests/tigera-operator.yaml
    kubectl create -f https://docs.projectcalico.org/manifests/custom-resources.yaml
    # update the cidr setting first; see the note after this list
  • flannel

    kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
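On pod CIDRs: the flannel manifest defaults to 10.244.0.0/16, matching the podSubnet in kubeadm.yaml, so it works unmodified. Calico's custom-resources.yaml ships with a different cidr (192.168.0.0/16 at the time of writing), so fetch and edit it before creating, as noted in the calico step. A minimal sketch, assuming that default:

    # Align calico's pod cidr with the cluster's podSubnet, then apply
    curl -O https://docs.projectcalico.org/manifests/custom-resources.yaml
    sed -i 's#192.168.0.0/16#10.244.0.0/16#' custom-resources.yaml
    kubectl create -f custom-resources.yaml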

Related documents