Press "Enter" to skip to content

Deploying a Highly Available Kubernetes Cluster with kubeadm


Environment Preparation

Configure /etc/hosts

cat /etc/hosts
192.168.0.101 master1
192.168.0.102 master2
192.168.0.103 master3
192.168.0.104 worker1

Set the hostname

hostnamectl set-hostname xxxx

Machine list

IP             Name     Notes
192.168.0.101  master1  HAProxy
192.168.0.102  master2
192.168.0.103  master3
192.168.0.104  worker1

Flush iptables rules

iptables -F
echo ""> /etc/sysconfig/iptables
systemctl restart iptables

Disable SELinux

setenforce 0
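
Note that setenforce 0 only switches SELinux to permissive mode until the next reboot. To make the change survive reboots, /etc/selinux/config needs to be updated as well; a minimal sketch:

# Persist permissive mode (use SELINUX=disabled to turn SELinux off entirely)
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config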

The bridge-related sysctl settings below require the br_netfilter kernel module, so load it first:

modprobe br_netfilter
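
modprobe only loads the module for the current boot. To have it loaded again automatically after a reboot, one option on systemd-based systems is a modules-load.d entry; a minimal sketch:

# Load br_netfilter automatically on every boot
cat > /etc/modules-load.d/k8s.conf <<EOF
br_netfilter
EOF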

Create the file /etc/sysctl.d/k8s.conf and add the following content:

net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1

Run the following command to apply the changes:

sysctl -p /etc/sysctl.d/k8s.conf

Install IPVS

cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack_ipv4
EOF

chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4

The script above creates /etc/sysconfig/modules/ipvs.modules so that the required modules are loaded automatically after a node reboot. Run lsmod | grep -e ip_vs -e nf_conntrack_ipv4 to verify that the kernel modules were loaded correctly. (On kernels 4.19 and later the conntrack module is named nf_conntrack rather than nf_conntrack_ipv4.)

Next, make sure the ipset package is installed on every node:

yum install ipset

To make it easier to inspect the IPVS proxy rules, it is also worth installing the management tool ipvsadm:

yum install ipvsadm

Disable the swap partition

swapoff -a

Edit /etc/fstab and comment out the swap auto-mount entry, then use free -m to confirm that swap is off. Also tune the swappiness parameter by adding the following line to /etc/sysctl.d/k8s.conf:

vm.swappiness=0

Run sysctl -p /etc/sysctl.d/k8s.conf to apply the change.
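
The fstab edit can also be scripted; a one-liner sketch, assuming every swap mount line contains the word "swap":

# Comment out all fstab lines that mention swap (review the file afterwards)
sed -ri 's/.*swap.*/#&/' /etc/fstab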

Install containerd

Download:

wget https://github.com/containerd/containerd/releases/download/v1.6.1/cri-containerd-cni-1.6.1-linux-amd64.tar.gz

Extract the archive directly into the system directories:

tar -C / -xzf cri-containerd-cni-1.6.1-linux-amd64.tar.gz

Generate containerd's default configuration file:

mkdir -p /etc/containerd
containerd config default > /etc/containerd/config.toml

Modify sandbox_image:

sandbox_image = "registry.aliyuncs.com/k8sxio/pause:3.5"
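
This edit can be scripted too; a sketch, assuming the single sandbox_image line from the default config generated above:

sed -i 's#sandbox_image = ".*"#sandbox_image = "registry.aliyuncs.com/k8sxio/pause:3.5"#' /etc/containerd/config.toml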

Next, set containerd's cgroup driver to systemd by changing SystemdCgroup to true in the following block:

[plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
  ...
  [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
    SystemdCgroup = true

To add a private image registry, the configuration file needs further changes. For example, to add a Harbor registry, using harbor.chenjie.info as an example:

      [plugins."io.containerd.grpc.v1.cri".registry.configs]
        #下面为新增内容
        [plugins."io.containerd.grpc.v1.cri".registry.configs."harbor.chenjie.info".tls]
          insecure_skip_verify = true
        [plugins."io.containerd.grpc.v1.cri".registry.configs."harbor.chenjie.info".auth]
          username = "admin"
          password = "Harbor12345"
        #上面为新增内容
      [plugins."io.containerd.grpc.v1.cri".registry.headers]

      [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
        #下面为新增内容
        [plugins."io.containerd.grpc.v1.cri".registry.mirrors."harbor.chenjie.info"]
          endpoint = ["https://harbor.chenjie.info"]
        #上面为新增内容

Enable and start containerd:

systemctl daemon-reload
systemctl enable containerd --now
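
A quick check that the runtime is up; crictl ships in the cri-containerd-cni bundle, and its endpoint can be passed explicitly if /etc/crictl.yaml is not configured:

ctr version
crictl --runtime-endpoint unix:///run/containerd/containerd.sock info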

Check the local libseccomp version; if it is lower than 2.4, it needs to be upgraded.

Upgrade steps

Download the 2.5 package:

wget https://vault.centos.org/centos/8/BaseOS/x86_64/os/Packages/libseccomp-2.5.1-1.el8.x86_64.rpm 

Check the current version:

rpm -qa | grep libseccomp

Remove the old version:

rpm -e libseccomp-2.3.1-2.el7.x86_64 --nodeps

Install the new version:

rpm -ivh libseccomp-2.5.1-1.el8.x86_64.rpm

Install the load balancer

Install HAProxy on master1:

yum install haproxy

Configuration (in /etc/haproxy/haproxy.cfg). Only master1 is enabled at this point; master2 and master3 stay commented out until they join the cluster:

frontend  apiserver
    bind *:8443
    default_backend apiserver_backend
    mode tcp
    option tcplog

backend apiserver_backend
    balance source
    mode tcp
    server      master1 192.168.0.101:6443 check
    #server     master2 192.168.0.102:6443 check
    #server     master3 192.168.0.103:6443 check

Enable on boot, start, and check the status:

systemctl enable haproxy
systemctl start haproxy
systemctl status haproxy
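
A quick way to confirm the frontend is listening on port 8443:

ss -lnt | grep 8443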

Initialize the control plane

Use the Aliyun repository to install kubeadm and related packages:

cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
        http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

Install kubeadm, kubelet, and kubectl:

yum makecache fast
yum install -y kubelet-1.22.8 kubeadm-1.22.8 kubectl-1.22.8 --disableexcludes=kubernetes

Enable kubelet to start on boot:

systemctl enable --now kubelet
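
kubelet will restart in a crash loop until kubeadm init runs; that is expected. A quick sanity check that the expected 1.22.8 binaries were installed:

kubeadm version -o short
kubelet --version
kubectl version --client --short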

On the master1 node, dump the default configuration used for cluster initialization:

kubeadm config print init-defaults --component-configs KubeletConfiguration > kubeadm.yaml

Modify the configuration; the changes are marked with comments:

apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.0.101  # this node's IP
  bindPort: 6443
nodeRegistration:
  criSocket: /run/containerd/containerd.sock 
  imagePullPolicy: IfNotPresent
  name: master1 # this node's hostname
  taints: null 
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs  
---
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/k8sxio
kind: ClusterConfiguration
kubernetesVersion: 1.22.8 # Kubernetes version
controlPlaneEndpoint: master1:8443 # the HAProxy frontend
apiServer:
  extraArgs:
    authorization-mode: Node,RBAC
  timeoutForControlPlane: 4m0s
  certSANs:    # add the master nodes' hostnames and IPs
  - master1
  - master2
  - master3
  - 192.168.0.101
  - 192.168.0.102
  - 192.168.0.103
networking:
  dnsDomain: cluster.local
  serviceSubnet: 172.30.0.0/16 # keep the service network distinct from the host network
  podSubnet: 192.168.0.0/16    # pod network (Calico's default); note it actually overlaps the 192.168.0.x host network used here, so prefer a truly disjoint range in production
scheduler: {}
---
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 0s
    cacheUnauthorizedTTL: 0s
clusterDNS:
- 172.30.0.10 # the .10 address within the service subnet
clusterDomain: cluster.local
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
cgroupDriver: systemd   # set the cgroup driver to systemd
logging: {}
memorySwap: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s

List the images that need to be pre-pulled:

kubeadm config images list --config kubeadm.yaml 

registry.aliyuncs.com/k8sxio/kube-apiserver:v1.22.8
registry.aliyuncs.com/k8sxio/kube-controller-manager:v1.22.8
registry.aliyuncs.com/k8sxio/kube-scheduler:v1.22.8
registry.aliyuncs.com/k8sxio/kube-proxy:v1.22.8
registry.aliyuncs.com/k8sxio/pause:3.5
registry.aliyuncs.com/k8sxio/etcd:3.5.0-0
registry.aliyuncs.com/k8sxio/coredns:v1.8.4

Pull the images:

ctr -n k8s.io i pull   xxxx
# the Aliyun mirror does not carry coredns, so pull it from Docker Hub
ctr -n k8s.io i pull docker.io/coredns/coredns:1.8.4
or
crictl pull docker.io/coredns/coredns:1.8.4
# then re-tag it to the expected name
ctr -n k8s.io i tag docker.io/coredns/coredns:1.8.4 registry.aliyuncs.com/k8sxio/coredns:v1.8.4
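
To pull everything in one pass, a small loop over the image list works (a sketch; the coredns image will still fail and needs the manual pull-and-retag above):

for image in $(kubeadm config images list --config kubeadm.yaml); do
  ctr -n k8s.io i pull "$image"
done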

Run the cluster initialization on the master1 node:

kubeadm init --upload-certs --config kubeadm.yaml

Execution log:

[init] Using Kubernetes version: v1.22.8
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action in beforehand using 'kubeadm config images pull'
...output omitted...
Your Kubernetes control-plane has initialized successfully!

To start using your cluster, you need to run the following as a regular user:

  mkdir -p $HOME/.kube
  sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

Alternatively, if you are the root user, you can run:

  export KUBECONFIG=/etc/kubernetes/admin.conf

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  https://kubernetes.io/docs/concepts/cluster-administration/addons/

You can now join any number of the control-plane node running the following command on each as root:

  kubeadm join master1:8443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:4350be07df44bb66da5f090dd8b1ca7db39e19f0abf6475ce7c8ce4dad8c0a4d \
        --control-plane --certificate-key 337a7cc73581c3246349d659adb01c4c3b73d54e19f752864ce570731791db2e

Please note that the certificate-key gives access to cluster sensitive data, keep it secret!
As a safeguard, uploaded-certs will be deleted in two hours; If necessary, you can use
"kubeadm init phase upload-certs --upload-certs" to reload certs afterward.

Then you can join any number of worker nodes by running the following on each as root:

kubeadm join master1:8443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:4350be07df44bb66da5f090dd8b1ca7db39e19f0abf6475ce7c8ce4dad8c0a4d

Set up kubeconfig as instructed in the output above.

Add further master or worker nodes

Add control-plane nodes

On each additional control-plane node, run the join command that kubeadm init printed earlier on the first node, master1:

kubeadm join master1:8443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:4350be07df44bb66da5f090dd8b1ca7db39e19f0abf6475ce7c8ce4dad8c0a4d \
        --control-plane --certificate-key 337a7cc73581c3246349d659adb01c4c3b73d54e19f752864ce570731791db2e
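
Once master2 and master3 have joined, go back to the HAProxy configuration on master1, uncomment their backend entries, and reload, so that API traffic is actually balanced across all three control-plane nodes:

backend apiserver_backend
    balance source
    mode tcp
    server      master1 192.168.0.101:6443 check
    server      master2 192.168.0.102:6443 check
    server      master3 192.168.0.103:6443 check

systemctl reload haproxy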

Make the master nodes schedulable (if you need to run workload pods on the masters):

kubectl taint nodes --all node-role.kubernetes.io/master-

To restore a node to master-only scheduling:

kubectl taint node xxx node-role.kubernetes.io/master="":NoSchedule

Add worker nodes

Run on each worker node:

kubeadm join master1:8443 --token abcdef.0123456789abcdef \
        --discovery-token-ca-cert-hash sha256:4350be07df44bb66da5f090dd8b1ca7db39e19f0abf6475ce7c8ce4dad8c0a4d
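
The bootstrap token above is valid for 24 hours. If it has expired by the time a worker joins, generate a fresh join command on any control-plane node:

kubeadm token create --print-join-command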

Install the network plugin

Perform the following steps on the master1 node.

Download Calico:

curl https://projectcalico.docs.tigera.io/manifests/calico.yaml -O

Edit the YAML to set the default network interface. This is only necessary on nodes with multiple NICs (bond0 in the example below); on single-NIC nodes this step can be skipped:

spec:
  containers:
  - env:
    - name: DATASTORE_TYPE
      value: kubernetes
    - name: IP_AUTODETECTION_METHOD  # add this env var to the calico-node DaemonSet
      value: interface=bond0    # pin autodetection to the internal NIC
    - name: WAIT_FOR_DATASTORE
      value: "true"

List the required images:

cat calico.yaml | grep image | uniq

          image: docker.io/calico/cni:v3.22.1
          image: docker.io/calico/pod2daemon-flexvol:v3.22.1
          image: docker.io/calico/node:v3.22.1
          image: docker.io/calico/kube-controllers:v3.22.1

Pull the images:

ctr -n k8s.io i pull xxx

Install Calico:

kubectl apply -f calico.yaml
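
The rollout can be watched while calico-node starts on every node (assuming the default k8s-app labels from the manifest):

kubectl -n kube-system get pods -l k8s-app=calico-node -w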

After installation, all nodes should be Ready:

kubectl get node

NAME      STATUS   ROLES                  AGE     VERSION
master1   Ready    control-plane,master   4d17h   v1.22.8
master2   Ready    control-plane,master   4d17h   v1.22.8
master3   Ready    control-plane,master   4d17h   v1.22.8
worker1   Ready    <none>                 20h     v1.22.8

If a node does not become Ready, restarting kubelet and containerd on that node can help:

systemctl restart kubelet containerd