
Building a Highly Available Kubernetes 1.29.8 Cluster with kubeadm

汪洋 / 2024-08-20

Environment Initialization

# NIC configuration
# cat /etc/NetworkManager/system-connections/ens160.nmconnection
[ipv4]
method=manual
address1=192.168.66.12/24,192.168.66.200
dns=114.114.114.114;8.8.8.8
# cat /etc/NetworkManager/system-connections/ens192.nmconnection
[connection]
autoconnect=false

# Use nmcli to restart the device and reload the connection profile
nmcli d d ens192
nmcli d r ens160 
nmcli c r ens160
# Switch the Rocky Linux repositories to the Aliyun mirror
sed -e 's|^mirrorlist=|#mirrorlist=|g' \
    -e 's|^#baseurl=http://dl.rockylinux.org/$contentdir|baseurl=https://mirrors.aliyun.com/rockylinux|g' \
    -i.bak \
    /etc/yum.repos.d/[Rr]ocky*.repo
    
dnf makecache
# Replace firewalld with iptables
systemctl stop firewalld
systemctl disable firewalld

yum -y install iptables-services
systemctl start iptables
iptables -F
systemctl enable iptables
# Disable SELinux
setenforce 0
sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
grubby --update-kernel ALL --args selinux=0
# To verify: grubby --info DEFAULT
# To roll back the kernel-level change: grubby --update-kernel ALL --remove-args selinux
# Set the timezone
timedatectl set-timezone Asia/Shanghai
# Disable the swap partition
swapoff -a
sed -i 's:/dev/mapper/rl-swap:#/dev/mapper/rl-swap:g' /etc/fstab
# Install ipvsadm (IPVS userspace tool)
yum install -y ipvsadm
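# (Optional sketch, assumption: kube-proxy will later run in IPVS mode) installing ipvsadm alone
# does not load the IPVS kernel modules; the block below loads them and persists them across reboots.
cat <<EOF > /etc/modules-load.d/ipvs.conf
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF
systemctl restart systemd-modules-load.service
lsmod | grep -E 'ip_vs|nf_conntrack'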

Kubernetes-related System Settings

# Enable IP forwarding
echo 'net.ipv4.ip_forward=1' >> /etc/sysctl.conf
sysctl -p
# Load the bridge module
yum install -y epel-release
yum install -y bridge-utils

modprobe br_netfilter
echo 'br_netfilter' >> /etc/modules-load.d/bridge.conf
echo 'net.bridge.bridge-nf-call-iptables=1' >> /etc/sysctl.conf
echo 'net.bridge.bridge-nf-call-ip6tables=1' >> /etc/sysctl.conf
sysctl -p
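# (Optional check) confirm the module is loaded and all three sysctls report 1
lsmod | grep br_netfilter
sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables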

Install Docker and cri-docker

# Add the docker-ce yum repository
# USTC mirror
sudo dnf config-manager --add-repo https://mirrors.ustc.edu.cn/docker-ce/linux/centos/docker-ce.repo
cd /etc/yum.repos.d
# Point the repo entries at the USTC mirror (edit the file in place)
sed -i.bak -e 's|download.docker.com|mirrors.ustc.edu.cn/docker-ce|g' docker-ce.repo

# Install docker-ce
yum -y install docker-ce

# Configure daemon.json
mkdir -p /etc/docker
cat > /etc/docker/daemon.json <<EOF
{
  "data-root": "/data/docker",
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m",
    "max-file": "100"
  },
  "insecure-registries": ["harbor.xinxainghf.com"],
  "registry-mirrors": ["https://docker.cloudmessage.top"]
}
EOF
mkdir -p /etc/systemd/system/docker.service.d

# Restart the Docker service and enable it at boot
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
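# (Optional check) verify Docker picked up the systemd cgroup driver and the custom data root
docker info --format '{{.CgroupDriver}} {{.DockerRootDir}}'
# expected output: systemd /data/docker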
# Install cri-dockerd (download and extract the same release version)
wget https://github.com/Mirantis/cri-dockerd/releases/download/v0.3.9/cri-dockerd-0.3.9.amd64.tgz
tar -xf cri-dockerd-0.3.9.amd64.tgz
cp cri-dockerd/cri-dockerd /usr/bin/
chmod +x /usr/bin/cri-dockerd

# Configure the cri-docker service
cat <<"EOF" > /usr/lib/systemd/system/cri-docker.service
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket
[Service]
Type=notify
ExecStart=/usr/bin/cri-dockerd --network-plugin=cni --pod-infra-container-image=registry.aliyuncs.com/google_containers/pause:3.8
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
EOF

# Add the cri-docker socket unit
cat <<"EOF" > /usr/lib/systemd/system/cri-docker.socket
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service
[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
EOF

# Start the cri-docker service
systemctl daemon-reload
systemctl enable cri-docker
systemctl start cri-docker
systemctl is-active cri-docker
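# (Optional check) the CRI socket referenced later by kubeadm should now exist
systemctl is-active cri-docker.socket
ls -l /var/run/cri-dockerd.sock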

Install ETCD

# 1. Install etcd on the cluster master nodes (Kubernetes 1.29 requires etcd v3.5.0 or later)
# 1.1 Download the release tarball
wget https://github.com/coreos/etcd/releases/download/v3.5.11/etcd-v3.5.11-linux-amd64.tar.gz
# 1.2 Extract the archive
tar xzvf etcd-v3.5.11-linux-amd64.tar.gz
# 1.3 Enter the directory
cd etcd-v3.5.11-linux-amd64/
# 1.4 Move the binaries; note: /usr/local/bin must match the path used in etcd.service
sudo mv etcd* /usr/local/bin

# 2. Create the etcd configuration files
# 2.1 etcd configuration on k8s-master-01 (the listen/advertise URLs use this node's IP)
sudo tee /usr/lib/systemd/system/etcd.service <<-'EOF' 
[Unit]
Description=Etcd Server
After=network.target
[Service]
Type=notify
ExecStart=/usr/local/bin/etcd \
--name=k8s-master-01 \
--data-dir=/var/lib/etcd/default.etcd \
--listen-peer-urls=http://192.168.66.11:2380 \
--listen-client-urls=http://192.168.66.11:2379,http://127.0.0.1:2379 \
--advertise-client-urls=http://192.168.66.11:2379 \
--initial-advertise-peer-urls=http://192.168.66.11:2380 \
--initial-cluster=k8s-master-01=http://192.168.66.11:2380,k8s-master-02=http://192.168.66.12:2380,k8s-master-03=http://192.168.66.13:2380 \
--initial-cluster-token=smartgo \
--initial-cluster-state=new
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

# 2.2 etcd configuration on k8s-master-02
sudo tee /usr/lib/systemd/system/etcd.service <<-'EOF' 
[Unit]
Description=Etcd Server
After=network.target
[Service]
Type=notify
ExecStart=/usr/local/bin/etcd \
--name=k8s-master-02 \
--data-dir=/var/lib/etcd/default.etcd \
--listen-peer-urls=http://192.168.66.12:2380 \
--listen-client-urls=http://192.168.66.12:2379,http://127.0.0.1:2379 \
--advertise-client-urls=http://192.168.66.12:2379 \
--initial-advertise-peer-urls=http://192.168.66.12:2380 \
--initial-cluster=k8s-master-01=http://192.168.66.11:2380,k8s-master-02=http://192.168.66.12:2380,k8s-master-03=http://192.168.66.13:2380 \
--initial-cluster-token=smartgo \
--initial-cluster-state=new
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

# 2.3 etcd configuration on k8s-master-03
sudo tee /usr/lib/systemd/system/etcd.service <<-'EOF' 
[Unit]
Description=Etcd Server
After=network.target
[Service]
Type=notify
ExecStart=/usr/local/bin/etcd \
--name=k8s-master-03 \
--data-dir=/var/lib/etcd/default.etcd \
--listen-peer-urls=http://192.168.66.13:2380 \
--listen-client-urls=http://192.168.66.13:2379,http://127.0.0.1:2379 \
--advertise-client-urls=http://192.168.66.13:2379 \
--initial-advertise-peer-urls=http://192.168.66.13:2380 \
--initial-cluster=k8s-master-01=http://192.168.66.11:2380,k8s-master-02=http://192.168.66.12:2380,k8s-master-03=http://192.168.66.13:2380 \
--initial-cluster-token=smartgo \
--initial-cluster-state=new
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF

# 3. Start etcd on all three master nodes and enable it at boot
systemctl enable --now etcd
# Check the version
[admin@k8s-master-01 ~]$ etcd --version
etcd Version: 3.5.11
Git SHA: 3b252db4f
Go Version: go1.20.12
Go OS/Arch: linux/amd64
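# (Optional check) once all three masters are running etcd, verify cluster health from any node
etcdctl --endpoints=http://192.168.66.11:2379,http://192.168.66.12:2379,http://192.168.66.13:2379 endpoint health
etcdctl --endpoints=http://192.168.66.11:2379,http://192.168.66.12:2379,http://192.168.66.13:2379 member list -w table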

Install the External Load Balancer and High Availability

# Apply the same configuration on master01, master02, and master03
wget https://nginx.org/download/nginx-1.27.1.tar.gz
tar -zxvf nginx-1.27.1.tar.gz
cd nginx-1.27.1
yum -y install pcre pcre-devel zlib zlib-devel gcc gcc-c++
./configure --prefix=/usr/local/nginx --with-stream
make && make install
useradd  -s /sbin/nologin  -M nginx

# cat /usr/local/nginx/conf/nginx.conf
user  nginx;
worker_processes  auto;

error_log  /usr/local/nginx/logs/error.log notice;
pid        /usr/local/nginx/logs/nginx.pid;


events {
    worker_connections  1024;
}

stream {
    log_format  main  '$remote_addr $upstream_addr - [$time_local] $status $upstream_bytes_sent';
    access_log  /usr/local/nginx/logs/k8s-access.log  main;
    upstream k8s-apiserver {
       server 192.168.66.11:6443;               # k8s-master-01 IP, port 6443
       server 192.168.66.12:6443;               # k8s-master-02 IP, port 6443
       server 192.168.66.13:6443;               # k8s-master-03 IP, port 6443
    }
    server {
       listen 16443;                            # listen on 16443; nginx shares the host with the master, so 6443 cannot be reused
       proxy_pass k8s-apiserver;                # reverse proxy to the apiserver upstream
    }
}
http {
    include       /usr/local/nginx/conf/mime.types;
    default_type  application/octet-stream;

    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';
    access_log  /usr/local/nginx/logs/access.log  main;
    sendfile        on;
    #tcp_nopush     on;
    keepalive_timeout  65;
    #gzip  on;
    include /etc/nginx/conf.d/*.conf;
}


# cat /etc/systemd/system/nginx.service 
[Unit]
Description=A high performance web server and a reverse proxy server
After=network.target

[Service]
Type=forking
ExecStart=/usr/local/nginx/sbin/nginx
ExecReload=/usr/local/nginx/sbin/nginx -s reload
ExecStop=/usr/local/nginx/sbin/nginx -s quit
PIDFile=/usr/local/nginx/logs/nginx.pid
TimeoutStopSec=5
KillMode=mixed
PrivateTmp=true

[Install]
WantedBy=multi-user.target
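# (Optional check) validate the nginx configuration and reload systemd so the new unit is picked up
/usr/local/nginx/sbin/nginx -t
systemctl daemon-reload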

# Build and install keepalived; required on master01, master02, and master03
wget --no-check-certificate https://www.keepalived.org/software/keepalived-2.2.8.tar.gz
tar -zxvf keepalived-2.2.8.tar.gz
cd keepalived-2.2.8
yum -y install openssl openssl-devel
./configure --prefix=/home/admin/software/keepalived --sysconf=/etc
make
make install

# cat /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
   notification_email {
       [email protected]
   }
   notification_email_from [email protected]
   smtp_server smtp.163.com
   smtp_connect_timeout 30
   router_id LVS_DEVEL
   vrrp_skip_check_adv_addr
   #vrrp_strict
   vrrp_garp_interval 0
   vrrp_gna_interval 0
}

vrrp_script chk_apiserver {
  script "/etc/keepalived/check_apiserver.sh"   # health-check script
  interval 5                    # check interval in seconds
  weight  -5                    # priority adjustment on failure
  fall 2
  rise 1
}
vrrp_instance VI_1 {
  state MASTER                  # MASTER on the primary node; BACKUP on the other nodes
  interface ens160              # NIC the VRRP instance binds to
  mcast_src_ip 192.168.66.11    # source IP for VRRP traffic: 192.168.66.11 on k8s-master-01, 192.168.66.12 on k8s-master-02, 192.168.66.13 on k8s-master-03
  virtual_router_id 51          # must be identical on every node of this instance
  priority 100                  # the node with the highest priority is elected MASTER
  advert_int 2
  authentication {              # authentication settings
    auth_type PASS              # authentication type: PASS or AH
    auth_pass K8SHA_KA_AUTH     # authentication password
  }
  virtual_ipaddress {           # VIP list; multiple addresses may be set
    192.168.66.200
  }
  track_script {                # track the health-check script
   chk_apiserver
  }
}

# cat /etc/keepalived/check_apiserver.sh
#!/bin/bash
# If the nginx master process is gone, try to start nginx; if it is still gone
# after 5 seconds, stop keepalived so the VIP fails over to another node.
if [ "$(ps -ef | grep "nginx: master process" | grep -v grep)" == "" ]; then
    /usr/bin/systemctl start nginx
    sleep 5

    if [ "$(ps -ef | grep "nginx: master process" | grep -v grep)" == "" ]; then
        /usr/bin/systemctl stop keepalived
    fi
fi

# Enable and start nginx and keepalived on master01, master02, and master03
systemctl enable nginx
systemctl start nginx
systemctl enable keepalived
systemctl start keepalived
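# (Optional check) the VIP should be held by the current MASTER node and nginx should listen on 16443
ip addr show ens160 | grep 192.168.66.200
ss -lntp | grep 16443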

Install the Required Kubernetes Packages

# Add the kubeadm yum repository
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/
enabled=1
gpgcheck=1
gpgkey=https://pkgs.k8s.io/core:/stable:/v1.29/rpm/repodata/repomd.xml.key
exclude=kubelet kubeadm kubectl cri-tools kubernetes-cni
EOF

# Install the kubeadm 1.29 packages (the repo's exclude line requires --disableexcludes)
yum install -y kubelet-1.29.0 kubectl-1.29.0 kubeadm-1.29.0 --disableexcludes=kubernetes
systemctl enable kubelet.service
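# (Optional check) confirm the versions installed from the new repository
kubeadm version -o short
kubelet --version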

Prepare the Cluster Initialization File

# Print the default configuration for each kind
kubeadm config print init-defaults --component-configs KubeletConfiguration > kubeadm-config.yaml
kubeadm config print init-defaults --component-configs InitConfiguration
kubeadm config print init-defaults --component-configs ClusterConfiguration

# My initialization template
# cat kubeadm-config.yaml
apiVersion: kubeadm.k8s.io/v1beta3
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.66.11	# change to your cluster's master01 IP
  bindPort: 6443
nodeRegistration:
  criSocket: unix:///var/run/cri-dockerd.sock	# the cri-dockerd socket path
  imagePullPolicy: IfNotPresent
  name: k8s-master-01			# the master01 hostname
  taints: null
---
apiServer:
  certSANs:				# include the VIP in the API server certificate
  - 192.168.66.200
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta3
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controllerManager: {}
dns: {}
etcd:
  external:
    endpoints:		# the three external etcd endpoints
      - http://192.168.66.11:2379
      - http://192.168.66.12:2379
      - http://192.168.66.13:2379
imageRepository: registry.aliyuncs.com/google_containers	# use the Aliyun image registry
kind: ClusterConfiguration
kubernetesVersion: 1.29.8		# the target cluster version
networking:
  dnsDomain: cluster.local
  serviceSubnet: 10.96.0.0/12
  podSubnet: 10.244.0.0/16
scheduler: {}
controlPlaneEndpoint: "192.168.66.200:16443"	# the VIP and the nginx proxy port
---
apiVersion: kubelet.config.k8s.io/v1beta1
authentication:
  anonymous:
    enabled: false
  webhook:
    cacheTTL: 0s
    enabled: true
  x509:
    clientCAFile: /etc/kubernetes/pki/ca.crt
authorization:
  mode: Webhook
  webhook:
    cacheAuthorizedTTL: 0s
    cacheUnauthorizedTTL: 0s
cgroupDriver: systemd
clusterDNS:
- 10.96.0.10
clusterDomain: cluster.local
containerRuntimeEndpoint: ""
cpuManagerReconcilePeriod: 0s
evictionPressureTransitionPeriod: 0s
fileCheckFrequency: 0s
healthzBindAddress: 127.0.0.1
healthzPort: 10248
httpCheckFrequency: 0s
imageMaximumGCAge: 0s
imageMinimumGCAge: 0s
kind: KubeletConfiguration
logging:
  flushFrequency: 0
  options:
    json:
      infoBufferSize: "0"
  verbosity: 0
memorySwap: {}
nodeStatusReportFrequency: 0s
nodeStatusUpdateFrequency: 0s
rotateCertificates: true
runtimeRequestTimeout: 0s
shutdownGracePeriod: 0s
shutdownGracePeriodCriticalPods: 0s
staticPodPath: /etc/kubernetes/manifests
streamingConnectionIdleTimeout: 0s
syncFrequency: 0s
volumeStatsAggPeriod: 0s

master Node Initialization
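# (Optional, a sketch) pre-pull the control-plane images with the same config file before running init;
# kubeadm should read the image repository, version, and CRI socket from kubeadm-config.yaml
kubeadm config images pull --config kubeadm-config.yaml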

kubeadm init --config kubeadm-config.yaml --upload-certs --v=9 
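# After a successful init, kubeadm prints the kubeconfig setup and join commands; the usual
# follow-up on master01 (kubeadm default paths) is:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get nodes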

Deploy the Calico Network

https://docs.tigera.io/calico/latest/getting-started/kubernetes/self-managed-onprem/onpremises#install-calico-with-kubernetes-api-datastore-more-than-50-nodes

curl https://raw.githubusercontent.com/projectcalico/calico/v3.26.3/manifests/calico-typha.yaml -o calico.yaml
# Set CALICO_IPV4POOL_CIDR in the manifest to the pod subnet (10.244.0.0/16)
	
# Switch to BGP mode: change CALICO_IPV4POOL_IPIP from "Always" to "Off" in the manifest
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
  value: "Always"  # change to "Off"
  
kubectl apply -f calico.yaml
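# (Optional check) the node turns Ready once the Calico pods are up
kubectl get pods -n kube-system -o wide | grep calico
kubectl get nodes -o wide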
