- 1. First, check the Node status on the master node
- 2. Then check the pod status
- 3. Cordon the k8s-node03 node and drain the pods running on it
- 4. Delete the k8s-node03 node
- 5. Check the pods again and confirm that the pods previously on k8s-node03 have been rescheduled onto the remaining nodes
- 6. Finally, run the cleanup steps on the k8s-node03 node itself
To remove a Node from a Kubernetes cluster, for example the node k8s-node03, proceed as follows.
1. First, check the Node status on the master node
[root@k8s-master01 ~]# kubectl get nodes
NAME         STATUS   ROLES    AGE   VERSION
k8s-node01   Ready    <none>   47d   v1.14.2
k8s-node02   Ready    <none>   47d   v1.14.2
k8s-node03   Ready    <none>   47d   v1.14.2
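Before going further, it can be worth checking what the node is currently running and whether it carries any taints. This is an optional, illustrative check and was not part of the original session:

kubectl describe node k8s-node03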
2. Then check the pod status
[root@k8s-master01 ~]# kubectl get pods -o wide
NAME                        READY   STATUS    RESTARTS   AGE   IP             NODE         NOMINATED NODE   READINESS GATES
dnsutils-ds-5sc4z           1/1     Running   963        40d   172.30.56.3    k8s-node02   <none>           <none>
dnsutils-ds-h546r           1/1     Running   963        40d   172.30.72.5    k8s-node03   <none>           <none>
dnsutils-ds-jx5kx           1/1     Running   963        40d   172.30.88.4    k8s-node01   <none>           <none>
kevin-nginx                 1/1     Running   0          27d   172.30.72.11   k8s-node03   <none>           <none>
my-nginx-5dd67b97fb-69gvm   1/1     Running   0          40d   172.30.72.4    k8s-node03   <none>           <none>
my-nginx-5dd67b97fb-8j4k6   1/1     Running   0          40d   172.30.88.3    k8s-node01   <none>           <none>
nginx-7db9fccd9b-dkdzf      1/1     Running   0          27d   172.30.88.8    k8s-node01   <none>           <none>
nginx-7db9fccd9b-t8njb      1/1     Running   0          27d   172.30.72.10   k8s-node03   <none>           <none>
nginx-7db9fccd9b-vrp9f      1/1     Running   0          27d   172.30.56.6    k8s-node02   <none>           <none>
nginx-ds-4lf8z              1/1     Running   0          41d   172.30.56.2    k8s-node02   <none>           <none>
nginx-ds-6kfsw              1/1     Running   0          41d   172.30.72.2    k8s-node03   <none>           <none>
nginx-ds-xqdgw              1/1     Running   0          41d   172.30.88.2    k8s-node01   <none>           <none>
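To list only the pods scheduled on the node that is about to be removed, a field selector can be used. This is an optional sketch, not taken from the original session:

kubectl get pods --all-namespaces -o wide --field-selector spec.nodeName=k8s-node03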
3. Cordon the k8s-node03 node and drain the pods running on it
[root@k8s-master01 ~]# kubectl drain k8s-node03 --delete-local-data --force --ignore-daemonsets
node/k8s-node03 cordoned
WARNING: deleting Pods not managed by ReplicationController, ReplicaSet, Job, DaemonSet or StatefulSet: default/kevin-nginx; ignoring DaemonSet-managed Pods: default/dnsutils-ds-h546r, default/nginx-ds-6kfsw, kube-system/node-exporter-zmb68
evicting pod "metrics-server-54997795d9-rczmc"
evicting pod "kevin-nginx"
evicting pod "nginx-7db9fccd9b-t8njb"
evicting pod "coredns-5b969f4c88-pd5js"
evicting pod "kubernetes-dashboard-7976c5cb9c-4jpzb"
evicting pod "my-nginx-5dd67b97fb-69gvm"
pod/my-nginx-5dd67b97fb-69gvm evicted
pod/coredns-5b969f4c88-pd5js evicted
pod/nginx-7db9fccd9b-t8njb evicted
pod/kubernetes-dashboard-7976c5cb9c-4jpzb evicted
pod/kevin-nginx evicted
pod/metrics-server-54997795d9-rczmc evicted
node/k8s-node03 evicted
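Note that kubectl drain first cordons the node (marks it SchedulingDisabled) and then evicts all pods that are not managed by a DaemonSet. If you only want to stop new pods from being scheduled onto the node, or if you decide not to remove it after all, the related commands are (shown for illustration, not run above):

kubectl cordon k8s-node03      # only mark the node unschedulable, evict nothing
kubectl uncordon k8s-node03    # undo the cordon and allow scheduling again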
4. Then delete the k8s-node03 node
[root@k8s-master01 ~]# kubectl delete node k8s-node03
node "k8s-node03" deleted
5. Check the pods again: the Deployment-managed pods that were on k8s-node03 have been rescheduled onto the remaining nodes, while the DaemonSet pods that ran there (dnsutils-ds-h546r, nginx-ds-6kfsw) are simply gone rather than rescheduled
[root@k8s-master01 ~]# kubectl get pods -o wide
NAME                        READY   STATUS    RESTARTS   AGE   IP             NODE         NOMINATED NODE   READINESS GATES
dnsutils-ds-5sc4z           1/1     Running   963        40d   172.30.56.3    k8s-node02   <none>           <none>
dnsutils-ds-jx5kx           1/1     Running   963        40d   172.30.88.4    k8s-node01   <none>           <none>
my-nginx-5dd67b97fb-8j4k6   1/1     Running   0          40d   172.30.88.3    k8s-node01   <none>           <none>
my-nginx-5dd67b97fb-kx2pc   1/1     Running   0          98s   172.30.56.7    k8s-node02   <none>           <none>
nginx-7db9fccd9b-7vbhq      1/1     Running   0          98s   172.30.88.7    k8s-node01   <none>           <none>
nginx-7db9fccd9b-dkdzf      1/1     Running   0          27d   172.30.88.8    k8s-node01   <none>           <none>
nginx-7db9fccd9b-vrp9f      1/1     Running   0          27d   172.30.56.6    k8s-node02   <none>           <none>
nginx-ds-4lf8z              1/1     Running   0          41d   172.30.56.2    k8s-node02   <none>           <none>
nginx-ds-xqdgw              1/1     Running   0          41d   172.30.88.2    k8s-node01   <none>           <none>
[root@k8s-master01 ~]# kubectl get nodes
NAME         STATUS   ROLES    AGE   VERSION
k8s-node01   Ready    <none>   47d   v1.14.2
k8s-node02   Ready    <none>   47d   v1.14.2
6. Finally, run the cleanup steps on the k8s-node03 node itself
[root@k8s-node03 ~]# systemctl stop kubelet kube-proxy flanneld docker
[root@k8s-node03 ~]# source /opt/k8s/bin/environment.sh
[root@k8s-node03 ~]# mount | grep "${K8S_DIR}" | awk '{print $3}'|xargs sudo umount
[root@k8s-node03 ~]# rm -rf ${K8S_DIR}/kubelet
[root@k8s-node03 ~]# rm -rf ${DOCKER_DIR}
[root@k8s-node03 ~]# rm -rf /var/run/flannel/
[root@k8s-node03 ~]# rm -rf /var/run/docker/
[root@k8s-node03 ~]# rm -rf /etc/systemd/system/{kubelet,docker,flanneld,kube-nginx}.service
[root@k8s-node03 ~]# rm -rf /opt/k8s/bin/*
[root@k8s-node03 ~]# rm -rf /etc/flanneld/cert /etc/kubernetes/cert
[root@k8s-node03 ~]# iptables -F && iptables -X && iptables -F -t nat && iptables -X -t nat
[root@k8s-node03 ~]# ip link del flannel.1
[root@k8s-node03 ~]# ip link del docker0
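If kube-proxy on this node was running in IPVS mode (an assumption, the proxy mode is not shown above), its leftover virtual servers and the dummy interface it creates can also be cleaned up; this is an optional extra step, not part of the original procedure:

ipvsadm --clear        # remove all IPVS virtual server rules left by kube-proxy
ip link del kube-ipvs0 # remove the dummy interface kube-proxy's IPVS mode creates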