
Cluster Verification

16.1 Install busybox (on master01)
cat <<EOF | kubectl apply -f -
apiVersion: v1
kind: Pod
metadata:
  name: busybox
  namespace: default
spec:
  containers:
  - name: busybox
    image: busybox:1.28
    command:
    - sleep
    - "3600"
    imagePullPolicy: IfNotPresent
  restartPolicy: Always
EOF
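
Once applied, it may help to wait for the Pod to become Ready before running the checks below; a minimal sketch using kubectl wait (the 60-second timeout is an assumed value, not from the original guide):

# Block until the busybox Pod reports Ready (timeout value is arbitrary)
kubectl wait --for=condition=Ready pod/busybox -n default --timeout=60s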
16.2 Verification steps (on master01)
1. Pods must be able to resolve Services
2. Pods must be able to resolve Services in other namespaces
3. Every node must be able to reach the kubernetes Service on port 443 and the kube-dns Service on port 53
4. Pod-to-Pod communication must work:
a) within the same namespace
b) across namespaces
c) across machines
16.3 Step-by-step demonstration (on master01)
# First, check that the busybox Pod came up successfully
[root@k8s-master01 ~]# kubectl get po
NAME READY STATUS RESTARTS AGE
busybox 1/1 Running 0 3m11s

# Check that the Service is up
[root@k8s-master01 ~]# kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
kubernetes ClusterIP 10.96.0.1 <none> 443/TCP 163m
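
The DNS checks below assume kube-dns is serving at 10.96.0.10; it may be worth confirming that ClusterIP in your own cluster first:

# Confirm the kube-dns Service address (expected to be 10.96.0.10 in this guide)
kubectl get svc -n kube-system kube-dns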

# Check that the Pod can resolve a Service
[root@k8s-master01 ~]# kubectl exec busybox -n default -- nslookup kubernetes
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name: kubernetes
Address 1: 10.96.0.1 kubernetes.default.svc.cluster.local

# Check that the Pod can resolve a Service in another namespace
[root@k8s-master01 ~]# kubectl exec busybox -n default -- nslookup kube-dns.kube-system
Server: 10.96.0.10
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

Name: kube-dns.kube-system
Address 1: 10.96.0.10 kube-dns.kube-system.svc.cluster.local

# If your output matches the above, DNS resolution works
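
As an optional extra check, the fully qualified Service name should also resolve; a minimal sketch using the standard cluster.local suffix visible in the outputs above:

# Resolve the FQDN form of the kubernetes Service
kubectl exec busybox -n default -- nslookup kubernetes.default.svc.cluster.local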
16.4 Verify with the telnet command
# Install telnet on all nodes (skip if already installed)
yum install -y telnet

# On all machines, test the kubernetes Service (10.96.0.1:443)
# and the kube-dns Service (10.96.0.10:53)
# If the connection stays open instead of closing immediately, it works
telnet 10.96.0.1 443
telnet 10.96.0.10 53

Trying 10.96.0.1...
Connected to 10.96.0.1.
Escape character is '^]'.
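
If telnet cannot be installed, an equivalent TCP check can be done with bash's built-in /dev/tcp pseudo-device; a minimal sketch (the 3-second timeout is an arbitrary choice):

# Open a raw TCP connection to each Service; prints ok on success
timeout 3 bash -c 'cat < /dev/null > /dev/tcp/10.96.0.1/443' && echo ok || echo failed
timeout 3 bash -c 'cat < /dev/null > /dev/tcp/10.96.0.10/53' && echo ok || echo failed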
16.5 Verify with the curl command (on all machines)
[root@k8s-master01 ~]# curl 10.96.0.10:53
curl: (52) Empty reply from server
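
The "Empty reply from server" is expected: port 53 speaks DNS rather than HTTP, so a completed TCP connection is the success signal here. The API server can be probed the same way; an unauthorized JSON response still proves the Service is reachable (-k skips certificate verification):

# A 401/403 JSON body from the API server confirms connectivity
curl -k https://10.96.0.1:443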
16.6 Container verification (on master01)
[root@k8s-master01 ~]# kubectl get po -n kube-system
NAME READY STATUS RESTARTS AGE
calico-kube-controllers-5f6d4b864b-pq2qw 1/1 Running 0 62m
calico-node-75blv 1/1 Running 0 62m
calico-node-hw27b 1/1 Running 0 62m
calico-node-k2wdf 1/1 Running 0 62m
calico-node-l58lz 1/1 Running 0 62m
calico-node-v2qlq 1/1 Running 0 62m
coredns-867d46bfc6-8vzrk 1/1 Running 0 72m
metrics-server-595f65d8d5-kgn8c 1/1 Running 0 60m

[root@k8s-master01 ~]# kubectl get po -n kube-system -owide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
calico-kube-controllers-5f6d4b864b-pq2qw 1/1 Running 0 63m 192.168.1.100 k8s-master01 <none> <none>
calico-node-75blv 1/1 Running 0 63m 192.168.1.103 k8s-node01 <none> <none>
calico-node-hw27b 1/1 Running 0 63m 192.168.1.101 k8s-master02 <none> <none>
calico-node-k2wdf 1/1 Running 0 63m 192.168.1.100 k8s-master01 <none> <none>
calico-node-l58lz 1/1 Running 0 63m 192.168.1.102 k8s-master03 <none> <none>
calico-node-v2qlq 1/1 Running 0 63m 192.168.1.104 k8s-node02 <none> <none>
coredns-867d46bfc6-8vzrk 1/1 Running 0 73m 172.161.125.2 k8s-node01 <none> <none>
metrics-server-595f65d8d5-kgn8c 1/1 Running 0 62m 172.161.125.1 k8s-node01 <none> <none>

# If you can get a shell inside the container, it's working
[root@k8s-master01 ~]# kubectl exec -it calico-node-v2qlq -n kube-system -- sh
sh-4.4#

# Exec into a calico-node Pod and confirm it can ping the other nodes
[root@k8s-master01 ~]# kubectl exec -it calico-node-v2qlq -n kube-system -- bash
[root@k8s-node02 /]# ping 192.168.1.104
PING 192.168.1.104 (192.168.1.104) 56(84) bytes of data.
64 bytes from 192.168.1.104: icmp_seq=1 ttl=64 time=0.123 ms
64 bytes from 192.168.1.104: icmp_seq=2 ttl=64 time=0.090 ms
^C
--- 192.168.1.104 ping statistics ---
2 packets transmitted, 2 received, 0% packet loss, time 46ms
rtt min/avg/max/mdev = 0.090/0.106/0.123/0.019 ms
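
The ping above exercises node-level connectivity, since calico-node Pods use the host network. To also cover Pod-to-Pod traffic across namespaces and machines (items 4b and 4c in 16.2), you can ping a Pod IP directly from busybox; a minimal sketch using the coredns Pod IP from the -owide output above (substitute a Pod IP from your own cluster, ideally one on a different node than busybox):

# busybox (default namespace) pings coredns (kube-system, on k8s-node01)
kubectl exec busybox -n default -- ping -c 2 172.161.125.2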