- Create a namespace
root@master:/home/ljy/桌面# kubectl create namespace cloud
namespace/cloud created
- View cluster info
kubectl cluster-info
root@master:/home/ljy/桌面# kubectl cluster-info
Kubernetes master is running at https://10.0.2.15:6443
KubeDNS is running at https://10.0.2.15:6443/api/v1/namespaces/kube-system/services/kube-dns:dns/proxy
To further debug and diagnose cluster problems, use 'kubectl cluster-info dump'.
- Check the status of each component
kubectl get cs
root@master:/home/ljy/桌面# kubectl get cs
Warning: v1 ComponentStatus is deprecated in v1.19+
NAME                 STATUS    MESSAGE             ERROR
etcd-0               Healthy   {"health":"true"}
scheduler            Healthy   ok
controller-manager   Healthy   ok
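Since the warning above notes that v1 ComponentStatus is deprecated in v1.19+, a rough alternative (assuming the API server exposes the standard health endpoints, which v1.19 does) is to query its readiness endpoint and to check the control-plane pods directly:
kubectl get --raw='/readyz?verbose'
kubectl get pod -n kube-system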
- View cluster nodes
kubectl get nodes
root@master:/home/ljy/桌面# kubectl get nodes
NAME     STATUS   ROLES    AGE    VERSION
master   Ready    master   5d3h   v1.19.4
- View node details
kubectl get nodes -o wide
root@master:/home/ljy/桌面# kubectl get nodes -o wide
NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
master Ready master 5d3h v1.19.4 10.0.2.15 <none> Ubuntu 20.04.1 LTS 5.4.0-56-generic docker://19.3.14
- Label a node
kubectl label nodes <node-name> <label-key>=<label-value>
- View node labels
kubectl get node --show-labels
- Delete a node label
kubectl label node <node-name> <label-key>-
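For example, a minimal sketch using a hypothetical disktype=ssd label on the master node shown above:
kubectl label nodes master disktype=ssd
kubectl get node master --show-labels
kubectl label node master disktype-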
- View namespaces
kubectl get namespaces
root@master:/home/ljy/桌面# kubectl get namespaces
NAME              STATUS   AGE
default           Active   5d3h
kube-node-lease   Active   5d3h
kube-public       Active   5d3h
kube-system       Active   5d3h
- View the pods in a namespace
kubectl get pod -n <namespace>
root@master:/home/ljy/桌面# kubectl get pod -n kube-system
NAME                                      READY   STATUS    RESTARTS   AGE
cloud-communal-service-5d867f6dd4-gd4rq   1/1     Running   0          52m
coredns-f9fd979d6-ffr4w                   1/1     Running   1          5d3h
coredns-f9fd979d6-sr2rq                   1/1     Running   1          5d3h
etcd-master                               1/1     Running   1          5d3h
kube-apiserver-master                     1/1     Running   1          5d3h
kube-controller-manager-master            0/1     Error     3          5d3h
kube-flannel-ds-29jbd                     1/1     Running   0          53m
kube-proxy-n7hjf                          1/1     Running   1          5d3h
kube-scheduler-master                     0/1     Running   3          5d3h
- View pod details
kubectl get pod -n <namespace> -o wide
root@master:/home/ljy/桌面# kubectl get pod -n kube-system -o wide
NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
cloud-communal-service-5d867f6dd4-gd4rq 1/1 Running 0 63m 10.244.0.9 master <none> <none>
coredns-f9fd979d6-ffr4w 1/1 Running 1 5d3h 10.244.0.8 master <none> <none>
coredns-f9fd979d6-sr2rq 1/1 Running 1 5d3h 10.244.0.7 master <none> <none>
etcd-master 1/1 Running 1 5d3h 10.0.2.15 master <none> <none>
kube-apiserver-master 1/1 Running 1 5d3h 10.0.2.15 master <none> <none>
kube-controller-manager-master 0/1 CrashLoopBackOff 5 5d3h 10.0.2.15 master <none> <none>
kube-flannel-ds-29jbd 1/1 Running 0 64m 10.0.2.15 master <none> <none>
kube-proxy-n7hjf 1/1 Running 1 5d3h 10.0.2.15 master <none> <none>
kube-scheduler-master 0/1 Running 5 5d3h 10.0.2.15 master <none> <none>
- View running pods
kubectl get pods -n <namespace> | grep Running
root@master:/home/ljy/桌面# kubectl get pod -n kube-system | grep Running
cloud-communal-service-5d867f6dd4-gd4rq   1/1     Running   0          65m
coredns-f9fd979d6-ffr4w                   1/1     Running   1          5d3h
coredns-f9fd979d6-sr2rq                   1/1     Running   1          5d3h
etcd-master                               1/1     Running   1          5d3h
kube-apiserver-master                     1/1     Running   1          5d3h
kube-flannel-ds-29jbd                     1/1     Running   0          66m
kube-proxy-n7hjf                          1/1     Running   1          5d3h
- View pods in an error state
kubectl get pods -n <namespace> | grep Error
root@master:/home/ljy/桌面# kubectl get pod -n kube-system | grep Error
kube-scheduler-master 0/1 Error 6 5d3h
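An alternative that does not depend on grep is a field selector (a sketch; note that status.phase only distinguishes Pending/Running/Succeeded/Failed/Unknown, so a crash-looping pod whose phase is still Running will not be caught this way):
kubectl get pods -n kube-system --field-selector=status.phase!=Running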
- View pod details and events
kubectl describe pod <pod-name> -n <namespace>
root@master:/home/ljy/桌面# kubectl describe pod kube-scheduler-master -n kube-system
Name: kube-scheduler-master
Namespace: kube-system
Priority: 2000001000
Priority Class Name: system-node-critical
Node: master/10.0.2.15
Start Time: Thu, 10 Dec 2020 11:30:24 +0800
Labels: component=kube-scheduler
tier=control-plane
Annotations: kubernetes.io/config.hash: eacd4884e052077eeb923552f174ef74
kubernetes.io/config.mirror: eacd4884e052077eeb923552f174ef74
kubernetes.io/config.seen: 2020-12-10T11:30:15.783223384+08:00
kubernetes.io/config.source: file
Status: Running
IP: 10.0.2.15
IPs:
IP: 10.0.2.15
Controlled By: Node/master
Containers:
kube-scheduler:
Container ID: docker://fbb4f14a570612f44851c4efce1156cf908671229ee8eb5713d318e98585e8bd
Image: k8s.gcr.io/kube-scheduler:v1.19.4
Image ID: docker://sha256:14cd22f7abe78e59b77c30819906920b3c5677596ef8967b649b87c13e8e65f4
Port: <none>
Host Port: <none>
Command:
kube-scheduler
--authentication-kubeconfig=/etc/kubernetes/scheduler.conf
--authorization-kubeconfig=/etc/kubernetes/scheduler.conf
--bind-address=127.0.0.1
--kubeconfig=/etc/kubernetes/scheduler.conf
--leader-elect=true
State: Waiting
Reason: CrashLoopBackOff
Last State: Terminated
Reason: Error
Exit Code: 255
Started: Tue, 15 Dec 2020 15:16:05 +0800
Finished: Tue, 15 Dec 2020 15:17:08 +0800
Ready: False
Restart Count: 7
Requests:
cpu: 100m
Liveness: http-get https://127.0.0.1:10259/healthz delay=10s timeout=15s period=10s #success=1 #failure=8
Startup: http-get https://127.0.0.1:10259/healthz delay=10s timeout=15s period=10s #success=1 #failure=24
Environment: <none>
Mounts:
/etc/kubernetes/scheduler.conf from kubeconfig (ro)
Conditions:
Type Status
Initialized True
Ready False
ContainersReady False
PodScheduled True
Volumes:
kubeconfig:
Type: HostPath (bare host directory volume)
Path: /etc/kubernetes/scheduler.conf
HostPathType: FileOrCreate
QoS Class: Burstable
Node-Selectors: <none>
Tolerations: :NoExecute op=Exists
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning Unhealthy 26m kubelet Liveness probe failed: Get "https://127.0.0.1:10259/healthz": write tcp 127.0.0.1:45170->127.0.0.1:10259: write: broken pipe
Warning Unhealthy 24m kubelet Liveness probe failed: Get "https://127.0.0.1:10259/healthz": write tcp 127.0.0.1:45632->127.0.0.1:10259: write: connection reset by peer
Warning Unhealthy 22m kubelet Liveness probe failed: Get "https://127.0.0.1:10259/healthz": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)
Warning Unhealthy 21m (x11 over 25m) kubelet Liveness probe failed: Get "https://127.0.0.1:10259/healthz": dial tcp 127.0.0.1:10259: connect: connection refused
Normal Started 21m (x4 over 98m) kubelet Started container kube-scheduler
Warning Unhealthy 18m kubelet Liveness probe failed: Get "https://127.0.0.1:10259/healthz": context deadline exceeded (Client.Timeout exceeded while awaiting headers)
Normal Killing 16m kubelet Container kube-scheduler failed liveness probe, will be restarted
Normal Pulled 15m (x5 over 98m) kubelet Container image "k8s.gcr.io/kube-scheduler:v1.19.4" already present on machine
Normal Created 15m (x5 over 98m) kubelet Created container kube-scheduler
Warning BackOff 5m46s (x8 over 11m) kubelet Back-off restarting failed container
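When describe reports CrashLoopBackOff like this, the container's own log usually shows the root cause; a sketch for the failing scheduler above (--previous prints the log of the last terminated container):
kubectl logs kube-scheduler-master -n kube-system --previous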
- Create resources from a YAML file
kubectl apply -f <filename>.yaml
root@master:/home/ljy/桌面# kubectl apply -f kube-flannel.yml
podsecuritypolicy.policy/psp.flannel.unprivileged created
clusterrole.rbac.authorization.k8s.io/flannel created
clusterrolebinding.rbac.authorization.k8s.io/flannel created
serviceaccount/flannel created
configmap/kube-flannel-cfg created
daemonset.apps/kube-flannel-ds created
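apply accepts any manifest, not just the flannel one above; a minimal sketch (hypothetical names nginx-deployment and nginx:1.19, reusing the cloud namespace created earlier) that generates a Deployment manifest and then creates it the same way:
kubectl create deployment nginx-deployment --image=nginx:1.19 -n cloud --dry-run=client -o yaml > nginx-deployment.yaml
kubectl apply -f nginx-deployment.yaml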
- Restart a pod (force-replace it from its YAML)
kubectl replace --force -f xxxx.yaml
- Gracefully rolling-restart a deployment's pods
kubectl rollout restart deployment <deployment-name> -n <namespace>
# Use kubectl get deployment -n <namespace> -o wide to look up the deployment name
root@master:/home/ljy/桌面# kubectl get deployment -n cloud -o wide
NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR
cloud-communal-service 1/1 1 1 2d22h cloud-communal-service registry.cn-chengdu.aliyuncs.com/lbyjwwyqt/cloud-communal:v1.0.0 app=cloud-communal-service
root@master:/home/ljy/桌面# kubectl rollout restart deployment cloud-communal-service -n cloud
deployment.apps/cloud-communal-service restarted
- Delete a pod
kubectl delete pod <pod-name> -n <namespace>
kubectl get pod -n kube-system
If the pod is still there afterwards (it keeps being recreated), it is most likely managed by a Deployment, so try deleting the Deployment instead:
kubectl get deployment -n kube-system
NAME               READY   UP-TO-DATE   AVAILABLE   AGE
nginx-deployment   1/1     1            1           39m
kubectl delete deployment -n kube-system nginx-deployment
deployment.apps "nginx-deployment" deleted
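If a pod gets stuck in Terminating, a force delete can be attempted as a last resort (it removes the API object immediately without waiting for a graceful shutdown):
kubectl delete pod <pod-name> -n <namespace> --grace-period=0 --force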
- Delete resources from a YAML file
kubectl delete -f ingress.yaml
root@master:/home/ljy/桌面# kubectl delete -f ingress.yaml
Warning: extensions/v1beta1 Ingress is deprecated in v1.14+, unavailable in v1.22+; use networking.k8s.io/v1 Ingress
ingress.extensions "api-ingress" deleted
- Open a shell inside a pod's container
kubectl exec -it <pod-name> -n <namespace> -- /bin/bash
kubectl exec -it nginx-service-777878686f-wx567 -- /bin/bash
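Minimal images often ship without bash; falling back to sh usually works:
kubectl exec -it nginx-service-777878686f-wx567 -- /bin/sh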
- View a pod's application logs
Stream the logs continuously:
kubectl logs -f <pod-name> -n <namespace>
Show only the last N lines (here, the last 100):
kubectl logs --tail=100 <pod-name> -n <namespace>
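The flags combine; for example, to stream only the most recent lines of the service pod seen earlier (reusing the pod name from the output above):
kubectl logs -f --tail=100 cloud-communal-service-5d867f6dd4-gd4rq -n kube-system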
- View services (svc)
kubectl get svc
root@master:/home/ljy/桌面# kubectl get svc
NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)   AGE
kubernetes   ClusterIP   10.96.0.1    <none>        443/TCP   5d4h
- View a service in a specific namespace
root@master:/home/ljy/桌面# kubectl get service -o wide -n kube-system
NAME       TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                  AGE    SELECTOR
kube-dns   ClusterIP   10.96.0.10   <none>        53/UDP,53/TCP,9153/TCP   6d5h   k8s-app=kube-dns
- Get endpoints
kubectl get endpoints --all-namespaces
root@master:/home/ljy/桌面# kubectl get endpoints --all-namespaces
NAMESPACE       NAME                                 ENDPOINTS                                                  AGE
default         kubernetes                           10.0.2.15:6443                                             10d
ingress-nginx   ingress-nginx-controller             10.0.2.15:443,10.0.2.15:80                                 2d19h
ingress-nginx   ingress-nginx-controller-admission   10.0.2.15:8443                                             2d19h
kube-system     kube-controller-manager              <none>                                                     10d
kube-system     kube-dns                             10.244.0.44:53,10.244.0.45:53,10.244.0.44:53 + 3 more...   10d
kube-system     kube-scheduler                       <none>                                                     10d
- View all pods across all namespaces
kubectl get pod -A -o wide
root@master:/home/ljy/桌面# kubectl get pod -A -o wide
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
ingress-nginx ingress-nginx-controller-9f64489f5-7pvwf 1/1 Running 2 2d19h 10.0.2.15 master <none> <none>
kube-system cloud-communal-service-5d867f6dd4-gd4rq 1/1 Running 67 5d20h 10.244.0.43 master <none> <none>
kube-system coredns-f9fd979d6-ffr4w 1/1 Running 9 10d 10.244.0.45 master <none> <none>
kube-system coredns-f9fd979d6-sr2rq 1/1 Running 9 10d 10.244.0.44 master <none> <none>
kube-system etcd-master 1/1 Running 9 10d 10.0.2.15 master <none> <none>
kube-system kube-apiserver-master 1/1 Running 9 10d 10.0.2.15 master <none> <none>
kube-system kube-controller-manager-master 1/1 Running 35 10d 10.0.2.15 master <none> <none>
kube-system kube-flannel-ds-29jbd 1/1 Running 12 5d20h 10.0.2.15 master <none> <none>
kube-system kube-proxy-n7hjf 1/1 Running 9 10d 10.0.2.15 master <none> <none>
kube-system kube-scheduler-master 1/1 Running 35 10d 10.0.2.15 master <none> <none>
- Set a new image for a Deployment
kubectl set image deployment/<deployment-name> -n <namespace> <container-name>=<image>
kubectl set image deployment/nginx-deployment -n nginx nginx=nginx:1.9.1
- View a Deployment's rollout history
kubectl rollout history deployment/<deployment-name> -n <namespace>
kubectl rollout history deployment/nginx-deployment -n nginx
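To inspect a particular revision before rolling back, the --revision flag prints that revision's pod template (same hypothetical deployment as above):
kubectl rollout history deployment/nginx-deployment --revision=2 -n nginx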
- Undo the latest rollout and roll back to the previous revision
kubectl rollout undo deployment <deployment-name> -n <namespace>
kubectl rollout undo deployment/nginx-deployment -n nginx
- Roll back to a specific revision
kubectl rollout undo deployment/<deployment-name> --to-revision=<revision> -n <namespace>
kubectl rollout undo deployment/nginx-deployment --to-revision=2 -n nginx
- Check the rollout status
kubectl rollout status deployment <deployment-name> -n <namespace>
kubectl rollout status deployment nginx-deployment -n nginx