kubelet
签发证书
- 创建kubelet-csr.json
[root@node5 certs]# cat kubelet-csr.json
{
"CN": "k8s-kubelet",
"hosts": [
"127.0.0.1",
"172.16.6.181",
"172.16.6.182",
"172.16.6.183",
"172.16.6.184",
"172.16.6.185"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "beijing",
"L": "beijing",
"O": "od",
"OU": "ops"
}
]
}
- cfssl命令签发kubelet证书
[root@node5 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json --profile=server kubelet-csr.json | cfssl-json -bare kubelet
2020/07/28 04:35:27 [INFO] generate received request
2020/07/28 04:35:27 [INFO] received CSR
2020/07/28 04:35:27 [INFO] generating key: rsa-2048
2020/07/28 04:35:27 [INFO] encoded CSR
2020/07/28 04:35:27 [INFO] signed certificate with serial number 495010976119244979069741357240867263076184758969
2020/07/28 04:35:27 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
-
证书下发到需要安装kubelet的节点
-
创建kubelet的配置
-
kubelet配置需要在两个节点操作
-
在/opt/kubernetes/server/bin/conf目录下创建kubelet的配置,如果ca发生变化需要重新签发证书并重新生成kubelet.kubeconfig
-
set-cluster 创建需要连接的集群信息,可以创建多个k8s集群信息。
[root@node2 conf]# pwd
/opt/kubernetes/server/bin/conf
[root@node2 conf]# kubectl config set-cluster myk8s \
--certificate-authority=/opt/kubernetes/server/bin/certs/ca.pem \
--embed-certs=true \
--server=https://172.16.6.180:7443 \
--kubeconfig=kubelet.kubeconfig
Cluster "myk8s" set.
[root@node2 conf]# ls kubelet.kubeconfig
kubelet.kubeconfig
- set-credentials 创建用户帐号
[root@node2 conf]# kubectl config set-credentials k8s-node \
--client-certificate=/opt/kubernetes/server/bin/certs/client.pem \
--client-key=/opt/kubernetes/server/bin/certs/client-key.pem \
--embed-certs=true \
--kubeconfig=kubelet.kubeconfig
User "k8s-node" set.
- set-context 设置context 确定帐号和集群的对应关系
[root@node2 conf]# kubectl config set-context myk8s-context \
--cluster=myk8s \
--user=k8s-node \
--kubeconfig=kubelet.kubeconfig
Context "myk8s-context" created.
- use-context 设置当前使用哪个context
[root@node2 conf]# kubectl config use-context myk8s-context --kubeconfig=kubelet.kubeconfig
Switched to context "myk8s-context".
- 授权k8s-node用户
- 创建k8s-node.yaml,授权k8s-node用户绑定system:node集群角色,让k8s-node具备运算节点的权限.
- 此操作只在一台master节点执行
[root@node2 conf]# cat k8s-node.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: k8s-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: k8s-node
[root@node2 conf]# kubectl create -f k8s-node.yaml
clusterrolebinding.rbac.authorization.k8s.io/k8s-node created
[root@node2 conf]# kubectl get clusterrolebinding k8s-node -o yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
creationTimestamp: "2020-07-28T06:52:25Z"
name: k8s-node
resourceVersion: "28913"
selfLink: /apis/rbac.authorization.k8s.io/v1/clusterrolebindings/k8s-node
uid: b88b7d7d-d6e3-4cd3-a573-2b05de595a4a
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: k8s-node
[root@node2 conf]# kubectl get node
NAME AGE
k8s-node 114s
- 拷贝kubelet.kubeconfig配置文件到另一个节点。
[root@node2 conf]# scp kubelet.kubeconfig node3:/opt/kubernetes/server/bin/conf/
root@node3's password:
kubelet.kubeconfig 100% 6198 2.8MB/s 00:00
- 准备pause的基础镜像,并上传到harbor.od.com
- 仅在node5节点执行
[root@node5 harbor]# docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/pause-amd64:3.1
[root@node5 harbor]# docker tag da86e6ba6ca1 harbor.od.com/public/pause
[root@node5 harbor]# docker push harbor.od.com/public/pause
The push refers to repository [harbor.od.com/public/pause]
e17133b79956: Pushed
latest: digest: sha256:fcaff905397ba63fd376d0c3019f1f1cb6e7506131389edbcb3d22719f1ae54d size: 527
[root@node5 harbor]#
- 创建kubelet的启动脚本
[root@node2 bin]# cat kubelet-startup.sh
#!/bin/bash
# Kubelet startup script.
# Resolves its own real location and cd's there so the relative ./certs and
# ./conf paths passed to kubelet below resolve correctly regardless of how
# the script is invoked (e.g. via supervisord with a different cwd).
WORK_DIR=$(dirname "$(readlink -f "$0")")
# Abort if we cannot enter the work dir — running kubelet from the wrong
# directory would make every relative cert/kubeconfig path fail.
cd "$WORK_DIR" || exit 1
/opt/kubernetes/server/bin/kubelet \
  --anonymous-auth=false \
  --cgroup-driver systemd \
  --cluster-dns 192.168.0.2 \
  --cluster-domain cluster.local \
  --runtime-cgroups=/systemd/system.slice \
  --kubelet-cgroups=/systemd/system.slice \
  --fail-swap-on="false" \
  --client-ca-file ./certs/ca.pem \
  --tls-cert-file ./certs/kubelet.pem \
  --tls-private-key-file ./certs/kubelet-key.pem \
  --hostname-override node2.host.com \
  --image-gc-high-threshold 20 \
  --image-gc-low-threshold 10 \
  --kubeconfig ./conf/kubelet.kubeconfig \
  --log-dir /data/logs/kubernetes/kube-kubelet \
  --pod-infra-container-image harbor.od.com/public/pause:latest \
  --root-dir /data/kubelet
- 创建目录和修改脚本权限
[root@node2 bin]# chmod +x kubelet-startup.sh
[root@node2 bin]# mkdir -p /data/logs/kubernetes/kube-kubelet /data/kubelet
[root@node3 supervisord.d]# cat kube-kubelet.ini
[program:kube-kubelet-node3]
command=/opt/kubernetes/server/bin/kubelet-startup.sh
numprocs=1
directory=/opt/kubernetes/server/bin
autostart=true
autorestart=true
startsecs=30
startretries=3
exitcodes=0,2
stopsignal=QUIT
stopwaitsecs=10
user=root
redirect_stderr=true
stdout_logfile=/data/logs/kubernetes/kube-kubelet/kubelet.stdout.log
stdout_logfile_maxbytes=64MB
stdout_logfile_backups=5
stdout_capture_maxbytes=1MB
stdout_events_enabled=false
[root@node3 supervisord.d]# supervisorctl status
etcd-server-node3 RUNNING pid 26427, uptime 16:25:34
kube-apiserver-node3 RUNNING pid 26426, uptime 16:25:34
kube-controller-manager-node3 RUNNING pid 26428, uptime 16:25:34
kube-kubelet-node3 RUNNING pid 27861, uptime 0:00:33
kube-scheduler-node3 RUNNING pid 26489, uptime 16:23:07
- 查看node
[root@node2 bin]# kubectl get node
NAME STATUS ROLES AGE VERSION
node2.host.com Ready <none> 10m v1.15.11
node3.host.com Ready <none> 47s v1.15.11
- 修改node的roles
- node2 node3都执行
- 每台node分别做master和node
- roles只是一个label
[root@node2 bin]# kubectl label node node2.host.com node-role.kubernetes.io/master=
node/node2.host.com labeled
[root@node2 bin]# kubectl get node
NAME STATUS ROLES AGE VERSION
node2.host.com Ready master 14m v1.15.11
node3.host.com Ready <none> 4m4s v1.15.11
[root@node2 bin]# kubectl label node node2.host.com node-role.kubernetes.io/node=
node/node2.host.com labeled
[root@node2 bin]# kubectl get node
NAME STATUS ROLES AGE VERSION
node2.host.com Ready master,node 14m v1.15.11
node3.host.com Ready <none> 4m19s v1.15.11
网友评论