Issue the certificate
- Create kube-proxy-csr.json
[root@node5 certs]# cat kube-proxy-csr.json
{
    "CN": "system:kube-proxy",
    "key": {
        "algo": "rsa",
        "size": 2048
    },
    "names": [
        {
            "C": "CN",
            "ST": "beijing",
            "L": "beijing",
            "O": "od",
            "OU": "ops"
        }
    ]
}
[root@node5 certs]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client kube-proxy-csr.json | cfssl-json -bare kube-proxy-client
2020/07/28 09:09:05 [INFO] generate received request
2020/07/28 09:09:05 [INFO] received CSR
2020/07/28 09:09:05 [INFO] generating key: rsa-2048
2020/07/28 09:09:06 [INFO] encoded CSR
2020/07/28 09:09:06 [INFO] signed certificate with serial number 538425229982659422420343343620030686628817699859
2020/07/28 09:09:06 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
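The CN system:kube-proxy is the field that matters here: Kubernetes ships a built-in ClusterRoleBinding (system:node-proxier) for exactly that user, so no extra RBAC objects are needed. To double-check the subject of the issued certificate, cfssl-certinfo can be used (assuming it was installed alongside cfssl and cfssl-json):
[root@node5 certs]# cfssl-certinfo -cert kube-proxy-client.pem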
- Distribute the certificates
- Copy them to the nodes that will run kube-proxy: node2 and node3
[root@node5 certs]# scp kube-proxy-client.pem kube-proxy-client-key.pem node2:/opt/kubernetes/server/bin/certs/
root@node2's password:
kube-proxy-client.pem 100% 1375 1.2MB/s 00:00
kube-proxy-client-key.pem 100% 1675 1.3MB/s 00:00
[root@node5 certs]# scp kube-proxy-client.pem kube-proxy-client-key.pem node3:/opt/kubernetes/server/bin/certs/
root@node3's password:
kube-proxy-client.pem 100% 1375 1.1MB/s 00:00
kube-proxy-client-key.pem 100% 1675 1.3MB/s 00:00
Create the kube-proxy kubeconfig
[root@node2 conf]# kubectl config set-cluster myk8s --certificate-authority=/opt/kubernetes/server/bin/certs/ca.pem --embed-certs=true --server=https://172.16.6.180:7443 --kubeconfig=/opt/kubernetes/server/bin/conf/kube-proxy.kubeconfig
Cluster "myk8s" set.
[root@node2 conf]#
[root@node2 conf]# kubectl config set-credentials kube-proxy --client-certificate=/opt/kubernetes/server/bin/certs/kube-proxy-client.pem --client-key=/opt/kubernetes/server/bin/certs/kube-proxy-client-key.pem --embed-certs=true --kubeconfig=/opt/kubernetes/server/bin/conf/kube-proxy.kubeconfig
User "kube-proxy" set.
[root@node2 conf]# kubectl config set-context myk8s-context --cluster=myk8s --user=kube-proxy --kubeconfig=/opt/kubernetes/server/bin/conf/kube-proxy.kubeconfig
Context "myk8s-context" created.
[root@node2 conf]# kubectl config use-context myk8s-context --kubeconfig=kube-proxy.kubeconfig
Switched to context "myk8s-context".
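Before pointing kube-proxy at this kubeconfig, it is worth confirming that the cluster, user, and context all landed in the file (kubectl redacts the embedded certificate data in the output):
[root@node2 conf]# kubectl config view --kubeconfig=kube-proxy.kubeconfig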
Load the ipvs kernel modules
[root@node2 bin]# cat /opt/ipvs.sh
#!/bin/bash
# Enumerate every ipvs module shipped with the running kernel
ipvs_mods=$(ls /usr/lib/modules/$(uname -r)/kernel/net/netfilter/ipvs | grep -o "^[^.]*")
for i in $ipvs_mods
do
  echo $i
  # Load the module only if modinfo confirms it exists for this kernel
  /sbin/modinfo -F filename $i >/dev/null 2>&1 && /sbin/modprobe $i
done
[root@node2 bin]# chmod +x /opt/ipvs.sh
[root@node2 bin]# sh /opt/ipvs.sh
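A quick check that the modules actually loaded (ip_vs_nq must be present, since the startup script below selects the nq scheduler):
[root@node2 bin]# lsmod | grep ip_vs
Note that the script only affects the running kernel; to survive a reboot it would need to be invoked from rc.local or a systemd unit.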
Create the kube-proxy startup script
[root@node2 bin]# cat kube-proxy-startup.sh
#!/bin/sh
# cd into the script's own directory so the relative kubeconfig path resolves
WORK_DIR=$(dirname $(readlink -f $0))
[ $? -eq 0 ] && cd $WORK_DIR || exit
/opt/kubernetes/server/bin/kube-proxy \
  --cluster-cidr 172.27.0.0/16 \
  --hostname-override node2.host.com \
  --proxy-mode=ipvs \
  --ipvs-scheduler=nq \
  --kubeconfig ./conf/kube-proxy.kubeconfig
[root@node2 bin]# chmod +x kube-proxy-startup.sh
[root@node2 bin]# mkdir -p /data/logs/kubernetes/kube-proxy
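Before handing the script to supervisord, a one-off foreground run is a cheap way to catch flag or path mistakes (stop it with Ctrl+C once it starts syncing rules):
[root@node2 bin]# ./kube-proxy-startup.sh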
Create the supervisord configuration
[root@node2 bin]# cat /etc/supervisord.d/kube-proxy.ini
[program:kube-proxy-node2]
command=/opt/kubernetes/server/bin/kube-proxy-startup.sh
numprocs=1
directory=/opt/kubernetes/server/bin
autostart=true
autorestart=true
startsecs=30
startretries=3
exitcodes=0,2
stopsignal=QUIT
stopwaitsecs=10
user=root
redirect_stderr=true
stdout_logfile=/data/logs/kubernetes/kube-proxy/proxy.stdout.log
stdout_logfile_maxbytes=64MB
stdout_logfile_backups=5
stdout_capture_maxbytes=1MB
stdout_events_enabled=false
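node3 reuses the same two files with per-node substitutions; assuming they were copied over from node2 unchanged, something like:
[root@node3 bin]# sed -i 's/node2.host.com/node3.host.com/' kube-proxy-startup.sh
[root@node3 bin]# sed -i 's/kube-proxy-node2/kube-proxy-node3/' /etc/supervisord.d/kube-proxy.ini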
Add kube-proxy to supervisord
[root@node2 bin]# supervisorctl update
kube-proxy-node2: added process group
[root@node2 bin]# supervisorctl status
etcd-server-node2 RUNNING pid 5226, uptime 17:08:29
kube-apiserver-node2 RUNNING pid 5225, uptime 17:08:29
kube-controller-manager-node2 RUNNING pid 5228, uptime 17:08:29
kube-kubelet-node2 RUNNING pid 5227, uptime 17:08:29
kube-proxy-node2 STARTING
Verify the cluster
Because the supervisor config sets startsecs=30, kube-proxy reports STARTING for its first 30 seconds and only then flips to RUNNING:
[root@node2 bin]# supervisorctl status
etcd-server-node2 RUNNING pid 5226, uptime 17:22:19
kube-apiserver-node2 RUNNING pid 5225, uptime 17:22:19
kube-controller-manager-node2 RUNNING pid 5228, uptime 17:22:19
kube-kubelet-node2 RUNNING pid 5227, uptime 17:22:19
kube-proxy-node2 RUNNING pid 17271, uptime 0:13:55
[root@node3 bin]# supervisorctl status
etcd-server-node3 RUNNING pid 2475, uptime 17:23:13
kube-apiserver-node3 RUNNING pid 2474, uptime 17:23:13
kube-controller-manager-node3 RUNNING pid 2477, uptime 17:23:13
kube-kubelet-node3 RUNNING pid 2476, uptime 17:23:13
kube-proxy-node3 RUNNING pid 14548, uptime 0:12:03
[root@node3 bin]# yum install ipvsadm -y
[root@node3 bin]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
-> RemoteAddress:Port Forward Weight ActiveConn InActConn
TCP 192.168.0.1:443 nq
-> 172.16.6.183:6443 Masq 1 0 0
-> 172.16.6.184:6443 Masq 1 0 0
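The 192.168.0.1:443 virtual server is the ClusterIP of the default kubernetes service, and its two real servers are the apiserver instances; this can be cross-checked from any node with a working admin kubeconfig:
[root@node3 bin]# kubectl get svc kubernetes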