I. Hardware Environment Preparation
No. | IP | OS Version | Hostname | Node Type
---|---|---|---|---
1 | 192.168.0.248 | CentOS 7.6.1810 (Core) | k8s-clusters | master |
2 | 192.168.0.170 | CentOS 7.6.1810 (Core) | k8s-clusters-1 | master |
3 | 192.168.0.222 | CentOS 7.6.1810 (Core) | k8s-clusters-2 | master |
4 | 192.168.0.55 | CentOS 7.6.1810 (Core) | k8s-clusters-3 | node |
II. System Software Environment Setup
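The original post leaves this step implicit. As a rough sketch of the usual kubeadm prerequisites on every node (the exact steps are an assumption; adapt to your environment), and noting that Docker plus kubeadm/kubelet/kubectl v1.23.x must also be installed on all four machines:
# Disable swap, which kubelet requires
swapoff -a
sed -i '/ swap / s/^/#/' /etc/fstab
# Put SELinux into permissive mode
setenforce 0
sed -i 's/^SELINUX=enforcing$/SELINUX=permissive/' /etc/selinux/config
# Make bridged traffic visible to iptables
cat > /etc/sysctl.d/k8s.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sysctl --system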
III. Deploy the HAProxy + Keepalived HA Load Balancer
1. Confirm the kernel version, then enable IPVS
uname -r
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_fo ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
for kernel_module in \${ipvs_modules}; do
    /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
    if [ \$? -eq 0 ]; then
        /sbin/modprobe \${kernel_module}
    fi
done
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules
# Run the script
bash /etc/sysconfig/modules/ipvs.modules
# Check that the ip_vs modules are loaded
lsmod | grep ip_vs
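If something looks off, a quick way to spot any module that failed to load (a small helper loop, not from the original; list only the modules you care about):
for m in ip_vs ip_vs_rr ip_vs_wrr ip_vs_sh nf_conntrack; do
    lsmod | grep -qw "$m" && echo "$m: loaded" || echo "$m: MISSING"
done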
2. Prepare the HAProxy configuration file
mkdir -p /etc/haproxy
# Create the configuration file
cat > /etc/haproxy/haproxy.cfg <<EOF
global
    log 127.0.0.1 local0 err
    maxconn 50000
    uid 99
    gid 99
    #daemon
    nbproc 1
    pidfile haproxy.pid

defaults
    mode http
    log 127.0.0.1 local0 err
    maxconn 50000
    retries 3
    timeout connect 5s
    timeout client 30s
    timeout server 30s
    timeout check 2s

listen admin_stats
    mode http
    bind 0.0.0.0:1080
    log 127.0.0.1 local0 err
    stats refresh 30s
    stats uri /stats
    stats realm Haproxy\ Statistics
    stats auth admin:admin
    stats hide-version
    stats admin if TRUE

frontend kube-apiserver
    bind 0.0.0.0:8443
    mode tcp
    default_backend kube-apiserver

backend kube-apiserver
    mode tcp
    balance roundrobin
    server master-1.k8s.com 192.168.0.248:6443 weight 1 maxconn 1000 check inter 2000 rise 2 fall 3
    server master-2.k8s.com 192.168.0.170:6443 weight 1 maxconn 1000 check inter 2000 rise 2 fall 3
    server master-3.k8s.com 192.168.0.222:6443 weight 1 maxconn 1000 check inter 2000 rise 2 fall 3
EOF
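Before starting the container, the file can be syntax-checked with the same image (haproxy -c validates a configuration without running it):
docker run --rm -v /etc/haproxy:/usr/local/etc/haproxy:ro \
  haproxy:1.7.8-alpine haproxy -c -f /usr/local/etc/haproxy/haproxy.cfg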
3. Start HAProxy
docker pull haproxy:1.7.8-alpine
# --net=host shares the host network stack, so -p port mappings are
# unnecessary (Docker ignores published ports in host network mode)
docker run -d --name k8s-haproxy \
  --net=host --restart=always \
  -v /etc/haproxy:/usr/local/etc/haproxy:ro \
  haproxy:1.7.8-alpine
4. Check the status page in a browser (credentials admin:admin)
http://192.168.0.248:1080/stats
http://192.168.0.170:1080/stats
http://192.168.0.222:1080/stats
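On a headless host the same check can be done from a shell (these particular commands are an assumption, not from the original):
# Fetch the stats page with basic auth
curl -s -u admin:admin http://192.168.0.248:1080/stats | head -n 20
# Confirm both listeners are bound
ss -tlnp | grep -E ':(8443|1080)'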
5. Start Keepalived
docker pull osixia/keepalived:1.4.4
# KEEPALIVED_INTERFACE must match the host NIC carrying the 192.168.0.0/24
# addresses (eth0 here; a stock CentOS 7 VM often uses ens33 instead)
docker run --net=host --cap-add=NET_ADMIN \
-e KEEPALIVED_INTERFACE=eth0 \
-e KEEPALIVED_VIRTUAL_IPS="#PYTHON2BASH:['192.168.0.199']" \
-e KEEPALIVED_UNICAST_PEERS="#PYTHON2BASH:['192.168.0.248','192.168.0.170','192.168.0.222']" \
-e KEEPALIVED_PASSWORD=admin \
--name k8s-keepalived \
--restart always \
-d osixia/keepalived:1.4.4
6. Verify that Keepalived is working
# You should see one instance elected MASTER and the other two BACKUP
docker logs k8s-keepalived
# Ping the VIP
ping -c2 192.168.0.199
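To see which node currently holds the VIP, check the configured interface on each master (eth0 here; adjust to your NIC):
ip addr show eth0 | grep 192.168.0.199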
# If it failed, clean up and try again
docker rm -f k8s-keepalived
ip a del 192.168.0.199/32 dev ens33   # use your actual NIC name; earlier steps assumed eth0
IV. Multi-Master K8s Cluster Setup
1. Initialize the first node with kubeadm
# --control-plane-endpoint points at the Keepalived VIP and the HAProxy frontend port
kubeadm init --image-repository registry.aliyuncs.com/google_containers \
--kubernetes-version v1.23.0 \
--pod-network-cidr=10.1.0.0/16 \
--apiserver-advertise-address=192.168.0.248 \
--control-plane-endpoint=192.168.0.199:8443
- On success, you will see output like the following:
[addons] Applied essential addon: CoreDNS
[endpoint] WARNING: port specified in controlPlaneEndpoint overrides bindPort in the controlplane address
[addons] Applied essential addon: kube-proxy
Your Kubernetes control-plane has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Alternatively, if you are the root user, you can run:
export KUBECONFIG=/etc/kubernetes/admin.conf
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of control-plane nodes by copying certificate authorities
and service account keys on each node and then running the following as root:
kubeadm join 192.168.0.199:8443 --token v4m8ry.z1sek31mp8uwe5cv \
--discovery-token-ca-cert-hash sha256:7b7c80f2a9816e828438b6f6ab94368ac15c2a55395bd235c8487187bb375a49 \
--control-plane
Then you can join any number of worker nodes by running the following on each as root:
kubeadm join 192.168.0.199:8443 --token v4m8ry.z1sek31mp8uwe5cv \
--discovery-token-ca-cert-hash sha256:7b7c80f2a9816e828438b6f6ab94368ac15c2a55395bd235c8487187bb375a49
2. Prepare the kubeconfig file for kubectl
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
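A quick sanity check that kubectl can reach the API server through the VIP (standard kubectl commands):
kubectl cluster-info
kubectl get nodes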
3. Sync the certificates from this master to the masters that have not joined yet
# Set up passwordless SSH to the other masters
ssh-keygen
ssh-copy-id 192.168.0.170
ssh-copy-id 192.168.0.222
cat > scp_k8s_crt.sh << 'EOF'
#!/bin/bash
# Usage: scp_k8s_crt.sh <user> "<host> [<host> ...]"
USER=$1
CONTROL_PLANE_IPS=$2
for host in ${CONTROL_PLANE_IPS}; do
    ssh "${USER}"@$host "mkdir -p /etc/kubernetes/pki/etcd"
    scp /etc/kubernetes/pki/ca.* "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/sa.* "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/front-proxy-ca.* "${USER}"@$host:/etc/kubernetes/pki/
    scp /etc/kubernetes/pki/etcd/ca.* "${USER}"@$host:/etc/kubernetes/pki/etcd/
    scp /etc/kubernetes/admin.conf "${USER}"@$host:/etc/kubernetes/
done
EOF
bash scp_k8s_crt.sh root 192.168.0.170
bash scp_k8s_crt.sh root 192.168.0.222
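Optionally confirm the files landed on each peer (a spot check, not from the original):
ssh root@192.168.0.170 "ls /etc/kubernetes/pki /etc/kubernetes/pki/etcd"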
4. Join the other master nodes
# Run kubeadm join (with --control-plane) on each master that has not joined yet
kubeadm join 192.168.0.199:8443 --token v4m8ry.z1sek31mp8uwe5cv \
--discovery-token-ca-cert-hash sha256:7b7c80f2a9816e828438b6f6ab94368ac15c2a55395bd235c8487187bb375a49 \
--control-plane
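After a master joins, set up its kubeconfig the same way as in step 2 so kubectl works there too (these lines also appear in the kubeadm join output):
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config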
5. Join the worker nodes
# Run kubeadm join on each worker node
kubeadm join 192.168.0.199:8443 --token v4m8ry.z1sek31mp8uwe5cv \
--discovery-token-ca-cert-hash sha256:7b7c80f2a9816e828438b6f6ab94368ac15c2a55395bd235c8487187bb375a49
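Bootstrap tokens expire after 24 hours by default. If the token above no longer works, generate a fresh join command on any existing master:
kubeadm token create --print-join-command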
6. Check the cluster
kubectl get nodes
NAME             STATUS     ROLES                  AGE   VERSION
k8s-clusters     NotReady   control-plane,master   60m   v1.23.1
k8s-clusters-1   NotReady   control-plane,master   47m   v1.23.1
k8s-clusters-2   NotReady   control-plane,master   46m   v1.23.1
k8s-clusters-3   NotReady   <none>                 46m   v1.23.1
# Nodes stay NotReady until a CNI plugin such as flannel or calico is installed; after that they become Ready
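For example, flannel can be installed like this; note that the stock manifest defaults to a 10.244.0.0/16 network, so its net-conf.json must be edited to match the --pod-network-cidr=10.1.0.0/16 used above (the manifest URL reflects the flannel-io repository layout and may move):
kubectl apply -f https://raw.githubusercontent.com/flannel-io/flannel/master/Documentation/kube-flannel.yml
# Nodes should transition to Ready once the CNI pods are running
kubectl get nodes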