本文参考官方文档,采用堆叠(stacked)控制平面节点方式部署高可用集群,etcd组件和控制平面组件都以pod形式部署在master节点上,利用keepalived实现api-server组件的高可用访问
拓扑图
准备工作
资源准备
k8s-1 192.168.229.131
k8s-2 192.168.229.132
k8s-3 192.168.229.133
VIP: 192.168.229.139
环境准备
- 配置主机名
192.168.229.131上执行
hostnamectl set-hostname k8s-1
192.168.229.132上执行
hostnamectl set-hostname k8s-2
192.168.229.133上执行
hostnamectl set-hostname k8s-3
(注:hostname 命令只是临时修改,重启后失效;hostnamectl set-hostname 永久生效)
- 配置hosts(三个节点上执行)
cat <<EOF >> /etc/hosts
192.168.229.131 k8s-1
192.168.229.132 k8s-2
192.168.229.133 k8s-3
EOF
(注:/etc/hosts 的格式是"IP 主机名",且要用 >> 追加,避免覆盖掉系统自带的 localhost 条目)
- 禁用swap(三个节点上执行 )
swapoff -a
sed -ri 's/.*swap.*/#&/' /etc/fstab
(注:swapoff -a 只是临时关闭,同时注释 /etc/fstab 中的 swap 条目,防止重启后 swap 重新挂载导致 kubelet 启动失败)
- 配置NTP服务(三个节点上执行)
systemctl start ntpd
- 调整内核参数(三个节点上执行)
- 临时修改
sysctl net.bridge.bridge-nf-call-iptables=1
- 永久修改
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl -p /etc/sysctl.d/k8s.conf
- 配置yum源(三个节点上执行),将以下内容写入 /etc/yum.repos.d/kubernetes.repo
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF
- 安装docker,kubeadm(三个节点上执行)
yum install -y yum-utils
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum -y install docker-ce kubelet kubeadm kubectl
systemctl enable --now docker kubelet
(注:docker-ce-selinux 已废弃,新版 docker-ce 依赖 container-selinux;kubelet、kubectl 建议显式安装,且后续拉取镜像前需要先启动 docker)
- 下载k8s镜像(三个节点上执行)
8.1 查看指定版本组件
任意一台node上执行
kubeadm config images list --kubernetes-version=1.18.0
获取版本组件输出,如下所示:
images=(
kube-apiserver:v1.18.0
kube-controller-manager:v1.18.0
kube-scheduler:v1.18.0
kube-proxy:v1.18.0
pause:3.2
etcd:3.4.3-0
coredns:1.6.7
)
8.2 下载镜像
for imageName in "${images[@]}"; do
docker pull registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName
docker tag registry.cn-hangzhou.aliyuncs.com/google_containers/$imageName k8s.gcr.io/$imageName
done
安装部署
安装keepalived
配置master
192.168.229.131上编辑/etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id k8s-1
}
vrrp_instance VI_1 {
state MASTER
interface eno16777736
virtual_router_id 50
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.229.139
}
}
配置backup
192.168.229.132上编辑/etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id k8s-2
}
vrrp_instance VI_1 {
state BACKUP
interface eno16777736
virtual_router_id 50
priority 90
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.229.139
}
}
192.168.229.133上编辑/etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
router_id k8s-3
}
vrrp_instance VI_1 {
state BACKUP
interface eno16777736
virtual_router_id 50
priority 80
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
192.168.229.139
}
}
启动keepalived(三个节点上执行)
systemctl start keepalived
安装k8s集群master节点
初始化第一台master节点
192.168.229.131上执行
kubeadm init --control-plane-endpoint=192.168.229.139:6443 --upload-certs --kubernetes-version=1.18.0 --pod-network-cidr=10.25.0.0/16
执行完毕后会输出添加master节点和添加worker节点命令,拷贝复制
添加其他master节点
192.168.229.132上执行
kubeadm join 192.168.229.139:6443 --token dxznj9.2ckioxsea8yj6e9i \
--discovery-token-ca-cert-hash sha256:6cbf6a8df1edd5921698a8db2ca193a6fe769f5b380ac511d0afec53d7da3ec7 \
--control-plane --certificate-key f0cb3b5efab8d3d1895801b77451b5b9d7e52b74ec031d6cc68623d191aba358
192.168.229.133上执行
kubeadm join 192.168.229.139:6443 --token dxznj9.2ckioxsea8yj6e9i \
--discovery-token-ca-cert-hash sha256:6cbf6a8df1edd5921698a8db2ca193a6fe769f5b380ac511d0afec53d7da3ec7 \
--control-plane --certificate-key f0cb3b5efab8d3d1895801b77451b5b9d7e52b74ec031d6cc68623d191aba358
kubectl配置(任意一台master节点执行)
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
安装flannel网络插件
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
查看集群状态
[root@k8s-1 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
k8s-1 Ready master 94d v1.18.3
k8s-2 Ready master 94d v1.18.3
k8s-3 Ready master 94d v1.18.3
查看master节点组件状态
[root@k8s-1 ~]# kubectl get pods -n kube-system
NAME READY STATUS RESTARTS AGE
coredns-66bff467f8-mtxxf 1/1 Running 15 90d
coredns-66bff467f8-p78fx 1/1 Running 15 90d
etcd-k8s-1 1/1 Running 32 89d
etcd-k8s-2 1/1 Running 25 89d
etcd-k8s-3 1/1 Running 26 89d
kube-apiserver-k8s-1 1/1 Running 33 89d
kube-apiserver-k8s-2 1/1 Running 28 89d
kube-apiserver-k8s-3 1/1 Running 29 89d
kube-controller-manager-k8s-1 1/1 Running 19 89d
kube-controller-manager-k8s-2 1/1 Running 21 89d
kube-controller-manager-k8s-3 1/1 Running 21 89d
kube-flannel-ds-amd64-kfdt6 1/1 Running 22 94d
kube-flannel-ds-amd64-qfq7d 1/1 Running 23 94d
kube-flannel-ds-amd64-wpt6t 1/1 Running 25 94d
kube-proxy-8k46v 1/1 Running 19 94d
kube-proxy-cdrfl 1/1 Running 16 94d
kube-proxy-lj2bm 1/1 Running 20 94d
kube-scheduler-k8s-1 1/1 Running 19 89d
kube-scheduler-k8s-2 1/1 Running 20 89d
kube-scheduler-k8s-3 1/1 Running 20 89d
网友评论