k8s High Availability

Author: 码农GG | Published 2021-03-08 10:52

1. Prepare the environment

Role         IP             Hostname
k8s-master1  192.168.1.200  master1
k8s-master2  192.168.1.201  master2
k8s-node1    192.168.1.202  node1
k8s-node2    192.168.1.203  node2
k8s-vip      192.168.1.204  k8s-vip

2. Initialize the nodes

Perform the following steps on all four nodes (master1, master2, node1, node2).

2.1 Disable the firewall

systemctl stop firewalld
systemctl disable firewalld

2.2 Disable SELinux

# Permanently (takes effect after reboot)
sed -i 's/enforcing/disabled/' /etc/selinux/config
# Temporarily, for the current session
setenforce 0

2.3 Disable swap

# Temporarily
swapoff -a
# Permanently (comments out the swap entry in fstab)
sed -ri 's/.*swap.*/#&/' /etc/fstab
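
To confirm swap is really off, check that the Swap row shows all zeros:

free -h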

2.4 Set hostnames

# Set each hostname per the plan above [run on master1]
hostnamectl set-hostname master1
# [run on master2]
hostnamectl set-hostname master2
# [run on node1]
hostnamectl set-hostname node1
# [run on node2]
hostnamectl set-hostname node2

Add hosts entries (on all nodes):

cat >> /etc/hosts << EOF
192.168.1.204 master.k8s.io k8s-vip
192.168.1.200 master01.k8s.io master1
192.168.1.201 master02.k8s.io master2
192.168.1.202 node01.k8s.io node1
192.168.1.203 node02.k8s.io node2
EOF
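
A quick sanity check that the new names resolve (getent reads /etc/hosts):

getent hosts master.k8s.io master1 master2 node1 node2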

2.5 Switch the firewall to iptables with an empty rule set

yum -y install iptables-services && systemctl start iptables && systemctl enable iptables && iptables -F && service iptables save

2.6 Pass bridged IPv4 traffic to iptables chains [run on all nodes]

cat > /etc/sysctl.d/k8s.conf << EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward=1
# tcp_tw_recycle was removed in kernel 4.12; this line is ignored (with a warning) on newer kernels
net.ipv4.tcp_tw_recycle=0
# Forbid swap use; only allow it when the system would otherwise OOM
vm.swappiness=0
# Do not check whether enough physical memory is available
vm.overcommit_memory=1
# Do not panic on OOM; let the OOM killer act
vm.panic_on_oom=0
fs.inotify.max_user_instances=8192
fs.inotify.max_user_watches=1048576
fs.file-max=52706963
fs.nr_open=52706963
net.ipv6.conf.all.disable_ipv6=1
net.netfilter.nf_conntrack_max=2310720
EOF
# Apply
sysctl --system
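
To spot-check that the settings took effect (the net.bridge.* keys only exist once br_netfilter is loaded):

modprobe br_netfilter
sysctl net.bridge.bridge-nf-call-iptables net.ipv4.ip_forward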

2.7 Time synchronization

yum install ntpdate -y
ntpdate time.windows.com

2.8 Stop services the system does not need

systemctl stop postfix && systemctl disable postfix

2.9 Configure rsyslogd and systemd-journald

# Directory for persistently stored logs
mkdir /var/log/journal
mkdir /etc/systemd/journald.conf.d
cat > /etc/systemd/journald.conf.d/99-prophet.conf <<EOF
[Journal]
# Persist logs to disk
Storage=persistent
# Compress historical logs
Compress=yes
SyncIntervalSec=5m
RateLimitInterval=30s
RateLimitBurst=1000
# Cap total disk usage at 10G
SystemMaxUse=10G
# Cap a single journal file at 200M
SystemMaxFileSize=200M
# Keep logs for 2 weeks
MaxRetentionSec=2week
# Do not forward to syslog
ForwardToSyslog=no
EOF
systemctl restart systemd-journald
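
To confirm journald restarted with the new limits, check its current disk usage (it should stay under the 10G cap):

journalctl --disk-usage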

2.10 Upgrade the kernel to 5.4

The stock 3.10.x kernel shipped with CentOS 7.x has bugs that make Docker and Kubernetes unstable, so upgrade it from the ELRepo repository:

rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-3.el7.elrepo.noarch.rpm
# After installing, check that the new kernel's menuentry in /boot/grub2/grub.cfg contains an initrd16 line; if not, install again!
yum --enablerepo=elrepo-kernel install -y kernel-lt
# Boot from the new kernel by default (adjust the version string to the kernel actually installed)
grub2-set-default 'CentOS Linux (5.4.100-1.el7.elrepo.x86_64) 7 (Core)'
# Reboot
reboot
# Check the running kernel version
uname -r

2.11 Prerequisites for kube-proxy IPVS mode

In IPVS mode, kube-proxy programs the kernel's IPVS load balancer instead of long iptables chains, which scales better with many services. Note that since Linux kernel 4.19 the nf_conntrack_ipv4 module has been renamed to nf_conntrack, which is why nf_conntrack is loaded below.

modprobe br_netfilter
cat > /etc/sysconfig/modules/ipvs.modules <<EOF
#!/bin/bash
modprobe -- ip_vs
modprobe -- ip_vs_rr
modprobe -- ip_vs_wrr
modprobe -- ip_vs_sh
modprobe -- nf_conntrack
EOF
chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack

2.12 Disable NUMA

cp /etc/default/grub{,.bak}
vim /etc/default/grub # append `numa=off` to the GRUB_CMDLINE_LINUX line, as in this diff:
diff /etc/default/grub.bak /etc/default/grub
6c6
< GRUB_CMDLINE_LINUX="crashkernel=auto rd.lvm.lv=centos/root rhgb quiet"
---
> GRUB_CMDLINE_LINUX="crashkernel=auto rd.lvm.lv=centos/root rhgb quiet numa=off"
cp /boot/grub2/grub.cfg{,.bak}
grub2-mkconfig -o /boot/grub2/grub.cfg
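
After the next reboot, you can confirm the kernel picked up the parameter:

grep numa=off /proc/cmdline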

3. Deploy keepalived

Deploy keepalived on both master nodes (master1 and master2).

3.1 Install the required packages

# Install helper tools
yum install -y conntrack-tools libseccomp libtool-ltdl
# Install keepalived
yum install -y keepalived

3.2 Configure the master nodes

Add the following configuration on master1 and master2 (see the note after the block for the values you would normally change on master2):

cat > /etc/keepalived/keepalived.conf <<EOF 
! Configuration File for keepalived

global_defs {
 router_id k8s
}

vrrp_script check_haproxy {
 script "killall -0 haproxy"
 interval 3
 weight -2
 fall 10
 rise 2
}

vrrp_instance VI_1 {
 state MASTER 
 interface ens33  # set to the name of your NIC
 virtual_router_id 51
 priority 250
 advert_int 1
 authentication {
     auth_type PASS
     auth_pass ceb1b3ec013d66163d6ab
 }
 virtual_ipaddress {
     192.168.1.204  # the VIP
 }
 track_script {
     check_haproxy
 }

}
EOF
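
As written, both masters claim state MASTER with the same priority. VRRP will still elect a single holder of the VIP, but the conventional layout (a suggested variation, not from the original) is to make master2 a BACKUP with a lower priority so master1 owns the VIP by default. Only these lines would differ in master2's /etc/keepalived/keepalived.conf:

# suggested values for master2 only
 state BACKUP
 priority 200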

3.3 Start and verify

Run the following on both master nodes:

# Start keepalived
systemctl start keepalived.service
# Enable it at boot
systemctl enable keepalived.service
# Check its status
systemctl status keepalived.service

After starting, inspect the NIC on each master; the VIP 192.168.1.204 should appear on master1, the current MASTER:

ip a s ens33
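
To verify failover, you can stop keepalived on master1 and watch the VIP move to master2, then start it again:

systemctl stop keepalived    # on master1; the VIP should move to master2
ip a s ens33                 # on master2: confirm 192.168.1.204 is now bound
systemctl start keepalived   # on master1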

4. Deploy haproxy

haproxy provides the load balancing, spreading incoming API requests across the two master apiservers.

4.1 Install

Install haproxy on both master nodes:

# Install haproxy
yum install -y haproxy

4.2 Configure

The configuration is identical on both masters. It declares the two master apiservers as backends and has haproxy listen on port 16443, so port 16443 becomes the cluster's entry point:

cat > /etc/haproxy/haproxy.cfg << EOF
#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    # 1) configure syslog to accept network log events.  This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #   file. A line like the following can be added to
    #   /etc/sysconfig/syslog
    #
    #    local2.*                       /var/log/haproxy.log
    #
    log         127.0.0.1 local2
    
    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon 
       
    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats
#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------  
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000
#---------------------------------------------------------------------
# kubernetes apiserver frontend which proxies to the backends
# binds the cluster entry port 16443
#--------------------------------------------------------------------- 
frontend kubernetes-apiserver
    mode                 tcp
    bind                 *:16443   
    option               tcplog
    default_backend      kubernetes-apiserver    
#---------------------------------------------------------------------
# round robin balancing between the various backends
#  load-balancing policy for the HA master backends
#---------------------------------------------------------------------
backend kubernetes-apiserver
    mode        tcp
    balance     roundrobin
    server      master01.k8s.io   192.168.1.200:6443 check
    server      master02.k8s.io   192.168.1.201:6443 check
#---------------------------------------------------------------------
# collection haproxy statistics message
#---------------------------------------------------------------------
listen stats
    bind                 *:1080
    stats auth           admin:awesomePassword
    stats refresh        5s
    stats realm          HAProxy\ Statistics
    stats uri            /admin?stats
EOF
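
Before starting, you can have haproxy validate the file in check mode:

haproxy -c -f /etc/haproxy/haproxy.cfg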

4.3 Start and verify

Run the checks on both masters:

# Enable at boot
systemctl enable haproxy
# Start haproxy
systemctl start haproxy
# Check status
systemctl status haproxy

After startup, confirm that haproxy is listening on port 16443:

netstat -lntup | grep haproxy

5. Install Docker/kubeadm/kubelet on all nodes

Kubernetes uses Docker as its default CRI (container runtime), so install Docker first.

5.1 Install Docker

yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
yum update -y && yum install -y docker-ce

# Reboot
reboot
# Check the running kernel version
uname -r
# If it is still 3.10, make the new kernel the default boot entry:

# 1. List the kernels installed on the system
cat /boot/grub2/grub.cfg | grep menuentry
# Expect a list like:
# menuentry 'CentOS Linux (3.10.0-327.22.2.el7.x86_64) 7 (Core)' --class centos
# menuentry 'CentOS Linux (5.4.101-1.el7.elrepo.x86_64) 7 (Core)' --class centos

# 2. Set the default boot kernel, e.g. CentOS Linux (5.4.101-1.el7.elrepo.x86_64) 7 (Core):
grub2-set-default 'CentOS Linux (5.4.101-1.el7.elrepo.x86_64) 7 (Core)'

# 3. Verify the default kernel was changed:
grub2-editenv list
# saved_entry=CentOS Linux (5.4.101-1.el7.elrepo.x86_64) 7 (Core)

# Reboot
reboot

## Create the /etc/docker directory
mkdir /etc/docker

# Configure the Docker daemon
cat > /etc/docker/daemon.json <<EOF
{
  "registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"],
  "exec-opts": ["native.cgroupdriver=systemd"],
  "log-driver": "json-file",
  "log-opts": {"max-size": "100m"}
}
EOF

mkdir -p /etc/systemd/system/docker.service.d

# Reload systemd units, restart Docker, and enable it at boot
systemctl daemon-reload && systemctl restart docker && systemctl enable docker
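
To confirm Docker picked up the systemd cgroup driver (it must match what kubelet expects):

docker info | grep -i cgroup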

5.2 Add the Kubernetes yum repository

cat > /etc/yum.repos.d/kubernetes.repo << EOF
[kubernetes]
name=Kubernetes
baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=0
repo_gpgcheck=0
gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
EOF

5.3 Install kubeadm, kubelet, and kubectl

yum install -y kubelet-1.18.0 kubeadm-1.18.0 kubectl-1.18.0
systemctl enable kubelet
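
A quick check that the expected 1.18.0 binaries are installed:

kubeadm version -o short
kubelet --version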

6. Deploy the Kubernetes master [master nodes]

6.1 Create the kubeadm configuration file

Run the initialization on the master that currently holds the VIP, here master1.

# Create the directory
mkdir /usr/local/kubernetes/manifests -p
# Change into the manifests directory
cd /usr/local/kubernetes/manifests/
# Generate kubeadm-config.yaml
kubeadm config print init-defaults > kubeadm-config.yaml
# Edit the yaml file
vi kubeadm-config.yaml

Edit the yaml so its contents look like this:

apiVersion: kubeadm.k8s.io/v1beta2
bootstrapTokens:
- groups:
  - system:bootstrappers:kubeadm:default-node-token
  token: abcdef.0123456789abcdef
  ttl: 24h0m0s
  usages:
  - signing
  - authentication
kind: InitConfiguration
localAPIEndpoint:
  advertiseAddress: 192.168.1.200   # this master's IP
  bindPort: 6443                    # this master's apiserver port
nodeRegistration:
  criSocket: /var/run/dockershim.sock
  name: master1
  taints:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
---
apiServer:
  certSANs:   # the master names/IPs and the VIP
  - master1
  - master2
  - master.k8s.io
  - 192.168.1.200
  - 192.168.1.201
  - 192.168.1.204
  - 127.0.0.1
  extraArgs:
    authorization-mode: Node,RBAC
  timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta2
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "master.k8s.io:16443"   # the HA VIP endpoint
controllerManager: {}
dns:
  type: CoreDNS
etcd:
  local:
    dataDir: /var/lib/etcd
imageRepository: registry.aliyuncs.com/google_containers   # Aliyun mirror of the image repository
kind: ClusterConfiguration
kubernetesVersion: v1.18.0
networking:
  dnsDomain: cluster.local
  podSubnet: "10.244.0.0/16"
  serviceSubnet: 10.96.0.0/12
scheduler: {}
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: ipvs   # mode: ipvs is what actually selects IPVS; the feature gate alone is not enough
featureGates:
  SupportIPVSProxyMode: true
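
Optionally, you can pre-pull the control-plane images before initializing; this makes kubeadm init faster and surfaces image-registry problems early:

kubeadm config images pull --config kubeadm-config.yaml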

6.2 Initialize

kubeadm init --config kubeadm-config.yaml
# If initialization fails, reset and initialize again:
kubeadm reset
# After a successful init, run:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config

6.3 Check the status

# List nodes
kubectl get nodes
# List pods
kubectl get pods -n kube-system

7. Join master2 to the cluster

7.1 Copy the certificates and related files

# From master1, copy the certificates and kubeconfig to master2
ssh root@192.168.1.201 mkdir -p /etc/kubernetes/pki/etcd

scp /etc/kubernetes/admin.conf root@192.168.1.201:/etc/kubernetes

scp /etc/kubernetes/pki/{ca.*,sa.*,front-proxy-ca.*} root@192.168.1.201:/etc/kubernetes/pki

scp /etc/kubernetes/pki/etcd/ca.* root@192.168.1.201:/etc/kubernetes/pki/etcd

# Save the control-plane join command printed by kubeadm init; you will need it in a moment (token and hash are specific to each run)
kubeadm join master.k8s.io:16443 --token jv5z7n.3y1zi95p952y9p65 \
    --discovery-token-ca-cert-hash sha256:403bca185c2f3a4791685013499e7ce58f9848e2213e27194b75a2e3293d8812 \
    --control-plane

7.2 Join master2 to the cluster

On master2, run the join command printed by kubeadm init on master1; the --control-plane flag joins it as a control-plane (master) node:

kubeadm join master.k8s.io:16443 --token ckf7bs.30576l0okocepg8b     --discovery-token-ca-cert-hash sha256:19afac8b11182f61073e254fb57b9f19ab4d798b70501036fc69ebef46094aba --control-plane
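
After joining, set up kubectl on master2 the same way as on master1 (kubeadm prints these steps at the end of the join output):

mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config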

7.3 Check the status

kubectl get node
kubectl get pods --all-namespaces

7.4 Check the cluster state

# Check component status
kubectl get cs
# List pods
kubectl get pods -n kube-system

8. Add the worker nodes

On node1 and node2, run the join command from the kubeadm init output, without --control-plane:

kubeadm join master.k8s.io:16443 --token jv5z7n.3y1zi95p952y9p65 \
    --discovery-token-ca-cert-hash sha256:403bca185c2f3a4791685013499e7ce58f9848e2213e27194b75a2e3293d8812

--control-plane: only used when joining a master (control-plane) node.

9. Install the cluster network

kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

9.1 Check

kubectl get pods -n kube-system
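
To watch specifically for the flannel pods coming up on every node (the manifest above labels them app: flannel):

kubectl get pods -n kube-system -l app=flannel -o wide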
