Kubernetes v1.14.4 HA Installation and Deployment Guide

Author: 痕迹_dark | Published 2019-07-26 17:33

    Version plan

    • Operating system: CentOS 7.6
    • Kubernetes: v1.14.4
    • Container runtime: Docker 18.09.7
    • Master HA scheme: Keepalived + HAProxy
    • Network plugin: Calico
    • kube-proxy mode: IPVS
    • DNS add-on: CoreDNS

    Environment plan

    Server               IP configuration     Role              Notes
    kubernetes-master-1  VIP: 172.253.60.10   master, haproxy   Master node
                         IP:  172.253.60.3
    kubernetes-master-2  VIP: 172.253.60.10   master, haproxy   Master node
                         IP:  172.253.60.4
    kubernetes-master-3  VIP: 172.253.60.10   master, haproxy   Master node
                         IP:  172.253.60.5
    kubernetes-node-1    IP:  172.253.60.6    node              Worker node
    kubernetes-node-2    IP:  172.253.60.7    node              Worker node
    kubernetes-node-3    IP:  172.253.60.8    node              Worker node

    Environment preparation

    Install software

    Install base packages

    master

    # yum install -y wget yum-utils device-mapper-persistent-data lvm2 net-tools chrony socat keepalived ipvsadm haproxy ebtables 
    

    node

    # yum install -y wget yum-utils device-mapper-persistent-data lvm2 net-tools chrony socat ipvsadm ebtables
    

    Install Docker

    # yum-config-manager \
     --add-repo \
     https://download.docker.com/linux/centos/docker-ce.repo
    # yum makecache fast
    # yum install -y docker-ce-18.09.7 docker-ce-cli-18.09.7 containerd.io-1.2.6
    

    Install Kubernetes components

    # cat <<EOF > /etc/yum.repos.d/kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
    enabled=1
    gpgcheck=0
    repo_gpgcheck=0
    gpgkey=http://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg
            http://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    EOF
    # yum install -y kubelet-1.14.4 kubeadm-1.14.4 kubectl-1.14.4
    

    Node configuration

    Disable SELinux and the firewall

    # systemctl stop firewalld
    # systemctl disable firewalld
    # setenforce 0
    # sed -i "s/SELINUX=enforcing/SELINUX=disabled/g" /etc/selinux/config
    

    Disable swap (required since Kubernetes 1.8)

    # swapoff -a
    # yes | cp /etc/fstab /etc/fstab_bak
    # cat /etc/fstab_bak |grep -v swap > /etc/fstab
    

    Kernel parameter configuration

    # cat <<EOF > /etc/sysctl.d/k8s.conf
    net.ipv4.tcp_keepalive_time = 600
    net.ipv4.tcp_keepalive_intvl = 30
    net.ipv4.tcp_keepalive_probes = 10
    net.ipv6.conf.all.disable_ipv6 = 1
    net.ipv6.conf.default.disable_ipv6 = 1
    net.ipv6.conf.lo.disable_ipv6 = 1
    net.ipv4.neigh.default.gc_stale_time = 120
    net.ipv4.conf.all.rp_filter = 0
    net.ipv4.conf.default.rp_filter = 0
    net.ipv4.conf.default.arp_announce = 2
    net.ipv4.conf.lo.arp_announce = 2
    net.ipv4.conf.all.arp_announce = 2
    net.ipv4.ip_forward = 1
    net.ipv4.tcp_max_tw_buckets = 5000
    net.ipv4.tcp_syncookies = 1
    net.ipv4.tcp_max_syn_backlog = 1024
    net.ipv4.tcp_synack_retries = 2
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    net.netfilter.nf_conntrack_max = 2310720
    fs.inotify.max_user_watches=89100
    fs.may_detach_mounts = 1
    fs.file-max = 52706963
    fs.nr_open = 52706963
    net.bridge.bridge-nf-call-arptables = 1
    vm.swappiness = 0
    vm.overcommit_memory=1
    vm.panic_on_oom=0
    EOF
    # sysctl --system
    

    Time synchronization

    Edit /etc/chrony.conf and add:

    server 172.253.60.1 iburst
    

    Start chrony

    # systemctl start chronyd
    # systemctl enable chronyd
    # chronyc sources -v
    

    Enable IPVS

    # cat > /etc/sysconfig/modules/ipvs.modules <<EOF
    #!/bin/bash
    ipvs_modules="ip_vs ip_vs_lc ip_vs_wlc ip_vs_rr ip_vs_wrr ip_vs_lblc ip_vs_lblcr ip_vs_dh ip_vs_sh ip_vs_nq ip_vs_sed ip_vs_ftp nf_conntrack"
    for kernel_module in \${ipvs_modules}; do
        /sbin/modinfo -F filename \${kernel_module} > /dev/null 2>&1
        if [ $? -eq 0 ]; then
            /sbin/modprobe \${kernel_module}
        fi
    done
    EOF
    # chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep ip_vs
    

    Configure Docker

    Edit /etc/docker/daemon.json:

    {
      "exec-opts": ["native.cgroupdriver=systemd"],
      "log-driver": "json-file",
      "log-opts": {
        "max-size": "100m"
      },
      "storage-driver": "overlay2",
      "storage-opts": [
        "overlay2.override_kernel_check=true"
      ],
      "insecure-registries": ["registry.cmstest.com", "registry", "10.254.249.2"]
    }
    

    Start Docker

    # systemctl enable docker
    # systemctl start docker
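
    You can confirm that the systemd cgroup driver set in daemon.json is in effect (kubelet expects the same driver) with a quick check:

    # docker info | grep -i cgroup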
    

    Configure kubelet

    # systemctl enable kubelet
    # systemctl start kubelet
    

    Master node high availability

    Implementation

    Master HA is implemented with Keepalived + HAProxy.

    If the cluster is large and performance requirements are high, an LVS + Keepalived scheme is recommended instead, but it requires two additional servers to act as LVS servers.

    Keepalived configuration

    Configuration file path: /etc/keepalived/keepalived.conf

    master-1 configuration

    master-1 is the MASTER node and holds the VIP by default.

    ! Configuration File for keepalived
    
    global_defs {
       router_id LVS_DEVEL
    }
    
    vrrp_script check_haproxy {
        script "killall -0 haproxy"
        interval 3
        weight -2
        fall 10
        rise 2
    }
    
    vrrp_instance VI_1 {
        state MASTER
        interface ens160
        virtual_router_id 70
        priority 100
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 35f18af7190d51c9f7f78f37300a0cbd
        }
        virtual_ipaddress {
            172.253.60.10
        }
        track_script {
            check_haproxy
        }
    }
    

    master-2 configuration

    master-2 is the primary backup; if master-1 goes down, the VIP fails over to this node.

    ! Configuration File for keepalived
    
    global_defs {
       router_id LVS_DEVEL
    }
    
    vrrp_script check_haproxy {
        script "killall -0 haproxy"
        interval 3
        weight -2
        fall 10
        rise 2
    }
    
    vrrp_instance VI_1 {
        state BACKUP
        interface ens160
        virtual_router_id 70
    priority 95
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 35f18af7190d51c9f7f78f37300a0cbd
        }
        virtual_ipaddress {
            172.253.60.10
        }
        track_script {
            check_haproxy
        }
    }
    

    master-3 configuration

    master-3 is the secondary backup; the VIP moves here only after both master-1 and the primary backup are down.

    ! Configuration File for keepalived

    global_defs {
       router_id LVS_DEVEL
    }
    
    vrrp_script check_haproxy {
        script "killall -0 haproxy"
        interval 3
        weight -2
        fall 10
        rise 2
    }
    
    vrrp_instance VI_1 {
        state BACKUP
        interface ens160
        virtual_router_id 70
    priority 90
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 35f18af7190d51c9f7f78f37300a0cbd
        }
        virtual_ipaddress {
            172.253.60.10
        }
        track_script {
            check_haproxy
        }
    }
    

    Start Keepalived

    Run on all master nodes:

    # systemctl enable keepalived
    # systemctl start keepalived
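
    To confirm that the VIP is bound on the current MASTER node (master-1 by default), check the interface used in the keepalived configuration (ens160 here):

    # ip addr show ens160 | grep 172.253.60.10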
    

    HAProxy configuration

    Configuration file path: /etc/haproxy/haproxy.cfg

    Configuration on the master nodes

    #---------------------------------------------------------------------
    # Global settings
    #---------------------------------------------------------------------
    global
        # to have these messages end up in /var/log/haproxy.log you will
        # need to:
        #
        # 1) configure syslog to accept network log events.  This is done
        #    by adding the '-r' option to the SYSLOGD_OPTIONS in
        #    /etc/sysconfig/syslog
        #
        # 2) configure local2 events to go to the /var/log/haproxy.log
        #   file. A line like the following can be added to
        #   /etc/sysconfig/syslog
        #
        #    local2.*                       /var/log/haproxy.log
        #
        log         127.0.0.1 local2
    
        chroot      /var/lib/haproxy
        pidfile     /var/run/haproxy.pid
        maxconn     4000
        user        haproxy
        group       haproxy
        daemon
    
        # turn on stats unix socket
        stats socket /var/lib/haproxy/stats
    
    #---------------------------------------------------------------------
    # common defaults that all the 'listen' and 'backend' sections will
    # use if not designated in their block
    #---------------------------------------------------------------------
    defaults
        mode                    http
        log                     global
        option                  httplog
        option                  dontlognull
        option http-server-close
        option forwardfor       except 127.0.0.0/8
        option                  redispatch
        retries                 3
        timeout http-request    10s
        timeout queue           1m
        timeout connect         10s
        timeout client          1m
        timeout server          1m
        timeout http-keep-alive 10s
        timeout check           10s
        maxconn                 3000
    
    #---------------------------------------------------------------------
    # kubernetes apiserver frontend which proxies to the backends
    #---------------------------------------------------------------------
    frontend kubernetes-apiserver
        mode                 tcp
        bind                 *:443
        option               tcplog
        default_backend      kubernetes-apiserver
    
    #---------------------------------------------------------------------
    # round robin balancing between the various backends
    #---------------------------------------------------------------------
    backend kubernetes-apiserver
        mode        tcp
        balance     roundrobin
        server  master-1 172.253.60.3:6443 check
        server  master-2 172.253.60.4:6443 check
        server  master-3 172.253.60.5:6443 check
    
    #---------------------------------------------------------------------
    # collection haproxy statistics message
    #---------------------------------------------------------------------
    listen stats
        bind                 *:1080
        stats auth           admin:awesomePassword
        stats refresh        5s
        stats realm          HAProxy\ Statistics
        stats uri            /admin?stats
    

    Start HAProxy

    # systemctl enable haproxy
    # systemctl start haproxy
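
    A quick check that HAProxy is listening on the apiserver frontend port (443) and the stats port (1080):

    # ss -lntp | grep haproxy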
    

    Create the Kubernetes cluster

    Prepare the kubeadm configuration file

    kubeadm-config.yaml

    Set the kube-proxy mode to IPVS with the wrr (weighted round-robin) scheduling algorithm.

    apiVersion: kubeadm.k8s.io/v1beta1
    kind: ClusterConfiguration
    imageRepository: 10.254.249.2/kubernetes
    kubernetesVersion: v1.14.4
    controlPlaneEndpoint: "kubernetes-cluster:6443"
    networking:
      podSubnet: "192.168.0.0/16"
    ---
    apiVersion: kubeproxy.config.k8s.io/v1alpha1
    kind: KubeProxyConfiguration
    mode: "ipvs"
    ipvs:
      scheduler: "wrr"
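
    After the cluster has been initialized (see the deployment steps below), the IPVS mode and wrr scheduler can be verified, for example:

    # kubectl -n kube-system get configmap kube-proxy -o yaml | grep -E 'mode|scheduler'
    # ipvsadm -Ln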
    

    Deploy Kubernetes

    Create the first master node

    Run this on kubernetes-master-1 and note the kubeadm join lines in the output; they are used later to add the remaining nodes.

    # kubeadm init --config=kubeadm-config.yaml --experimental-upload-certs
    

    Add the other master nodes

    # kubeadm join kubernetes-cluster:6443 --token 6cliof.3kuo3kdc6nfj7xup --discovery-token-ca-cert-hash sha256:f280c7d168d8bc05e67a2c7d3007df40c01cda6a0458abfe02942c2b070c429d --experimental-control-plane --certificate-key 26a12efab0f7f392ab07faabdfaffa35b2328d59b6a15ba8b0b4adee6844b819
    

    Add worker nodes

    # kubeadm join kubernetes-cluster:6443 --token 6cliof.3kuo3kdc6nfj7xup --discovery-token-ca-cert-hash sha256:f280c7d168d8bc05e67a2c7d3007df40c01cda6a0458abfe02942c2b070c429d 
    

    kubectl configuration

    Configure the kubectl runtime parameters. On the master nodes this can be done directly; a node that needs to run kubectl must first obtain the admin.conf file from a master (see the sketch below).

    # echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> /etc/profile
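
    A minimal sketch for copying admin.conf from a master to a node that needs kubectl (assumes root SSH access to the master; adjust the IP as appropriate):

    # scp root@172.253.60.3:/etc/kubernetes/admin.conf /etc/kubernetes/admin.conf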
    

    Remove a node

    Run on a master node:

    # kubectl drain kubernetes-node-4  --delete-local-data --force --ignore-daemonsets
    # kubectl delete node kubernetes-node-4
    

    Run on the node being removed:

    # kubeadm reset
    # iptables -F && iptables -t nat -F && iptables -t mangle -F && iptables -X
    # systemctl restart docker
    

    Obtain the token / CA hash / certificate key

    Obtain a token; use one whose TTL has not yet expired.

    # kubeadm token create
    # kubeadm token list
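
    Alternatively, kubeadm can generate a fresh token and print the complete worker join command in one step:

    # kubeadm token create --print-join-command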
    

    Obtain the CA certificate sha256 hash

    # openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'
    

    Obtain the certificate-key

    # kubeadm init phase upload-certs --experimental-upload-certs
    

    View kubeadm-related configuration

    kube-proxy configuration

    # kubectl -n kube-system get configmap kube-proxy -o yaml
    

    kubeadm-config configuration

    # kubectl -n kube-system get configmap kubeadm-config -o yaml
    

    kubelet-config configuration

    # kubectl -n kube-system get configmap kubelet-config-1.14 -o yaml
    

    Modify kubeadm-related configuration

    # kubectl -n kube-system edit configmap kube-proxy
    # kubectl -n kube-system edit configmap kubeadm-config
    # kubectl -n kube-system edit configmap kubelet-config-1.14
    

    After editing, restart the affected pods with the following command; replace kube-proxy with the appropriate pod name pattern as needed.

    # kubectl get pod -n kube-system | grep kube-proxy | awk '{system("kubectl delete pod "$1" -n kube-system")}'
    

    Calico networking

    Calico deployment

    Calico configuration file

    According to the Calico documentation, use calico.yaml for clusters with fewer than 50 nodes and calico-typha.yaml for clusters with more than 50 nodes.

    Manifest URL: https://docs.projectcalico.org/v3.8/manifests/calico.yaml

    The main items to modify are:

    • image: change the calico/ image references to a registry from which the images can actually be pulled
    • nodeSelector: change to the label of the nodes the pods should be scheduled on
    • replicas: change to the desired number of replicas, e.g. 3
    • calico-node: the env variables CALICO_IPV4POOL_IPIP, CALICO_IPV4POOL_CIDR and CALICO_ADVERTISE_CLUSTER_IPS

    The relevant modified configuration is as follows:

    image

    image: 10.254.249.2/kubernetes/calico-cni:v3.8.0
    image: 10.254.249.2/kubernetes/calico-cni:v3.8.0
    image: 10.254.249.2/kubernetes/calico-pod2daemon-flexvol:v3.8.0
    image: 10.254.249.2/kubernetes/calico-node:v3.8.0
    image: 10.254.249.2/kubernetes/calico-kube-controllers:v3.8.0
    

    calico-kube-controllers

    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: calico-kube-controllers
      namespace: kube-system
      labels:
        k8s-app: calico-kube-controllers
    spec:
      # The controllers can only have a single active instance.
      replicas: 3
      template:
        spec:
          nodeSelector:
            beta.kubernetes.io/os: linux
            calico/role: "master"
    

    calico-node

    kind: DaemonSet
    apiVersion: apps/v1
    metadata:
      name: calico-node
      namespace: kube-system
      labels:
        k8s-app: calico-node
    spec:
      template:
        spec:
          containers:
            # Runs calico-node container on each Kubernetes node.  This
            # container programs network policy and routes on each
            # host.
            - name: calico-node
              image: calico/node:v3.8.1
              env:
                - name: CALICO_IPV4POOL_IPIP
                  value: "CrossSubnet"
                - name: CALICO_IPV4POOL_CIDR
                  value: "172.16.0.0/16"
                - name: CALICO_ADVERTISE_CLUSTER_IPS
                  value: "10.96.0.0/16"
    

    Prepare the environment

    Download the following images and push them to the private registry (see the sketch after this list):

    • calico/pod2daemon-flexvol:v3.8.0
    • calico/node:v3.8.0
    • calico/kube-controllers:v3.8.0
    • calico/cni:v3.8.0
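
    A minimal sketch for mirroring one of these images into the private registry used in this guide; repeat for each image, and note that the "/" in the upstream name becomes "-" in the private repository path shown earlier:

    # docker pull calico/node:v3.8.0
    # docker tag calico/node:v3.8.0 10.254.249.2/kubernetes/calico-node:v3.8.0
    # docker push 10.254.249.2/kubernetes/calico-node:v3.8.0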

    Add a label to the master nodes

    # kubectl label nodes kubernetes-master-1 calico/role=master
    # kubectl label nodes kubernetes-master-2 calico/role=master
    # kubectl label nodes kubernetes-master-3 calico/role=master
    

    Start Calico

    # kubectl apply -f calico.yaml
    

    calicoctl deployment

    calicoctl configuration

    calicoctl is the command-line tool for managing Calico. It can be deployed as a pod, but installing the standalone binary is recommended.

    Configuration file path: /etc/calico/calicoctl.cfg

    apiVersion: projectcalico.org/v3
    kind: CalicoAPIConfig
    metadata:
    spec:
        datastoreType: "kubernetes"
        kubeconfig: "/root/.kube/config"
    

    Deploy calicoctl

    # wget https://github.com/projectcalico/calicoctl/releases/download/v3.8.1/calicoctl
    # mv calicoctl /usr/local/bin
    # chmod +x /usr/local/bin/calicoctl
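
    A quick check that the binary works and can reach the Kubernetes datastore configured in calicoctl.cfg:

    # calicoctl version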
    

    Calico network optimization

    Calico network architecture

    Node-to-node mesh

    Calico enables node-to-node mesh mode by default: every Calico node automatically establishes a BGP peering with every other Calico node. Up to roughly 50 nodes this full mesh performs well, but as the node count grows the number of BGP peerings grows quadratically (N×(N−1)/2 sessions for N nodes) and places an increasing burden on the servers. In Calico clusters with more than 50 nodes, node-to-node mesh mode should therefore be disabled.
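
    With the mesh enabled, every other node shows up as a directly connected BGP peer; this can be inspected on any node, for example:

    # calicoctl node status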

    Route reflector

    A route reflector node acts as the hub of a star topology in the Calico network: it establishes BGP peerings with a set of Calico nodes and with the other route reflector nodes to provide full connectivity. Routes are then learned indirectly through the reflectors instead of directly from every peer, which greatly reduces the number of BGP peerings in the network and makes this the appropriate topology for Calico clusters with more than 50 nodes. By deploying 2-3 route reflector nodes, the reflection layer itself remains highly available.

    IPIP mode

    By default Calico applies IPIP encapsulation to all pod traffic. In practice it should be applied selectively, encapsulating only traffic that crosses a subnet boundary (CrossSubnet mode).

    Optimization steps

    • Switch ipipMode to CrossSubnet
    • Switch the BGP topology from full mesh to route reflectors
    • Adjust the Calico network MTU

    Prepare the configuration

    default-ipv4-ippool.yaml

    apiVersion: projectcalico.org/v3
    kind: IPPool
    metadata:
      name: default-ipv4-ippool
    spec:
      cidr: 192.168.0.0/16
      ipipMode: CrossSubnet
      natOutgoing: true
    

    bgp.yaml

    apiVersion: projectcalico.org/v3
    kind: BGPConfiguration
    metadata:
      name: default
    spec:
      logSeverityScreen: Info
      nodeToNodeMeshEnabled: false
      asNumber: 64512
    

    Export the configuration of each node with the following commands, then modify it:

    # calicoctl get node kubernetes-master-1 --export -o yaml > master-1.yaml
    # calicoctl get node kubernetes-master-2 --export -o yaml > master-2.yaml
    # calicoctl get node kubernetes-master-3 --export -o yaml > master-3.yaml
    

    In this deployment kubernetes-master-1, kubernetes-master-2 and kubernetes-master-3 are used as route reflectors.

    master-1.yaml

    apiVersion: projectcalico.org/v3
    kind: Node
    metadata:
      annotations:
        projectcalico.org/kube-labels: '{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","calico/role":"master","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"kubernetes-master-1","kubernetes.io/os":"linux","node-role.kubernetes.io/master":""}'
      creationTimestamp: null
      labels:
        beta.kubernetes.io/arch: amd64
        beta.kubernetes.io/os: linux
        calico/role: master
        kubernetes.io/arch: amd64
        kubernetes.io/hostname: kubernetes-master-1
        kubernetes.io/os: linux
        node-role.kubernetes.io/master: ""
        i-am-a-route-reflector: "true"
      name: kubernetes-master-1
    spec:
      bgp:
        ipv4Address: 172.253.60.3/22
        ipv4IPIPTunnelAddr: 192.168.244.192
        routeReflectorClusterID: 224.0.0.1
    

    master-2.yaml

    apiVersion: projectcalico.org/v3
    kind: Node
    metadata:
      annotations:
        projectcalico.org/kube-labels: '{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","calico/role":"master","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"kubernetes-master-2","kubernetes.io/os":"linux","node-role.kubernetes.io/master":""}'
      creationTimestamp: null
      labels:
        beta.kubernetes.io/arch: amd64
        beta.kubernetes.io/os: linux
        calico/role: master
        kubernetes.io/arch: amd64
        kubernetes.io/hostname: kubernetes-master-2
        kubernetes.io/os: linux
        node-role.kubernetes.io/master: ""
        i-am-a-route-reflector: "true"
      name: kubernetes-master-2
    spec:
      bgp:
        ipv4Address: 172.253.60.4/22
        ipv4IPIPTunnelAddr: 192.168.108.128
        routeReflectorClusterID: 224.0.0.1
    

    master-3.yaml

    apiVersion: projectcalico.org/v3
    kind: Node
    metadata:
      annotations:
        projectcalico.org/kube-labels: '{"beta.kubernetes.io/arch":"amd64","beta.kubernetes.io/os":"linux","calico/role":"master","kubernetes.io/arch":"amd64","kubernetes.io/hostname":"kubernetes-master-3","kubernetes.io/os":"linux","node-role.kubernetes.io/master":""}'
      creationTimestamp: null
      labels:
        beta.kubernetes.io/arch: amd64
        beta.kubernetes.io/os: linux
        calico/role: master
        kubernetes.io/arch: amd64
        kubernetes.io/hostname: kubernetes-master-3
        kubernetes.io/os: linux
        node-role.kubernetes.io/master: ""
        i-am-a-route-reflector: "true"
      name: kubernetes-master-3
    spec:
      bgp:
        ipv4Address: 172.253.60.5/22
        ipv4IPIPTunnelAddr: 192.168.235.128
        routeReflectorClusterID: 224.0.0.1
    

    peer-to-rrs.yaml

    kind: BGPPeer
    apiVersion: projectcalico.org/v3
    metadata:
      name: peer-to-rrs
    spec:
      nodeSelector: "!has(i-am-a-route-reflector)"
      peerSelector: has(i-am-a-route-reflector)
    

    rr-mesh.yaml

    kind: BGPPeer
    apiVersion: projectcalico.org/v3
    metadata:
      name: rr-mesh
    spec:
      nodeSelector: has(i-am-a-route-reflector)
      peerSelector: has(i-am-a-route-reflector)
    

    Apply the optimization

    Change the ipipMode setting

    # calicoctl apply -f default-ipv4-ippool.yaml
    

    Change the network mode to route reflectors

    # calicoctl apply -f bgp.yaml
    # calicoctl apply -f master-1.yaml
    # calicoctl apply -f master-2.yaml
    # calicoctl apply -f master-3.yaml
    # calicoctl apply -f peer-to-rrs.yaml
    # calicoctl apply -f rr-mesh.yaml
    

    Check the configuration

    # calicoctl get node -o yaml
    # calicoctl get bgpconfig -o yaml
    # calicoctl get bgpPeer -o yaml
    

    Adjust the MTU

    Edit /etc/cni/net.d/10-calico.conflist and set the MTU to the desired value.
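
    The value lives in the "calico" plugin entry of the conflist. A minimal sketch for checking and changing it, assuming the file still contains the Calico default of 1440 (for IPIP, a common choice is the interface MTU minus 20 bytes of encapsulation overhead):

    # grep mtu /etc/cni/net.d/10-calico.conflist
    # sed -i 's/"mtu": 1440/"mtu": 1480/' /etc/cni/net.d/10-calico.conflist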

    Restart calico-node

    # kubectl get pod -n kube-system | grep calico-node | awk '{system("kubectl delete pod "$1" -n kube-system")}'
