Kubernetes High-Availability Deployment

Author: 想成为大师的学徒小纪 | Published 2022-06-04 01:07

    I. Environment Preparation

    Hostname                     IP            Applications
    cnsz-fbu-bck8s-master01-uat  10.81.0.101   kubeadm, kubectl, kubelet, docker, apiserver, controllerManager, scheduler, helm
    cnsz-fbu-bck8s-master02-uat  10.81.0.102   kubeadm, kubectl, kubelet, docker, apiserver, controllerManager, scheduler
    cnsz-fbu-bck8s-master03-uat  10.81.0.103   kubeadm, kubectl, kubelet, docker, apiserver, controllerManager, scheduler
    cnsz-fbu-bck8s-node01-uat    10.81.0.104   kubeadm, kubelet, docker, kubeproxy
    cnsz-fbu-bck8s-node02-uat    10.81.0.105   kubeadm, kubelet, docker, kubeproxy
    cnsz-fbu-etcd01-uat          10.81.64.37   etcd, nginx, keepalived (keepalived VIP: 10.81.64.110)
    cnsz-fbu-etcd02-uat          10.81.64.38   etcd, nginx, keepalived
    cnsz-fbu-etcd03-uat          10.81.64.39   etcd

    Upgrade the kernel to a version above 4.0: the overlay2 storage driver configured for docker below requires a kernel newer than 4.0.

    yum -y install http://192.168.73.43/soft/kernel-4.9.86-30.el7.x86_64.rpm
    awk -F "'" '$1=="menuentry "{print $2}' /etc/grub2.cfg
    grub2-set-default 'CentOS Linux (4.9.86-30.el7.x86_64) 7 (Core)'
    reboot
    
    uname -r
    

    Disable the swap space: run swapoff -a, then delete the swap entry from /etc/fstab.
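
    A minimal sketch of both steps (the sed pattern is an assumption based on a standard fstab layout):

    swapoff -a
    # comment out the swap entry so the change survives reboots
    sed -ri 's/^([^#].*[[:space:]]swap[[:space:]].*)$/#\1/' /etc/fstab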

    On a trusted internal network, stop the firewall service.

    systemctl stop firewalld
    systemctl disable firewalld
    

    Disable SELinux so that containers can access the host filesystem.

    setenforce 0
    sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
    

    Make sure the MAC address and product_uuid are unique on every node.

    ip a
    cat /sys/class/dmi/id/product_uuid
    

    Let iptables see bridged traffic.

    cat <<EOF | sudo tee /etc/modules-load.d/k8s.conf
    br_netfilter
    EOF
    
    cat <<EOF | sudo tee /etc/sysctl.d/k8s.conf
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    EOF
    sudo sysctl --system
    

    Enable kubectl command completion.

    yum -y install bash-completion
    echo "source <(kubectl completion bash)" >> ~/.bash_profile
    source /etc/profile.d/bash_completion.sh
    source /root/.bash_profile
    

    Add host resolution entries.

    cat >> /etc/hosts <<'EOF'
    10.81.0.101 cnsz-fbu-bck8s-master01-uat
    10.81.0.102 cnsz-fbu-bck8s-master02-uat
    10.81.0.103 cnsz-fbu-bck8s-master03-uat
    10.81.0.104 cnsz-fbu-bck8s-node01-uat
    10.81.0.105 cnsz-fbu-bck8s-node02-uat
    10.81.64.110 apiserver
    EOF
    

    II. Installing with the kubeadm Tool

    1. Install the docker container runtime

    <!== Run on all k8s hosts ==>

    • Remove old versions

      yum -y remove docker \
                        docker-client \
                        docker-client-latest \
                        docker-common \
                        docker-latest \
                        docker-latest-logrotate \
                        docker-logrotate \
                        docker-engine
      
    • Install docker

      yum install -y yum-utils
      yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
      yum list docker-ce --showduplicates | sort -r
      yum -y install docker-ce-18.09.9-3.el7 docker-ce-cli-18.09.9-3.el7 containerd.io docker-compose-plugin
      
    • Configure docker

      mkdir /etc/docker
      mkdir -p /data/docker/data
      cat > /etc/docker/daemon.json <<'EOF'
      {
        "registry-mirrors": ["https://xigwl1gq.mirror.aliyuncs.com"],
        "exec-opts": ["native.cgroupdriver=systemd"],
        "log-driver": "json-file",
        "log-opts": {
          "max-size": "200m",
          "max-file": "7"
        },
        "data-root": "/data/docker/data",
        "storage-driver": "overlay2",
        "storage-opts": ["overlay2.override_kernel_check=true"],
        "dns": ["192.168.94.94", "192.168.94.95", "192.168.109.104"]
      }
      EOF
      cat >> /etc/sysctl.conf <<'EOF'
      net.ipv4.conf.all.forwarding = 1
      EOF
      sysctl -p
      
    • Start docker

      systemctl start docker
      systemctl enable docker
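
      Optionally confirm that the daemon.json settings took effect (a quick check, not part of the original steps):

      docker info | grep -E 'Storage Driver|Cgroup Driver|Docker Root Dir'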
      

    2. Install the external etcd cluster

    • Download the etcd binaries

      <!== Run on all etcd hosts ==>

      mkdir -p /data/svc
      cd /usr/local/src && wget https://github.com/etcd-io/etcd/releases/download/v3.4.18/etcd-v3.4.18-linux-amd64.tar.gz
      tar zxf etcd-v3.4.18-linux-amd64.tar.gz -C /data/svc
      cd /data/svc
      mv etcd-v3.4.18-linux-amd64 etcd-v3.4.18
      
    • Encrypt communication with TLS

      On the etcd01 host, download the cfssl tools

      wget -O /usr/bin/cfssl "https://pkg.cfssl.org/R1.2/cfssl_linux-amd64"
      wget -O /usr/bin/cfssljson "https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64"
      chmod a+x /usr/bin/cfssl*
      

      Generate the CA certificate

      mkdir -p /data/certs/etcd
      cat > /data/certs/etcd/ca-config.json <<'EOF'
      {
          "signing": {
              "default": {
                  "expiry": "876000h"
              },
              "profiles": {
                  "server": {
                      "expiry": "876000h",
                      "usages": [
                          "signing",
                          "key encipherment",
                          "server auth",
                          "client auth"
                      ]
                  },
                  "client": {
                      "expiry": "876000h",
                      "usages": [
                          "signing",
                          "key encipherment",
                          "client auth"
                      ]
                  },
                  "peer": {
                      "expiry": "876000h",
                      "usages": [
                          "signing",
                          "key encipherment",
                          "server auth",
                          "client auth"
                      ]
                  }
              }
          }
      }
      EOF
      cat > /data/certs/etcd/ca-csr.json <<'EOF'
      {
          "CN": "etcd",
          "key": {
              "algo": "rsa",
              "size": 2048
          }
      }
      EOF
      cd /data/certs/etcd && cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
      

      Generate the server and peer certificates

      cat > /data/certs/etcd/server.json <<'EOF'
      {
          "CN": "etcd",
          "hosts": [
              "127.0.0.1",
              "10.81.64.37",
              "10.81.64.38",
              "10.81.64.39"
          ],
          "key": {
              "algo": "ecdsa",
              "size": 256
          },
          "names": [
              {
                  "C": "CN",
                  "L": "BeiJing",
                  "ST": "BeiJing"
              }
          ]
      }
      EOF
      cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server server.json | cfssljson -bare server
      cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=peer server.json | cfssljson -bare peer
      

      Generate the client certificate

      cat > /data/certs/etcd/client.json <<'EOF'
      {
          "CN": "client",
          "hosts": [""],
          "key": {
              "algo": "ecdsa",
              "size": 256
          },
          "names": [
              {
                  "C": "CN",
                  "L": "BeiJing",
                  "ST": "BeiJing"
              }
          ]
      }
      EOF
      cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=client client.json | cfssljson -bare client
      

      Distribute the certificates

      # Create the directory on the etcd02/etcd03 hosts
      mkdir /data/certs
      
      rsync -avzP -e "ssh -p 60025" /data/certs/etcd 10.81.64.38:/data/certs/
      rsync -avzP -e "ssh -p 60025" /data/certs/etcd 10.81.64.39:/data/certs/
      # Create the directory on the k8s master hosts
      mkdir -p /data/certs/etcd
      
      rsync -avzP -e "ssh -p 60025" /data/certs/etcd/client*.pem 10.81.0.101:/data/certs/etcd
      rsync -avzP -e "ssh -p 60025" /data/certs/etcd/client*.pem 10.81.0.102:/data/certs/etcd
      rsync -avzP -e "ssh -p 60025" /data/certs/etcd/client*.pem 10.81.0.103:/data/certs/etcd
      rsync -avzP -e "ssh -p 60025" /data/certs/etcd/ca.pem 10.81.0.101:/data/certs/etcd
      rsync -avzP -e "ssh -p 60025" /data/certs/etcd/ca.pem 10.81.0.102:/data/certs/etcd
      rsync -avzP -e "ssh -p 60025" /data/certs/etcd/ca.pem 10.81.0.103:/data/certs/etcd
      
    • Edit the configuration file

      <!== Run on all etcd hosts ==>

      mkdir -p /data/etcd/conf
      mkdir -p /data/etcd/data
      cat > /data/etcd/conf/etcd.conf.yml <<'EOF'
      # Cluster member name; change per node
      name: 'etcd01'
      # Path to the data directory.
      data-dir: '/data/etcd/data'
      # Number of committed transactions to trigger a snapshot to disk.
      snapshot-count: 50000
      # Time (in milliseconds) of a heartbeat interval.
      heartbeat-interval: 100
      # Time (in milliseconds) for an election to timeout.
      election-timeout: 1000
      # Raise alarms when backend size exceeds the given quota. 0 means use the
      # default quota.
      quota-backend-bytes: 8589934592
      # List of comma separated URLs to listen on for peer traffic. Change the IP per node.
      listen-peer-urls: https://10.81.64.37:2380
      # List of comma separated URLs to listen on for client traffic. Change the IP per node.
      listen-client-urls: https://10.81.64.37:2379
      # Maximum number of snapshot files to retain (0 is unlimited).
      max-snapshots: 5
      max-request-bytes: 10485760
      # Maximum number of wal files to retain (0 is unlimited).
      max-wals: 5
      # Comma-separated white list of origins for CORS (cross-origin resource sharing).
      cors:
      # List of this member's peer URLs to advertise to the rest of the cluster.
      # The URLs need to be a comma-separated list. Change the IP per node.
      initial-advertise-peer-urls: https://10.81.64.37:2380
      # List of this member's client URLs to advertise to the public.
      # The URLs need to be a comma-separated list. Change the IP per node.
      advertise-client-urls: https://10.81.64.37:2379
      # Discovery URL used to bootstrap the cluster.
      discovery:
      # Valid values include 'exit', 'proxy'
      discovery-fallback: 'proxy'
      # HTTP proxy to use for traffic to discovery service.
      discovery-proxy:
      # DNS domain used to bootstrap initial cluster.
      discovery-srv:
      # Initial cluster configuration for bootstrapping.
      initial-cluster: 'etcd01=https://10.81.64.37:2380,etcd02=https://10.81.64.38:2380,etcd03=https://10.81.64.39:2380'
      # Initial cluster token for the etcd cluster during bootstrap.
      initial-cluster-token: 'etcd-cluster'
      # Initial cluster state ('new' or 'existing').
      initial-cluster-state: 'new'
      # Reject reconfiguration requests that would cause quorum loss.
      strict-reconfig-check: false
      # Enable runtime profiling data via HTTP server
      enable-pprof: true
      # Valid values include 'on', 'readonly', 'off'
      proxy: 'off'
      # Time (in milliseconds) an endpoint will be held in a failed state.
      proxy-failure-wait: 5000
      # Time (in milliseconds) of the endpoints refresh interval.
      proxy-refresh-interval: 30000
      # Time (in milliseconds) for a dial to timeout.
      proxy-dial-timeout: 1000
      # Time (in milliseconds) for a write to timeout.
      proxy-write-timeout: 5000
      # Time (in milliseconds) for a read to timeout.
      proxy-read-timeout: 0
      client-transport-security:
        # Path to the client server TLS cert file.
        cert-file: '/data/certs/etcd/server.pem'
        # Path to the client server TLS key file.
        key-file: '/data/certs/etcd/server-key.pem'
        # Enable client cert authentication.
        client-cert-auth: true
        # Path to the client server TLS trusted CA cert file.
        trusted-ca-file: '/data/certs/etcd/ca.pem'
        # Client TLS using generated certificates
        auto-tls: false
      peer-transport-security:
        # Path to the peer server TLS cert file.
        cert-file: '/data/certs/etcd/peer.pem'
        # Path to the peer server TLS key file.
        key-file: '/data/certs/etcd/peer-key.pem'
        # Enable peer client cert authentication.
        client-cert-auth: true
        # Path to the peer server TLS trusted CA cert file.
        trusted-ca-file: '/data/certs/etcd/ca.pem'
        # Peer TLS using generated certificates.
        auto-tls: false
      # The validity period of the self-signed certificate, the unit is year.
      self-signed-cert-validity: 100
      # Enable debug-level logging for etcd.
      log-level: 'info'
      logger: zap
      # Specify 'stdout' or 'stderr' to skip journald logging even when running under systemd.
      log-outputs: [stderr]
      # Force to create a new one member cluster.
      force-new-cluster: false
      auto-compaction-mode: periodic
      auto-compaction-retention: "1"
      EOF
      
    • Manage etcd with systemd

      <!== Run on all etcd hosts ==>

      cat > /etc/systemd/system/etcd.service <<'EOF'
      [Unit]
      Description=Etcd Server
      After=network.target
      After=network-online.target
      Wants=network-online.target
      
      [Service]
      Type=notify
      ExecStart=/data/svc/etcd-v3.4.18/etcd --config-file /data/etcd/conf/etcd.conf.yml
      Restart=on-failure
      LimitNOFILE=65535
      OOMScoreAdjust=-999
      
      [Install]
      WantedBy=multi-user.target
      EOF
      systemctl daemon-reload
      systemctl start etcd
      systemctl enable etcd
      
    • Verify the cluster is healthy

      $ /data/svc/etcd-v3.4.18/etcdctl --cacert=ca.pem --cert=server.pem --key=server-key.pem --endpoints="https://10.81.64.37:2379" member list
      2259985fb5165391, started, etcd02, https://10.81.64.38:2380, https://10.81.64.38:2379, false
      592e58e2927d0458, started, etcd01, https://10.81.64.37:2380, https://10.81.64.37:2379, false
      d32cce3d601b23ec, started, etcd03, https://10.81.64.39:2380, https://10.81.64.39:2379, false
      $ /data/svc/etcd-v3.4.18/etcdctl --cacert=ca.pem --cert=server.pem --key=server-key.pem --endpoints="https://10.81.64.37:2379" endpoint health
      https://10.81.64.37:2379 is healthy: successfully committed proposal: took = 11.013657ms
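
      A status query across all three members can also be useful here (same etcdctl flags, all endpoints listed):

      /data/svc/etcd-v3.4.18/etcdctl --cacert=ca.pem --cert=server.pem --key=server-key.pem \
        --endpoints="https://10.81.64.37:2379,https://10.81.64.38:2379,https://10.81.64.39:2379" \
        endpoint status --write-out=table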
      

    3. Install Nginx + Keepalived to reverse-proxy the apiserver

    • Install nginx

      yum -y install nginx nginx-all-modules.noarch
      
    • Edit the nginx configuration

      user nginx nginx;
      worker_processes auto;
      worker_cpu_affinity auto;
      error_log /var/log/nginx/error.log;
      pid /run/nginx.pid;
      
      # Load dynamic modules. See /usr/share/doc/nginx/README.dynamic.
      include /usr/share/nginx/modules/*.conf;
      
      events {
          use epoll;
          accept_mutex off;
          worker_connections 1024;
      }
      
      stream {
        upstream apiserver {
          server 10.81.0.101:6443 max_fails=3 fail_timeout=30s;
          server 10.81.0.102:6443 max_fails=3 fail_timeout=30s;
          server 10.81.0.103:6443 max_fails=3 fail_timeout=30s;
        }
        log_format nginx-json-log '{"remote_addr":"$remote_addr","time_local":"$time_local","ssl_protocol":"$ssl_protocol","status":"$status","bytes_sent":"$bytes_sent","bytes_received":"$bytes_received","session_time":"$session_time","upstream_addr":"$upstream_addr","upstream_bytes_sent":"$upstream_bytes_sent","upstream_bytes_received":"$upstream_bytes_received","upstream_connect_time":"$upstream_connect_time","ng_server":"$ng_server"}';
        server {
          listen 6443;
          set $ng_server k8s-apiserver;
          access_log /var/log/nginx/k8s-apiserver.log nginx-json-log;
          proxy_connect_timeout 2s;
          proxy_timeout 600s;
          proxy_pass apiserver;
        }
      }
      
    • Start nginx

      systemctl start nginx
      systemctl enable nginx
      systemctl status nginx
      
    • Install keepalived

      yum -y install keepalived
      
    • Edit the keepalived configuration

      <!== The configuration differs per node; adjust accordingly ==>

      cat > /etc/keepalived/keepalived.conf <<'EOF'
      global_defs {
         ## Router identity; defaults to the local hostname. Change per node
         router_id nginx37
         script_user root root
         enable_script_security
      }
      
      vrrp_script CheckNginx {
          script "/bin/bash /etc/keepalived/check_nginx.sh"
          interval 1
          weight -20
      }
      
      vrrp_instance VI_1 {
          # Initial state of this instance; change per node
          state MASTER
          # The matching network interface
          interface ens192
          # Instance identifier; must be identical on all nodes
          virtual_router_id 51
          # Priority; change per node
          priority 150
          advert_int 1
          nopreempt
          track_script {
              CheckNginx
          }
          authentication {
              auth_type PASS
              auth_pass I9OsLmlb
          }
          virtual_ipaddress {
              10.81.64.110
          }
      }
      EOF
      
      cat > /etc/keepalived/check_nginx.sh <<'EOF'
      #!/bin/bash
      
      if ! pidof nginx >/dev/null 2>&1;then
        nginx -t && nginx 
        sleep 5
        if ! pidof nginx >/dev/null 2>&1;then
          systemctl stop keepalived
        fi
      fi
      EOF
      
    • Start keepalived

      systemctl start keepalived
      systemctl enable keepalived
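
      To confirm the failover wiring, check that the VIP is bound on the current MASTER node (a quick sanity check; interface name per the config above):

      ip addr show ens192 | grep 10.81.64.110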
      

    4. Enable IPVS

    • Install the tools

      <!== Run on all k8s nodes ==>

      yum -y install ipset ipvsadm
      
    • Load the module configuration

      <!== Run on all k8s nodes ==>

      cat > /etc/sysconfig/modules/ipvs.modules <<EOF
      #!/bin/bash
      modprobe -- ip_vs
      modprobe -- ip_vs_rr
      modprobe -- ip_vs_wrr
      modprobe -- ip_vs_sh
      modprobe -- nf_conntrack_ipv4
      EOF
      chmod 755 /etc/sysconfig/modules/ipvs.modules && bash /etc/sysconfig/modules/ipvs.modules && lsmod | grep -e ip_vs -e nf_conntrack_ipv4
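
      Once kube-proxy is running in ipvs mode (after the cluster is initialized below), the virtual-server table can be inspected with:

      ipvsadm -Ln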
      

    5. Install kubeadm and related tools

    Upload the kubeadm binary that was recompiled with an extended certificate validity (see Part III).

    <!== Run on all master nodes ==>

    cat >/etc/yum.repos.d/kubernetes.repo <<'EOF'
    [kubernetes]
    name=Kubernetes Repository
    baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
    enabled=1
    gpgcheck=0
    EOF
    yum makecache fast
    yum -y install kubelet-1.19.6-0 kubeadm-1.19.6-0 kubectl-1.19.6-0 --disableexcludes=kubernetes
    systemctl enable --now kubelet
    

    Replace kubeadm with the recompiled binary

    mv /usr/bin/kubeadm{,_bak}
    tar zxf kubeadm.tar.gz -C /usr/bin
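
    Verify the replacement (the recompiled binary should still report v1.19.6):

    kubeadm version -o short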
    

    <!== Run on all worker nodes ==>

    cat >/etc/yum.repos.d/kubernetes.repo <<'EOF'
    [kubernetes]
    name=Kubernetes Repository
    baseurl=http://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64/
    enabled=1
    gpgcheck=0
    EOF
    yum makecache fast
    yum -y install kubelet-1.19.6-0 kubeadm-1.19.6-0 --disableexcludes=kubernetes
    systemctl enable --now kubelet
    

    Replace kubeadm with the recompiled binary

    mv /usr/bin/kubeadm{,_bak}
    tar zxf kubeadm.tar.gz -C /usr/bin
    

    6. Initialize the master01 node

    • Write the init configuration file

      mkdir -p /data/k8s/install
      mkdir -p /data/certs/kubernetes
      cat > /data/k8s/install/init-config.yml <<'EOF'
      apiVersion: kubeadm.k8s.io/v1beta2
      kind: InitConfiguration
      bootstrapTokens:
      - groups:
        - system:bootstrappers:kubeadm:default-node-token
        token: abcdef.0123456789abcdef
        ttl: 24h0m0s
        usages:
        - signing
        - authentication
      nodeRegistration:
        # Change to this node's hostname
        name: cnsz-fbu-bck8s-master01-uat
        criSocket: "/var/run/dockershim.sock"
        taints:
        - effect: NoSchedule
          key: node-role.kubernetes.io/control-plane
      localAPIEndpoint:
        # Change to this node's IP
        advertiseAddress: 10.81.0.101
        bindPort: 6443
      ---
      apiVersion: kubeadm.k8s.io/v1beta2
      kind: ClusterConfiguration
      etcd:
        external:
          endpoints:
          - "https://10.81.64.37:2379"
          - "https://10.81.64.38:2379"
          - "https://10.81.64.39:2379"
          caFile: "/data/certs/etcd/ca.pem"
          certFile: "/data/certs/etcd/client.pem"
          keyFile: "/data/certs/etcd/client-key.pem" 
      networking:
        dnsDomain: cluster.local
        serviceSubnet: 10.96.0.0/16
        podSubnet: "10.100.0.0/16"
      kubernetesVersion: "v1.19.6"
      controlPlaneEndpoint: "apiserver:6443"
      apiServer:
        extraArgs:
          service-node-port-range: "80-65535"
        certSANs:
        - cnsz-fbu-bck8s-master01-uat
        - cnsz-fbu-bck8s-master02-uat
        - cnsz-fbu-bck8s-master03-uat
        - cnsz-fbu-bck8s-node01-uat
        - cnsz-fbu-bck8s-node02-uat
        - apiserver
        - 10.81.0.101
        - 10.81.0.102
        - 10.81.0.103
        - 10.81.0.104
        - 10.81.0.105
        - 10.81.64.110
        timeoutForControlPlane: 4m0s
      controllerManager:
        extraArgs:
          experimental-cluster-signing-duration: "876000h"
      scheduler: {}
      dns:
        type: CoreDNS
      certificatesDir: "/data/certs/kubernetes"
      imageRepository: registry.cn-hangzhou.aliyuncs.com/google_containers
      clusterName: bc-kubernetes
      ---
      apiVersion: kubelet.config.k8s.io/v1beta1
      kind: KubeletConfiguration
      cgroupDriver: "systemd"
      maxPods: 300
      containerLogMaxSize: "100Mi"
      containerLogMaxFiles: 7
      ---
      apiVersion: kubeproxy.config.k8s.io/v1alpha1
      kind: KubeProxyConfiguration
      clusterCIDR: "10.100.0.0/16"
      metricsBindAddress: "0.0.0.0:10249"
      mode: "ipvs"
      EOF
      
    • Pull the component images

      cd /data/k8s/install
      kubeadm config images pull --config init-config.yml
      docker images
      
    • Initialize the control plane

      kubeadm init --config init-config.yml
      
      # If initialization fails, reset before running init again
      kubeadm reset
      kubeadm init --config init-config.yml
      
    • Record the join information for the cluster

      Your Kubernetes control-plane has initialized successfully!
      
      To start using your cluster, you need to run the following as a regular user:
      
        mkdir -p $HOME/.kube
        sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
        sudo chown $(id -u):$(id -g) $HOME/.kube/config
      
      Alternatively, if you are the root user, you can run:
      
        export KUBECONFIG=/etc/kubernetes/admin.conf
      
      You should now deploy a pod network to the cluster.
      Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
        https://kubernetes.io/docs/concepts/cluster-administration/addons/
      
      You can now join any number of control-plane nodes by copying certificate authorities
      and service account keys on each node and then running the following as root:
      
        kubeadm join apiserver:6443 --token abcdef.0123456789abcdef \
          --discovery-token-ca-cert-hash sha256:2b2858a9663dd0be99b8a0d3af941b666b7361d48fc5f9a05eabad140d36373c \
          --control-plane 
      
      Then you can join any number of worker nodes by running the following on each as root:
      
      kubeadm join apiserver:6443 --token abcdef.0123456789abcdef \
          --discovery-token-ca-cert-hash sha256:2b2858a9663dd0be99b8a0d3af941b666b7361d48fc5f9a05eabad140d36373c
      
    • Set the kubeconfig environment variable

      echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >> /etc/profile
      source /etc/profile
      
    • Re-enable the insecure HTTP ports (comment out --port=0)

      sed -i 's/- --port=0/#- --port=0/g' /etc/kubernetes/manifests/kube-controller-manager.yaml
      sed -i 's/- --port=0/#- --port=0/g' /etc/kubernetes/manifests/kube-scheduler.yaml
      
    • Check component status

      $ kubectl get pods -n kube-system -o wide
      $ kubectl get nodes
      $ kubectl get cs
      Warning: v1 ComponentStatus is deprecated in v1.19+
      NAME                 STATUS      MESSAGE                                                                                       ERROR
      # This Unhealthy status is expected: get cs always queries over the insecure HTTP ports, whether or not they are enabled
      scheduler            Unhealthy   Get "http://127.0.0.1:10251/healthz": dial tcp 127.0.0.1:10251: connect: connection refused   
      controller-manager   Unhealthy   Get "http://127.0.0.1:10252/healthz": dial tcp 127.0.0.1:10252: connect: connection refused   
      etcd-0               Healthy     {"health":"true"}                                                                             
      etcd-1               Healthy     {"health":"true"}                                                                             
      etcd-2               Healthy     {"health":"true"}
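
      Since get cs is deprecated, the apiserver's own health endpoint is another way to check (available in v1.19):

      kubectl get --raw='/readyz?verbose'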
      

    7. Join the other master nodes to the control plane

    • Pull the certificates from master01

      <!== Run on master02/03 ==>

      mkdir -p /data/certs/kubernetes
      rsync -avzP 10.81.0.101:/data/certs/kubernetes/* /data/certs/kubernetes/
      
    • Set the kubeconfig environment variable

      <!== Run on master02/03 ==>

      echo 'export KUBECONFIG=/etc/kubernetes/admin.conf' >> /etc/profile
      source /etc/profile
      
    • Write the join configuration file

      <!== Run on master02/03; adjust per node ==>

      mkdir -p /data/k8s/install
      cat > /data/k8s/install/join-config.yml <<'EOF'
      apiVersion: kubeadm.k8s.io/v1beta2
      kind: JoinConfiguration
      nodeRegistration:
        # Change to this node's hostname
        name: cnsz-fbu-bck8s-master02-uat
        criSocket: "/var/run/dockershim.sock"
        taints:
        - effect: NoSchedule
          key: node-role.kubernetes.io/control-plane
      caCertPath: "/data/certs/kubernetes/ca.crt"
      discovery:
        bootstrapToken:
          token: "abcdef.0123456789abcdef"
          apiServerEndpoint: "apiserver:6443"
          caCertHashes:
          - "sha256:2b2858a9663dd0be99b8a0d3af941b666b7361d48fc5f9a05eabad140d36373c"
      controlPlane:
        localAPIEndpoint:
          # Change to this node's IP
          advertiseAddress: "10.81.0.102"
          bindPort: 6443
      EOF
      
    • Run the join command

      <!== Run on master02/03 ==>

      kubeadm join --config /data/k8s/install/join-config.yml
      
    • Re-enable the insecure HTTP ports (comment out --port=0)

      <!== Run on master02/03 ==>

      sed -i 's/- --port=0/#- --port=0/g' /etc/kubernetes/manifests/kube-controller-manager.yaml
      sed -i 's/- --port=0/#- --port=0/g' /etc/kubernetes/manifests/kube-scheduler.yaml
      systemctl restart kubelet
      
    • Check cluster status

      <!== Run on master02/03 ==>

      kubectl get nodes
      

    8. Enable the PodPreset resource

    • Edit the apiserver manifest

      <!== Run on all master hosts ==>

      vim /etc/kubernetes/manifests/kube-apiserver.yaml
      # Modify this line
      - --enable-admission-plugins=NodeRestriction,PodPreset
      # Add this line
      - --runtime-config=settings.k8s.io/v1alpha1=true
      
    • Restart kubelet

      <!== Run on all master hosts ==>

      systemctl restart kubelet
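
      After the restart, confirm the API group is being served (a quick check, not part of the original steps):

      kubectl api-versions | grep settings.k8s.io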
      

    9. Join the worker nodes to the cluster

    • Pull the certificate from master01

      <!== Run on all worker nodes ==>

      mkdir -p /data/certs/kubernetes
      rsync -avzP 10.81.0.101:/data/certs/kubernetes/ca.crt /data/certs/kubernetes/
      
    • Write the join configuration file

      <!== Run on all worker nodes; adjust per node ==>

      mkdir -p /data/k8s/install
      cat > /data/k8s/install/join-worker-config.yml <<'EOF'
      apiVersion: kubeadm.k8s.io/v1beta2
      kind: JoinConfiguration
      nodeRegistration:
        # Change to this node's hostname
        name: cnsz-fbu-bck8s-node02-uat
        criSocket: "/var/run/dockershim.sock"
      discovery:
        bootstrapToken:
          token: "abcdef.0123456789abcdef"
          apiServerEndpoint: "apiserver:6443"
          caCertHashes:
          - "sha256:2b2858a9663dd0be99b8a0d3af941b666b7361d48fc5f9a05eabad140d36373c"
      EOF
      
    • Run the join command

      <!== Run on all worker nodes ==>

      kubeadm join --config /data/k8s/install/join-worker-config.yml
      
    • If the token has expired, regenerate it with the following command

      kubeadm token create --print-join-command
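
      If only the CA hash is needed, it can be recomputed from the CA certificate (the standard kubeadm recipe; note this cluster's non-default certificatesDir):

      openssl x509 -pubkey -in /data/certs/kubernetes/ca.crt \
        | openssl rsa -pubin -outform der 2>/dev/null \
        | openssl dgst -sha256 -hex | sed 's/^.* //'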
      

    10. Install a pod network plugin

    <!== Run on any master node ==>

    (1) flannel

    • Upload the kube-flannel.yml file

      kube-flannel.yml

    • Modify the file configuration (see the sketch below)
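
      The usual edit is to align the pod CIDR in the manifest's net-conf.json with the podSubnet from the kubeadm config (a sketch based on the stock kube-flannel.yml; the vxlan backend is an assumption):

      net-conf.json: |
        {
          "Network": "10.100.0.0/16",
          "Backend": {
            "Type": "vxlan"
          }
        }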

    • Start flannel

      kubectl apply -f /data/k8s/install/kube-flannel.yml
      

    (2) calico

    • Download the yml file

      <!== Run on master01 ==>

      cd /data/k8s/install
      wget https://docs.projectcalico.org/archive/v3.19/manifests/calico.yaml
      
    • Edit the yml file

      <!== master01节点执行 ==>

      vim calico.yaml
      ...
                  # Cluster type to identify the deployment type
                  - name: CLUSTER_TYPE
                    value: "k8s,bgp"
                  # Auto-detect the BGP IP address.
                  - name: IP
                    value: "autodetect"
                  # Disable IPIP mode to enable BGP mode (the stock default is "Always")
                  - name: CALICO_IPV4POOL_IPIP
                    value: "Never"
                  # Enable or Disable VXLAN on the default IP pool.
                  - name: CALICO_IPV4POOL_VXLAN
                    value: "Never"
      ...
                  # The default IPv4 pool to create on startup if none exists. Pod IPs will be
                  # chosen from this range. Changing this value after installation will have
                  # no effect. This should fall within `--cluster-cidr
                  # Change the pod network CIDR
                  - name: CALICO_IPV4POOL_CIDR
                    value: "10.100.0.0/16"
      ...
      
    • If the interface name cannot be matched automatically, define a custom detection rule

      vim calico.yaml
      ...
                  # Cluster type to identify the deployment type
                  - name: CLUSTER_TYPE
                    value: "k8s,bgp"
                  # IP automatic detection
                  - name: IP_AUTODETECTION_METHOD
                    value: "interface=en.*,eth0"
                  # Auto-detect the BGP IP address.
                  - name: IP
                    value: "autodetect"
                  # Disable IPIP mode to enable BGP mode (the stock default is "Always")
                  - name: CALICO_IPV4POOL_IPIP
                    value: "Never"
                  # Enable or Disable VXLAN on the default IP pool.
                  - name: CALICO_IPV4POOL_VXLAN
                    value: "Never"
      ...
      
    • Start calico

      <!== Run on master01 ==>

      kubectl apply -f calico.yaml
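
      Watch the calico-node DaemonSet come up and the nodes turn Ready (label per the stock manifest):

      kubectl get pods -n kube-system -l k8s-app=calico-node -o wide
      kubectl get nodes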
      

    11. Create a service to test

    • Write the yaml files

      mkdir /data/k8s/deploy
      cat > /data/k8s/deploy/nginx-deployment.yml <<'EOF'
      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: nginx
        labels:
          app: nginx
      spec:
        replicas: 2
        selector:
          matchLabels:
            app: nginx
        template:
          metadata:
            labels:
              app: nginx
          spec:
            containers:
            - name: nginx-demo
              image: nginx:1.17.6-alpine
              imagePullPolicy: IfNotPresent
              ports:
              - containerPort: 80
      EOF
      
      cat > /data/k8s/deploy/nginx-service.yml <<'EOF'
      apiVersion: v1
      kind: Service
      metadata:
        name: nginx-service
      spec:
        type: NodePort
        ports:
        - port: 80
          nodePort: 32000
        selector:
          app: nginx
      EOF
      
    • Create the resources

      kubectl apply -f /data/k8s/deploy/nginx-deployment.yml
      kubectl apply -f /data/k8s/deploy/nginx-service.yml
      
    • Check that the page is reachable

      http://10.81.0.105:32000
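
      For example, from any host that can reach the node:

      curl -I http://10.81.0.105:32000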

    III. Extending Certificate Validity

    1. Extend certificate validity with a script

    This script was written by a fellow developer: https://github.com/yuyicai/update-kube-cert

    <!== Run on all master nodes ==>

    • Check certificate expiration

      $ kubeadm alpha certs check-expiration
      [check-expiration] Reading configuration from the cluster...
      [check-expiration] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
      
      CERTIFICATE                EXPIRES                  RESIDUAL TIME   CERTIFICATE AUTHORITY   EXTERNALLY MANAGED
      admin.conf                 Jun 02, 2023 02:31 UTC   364d                                    no      
      apiserver                  Jun 02, 2023 02:16 UTC   364d            ca                      no      
      apiserver-kubelet-client   Jun 02, 2023 02:16 UTC   364d            ca                      no      
      controller-manager.conf    Jun 02, 2023 02:31 UTC   364d                                    no      
      front-proxy-client         Jun 02, 2023 02:16 UTC   364d            front-proxy-ca          no      
      scheduler.conf             Jun 02, 2023 02:31 UTC   364d                                    no      
      
      CERTIFICATE AUTHORITY   EXPIRES                  RESIDUAL TIME   EXTERNALLY MANAGED
      ca                      May 30, 2032 02:16 UTC   9y              no      
      front-proxy-ca          May 30, 2032 02:16 UTC   9y              no
      
    • Download the script

      cd /usr/local/src
      git clone https://github.com/yuyicai/update-kube-cert.git
      cd update-kube-cert
      chmod 755 update-kubeadm-cert.sh
      
    • Set the certificate directory and validity period

      vim update-kubeadm-cert.sh
      ...
      main() {
        local node_type=$1
      
        # CERT_DAYS=3650
        CERT_DAYS=36500
      
        KUBE_PATH=/etc/kubernetes
        # PKI_PATH=${KUBE_PATH}/pki
        PKI_PATH=/data/certs/kubernetes
      
        # master certificates path
        # apiserver
      
      ...
      
    • Run the script

      ./update-kubeadm-cert.sh master
      
    • Check certificate expiration again

      $ kubeadm alpha certs check-expiration
      [check-expiration] Reading configuration from the cluster...
      [check-expiration] FYI: You can look at this config file with 'kubectl -n kube-system get cm kubeadm-config -oyaml'
      
      CERTIFICATE                EXPIRES                  RESIDUAL TIME   CERTIFICATE AUTHORITY   EXTERNALLY MANAGED
      admin.conf                 May 10, 2122 01:54 UTC   99y                                     no      
      apiserver                  May 10, 2122 01:54 UTC   99y             ca                      no      
      apiserver-kubelet-client   May 10, 2122 01:54 UTC   99y             ca                      no      
      controller-manager.conf    May 10, 2122 01:54 UTC   99y                                     no      
      front-proxy-client         May 10, 2122 01:54 UTC   99y             front-proxy-ca          no      
      scheduler.conf             May 10, 2122 01:54 UTC   99y                                     no      
      
      CERTIFICATE AUTHORITY   EXPIRES                  RESIDUAL TIME   EXTERNALLY MANAGED
      ca                      May 30, 2032 02:16 UTC   9y              no      
      front-proxy-ca          May 30, 2032 02:16 UTC   9y              no
      

    2. Patch the source and recompile

    • Download the source

      cd /usr/local/src
      wget https://github.com/kubernetes/kubernetes/archive/refs/tags/v1.19.6.tar.gz
      tar zxf v1.19.6.tar.gz
      
    • Extend the CA certificate validity in the source

      cd kubernetes-1.19.6/
      vim staging/src/k8s.io/client-go/util/cert/cert.go
      ...
      func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, error) {
              now := time.Now()
              tmpl := x509.Certificate{
                      SerialNumber: new(big.Int).SetInt64(0),
                      Subject: pkix.Name{
                              CommonName:   cfg.CommonName,
                              Organization: cfg.Organization,
                      },
                      NotBefore:             now.UTC(),
                      // NotAfter:              now.Add(duration365d * 10).UTC(),
                      NotAfter:              now.Add(duration365d * 100).UTC(),
                      KeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
                      BasicConstraintsValid: true,
                      IsCA:                  true,
              }
      
              certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key)
              if err != nil {
                      return nil, err
              }
              return x509.ParseCertificate(certDERBytes)
      }
      ...
      
    • Extend the validity of the other certificates in the source

      vim cmd/kubeadm/app/constants/constants.go
      ...
              // CertificateValidity defines the validity for all the signed certificates generated by kubeadm
              // CertificateValidity = time.Hour * 24 * 365
              CertificateValidity = time.Hour * 24 * 365 * 100
      ...
      
    • Check the kube-cross TAG version

      $ cat build/build-image/cross/VERSION
      v1.15.5-1
      
    • Install the Go toolchain

      yum -y install rsync jq gcc make
      cd /usr/local/src
      wget https://dl.google.com/go/go1.15.5.linux-amd64.tar.gz
      tar zxf go1.15.5.linux-amd64.tar.gz
      export PATH=$PATH:/usr/local/src/go/bin/
      
    • Build kubeadm

      cd /usr/local/src/kubernetes-1.19.6/
      make all WHAT=cmd/kubeadm GOFLAGS=-v
      
    • Replace the kubeadm binary

      mv /usr/bin/kubeadm{,_bak}
      cp _output/local/bin/linux/amd64/kubeadm /usr/bin/
      
    • Renew the certificates

      The CA certificate's validity cannot be renewed this way; to extend the CA itself, the cluster must be built with the recompiled kubeadm from the very beginning.

      cp -rp /data/certs/kubernetes{,_bak}
      kubeadm alpha certs check-expiration
      kubeadm alpha certs renew all
      kubeadm alpha certs check-expiration
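
      The control-plane components do not pick up renewed certificates automatically; one common way to restart the static pods afterwards (an assumption, not part of the original steps) is to move the manifests away briefly:

      mv /etc/kubernetes/manifests /etc/kubernetes/manifests.off
      sleep 20
      mv /etc/kubernetes/manifests.off /etc/kubernetes/manifests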
      

    IV. Installing helm (the k8s package manager)

    <!== Run on any master node ==>

    • Install from the binary tarball

      cd /usr/local/src
      wget https://get.helm.sh/helm-v3.4.2-linux-amd64.tar.gz
      tar zxf helm-v3.4.2-linux-amd64.tar.gz
      cp linux-amd64/helm /usr/bin/
      
    • Enable command completion

      echo 'source <(helm completion bash)' >>/root/.bash_profile
      source /root/.bash_profile
      
    • Add commonly used repositories

      helm repo add elastic https://helm.elastic.co
      helm repo add gitlab https://charts.gitlab.io
      helm repo add harbor https://helm.goharbor.io
      helm repo add bitnami https://charts.bitnami.com/bitnami
      helm repo add stable http://mirror.azure.cn/kubernetes/charts
      helm repo add aliyun https://kubernetes.oss-cn-hangzhou.aliyuncs.com/charts
      helm repo update
      helm repo list
      
    • Verify with a test install

      # Search the repositories for an nginx chart
      helm search repo nginx
      # Pull a chart and unpack it
      helm pull bitnami/nginx
      tar zxf nginx-12.0.1.tgz
      
      # Override the chart defaults: use a domestic mirror image and change the container port
      vim nginx/values.yaml
      ...
      ## Bitnami NGINX image version
      ## ref: https://hub.docker.com/r/bitnami/nginx/tags/
      ## @param image.registry NGINX image registry
      ## @param image.repository NGINX image repository
      ## @param image.tag NGINX image tag (immutable tags are recommended)
      ## @param image.pullPolicy NGINX image pull policy
      ## @param image.pullSecrets Specify docker-registry secret names as an array
      ## @param image.debug Set to true if you would like to see extra information on logs
      ##
      image:
        repository: nginx
        tag: 1.17.6-alpine
      ...
      ## Configures the ports NGINX listens on
      ## @param containerPorts.http Sets http port inside NGINX container
      ## @param containerPorts.https Sets https port inside NGINX container
      ##
      containerPorts:
        http: 80
        https: ""
      ...
      
      # Install the chart
      helm install nginx ./nginx
      
      # Check that it responds
      kubectl get pods -o wide
      kubectl get svc
      curl http://[cluster_ip]
      # Uninstall the chart
      helm uninstall nginx
      

    V. Installing an Ingress Controller

    1. Ingress Nginx Controller

    • Create a PodPreset in the kube-system namespace

      cat > /data/k8s/install/podpreset.yaml <<'EOF'
      apiVersion: settings.k8s.io/v1alpha1
      kind: PodPreset
      metadata:
        name: tz-env
        namespace: kube-system
      spec:
        selector:
          matchLabels:
        env:
          - name: TZ
            value: Asia/Shanghai
      EOF
      kubectl apply -f /data/k8s/install/podpreset.yaml
      
    • Add the helm repository

      mkdir -p /data/helm/install
      cd /data/helm/install
      helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
      helm repo update
      
    • Pull the matching version of the ingress-nginx chart

      helm pull ingress-nginx/ingress-nginx --version=4.1.3
      tar zxf ingress-nginx-4.1.3.tgz
      
    • Upload the existing certificate to the k8s cluster

      mkdir -p /data/certs/nginx-ingress
      cd /data/certs/nginx-ingress
      kubectl create secret tls nginx-ingress --cert=eminxing.crt --key=eminxing.key -n kube-system
      
    • Edit the values

      vim ingress-nginx/values.yaml
      ...
        # -- Will add custom configuration options to Nginx https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/
        config:
          use-gzip: true
          gzip-level: 4
          gzip-types: "text/plain text/javascript application/x-javascript application/javascript text/css application/xml"
          worker-processes: 1
          proxy-body-size: "200m"
          proxy-connect-timeout: 60
          proxy-read-timeout: 1800
          proxy-send-timeout: 1800
          proxy-buffer-size: "256k"
      
        # -- Annotations to be added to the controller config configuration configmap.
        configAnnotations:
          nginx.ingress.kubernetes.io/enable-cors: "true"
          nginx.ingress.kubernetes.io/cors-allow-origin: "PUT, GET, POST, OPTIONS"
          nginx.ingress.kubernetes.io/cors-allow-headers: "DNT,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization,X-Forwarded-For"
          nginx.ingress.kubernetes.io/cors-expose-headers: "*"
          nginx.ingress.kubernetes.io/cors-allow-origin: "*"
      
      ...
        watchIngressWithoutClass: true
      ...
        ingressClassResource:
          # -- Name of the ingressClass
          name: nginx
          # -- Is this ingressClass enabled or not
          enabled: true
          # -- Is this the default ingressClass for the cluster
          default: true
          # -- Controller-value of the controller that is processing this ingressClass
          controllerValue: "k8s.io/ingress-nginx"
      ...
        #extraArgs: {}
        extraArgs:
          default-ssl-certificate: "kube-system/nginx-ingress"
      ...
        tolerations:
        - key: "node-role.kubernetes.io/control-plane"
          operator: "Equal"
          effect: "NoSchedule"
      ...
        terminationGracePeriodSeconds: 30
        nodeSelector:
          node-role.kubernetes.io/master: ""
      ...
        replicaCount: 3
      ...
          #type: LoadBalancer
      
          type: NodePort
          nodePorts:
            http: 80
            https: 443
            tcp:
              8080: 32808
          #nodePorts:
          #  http: ""
          #  https: ""
          #  tcp: {}
          #  udp: {}
      ...
      
    • Install ingress-nginx

      helm install ingress-nginx ingress-nginx/ -n kube-system
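
      Confirm the controller pods land on the masters as configured (label per the official chart):

      kubectl -n kube-system get pods -l app.kubernetes.io/name=ingress-nginx -o wide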
      
    • Test that it responds

      $ curl http://10.81.0.101
      <html>
      <head><title>404 Not Found</title></head>
      <body>
      <center><h1>404 Not Found</h1></center>
      <hr><center>nginx</center>
      </body>
      </html>
      

    2. Nginx Ingress Controller

    • Create a PodPreset in the kube-system namespace

      cat > /data/k8s/install/podpreset.yaml <<'EOF'
      apiVersion: settings.k8s.io/v1alpha1
      kind: PodPreset
      metadata:
        name: tz-env
        namespace: kube-system
      spec:
        selector:
          matchLabels:
        env:
          - name: TZ
            value: Asia/Shanghai
      EOF
      kubectl apply -f /data/k8s/install/podpreset.yaml
      
    • Add the helm repository

      mkdir -p /data/helm/install
      cd /data/helm/install
      helm repo add nginx-stable https://helm.nginx.com/stable
      helm repo update
      
    • Pull the matching version of the nginx-ingress chart

      helm pull nginx-stable/nginx-ingress --version=0.13.2
      tar zxf nginx-ingress-0.13.2.tgz
      
    • Upload the existing certificate to the k8s cluster

      mkdir -p /data/certs/nginx-ingress
      cd /data/certs/nginx-ingress
      kubectl create secret tls nginx-ingress --cert=eminxing.crt --key=eminxing.key -n kube-system
      
    • Edit the values

      vim nginx-ingress/values.yaml
      ...
        # Timeout in milliseconds which the Ingress Controller will wait for a successful NGINX reload after a change or at the initial start.
        nginxReloadTimeout: 5000
      ...
        config:
          ## The name of the ConfigMap used by the Ingress Controller.
          ## Autogenerated if not set or set to "".
          name: nginx-config
          ## The annotations of the Ingress Controller configmap.
          annotations: {}
          ## The entries of the ConfigMap for customizing NGINX configuration.
          entries:
            proxy-connect-timeout: "60s"
            proxy-read-timeout: "1800s"
            proxy-send-timeout: "1800s"
            client-max-body-size: "200m"
            worker-processes: "auto"
            worker-rlimit-nofile: "65535"
            worker-connections: "65535"
            keepalive-timeout: "300s"
            access-log-off: "true"
            log-format-escaping: "json"
            max-fails: "3"
            fail-timeout: "5s"
            keepalive: "256"
            server-snippets: |
              add_header Access-Control-Allow-Origin '*';
              add_header Access-Control-Allow-Methods 'GET, POST, OPTIONS,PUT,DELETE,OPTION';
              add_header Access-Control-Allow-Headers 'DNT,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range,Authorization,X-Forwarded-For';
              add_header Access-Control-Allow-Credentials 'true';
            location-snippets: |
              proxy_set_header Upgrade $http_upgrade;
              proxy_set_header Connection $connection_upgrade;
      ...
        defaultTLS:
          cert: ""
          key: ""
          secret: kube-system/nginx-ingress
      ...
        ## The node selector for pod assignment for the Ingress Controller pods.
        nodeSelector:
          node-role.kubernetes.io/master: ""
        tolerations:
        - key: "node-role.kubernetes.io/control-plane"
          operator: "Equal"
          effect: "NoSchedule"
      ...
        ## The number of replicas of the Ingress Controller deployment.
        replicaCount: 3
        setAsDefaultIngress: true
        enableSnippets: true
          ## The type of service to create for the Ingress Controller.
          type: NodePort
          httpPort:
            enable: true
            port: 80
            nodePort: "80"
            targetPort: 80
          httpsPort:
            enable: true
            port: 443
            nodePort: "443"
            targetPort: 443
      ...
      
    • Install nginx-ingress

      helm install nginx-ingress nginx-ingress/ -n kube-system
      
    • Test that it responds

      $ curl http://10.81.0.101
      <html>
      <head><title>404 Not Found</title></head>
      <body>
      <center><h1>404 Not Found</h1></center>
      <hr><center>nginx</center>
      </body>
      </html>
      

    VI. Installing Rancher (k8s web management) in High Availability

    • Add the rancher helm repository

      helm repo add rancher-stable http://rancher-mirror.oss-cn-beijing.aliyuncs.com/server-charts/stable
      
    • Create the rancher namespace

      kubectl create ns cattle-system
      
    • Create a PodPreset in the cattle-system namespace

      cat > /data/k8s/install/podpreset.yaml <<'EOF'
      apiVersion: settings.k8s.io/v1alpha1
      kind: PodPreset
      metadata:
        name: tz-env
        namespace: cattle-system
      spec:
        selector:
          matchLabels:
        env:
          - name: TZ
            value: Asia/Shanghai
      EOF
      kubectl apply -f /data/k8s/install/podpreset.yaml
      
    • Upload the existing certificate to the k8s cluster

      cd /data/certs/nginx-ingress
      kubectl create secret tls tls-rancher-ingress --cert=eminxing.crt --key=eminxing.key -n cattle-system
      
    • Pull the matching version of the rancher chart

      mkdir -p /data/helm/install
      cd /data/helm/install
      helm pull rancher-stable/rancher --version=2.5.5
      tar zxf rancher-2.5.5.tgz
      
    • Edit the values

      vim rancher/values.yaml
      ...
      # Fully qualified name to reach your Rancher server
      # hostname: rancher.my.org
      hostname: rancher.eminxing.com
      ...
      ### ingress ###
      # Readme for details and instruction on adding tls secrets.
      ingress:
        extraAnnotations: {}
      
        # configurationSnippet - Add additional Nginx configuration. This example statically sets a header on the ingress.
        # configurationSnippet: |
        #   more_set_input_headers "X-Forwarded-Host: {{ .Values.hostname }}";
      
        tls:
          # options: rancher, letsEncrypt, secret
          source: secret
      ...
      # Number of Rancher server replicas.
      replicas: 2
      ...
      
    • Install Rancher

      helm install rancher rancher/ -n cattle-system
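
      Wait for the rollout to finish before opening the UI:

      kubectl -n cattle-system rollout status deploy/rancher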
      
    • Add a hosts entry to access the UI

      10.81.0.101 rancher.eminxing.com
      
