K8S-1.26, Binary Installation

Author: DGFM | Published 2023-04-02 12:12


    This installation is for learning and practice, so it is intentionally incomplete; components such as coredns will be added later, step by step, through manual installation as part of the learning process;

    Device environment

    Hostname         OS            IP                                 Role
    ly-test-deploy   Ubuntu 20.04  172.16.0.3                         primary deploy host
    test-ha-kp       Ubuntu 20.04  172.16.0.5, VIPs: 172.16.0.20-24   load-balancing proxy
    test-master01    Ubuntu 20.04  172.16.0.6                         master01 node
    test-master02    Ubuntu 20.04  172.16.0.7                         master02 node
    test-node01      Ubuntu 20.04  172.16.0.8                         node01 node
    test-node02      Ubuntu 20.04  172.16.0.9                         node02 node
    test-node03      Ubuntu 20.04  172.16.0.10                        node03 node
    test-etcd01      Ubuntu 20.04  172.16.0.11                        etcd01 node
    test-etcd02      Ubuntu 20.04  172.16.0.12                        etcd02 node
    test-etcd03      Ubuntu 20.04  172.16.0.13                        etcd03 node

    Starting the deployment

    HA+KP (HAProxy + Keepalived) deployment

    On the test-ha-kp node;

    # Install the required packages;
    apt-get -y install haproxy keepalived
    

    HAProxy configuration file contents (/etc/haproxy/haproxy.cfg)

    global
     log /dev/log    local0
     log /dev/log    local1 notice
     chroot /var/lib/haproxy
     stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
     stats timeout 30s
     user haproxy
     group haproxy
     daemon
    
     # Default SSL material locations
     # ca-base /etc/ssl/certs
     # crt-base /etc/ssl/private
    
     # See: https://ssl-config.mozilla.org/#server=haproxy&server-version=2.0.3&config=intermediate
     ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
     ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
     ssl-default-bind-options ssl-min-ver TLSv1.2 no-tls-tickets
    
    defaults
     mode                    http
     log                     global
     log         127.0.0.1 local7
     option                  httplog
     option                  dontlognull
     option http-server-close
     option forwardfor       except 127.0.0.0/8
     option                  redispatch
     retries                 3
     timeout http-request    10s
     timeout queue           1m
     timeout connect         10s
     timeout client          1m
     timeout server          1m
     timeout http-keep-alive 10s
     timeout check           10s
     maxconn                 3000
    
    listen k8s-master-6443
     bind 172.16.0.20:6443
     mode tcp
     balance source
     server 172.16.0.6 172.16.0.6:6443 check inter 3s fall 3 rise 5
     server 172.16.0.7 172.16.0.7:6443 check inter 3s fall 3 rise 5
    
    listen k8s-master-30880
     bind 172.16.0.21:30880
     mode tcp
     server 172.16.0.6 172.16.0.6:30880 check inter 3s fall 3 rise 5
     server 172.16.0.7 172.16.0.7:30880 check inter 3s fall 3 rise 5
    
    listen k8s-master-80
     bind 172.16.0.22:80
     mode tcp
     server 172.16.0.6 172.16.0.6:80 check inter 3s fall 3 rise 5
     server 172.16.0.7 172.16.0.7:80 check inter 3s fall 3 rise 5
    
    listen k8s-master-443
     bind 172.16.0.23:443
     mode tcp
     server 172.16.0.6 172.16.0.6:443 check inter 3s fall 3 rise 5
     server 172.16.0.7 172.16.0.7:443 check inter 3s fall 3 rise 5
    
    listen k8s-node-80
     bind 172.16.0.24:80
     mode tcp
     server 172.16.0.8 172.16.0.8:80 check inter 3s fall 3 rise 5
     server 172.16.0.9 172.16.0.9:80 check inter 3s fall 3 rise 5
     server 172.16.0.10 172.16.0.10:80 check inter 3s fall 3 rise 5
    
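    Note that HAProxy binds to VIPs which only exist on whichever node currently holds them via Keepalived. If HAProxy refuses to start because an address is not yet assigned, allowing non-local binds usually helps (a standard Linux sysctl, shown here as a hedged sketch rather than a required step):

    # Allow binding to addresses not yet assigned to this host;
    echo 'net.ipv4.ip_nonlocal_bind = 1' >> /etc/sysctl.conf
    sysctl -p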

    Keepalived configuration file contents (/etc/keepalived/keepalived.conf)

     vrrp_instance VI_1 {
        state MASTER
        interface eth0
        garp_master_delay 10
        smtp_alert
        virtual_router_id 51
        priority 100        # higher value = higher priority;
        advert_int 1
        authentication {
            auth_type PASS
            auth_pass 1111
        }
        virtual_ipaddress {
            172.16.0.20 dev eth0 label eth0:0
            172.16.0.21 dev eth0 label eth0:1
            172.16.0.22 dev eth0 label eth0:2
            172.16.0.23 dev eth0 label eth0:3
            172.16.0.24 dev eth0 label eth0:4
        }

        # nopreempt               # optionally disable preemption (preempting is the default behaviour);
        # preempt_delay 300       # seconds to wait before preempting, used together with state BACKUP;
    }
    
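    With both files in place, the configuration can be validated and the services brought up (a minimal sketch; paths and service names are the Ubuntu defaults):

    # Check the HAProxy configuration for syntax errors;
    haproxy -c -f /etc/haproxy/haproxy.cfg

    # Start both services and enable them at boot;
    systemctl restart keepalived haproxy
    systemctl enable keepalived haproxy

    # The VIPs should now be visible on eth0 as eth0:0 .. eth0:4;
    ip addr show eth0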

    Preparing the deploy node

    Execute on node: ly-test-deploy

    Install the kubeasz project

    # Install git and ansible;
    apt-get -y install git ansible
    
    # Declare the release version as an environment variable;
    export release=3.5.2
    

    Note: the version declared here is the kubeasz release to install; it must match the version of the ezdown file you download. Reference: https://github.com/easzlab/kubeasz/blob/master/docs/setup/00-planning_and_overall_intro.md

    Download the ezdown script

    The file is hosted on a site outside China, so download speed depends on your network; you can use a proxy and/or a download manager if needed;

    mkdir -p /usr/local/src/kubeasz/
    cd /usr/local/src/kubeasz
    wget https://github.com/easzlab/kubeasz/releases/download/${release}/ezdown  
    
    # Make the script executable, then run it to install the project;
    chmod +x ./ezdown
    ./ezdown -D                # inside mainland China;
    ./ezdown -D -m standard    # outside mainland China;
    

    Check the result

    root@ly-test-deploy:/usr/local/src/kubeasz# ls /etc/kubeasz/
    ansible.cfg  bin  docs  down  example  ezctl  ezdown  manifests  pics  playbooks  README.md  roles  tools
    

    Create the K8S cluster

    Use the ezctl tool to create a k8s cluster named "test.cluster"; ezctl can manage multiple K8S clusters;

    cd /etc/kubeasz/
    ./ezctl new test.cluster
    

    Output

    root@ly-test-deploy:/etc/kubeasz# ls clusters/
    test.cluster
    

    Edit the hosts file

    vim /etc/kubeasz/clusters/test.cluster/hosts
    

    This is the cluster node inventory file; it defines which nodes the cluster uses;

      1 # 'etcd' cluster should have odd member(s) (1,3,5,...)
      2 [etcd]
      3 172.16.0.11
      4 172.16.0.12
      5 172.16.0.13
      6
      7 # master node(s), set unique 'k8s_nodename' for each node
      8 # CAUTION: 'k8s_nodename' must consist of lower case alphanumeric characters, '-' or '.',
      9 # and must start and end with an alphanumeric character
     10 [kube_master]
     11 172.16.0.6 k8s_nodename='master-01'
     12 172.16.0.7 k8s_nodename='master-02'
     13
     14 # work node(s), set unique 'k8s_nodename' for each node
     15 # CAUTION: 'k8s_nodename' must consist of lower case alphanumeric characters, '-' or '.',
     16 # and must start and end with an alphanumeric character
     17 [kube_node]
     18 172.16.0.8 k8s_nodename='worker-01'
     19 172.16.0.9 k8s_nodename='worker-02'
     20 172.16.0.10 k8s_nodename='worker-03'
     .
     .
     52 SERVICE_CIDR="10.10.0.0/16"
     53
     54 # Cluster CIDR (Pod CIDR), not overlap with node(host) networking
     55 CLUSTER_CIDR="10.100.0.0/16"
     56
     57 # NodePort Range
     58 NODE_PORT_RANGE="30000-32767"
     59
     60 # Cluster DNS Domain
     61 CLUSTER_DNS_DOMAIN="cluster.k8s"
     .
     .
    
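    Before continuing, it is worth confirming that ansible can reach every host in the inventory (a sketch; it assumes passwordless ssh to the nodes is already set up):

    cd /etc/kubeasz
    ansible -i clusters/test.cluster/hosts all -m ping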

    Upload the pause image to the private Harbor

    Check the tag of the existing pause image

    docker images
    .
    .
    easzlab.io.local:5000/easzlab/pause                  3.9       78d53e70b442   5 months ago    744kB
    .
    .
    

    Retag the existing image and push it

    docker tag easzlab.io.local:5000/easzlab/pause:3.9 test.harbor.lnsz:14433/k8s-guofei-test/pause:3.9
    docker push test.harbor.lnsz:14433/k8s-guofei-test/pause:3.9
    
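    If the push is rejected with an authentication error, log in to the registry first (using the Harbor address from this environment):

    docker login test.harbor.lnsz:14433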

    Edit the config.yml file

    vim /etc/kubeasz/clusters/test.cluster/config.yml
    

    This is the main cluster configuration file; it defines what gets installed;

    .
    .
    43 ETCD_DATA_DIR: "/mnt/data_disk/etcd"    # etcd data directory;
    .
    .
    55 SANDBOX_IMAGE: "test.harbor.lnsz:14433/k8s-guofei-test/pause:3.9"    # point this at our own pause image;
    .
    .
    62 CONTAINERD_STORAGE_DIR: "/mnt/data_disk/containerd"
    .
    .
    75 MASTER_CERT_HOSTS:
    76   - "172.16.0.20"
    77   - "172.16.0.21"
    78   - "172.16.0.22"
    79   - "172.16.0.23"
    80   - "test.k8s.lnsz"
    81   #- "www.test.com"
    .
    .
    

    Note: since this document is learning-oriented, coredns, metrics-server, dashboard, prometheus and nfs will all be installed by hand, so in the component section in the second half of config.yml every automatic-install option is set to no;

    Edit /etc/kubeasz/playbooks/01.prepare.yml and comment out the services we do not need it to run;

    vim /etc/kubeasz/playbooks/01.prepare.yml    # edit the ansible playbook, commenting out the parts we do not want executed;
    # [optional] to synchronize system time of nodes with 'chrony'
    - hosts:
      - kube_master
      - kube_node
      - etcd
      #- ex_lb      # the load balancer; we built our own, so it is commented out here;
      #- chrony
    

    Initialize the deploy node (prepare stage)

    This step must be executed as the root user;

    Run the deployment command

    /etc/kubeasz/ezctl setup test.cluster 01
    

    If ssh access fails, adjust /etc/kubeasz/ansible.cfg as appropriate;
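    For example, if the run aborts on host-key verification, strict host-key checking can be disabled there (standard ansible configuration, shown only as a hedged example):

    [defaults]
    host_key_checking = False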

    Deploy the etcd nodes (etcd stage)

    First, python must be installed on every etcd node, as well as on the deploy node;

    apt-get -y install python    # on Ubuntu 20.04 the package may instead be python3 or python-is-python3;
    

    Run the deployment command

    /etc/kubeasz/ezctl setup test.cluster 02
    

    Note: if the environment uses a firewall, security groups or similar protection, open ports 2379 and 2380 so the etcd members can reach each other (if they are not opened in advance, the installation may fail with errors);
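    After this stage finishes, the cluster can be health-checked from any etcd node (a sketch; the certificate paths assume kubeasz's default layout under /etc/kubernetes/ssl and may differ in your environment):

    export ETCDCTL_API=3
    etcdctl --endpoints=https://172.16.0.11:2379,https://172.16.0.12:2379,https://172.16.0.13:2379 \
      --cacert=/etc/kubernetes/ssl/ca.pem \
      --cert=/etc/kubernetes/ssl/etcd.pem \
      --key=/etc/kubernetes/ssl/etcd-key.pem \
      endpoint health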

    Deploy the container runtime (containerd-runtime stage)

    Configure the registry information the master and node hosts will use when pulling the images needed for deployment;

    Because this stage pulls images, you can edit /etc/kubeasz/roles/containerd/templates/config.toml.j2 in advance and add your self-hosted harbor address, so the nodes can pull from it later.

    .
    .
    147       [plugins."io.containerd.grpc.v1.cri".registry.mirrors]
    .
    .
    167         [plugins."io.containerd.grpc.v1.cri".registry.mirrors."test.harbor.lnsz:14433"]
    168           endpoint = ["https://test.harbor.lnsz:14433"]
    169         [plugins."io.containerd.grpc.v1.cri".registry.configs."test.harbor.lnsz:14433".tls]
    170           insecure_skip_verify = true
    171         [plugins."io.containerd.grpc.v1.cri".registry.configs."test.harbor.lnsz:14433".auth]
    172           username = "admin"
    173           password = "Harbor12345"
    .
    .
    

    If you forget to make this change, the only option is to edit /etc/containerd/config.toml on each node by hand; the changes are the same;

    Finally, restart the containerd service and test-pull an image with crictl;

    systemctl restart containerd
    crictl pull test.harbor.lnsz:14433/k8s-guofei-test/pause:3.9
    

    Run the deployment command

    /etc/kubeasz/ezctl setup test.cluster 03
    

    Deploy the master nodes (kube-master stage)

    Run the deployment command

    /etc/kubeasz/ezctl setup test.cluster 04
    

    Remember to open port 6443 on the firewall or security group in advance
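    A quick way to confirm the path through the load balancer is to curl the apiserver VIP from any node (either an "ok"/JSON response or an authentication error proves TCP connectivity; anonymous access may be disabled):

    curl -k https://172.16.0.20:6443/healthz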

    Deploy the worker nodes (kube-node stage)

    Run the deployment command

    /etc/kubeasz/ezctl setup test.cluster 05
    
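    The nodes should now have registered; from the deploy node they can be listed (they will remain NotReady until the network stage completes):

    kubectl get node -o wide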

    Deploy the network (network stage)

    Push the following three calico images to the self-hosted Harbor to speed up subsequent deployments;

    easzlab.io.local:5000/calico/cni

    easzlab.io.local:5000/calico/node

    easzlab.io.local:5000/calico/kube-controllers

    docker tag easzlab.io.local:5000/calico/kube-controllers:v3.24.5 test.harbor.lnsz:14433/test.k8s.lnsz/calico/kube-controllers:v3.24.5
    
    docker tag easzlab.io.local:5000/calico/cni:v3.24.5 test.harbor.lnsz:14433/test.k8s.lnsz/calico/cni:v3.24.5
    
    docker tag easzlab.io.local:5000/calico/node:v3.24.5 test.harbor.lnsz:14433/test.k8s.lnsz/calico/node:v3.24.5
    
    docker push test.harbor.lnsz:14433/test.k8s.lnsz/calico/kube-controllers:v3.24.5
    docker push test.harbor.lnsz:14433/test.k8s.lnsz/calico/cni:v3.24.5
    docker push test.harbor.lnsz:14433/test.k8s.lnsz/calico/node:v3.24.5
    

    Edit the deployment template /etc/kubeasz/roles/calico/templates/calico-v3.24.yaml.j2, replacing the image addresses in it with the self-hosted harbor address;

    The calico-v3.24.yaml.j2 version must match the calico version defined in clusters/test.cluster/config.yml;

    .
    .
    257         - name: install-cni
    258           # image: easzlab.io.local:5000/calico/cni:{{ calico_ver }}
    259           image: test.harbor.lnsz:14433/test.k8s.lnsz/calico/cni:{{ calico_ver }}
    .
    .
    304         - name: "mount-bpffs"
    305           # image: easzlab.io.local:5000/calico/node:{{ calico_ver }}
    306           image: test.harbor.lnsz:14433/test.k8s.lnsz/calico/node:{{ calico_ver }}
    .
    .
    331         - name: calico-node
    332           # image: easzlab.io.local:5000/calico/node:{{ calico_ver }}
    333           image: test.harbor.lnsz:14433/test.k8s.lnsz/calico/node:{{ calico_ver }}
    .
    .
    587         - name: calico-kube-controllers
    588           # image: easzlab.io.local:5000/calico/kube-controllers:{{ calico_ver }}
    589           image: test.harbor.lnsz:14433/test.k8s.lnsz/calico/kube-controllers:{{ calico_ver }}
    .
    .
    

    Run the deployment command

    /etc/kubeasz/ezctl setup test.cluster 06
    
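    Once this stage completes, verify that the calico pods are running and the nodes have gone Ready:

    kubectl get pod -n kube-system -o wide | grep calico
    kubectl get node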

    Testing

    # Create a namespace named myserver;
    kubectl create ns myserver
    
    # Start two centos:7.9.2009 pods, net-test1 and net-test2, in the myserver namespace;
    kubectl run net-test1 --image=centos:7.9.2009 -n myserver -- sleep 100000000
    kubectl run net-test2 --image=centos:7.9.2009 -n myserver -- sleep 100000000
    
    # List the pods in the myserver namespace; -o wide shows extended information;
    kubectl get pod -n myserver -o wide
    
    # Open an interactive shell in net-test1; be sure to specify the namespace, or the pod may not be found;
    kubectl exec -it net-test1 -n myserver -- /bin/bash
    

    Once inside the container, test whether it can reach the internet and the other pods; note that coredns is not installed yet, so pinging by domain name will not work;
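    For example, inside net-test1 (the addresses are illustrative; substitute a real pod IP from kubectl get pod -o wide):

    ping 223.5.5.5      # external connectivity by IP;
    ping 10.100.x.x     # hypothetical pod IP of net-test2; replace with the real one;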
