Setting Up a K8S Cluster

Author: 生不悔改 | Published 2023-08-17 15:13

    Prerequisites

    Every node machine needs at least 3 GB of RAM, a 20 GB disk, and 2 CPU cores.

    All of the operations below must be performed on all three machines.

    1. Set the hostnames

    # Run the matching command on each machine
    hostnamectl set-hostname k8s-node1   # on the first machine
    hostnamectl set-hostname k8s-node2   # on the second machine
    hostnamectl set-hostname k8s-node3   # on the third machine
    

    Verify that the hostname was set successfully.
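    A quick check: hostnamectl run with no arguments prints the current static hostname, which should match the name set above.

    # Should report e.g. "Static hostname: k8s-node1"
    hostnamectl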

    2. Configure the hosts file

    # Edit /etc/hosts and append the entries below
    vim /etc/hosts

    192.168.56.107 k8s-node1
    192.168.56.110 k8s-node2
    192.168.56.111 k8s-node3
    

    Verify that the mappings took effect.
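    One way to verify: each hostname should now resolve to the address mapped above.

    # Each node should answer from its /etc/hosts IP
    ping -c 1 k8s-node2
    ping -c 1 k8s-node3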

    3. Disable the firewall

    # Stop the firewall and permanently disable it
    systemctl stop firewalld && systemctl disable firewalld
    

    4. Configure SELinux

    # Switch SELinux to permissive mode now and disable it permanently in its config file
    setenforce 0 && sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
    

    5. Disable the swap partition

    # Turn swap off now and comment out the swap entry in /etc/fstab so it stays off after reboot
    swapoff -a && sed -ri 's/.*swap.*/#&/' /etc/fstab
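
    To confirm swap is fully off, free should report zero swap:

    # The Swap line should show 0B total
    free -h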
    

    6. Synchronize time across the three machines

    # Install the ntpdate time-sync tool
    yum install -y ntpdate
    # Sync every machine's clock against the time.windows.com NTP server
    ntpdate time.windows.com
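
    A one-off ntpdate run will drift again over time; a common follow-up is re-running it from cron. A minimal sketch (the hourly schedule is only an example, and the ntpdate path may differ on your system):

    # Append an hourly re-sync to root's crontab
    (crontab -l 2>/dev/null; echo "0 * * * * /usr/sbin/ntpdate time.windows.com") | crontab -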
    

    7. Install containerd (a container runtime similar to Docker) and configure it

    # Install yum-config-manager and related dependencies
    yum install -y yum-utils device-mapper-persistent-data lvm2
    # Add the yum repository that provides containerd
    yum-config-manager --add-repo http://mirrors.aliyun.com/docker-ce/linux/centos/docker-ce.repo
    # Install containerd
    yum install -y containerd.io cri-tools
    # Configure containerd
    cat > /etc/containerd/config.toml <<EOF
    disabled_plugins = ["restart"]
    [plugins.linux]
    shim_debug = true
    [plugins.cri.registry.mirrors."docker.io"]
    endpoint = ["https://frz7i079.mirror.aliyun.com"]
    [plugins.cri]
    sandbox_image = "registry.aliyuncs.com/google_containers/pause:3.6"
    EOF
    
    # Reload systemd's unit configuration
    systemctl daemon-reload
    
    # Start the containerd service, enable it at boot, and check its status
    systemctl enable containerd && systemctl start containerd && systemctl status containerd
    
    # Declare the kernel modules containerd needs so they load at boot
    cat > /etc/modules-load.d/containerd.conf <<EOF
    overlay
    br_netfilter
    EOF
    
    # Configure K8s networking-related kernel parameters
    cat > /etc/sysctl.d/k8s.conf <<EOF
    net.bridge.bridge-nf-call-ip6tables = 1
    net.bridge.bridge-nf-call-iptables = 1
    net.ipv4.ip_forward = 1
    EOF
    
    # Load the overlay and br_netfilter modules now
    modprobe overlay
    modprobe br_netfilter
    
    # Check that the settings took effect
    sysctl -p /etc/sysctl.d/k8s.conf
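
    To double-check that both kernel modules are actually loaded:

    # Should list both overlay and br_netfilter
    lsmod | grep -e overlay -e br_netfilter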
    

    8. Add the Kubernetes yum repository

    # List the currently configured repositories
    yum repolist
    

    Add the repository for x86_64 machines (Intel/AMD CPUs):

    # Configure the Kubernetes installation source (Aliyun mirror)
    cat <<EOF > kubernetes.repo
    [kubernetes]
    name=Kubernetes
    baseurl=https://mirrors.aliyun.com/kubernetes/yum/repos/kubernetes-el7-x86_64
    enabled=1
    gpgcheck=0
    repo_gpgcheck=0
    gpgkey=https://mirrors.aliyun.com/kubernetes/yum/doc/yum-key.gpg https://mirrors.aliyun.com/kubernetes/yum/doc/rpm-package-key.gpg
    EOF
    
    mv kubernetes.repo /etc/yum.repos.d/
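
    To confirm the repository was registered:

    # A kubernetes repo entry should now appear
    yum repolist | grep -i kubernetes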
    

    Pull the pause images K8S needs ahead of time; otherwise cluster initialization will fail to pull them, report an error, and refuse to start.

    # Pull the registry.aliyuncs.com/google_containers/pause:3.6 image
    ctr image pull registry.aliyuncs.com/google_containers/pause:3.6
    # Pull the registry.aliyuncs.com/google_containers/pause:3.9 image
    ctr image pull registry.aliyuncs.com/google_containers/pause:3.9
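
    To verify that both images landed in containerd's image store:

    # Both pause tags should be listed
    ctr image ls | grep pause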
    

    9. Install the K8S components

    # Install the latest versions
    yum install -y kubelet kubeadm kubectl
    # Verify that each component installed successfully
    kubelet --version
    kubectl version
    kubeadm version
    
    # Enable and start kubelet (the node agent through which K8S manages each machine); it may restart in a loop until the cluster is initialized, which is expected
    sudo systemctl enable kubelet && sudo systemctl start kubelet && sudo systemctl status kubelet
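
    Installing without a version gets whatever is newest in the mirror. If you need to pin a specific version instead, yum accepts versioned package names; a sketch, assuming 1.28.2 exists in the repository (check with yum --showduplicates list kubeadm):

    # Pin all three components to one version (the number here is only an example)
    yum install -y kubelet-1.28.2 kubeadm-1.28.2 kubectl-1.28.2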
    

    10. Initialize the cluster (run on the master node only)

    # A single command does it; the parameters are explained below
    kubeadm init --apiserver-advertise-address=192.168.56.107 --pod-network-cidr=10.244.0.0/16 --image-repository registry.aliyuncs.com/google_containers

    # The master host's IP address; in this setup the master is k8s-node1 at 192.168.56.107
    --apiserver-advertise-address=192.168.56.107

    # The image registry; the default upstream registry is not reachable from China, so use the Aliyun mirror
    --image-repository=registry.aliyuncs.com/google_containers

    # The K8S software version to install, e.g. v1.18.0
    --kubernetes-version=v1.18.0

    # The Service IP range; just use 10.96.0.0/12 as-is, in this and future installs, and do not change it
    --service-cidr=10.96.0.0/12

    # The IP range for pod-to-pod traffic inside K8S; it must differ from service-cidr. If unsure, use 10.244.0.0/16, which also matches the flannel config applied later
    --pod-network-cidr=10.244.0.0/16
    

    11. After a successful startup (master node only)

    When kubeadm init completes successfully, its output ends with two things: a short list of commands that make kubectl usable on the master, and a kubeadm join command for the workers. Run the former in the order shown (see the sketch below) and save the latter.
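    The kubeconfig commands kubeadm init prints are the standard three below; they copy the admin credentials into the current user's home directory so kubectl can find them.

    mkdir -p $HOME/.kube
    sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
    sudo chown $(id -u):$(id -g) $HOME/.kube/config
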
    # Run on k8s-node1 (refreshes the cluster's node list every second)
     watch -n 1 -d kubectl get nodes
    
    # Run the following on k8s-node2 and k8s-node3 to join them to the cluster
    kubeadm join 192.168.56.107:6443 --token d2yazm.ubodgkh9rlvytspi \
            --discovery-token-ca-cert-hash sha256:4d0f10276c0d34b6529730c7583d72f0b6aa8b372af90c41c46c631eacb11987
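
    The token embedded in the join command expires (after 24 hours by default). If it has lapsed by the time a worker joins, generate a fresh join command on the master:

    # Prints a ready-to-run kubeadm join command with a new token
    kubeadm token create --print-join-command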
    

    The cluster is now assembled: one master and two workers. It is not usable yet, though; the nodes report NotReady until the cluster network is configured.



    12. Configure the cluster network (run on the master node only)

    Create a file named kube-flannel.yml with the contents below, then run kubectl apply -f kube-flannel.yml

    ---
    kind: Namespace
    apiVersion: v1
    metadata:
      name: kube-flannel
      labels:
        pod-security.kubernetes.io/enforce: privileged
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: flannel
    rules:
      - apiGroups:
          - ""
        resources:
          - pods
        verbs:
          - get
      - apiGroups:
          - ""
        resources:
          - nodes
        verbs:
          - list
          - watch
      - apiGroups:
          - ""
        resources:
          - nodes/status
        verbs:
          - patch
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: flannel
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: flannel
    subjects:
      - kind: ServiceAccount
        name: flannel
        namespace: kube-flannel
    ---
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: flannel
      namespace: kube-flannel
    ---
    kind: ConfigMap
    apiVersion: v1
    metadata:
      name: kube-flannel-cfg
      namespace: kube-flannel
      labels:
        tier: node
        app: flannel
    data:
      cni-conf.json: |
        {
          "name": "cbr0",
          "cniVersion": "0.3.1",
          "plugins": [
            {
              "type": "flannel",
              "delegate": {
                "hairpinMode": true,
                "isDefaultGateway": true
              }
            },
            {
              "type": "portmap",
              "capabilities": {
                "portMappings": true
              }
            }
          ]
        }
      net-conf.json: |
        {
          "Network": "10.244.0.0/16",
          "Backend": {
            "Type": "vxlan"
          }
        }
    ---
    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
      name: kube-flannel-ds
      namespace: kube-flannel
      labels:
        tier: node
        app: flannel
    spec:
      selector:
        matchLabels:
          app: flannel
      template:
        metadata:
          labels:
            tier: node
            app: flannel
        spec:
          affinity:
            nodeAffinity:
              requiredDuringSchedulingIgnoredDuringExecution:
                nodeSelectorTerms:
                  - matchExpressions:
                      - key: kubernetes.io/os
                        operator: In
                        values:
                          - linux
          hostNetwork: true
          priorityClassName: system-node-critical
          tolerations:
            - operator: Exists
              effect: NoSchedule
          serviceAccountName: flannel
          initContainers:
            - name: install-cni-plugin
              #image: flannelcni/flannel-cni-plugin:v1.1.0 for ppc64le and mips64le (dockerhub limitations may apply)
              image: docker.io/rancher/mirrored-flannelcni-flannel-cni-plugin:v1.1.0
              command:
                - cp
              args:
                - -f
                - /flannel
                - /opt/cni/bin/flannel
              volumeMounts:
                - name: cni-plugin
                  mountPath: /opt/cni/bin
            - name: install-cni
              #image: flannelcni/flannel:v0.19.0 for ppc64le and mips64le (dockerhub limitations may apply)
              image: quay.io/coreos/flannel:v0.14.0
              command:
                - cp
              args:
                - -f
                - /etc/kube-flannel/cni-conf.json
                - /etc/cni/net.d/10-flannel.conflist
              volumeMounts:
                - name: cni
                  mountPath: /etc/cni/net.d
                - name: flannel-cfg
                  mountPath: /etc/kube-flannel/
          containers:
            - name: kube-flannel
              #image: flannelcni/flannel:v0.19.0 for ppc64le and mips64le (dockerhub limitations may apply)
              image: quay.io/coreos/flannel:v0.14.0
              command:
                - /opt/bin/flanneld
              args:
                - --ip-masq
                - --kube-subnet-mgr
              resources:
                requests:
                  cpu: "100m"
                  memory: "50Mi"
                limits:
                  cpu: "100m"
                  memory: "50Mi"
              securityContext:
                privileged: false
                capabilities:
                  add: ["NET_ADMIN", "NET_RAW"]
              env:
                - name: POD_NAME
                  valueFrom:
                    fieldRef:
                      fieldPath: metadata.name
                - name: POD_NAMESPACE
                  valueFrom:
                    fieldRef:
                      fieldPath: metadata.namespace
                - name: EVENT_QUEUE_DEPTH
                  value: "5000"
              volumeMounts:
                - name: run
                  mountPath: /run/flannel
                - name: flannel-cfg
                  mountPath: /etc/kube-flannel/
                - name: xtables-lock
                  mountPath: /run/xtables.lock
          volumes:
            - name: run
              hostPath:
                path: /run/flannel
            - name: cni-plugin
              hostPath:
                path: /opt/cni/bin
            - name: cni
              hostPath:
                path: /etc/cni/net.d
            - name: flannel-cfg
              configMap:
                name: kube-flannel-cfg
            - name: xtables-lock
              hostPath:
                path: /run/xtables.lock
                type: FileOrCreate
    

    After the apply completes:

    # Watch the pods in all namespaces and wait until they are all READY
     kubectl get pods -A
    
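    Once the flannel pods are running, the nodes should flip from NotReady to Ready:

    # All three nodes should now show STATUS Ready
    kubectl get nodes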

    At this point kubectl commands only work on the master node; running them on a worker fails, so one more configuration step is needed.

    # Copy the kubeconfig directory from the master to each worker; k8s-node2 and k8s-node3 resolve via the /etc/hosts mapping added earlier
    scp -r ~/.kube k8s-node2:~/
    scp -r ~/.kube k8s-node3:~/
    

    When the scp commands run, you will be asked whether to continue; type yes, then enter the root password of the target node.
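
    Afterwards kubectl should work from the workers as well; for example, on k8s-node2:

    # Should list all three nodes, just like on the master
    kubectl get nodes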

    13. Set up the Kuboard graphical dashboard

    Provision another server with Docker installed.

    # kuboard:v3.5.0.3 requires the K8S cluster to be version 1.18 or later
    # KUBOARD_ENDPOINT is the address at which this Kuboard instance is reachable (192.168.171.3 here)
    docker run -d \
      --restart=unless-stopped \
      --name=kuboard \
      -p 6017:80/tcp \
      -p 10081:10081/tcp \
      -e KUBOARD_ENDPOINT="http://192.168.171.3:6017" \
      -e KUBOARD_AGENT_SERVER_TCP_PORT="10081" \
      -v /home/apps/kuboard/data:/data \
      eipwork/kuboard:v3.5.0.3
    

    Once it is up, browse to ip:6017 and log in with the default credentials:
    Username: admin
    Password: Kuboard123
    Then follow the guided steps to import your K8S cluster.
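
    Before opening the browser, you can check that the container actually came up:

    # The kuboard container should show an "Up" status
    docker ps --filter name=kuboard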

