美文网首页
Centos7二进制部署k8s(三)

Centos7二进制部署k8s(三)

作者: Rainy丶Wang | 来源:发表于2019-05-28 16:28 被阅读0次

    安装包:链接: https://pan.baidu.com/s/1cDB8AC2mclIss6Fepw9Xaw
    提取码: wfpu

    在部署Kubernetes之前一定要确保etcd、flannel、docker是正常工作的,否则先解决问题再继续。

    3.1 生成证书

    创建CA证书:

    # cat << Rainy > ca-config.json 
    {
      "signing": {
        "default": {
          "expiry": "87600h"
        },
        "profiles": {
          "kubernetes": {
             "expiry": "87600h",
             "usages": [
                "signing",
                "key encipherment",
                "server auth",
                "client auth"
            ]
          }
        }
      }
    }
    Rainy
    
    # cat << Rainy > ca-csr.json
    {
        "CN": "kubernetes",
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "L": "DaLian",
                "ST": "DaLian",
                "O": "k8s",
                "OU": "System"
            }
        ]
    }
    Rainy
    
    
    [root@k8s-master ssl]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca -
    2019/05/28 14:20:58 [INFO] generating a new CA key and certificate from CSR
    2019/05/28 14:20:58 [INFO] generate received request
    2019/05/28 14:20:58 [INFO] received CSR
    2019/05/28 14:20:58 [INFO] generating key: rsa-2048
    2019/05/28 14:20:58 [INFO] encoded CSR
    2019/05/28 14:20:58 [INFO] signed certificate with serial number 27805327042003841885602678838302617605516095789
    

    生成apiserver证书:

    # cat << Rainy > server-csr.json
    {
        "CN": "kubernetes",
        "hosts": [
          "10.0.0.1",
          "127.0.0.1",
          "22.22.22.10",
          "kubernetes",
          "kubernetes.default",
          "kubernetes.default.svc",
          "kubernetes.default.svc.cluster",
          "kubernetes.default.svc.cluster.local"
        ],
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [
            {
                "C": "CN",
                "L": "DaLian",
                "ST": "DaLian",
                "O": "k8s",
                "OU": "System"
            }
        ]
    }
    Rainy
    
    [root@k8s-master ssl]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes server-csr.json | cfssljson -bare server
    2019/05/28 14:22:45 [INFO] generate received request
    2019/05/28 14:22:45 [INFO] received CSR
    2019/05/28 14:22:45 [INFO] generating key: rsa-2048
    2019/05/28 14:22:45 [INFO] encoded CSR
    2019/05/28 14:22:45 [INFO] signed certificate with serial number 105181185567179413487695400768822969596658874128
    2019/05/28 14:22:45 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
    websites. For more information see the Baseline Requirements for the Issuance and Management
    of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
    specifically, section 10.2.3 ("Information Requirements").
    

    生成kube-proxy证书:

     # cat << Rainy > kube-proxy-csr.json
    {
      "CN": "system:kube-proxy",
      "hosts": [],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "L": "DaLian",
          "ST": "DaLian",
          "O": "k8s",
          "OU": "System"
        }
      ]
    }
    Rainy
    
    [root@k8s-master ssl]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes kube-proxy-csr.json | cfssljson -bare kube-proxy
    2019/05/28 14:24:14 [INFO] generate received request
    2019/05/28 14:24:14 [INFO] received CSR
    2019/05/28 14:24:14 [INFO] generating key: rsa-2048
    2019/05/28 14:24:14 [INFO] encoded CSR
    2019/05/28 14:24:14 [INFO] signed certificate with serial number 231252524183126754847481635608369729284559413614
    2019/05/28 14:24:14 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
    websites. For more information see the Baseline Requirements for the Issuance and Management
    of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
    specifically, section 10.2.3 ("Information Requirements").
    

    最终生成以下证书文件:

    [root@k8s-master ssl]# ls *pem
    ca-key.pem  ca.pem  kube-proxy-key.pem  kube-proxy.pem  server-key.pem  server.pem
    
    

    3.2 部署apiserver组件

    [root@k8s-master k8s1.13]# mkdir /opt/kubernetes/{bin,cfg,ssl} -p
    将证书放在kubernetes的证书文件中
    [root@k8s-master ssl]# cp *pem /opt/kubernetes/ssl/
    [root@k8s-master k8s1.13]# tar zxvf kubernetes-server-linux-amd64.tar.gz
    [root@k8s-master k8s1.13]# cd kubernetes/server/bin
    [root@k8s-master bin]#  cp kube-apiserver kube-scheduler kube-controller-manager kubectl /opt/kubernetes/bin
    

    创建token文件

    [root@k8s-master bin]# cat << Rainy > /opt/kubernetes/cfg/token.csv
    674c457d4dcf2eefe4920d7dbb6b0ddc,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
    Rainy
    

    第一列:随机字符串,自己可生成
    第二列:用户名
    第三列:UID
    第四列:用户组

    创建apiserver配置文件:

    [root@k8s-master bin]# vim /opt/kubernetes/cfg/kube-apiserver
    [root@k8s-master cfg]# cat  /opt/kubernetes/cfg/kube-apiserver 
    KUBE_APISERVER_OPTS="--logtostderr=true \
    --v=4 \
    --etcd-servers=https://22.22.22.10:2379,https://22.22.22.11:2379,https://22.22.22.12:2379 \
    --bind-address=22.22.22.10 \
    --secure-port=6443 \
    --advertise-address=22.22.22.10 \
    --allow-privileged=true \
    --service-cluster-ip-range=10.0.0.0/24 \
    --enable-admission-plugins=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota,NodeRestriction \
    --authorization-mode=RBAC,Node \
    --enable-bootstrap-token-auth \
    --token-auth-file=/opt/kubernetes/cfg/token.csv \
    --service-node-port-range=30000-50000 \
    --tls-cert-file=/opt/kubernetes/ssl/server.pem  \
    --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
    --client-ca-file=/opt/kubernetes/ssl/ca.pem \
    --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
    --etcd-cafile=/opt/etcd/ssl/ca.pem \
    --etcd-certfile=/opt/etcd/ssl/server.pem \
    --etcd-keyfile=/opt/etcd/ssl/server-key.pem"
    
    

    配置好前面生成的证书,确保能连接etcd。

    参数说明:

    --logtostderr 启用日志
    --v 日志等级
    --etcd-servers etcd集群地址
    --bind-address 监听地址
    --secure-port https安全端口
    --advertise-address 集群通告地址
    --allow-privileged 启用授权
    --service-cluster-ip-range Service虚拟IP地址段
    --enable-admission-plugins 准入控制模块
    --authorization-mode 认证授权,启用RBAC授权和节点自管理
    --enable-bootstrap-token-auth 启用TLS bootstrap功能,后面会讲到
    --token-auth-file token文件
    --service-node-port-range Service Node类型默认分配端口范围

    systemd管理apiserver:

    [root@k8s-master bin]# vim /usr/lib/systemd/system/kube-apiserver.service
    [root@k8s-master bin]# cat /usr/lib/systemd/system/kube-apiserver.service
    [Unit]
    Description=Kubernetes API Server
    Documentation=https://github.com/kubernetes/kubernetes
    
    [Service]
    EnvironmentFile=-/opt/kubernetes/cfg/kube-apiserver
    ExecStart=/opt/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
    Restart=on-failure
    
    [Install]
    WantedBy=multi-user.target
    

    启动:

    # systemctl daemon-reload
    # systemctl enable kube-apiserver
    # systemctl restart kube-apiserver
    

    部署scheduler组件

    创建scheduler配置文件:

    [root@k8s-master bin]# vim /opt/kubernetes/cfg/kube-scheduler
    [root@k8s-master cfg]# cat /opt/kubernetes/cfg/kube-scheduler
    KUBE_SCHEDULER_OPTS="--logtostderr=true \
    --v=4 \
    --master=127.0.0.1:8080 \
    --leader-elect"
    

    参数说明:
    --master 连接本地apiserver
    --leader-elect 当该组件启动多个时,自动选举(HA)

    systemd管理scheduler组件:

    [root@k8s-master cfg]# vim /usr/lib/systemd/system/kube-scheduler.service
    [root@k8s-master cfg]# cat /usr/lib/systemd/system/kube-scheduler.service
    [Unit]
    Description=Kubernetes Scheduler
    Documentation=https://github.com/kubernetes/kubernetes
    
    [Service]
    EnvironmentFile=-/opt/kubernetes/cfg/kube-scheduler
    ExecStart=/opt/kubernetes/bin/kube-scheduler $KUBE_SCHEDULER_OPTS
    Restart=on-failure
    
    [Install]
    WantedBy=multi-user.target
    

    启动:

    [root@k8s-master cfg]# systemctl daemon-reload
    [root@k8s-master cfg]# systemctl enable kube-scheduler
    [root@k8s-master cfg]# systemctl restart kube-scheduler
    [root@k8s-master cfg]# systemctl status kube-scheduler
    

    部署controller-manager组件

    创建controller-manager配置文件:

    [root@k8s-master cfg]# vim /opt/kubernetes/cfg/kube-controller-manager
    [root@k8s-master cfg]# cat /opt/kubernetes/cfg/kube-controller-manager
    KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=true \
    --v=4 \
    --master=127.0.0.1:8080 \
    --leader-elect=true \
    --address=127.0.0.1 \
    --service-cluster-ip-range=10.0.0.0/24 \
    --cluster-name=kubernetes \
    --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
    --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem  \
    --root-ca-file=/opt/kubernetes/ssl/ca.pem \
    --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem"
    

    systemd管理controller-manager组件:

    [root@k8s-master cfg]# vim /usr/lib/systemd/system/kube-controller-manager.service
    [root@k8s-master cfg]# cat /usr/lib/systemd/system/kube-controller-manager.service
    [Unit]
    Description=Kubernetes Controller Manager
    Documentation=https://github.com/kubernetes/kubernetes
    
    [Service]
    EnvironmentFile=-/opt/kubernetes/cfg/kube-controller-manager
    ExecStart=/opt/kubernetes/bin/kube-controller-manager $KUBE_CONTROLLER_MANAGER_OPTS
    Restart=on-failure
    
    [Install]
    WantedBy=multi-user.target
    

    启动

    [root@k8s-master cfg]# systemctl daemon-reload
    [root@k8s-master cfg]# systemctl enable kube-controller-manager
    Created symlink from /etc/systemd/system/multi-user.target.wants/kube-controller-manager.service to /usr/lib/systemd/system/kube-controller-manager.service.
    [root@k8s-master cfg]# systemctl restart kube-controller-manager
    [root@k8s-master cfg]# systemctl status kube-controller-manager
    

    所有组件都已经启动成功,通过kubectl工具查看当前集群组件状态:

    将可执行文件路径 /opt/kubernetes/bin 添加到 PATH 变量中

    PATH=/opt/kubernetes/bin:$PATH:$HOME/bin
    
    [root@k8s-master cfg]# /opt/kubernetes/bin/kubectl get cs
    NAME                 STATUS    MESSAGE             ERROR
    scheduler            Healthy   ok                  
    controller-manager   Healthy   ok                  
    etcd-2               Healthy   {"health":"true"}   
    etcd-0               Healthy   {"health":"true"}   
    etcd-1               Healthy   {"health":"true"}   
    

    如上输出说明组件都正常。

    在Node节点部署组件

    Master apiserver启用TLS认证后,Node节点kubelet组件想要加入集群,必须使用CA签发的有效证书才能与apiserver通信,当Node节点很多时,签署证书是一件很繁琐的事情,因此有了TLS Bootstrapping机制,kubelet会以一个低权限用户自动向apiserver申请证书,kubelet的证书由apiserver动态签署。

    认证大致工作流程如图所示: image.png

    将kubelet-bootstrap用户绑定到系统集群角色(master)

    kubectl create clusterrolebinding kubelet-bootstrap \
      --clusterrole=system:node-bootstrapper \
      --user=kubelet-bootstrap
    
    [root@k8s-master ssl]# cd /usr/local/src/k8s1.13/kubernetes/server/bin/
    [root@k8s-master bin]# scp kubelet kube-proxy 22.22.22.11:/opt/kubernetes/bin/
    [root@k8s-master bin]# scp kubelet kube-proxy 22.22.22.12:/opt/kubernetes/bin/
    [root@k8s-master bin]# cd /usr/local/src/ssl/
    [root@k8s-master ssl]# cat environment.sh 
    # 创建kubelet bootstrapping kubeconfig 
    BOOTSTRAP_TOKEN=674c457d4dcf2eefe4920d7dbb6b0ddc
    KUBE_APISERVER="https://22.22.22.10:6443"
    
    # 设置集群参数
    kubectl config set-cluster kubernetes \
      --certificate-authority=./ca.pem \
      --embed-certs=true \
      --server=${KUBE_APISERVER} \
      --kubeconfig=bootstrap.kubeconfig
    
    # 设置客户端认证参数
    kubectl config set-credentials kubelet-bootstrap \
      --token=${BOOTSTRAP_TOKEN} \
      --kubeconfig=bootstrap.kubeconfig
    
    # 设置上下文参数
    kubectl config set-context default \
      --cluster=kubernetes \
      --user=kubelet-bootstrap \
      --kubeconfig=bootstrap.kubeconfig
    
    # 设置默认上下文
    kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
    
    #----------------------
    
    # 创建kube-proxy kubeconfig文件
    
    kubectl config set-cluster kubernetes \
      --certificate-authority=./ca.pem \
      --embed-certs=true \
      --server=${KUBE_APISERVER} \
      --kubeconfig=kube-proxy.kubeconfig
    
    kubectl config set-credentials kube-proxy \
      --client-certificate=./kube-proxy.pem \
      --client-key=./kube-proxy-key.pem \
      --embed-certs=true \
      --kubeconfig=kube-proxy.kubeconfig
    
    kubectl config set-context default \
      --cluster=kubernetes \
      --user=kube-proxy \
      --kubeconfig=kube-proxy.kubeconfig
    
    kubectl config use-context default --kubeconfig=kube-proxy.kubeconfig
    
    -----------------------------------结束---------------------------------------
    
    [root@k8s-master ssl]# tail -n 1 /etc/profile
    PATH=/opt/kubernetes/bin:$PATH:$HOME/bin
    [root@k8s-master ssl]# source /etc/profile
    
    [root@k8s-master ssl]# sh environment.sh 
    Cluster "kubernetes" set.
    User "kubelet-bootstrap" set.
    Context "default" created.
    Switched to context "default".
    Cluster "kubernetes" set.
    User "kube-proxy" set.
    Context "default" created.
    Switched to context "default".
    
    [root@k8s-master ssl]# ll bootstrap.kubeconfig kube-proxy.kubeconfig
    -rw-------. 1 root root 2161 5月  28 16:38 bootstrap.kubeconfig
    -rw-------. 1 root root 6255 5月  28 16:38 kube-proxy.kubeconfig
    
    将这两个文件拷贝到Node节点/opt/kubernetes/cfg目录下
    [root@k8s-master ssl]# scp bootstrap.kubeconfig kube-proxy.kubeconfig 22.22.22.11:/opt/kubernetes/cfg/
    [root@k8s-master ssl]# scp bootstrap.kubeconfig kube-proxy.kubeconfig 22.22.22.12:/opt/kubernetes/cfg/
    

    部署kubelet组件(node操作)

    创建kubelet配置文件:

    [root@k8s-node1 kubernetes]# cat /opt/kubernetes/cfg/kubelet
    KUBELET_OPTS="--logtostderr=true \
    --v=4 \
    --hostname-override=22.22.22.11 \
    --kubeconfig=/opt/kubernetes/cfg/kubelet.kubeconfig \
    --bootstrap-kubeconfig=/opt/kubernetes/cfg/bootstrap.kubeconfig \
    --config=/opt/kubernetes/cfg/kubelet.config \
    --cert-dir=/opt/kubernetes/ssl \
    --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/google-containers/pause-amd64:3.0"
    

    参数说明:
    --hostname-override 在集群中显示的主机名
    --kubeconfig 指定kubeconfig文件位置,会自动生成
    --bootstrap-kubeconfig 指定刚才生成的bootstrap.kubeconfig文件
    --cert-dir 颁发证书存放位置
    --pod-infra-container-image 管理Pod网络的镜像

    /opt/kubernetes/cfg/kubelet.config配置文件如下

    [root@k8s-node1 kubernetes]# cat /opt/kubernetes/cfg/kubelet.config
    kind: KubeletConfiguration
    apiVersion: kubelet.config.k8s.io/v1beta1
    address: 22.22.22.11
    port: 10250
    readOnlyPort: 10255
    cgroupDriver: cgroupfs
    clusterDNS: ["10.0.0.2"]
    clusterDomain: cluster.local.
    failSwapOn: false
    authentication:
      anonymous:
        enabled: true
    

    systemd管理kubelet组件:

    [root@k8s-node1 kubernetes]# cat /usr/lib/systemd/system/kubelet.service
    [Unit]
    Description=Kubernetes Kubelet
    After=docker.service
    Requires=docker.service
    
    [Service]
    EnvironmentFile=/opt/kubernetes/cfg/kubelet
    ExecStart=/opt/kubernetes/bin/kubelet $KUBELET_OPTS
    Restart=on-failure
    KillMode=process
    
    [Install]
    WantedBy=multi-user.target
    

    启动:

    [root@k8s-node1 kubernetes]# systemctl daemon-reload
    [root@k8s-node1 kubernetes]#  systemctl enable kubelet
    Created symlink from /etc/systemd/system/multi-user.target.wants/kubelet.service to /usr/lib/systemd/system/kubelet.service.
    [root@k8s-node1 kubernetes]# systemctl restart kubelet
    

    在Master审批Node加入集群:

    启动后还没加入到集群中,需要手动允许该节点才可以。
    在Master节点查看请求签名的Node:

    [root@k8s-master ssl1]# kubectl get csr
    NAME                                                   AGE    REQUESTOR           CONDITION
    node-csr-UkSbyIZ4endWtfIQ-gIl7UPTPwsd4q6KgSAX7eFfi84   117s   kubelet-bootstrap   Pending
    
    [root@k8s-master ssl1]# kubectl certificate approve node-csr-UkSbyIZ4endWtfIQ-gIl7UPTPwsd4q6KgSAX7eFfi84
    certificatesigningrequest.certificates.k8s.io/node-csr-UkSbyIZ4endWtfIQ-gIl7UPTPwsd4q6KgSAX7eFfi84 approved
    
    [root@k8s-master ssl1]#  kubectl get node
    NAME          STATUS   ROLES    AGE   VERSION
    22.22.22.11   Ready    <none>   22s   v1.13.0
    

    部署kube-proxy组件

    创建kube-proxy配置文件:

    # cat /opt/kubernetes/cfg/kube-proxy
    KUBE_PROXY_OPTS="--logtostderr=true \
    --v=4 \
    --hostname-override=22.22.22.11 \
    --cluster-cidr=10.0.0.0/24 \
    --kubeconfig=/opt/kubernetes/cfg/kube-proxy.kubeconfig"
    

    systemd管理kube-proxy组件:

    # cat /usr/lib/systemd/system/kube-proxy.service 
    [Unit]
    Description=Kubernetes Proxy
    After=network.target
    
    [Service]
    EnvironmentFile=-/opt/kubernetes/cfg/kube-proxy
    ExecStart=/opt/kubernetes/bin/kube-proxy $KUBE_PROXY_OPTS
    Restart=on-failure
    
    [Install]
    WantedBy=multi-user.target
    

    启动:

    # systemctl daemon-reload
    # systemctl enable kube-proxy
    # systemctl restart kube-proxy
    

    Node2部署方式一样。

    查看集群状态

    # kubectl get node
    NAME             STATUS    ROLES     AGE       VERSION
    22.22.22.11     Ready     <none>    1d       v1.13.0
    22.22.22.12     Ready     <none>    1d       v1.13.0
    # kubectl get cs
    NAME                 STATUS    MESSAGE             ERROR
    controller-manager   Healthy   ok                  
    scheduler            Healthy   ok                  
    etcd-2               Healthy   {"health":"true"}   
    etcd-1               Healthy   {"health":"true"}   
    etcd-0               Healthy   {"health":"true"}
    

    运行一个测试示例

    # kubectl run nginx --image=nginx --replicas=3
    # kubectl expose deployment nginx --port=88 --target-port=80 --type=NodePort
    

    查看Pod,Service:

    # kubectl get pods
    NAME                     READY     STATUS    RESTARTS   AGE
    nginx-64f497f8fd-gmstq   1/1       Running   3          1d
    nginx-64f497f8fd-q6wk9   1/1       Running   3          1d
    nginx-64f497f8fd-x7p2c   1/1       Running   3          1d
    # kubectl get svc
    NAME         TYPE        CLUSTER-IP   EXTERNAL-IP   PORT(S)                        AGE
    kubernetes   ClusterIP   10.0.0.1     <none>        443/TCP                        28d
    nginx        NodePort    10.0.0.175   <none>        88:38696/TCP                   28d
    

    访问集群中部署的Nginx,打开浏览器输入:http://22.22.22.11:38696


    image.png

    相关文章

      网友评论

          本文标题:Centos7二进制部署k8s(三)

          本文链接:https://www.haomeiwen.com/subject/qqqdtctx.html