Kubernetes-1.9.1 Deployment

Author: 天夭夭 | Published 2019-10-14 16:12

    Environment

    OS: CentOS 7.x
    This setup uses two Masters and one Node: kubernetes-64 is a dedicated Master, kubernetes-65 is both a Master and a Node, and kubernetes-66 is a plain Node.

    kubernetes-64: 172.16.1.64
    kubernetes-65: 172.16.1.65
    kubernetes-66: 172.16.1.66
    

    Initialize the environment

    # Set each machine's hostname (see the examples below)
    hostnamectl --static set-hostname <hostname>
    
    kubernetes-64: 172.16.1.64
    kubernetes-65: 172.16.1.65
    kubernetes-66: 172.16.1.66
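    
    For example, matching the table above, run each command on its own machine:
    
    hostnamectl --static set-hostname kubernetes-64   # on 172.16.1.64
    hostnamectl --static set-hostname kubernetes-65   # on 172.16.1.65
    hostnamectl --static set-hostname kubernetes-66   # on 172.16.1.66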
    
    # Edit /etc/hosts so the hosts can reach each other by hostname
    
    vi /etc/hosts
    
    172.16.1.64  kubernetes-64
    172.16.1.65  kubernetes-65
    172.16.1.66  kubernetes-66
    # Disable the firewall
    systemctl stop firewalld.service
    systemctl disable firewalld.service
    

    Create the CA

    Here we use cfssl, CloudFlare's PKI toolkit, to generate the Certificate Authority (CA) certificate and key files.
    
    Install cfssl
    mkdir -p /opt/local/cfssl
    
    cd /opt/local/cfssl
    
    wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
    mv cfssl_linux-amd64 cfssl
    
    wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
    mv cfssljson_linux-amd64 cfssljson
    
    wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
    mv cfssl-certinfo_linux-amd64 cfssl-certinfo
    
    chmod +x *
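    
    A minimal sanity check that the downloaded binaries run:
    
    /opt/local/cfssl/cfssl version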
    
    Create the CA certificate config
    
    mkdir /opt/ssl
    
    cd /opt/ssl
    
    # config.json file
    
    vi  config.json
    
    {
      "signing": {
        "default": {
          "expiry": "87600h"
        },
        "profiles": {
          "kubernetes": {
            "usages": [
                "signing",
                "key encipherment",
                "server auth",
                "client auth"
            ],
            "expiry": "87600h"
          }
        }
      }
    }
    
    # csr.json file
    
    vi csr.json
    
    {
      "CN": "kubernetes",
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "ShenZhen",
          "L": "ShenZhen",
          "O": "k8s",
          "OU": "System"
        }
      ]
    }
    
    Generate the CA certificate and private key
    
    cd /opt/ssl/
    /opt/local/cfssl/cfssl gencert -initca csr.json | /opt/local/cfssl/cfssljson -bare ca
    
    [root@kubernetes-64 ssl]# ls -lt
    total 20
    -rw-r--r-- 1 root root 1005 Jul  3 17:26 ca.csr
    -rw------- 1 root root 1675 Jul  3 17:26 ca-key.pem
    -rw-r--r-- 1 root root 1363 Jul  3 17:26 ca.pem
    -rw-r--r-- 1 root root  210 Jul  3 17:24 csr.json
    -rw-r--r-- 1 root root  292 Jul  3 17:23 config.json
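    
    You can optionally inspect the new CA certificate with the cfssl-certinfo tool installed earlier:
    
    /opt/local/cfssl/cfssl-certinfo -cert ca.pem
    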
    Distribute the certificates
    
    # Create the certificate directory
    mkdir -p /etc/kubernetes/ssl
    
    # Copy the files into it
    cp *.pem /etc/kubernetes/ssl
    cp ca.csr /etc/kubernetes/ssl
    
    # These files must be copied to every k8s machine
    
    scp *.pem 172.16.1.65:/etc/kubernetes/ssl/
    scp *.csr 172.16.1.65:/etc/kubernetes/ssl/
    
    scp *.pem 172.16.1.66:/etc/kubernetes/ssl/
    scp *.csr 172.16.1.66:/etc/kubernetes/ssl/
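    
    scp assumes the target directory already exists on the remote hosts; if it does not, create it first:
    
    ssh 172.16.1.65 "mkdir -p /etc/kubernetes/ssl"
    ssh 172.16.1.66 "mkdir -p /etc/kubernetes/ssl"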
    

    Install Docker

    Install docker-ce on all servers beforehand. The official 1.9 release notes state that k8s currently supports at most Docker versions 1.11.2, 1.12.6, 1.13.1, and 17.03.1.
    
    # Add the yum repo
    
    # Install yum-utils (provides yum-config-manager)
    
    yum -y install yum-utils
    
    # Add the repo
    yum-config-manager \
        --add-repo \
        https://download.docker.com/linux/centos/docker-ce.repo
    
    
    # Refresh the repo cache
    yum makecache
    
    # List the available docker-ce versions
    
    yum list docker-ce.x86_64  --showduplicates |sort -r
    
    
    
    # Install a pinned version. docker-ce 17.03 depends on docker-ce-selinux, which cannot be installed directly from the repo with yum, so fetch the rpm:
    
    wget https://download.docker.com/linux/centos/7/x86_64/stable/Packages/docker-ce-selinux-17.03.1.ce-1.el7.centos.noarch.rpm
    
    yum install policycoreutils-python -y
    rpm -ivh docker-ce-selinux-17.03.1.ce-1.el7.centos.noarch.rpm
    
    
    
    yum -y install docker-ce-17.03.1.ce
    
    
    # Verify the installation
    
    docker version
    Client:
     Version:      17.03.1-ce
     API version:  1.27
     Go version:   go1.7.5
     Git commit:   f5ec1e2
     Built:        Tue Jun 27 02:21:36 2017
     OS/Arch:      linux/amd64
    

    Modify the Docker configuration

    # Add the service unit
    
    vi /etc/systemd/system/docker.service
    
    
    
    [Unit]
    Description=Docker Application Container Engine
    Documentation=http://docs.docker.com
    After=network.target docker-storage-setup.service
    Wants=docker-storage-setup.service
    
    [Service]
    Type=notify
    Environment=GOTRACEBACK=crash
    ExecReload=/bin/kill -s HUP $MAINPID
    Delegate=yes
    KillMode=process
    ExecStart=/usr/bin/dockerd \
              $DOCKER_OPTS \
              $DOCKER_STORAGE_OPTIONS \
              $DOCKER_NETWORK_OPTIONS \
              $DOCKER_DNS_OPTIONS \
              $INSECURE_REGISTRY
    LimitNOFILE=1048576
    LimitNPROC=1048576
    LimitCORE=infinity
    TimeoutStartSec=1min
    Restart=on-abnormal
    
    [Install]
    WantedBy=multi-user.target
    
    Other configuration changes
    
    # On older kernels (3.10.x), configure the overlay2 storage driver as follows
    
    
    vi /etc/docker/daemon.json
    
    {
      "storage-driver": "overlay2",
      "storage-opts": [
        "overlay2.override_kernel_check=true"
      ]
    }
    
    
    
    
    mkdir -p /etc/systemd/system/docker.service.d/
    
    
    vi /etc/systemd/system/docker.service.d/docker-options.conf
        # Add the following (note: each Environment value must stay on a single line; a line break inside it will fail to load)
        # For docker 17.03.2 and earlier use --graph=/opt/docker
        # For docker 17.04.x and later use --data-root=/opt/docker
    
    [Service]
    Environment="DOCKER_OPTS=--insecure-registry=10.254.0.0/16 \
        --graph=/opt/docker --log-opt max-size=50m --log-opt max-file=5"
    
    
    vi /etc/systemd/system/docker.service.d/docker-dns.conf
    
    
    # Add the following:
    
    [Service]
    Environment="DOCKER_DNS_OPTIONS=\
        --dns 10.254.0.2 --dns 114.114.114.114  \
        --dns-search default.svc.cluster.local --dns-search svc.cluster.local  \
        --dns-opt ndots:2 --dns-opt timeout:2 --dns-opt attempts:2"
    
    Reload the configuration and start docker
    
    systemctl daemon-reload
    systemctl start docker
    systemctl enable docker
    
    # If anything fails, use
    # journalctl -f -t docker and journalctl -u docker to diagnose
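    
    Once docker is running, you can confirm the options above took effect; the expected values follow from the config above:
    
    docker info | grep -E 'Storage Driver|Docker Root Dir'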
    

    The etcd cluster

    etcd is the most critical component of a k8s cluster: if etcd goes down, the cluster goes down.
    
    # Install etcd
    
        Official releases: https://github.com/coreos/etcd/releases
    
    # Download the binary tarball
    
    wget https://github.com/coreos/etcd/releases/download/v3.2.14/etcd-v3.2.14-linux-amd64.tar.gz
    
    tar zxvf etcd-v3.2.14-linux-amd64.tar.gz
    
    cd etcd-v3.2.14-linux-amd64
    
    mv etcd  etcdctl /usr/bin/
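    
    Verify the install:
    
    etcd --version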
    
    Create the etcd certificates
        Three etcd hosts are configured here by default. If you may add more etcd nodes later, reserve a few extra IPs in the hosts list now, so new members pass certificate validation without the certificate having to be re-issued.
    
    cd /opt/ssl/
    
    vi etcd-csr.json
    
    {
      "CN": "etcd",
      "hosts": [
        "127.0.0.1",
        "172.16.1.64",
        "172.16.1.65",
        "172.16.1.66"
      ],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "ShenZhen",
          "L": "ShenZhen",
          "O": "k8s",
          "OU": "System"
        }
      ]
    }
    
    Generate the etcd certificate and key
    
    /opt/local/cfssl/cfssl gencert -ca=/opt/ssl/ca.pem \
      -ca-key=/opt/ssl/ca-key.pem \
      -config=/opt/ssl/config.json \
      -profile=kubernetes etcd-csr.json | /opt/local/cfssl/cfssljson -bare etcd
    
    # Check the output
    
    [root@kubernetes-64 ssl]# ls etcd*
    etcd.csr  etcd-csr.json  etcd-key.pem  etcd.pem
    # Copy to the etcd servers
    
    # etcd-1 
    cp etcd*.pem /etc/kubernetes/ssl/
    
    # etcd-2
    scp etcd*.pem 172.16.1.65:/etc/kubernetes/ssl/
    
    # etcd-3
    scp etcd*.pem 172.16.1.66:/etc/kubernetes/ssl/
    
    
    
    # If etcd runs as a non-root user, it cannot read the key without this
    
    chmod 644 /etc/kubernetes/ssl/etcd-key.pem
    Modify the etcd configuration
    
        Since etcd is the most critical component, point --data-dir at a dedicated path.
    
    # Create the etcd data directory and set ownership
    
    useradd etcd
    
    mkdir -p /opt/etcd
    
    chown -R etcd:etcd /opt/etcd
    
    etcd-1 configuration
    
    vi /etc/systemd/system/etcd.service
    
    
    [Unit]
    Description=Etcd Server
    After=network.target
    After=network-online.target
    Wants=network-online.target
    
    [Service]
    Type=notify
    WorkingDirectory=/opt/etcd/
    User=etcd
    # set GOMAXPROCS to number of processors
    ExecStart=/usr/bin/etcd \
      --name=etcd1 \
      --cert-file=/etc/kubernetes/ssl/etcd.pem \
      --key-file=/etc/kubernetes/ssl/etcd-key.pem \
      --peer-cert-file=/etc/kubernetes/ssl/etcd.pem \
      --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \
      --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
      --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
      --initial-advertise-peer-urls=https://172.16.1.64:2380 \
      --listen-peer-urls=https://172.16.1.64:2380 \
      --listen-client-urls=https://172.16.1.64:2379,http://127.0.0.1:2379 \
      --advertise-client-urls=https://172.16.1.64:2379 \
      --initial-cluster-token=k8s-etcd-cluster \
      --initial-cluster=etcd1=https://172.16.1.64:2380,etcd2=https://172.16.1.65:2380,etcd3=https://172.16.1.66:2380 \
      --initial-cluster-state=new \
      --data-dir=/opt/etcd/
    Restart=on-failure
    RestartSec=5
    LimitNOFILE=65536
    
    [Install]
    WantedBy=multi-user.target
    
    etcd-2 configuration
    
    vi /etc/systemd/system/etcd.service
    
    
    [Unit]
    Description=Etcd Server
    After=network.target
    After=network-online.target
    Wants=network-online.target
    
    [Service]
    Type=notify
    WorkingDirectory=/opt/etcd/
    User=etcd
    # set GOMAXPROCS to number of processors
    ExecStart=/usr/bin/etcd \
      --name=etcd2 \
      --cert-file=/etc/kubernetes/ssl/etcd.pem \
      --key-file=/etc/kubernetes/ssl/etcd-key.pem \
      --peer-cert-file=/etc/kubernetes/ssl/etcd.pem \
      --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \
      --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
      --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
      --initial-advertise-peer-urls=https://172.16.1.65:2380 \
      --listen-peer-urls=https://172.16.1.65:2380 \
      --listen-client-urls=https://172.16.1.65:2379,http://127.0.0.1:2379 \
      --advertise-client-urls=https://172.16.1.65:2379 \
      --initial-cluster-token=k8s-etcd-cluster \
      --initial-cluster=etcd1=https://172.16.1.64:2380,etcd2=https://172.16.1.65:2380,etcd3=https://172.16.1.66:2380 \
      --initial-cluster-state=new \
      --data-dir=/opt/etcd
    Restart=on-failure
    RestartSec=5
    LimitNOFILE=65536
    
    [Install]
    WantedBy=multi-user.target
    
    etcd-3 configuration
    
    vi /etc/systemd/system/etcd.service
    
    
    [Unit]
    Description=Etcd Server
    After=network.target
    After=network-online.target
    Wants=network-online.target
    
    [Service]
    Type=notify
    WorkingDirectory=/opt/etcd/
    User=etcd
    # set GOMAXPROCS to number of processors
    ExecStart=/usr/bin/etcd \
      --name=etcd3 \
      --cert-file=/etc/kubernetes/ssl/etcd.pem \
      --key-file=/etc/kubernetes/ssl/etcd-key.pem \
      --peer-cert-file=/etc/kubernetes/ssl/etcd.pem \
      --peer-key-file=/etc/kubernetes/ssl/etcd-key.pem \
      --trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
      --peer-trusted-ca-file=/etc/kubernetes/ssl/ca.pem \
      --initial-advertise-peer-urls=https://172.16.1.66:2380 \
      --listen-peer-urls=https://172.16.1.66:2380 \
      --listen-client-urls=https://172.16.1.66:2379,http://127.0.0.1:2379 \
      --advertise-client-urls=https://172.16.1.66:2379 \
      --initial-cluster-token=k8s-etcd-cluster \
      --initial-cluster=etcd1=https://172.16.1.64:2380,etcd2=https://172.16.1.65:2380,etcd3=https://172.16.1.66:2380 \
      --initial-cluster-state=new \
      --data-dir=/opt/etcd/
    Restart=on-failure
    RestartSec=5
    LimitNOFILE=65536
    
    [Install]
    WantedBy=multi-user.target
    
    Start etcd
    
        Start the etcd service on every node
    
    systemctl daemon-reload
    systemctl enable etcd
    systemctl start etcd
    systemctl status etcd
    
    # If anything fails, use
    # journalctl -f -t etcd and journalctl -u etcd to diagnose
        If you see an error like "master-16 etcd[25461]: request cluster ID mismatch (got bdd7c7c32bde1208 want 698851d67fea3c17)",
        delete the contents of --data-dir on every etcd node, then restart the etcd service.
        Reference: http://blog.51cto.com/1666898/2156165
    
    Verify the etcd cluster
    
    # Check cluster health:
    
    etcdctl --endpoints=https://172.16.1.64:2379,https://172.16.1.65:2379,https://172.16.1.66:2379 \
            --cert-file=/etc/kubernetes/ssl/etcd.pem \
            --ca-file=/etc/kubernetes/ssl/ca.pem \
            --key-file=/etc/kubernetes/ssl/etcd-key.pem \
            cluster-health
    
    member 35eefb8e7cc93b53 is healthy: got healthy result from https://172.16.1.66:2379
    member 4576ff5ed626a66b is healthy: got healthy result from https://172.16.1.64:2379
    member bf3bd651ec832339 is healthy: got healthy result from https://172.16.1.65:2379
    cluster is healthy
    
    # List the cluster members:
    
    etcdctl --endpoints=https://172.16.1.64:2379,https://172.16.1.65:2379,https://172.16.1.66:2379 \
            --cert-file=/etc/kubernetes/ssl/etcd.pem \
            --ca-file=/etc/kubernetes/ssl/ca.pem \
            --key-file=/etc/kubernetes/ssl/etcd-key.pem \
            member list
    
    35eefb8e7cc93b53: name=etcd3 peerURLs=https://172.16.1.66:2380 clientURLs=https://172.16.1.66:2379 isLeader=false
    4576ff5ed626a66b: name=etcd1 peerURLs=https://172.16.1.64:2380 clientURLs=https://172.16.1.64:2379 isLeader=true
    bf3bd651ec832339: name=etcd2 peerURLs=https://172.16.1.65:2380 clientURLs=https://172.16.1.65:2379 isLeader=false
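    
    Note: the etcdctl shipped with etcd 3.2 defaults to the v2 API, which is what the --cert-file/--ca-file/--key-file flags above belong to. A sketch of the equivalent check using the v3 API:
    
    ETCDCTL_API=3 etcdctl --endpoints=https://172.16.1.64:2379,https://172.16.1.65:2379,https://172.16.1.66:2379 \
            --cacert=/etc/kubernetes/ssl/ca.pem \
            --cert=/etc/kubernetes/ssl/etcd.pem \
            --key=/etc/kubernetes/ssl/etcd-key.pem \
            endpoint health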
    

    Configure the Kubernetes cluster

    Install kubectl on every machine you will operate the cluster from.
    

    Master and Node

    A Master runs three components: kube-apiserver, kube-scheduler, and kube-controller-manager. kube-scheduler decides which node each pod is scheduled onto; in short, it handles resource scheduling. kube-controller-manager runs the control loops (deployment controller, replication controller, endpoints controller, namespace controller, serviceaccounts controller, and so on) and interacts with kube-apiserver.
    

    Install the components

    # Download the release binaries from GitHub
    
    cd /tmp
    
    wget https://dl.k8s.io/v1.9.1/kubernetes-server-linux-amd64.tar.gz
    
    tar -xzvf kubernetes-server-linux-amd64.tar.gz
    
    cd kubernetes
    
    cp -r server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl} /usr/local/bin/
    scp server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kubectl,kube-proxy,kubelet} 172.16.1.65:/usr/local/bin/
    scp server/bin/{kube-proxy,kubelet} 172.16.1.66:/usr/local/bin/
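    
    Verify the binaries are in place:
    
    kubectl version --client
    kube-apiserver --version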
    
    Create the admin certificate
        kubectl talks to kube-apiserver over its secure port, which requires a TLS certificate and key for the connection.
    
    cd /opt/ssl/
    
    vi admin-csr.json
    
    
    {
      "CN": "admin",
      "hosts": [],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "ShenZhen",
          "L": "ShenZhen",
          "O": "system:masters",
          "OU": "System"
        }
      ]
    }
    
    # Generate the admin certificate and private key
    cd /opt/ssl/
    
    /opt/local/cfssl/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
      -ca-key=/etc/kubernetes/ssl/ca-key.pem \
      -config=/opt/ssl/config.json \
      -profile=kubernetes admin-csr.json | /opt/local/cfssl/cfssljson -bare admin
    
    
    # Check the output
    
    [root@kubernetes-64 ssl]# ls admin*
    admin.csr  admin-csr.json  admin-key.pem  admin.pem
    
    cp admin*.pem /etc/kubernetes/ssl/
    
    scp admin*.pem 172.16.1.65:/etc/kubernetes/ssl/
    

    Configure the kubectl kubeconfig file

    The generated configuration is stored in the /root/.kube directory.
    

    Configure the kubernetes cluster entry

    kubectl config set-cluster kubernetes \
      --certificate-authority=/etc/kubernetes/ssl/ca.pem \
      --embed-certs=true \
      --server=https://127.0.0.1:6443
    

    Configure client credentials

    kubectl config set-credentials admin \
      --client-certificate=/etc/kubernetes/ssl/admin.pem \
      --embed-certs=true \
      --client-key=/etc/kubernetes/ssl/admin-key.pem
      
    kubectl config set-context kubernetes \
      --cluster=kubernetes \
      --user=admin
    
    kubectl config use-context kubernetes
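    
    A quick check of the result (embedded certificate data is shown redacted):
    
    kubectl config view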
    

    Create the kubernetes certificate

    cd /opt/ssl
    
    vi kubernetes-csr.json
    
    {
      "CN": "kubernetes",
      "hosts": [
        "127.0.0.1",
        "172.16.1.64",
        "172.16.1.65",
        "172.16.1.66",
        "10.254.0.1",
        "kubernetes",
        "kubernetes.default",
        "kubernetes.default.svc",
        "kubernetes.default.svc.cluster",
        "kubernetes.default.svc.cluster.local"
      ],
      "key": {
        "algo": "rsa",
        "size": 2048
      },
      "names": [
        {
          "C": "CN",
          "ST": "ShenZhen",
          "L": "ShenZhen",
          "O": "k8s",
          "OU": "System"
        }
      ]
    }
    
    ## The IPs in the hosts field: 127.0.0.1 is localhost; 172.16.1.64 and 172.16.1.65 are the Master IPs (with more Masters, list them all); 10.254.0.1 is the kubernetes SVC IP, normally the first IP of the service network. After startup you can see it with kubectl get svc.
    

    Generate the kubernetes certificate and private key

    /opt/local/cfssl/cfssl gencert -ca=/etc/kubernetes/ssl/ca.pem \
      -ca-key=/etc/kubernetes/ssl/ca-key.pem \
      -config=/opt/ssl/config.json \
      -profile=kubernetes kubernetes-csr.json | /opt/local/cfssl/cfssljson -bare kubernetes
    # Check the output
    [root@kubernetes-64 ssl]# ls -lt kubernetes*
    -rw-r--r-- 1 root root 1261 Nov 16 15:12 kubernetes.csr
    -rw------- 1 root root 1679 Nov 16 15:12 kubernetes-key.pem
    -rw-r--r-- 1 root root 1635 Nov 16 15:12 kubernetes.pem
    -rw-r--r-- 1 root root  475 Nov 16 15:12 kubernetes-csr.json
    # Copy into place
    cp kubernetes*.pem /etc/kubernetes/ssl/
    scp kubernetes*.pem 172.16.1.65:/etc/kubernetes/ssl/
    

    Configure kube-apiserver

    On first start, kubelet sends a TLS bootstrapping request to kube-apiserver, which checks whether the token in the request matches its configured token; if so, it automatically issues a certificate and key for the kubelet.
    
    # Generate a token
    
    [root@kubernetes-64 ssl]# head -c 16 /dev/urandom | od -An -t x | tr -d ' '
    df3b158fbdc425ae2ac70bbef0688921
    
    
    # Create the token.csv file
    
    cd /opt/ssl
    
    vi token.csv
    
    df3b158fbdc425ae2ac70bbef0688921,kubelet-bootstrap,10001,"system:kubelet-bootstrap"
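    
    The columns follow kube-apiserver's static token file format: token, user name, user UID, and an optional quoted group list.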
    
    
    # Copy it
    
    cp token.csv /etc/kubernetes/
    
    scp token.csv 172.16.1.65:/etc/kubernetes/
    
    # Create the audit policy file
    
    cd /etc/kubernetes
    
    
    cat >> audit-policy.yaml <<EOF
    # Log all requests at the Metadata level.
    apiVersion: audit.k8s.io/v1beta1
    kind: Policy
    rules:
    - level: Metadata
    EOF
    
    
    
    
    # Copy it
    
    scp audit-policy.yaml 172.16.1.65:/etc/kubernetes/
    
    Create the kube-apiserver.service file
    
    # Custom systemd service files normally live under /etc/systemd/system/
    # Use each machine's own local IP
    
    vi /etc/systemd/system/kube-apiserver.service
    
    [Unit]
    Description=Kubernetes API Server
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    After=network.target
    
    [Service]
    User=root
    ExecStart=/usr/local/bin/kube-apiserver \
      --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota,NodeRestriction \
      --advertise-address=172.16.1.64 \
      --allow-privileged=true \
      --apiserver-count=3 \
      --audit-policy-file=/etc/kubernetes/audit-policy.yaml \
      --audit-log-maxage=30 \
      --audit-log-maxbackup=3 \
      --audit-log-maxsize=100 \
      --audit-log-path=/var/log/kubernetes/audit.log \
      --authorization-mode=Node,RBAC \
      --bind-address=0.0.0.0 \
      --secure-port=6443 \
      --client-ca-file=/etc/kubernetes/ssl/ca.pem \
      --enable-swagger-ui=true \
      --etcd-cafile=/etc/kubernetes/ssl/ca.pem \
      --etcd-certfile=/etc/kubernetes/ssl/etcd.pem \
      --etcd-keyfile=/etc/kubernetes/ssl/etcd-key.pem \
      --etcd-servers=https://172.16.1.64:2379,https://172.16.1.65:2379,https://172.16.1.66:2379 \
      --event-ttl=1h \
      --kubelet-https=true \
      --insecure-bind-address=127.0.0.1 \
      --insecure-port=8080 \
      --service-account-key-file=/etc/kubernetes/ssl/ca-key.pem \
      --service-cluster-ip-range=10.254.0.0/18 \
      --service-node-port-range=30000-32000 \
      --tls-cert-file=/etc/kubernetes/ssl/kubernetes.pem \
      --tls-private-key-file=/etc/kubernetes/ssl/kubernetes-key.pem \
      --enable-bootstrap-token-auth \
      --token-auth-file=/etc/kubernetes/token.csv \
      --v=1
    Restart=on-failure
    RestartSec=5
    Type=notify
    LimitNOFILE=65536
    
    [Install]
    WantedBy=multi-user.target
    
    # Since k8s 1.8, --authorization-mode=Node is required
    # Since k8s 1.8, NodeRestriction must be added to --admission-control
    # Since k8s 1.8, --audit-policy-file=/etc/kubernetes/audit-policy.yaml is required
    
    # Note --service-node-port-range=30000-32000:
    # this is the NodePort range used when exposing services externally; both random and explicitly chosen NodePorts must fall within it.
    
    Start kube-apiserver
    
    systemctl daemon-reload
    systemctl enable kube-apiserver
    systemctl start kube-apiserver
    systemctl status kube-apiserver
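    
    A quick health check against the insecure local port configured above:
    
    curl http://127.0.0.1:8080/healthz   # should print: ok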
    

    Configure kube-controller-manager

    # Create the kube-controller-manager.service file
    
    vi /etc/systemd/system/kube-controller-manager.service
    
    
    [Unit]
    Description=Kubernetes Controller Manager
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    
    [Service]
    ExecStart=/usr/local/bin/kube-controller-manager \
      --address=0.0.0.0 \
      --master=http://127.0.0.1:8080 \
      --allocate-node-cidrs=true \
      --service-cluster-ip-range=10.254.0.0/18 \
      --cluster-cidr=10.254.64.0/18 \
      --cluster-name=kubernetes \
      --service-account-private-key-file=/etc/kubernetes/ssl/ca-key.pem \
      --root-ca-file=/etc/kubernetes/ssl/ca.pem \
      --cluster-signing-cert-file=/etc/kubernetes/ssl/ca.pem \
      --cluster-signing-key-file=/etc/kubernetes/ssl/ca-key.pem \
      --leader-elect=true \
      --v=1
    Restart=on-failure
    RestartSec=5
    
    [Install]
    WantedBy=multi-user.target
    

    Start kube-controller-manager

    systemctl daemon-reload
    systemctl enable kube-controller-manager
    systemctl start kube-controller-manager
    systemctl status kube-controller-manager
    

    Configure kube-scheduler

    # Create the kube-scheduler.service file
    
    vi /etc/systemd/system/kube-scheduler.service
    
    
    [Unit]
    Description=Kubernetes Scheduler
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    
    [Service]
    ExecStart=/usr/local/bin/kube-scheduler \
      --address=0.0.0.0 \
      --master=http://127.0.0.1:8080 \
      --leader-elect=true \
      --v=1
    Restart=on-failure
    RestartSec=5
    
    [Install]
    WantedBy=multi-user.target
    
    Start kube-scheduler
    
    systemctl daemon-reload
    systemctl enable kube-scheduler
    systemctl start kube-scheduler
    systemctl status kube-scheduler
    

    Verify the Master nodes

    [root@kubernetes-64 ~]# kubectl get componentstatuses
    NAME                 STATUS    MESSAGE              ERROR
    controller-manager   Healthy   ok                   
    scheduler            Healthy   ok                   
    etcd-2               Healthy   {"health": "true"}   
    etcd-0               Healthy   {"health": "true"}   
    etcd-1               Healthy   {"health": "true"} 
    
    
    
    [root@kubernetes-65 ~]# kubectl get componentstatuses
    NAME                 STATUS    MESSAGE              ERROR
    controller-manager   Healthy   ok                   
    scheduler            Healthy   ok                   
    etcd-2               Healthy   {"health": "true"}   
    etcd-0               Healthy   {"health": "true"}   
    etcd-1               Healthy   {"health": "true"}  
    

    Configure kubelet

    When kubelet starts it sends a TLS bootstrapping request to kube-apiserver, so the kubelet-bootstrap user from the bootstrap token file must first be bound to the system:node-bootstrapper role; only then does kubelet have permission to create certificate signing requests (certificatesigningrequests).
    

    Create the role binding first

    The user is the one configured in the master's token.csv file
    This only needs to be created once
    
    kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
    

    Create the kubelet kubeconfig file

    # Configure the cluster entry
    
    kubectl config set-cluster kubernetes \
      --certificate-authority=/etc/kubernetes/ssl/ca.pem \
      --embed-certs=true \
      --server=https://127.0.0.1:6443 \
      --kubeconfig=bootstrap.kubeconfig
    
    # Configure client credentials
    
    kubectl config set-credentials kubelet-bootstrap \
      --token=df3b158fbdc425ae2ac70bbef0688921 \
      --kubeconfig=bootstrap.kubeconfig
    
    
    # Configure the context
    
    kubectl config set-context default \
      --cluster=kubernetes \
      --user=kubelet-bootstrap \
      --kubeconfig=bootstrap.kubeconfig
      
      
    # Use the context by default
    kubectl config use-context default --kubeconfig=bootstrap.kubeconfig
    
    # Move the generated bootstrap.kubeconfig into place (see the note below for the other machines)
    
    mv bootstrap.kubeconfig /etc/kubernetes/
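    
    Every machine that runs kubelet needs this file. A sketch, assuming the same paths on every host; note that on a Node with no local apiserver, the --server set above should point at a reachable Master rather than 127.0.0.1:
    
    scp /etc/kubernetes/bootstrap.kubeconfig 172.16.1.65:/etc/kubernetes/
    scp /etc/kubernetes/bootstrap.kubeconfig 172.16.1.66:/etc/kubernetes/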
    
    Create the kubelet.service file
    
    # Create the kubelet working directory
    
    # Set --hostname-override to each node's own hostname
    
    mkdir /var/lib/kubelet
    
    vi /etc/systemd/system/kubelet.service
    
    
    [Unit]
    Description=Kubernetes Kubelet
    Documentation=https://github.com/GoogleCloudPlatform/kubernetes
    After=docker.service
    Requires=docker.service
    
    [Service]
    WorkingDirectory=/var/lib/kubelet
    ExecStart=/usr/local/bin/kubelet \
      --cgroup-driver=cgroupfs \
      --hostname-override=kubernetes-64 \
      --pod-infra-container-image=jicki/pause-amd64:3.0 \
      --experimental-bootstrap-kubeconfig=/etc/kubernetes/bootstrap.kubeconfig \
      --kubeconfig=/etc/kubernetes/kubelet.kubeconfig \
      --cert-dir=/etc/kubernetes/ssl \
      --cluster_dns=10.254.0.2 \
      --cluster_domain=cluster.local. \
      --hairpin-mode promiscuous-bridge \
      --allow-privileged=true \
      --fail-swap-on=false \
      --serialize-image-pulls=false \
      --logtostderr=true \
      --max-pods=512 \
      --v=1
    
    [Install]
    WantedBy=multi-user.target
    
    # In the configuration above:
    kubernetes-64    this machine's hostname
    10.254.0.2       the pre-allocated cluster DNS address
    cluster.local.   the kubernetes cluster domain
    jicki/pause-amd64:3.0  the pod infrastructure image, i.e. gcr.io/google_containers/pause-amd64:3.0 mirrored; pulling it into your own registry makes it faster.
    
    Start kubelet
    
    systemctl daemon-reload
    systemctl enable kubelet
    systemctl start kubelet
    systemctl status kubelet
    
    # If anything fails, use
    # journalctl -f -t kubelet and journalctl -u kubelet to diagnose
    
    Approve the TLS requests
    
    # List the CSR names
    
    [root@kubernetes-64 ~]# kubectl get csr
    NAME                                                   AGE       REQUESTOR           CONDITION
    node-csr-Pu4QYp3NAwlC6o8AG8iwdCl52CiqhjiSyrso3335JTs   1m        kubelet-bootstrap   Pending
    node-csr-poycCHd7B8YPxc12EBgI3Rwe0wnDJah5uIGvQHzghVY   2m        kubelet-bootstrap   Pending
    # Approve them
    
    kubectl get csr | grep Pending | awk '{print $1}' | xargs kubectl certificate approve
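    
    After approval the CSRs should show Approved,Issued, and each kubelet writes its issued client certificate into the --cert-dir configured above:
    
    kubectl get csr
    ls /etc/kubernetes/ssl/kubelet*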
    
    Verify the nodes

    [root@kubernetes-64 ~]# kubectl get nodes
    NAME            STATUS    ROLES     AGE       VERSION
    kubernetes-64   Ready     <none>    12s       v1.9.1
    kubernetes      Ready     <none>    17s       v1.9.1
