106. Kubernetes Installation

Author: davisgao | Published 2018-08-08 14:49

    1. Overview

    2. Prepare certificates

    2.1 Download the cfssl tools

      wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -O /usr/local/bin/cfssl
      wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -O /usr/local/bin/cfssljson
      chmod +x /usr/local/bin/cfssl /usr/local/bin/cfssljson 
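    
    A quick sanity check that both binaries are installed and executable:
    
    cfssl version
    ls -l /usr/local/bin/cfssl /usr/local/bin/cfssljson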
    

    2.2 Prepare the certificate template files

    Certificate signing configuration ca-config.json

    {
      "signing": {
        "default": {
          "expiry": "8760h"
        },
        "profiles": {
          "kubernetes": {
            "usages": [
                "signing",
                "key encipherment",
                "server auth",
                "client auth"
            ],
            "expiry": "8760h"
          }
        }
      }
    }
    

    Root CA ca-csr.json (etcd and Kubernetes share one root CA here; many installations keep them separate)

    {
        "CN": "etcd",
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [{
            "C": "CN",
            "ST": "NanJing",
            "L": "NanJing",
            "O": "Kubernetes",
            "OU": "Kubernetes-manual"
        }]
    }
    

    etcd-csr.json

    {
        "CN": "kubernetes",
            "hosts": [
                $HOSTS$  # replace with the reachable etcd host addresses
                "127.0.0.1"
            ],
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [{
            "C": "CN",
            "ST": "NanJing",
            "L": "NanJing",
            "O": "Kubernetes",
            "OU": "Kubernetes-manual"
        }]
    }
    

    apiserver-csr.json

    {
        "CN": "kube-apiserver",
            "hosts": [
              "10.20.16.227",
              "10.20.16.228",
              "10.20.16.229",
              "10.20.16.214",
              "127.0.0.1",
              "10.254.0.1", # be sure to include the service-network IP that the in-cluster DNS (CoreDNS) will rely on later
              "localhost",
              "kubernetes",
              "kubernetes.default",
              "kubernetes.default.svc",
              "kubernetes.default.svc.cluster",
              "kubernetes.default.svc.cluster.local" # be sure to include the in-cluster DNS names of the API server
            ],
    
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [{
            "C": "CN",
            "ST": "NanJing",
            "L": "NanJing",
            "O": "Kubernetes",
            "OU": "Kubernetes-manual"
        }]
    }
    

    front-proxy-client-csr.json

    {
        "CN": "front-proxy-client",
        "key": {
            "algo": "rsa",
            "size": 2048
        }
    }
    

    admin-csr.json

    {
        "CN": "admin",
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [{
            "C": "CN",
            "ST": "NanJing",
            "L": "NanJing",
            "O": "system:masters",
            "OU": "Kubernetes-manual"
        }]
    }
    

    manager-csr.json

    {
        "CN": "system:kube-controller-manager",
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [{
            "C": "CN",
            "ST": "NanJing",
            "L": "NanJing",
            "O": "system:kube-controller-manager",
            "OU": "Kubernetes-manual"
        }]
    }
    

    scheduler-csr.json

    {
        "CN": "system:kube-scheduler",
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [{
            "C": "CN",
            "ST": "NanJing",
            "L": "NanJing",
            "O": "system:kube-scheduler",
            "OU": "Kubernetes-manual"
        }]
    }
    

    kubelet-csr.json

    {
        "CN": "system:node:$NODE",
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [{
            "C": "CN",
            "L": "NanJing",
            "ST": "NanJing",
            "O": "system:nodes",
            "OU": "Kubernetes-manual"
        }]
    }
    

    proxy-csr.json

    {
        "CN": "system:kube-proxy",
        "key": {
            "algo": "rsa",
            "size": 2048
        },
        "names": [{
            "C": "CN",
            "ST": "NanJing",
            "L": "NanJing",
            "O": "system:kube-proxy",
            "OU": "Kubernetes-manual"
        }]
    }
    

    2.3 Generate the certificates and per-component kubeconfig files from the templates (all sharing one root CA)

    Generate the certificates

    # generate the root CA
    cfssl gencert -initca ca-csr.json | cfssljson -bare ca
    # etcd certificate
    cfssl gencert \
      -ca=ca.pem \
      -ca-key=ca-key.pem \
      -config=ca-config.json \
      -profile=kubernetes \
      etcd-csr.json | cfssljson -bare etcd
    # admin certificate
    cfssl gencert \
      -ca=ca.pem \
      -ca-key=ca-key.pem \
      -config=ca-config.json \
      -profile=kubernetes \
      admin-csr.json | cfssljson -bare admin
    # apiserver certificate
    cfssl gencert \
      -ca=ca.pem \
      -ca-key=ca-key.pem \
      -config=ca-config.json \
      -profile=kubernetes \
      apiserver-csr.json | cfssljson -bare apiserver
    # controller-manager certificate
    cfssl gencert \
      -ca=ca.pem \
      -ca-key=ca-key.pem \
      -config=ca-config.json \
      -profile=kubernetes \
      manager-csr.json | cfssljson -bare controller-manager
    # scheduler certificate
    cfssl gencert \
      -ca=ca.pem \
      -ca-key=ca-key.pem \
      -config=ca-config.json \
      -profile=kubernetes \
      scheduler-csr.json | cfssljson -bare scheduler
    # front-proxy-client certificate
    cfssl gencert \
      -ca=ca.pem \
      -ca-key=ca-key.pem \
      -config=ca-config.json \
      -profile=kubernetes \
      front-proxy-client-csr.json | cfssljson -bare front-proxy-client
    # kubelet certificate (generated per node; not used here, see TLS bootstrapping below)
    # cfssl gencert \
    #    -ca=ca.pem \
    #    -ca-key=ca-key.pem \
    #    -config=ca-config.json \
    #    -hostname=$NODE \
    #    -profile=kubernetes \
    # kubelet-$NODE-csr.json | cfssljson -bare kubelet-$NODE
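    # (for reference) the per-node CSR files used above can be rendered from the
    # kubelet-csr.json template by substituting $NODE, e.g. with your own node names:
    # for NODE in node1 node2 node3; do
    #   sed "s/\$NODE/${NODE}/g" kubelet-csr.json > kubelet-${NODE}-csr.json
    # done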
    # kube-proxy certificate
    cfssl gencert \
      -ca=ca.pem \
      -ca-key=ca-key.pem \
      -config=ca-config.json \
      -profile=kubernetes \
      proxy-csr.json | cfssljson -bare proxy
    #--------------------------sa-----------------------------------
    $ openssl genrsa -out sa.key 2048
    $ openssl rsa -in sa.key -pubout -out sa.pub
    $ ls sa.*
    sa.key  sa.pub
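    
    Optionally verify that the apiserver certificate picked up all of the SANs listed in apiserver-csr.json:
    
    openssl x509 -in apiserver.pem -noout -text | grep -A2 "Subject Alternative Name"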
    

    Generate the kubeconfig files

    # set the master (API server) address
    export KUBE_APISERVER="https://10.20.16.229:6443"
    #--------------------------admin-----------------------------------
    # admin set cluster
    $ kubectl config set-cluster kubernetes \
        --certificate-authority=ca.pem \
        --embed-certs=true \
        --server=${KUBE_APISERVER} \
        --kubeconfig=./admin.conf
    
    # admin set credentials
    $ kubectl config set-credentials kubernetes-admin \
        --client-certificate=admin.pem \
        --client-key=admin-key.pem \
        --embed-certs=true \
        --kubeconfig=./admin.conf
    
    # admin set context
    $ kubectl config set-context kubernetes-admin@kubernetes \
        --cluster=kubernetes \
        --user=kubernetes-admin \
        --kubeconfig=./admin.conf
    
    # admin set default context
    $ kubectl config use-context kubernetes-admin@kubernetes \
        --kubeconfig=./admin.conf
    
    #--------------------------manager-----------------------------------
    # controller-manager set cluster
    $ kubectl config set-cluster kubernetes \
        --certificate-authority=ca.pem \
        --embed-certs=true \
        --server=${KUBE_APISERVER} \
        --kubeconfig=./controller-manager.conf
    
    # controller-manager set credentials
    $ kubectl config set-credentials system:kube-controller-manager \
        --client-certificate=controller-manager.pem \
        --client-key=controller-manager-key.pem \
        --embed-certs=true \
        --kubeconfig=./controller-manager.conf
    
    # controller-manager set context
    $ kubectl config set-context system:kube-controller-manager@kubernetes \
        --cluster=kubernetes \
        --user=system:kube-controller-manager \
        --kubeconfig=./controller-manager.conf
    
    # controller-manager set default context
    $ kubectl config use-context system:kube-controller-manager@kubernetes \
        --kubeconfig=./controller-manager.conf
    
    
    #--------------------------scheduler-----------------------------------
    # scheduler set cluster
    $ kubectl config set-cluster kubernetes \
        --certificate-authority=ca.pem \
        --embed-certs=true \
        --server=${KUBE_APISERVER} \
        --kubeconfig=./scheduler.conf
    
    # scheduler set credentials
    $ kubectl config set-credentials system:kube-scheduler \
        --client-certificate=scheduler.pem \
        --client-key=scheduler-key.pem \
        --embed-certs=true \
        --kubeconfig=./scheduler.conf
    
    # scheduler set context
    $ kubectl config set-context system:kube-scheduler@kubernetes \
        --cluster=kubernetes \
        --user=system:kube-scheduler \
        --kubeconfig=./scheduler.conf
    
    # scheduler use default context
    $ kubectl config use-context system:kube-scheduler@kubernetes \
        --kubeconfig=./scheduler.conf
    
    #--------------------------proxy-----------------------------------
    # proxy set cluster
    $ kubectl config set-cluster kubernetes \
        --certificate-authority=ca.pem \
        --embed-certs=true \
        --server=${KUBE_APISERVER} \
        --kubeconfig=./proxy.conf
    
    # proxy set credentials
    $ kubectl config set-credentials system:kube-proxy \
        --client-certificate=proxy.pem \
        --client-key=proxy-key.pem \
        --embed-certs=true \
        --kubeconfig=./proxy.conf
    
    # proxy set context
    $ kubectl config set-context system:kube-proxy@kubernetes \
        --cluster=kubernetes \
        --user=system:kube-proxy \
        --kubeconfig=./proxy.conf
    
    # proxy use default context
    $ kubectl config use-context system:kube-proxy@kubernetes \
        --kubeconfig=./proxy.conf
    
    #--------------------------kubelet-----------------------------------
    # With static TLS client certificates every node needs its own certificate (not used here). Shown for reference:
    #$ kubectl config set-cluster kubernetes \
    #        --certificate-authority=ca.pem \
    #       --embed-certs=true \
    #        --server=${KUBE_APISERVER} \
    #        --kubeconfig=./kubelet.conf 
    
    #$ kubectl config set-credentials system:node:${NODE} \
    #        --client-certificate=kubelet-${NODE}.pem \
    #        --client-key=kubelet-${NODE}-key.pem \
    #        --embed-certs=true \
    #        --kubeconfig=./kubelet.conf
    
    #$ kubectl config set-context system:node:${NODE}@kubernetes \
    #       --cluster=kubernetes \
    #        --user=system:node:${NODE} \
    #      --kubeconfig=./kubelet.conf 
    # $ kubectl config use-context system:node:${NODE}@kubernetes \
    #        --kubeconfig=../kubelet.conf && \
    
    # TLS bootstrapping instead lets kube-apiserver sign kubelet certificates dynamically (used here).
    # A token is generated first; the kubelet presents it when requesting a certificate from kube-apiserver, and once the token matches, the certificate is issued automatically.
    
    # generate the bootstrap token
    export BOOTSTRAP_TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
    echo "BOOTSTRAP_TOKEN:"${BOOTSTRAP_TOKEN}
    cat > token.csv <<EOF
    ${BOOTSTRAP_TOKEN},kubelet-bootstrap,10001,"system:kubelet-bootstrap"
    EOF
    
    $ kubectl config set-cluster kubernetes \
        --certificate-authority=ca.pem \
        --embed-certs=true \
        --server=${KUBE_APISERVER} \
        --kubeconfig=./bootstrap.conf
    
    # bootstrap set credentials
    $ kubectl config set-credentials kubelet-bootstrap \
        --token=${BOOTSTRAP_TOKEN} \
        --kubeconfig=./bootstrap.conf
    
    # bootstrap set context
    $ kubectl config set-context kubelet-bootstrap@kubernetes \
        --cluster=kubernetes \
        --user=kubelet-bootstrap \
        --kubeconfig=./bootstrap.conf
    
    # bootstrap use default context
    $ kubectl config use-context kubelet-bootstrap@kubernetes \
        --kubeconfig=./bootstrap.conf
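    
    A quick sanity check of the generated kubeconfig files (file names as created above):
    
    for conf in admin controller-manager scheduler proxy bootstrap; do
      echo "== ${conf}.conf =="
      kubectl config get-contexts --kubeconfig=./${conf}.conf
    done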
    
    

    3. Configure the systemd service files and start the components

    3.1 systemd service files

    etcd.service

    [Unit]
    Description=Etcd Server
    After=network.target
    After=network-online.target
    Wants=network-online.target
    
    [Service]
    Type=notify
    #User=etcd
    # set GOMAXPROCS to number of processors
    # ExecStartPre=/bin/bash -c "GOMAXPROCS=$(nproc)"
    ExecStart=/data/cloud/etcd/etcd \
    --name=master \
    --auto-tls=true \
    --heartbeat-interval=1000 \
    --election-timeout=5000 \
    --client-cert-auth=true \
    --ca-file=/data/cloud/ssl/ca.pem \
    --cert-file=/data/cloud/ssl/etcd.pem \
    --key-file=/data/cloud/ssl/etcd-key.pem \
    --trusted-ca-file=/data/cloud/ssl/ca.pem \
    --peer-client-cert-auth=true \
    --peer-cert-file=/data/cloud/ssl/etcd.pem \
    --peer-key-file=/data/cloud/ssl/etcd-key.pem \
    --peer-trusted-ca-file=/data/cloud/ssl/ca.pem \
    --initial-advertise-peer-urls=https://10.20.16.227:2380 \
    --listen-peer-urls=https://10.20.16.227:2380 \
    --listen-client-urls=https://10.20.16.227:2379,http://127.0.0.1:2379 \
    --advertise-client-urls=https://10.20.16.227:2379 \
    --initial-cluster-token=kubernetes \
    --initial-cluster=node1=https://10.20.16.227:2380,node3=https://10.20.16.228:2380,master=https://10.20.16.229:2380 \
    --initial-cluster-state=new \
    --data-dir=/data/cloud/work/etcd \
    --wal-dir=/data/cloud/work/etcd/wal
    Restart=on-failure
    LimitNOFILE=65536
    
    [Install]
    WantedBy=multi-user.target
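    
    Once etcd has been started on all three members (section 3.3), cluster health can be checked with etcdctl, using the endpoints and certificate paths configured above:
    
    export ETCDCTL_API=3
    etcdctl --endpoints=https://10.20.16.227:2379,https://10.20.16.228:2379,https://10.20.16.229:2379 \
      --cacert=/data/cloud/ssl/ca.pem --cert=/data/cloud/ssl/etcd.pem --key=/data/cloud/ssl/etcd-key.pem \
      endpoint health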
    

    kube-apiserver.service

    [Unit]
    Description=Kubernetes API Server
    Documentation=http://kubernetes.io/docs/
    After=network.target
    
    [Service]
    ExecStart=/data/cloud/kubernetes/bin/kube-apiserver \
        --v=2 \
        --log-dir=/data/cloud/work/kubernetes \
        --logtostderr=false \
        --allow-privileged=true \
        --bind-address=0.0.0.0 \
        --secure-port=6443 \
        --insecure-port=0 \
        --advertise-address=10.20.16.229 \
        --service-cluster-ip-range=10.254.0.0/16 \
        --service-node-port-range=30000-32767 \
        --etcd-servers=https://10.20.16.227:2379,https://10.20.16.228:2379,https://10.20.16.229:2379 \
        --etcd-cafile=/data/cloud/ssl/ca.pem \
        --etcd-certfile=/data/cloud/ssl/etcd.pem \
        --etcd-keyfile=/data/cloud/ssl/etcd-key.pem \
        --client-ca-file=/data/cloud/ssl/ca.pem \
        --tls-cert-file=/data/cloud/ssl/apiserver.pem \
        --tls-private-key-file=/data/cloud/ssl/apiserver-key.pem \
        --kubelet-client-certificate=/data/cloud/ssl/apiserver.pem \
        --kubelet-client-key=/data/cloud/ssl/apiserver-key.pem \
        --service-account-key-file=/data/cloud/ssl/sa.pub \
        --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \
        --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \
        --authorization-mode=Node,RBAC \
        --enable-bootstrap-token-auth=true \
        --token-auth-file=/data/cloud/ssl/token.csv \
        --requestheader-client-ca-file=/data/cloud/ssl/ca.pem \
        --proxy-client-cert-file=/data/cloud/ssl/front-proxy-client.pem \
        --proxy-client-key-file=/data/cloud/ssl/front-proxy-client-key.pem \
        --requestheader-allowed-names=aggregator \
        --requestheader-group-headers=X-Remote-Group \
        --requestheader-extra-headers-prefix=X-Remote-Extra- \
        --requestheader-username-headers=X-Remote-User \
        --audit-log-maxage=30 \
        --audit-log-maxbackup=3 \
        --audit-log-maxsize=100 \
        --audit-log-path=/data/cloud/work/kubernetes/audit.log \
        --event-ttl=1h 
     
    Restart=on-failure
    StartLimitInterval=0
    RestartSec=10
    Type=notify
    LimitNOFILE=65536
    
    [Install]
    WantedBy=multi-user.target
    

    kube-manager.service

    [Unit]
    Description=Kubernetes Controller Manager
    Documentation=http://kubernetes.io/docs/
    
    [Service]
    ExecStart=/data/cloud/kubernetes/bin/kube-controller-manager \
      --address=127.0.0.1 \
      --allocate-node-cidrs=true \
      --cluster-cidr=172.16.0.0/16 \
      --cluster-name=kubernetes \
      --cluster-signing-cert-file=/data/cloud/ssl/ca.pem \
      --cluster-signing-key-file=/data/cloud/ssl/ca-key.pem \
      --service-account-private-key-file=/data/cloud/ssl/sa.key \
      --root-ca-file=/data/cloud/ssl/ca.pem \
      --leader-elect=true \
      --use-service-account-credentials=true \
      --node-monitor-grace-period=40s \
      --node-monitor-period=5s \
      --pod-eviction-timeout=2m0s \
      --controllers=*,bootstrapsigner,tokencleaner \
      --log-dir=/data/cloud/work/kubernetes/controller-manager \
      --logtostderr=false \
      --kubeconfig=/data/cloud/ssl/controller-manager.conf \
      --v=2
      
    Restart=on-failure
    StartLimitInterval=0
    RestartSec=10
    
    [Install]
    WantedBy=multi-user.target
    

    kube-scheduler.service

    [Unit]
    Description=Kubernetes Scheduler
    Documentation=http://kubernetes.io/docs/
    
    [Service]
    ExecStart=/data/cloud/kubernetes/bin/kube-scheduler \
      --address=127.0.0.1 \
      --leader-elect=true \
      --logtostderr=false \
      --log-dir=/data/cloud/work/kubernetes/kube-scheduler \
      --kubeconfig=/data/cloud/ssl/scheduler.conf \
      --v=2
    
    Restart=on-failure
    StartLimitInterval=0
    RestartSec=10
    
    [Install]
    WantedBy=multi-user.target
    

    kubelet.service

    [Unit]
    Description=kubelet: The Kubernetes Node Agent
    Documentation=http://kubernetes.io/docs/
    
    [Service]
    ExecStart=/data/cloud/kubernetes/bin/kubelet \
      --fail-swap-on=false \
      --hostname-override=node3 \
      --pod-infra-container-image=k8s.gcr.io/pause:3.1 \
      --network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin \
      --kubeconfig=/data/cloud/ssl/kubelet.conf \
      --bootstrap-kubeconfig=/data/cloud/ssl/bootstrap.conf \
      --pod-manifest-path=/data/cloud/kubernetes/manifests \
      --allow-privileged=true \
      --cluster-dns=10.254.0.2 \
      --cluster-domain=cluster.kube. \
      --authorization-mode=Webhook \
      --client-ca-file=/data/cloud/ssl/ca.pem \
      --rotate-certificates=true \
      --cert-dir=/data/cloud/ssl \
      --cgroup-driver=cgroupfs \
      --serialize-image-pulls=false  \
      --root-dir=/data/cloud/work/kubernetes/kubelet \
      --log-dir=/data/cloud/work/kubernetes/kubelet \
      --logtostderr=false \
      --v=2
    
    Restart=on-failure
    StartLimitInterval=0
    RestartSec=10
    
    [Install]
    WantedBy=multi-user.target
    

    kube-proxy.service

    [Unit]
    Description=kube-proxy: The Kubernetes Network Proxy
    Documentation=http://kubernetes.io/docs/
    
    [Service]
    ExecStart=/data/cloud/kubernetes/bin/kube-proxy \
      --bind-address=10.239.7.253 \
      --hostname-override=node4 \
      --kubeconfig=/data/cloud/pki/proxy.conf \
      --proxy-mode=iptables \
      --v=2 \
      --logtostderr=false \
      --log-file=proxy.log \
      --log-dir=/data/cloud/work/kubernetes/logs
    
    Restart=on-failure
    StartLimitInterval=0
    RestartSec=10
    
    [Install]
    WantedBy=multi-user.target
    
    

    3.2 Install the CNI plugins

    mkdir -p /etc/cni/net.d /opt/cni/bin
    # copy this template so kubelet does not complain at startup that the network plugin is uninitialized; kube-router will rewrite it later
    cp 10-kuberouter.conf  /etc/cni/net.d
    # download the plugin package from https://github.com/containernetworking/plugins
    tar xf cni-plugins-amd64-v0.7.1.tgz -C /opt/cni/bin
    

    10-kuberouter.conf

    {"bridge":"kube-bridge","ipam":{"type":"host-local"},"isDefaultGateway":true,"name":"kubernetes","type":"bridge"}
    

    3.3 Start the components

    systemctl daemon-reload
    systemctl enable etcd kube-apiserver kube-manager kube-scheduler kubelet 
    systemctl start etcd kube-apiserver kube-manager kube-scheduler 
    # create the cluster role bindings
    kubectl create clusterrolebinding controller-node-clusterrolebing --clusterrole=system:kube-controller-manager  --user=system:kube-controller-manager
    kubectl create clusterrolebinding scheduler-node-clusterrolebing  --clusterrole=system:kube-scheduler --user=system:kube-scheduler
    kubectl create clusterrolebinding kubelet-bootstrap --clusterrole=system:node-bootstrapper --user=kubelet-bootstrap
    kubectl create clusterrolebinding kube-system-cluster-admin --user=system:serviceaccount:kube-system:default  --clusterrole=cluster-admin
    
    # start kubelet
    systemctl start kubelet
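    
    Once the control-plane units are up, a quick status check with the admin kubeconfig generated earlier:
    
    kubectl --kubeconfig=/data/cloud/ssl/admin.conf get componentstatuses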
    

    3.4 Approve the node CSRs

    For convenience, aliases are defined in the shell profile:

    If you need automatic approval, see the separate note on automatic CSR approval.

    export ETCDCTL_API=3
    alias etcdctl='etcdctl --endpoints=https://10.20.16.227:2379,https://10.20.16.228:2379,https://10.20.16.229:2379 --cacert=/data/cloud/ssl/ca.pem --cert=/data/cloud/ssl/etcd.pem --key=/data/cloud/ssl/etcd-key.pem'
    alias kubectl='kubectl --kubeconfig=/data/cloud/ssl/admin.conf'
    
    kubectl get csr | tail -n +2 | while read name number
    do
        kubectl  certificate approve  $name 
    done 
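    
    After approval the kubelets finish bootstrapping and register themselves; verify with:
    
    kubectl get csr
    kubectl get nodes -o wide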
    

    4. kube-router

    4.1 Overview

    The kube-router component replaces kube-proxy and uses LVS/IPVS for Service load balancing, which is faster and more stable.
    Cluster IPs and external IPs become routable across the whole network.
    It avoids the performance and rule-bloat problems of iptables,
    as well as the loss of the client source IP caused by iptables NAT.

    4.2 Flags and deployment

    ① kube-router gets pods, services, endpoints, network policies and similar information from the API server; all of its state comes from the API server. The API server address can be passed with kube-router --master=http://192.168.1.99:8080/ or kube-router --kubeconfig=<path to kubeconfig file>.

    ② To run kube-router as an agent on the nodes, ipset must be installed on every node beforehand (when run as a DaemonSet, ipset is already bundled in the image).

    ③ To provide pod-to-pod connectivity through kube-router, the controller manager must be configured with the CIDRs used to allocate pod IPs, via the following flags:

    --allocate-node-cidrs=true
    --cluster-cidr=10.1.0.0/16 
    

    ④ When kube-router runs as a DaemonSet, kube-apiserver must be started with

    --allow-privileged=true
    

    ⑤ Pod-to-pod connectivity through kube-router also requires the CNI network plugins, mainly the bridge and host-local plugins. A sample configuration is at https://raw.githubusercontent.com/cloudnativelabs/kube-router/master/cni/10-kuberouter.conf

    Flag reference

    --run-router=true
    # enable the pod network: advertise and learn routes to pods via iBGP (default true)
    --run-firewall=true
    # enable network policy: set up iptables to provide an ingress firewall for pods (default true)
    --run-service-proxy=true
    # enable the service proxy: set up IPVS for Kubernetes Services (default true)
    --advertise-cluster-ip=true
    # add the service's cluster IP to the RIB so that it is advertised to BGP peers
    --advertise-external-ip=true
    # add the service's external IPs to the RIB so that they are advertised to BGP peers
    --cluster-asn=64512
    # ASN used by the cluster's own nodes for iBGP
    --peer-router-ips=10.129.6.8
    # IPs of the external routers that every node peers with; cluster IPs and pod CIDRs are advertised to them (default [])
    --peer-router-asns=64513
    # ASNs of the BGP peers to which the cluster nodes advertise cluster IPs and per-node pod CIDRs (default [])
    

    Deploy it with kubectl apply -f kube-router.yml:

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: kube-router-cfg
      namespace: kube-system
      labels:
        tier: node
        k8s-app: kube-router
    data:
      cni-conf.json: |
        {
          "name":"kubernetes",
          "type":"bridge",
          "bridge":"kube-bridge",
          "isDefaultGateway":true,
          "ipam": {
            "type":"host-local"
          }
        }
    ---
    apiVersion: extensions/v1beta1
    kind: DaemonSet
    metadata:
      labels:
        k8s-app: kube-router
        tier: node
      name: kube-router
      namespace: kube-system
    spec:
      template:
        metadata:
          labels:
            k8s-app: kube-router
            tier: node
          annotations:
            scheduler.alpha.kubernetes.io/critical-pod: ''
        spec:
          serviceAccountName: kube-router
          serviceAccount: kube-router
          containers:
          - name: kube-router
            image: registry.ipscloud.com/kube-router:v1.0 # change to your own image
    #        imagePullPolicy: Always # commented out so the locally cached image is used
            args:
            - --run-router=true
            - --run-firewall=true
            - --run-service-proxy=true
            - --kubeconfig=/var/lib/kube-router/kubeconfig
            env:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            resources:
              requests:
                cpu: 250m
                memory: 250Mi
            securityContext:
              privileged: true
            volumeMounts:
            - name: lib-modules
              mountPath: /lib/modules
              readOnly: true
            - name: cni-conf-dir
              mountPath: /etc/cni/net.d
            - name: kubeconfig
              mountPath: /var/lib/kube-router/kubeconfig
            - name: run
              mountPath: /var/run/docker.sock
              readOnly: true
          initContainers:
          - name: install-cni
            image: registry.ipscloud.com/busybox:v1.0 # change to your own image
     #       imagePullPolicy: Always # commented out so the locally cached image is used
            command:
            - /bin/sh
            - -c
            - set -e -x;
              if [ ! -f /etc/cni/net.d/10-kuberouter.conf ]; then
                TMP=/etc/cni/net.d/.tmp-kuberouter-cfg;
                cp /etc/kube-router/cni-conf.json ${TMP};
                mv ${TMP} /etc/cni/net.d/10-kuberouter.conf;
              fi
            volumeMounts:
            - name: cni-conf-dir
              mountPath: /etc/cni/net.d
            - name: kube-router-cfg
              mountPath: /etc/kube-router
          hostNetwork: true
          hostIPC: true
          hostPID: true
          tolerations:
          - key: CriticalAddonsOnly
            operator: Exists
          - effect: NoSchedule
            key: node-role.kubernetes.io/master
            operator: Exists
          volumes:
          - name: lib-modules
            hostPath:
              path: /lib/modules
          - name: cni-conf-dir
            hostPath:
              path: /etc/cni/net.d
          - name: run
            hostPath:
              path: /var/run/docker.sock
          - name: kube-router-cfg
            configMap:
              name: kube-router-cfg
          - name: kubeconfig
            hostPath:
              path: /data/cloud/ssl/admin.conf # change to the location of your own kubeconfig
           # configMap:
            #  name: kube-proxy
             # items:
             # - key: kubeconfig.conf
             #   path: kubeconfig
    ---
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: kube-router
      namespace: kube-system
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1beta1
    metadata:
      name: kube-router
      namespace: kube-system
    rules:
      - apiGroups:
        - ""
        resources:
          - namespaces
          - pods
          - services
          - nodes
          - nodes/proxy
          - endpoints
        verbs:
          - list
          - get
          - watch
      - apiGroups:
        - "networking.k8s.io"
        resources:
          - networkpolicies
        verbs:
          - list
          - get
          - watch
      - apiGroups:
        - extensions
        resources:
          - networkpolicies
        verbs:
          - get
          - list
          - watch
    ---
    kind: ClusterRoleBinding
    apiVersion: rbac.authorization.k8s.io/v1beta1
    metadata:
      name: kube-router
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: kube-router
    subjects:
    - kind: ServiceAccount
      name: kube-router
      namespace: kube-system
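    
    After applying the manifest, confirm that the DaemonSet is running one pod per node:
    
    kubectl -n kube-system get daemonset kube-router
    kubectl -n kube-system get pod -l k8s-app=kube-router -o wide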
    

    4.3 Verification

    Network Services Controller

    # check the pod CIDR assigned to each node
    [root@host229 ~]# kubectl get nodes -o json | jq '.items[] | .spec'
    {
      "podCIDR": "172.16.1.0/24"
    }
    {
      "podCIDR": "172.16.3.0/24"
    }
    {
      "podCIDR": "172.16.2.0/24"
    }
    {
      "podCIDR": "172.16.0.0/24"
    }
    
    [root@host229 yaml]# kubectl run myip --image=cloudnativelabs/whats-my-ip --replicas=3 --port=8080
    deployment.apps/myip created
    [root@host229 yaml]# kubectl get pod -o wide
    NAME                    READY     STATUS    RESTARTS   AGE       IP           NODE
    myip-5fc5cf6476-jmjzh   1/1       Running   0          28s       172.16.0.2   host229
    myip-5fc5cf6476-qh546   1/1       Running   0          28s       172.16.1.2   host227
    myip-5fc5cf6476-z7ccm   1/1       Running   0          28s       172.16.2.2   host228
    [root@host229 yaml]# kubectl expose deployment myip --port=8080 --target-port=8080 --type=NodePort 
    #------------------- the service is reachable from every host ------------------
    [root@host229 yaml]# curl host227:31007
    HOSTNAME:myip-5fc5cf6476-qh546 IP:172.16.1.2
    [root@host229 yaml]# curl host228:31007
    HOSTNAME:myip-5fc5cf6476-z7ccm IP:172.16.2.2
    [root@host229 yaml]# curl host229:31007
    HOSTNAME:myip-5fc5cf6476-qh546 IP:172.16.1.2
    [root@host229 yaml]# ipvsadm -Ln
    IP Virtual Server version 1.2.1 (size=4096)
    Prot LocalAddress:Port Scheduler Flags
      -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
    TCP  10.20.16.229:31007 rr
      -> 172.16.0.2:8080              Masq    1      0          0         
      -> 172.16.1.2:8080              Masq    1      0          1         
      -> 172.16.2.2:8080              Masq    1      0          0         
    TCP  10.254.0.1:443 rr
      -> 10.20.16.229:6443            Masq    1      0          0         
    TCP  10.254.62.77:8080 rr
      -> 172.16.0.2:8080              Masq    1      0          0         
      -> 172.16.1.2:8080              Masq    1      0          0         
      -> 172.16.2.2:8080              Masq    1      0          0 
     # ---------------------- delete the svc and recreate it with session persistence --------
    [root@host229 yaml]# kubectl get svc -o wide
    NAME         TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE       SELECTOR
    kubernetes   ClusterIP   10.254.0.1     <none>        443/TCP          52m       <none>
    myip         NodePort    10.254.62.77   <none>        8080:31007/TCP   4m        run=myip
    [root@host229 yaml]# kubectl delete svc myip
    service "myip" deleted
    [root@host229 yaml]#  kubectl expose deployment myip --port=8080 --target-port=8080 --type=NodePort  --session-affinity=ClientIP
    service/myip exposed
    #------------------ requests from the same client now always hit the same pod -------------
    [root@host229 yaml]# curl 10.254.102.139:8080
    HOSTNAME:myip-5fc5cf6476-z7ccm IP:172.16.2.2
    [root@host229 yaml]# curl 10.254.102.139:8080
    HOSTNAME:myip-5fc5cf6476-z7ccm IP:172.16.2.2
    [root@host229 yaml]# curl 10.254.102.139:8080
    HOSTNAME:myip-5fc5cf6476-z7ccm IP:172.16.2.2
    [root@host229 yaml]# curl 10.254.102.139:8080
    HOSTNAME:myip-5fc5cf6476-z7ccm IP:172.16.2.2
    

    Network Policy Controller (iptables-based)

     # create two namespaces: production and staging
    [root@host229 ~]# kubectl create namespace production
    namespace/production created
    [root@host229 ~]# kubectl create namespace staging
    namespace/staging created
     # deploy guestbook-all-in-one.yaml in both namespaces
     # the sample YAML is available in the kubernetes examples repo, see https://github.com/kubernetes/examples/tree/master/guestbook/all-in-one
    [root@host229 yaml]# kubectl  -n production apply -f guestbook-all-in-one.yml 
    service/redis-master created
    deployment.apps/redis-master created
    service/redis-slave created
    deployment.apps/redis-slave created
    service/frontend created
    deployment.apps/frontend created
    [root@host229 yaml]# kubectl  -n staging  apply -f guestbook-all-in-one.yml 
    service/redis-master created
    deployment.apps/redis-master created
    service/redis-slave created
    deployment.apps/redis-slave created
    service/frontend created
    deployment.apps/frontend created
    [root@host229 ~]# kubectl get pod --all-namespaces -o wide
    NAMESPACE     NAME                            READY     STATUS    RESTARTS   AGE       IP             NODE
    kube-system   kube-router-49nc6               1/1       Running   0          2h        10.20.16.229   host229
    kube-system   kube-router-lqzmq               1/1       Running   0          2h        10.20.16.228   host228
    kube-system   kube-router-nd46r               1/1       Running   0          2h        10.20.16.227   host227
    kube-system   kube-router-vvdp8               1/1       Running   0          2h        10.20.16.214   host214
    production    frontend-56f7975f44-654lz       1/1       Running   0          43m       172.16.2.3     host228
    production    frontend-56f7975f44-jlxcd       1/1       Running   0          43m       172.16.0.4     host229
    production    frontend-56f7975f44-s5wrg       1/1       Running   0          43m       172.16.1.3     host227
    production    redis-master-6b464554c8-cwbk2   1/1       Running   0          43m       172.16.3.2     host214
    production    redis-slave-b58dc4644-lqgrm     1/1       Running   0          43m       172.16.3.3     host214
    production    redis-slave-b58dc4644-wmqzc     1/1       Running   0          43m       172.16.0.3     host229
    staging       frontend-56f7975f44-7kncx       1/1       Running   0          40m       172.16.0.5     host229
    staging       frontend-56f7975f44-stvsq       1/1       Running   0          40m       172.16.1.5     host227
    staging       frontend-56f7975f44-zm7km       1/1       Running   0          40m       172.16.2.5     host228
    staging       redis-master-6b464554c8-rjl68   1/1       Running   0          40m       172.16.2.4     host228
    staging       redis-slave-b58dc4644-744sz     1/1       Running   0          40m       172.16.3.4     host214
    staging       redis-slave-b58dc4644-kv859     1/1       Running   0          40m       172.16.1.4     host227
    # ----------- same namespace -----------
    [root@host229 ~]# kubectl -n production  exec -it frontend-56f7975f44-654lz ping 172.16.3.2 
    PING 172.16.3.2 (172.16.3.2): 56 data bytes
    64 bytes from 172.16.3.2: icmp_seq=0 ttl=62 time=0.535 ms
    64 bytes from 172.16.3.2: icmp_seq=1 ttl=62 time=0.227 ms
    64 bytes from 172.16.3.2: icmp_seq=2 ttl=62 time=0.231 ms
    # ----------- different namespaces -----------
    [root@host229 ~]# kubectl -n production  exec -it frontend-56f7975f44-654lz ping 172.16.2.4
    PING 172.16.2.4 (172.16.2.4): 56 data bytes
    64 bytes from 172.16.2.4: icmp_seq=0 ttl=64 time=0.206 ms
    64 bytes from 172.16.2.4: icmp_seq=1 ttl=64 time=0.085 ms
    64 bytes from 172.16.2.4: icmp_seq=2 ttl=64 time=0.072 ms
    

    Network isolation policies

    # allow all ingress and egress
    apiVersion: networking.k8s.io/v1
    kind: NetworkPolicy
    metadata:
      name: allow-all
    spec:
      podSelector: {}
      ingress:
      - {}
      egress:
      - {}
    ---
    # deny all ingress and egress
    apiVersion: networking.k8s.io/v1
    kind: NetworkPolicy
    metadata:
      name: default-deny
    spec:
      podSelector: {}
      policyTypes:
       - Ingress
       - Egress
    ---
    # allow only specific ingress and egress
    apiVersion: networking.k8s.io/v1
    kind: NetworkPolicy
    metadata:
      name: default-deny
    spec:
      podSelector: {}
      policyTypes:
      - Egress
    ---
    apiVersion: networking.k8s.io/v1
    kind: NetworkPolicy
    metadata:
      name: test-network-policy
      namespace: default
    spec:
      podSelector:
        matchLabels:
          role: db
      policyTypes:
      - Ingress
      - Egress
      ingress:
      - from:
        - ipBlock:
            cidr: 172.17.0.0/16
            except:
            - 172.17.1.0/24
        - namespaceSelector:
            matchLabels:
              project: myproject
        - podSelector:
            matchLabels:
              role: frontend
        ports:
        - protocol: TCP
          port: 6379
      egress:
      - to:
        - ipBlock:
            cidr: 10.0.0.0/24
        ports:
        - protocol: TCP
          port: 5978
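    
    To see a policy take effect, apply one of the manifests above to a namespace and repeat the cross-namespace ping test (the file name default-deny.yml is only illustrative):
    
    kubectl -n production apply -f default-deny.yml
    # the ping from the production frontend to the staging pod should now fail
    kubectl -n production exec -it frontend-56f7975f44-654lz ping 172.16.2.4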
    

    Network Routes Controller

    ① Enable hairpin traffic
    kubectl annotate service my-service "kube-router.io/service.hairpin="
    ② Enable DSR (DSR only applies to external IPs)
    kubectl annotate service my-service "kube-router.io/service.dsr=tunnel"
    ③ Load-balancing scheduling algorithm (round robin by default)
    # least connections
    kubectl annotate service my-service "kube-router.io/service.scheduler=lc"
    # round robin
    kubectl annotate service my-service "kube-router.io/service.scheduler=rr"
    # source hashing
    kubectl annotate service my-service "kube-router.io/service.scheduler=sh"
    # destination hashing
    kubectl annotate service my-service "kube-router.io/service.scheduler=dh"
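    
    The effect of the scheduler annotation can be confirmed on any node with ipvsadm: the virtual server entries for the service switch from rr to the chosen algorithm (lc, sh or dh):
    
    ipvsadm -Ln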
    

    5. Ingress

    1. The Ingress SLB backend only attaches cluster nodes labeled node-role.kubernetes.io/ingress=true;
    2. Ingress pods are only scheduled onto cluster nodes labeled node-role.kubernetes.io/ingress=true;
    3. Deploying Ingress pods on the master nodes is not recommended: the masters host all of the cluster's control-plane services, and heavy ingress traffic could affect them.

    # 1. label the hosts
    [root@host229 yaml]# kubectl label no  host214 node-role.kubernetes.io/ingress=true
    node/host214 labeled
    [root@host229 yaml]# kubectl label no  host227 node-role.kubernetes.io/ingress=true
    node/host227 labeled
    [root@host229 yaml]# kubectl label no  host228 node-role.kubernetes.io/ingress=true
    node/host228 labeled
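    # 2. optional check: list the nodes carrying the ingress label
    kubectl get nodes -l node-role.kubernetes.io/ingress=true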
    
    

    6. CoreDNS

    6.1 Install

    Services are resolvable as <service_name>.<namespace>.svc.<domain>.

    The kubernetes plugin line in the ConfigMap below (kubernetes cluster.kube 10.254.0.0/16) must stay in the same network segment as the clusterIP of the Service.

    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: coredns
      namespace: kube-system
      labels:
          kubernetes.io/cluster-service: "true"
          addonmanager.kubernetes.io/mode: Reconcile
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      labels:
        kubernetes.io/bootstrapping: rbac-defaults
        addonmanager.kubernetes.io/mode: Reconcile
      name: system:coredns
    rules:
    - apiGroups:
      - ""
      resources:
      - endpoints
      - services
      - pods
      - namespaces
      verbs:
      - list
      - watch
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      annotations:
        rbac.authorization.kubernetes.io/autoupdate: "true"
      labels:
        kubernetes.io/bootstrapping: rbac-defaults
        addonmanager.kubernetes.io/mode: EnsureExists
      name: system:coredns
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:coredns
    subjects:
    - kind: ServiceAccount
      name: coredns
      namespace: kube-system
    ---
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: coredns
      namespace: kube-system
      labels:
          addonmanager.kubernetes.io/mode: EnsureExists
    data:
      Corefile: |
        .:53 {
            errors
            log stdout
            health
            kubernetes cluster.kube 10.254.0.0/16
            prometheus
            proxy . /etc/resolv.conf
            cache 30
        }
    ---
    apiVersion: extensions/v1beta1
    kind: Deployment
    metadata:
      name: coredns
      namespace: kube-system
      labels:
        k8s-app: coredns
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
        kubernetes.io/name: "CoreDNS"
    spec:
      replicas: 1
      selector:
        matchLabels:
          k8s-app: coredns
      template:
        metadata:
          labels:
            k8s-app: coredns
        spec:
          serviceAccountName: coredns
          tolerations:
            - key: node-role.kubernetes.io/master
              effect: NoSchedule
            - key: "CriticalAddonsOnly"
              operator: "Exists"
          containers:
          - name: coredns
            image: registry.ipscloud.com/coredns:latest
            imagePullPolicy: IfNotPresent
            resources:
              limits:
                memory: 500Mi
              requests:
                cpu: 500m
                memory: 500Mi
            args: [ "-conf", "/etc/coredns/Corefile" ]
            volumeMounts:
            - name: config-volume
              mountPath: /etc/coredns
            ports:
            - containerPort: 53
              name: dns
              protocol: UDP
            - containerPort: 53
              name: dns-tcp
              protocol: TCP
            - containerPort: 9153
              name: metrics
              protocol: TCP
            livenessProbe:
              httpGet:
                path: /health
                port: 8080
                scheme: HTTP
              initialDelaySeconds: 60
              timeoutSeconds: 5
              successThreshold: 1
              failureThreshold: 5
          dnsPolicy: Default
          volumes:
            - name: config-volume
              configMap:
                name: coredns
                items:
                - key: Corefile
                  path: Corefile
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: coredns
      namespace: kube-system
      labels:
        k8s-app: coredns
        kubernetes.io/cluster-service: "true"
        addonmanager.kubernetes.io/mode: Reconcile
        kubernetes.io/name: "CoreDNS"
    spec:
      selector:
        k8s-app: coredns
      clusterIP: 10.254.0.2
      ports:
      - name: dns
        port: 53
        protocol: UDP
      - name: dns-tcp
        port: 53
        protocol: TCP
      - name: metrics
        port: 9153
        protocol: TCP
    

    6.2 Check the CoreDNS logs

    [root@host-10-1-235-89 ssl]# kubectl --kubeconfig=admin.conf -n kube-system logs coredns-77b9b6d8f4-jb4bc
    .:53
    2018/08/09 08:49:36 [INFO] CoreDNS-1.2.0
    2018/08/09 08:49:36 [INFO] linux/amd64, go1.10.3, 2e322f6
    CoreDNS-1.2.0
    linux/amd64, go1.10.3, 2e322f6
    

    6.3 Test

    Create an nginx Service and Pod: kubectl --kubeconfig=admin.conf create -f /nginx.yaml

    apiVersion: v1
    kind: Pod
    metadata:
      name: nginx
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: registry.ipscloud.com/nginx:1.13.8
        ports:
        - containerPort: 80
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: nginx
    spec:
      ports:
      - port: 80
        targetPort: 80
        protocol: TCP
      selector:
        app: nginx
    

    Check the nginx pods

    [root@host-10-1-235-89 ssl]# kubectl --kubeconfig=admin.conf get pod
    NAME           READY     STATUS    RESTARTS   AGE
    nginx          1/1       Running   0          5m
    nginx-master   1/1       Running   0          5m
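    
    A DNS lookup from inside the cluster should now resolve the service name. One way to check, assuming a busybox image is reachable by the cluster and the kubelet --cluster-domain matches the CoreDNS zone:
    
    kubectl --kubeconfig=admin.conf run -it --rm dns-test --image=busybox --restart=Never -- nslookup nginx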
    
    

    Troubleshooting

    1. Unable to register node "node1" with API server: nodes "node1" is forbidden: node "master" cannot modify node "node1"

    Fix: delete the certificates the apiserver previously issued to this kubelet.
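    
    A minimal sketch of that cleanup, assuming the cert dir and kubeconfig paths used in this guide (exact file names vary by kubelet version):
    
    systemctl stop kubelet
    rm -f /data/cloud/ssl/kubelet.conf /data/cloud/ssl/kubelet-client* /data/cloud/ssl/kubelet.crt /data/cloud/ssl/kubelet.key
    systemctl start kubelet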
    

    2. Error from server (Forbidden): Forbidden (user=kube-apiserver, verb=get, resource=nodes, subresource=proxy) ( pods/log coredns-7c9d9954f8-9cf6h)

    Fix:
    wget https://kairen.github.io/files/manual-v1.10/master/apiserver-to-kubelet-rbac.yml.conf -O apiserver-to-kubelet-rbac.yml
    kubectl --kubeconfig=admin.conf -n kube-system create -f  apiserver-to-kubelet-rbac.yml
    Or apply the following manifest:
    # This binding gives the kube-apiserver user full access to the kubelet API
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: kube-apiserver-kubelet-api-admin
      labels:
        addonmanager.kubernetes.io/mode: Reconcile
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: kubelet-api-admin
    subjects:
    - apiGroup: rbac.authorization.k8s.io
      kind: User
      name: kube-apiserver
    ---
    # This role allows full access to the kubelet API
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      name: kubelet-api-admin
      labels:
        addonmanager.kubernetes.io/mode: Reconcile
    rules:
    - apiGroups:
      - ""
      resources:
      - nodes/proxy
      - nodes/log
      - nodes/stats
      - nodes/metrics
      - nodes/spec
      verbs:
      - "*"
    

    3. replicasets.apps is forbidden: User "system:kube-controller-manager" cannot list replicasets.apps at the cluster scope

    $ kubectl --kubeconfig=admin.conf create clusterrolebinding  controller-node-clusterrolebing   --clusterrole=system:controller:node-controller --user=system:kube-scheduler
    

    4. error: unable to upgrade connection: Forbidden (user=kube-apiserver, verb=create, resource=nodes, subresource=proxy)
    Create the apiserver-to-kubelet permissions; some Kubernetes versions add this binding by default:

    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      annotations:
        rbac.authorization.kubernetes.io/autoupdate: "true"
      labels:
        kubernetes.io/bootstrapping: rbac-defaults
      name: system:kube-apiserver-to-kubelet
    rules:
      - apiGroups:
          - ""
        resources:
          - nodes/proxy
          - nodes/stats
          - nodes/log
          - nodes/spec
          - nodes/metrics
        verbs:
          - "*"
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: system:kube-apiserver
      namespace: ""
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:kube-apiserver-to-kubelet
    subjects:
      - apiGroup: rbac.authorization.k8s.io
        kind: User
        name: kube-apiserver
    
