1. Production Kubernetes Platform Planning
Tools used during deployment:
Link: https://pan.baidu.com/s/1Hbr1bc5vlgBGYvTc6psodQ
Extraction code: hlz2
1.1 Single-Master Cluster
[Figure: single-Master cluster architecture]
1.2 Multi-Master Cluster (HA)
[Figure: multi-Master (HA) cluster architecture]
1.3 Environment Preparation
2. Recommended Server Hardware Configuration
3. Three Official Deployment Methods
- minikube
Minikube is a tool that quickly runs a single-node Kubernetes locally; it is intended only for trying out Kubernetes or for day-to-day development.
Deployment docs: https://kubernetes.io/docs/setup/minikube/
- kubeadm
Kubeadm is also a tool; it provides kubeadm init and kubeadm join for quickly deploying a Kubernetes cluster.
Deployment docs: https://kubernetes.io/docs/reference/setup-tools/kubeadm/kubeadm/
- Binary packages
Recommended: download the official release binaries and deploy each component manually to assemble the Kubernetes cluster.
Download: https://github.com/kubernetes/kubernetes/releases
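For reference, the kubeadm path (not used in the rest of this guide) boils down to two commands; this is only a sketch, with placeholder token/hash values:
# On the master:
kubeadm init --pod-network-cidr=10.244.0.0/16
# On each node, using the token and CA hash printed by kubeadm init:
kubeadm join <master-ip>:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>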
4. Operating System Initialization
Disable the firewall:
systemctl stop firewalld
systemctl disable firewalld
Disable SELinux:
setenforce 0
sed -i 's/SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
Disable swap:
swapoff -a
vim /etc/fstab ---comment out the swap line so swap stays disabled after reboot
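swapoff -a only disables swap until the next reboot; the /etc/fstab edit makes it permanent. A non-interactive equivalent of the vim edit above (a sketch; it comments out every line mentioning swap, so review the file afterwards):
sed -ri 's/.*swap.*/#&/' /etc/fstab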
Synchronize the system time:
ntpdate time.windows.com
Add hosts entries (see the distribution loop after the list):
vim /etc/hosts
192.168.9.30 master1
192.168.9.31 master2
192.168.9.32 node1
192.168.9.35 node2
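The same entries are needed on every machine. Assuming root SSH access between the nodes, one way to push the file out:
for ip in 192.168.9.31 192.168.9.32 192.168.9.35; do scp /etc/hosts root@$ip:/etc/hosts; done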
5. Etcd Cluster
5.1 Self-Signed Etcd SSL Certificates
The two tools commonly used to generate certificates are openssl and cfssl (recommended).
Installing cfssl
GitHub: https://github.com/cloudflare/cfssl
Official site: https://pkg.cfssl.org/
[root@master1 ~]# ls ---upload the required tool package
anaconda-ks.cfg TLS.tar.gz
[root@master1 ~]# tar zxvf TLS.tar.gz
[root@master1 ~]# cd TLS
[root@master1 TLS]# ls
cfssl cfssl-certinfo cfssljson cfssl.sh etcd k8s
[root@master1 TLS]# cat cfssl.sh ---copies the three pre-downloaded binaries into place and makes them executable
#curl -L https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o /usr/local/bin/cfssl
#curl -L https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o /usr/local/bin/cfssljson
#curl -L https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -o /usr/local/bin/cfssl-certinfo
cp -rf cfssl cfssl-certinfo cfssljson /usr/local/bin
chmod +x /usr/local/bin/cfssl*
[root@master1 TLS]# sh cfssl.sh ---run the script
[root@master1 TLS]# ll /usr/local/bin/ ---the cfssl tools are now installed
total 18808
-rwxr-xr-x. 1 root root 10376657 Jun 28 21:04 cfssl
-rwxr-xr-x. 1 root root 6595195 Jun 28 21:04 cfssl-certinfo
-rwxr-xr-x. 1 root root 2277873 Jun 28 21:04 cfssljson
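As a quick sanity check that the binaries actually run:
cfssl version ---prints the version and revision if the install succeeded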
[root@master1 TLS]# cd etcd/ ---to generate the certificates, enter the etcd directory
[root@master1 etcd]# ll ---the certificates are generated from these JSON files
total 16
-rw-r--r--. 1 root root 287 Oct 3 2019 ca-config.json ---CA signing configuration
-rw-r--r--. 1 root root 209 Oct 3 2019 ca-csr.json ---CA certificate signing request
-rwxr-xr-x. 1 root root 178 Oct 3 2019 generate_etcd_cert.sh ---script that creates the certificates
-rw-r--r--. 1 root root 306 Oct 3 2019 server-csr.json ---details of the certificate to be issued: hostnames/IPs, locality, etc.
[root@master1 etcd]# cat generate_etcd_cert.sh
cfssl gencert -initca ca-csr.json | cfssljson -bare ca - ---initialize a self-hosted CA from ca-csr.json
cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server ---issue the server certificate
[root@master1 etcd]# cfssl gencert -initca ca-csr.json | cfssljson -bare ca - ---first, create our own CA
2020/06/28 21:17:35 [INFO] generating a new CA key and certificate from CSR
2020/06/28 21:17:35 [INFO] generate received request
2020/06/28 21:17:35 [INFO] received CSR
2020/06/28 21:17:35 [INFO] generating key: rsa-2048
2020/06/28 21:17:36 [INFO] encoded CSR
2020/06/28 21:17:36 [INFO] signed certificate with serial number 308444436325849675507235681448446863499900640999
[root@master1 etcd]# ls *pem ---the files were created successfully
ca-key.pem ca.pem
[root@master1 etcd]# vi server-csr.json ---set the hostnames/IPs the certificate will be issued for
{
"CN": "etcd",
"hosts": [ ---包含所有etcd节点的ip
"192.168.9.30", ---master1
"192.168.9.32", ---node1
"192.168.9.35" ---node2
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing"
}
]
}
[root@master1 etcd]# cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=www server-csr.json | cfssljson -bare server
---issue the server certificate
2020/06/28 22:14:21 [INFO] generate received request
2020/06/28 22:14:21 [INFO] received CSR
2020/06/28 22:14:21 [INFO] generating key: rsa-2048
2020/06/28 22:14:21 [INFO] encoded CSR
2020/06/28 22:14:21 [INFO] signed certificate with serial number 539684600444005209434902557496326850860873522014
2020/06/28 22:14:21 [WARNING] This certificate lacks a "hosts" field. This makes it unsuitable for
websites. For more information see the Baseline Requirements for the Issuance and Management
of Publicly-Trusted Certificates, v.1.1.6, from the CA/Browser Forum (https://cabforum.org);
specifically, section 10.2.3 ("Information Requirements").
[root@master1 etcd]# ll server*pem
-rw-------. 1 root root 1679 Jun 28 22:14 server-key.pem ---the private key (equivalent of a .key file)
-rw-r--r--. 1 root root 1338 Jun 28 22:14 server.pem ---the certificate (equivalent of a .crt file)
[root@master1 etcd]# cat ca-config.json
{
"signing": {
"default": {
"expiry": "87600h" ---默认证书过期时间是10年
},
"profiles": {
"www": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
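Once a certificate has been issued, its contents (expiry, hosts field) can be double-checked with either tool, for example:
cfssl-certinfo -cert server.pem ---dumps the certificate as JSON; not_after shows the expiry
openssl x509 -in server.pem -noout -dates ---or check the validity dates with openssl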
5.2 Deploying the Etcd Cluster
etcd is an open-source key/value store from CoreOS, used mainly for service registration/discovery and shared configuration. etcd also serves as the backing store for Kubernetes.
Clusters of 3, 5, or 7 nodes are the recommended sizes: 3 nodes tolerate 1 failure, 5 tolerate 2, and so on (see the quorum figures below). The cluster automatically elects a leader; writes go to the leader and are replicated to the followers. With fewer than 3 nodes there is no fault tolerance, since a single failure leaves no majority to elect a leader. The Raft protocol is worth studying in depth.
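The tolerance figures follow from Raft's majority (quorum) rule: quorum = floor(n/2) + 1, and the cluster stays available as long as a quorum of members is up:
n=3: quorum 2, tolerates 1 failure
n=5: quorum 3, tolerates 2 failures
n=7: quorum 4, tolerates 3 failures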
[root@master1 ~]# ls ---prepare the etcd package
anaconda-ks.cfg etcd.tar.gz TLS TLS.tar.gz
[root@master1 ~]# tar zxvf etcd.tar.gz
[root@master1 ~]# ls ---after extraction there are two new items: the etcd directory and etcd.service
anaconda-ks.cfg etcd etcd.service etcd.tar.gz TLS TLS.tar.gz
[root@master1 etcd]# ls bin/ ---the etcd server binary and the etcdctl client
etcd etcdctl
[root@master1 etcd]# ls cfg/ ---configuration file
etcd.conf
[root@master1 etcd]# ls ssl/ ---bundled certificate files; delete them first, then use the ones generated earlier
ca.pem server-key.pem server.pem
[root@master1 etcd]# rm ssl/* -f
[root@master1 etcd]# cd cfg/
[root@master1 cfg]# ls
etcd.conf
[root@master1 cfg]# cat etcd.conf ---inspect the configuration file
#[Member] ---per-member settings
ETCD_NAME="etcd-1" ---member name; must be unique within the cluster
ETCD_DATA_DIR="/var/lib/etcd/default.etcd" ---directory where etcd stores its data
ETCD_LISTEN_PEER_URLS="https://192.168.9.30:2380" ---peer listen address for intra-cluster traffic (this machine's IP)
ETCD_LISTEN_CLIENT_URLS="https://192.168.9.30:2379" ---client listen address; applications connect here (this machine's IP)
#[Clustering] ---clustering settings
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.9.30:2380" ---peer address advertised for intra-cluster traffic (this machine's IP)
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.9.30:2379" ---client address advertised to the cluster (this machine's IP)
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.9.30:2380,etcd-2=https://192.168.9.32:2380,etcd-3=https://192.168.9.35:2380"
---peer URLs of all cluster members; the cluster is assembled from this list
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster" ---cluster token; may be customized
ETCD_INITIAL_CLUSTER_STATE="new" ---use new for a fresh cluster, existing when joining one that already exists
[root@master1 ~]# cat etcd.service ---systemd unit file for the etcd service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
[Service]
Type=notify
EnvironmentFile=/opt/etcd/cfg/etcd.conf ---loads the config file; the variables below come from it
ExecStart=/opt/etcd/bin/etcd \ ---path of the etcd binary; the etcd directory must be moved here (/opt/etcd)
--name=${ETCD_NAME} \
--data-dir=${ETCD_DATA_DIR} \
--listen-peer-urls=${ETCD_LISTEN_PEER_URLS} \
--listen-client-urls=${ETCD_LISTEN_CLIENT_URLS},http://127.0.0.1:2379 \
--advertise-client-urls=${ETCD_ADVERTISE_CLIENT_URLS} \
--initial-advertise-peer-urls=${ETCD_INITIAL_ADVERTISE_PEER_URLS} \
--initial-cluster=${ETCD_INITIAL_CLUSTER} \
--initial-cluster-token=${ETCD_INITIAL_CLUSTER_TOKEN} \
--initial-cluster-state=new \
# Client traffic and intra-cluster (peer) traffic use the same certificate set; each just needs its own flags
--cert-file=/opt/etcd/ssl/server.pem \ ---certificate for client connections (server.pem)
--key-file=/opt/etcd/ssl/server-key.pem \ ---key for client connections (server-key.pem)
--peer-cert-file=/opt/etcd/ssl/server.pem \ ---certificate for intra-cluster peer connections (server.pem)
--peer-key-file=/opt/etcd/ssl/server-key.pem \ ---key for intra-cluster peer connections (server-key.pem)
--trusted-ca-file=/opt/etcd/ssl/ca.pem \ ---CA for client connections (ca.pem); can be omitted if issued by a public CA
--peer-trusted-ca-file=/opt/etcd/ssl/ca.pem ---CA for intra-cluster peer connections (ca.pem); can be omitted if issued by a public CA
Restart=on-failure
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
# Copy the certificates into etcd/ssl/ (still under /root; the whole directory is copied to /opt below)
[root@master1 cfg]# cp /root/TLS/etcd/{ca,server,server-key}.pem /root/etcd/ssl/
[root@master1 cfg]# cd
[root@master1 ~]# scp -r etcd root@192.168.9.30:/opt/ ---copy into the working directory on this machine
[root@master1 ~]# scp -r etcd root@192.168.9.32:/opt/
[root@master1 ~]# scp -r etcd root@192.168.9.35:/opt/
[root@master1 ~]# scp etcd.service root@192.168.9.30:/usr/lib/systemd/system/ ---the directory where systemd unit files live
[root@master1 ~]# scp etcd.service root@192.168.9.32:/usr/lib/systemd/system/
[root@master1 ~]# scp etcd.service root@192.168.9.35:/usr/lib/systemd/system/
# Edit the master1 config: set the IPs to this machine's own IP and the member name to its own name
[root@master1 ~]# vi /opt/etcd/cfg/etcd.conf
#[Member]
ETCD_NAME="etcd-1"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.9.30:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.9.30:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.9.30:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.9.30:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.9.30:2380,etcd-2=https://192.168.9.32:2380,etcd-3=https://192.168.9.35:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
# Edit the node1 config likewise: this machine's IPs and its own member name
[root@node1 ~]# vi /opt/etcd/cfg/etcd.conf
#[Member]
ETCD_NAME="etcd-2"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.9.32:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.9.32:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.9.32:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.9.32:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.9.30:2380,etcd-2=https://192.168.9.32:2380,etcd-3=https://192.168.9.35:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
# Edit the node2 config likewise: this machine's IPs and its own member name
[root@node2 ~]# vi /opt/etcd/cfg/etcd.conf
#[Member]
ETCD_NAME="etcd-3"
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
ETCD_LISTEN_PEER_URLS="https://192.168.9.35:2380"
ETCD_LISTEN_CLIENT_URLS="https://192.168.9.35:2379"
#[Clustering]
ETCD_INITIAL_ADVERTISE_PEER_URLS="https://192.168.9.35:2380"
ETCD_ADVERTISE_CLIENT_URLS="https://192.168.9.35:2379"
ETCD_INITIAL_CLUSTER="etcd-1=https://192.168.9.30:2380,etcd-2=https://192.168.9.32:2380,etcd-3=https://192.168.9.35:2380"
ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_INITIAL_CLUSTER_STATE="new"
# Start the service and enable it at boot
[root@master1 ~]# ls /usr/lib/systemd/system/etcd.service ---confirm etcd.service is under the systemd unit directory
[root@master1 ~]# systemctl daemon-reload ---reload the systemd unit files
[root@master1 ~]# systemctl start etcd ---start etcd
[root@master1 ~]# systemctl enable etcd
Created symlink from /etc/systemd/system/multi-user.target.wants/etcd.service to /usr/lib/systemd/system/etcd.service.
[root@node1 ~]# systemctl daemon-reload
[root@node1 ~]# systemctl start etcd
[root@node1 ~]# systemctl enable etcd
Created symlink from /etc/systemd/system/multi-user.target.wants/etcd.service to /usr/lib/systemd/system/etcd.service.
[root@node2 ~]# systemctl daemon-reload
[root@node2 ~]# systemctl start etcd
[root@node2 ~]# systemctl enable etcd
Created symlink from /etc/systemd/system/multi-user.target.wants/etcd.service to /usr/lib/systemd/system/etcd.service.
# Check the cluster health
[root@master1 ~]# /opt/etcd/bin/etcdctl \
> --ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem \
> --endpoints="https://192.168.9.30:2379,https://192.168.9.32:2379,https://192.168.9.35:2379" \
> cluster-health
member 14528a1741b0792c is healthy: got healthy result from https://192.168.9.30:2379
member 251e4440e9736da1 is healthy: got healthy result from https://192.168.9.35:2379
member 25c7355cfac3dcd7 is healthy: got healthy result from https://192.168.9.32:2379
cluster is healthy
---all members are healthy
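With the same TLS flags, the member list (IDs, names, peer and client URLs) can be inspected as well; a sketch using the bundled v2 etcdctl:
/opt/etcd/bin/etcdctl \
--ca-file=/opt/etcd/ssl/ca.pem --cert-file=/opt/etcd/ssl/server.pem --key-file=/opt/etcd/ssl/server-key.pem \
--endpoints="https://192.168.9.30:2379,https://192.168.9.32:2379,https://192.168.9.35:2379" \
member list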
6. Deploying the Master Components
6.1 Self-Signed API Server SSL Certificate
The API server is the entry point to the whole cluster; the other components can start only after it is up. Access is over HTTPS, so a self-signed certificate is needed.
[root@master1 ~]# cd TLS/k8s/
[root@master1 k8s]# ll
total 20
-rw-r--r--. 1 root root 294 Oct 3 2019 ca-config.json
-rw-r--r--. 1 root root 263 Oct 3 2019 ca-csr.json
-rwxr-xr-x. 1 root root 321 Oct 3 2019 generate_k8s_cert.sh
-rw-r--r--. 1 root root 230 Oct 3 2019 kube-proxy-csr.json
-rw-r--r--. 1 root root 718 Oct 3 2019 server-csr.json ---certificate the API server uses for HTTPS
Applications can reach the self-signed HTTPS API in either of two ways (a curl sketch follows this note):
1. Add trusted IPs to the certificate.
2. Send the CA certificate along with the request.
Note: behind gateways, forwarders, and similar middle layers, carrying the CA certificate is not always possible, so we add the trusted IPs instead.
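As an example of method 2, once the apiserver is running (section 6.2) a client can simply present the CA certificate; this sketch assumes anonymous access to /version is allowed, as it is by default in recent Kubernetes versions:
curl --cacert /opt/kubernetes/ssl/ca.pem https://192.168.9.30:6443/version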
[root@master1 k8s]# cat server-csr.json ---configure the trusted IPs
{
"CN": "kubernetes",
"hosts": [
"10.0.0.1", ---server的IP地址,一般作为内部负载均衡使用
"127.0.0.1", ---本地
#k8s内部通过DNS访问的,默认都要有
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local",
#custom trusted IPs, chiefly the master and load-balancer addresses
"192.168.9.30", ---master1
"192.168.9.31", ---master2
"192.168.9.36", ---LBMaster
"192.168.9.37", ---LBBackup
"192.168.9.38" ---LBVIP(注意,最后一项不能有逗号)
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"L": "BeiJing",
"ST": "BeiJing",
"O": "k8s",
"OU": "System"
}
]
}
[root@master1 k8s]# bash generate_k8s_cert.sh ---generate the certificates
[root@master1 k8s]# ls *pem ---list the generated certificates
ca-key.pem ca.pem kube-proxy-key.pem kube-proxy.pem server-key.pem server.pem
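To confirm the trusted IPs actually made it into the certificate, check its Subject Alternative Name field:
openssl x509 -in server.pem -noout -text | grep -A 1 'Subject Alternative Name'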
6.2 Single-Master Cluster: Deploying the API Server, controller-manager, and scheduler
6.2.1 Download the binary package on the master node
https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.16.md#downloads-for-v1168
Download the v1.16.1 server binary package from there.
6.2.2 Extract the archive
tar zxvf kubernetes-server-linux-amd64.tar.gz
6.2.3 This yields the kubernetes directory
6.2.4 We only need the following files under kubernetes/server/bin:
kube-apiserver
kubectl
kube-controller-manager
kube-scheduler
The steps above fetch the newest Kubernetes release binaries from the official site; some auxiliary files are also needed.
The package below already runs as-is, so the preceding steps can be skipped.
[root@master1 ~]# ls ---upload k8s-master.tar.gz
anaconda-ks.cfg etcd etcd.service etcd.tar.gz k8s-master.tar.gz TLS TLS.tar.gz
[root@master1 ~]# tar zxvf k8s-master.tar.gz ---extract
[root@master1 ~]# cat kube-apiserver.service
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
[Service]
EnvironmentFile=/opt/kubernetes/cfg/kube-apiserver.conf
ExecStart=/opt/kubernetes/bin/kube-apiserver $KUBE_APISERVER_OPTS
Restart=on-failure
[Install]
WantedBy=multi-user.target
[root@master1 ~]# tree kubernetes
kubernetes
├── bin ---the binaries; they can be replaced with newer official versions
│ ├── kube-apiserver
│ ├── kube-controller-manager
│ ├── kubectl
│ └── kube-scheduler
├── cfg ---configuration files
│ ├── kube-apiserver.conf
│ ├── kube-controller-manager.conf
│ ├── kube-scheduler.conf
│ └── token.csv
├── logs ---log directory
└── ssl ---certificate directory
4 directories, 8 files
# Copy the certificates into the directory
[root@master1 ~]# cd kubernetes/
[root@master1 kubernetes]# ls
bin cfg logs ssl
[root@master1 kubernetes]# cp /root/TLS/k8s/*.pem ssl/
[root@master1 kubernetes]# ls ssl/
ca-key.pem ca.pem kube-proxy-key.pem kube-proxy.pem server-key.pem server.pem
[root@master1 kubernetes]# rm ssl/kube-proxy* -f ---remove the ones not needed here
[root@master1 kubernetes]# ls ssl/
ca-key.pem ca.pem server-key.pem server.pem
Inspect the kube-apiserver.conf configuration file:
[root@master1 kubernetes]# cd cfg/
[root@master1 cfg]# ls
kube-apiserver.conf kube-controller-manager.conf kube-scheduler.conf token.csv
[root@master1 cfg]# cat kube-apiserver.conf
KUBE_APISERVER_OPTS="--logtostderr=false \ ---write logs to files instead of stderr
--v=2 \ ---log verbosity (range 0-8)
--log-dir=/opt/kubernetes/logs \ ---log output directory
--etcd-servers=https://192.168.9.30:2379,https://192.168.9.32:2379,https://192.168.9.35:2379 \ ---etcd endpoints
--bind-address=192.168.9.30 \ ---this machine's (internal) IP
--secure-port=6443 \ ---secure port
--advertise-address=192.168.9.30 \ ---advertised address, usually the local IP; tells nodes which IP to connect through
--allow-privileged=true \ ---allow containers to run with privileged rights
--service-cluster-ip-range=10.0.0.0/24 \ ---Service IP range; Services are allocated addresses from this block
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction \ ---admission plugins; advanced k8s features such as resource quotas
--authorization-mode=RBAC,Node \ ---authorization modes; access is normally granted through RBAC roles
--enable-bootstrap-token-auth=true \ ---enable bootstrap tokens so kubelets are issued certificates automatically; the details live in token.csv
--token-auth-file=/opt/kubernetes/cfg/token.csv \ ---token file the nodes use for bootstrap certificate issuance
--service-node-port-range=30000-32767 \ ---NodePort range exposed for Services
#certificates for connecting to the kubelets
--kubelet-client-certificate=/opt/kubernetes/ssl/server.pem \
--kubelet-client-key=/opt/kubernetes/ssl/server-key.pem \
#HTTPS certificates for the apiserver itself
--tls-cert-file=/opt/kubernetes/ssl/server.pem \
--tls-private-key-file=/opt/kubernetes/ssl/server-key.pem \
--client-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-key-file=/opt/kubernetes/ssl/ca-key.pem \
#HTTPS certificates for connecting to etcd
--etcd-cafile=/opt/etcd/ssl/ca.pem \
--etcd-certfile=/opt/etcd/ssl/server.pem \
--etcd-keyfile=/opt/etcd/ssl/server-key.pem \
#audit log settings
--audit-log-maxage=30 \
--audit-log-maxbackup=3 \
--audit-log-maxsize=100 \
--audit-log-path=/opt/kubernetes/logs/k8s-audit.log"
Inspect the kube-controller-manager.conf file:
[root@master1 cfg]# cat kube-controller-manager.conf
KUBE_CONTROLLER_MANAGER_OPTS="--logtostderr=false \ ---write logs to files
--v=2 \ ---log verbosity
--log-dir=/opt/kubernetes/logs \ ---log directory
--leader-elect=true \ ---leader election; only the apiserver needs an external HA setup, while kube-controller-manager implements HA itself through etcd-based election, so enabling this flag is enough
--master=127.0.0.1:8080 \ ---apiserver address; we connect locally, 8080 being the insecure port the apiserver opens by default
--address=127.0.0.1 \ ---listen address for this component; local only, no external access needed
--allocate-node-cidrs=true \
--cluster-cidr=10.244.0.0/16 \ ---cluster Pod IP range
--service-cluster-ip-range=10.0.0.0/24 \ ---Service IP range; identical to the kube-apiserver setting
#cluster signing certificates: when a node joins, controller-manager automatically issues its kubelet certificate using the CA configured below
--cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem \
--cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem \
#CA and private key needed to sign service-account tokens
--root-ca-file=/opt/kubernetes/ssl/ca.pem \
--service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem \
#lifetime of the certificates issued to nodes: 10 years
--experimental-cluster-signing-duration=87600h0m0s"
Inspect the kube-scheduler.conf file:
[root@master1 cfg]# cat kube-scheduler.conf
KUBE_SCHEDULER_OPTS="--logtostderr=false \
--v=2 \
--log-dir=/opt/kubernetes/logs \
--leader-elect \ ---leader election among multiple schedulers
--master=127.0.0.1:8080 \ ---apiserver address to connect to
--address=127.0.0.1" ---listen on the local address
Move the files into the working directories:
[root@master1 ~]# mv /root/kubernetes/ /opt/
[root@master1 ~]# mv kube-apiserver.service kube-controller-manager.service kube-scheduler.service /usr/lib/systemd/system
Start the services:
[root@master1 ~]# systemctl start kube-apiserver
[root@master1 ~]# ps -ef | grep kube ---it is up and running
root 18898 1 37 22:57 ? 00:00:04 /opt/kubernetes/bin/kube-apiserver --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --etcd-servers=https://192.168.9.30:2379,https://192.168.9.32:2379,https://192.168.9.35:2379 --bind-address=192.168.9.30 --secure-port=6443 --advertise-address=192.168.9.30 --allow-privileged=true --service-cluster-ip-range=10.0.0.0/24 --enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota,NodeRestriction --authorization-mode=RBAC,Node --enable-bootstrap-token-auth=true --token-auth-file=/opt/kubernetes/cfg/token.csv --service-node-port-range=30000-32767 --kubelet-client-certificate=/opt/kubernetes/ssl/server.pem --kubelet-client-key=/opt/kubernetes/ssl/server-key.pem --tls-cert-file=/opt/kubernetes/ssl/server.pem --tls-private-key-file=/opt/kubernetes/ssl/server-key.pem --client-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-key-file=/opt/kubernetes/ssl/ca-key.pem --etcd-cafile=/opt/etcd/ssl/ca.pem --etcd-certfile=/opt/etcd/ssl/server.pem --etcd-keyfile=/opt/etcd/ssl/server-key.pem --audit-log-maxage=30 --audit-log-maxbackup=3 --audit-log-maxsize=100 --audit-log-path=/opt/kubernetes/logs/k8s-audit.log
root 18907 18830 0 22:57 pts/1 00:00:00 grep --color=auto kube
[root@master1 ~]# ls /opt/kubernetes/logs/ ---several log files have been created
kube-apiserver.ERROR
kube-apiserver.FATAL
kube-apiserver.INFO
kube-apiserver.master1.root.log.ERROR.20200702-225757.18898
kube-apiserver.master1.root.log.ERROR.20200702-225819.18908
kube-apiserver.master1.root.log.FATAL.20200702-225804.18898
kube-apiserver.master1.root.log.FATAL.20200702-225826.18908
kube-apiserver.master1.root.log.INFO.20200702-225743.18898
kube-apiserver.master1.root.log.INFO.20200702-225805.18908
kube-apiserver.master1.root.log.INFO.20200702-225826.18916
kube-apiserver.master1.root.log.WARNING.20200702-225744.18898
kube-apiserver.master1.root.log.WARNING.20200702-225806.18908
kube-apiserver.master1.root.log.WARNING.20200702-225827.18916
kube-apiserver.WARNING
[root@master1 ~]# systemctl start kube-controller-manager
[root@master1 ~]# ps -ef |grep kube-controller-manager
root 19486 1 0 23:18 ? 00:00:00 /opt/kubernetes/bin/kube-controller-manager --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --leader-elect=true --master=127.0.0.1:8080 --address=127.0.0.1 --allocate-node-cidrs=true --cluster-cidr=10.244.0.0/16 --service-cluster-ip-range=10.0.0.0/24 --cluster-signing-cert-file=/opt/kubernetes/ssl/ca.pem --cluster-signing-key-file=/opt/kubernetes/ssl/ca-key.pem --root-ca-file=/opt/kubernetes/ssl/ca.pem --service-account-private-key-file=/opt/kubernetes/ssl/ca-key.pem --experimental-cluster-signing-duration=87600h0m0s
# Start kube-scheduler (the three services can also be enabled at boot with systemctl enable)
[root@master1 ~]# systemctl start kube-scheduler
[root@master1 cfg]# ps -ef |grep kube-scheduler
root 19309 1 0 23:12 ? 00:00:01 /opt/kubernetes/bin/kube-scheduler --logtostderr=false --v=2 --log-dir=/opt/kubernetes/logs --leader-elect --master=127.0.0.1:8080 --address=127.0.0.1
# Move kubectl onto the PATH
[root@master1 ~]# mv /opt/kubernetes/bin/kubectl /usr/local/bin/
# Check how the components came up
[root@master1 ~]# kubectl get cs ---no failures reported
NAME AGE
scheduler <unknown>
controller-manager <unknown>
etcd-2 <unknown>
etcd-1 <unknown>
etcd-0 <unknown>
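The <unknown> AGE column is a known cosmetic problem with kubectl get cs in v1.16; the health conditions are still recorded on the objects and can be read directly:
kubectl get cs -o yaml ---each component should show a Healthy condition with status "True"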
# Since bootstrap automatic certificate issuance is enabled, the bootstrap user still has to be set up
[root@master1 ~]# cat /opt/kubernetes/cfg/token.csv
c47ffb939f5ca36231d9e3121a252940,kubelet-bootstrap,10001,"system:node-bootstrapper"
# Format: token,user,uid,group
# This user has no permissions yet, so grant it the role on master1
[root@master1 ~]# kubectl create clusterrolebinding kubelet-bootstrap \
> --clusterrole=system:node-bootstrapper \
> --user=kubelet-bootstrap
clusterrolebinding.rbac.authorization.k8s.io/kubelet-bootstrap created
To generate a token yourself:
head -c 16 /dev/urandom | od -An -t x | tr -d ' '
The token configured on the apiserver must match the one in bootstrap.kubeconfig on the nodes; a sketch of applying a new token follows.
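A sketch of rolling out a new token (field layout as in the format above); the apiserver must be restarted afterwards and bootstrap.kubeconfig on the nodes updated to match:
TOKEN=$(head -c 16 /dev/urandom | od -An -t x | tr -d ' ')
echo "${TOKEN},kubelet-bootstrap,10001,\"system:node-bootstrapper\"" > /opt/kubernetes/cfg/token.csv
systemctl restart kube-apiserver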
7. Deploying the Node Components
7.1 Install Docker
Download the Docker binaries
Official link: https://download.docker.com/linux/static/stable/x86_64/ (browse it and pick the version to install)
wget https://download.docker.com/linux/static/stable/x86_64/docker-19.03.5.tgz
Note: Docker is already bundled into k8s-node.tar.gz here, so it does not need to be downloaded separately.
[root@node1 ~]# ls ---upload k8s-node.tar.gz
anaconda-ks.cfg k8s-node.tar.gz
[root@node1 ~]# tar zxvf k8s-node.tar.gz ---extract
cni-plugins-linux-amd64-v0.8.2.tgz
daemon.json
docker-18.09.6.tgz
docker.service
kubelet.service
kube-proxy.service
kubernetes/
kubernetes/bin/
kubernetes/bin/kubelet
kubernetes/bin/kube-proxy
kubernetes/cfg/
kubernetes/cfg/kubelet-config.yml
kubernetes/cfg/bootstrap.kubeconfig
kubernetes/cfg/kube-proxy.kubeconfig
kubernetes/cfg/kube-proxy.conf
kubernetes/cfg/kubelet.conf
kubernetes/cfg/kube-proxy-config.yml
kubernetes/ssl/
kubernetes/logs/
[root@node1 ~]# ls
anaconda-ks.cfg docker-18.09.6.tgz kubelet.service
cni-plugins-linux-amd64-v0.8.2.tgz docker.service kube-proxy.service
daemon.json k8s-node.tar.gz kubernetes
[root@node1 ~]# tar zxvf docker-18.09.6.tgz ---extract
[root@node1 ~]# ls docker
containerd containerd-shim ctr docker dockerd docker-init docker-proxy runc
# Move the binaries onto the PATH
[root@node1 ~]# mv docker/* /usr/bin
# Move docker.service into the systemd unit directory
[root@node1 ~]# mv docker.service /usr/lib/systemd/system
[root@node1 ~]# cat /usr/lib/systemd/system/docker.service
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
[Service]
Type=notify
ExecStart=/usr/bin/dockerd
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
# Put the Docker config file daemon.json under /etc/docker
[root@node1 ~]# cat daemon.json
{
"registry-mirrors": ["https://b9pmyelo.mirror.aliyuncs.com"], ---镜像加速的地址
"insecure-registries": ["192.168.9.30"] ---私有仓库地址(没有的话可以忽略)
}
[root@node1 ~]# mkdir /etc/docker
[root@node1 ~]# mv daemon.json /etc/docker/
Docker will now pick up this file.
[root@node1 ~]# systemctl start docker ---start Docker
[root@node1 ~]# systemctl enable docker ---enable at boot
Created symlink from /etc/systemd/system/multi-user.target.wants/docker.service to /usr/lib/systemd/system/docker.service.
Check the Docker installation info:
[root@node1 ~]# docker info
Containers: 0
Running: 0
Paused: 0
Stopped: 0
Images: 0
Server Version: 18.09.6
Storage Driver: overlay2
Backing Filesystem: xfs
Supports d_type: true
Native Overlay Diff: true
Logging Driver: json-file
Cgroup Driver: cgroupfs
Plugins:
Volume: local
Network: bridge host macvlan null overlay
Log: awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog
Swarm: inactive
Runtimes: runc
Default Runtime: runc
Init Binary: docker-init
containerd version: bb71b10fd8f58240ca47fbb579b9d1028eea7c84
runc version: 2b18fe1d885ee5083ef9f0838fee39b62d653e30
init version: fec3683
Security Options:
seccomp
Profile: default
Kernel Version: 3.10.0-957.el7.x86_64
Operating System: CentOS Linux 7 (Core)
OSType: linux
Architecture: x86_64
CPUs: 1
Total Memory: 972.6MiB
Name: node1
ID: IENI:HWRI:J5GV:DHT4:QL6G:UDQB:CHNT:HJ3H:YIAF:3GUU:VGRJ:AGFN
Docker Root Dir: /var/lib/docker
Debug Mode (client): false
Debug Mode (server): false
Registry: https://index.docker.io/v1/
Labels:
Experimental: false
Insecure Registries:
192.168.31.70
127.0.0.0/8
Registry Mirrors:
http://bc437cce.m.daocloud.io/
Live Restore Enabled: false
Product License: Community Engine
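As an optional smoke test (it pulls through the configured mirror, so registry access is required):
docker run --rm hello-world ---runs the hello-world image and removes the container afterwards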