1. Cluster planning
master01 4C8G 10.0.0.121
master02 4C8G 10.0.0.122
master03 4C8G 10.0.0.123
nodes01 4C8G 10.0.0.124
nodes02 4C8G 10.0.0.125
nodes03 4C8G 10.0.0.126
etcd01 4C8G 10.0.0.127
etcd02 4C8G 10.0.0.128
etcd03 4C8G 10.0.0.129
ha01 2C2G 10.0.0.130
ha02 2C2G 10.0.0.131
10.0.0.135(vip)
Plan: three master nodes, three worker (node) nodes, three etcd nodes, and two HA (load balancer) nodes.
Software versions:
OS: Ubuntu 20.04.5
kubernetes v1.25.4
containerd v1.6.10
etcd v3.5.3
cni-plugins v1.1.1
cfssl v1.6.1
2. Base environment configuration
2.1 Configure /etc/hosts on all machines
root@etcd01:~# cat /etc/hosts
10.0.0.121 master01
10.0.0.122 master02
10.0.0.123 master03
10.0.0.124 node01
10.0.0.125 node02
10.0.0.126 node03
10.0.0.127 etcd01
10.0.0.128 etcd02
10.0.0.129 etcd03
10.0.0.130 ha01
10.0.0.131 ha02
10.0.0.135 vip
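Every machine needs the same hosts entries. A minimal sketch for pushing the file out with scp (the host list matches the planning table above; until the SSH keys from section 2.5 are in place each copy will prompt for a password):
for i in master01 master02 master03 node01 node02 node03 etcd01 etcd02 etcd03 ha01 ha02;do
scp /etc/hosts $i:/etc/hosts
done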
2.2 Disable the firewall, SELinux, and swap on all nodes
ufw disable
ufw status (ufw status shows the current firewall state: "inactive" means the firewall is off, "active" means it is on.)
#Disable SELinux (only relevant if SELinux is present; Ubuntu uses AppArmor by default)
sed -ri 's/(^SELINUX=).*/\1disabled/' /etc/selinux/config
setenforce 0
#Disable swap
sed -ri 's@(^.*swap *swap.*0 0$)@#\1@' /etc/fstab
swapoff -a
2.3 Time synchronization on all nodes
#Option 1: ntpdate
apt install ntpdate -y
ntpdate time2.aliyun.com
#Add a cron job
crontab -e
*/5 * * * * ntpdate time2.aliyun.com
#Option 2: chrony (recommended)
#Install chrony
apt install chrony -y
#Configure one host as the time server (on Ubuntu the file is /etc/chrony/chrony.conf)
cat /etc/chrony/chrony.conf
server time2.aliyun.com iburst #upstream source to sync time from
driftfile /var/lib/chrony/drift
makestep 1.0 3
rtcsync
allow 10.0.0.0/16 #NTP client network allowed to sync from this server
local stratum 10
logdir /var/log/chrony
#Restart the service
systemctl restart chronyd
#Configure the other nodes to fetch time from that server
cat /etc/chrony/chrony.conf
server 10.0.0.121 iburst
#Restart and verify
systemctl restart chronyd
chronyc sources -v
^* master01 3 6 17 5 -10us[ -109us] +/- 28ms #a line starting with ^* means synchronization is working
2.4 Tune kernel parameters on all nodes
#Kernel parameters
cat >/etc/sysctl.conf<<EOF
net.ipv4.tcp_keepalive_time=600
net.ipv4.tcp_keepalive_intvl=30
net.ipv4.tcp_keepalive_probes=10
net.ipv6.conf.all.disable_ipv6=1
net.ipv6.conf.default.disable_ipv6=1
net.ipv6.conf.lo.disable_ipv6=1
net.ipv4.neigh.default.gc_stale_time=120
# default is 1; strict reverse-path filtering can drop asymmetric traffic
net.ipv4.conf.all.rp_filter=0
net.ipv4.conf.default.rp_filter=0
net.ipv4.conf.default.arp_announce=2
net.ipv4.conf.lo.arp_announce=2
net.ipv4.conf.all.arp_announce=2
net.ipv4.ip_local_port_range= 45001 65000
net.ipv4.ip_forward=1
net.ipv4.tcp_max_tw_buckets=6000
net.ipv4.tcp_syncookies=1
net.ipv4.tcp_synack_retries=2
net.bridge.bridge-nf-call-ip6tables=1
net.bridge.bridge-nf-call-iptables=1
net.netfilter.nf_conntrack_max=2310720
net.ipv6.neigh.default.gc_thresh1=8192
net.ipv6.neigh.default.gc_thresh2=32768
net.ipv6.neigh.default.gc_thresh3=65536
# per-CPU backlog queue length for incoming packets
net.core.netdev_max_backlog=16384
# maximum socket read/write buffer sizes for all protocols
net.core.rmem_max = 16777216
net.core.wmem_max = 16777216
# SYN backlog (first accept queue) length
net.ipv4.tcp_max_syn_backlog = 8096
# accept backlog (second queue) length
net.core.somaxconn = 32768
# maximum inotify instances per real user ID (default 128)
fs.inotify.max_user_instances=8192
# maximum inotify watches per user (default 8192)
fs.inotify.max_user_watches=524288
fs.file-max=52706963
fs.nr_open=52706963
kernel.pid_max = 4194303
net.bridge.bridge-nf-call-arptables=1
# avoid using swap; only allow it when the system is about to OOM
vm.swappiness=0
# do not check whether enough physical memory is available when allocating
vm.overcommit_memory=1
# do not panic on OOM; let the OOM killer handle it
vm.panic_on_oom=0
vm.max_map_count = 262144
EOF
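These values are only loaded at the reboot further below. To apply and spot-check them immediately, something like the following works (the net.bridge.* and nf_conntrack keys need the br_netfilter and nf_conntrack modules loaded first):
modprobe br_netfilter
modprobe nf_conntrack
sysctl -p /etc/sysctl.conf
sysctl net.ipv4.ip_forward net.bridge.bridge-nf-call-iptables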
#Load the ipvs and related modules at boot
cat >/etc/modules-load.d/ipvs.conf <<EOF
ip_vs
ip_vs_lc
ip_vs_wlc
ip_vs_rr
ip_vs_wrr
ip_vs_lblc
ip_vs_lblcr
ip_vs_dh
ip_vs_sh
ip_vs_nq
ip_vs_sed
ip_vs_ftp
br_netfilter
nf_conntrack
ip_tables
ip_set
xt_set
ipt_set
ipt_rpfilter
ipt_REJECT
EOF
systemctl enable --now systemd-modules-load.service
#Reboot
reboot
#After the reboot, verify that the modules are loaded
lsmod | grep -e ip_vs -e nf
2.5 Set up passwordless SSH from master01
ssh-keygen -t rsa
all="master01 master02 master03 node01 node02 node03 etcd01 etcd02 etcd03"
for i in $all;do ssh-copy-id -i .ssh/id_rsa.pub $i;done
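A quick loop to confirm that key-based login works before continuing (BatchMode makes ssh fail instead of falling back to a password prompt):
for i in $all;do ssh -o BatchMode=yes $i hostname;done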
2.6 Install required packages
apt install curl conntrack ipvsadm ipset iptables jq sysstat libseccomp2 rsync wget psmisc vim net-tools telnet lrzsz bash-completion -y
2.7 Prepare the binary packages
#Kubernetes v1.25.4 binaries
https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.25.md#downloads-for-v1254
#etcd binaries
https://github.com/etcd-io/etcd/releases
#containerd binaries
https://github.com/containerd/containerd/releases
#cfssl binaries
https://github.com/cloudflare/cfssl/releases
#CNI plugin binaries
https://github.com/containernetworking/plugins/releases
#crictl binaries
https://github.com/kubernetes-sigs/cri-tools/releases
#Docker static binaries (the tarball also bundles containerd)
https://download.docker.com/linux/static/stable/x86_64/
#cri-dockerd binaries
https://github.com/Mirantis/cri-dockerd/releases
3. Install a container runtime: Docker or containerd (all master and node machines)
Install a container runtime
Install either docker-ce or containerd (only one of the two); it must be installed on every node that will run kubelet. Since Kubernetes v1.24, using docker-ce as the container runtime additionally requires cri-dockerd.
3.1 Install containerd
#Install the CNI plugins first:
wget https://github.com/containernetworking/plugins/releases/download/v1.1.1/cni-plugins-linux-amd64-v1.1.1.tgz
mkdir -p /etc/cni/{bin,net.d}
tar xf cni-plugins-linux-amd64-v1.1.1.tgz -C /etc/cni/bin/
#Download and extract containerd
wget https://github.com/containerd/containerd/releases/download/v1.6.10/cri-containerd-cni-1.6.10-linux-amd64.tar.gz
tar xf cri-containerd-cni-1.6.10-linux-amd64.tar.gz -C /
#Create the systemd service file
cat > /etc/systemd/system/containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/local/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=infinity
TasksMax=infinity
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
EOF
#Generate the default configuration
mkdir /etc/containerd
/usr/local/bin/containerd config default > /etc/containerd/config.toml
#Edit the configuration
First change the default pause image to a domestic mirror address by replacing sandbox_image under [plugins."io.containerd.grpc.v1.cri"]; it is also advisable to set SystemdCgroup = true under [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options], since the kubelet configuration later in this guide uses cgroupDriver: systemd:
[plugins."io.containerd.grpc.v1.cri"]
sandbox_image = "registry.aliyuncs.com/k8sxio/pause:3.2"
Configure registry mirror endpoints
[plugins."io.containerd.grpc.v1.cri".registry]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"]
endpoint = ["https://bqr1dr1n.mirror.aliyuncs.com"]
[plugins."io.containerd.grpc.v1.cri".registry.mirrors."k8s.gcr.io"]
endpoint = ["https://registry.aliyuncs.com/k8sxio"]
#Start containerd
systemctl enable --now containerd.service
#Install the crictl client tool
wget https://github.com/kubernetes-sigs/cri-tools/releases/download/v1.25.0/crictl-v1.25.0-linux-amd64.tar.gz
tar xf crictl-v1.25.0-linux-amd64.tar.gz -C /usr/bin/
#Generate its configuration file
cat > /etc/crictl.yaml <<EOF
runtime-endpoint: unix:///run/containerd/containerd.sock
EOF
#Test
crictl info
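Optionally pull a small image through containerd to confirm both the runtime and the registry mirror are working (busybox is just an example image):
crictl pull docker.io/library/busybox:latest
crictl images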
3.2 Install Docker
tar xf docker-20.10.15.tgz
#Copy the binaries
cp docker/* /usr/bin/
#Create containerd's service file and start it
cat >/etc/systemd/system/containerd.service <<EOF
[Unit]
Description=containerd container runtime
Documentation=https://containerd.io
After=network.target local-fs.target
[Service]
ExecStartPre=-/sbin/modprobe overlay
ExecStart=/usr/bin/containerd
Type=notify
Delegate=yes
KillMode=process
Restart=always
RestartSec=5
LimitNPROC=infinity
LimitCORE=infinity
LimitNOFILE=1048576
TasksMax=infinity
OOMScoreAdjust=-999
[Install]
WantedBy=multi-user.target
EOF
systemctl enable --now containerd.service
#Prepare Docker's service file (quote the heredoc delimiter so that $MAINPID is written literally)
cat > /etc/systemd/system/docker.service <<'EOF'
[Unit]
Description=Docker Application Container Engine
Documentation=https://docs.docker.com
After=network-online.target firewalld.service containerd.service
Wants=network-online.target
Requires=docker.socket containerd.service
[Service]
Type=notify
ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
StartLimitBurst=3
StartLimitInterval=60s
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
TasksMax=infinity
Delegate=yes
KillMode=process
OOMScoreAdjust=-500
[Install]
WantedBy=multi-user.target
EOF
#Prepare Docker's socket file
cat > /etc/systemd/system/docker.socket <<EOF
[Unit]
Description=Docker Socket for the API
[Socket]
ListenStream=/var/run/docker.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
EOF
#Create the docker group
groupadd docker
#Start Docker
systemctl enable --now docker.socket && systemctl enable --now docker.service
#Verify
docker info
#Create the Docker daemon configuration
mkdir /etc/docker/ -p
cat >/etc/docker/daemon.json << EOF
{"registry-mirrors":["https://b9pmyelo.mirror.aliyuncs.com"]}
EOF
systemctl restart docker
#Install cri-dockerd
#Extract the package
tar xf cri-dockerd-0.2.3.amd64.tgz
#Copy the binaries
cp cri-dockerd/* /usr/bin/
#Generate the service and socket files
cat >/etc/systemd/system/cri-docker.socket<<EOF
[Unit]
Description=CRI Docker Socket for the API
PartOf=cri-docker.service
[Socket]
ListenStream=%t/cri-dockerd.sock
SocketMode=0660
SocketUser=root
SocketGroup=docker
[Install]
WantedBy=sockets.target
EOF
cat >/etc/systemd/system/cri-docker.service<<'EOF'
[Unit]
Description=CRI Interface for Docker Application Container Engine
Documentation=https://docs.mirantis.com
After=network-online.target firewalld.service docker.service
Wants=network-online.target
Requires=cri-docker.socket
[Service]
Type=notify
# Optionally append --pod-infra-container-image=192.168.10.254:5000/k8s/pause:3.7 to override the pause image
ExecStart=/usr/bin/cri-dockerd --container-runtime-endpoint fd:// --network-plugin=cni
ExecReload=/bin/kill -s HUP $MAINPID
TimeoutSec=0
RestartSec=2
Restart=always
# Note that StartLimit* options were moved from "Service" to "Unit" in systemd 229.
# Both the old, and new location are accepted by systemd 229 and up, so using the old location
# to make them work for either version of systemd.
StartLimitBurst=3
# Note that StartLimitInterval was renamed to StartLimitIntervalSec in systemd 230.
# Both the old, and new name are accepted by systemd 230 and up, so using the old name to make
# this option work for either version of systemd.
StartLimitInterval=60s
# Having non-zero Limit*s causes performance problems due to accounting overhead
# in the kernel. We recommend using cgroups to do container-local accounting.
LimitNOFILE=infinity
LimitNPROC=infinity
LimitCORE=infinity
# Comment TasksMax if your systemd version does not support it.
# Only systemd 226 and above support this option.
TasksMax=infinity
Delegate=yes
KillMode=process
[Install]
WantedBy=multi-user.target
EOF
#Start the services
systemctl enable --now cri-docker.socket
systemctl enable --now cri-docker
4. Generate the cluster certificates (all steps run on master01)
4.1 Install cfssl and distribute the binaries
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.3/cfssl_1.6.3_linux_amd64
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.2/cfssljson_1.6.2_linux_amd64
wget https://github.com/cloudflare/cfssl/releases/download/v1.6.2/cfssl-certinfo_1.6.2_linux_amd64
mv cfssl_1.6.3_linux_amd64 cfssl
mv cfssljson_1.6.2_linux_amd64 cfssljson
mv cfssl-certinfo_1.6.2_linux_amd64 cfssl-certinfo
chmod +x cfssl* && mv cfssl* /usr/bin/
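A quick sanity check that the tools are executable and on PATH:
cfssl version
ls -l /usr/bin/cfssl*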
#Distribute the Kubernetes binaries
master="master01 master02 master03"
node="node01 node02 node03"
#Distribute the master components
for i in $master;do
scp kubernetes/server/bin/{kube-apiserver,kube-controller-manager,kube-scheduler,kube-proxy,kubelet,kubectl} $i:/usr/bin
done
#Distribute the node components
for i in $node;do
scp kubernetes/server/bin/{kube-proxy,kubelet} $i:/usr/bin
done
4.2 Create the etcd cluster certificates
mkdir /opt/pki/etcd/ -p
cd /opt/pki/etcd/
#Create the etcd CA
mkdir ca && cd ca
#Generate the CA signing configuration
cat > ca-config.json <<EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"etcd": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
#Generate the CA signing request
cat > ca-csr.json <<EOF
{
"CA":{"expiry":"87600h"},
"CN": "etcd-cluster",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"TS": "Beijing",
"L": "Beijing",
"O": "etcd-cluster",
"OU": "System"
}
]
}
EOF
#Generate the CA certificate
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
#etcd server certificate
#Generate the certificate signing request
cd /opt/pki/etcd/
cat > etcd-server-csr.json << EOF
{
"CN": "etcd-server",
"hosts": [
"10.0.0.127",
"10.0.0.128",
"10.0.0.129",
"127.0.0.1"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"TS": "Beijing",
"L": "Beijing",
"O": "etcd-server",
"OU": "System"
}
]
}
EOF
#Generate the certificate
cfssl gencert \
-ca=ca/ca.pem \
-ca-key=ca/ca-key.pem \
-config=ca/ca-config.json \
-profile=etcd \
etcd-server-csr.json | cfssljson -bare etcd-server
#etcd client certificate
#Generate the certificate signing request
cd /opt/pki/etcd/
cat > etcd-client-csr.json << EOF
{
"CN": "etcd-client",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"TS": "Beijing",
"L": "Beijing",
"O": "etcd-client",
"OU": "System"
}
]
}
EOF
#Generate the certificate
cfssl gencert \
-ca=ca/ca.pem \
-ca-key=ca/ca-key.pem \
-config=ca/ca-config.json \
-profile=etcd \
etcd-client-csr.json | cfssljson -bare etcd-client
Verify
pwd
/opt/pki/etcd
#Use tree to check the generated files
[root@k8s-master01 etcd]# tree
.
├── ca
│ ├── ca-config.json
│ ├── ca.csr
│ ├── ca-csr.json
│ ├── ca-key.pem
│ └── ca.pem
├── etcd-client.csr
├── etcd-client-csr.json
├── etcd-client-key.pem #client private key
├── etcd-client.pem #client certificate
├── etcd-server.csr
├── etcd-server-csr.json
├── etcd-server-key.pem #server private key
└── etcd-server.pem #server certificate
#Copy the certificates to the master and etcd nodes
master_etcd="master01 master02 master03 etcd01 etcd02 etcd03"
for i in $master_etcd;do
ssh $i "mkdir /etc/etcd/ssl -p"
scp /opt/pki/etcd/ca/ca.pem /opt/pki/etcd/{etcd-server.pem,etcd-server-key.pem,etcd-client.pem,etcd-client-key.pem} $i:/etc/etcd/ssl/
done
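Optionally confirm that all etcd node IPs made it into the server certificate's SAN list, for example with openssl:
openssl x509 -in /etc/etcd/ssl/etcd-server.pem -noout -text | grep -A1 'Subject Alternative Name'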
4.3 Create certificates for the Kubernetes components
Generate the Kubernetes CA certificate
mkdir /opt/pki/kubernetes/ -p
cd /opt/pki/kubernetes/
mkdir ca
cd ca
#Create the CA signing configuration and request
cat > ca-config.json <<EOF
{
"signing": {
"default": {
"expiry": "87600h"
},
"profiles": {
"kubernetes": {
"expiry": "87600h",
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
]
}
}
}
}
EOF
#Generate the CA signing request
cat > ca-csr.json <<EOF
{
"CA":{"expiry":"87600h"},
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"TS": "Beijing",
"L": "Beijing",
"O": "kubernetes",
"OU": "System"
}
]
}
EOF
#Generate the CA certificate
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
Generate the kube-apiserver certificate
#Create the directory
mkdir /opt/pki/kubernetes/kube-apiserver -p
cd /opt/pki/kubernetes/kube-apiserver
#Generate the certificate signing request
cat > kube-apiserver-csr.json <<EOF
{
"CN": "kube-apiserver",
"hosts": [
"127.0.0.1",
"10.0.0.121",
"10.0.0.122",
"10.0.0.123",
"10.0.0.135",
"10.200.0.1",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"TS": "Beijing",
"L": "Beijing",
"O": "kube-apiserver",
"OU": "System"
}
]
}
EOF
#Generate the certificate
cfssl gencert \
-ca=../ca/ca.pem \
-ca-key=../ca/ca-key.pem \
-config=../ca/ca-config.json \
-profile=kubernetes \
kube-apiserver-csr.json | cfssljson -bare kube-apiserver
#Copy the kube-apiserver certificates to the master nodes
master="master01 master02 master03"
for i in $master;do
ssh $i "mkdir /etc/kubernetes/pki -p"
scp /opt/pki/kubernetes/ca/{ca.pem,ca-key.pem} /opt/pki/kubernetes/kube-apiserver/{kube-apiserver-key.pem,kube-apiserver.pem} $i:/etc/kubernetes/pki
done
#Copy the CA certificate to the worker nodes
node="node01 node02 node03"
for i in $node;do
ssh $i "mkdir /etc/kubernetes/pki -p"
scp /opt/pki/kubernetes/ca/ca.pem $i:/etc/kubernetes/pki
done
Generate the front-proxy CA and client certificates
#Create the directory
mkdir /opt/pki/proxy-client
cd /opt/pki/proxy-client
#Generate the front-proxy CA signing request
cat > front-proxy-ca-csr.json <<EOF
{
"CA":{"expiry":"87600h"},
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
}
}
EOF
#Generate the CA certificate
cfssl gencert -initca front-proxy-ca-csr.json | cfssljson -bare front-proxy-ca
#Generate the client certificate signing request
cat > front-proxy-client-csr.json <<EOF
{
"CN": "front-proxy-client",
"key": {
"algo": "rsa",
"size": 2048
}
}
EOF
#Generate the certificate
cfssl gencert \
-ca=front-proxy-ca.pem \
-ca-key=front-proxy-ca-key.pem \
-config=../kubernetes/ca/ca-config.json \
-profile=kubernetes front-proxy-client-csr.json | cfssljson -bare front-proxy-client
#Copy the certificates to the nodes
master="master01 master02 master03"
node="node01 node02 node03"
for i in $master;do
scp /opt/pki/proxy-client/{front-proxy-ca.pem,front-proxy-client.pem,front-proxy-client-key.pem} $i:/etc/kubernetes/pki
done
for i in $node;do
scp /opt/pki/proxy-client/front-proxy-ca.pem $i:/etc/kubernetes/pki
done
Generate the kube-controller-manager certificate and kubeconfig
mkdir /opt/pki/kubernetes/kube-controller-manager -p
cd /opt/pki/kubernetes/kube-controller-manager
#Generate the certificate signing request
cat > kube-controller-manager-csr.json <<EOF
{
"CN": "system:kube-controller-manager",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"TS": "Beijing",
"L": "Beijing",
"O": "system:kube-controller-manager",
"OU": "System"
}
]
}
EOF
#Generate the certificate
cfssl gencert \
-ca=../ca/ca.pem \
-ca-key=../ca/ca-key.pem \
-config=../ca/ca-config.json \
-profile=kubernetes \
kube-controller-manager-csr.json | cfssljson -bare kube-controller-manager
#Generate the kubeconfig
export KUBE_APISERVER="https://10.0.0.135:8443"
kubectl config set-cluster kubernetes \
--certificate-authority=../ca/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-credentials system:kube-controller-manager \
--client-certificate=kube-controller-manager.pem \
--client-key=kube-controller-manager-key.pem \
--embed-certs=true \
--kubeconfig=kube-controller-manager.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=system:kube-controller-manager \
--kubeconfig=kube-controller-manager.kubeconfig
kubectl config use-context default --kubeconfig=kube-controller-manager.kubeconfig
Copy the kubeconfig to the master nodes
master="master01 master02 master03"
for i in $master;do
scp /opt/pki/kubernetes/kube-controller-manager/kube-controller-manager.kubeconfig $i:/etc/kubernetes/
done
Generate the kube-scheduler certificate
#Create the directory
mkdir /opt/pki/kubernetes/kube-scheduler
cd /opt/pki/kubernetes/kube-scheduler
#Generate the certificate signing request
cat > kube-scheduler-csr.json <<EOF
{
"CN": "system:kube-scheduler",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"TS": "Beijing",
"L": "Beijing",
"O": "system:kube-scheduler",
"OU": "System"
}
]
}
EOF
#Generate the certificate
cfssl gencert \
-ca=../ca/ca.pem \
-ca-key=../ca/ca-key.pem \
-config=../ca/ca-config.json \
-profile=kubernetes \
kube-scheduler-csr.json | cfssljson -bare kube-scheduler
#Generate the kubeconfig
export KUBE_APISERVER="https://10.0.0.135:8443"
kubectl config set-cluster kubernetes \
--certificate-authority=../ca/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=kube-scheduler.kubeconfig
kubectl config set-credentials system:kube-scheduler \
--client-certificate=kube-scheduler.pem \
--client-key=kube-scheduler-key.pem \
--embed-certs=true \
--kubeconfig=kube-scheduler.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=system:kube-scheduler \
--kubeconfig=kube-scheduler.kubeconfig
kubectl config use-context default --kubeconfig=kube-scheduler.kubeconfig
#Copy the kubeconfig to the master nodes
master="master01 master02 master03"
for i in $master;do
scp /opt/pki/kubernetes/kube-scheduler/kube-scheduler.kubeconfig $i:/etc/kubernetes
done
Generate the cluster administrator certificate
#Create the directory
mkdir /opt/pki/kubernetes/admin
cd /opt/pki/kubernetes/admin
#Generate the certificate signing request
cat > admin-csr.json <<EOF
{
"CN": "admin",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"TS": "Beijing",
"L": "Beijing",
"O": "system:masters",
"OU": "System"
}
]
}
EOF
#Generate the certificate
cfssl gencert \
-ca=../ca/ca.pem \
-ca-key=../ca/ca-key.pem \
-config=../ca/ca-config.json \
-profile=kubernetes \
admin-csr.json | cfssljson -bare admin
#Generate the kubeconfig
export KUBE_APISERVER="https://10.0.0.135:8443"
kubectl config set-cluster kubernetes \
--certificate-authority=../ca/ca.pem \
--embed-certs=true \
--server=${KUBE_APISERVER} \
--kubeconfig=admin.kubeconfig
kubectl config set-credentials admin \
--client-certificate=admin.pem \
--client-key=admin-key.pem \
--embed-certs=true \
--kubeconfig=admin.kubeconfig
kubectl config set-context default \
--cluster=kubernetes \
--user=admin \
--kubeconfig=admin.kubeconfig
kubectl config use-context default --kubeconfig=admin.kubeconfig
#Copy the kubeconfig to the master nodes
master="master01 master02 master03"
for i in $master;do
scp /opt/pki/kubernetes/admin/admin.kubeconfig $i:/etc/kubernetes
done
5. Install the etcd cluster
5.1 etcd node 1
tar xf etcd-v3.5.3-linux-amd64.tar.gz
cp etcd-v3.5.3-linux-amd64/etcd* /usr/bin/
#Create the etcd configuration file
cat > /etc/etcd/etcd.config.yml <<EOF
name: 'etcd-1'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://10.0.0.127:2380'
listen-client-urls: 'https://10.0.0.127:2379,https://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://10.0.0.127:2380'
advertise-client-urls: 'https://10.0.0.127:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'etcd-1=https://10.0.0.127:2380,etcd-2=https://10.0.0.128:2380,etcd-3=https://10.0.0.129:2380'
initial-cluster-token: 'etcd-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
cert-file: '/etc/etcd/ssl/etcd-server.pem'
key-file: '/etc/etcd/ssl/etcd-server-key.pem'
client-cert-auth: true
trusted-ca-file: '/etc/etcd/ssl/ca.pem'
auto-tls: true
peer-transport-security:
cert-file: '/etc/etcd/ssl/etcd-server.pem'
key-file: '/etc/etcd/ssl/etcd-server-key.pem'
peer-client-cert-auth: true
trusted-ca-file: '/etc/etcd/ssl/ca.pem'
auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF
5.2 etcd node 2
tar xf etcd-v3.5.3-linux-amd64.tar.gz
cp etcd-v3.5.3-linux-amd64/etcd* /usr/bin/
#Create the etcd configuration file
cat > /etc/etcd/etcd.config.yml <<EOF
name: 'etcd-2'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://10.0.0.128:2380'
listen-client-urls: 'https://10.0.0.128:2379,https://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://10.0.0.128:2380'
advertise-client-urls: 'https://10.0.0.128:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'etcd-1=https://10.0.0.127:2380,etcd-2=https://10.0.0.128:2380,etcd-3=https://10.0.0.129:2380'
initial-cluster-token: 'etcd-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
cert-file: '/etc/etcd/ssl/etcd-server.pem'
key-file: '/etc/etcd/ssl/etcd-server-key.pem'
client-cert-auth: true
trusted-ca-file: '/etc/etcd/ssl/ca.pem'
auto-tls: true
peer-transport-security:
cert-file: '/etc/etcd/ssl/etcd-server.pem'
key-file: '/etc/etcd/ssl/etcd-server-key.pem'
peer-client-cert-auth: true
trusted-ca-file: '/etc/etcd/ssl/ca.pem'
auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF
5.3 etcd node 3
tar xf etcd-v3.5.3-linux-amd64.tar.gz
cp etcd-v3.5.3-linux-amd64/etcd* /usr/bin/
#Create the etcd configuration file
cat > /etc/etcd/etcd.config.yml <<EOF
name: 'etcd-3'
data-dir: /var/lib/etcd
wal-dir: /var/lib/etcd/wal
snapshot-count: 5000
heartbeat-interval: 100
election-timeout: 1000
quota-backend-bytes: 0
listen-peer-urls: 'https://10.0.0.129:2380'
listen-client-urls: 'https://10.0.0.129:2379,https://127.0.0.1:2379'
max-snapshots: 3
max-wals: 5
cors:
initial-advertise-peer-urls: 'https://10.0.0.129:2380'
advertise-client-urls: 'https://10.0.0.129:2379'
discovery:
discovery-fallback: 'proxy'
discovery-proxy:
discovery-srv:
initial-cluster: 'etcd-1=https://10.0.0.127:2380,etcd-2=https://10.0.0.128:2380,etcd-3=https://10.0.0.129:2380'
initial-cluster-token: 'etcd-cluster'
initial-cluster-state: 'new'
strict-reconfig-check: false
enable-v2: true
enable-pprof: true
proxy: 'off'
proxy-failure-wait: 5000
proxy-refresh-interval: 30000
proxy-dial-timeout: 1000
proxy-write-timeout: 5000
proxy-read-timeout: 0
client-transport-security:
cert-file: '/etc/etcd/ssl/etcd-server.pem'
key-file: '/etc/etcd/ssl/etcd-server-key.pem'
client-cert-auth: true
trusted-ca-file: '/etc/etcd/ssl/ca.pem'
auto-tls: true
peer-transport-security:
cert-file: '/etc/etcd/ssl/etcd-server.pem'
key-file: '/etc/etcd/ssl/etcd-server-key.pem'
peer-client-cert-auth: true
trusted-ca-file: '/etc/etcd/ssl/ca.pem'
auto-tls: true
debug: false
log-package-levels:
log-outputs: [default]
force-new-cluster: false
EOF
5.4 Create the service file and start the service (on all etcd nodes)
cat > /etc/systemd/system/etcd.service <<EOF
[Unit]
Description=Etcd Service
Documentation=https://coreos.com/etcd/docs/latest/
After=network.target
[Service]
Type=notify
ExecStart=/usr/bin/etcd --config-file=/etc/etcd/etcd.config.yml
Restart=on-failure
RestartSec=10
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
Alias=etcd3.service
EOF
#Start the service
systemctl daemon-reload
systemctl enable --now etcd
##Configure etcdctl to use the v3 API
cat > /etc/profile.d/etcdctl.sh <<EOF
#!/bin/bash
export ETCDCTL_API=3
export ETCDCTL_ENDPOINTS=https://127.0.0.1:2379
export ETCDCTL_CACERT=/etc/etcd/ssl/ca.pem
export ETCDCTL_CERT=/etc/etcd/ssl/etcd-client.pem
export ETCDCTL_KEY=/etc/etcd/ssl/etcd-client-key.pem
EOF
#Apply to the current shell
source /etc/profile
#Verify the cluster status
root@etcd01:~# etcdctl member list --write-out='table'
+------------------+---------+--------+-------------------------+-------------------------+------------+
| ID | STATUS | NAME | PEER ADDRS | CLIENT ADDRS | IS LEARNER |
+------------------+---------+--------+-------------------------+-------------------------+------------+
| f1480aba887b1eb | started | etcd-3 | https://10.0.0.129:2380 | https://10.0.0.129:2379 | false |
| 358a0a2775d7a771 | started | etcd-1 | https://10.0.0.127:2380 | https://10.0.0.127:2379 | false |
| d6b17c4efb47744b | started | etcd-2 | https://10.0.0.128:2380 | https://10.0.0.128:2379 | false |
+------------------+---------+--------+-------------------------+-------------------------+------------+
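member list only proves membership; endpoint health/status additionally confirm that every member is serving and a leader has been elected. A sketch run from any etcd node (the TLS flags come from the ETCDCTL_* variables set above):
etcdctl --endpoints=https://10.0.0.127:2379,https://10.0.0.128:2379,https://10.0.0.129:2379 endpoint health --write-out=table
etcdctl --endpoints=https://10.0.0.127:2379,https://10.0.0.128:2379,https://10.0.0.129:2379 endpoint status --write-out=table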
6. Install the Kubernetes cluster
6.1 Install kube-apiserver on the master nodes
#Create the ServiceAccount signing key pair
openssl genrsa -out /etc/kubernetes/pki/sa.key 2048
openssl rsa -in /etc/kubernetes/pki/sa.key -pubout -out /etc/kubernetes/pki/sa.pub
master="master01 master02 master03"
#Distribute the keys to the master nodes
for i in $master;do
scp /etc/kubernetes/pki/{sa.pub,sa.key} $i:/etc/kubernetes/pki/
done
#Create the service file (ens33 is the interface name in this environment; adjust to yours)
a=`ifconfig ens33 | awk 'NR==2{print $2}'`
cat > /etc/systemd/system/kube-apiserver.service <<EOF
[Unit]
Description=Kubernetes API Server
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/bin/kube-apiserver \\
--v=2 \\
--logtostderr=true \\
--allow-privileged=true \\
--bind-address=$a \\
--secure-port=6443 \\
--advertise-address=$a \\
--service-cluster-ip-range=10.200.0.0/16 \\
--service-node-port-range=30000-42767 \\
--etcd-servers=https://10.0.0.127:2379,https://10.0.0.128:2379,https://10.0.0.129:2379 \\
--etcd-cafile=/etc/etcd/ssl/ca.pem \\
--etcd-certfile=/etc/etcd/ssl/etcd-client.pem \\
--etcd-keyfile=/etc/etcd/ssl/etcd-client-key.pem \\
--client-ca-file=/etc/kubernetes/pki/ca.pem \\
--tls-cert-file=/etc/kubernetes/pki/kube-apiserver.pem \\
--tls-private-key-file=/etc/kubernetes/pki/kube-apiserver-key.pem \\
--kubelet-client-certificate=/etc/kubernetes/pki/kube-apiserver.pem \\
--kubelet-client-key=/etc/kubernetes/pki/kube-apiserver-key.pem \\
--service-account-key-file=/etc/kubernetes/pki/sa.pub \\
--service-account-signing-key-file=/etc/kubernetes/pki/sa.key \\
--service-account-issuer=https://kubernetes.default.svc.cluster.local \\
--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname \\
--enable-admission-plugins=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota \\
--authorization-mode=Node,RBAC \\
--enable-bootstrap-token-auth=true \\
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \\
--proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.pem \\
--proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client-key.pem \\
--requestheader-allowed-names=aggregator \\
--requestheader-group-headers=X-Remote-Group \\
--requestheader-extra-headers-prefix=X-Remote-Extra- \\
--requestheader-username-headers=X-Remote-User
Restart=on-failure
RestartSec=10s
LimitNOFILE=65535
[Install]
WantedBy=multi-user.target
EOF
#Start the service
systemctl enable --now kube-apiserver.service
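Before the load balancer exists you can still check each apiserver directly; /healthz is readable without authentication under the default RBAC rules, so something like this should print "ok" on every master:
curl -k https://10.0.0.121:6443/healthz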
apiserver high-availability setup (on ha01 and ha02)
#Install keepalived and haproxy
apt install -y keepalived haproxy
#ha01 configuration
cat >/etc/keepalived/keepalived.conf <<EOF
! Configuration File for keepalived
global_defs {
notification_email {
acassen
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 10.0.0.140
smtp_connect_timeout 30
router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state MASTER
interface ens33
garp_master_delay 10
smtp_alert
virtual_router_id 56 # must be the same id on both keepalived nodes
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.0.0.135 dev ens33 label ens33:0 # the VIP
}
track_script {
chk_apiserver
}
}
EOF
#ha02 configuration
cat >/etc/keepalived/keepalived.conf <<EOF
! Configuration File for keepalived
global_defs {
notification_email {
acassen
}
notification_email_from Alexandre.Cassen@firewall.loc
smtp_server 10.0.0.140
smtp_connect_timeout 30
router_id LVS_DEVEL
}
vrrp_script chk_apiserver {
script "/etc/keepalived/check_apiserver.sh"
interval 5
weight -5
fall 2
rise 1
}
vrrp_instance VI_1 {
state SLAVE
interface ens33
garp_master_delay 10
smtp_alert
virtual_router_id 56 # must be the same id on both keepalived nodes
priority 80
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
10.0.0.135 dev ens33 label ens33:0 # the VIP
}
track_script {
chk_apiserver
}
}
EOF
#HAProxy configuration (identical on both ha nodes)
cat >/etc/haproxy/haproxy.cfg <<EOF
global
log /dev/log local0
log /dev/log local1 notice
chroot /var/lib/haproxy
stats socket /run/haproxy/admin.sock mode 660 level admin expose-fd listeners
stats timeout 30s
user haproxy
group haproxy
daemon
# Default SSL material locations
ca-base /etc/ssl/certs
crt-base /etc/ssl/private
# See: https://ssl-config.mozilla.org/#server=haproxy&server-version=2.0.3&config=intermediate
ssl-default-bind-ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384
ssl-default-bind-ciphersuites TLS_AES_128_GCM_SHA256:TLS_AES_256_GCM_SHA384:TLS_CHACHA20_POLY1305_SHA256
ssl-default-bind-options ssl-min-ver TLSv1.2 no-tls-tickets
defaults
log global
mode http
option httplog
option dontlognull
timeout connect 5000
timeout client 50000
timeout server 50000
errorfile 400 /etc/haproxy/errors/400.http
errorfile 403 /etc/haproxy/errors/403.http
errorfile 408 /etc/haproxy/errors/408.http
errorfile 500 /etc/haproxy/errors/500.http
errorfile 502 /etc/haproxy/errors/502.http
errorfile 503 /etc/haproxy/errors/503.http
errorfile 504 /etc/haproxy/errors/504.http
listen k8s-6443 # frontend for the kube-apiservers
bind 10.0.0.135:8443 # the backup node does not hold the VIP; bind 0.0.0.0:8443 or set net.ipv4.ip_nonlocal_bind=1 there if haproxy refuses to start
mode tcp
server k8s1 10.0.0.121:6443 check inter 3s fall 3 rise 5 # health check every three seconds
server k8s2 10.0.0.122:6443 check inter 3s fall 3 rise 5
server k8s3 10.0.0.123:6443 check inter 3s fall 3 rise 5
EOF
## keepalived check script for the haproxy process (quote the delimiter so the script is written literally)
cat > /etc/keepalived/check_apiserver.sh <<'EOF'
#!/bin/bash
err=0
for k in $(seq 1 3)
do
check_code=$(pgrep haproxy)
if [[ $check_code == "" ]]; then
err=$(expr $err + 1)
sleep 1
continue
else
err=0
break
fi
done
if [[ $err != "0" ]]; then
echo "systemctl stop keepalived"
/usr/bin/systemctl stop keepalived
exit 1
else
exit 0
fi
EOF
#Make the script executable
chmod +x /etc/keepalived/check_apiserver.sh
#Start the services
systemctl daemon-reload
systemctl enable --now haproxy
systemctl enable --now keepalived
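A quick check that the VIP and the 8443 frontend behave as expected (addresses as planned above):
ip addr show ens33 | grep 10.0.0.135    # on ha01: the VIP should be bound to ens33:0
curl -k https://10.0.0.135:8443/healthz # from any master: should return ok via haproxy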
6.2 Install kube-controller-manager on all master nodes
#Generate the service file
cat > /etc/systemd/system/kube-controller-manager.service <<EOF
[Unit]
Description=Kubernetes Controller Manager
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/bin/kube-controller-manager \
--v=2 \
--logtostderr=true \
--root-ca-file=/etc/kubernetes/pki/ca.pem \
--cluster-signing-cert-file=/etc/kubernetes/pki/ca.pem \
--cluster-signing-key-file=/etc/kubernetes/pki/ca-key.pem \
--service-account-private-key-file=/etc/kubernetes/pki/sa.key \
--kubeconfig=/etc/kubernetes/kube-controller-manager.kubeconfig \
--leader-elect=true \
--use-service-account-credentials=true \
--node-monitor-grace-period=40s \
--node-monitor-period=5s \
--pod-eviction-timeout=2m0s \
--controllers=*,bootstrapsigner,tokencleaner \
--allocate-node-cidrs=true \
--cluster-cidr=10.100.0.0/16 \
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem \
--node-cidr-mask-size=24
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
#Start the service
systemctl enable --now kube-controller-manager.service
6.3 Install kube-scheduler on all master nodes
#Generate the service file
cat > /etc/systemd/system/kube-scheduler.service <<EOF
[Unit]
Description=Kubernetes Scheduler
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/bin/kube-scheduler \
--v=2 \
--logtostderr=true \
--leader-elect=true \
--kubeconfig=/etc/kubernetes/kube-scheduler.kubeconfig
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
#Start the service
systemctl enable --now kube-scheduler.service
Configure kubectl on master01
#Copy admin.kubeconfig to ~/.kube/config
mkdir /root/.kube/ -p
cp /etc/kubernetes/admin.kubeconfig /root/.kube/config
#Verify the cluster status; output like the following means all master components are running normally
[root@k8s-master01 ~]# kubectl get cs
NAME STATUS MESSAGE ERROR
controller-manager Healthy ok
scheduler Healthy ok
etcd-2 Healthy {"health":"true","reason":""}
etcd-0 Healthy {"health":"true","reason":""}
etcd-1 Healthy {"health":"true","reason":""}
6.4 Install kubelet on all nodes
#Create the directory (on master01)
mkdir /opt/pki/kubernetes/kubelet -p
cd /opt/pki/kubernetes/kubelet
#Generate a random bootstrap token
a=`head -c 16 /dev/urandom | od -An -t x | tr -d ' ' | head -c6`
b=`head -c 16 /dev/urandom | od -An -t x | tr -d ' ' | head -c16`
#Generate the bootstrap token Secret and RBAC bindings
cat > bootstrap.secret.yaml <<EOF
apiVersion: v1
kind: Secret
metadata:
name: bootstrap-token-$a
namespace: kube-system
type: bootstrap.kubernetes.io/token
stringData:
description: "The default bootstrap token generated by 'kubelet '."
token-id: $a
token-secret: $b
usage-bootstrap-authentication: "true"
usage-bootstrap-signing: "true"
auth-extra-groups: system:bootstrappers:default-node-token,system:bootstrappers:worker,system:bootstrappers:ingress
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubelet-bootstrap
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node-bootstrapper
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: node-autoapprove-bootstrap
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:bootstrappers:default-node-token
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: node-autoapprove-certificate-rotation
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:certificates.k8s.io:certificatesigningrequests:selfnodeclient
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:kube-apiserver-to-kubelet
rules:
- apiGroups:
- ""
resources:
- nodes/proxy
- nodes/stats
- nodes/log
- nodes/spec
- nodes/metrics
verbs:
- "*"
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: system:kube-apiserver
namespace: ""
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:kube-apiserver-to-kubelet
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kube-apiserver
EOF
#Generate the bootstrap kubeconfig
kubectl config set-cluster kubernetes \
--certificate-authority=../ca/ca.pem \
--embed-certs=true \
--server=https://10.0.0.135:8443 \
--kubeconfig=bootstrap-kubelet.kubeconfig
kubectl config set-credentials tls-bootstrap-token-user \
--token=$a.$b \
--kubeconfig=bootstrap-kubelet.kubeconfig
kubectl config set-context tls-bootstrap-token-user@kubernetes \
--cluster=kubernetes \
--user=tls-bootstrap-token-user \
--kubeconfig=bootstrap-kubelet.kubeconfig
kubectl config use-context tls-bootstrap-token-user@kubernetes \
--kubeconfig=bootstrap-kubelet.kubeconfig
kubectl apply -f bootstrap.secret.yaml
#Copy the kubeconfig to the master and node machines
all="master01 master02 master03 node01 node02 node03"
for i in $all;do scp /opt/pki/kubernetes/kubelet/bootstrap-kubelet.kubeconfig $i:/etc/kubernetes;done
#Generate the kubelet configuration (run on every node; ens33 is environment-specific)
name=`ifconfig ens33 | awk 'NR==2{print $2}'`
hostname=`hostname`
kubernetes_ssl_dir="/etc/kubernetes/pki"
cat > /etc/kubernetes/kubelet.conf << EOF
KUBELET_OPTS="--hostname-override=${hostname} \\
--container-runtime=remote \\
--container-runtime-endpoint=unix:///run/containerd/containerd.sock \\
--kubeconfig=/etc/kubernetes/kubelet.kubeconfig \\
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.kubeconfig \\
--config=/etc/kubernetes/kubelet-config.yml \\
--cert-dir=${kubernetes_ssl_dir} "
EOF
##Generate the kubelet-config.yml file
cat > /etc/kubernetes/kubelet-config.yml << EOF
kind: KubeletConfiguration
apiVersion: kubelet.config.k8s.io/v1beta1
address: ${name}
port: 10250
readOnlyPort: 10255
cgroupDriver: systemd
clusterDNS:
- 10.200.0.2 ##must match the DNS IP chosen from the service CIDR
clusterDomain: cluster.local
failSwapOn: false
authentication:
anonymous:
enabled: false
webhook:
cacheTTL: 2m0s
enabled: true
x509:
clientCAFile: ${kubernetes_ssl_dir}/ca.pem
authorization:
mode: Webhook
webhook:
cacheAuthorizedTTL: 5m0s
cacheUnauthorizedTTL: 30s
evictionHard:
imagefs.available: 15%
memory.available: 100Mi
nodefs.available: 10%
nodefs.inodesFree: 5%
maxOpenFiles: 1000000
maxPods: 110
EOF
##Generate the kubelet.service file
cat > /usr/lib/systemd/system/kubelet.service << "EOF"
[Unit]
Description=Kubernetes Kubelet
After=docker.service
[Service]
EnvironmentFile=/etc/kubernetes/kubelet.conf
ExecStart=/usr/bin/kubelet $KUBELET_OPTS
Restart=on-failure
RestartSec=10
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
##Reload systemd and enable the service
systemctl daemon-reload
systemctl enable --now kubelet
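Because of the auto-approve ClusterRoleBindings created above, the kubelet bootstrap CSRs should be approved automatically; on master01 the nodes should register within a minute or so (they stay NotReady until the CNI plugin from section 7 is installed):
kubectl get csr
kubectl get nodes -o wide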
6.5 Install kube-proxy on all nodes
#Run on master01
#Create the directory
mkdir /opt/pki/kubernetes/kube-proxy/ -p
cd /opt/pki/kubernetes/kube-proxy/
#Generate the kubeconfig
kubectl -n kube-system create serviceaccount kube-proxy
kubectl create clusterrolebinding system:kube-proxy --clusterrole system:node-proxier --serviceaccount kube-system:kube-proxy
cat >kube-proxy-secret.yml<<EOF
apiVersion: v1
kind: Secret
metadata:
name: kube-proxy
namespace: kube-system
annotations:
kubernetes.io/service-account.name: "kube-proxy"
type: kubernetes.io/service-account-token
EOF
kubectl apply -f kube-proxy-secret.yml
JWT_TOKEN=$(kubectl -n kube-system get secret/kube-proxy \
--output=jsonpath='{.data.token}' | base64 -d)
kubectl config set-cluster kubernetes \
--certificate-authority=/etc/kubernetes/pki/ca.pem \
--embed-certs=true \
--server=https://10.0.0.135:8443 \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-credentials kubernetes \
--token=${JWT_TOKEN} \
--kubeconfig=kube-proxy.kubeconfig
kubectl config set-context kubernetes \
--cluster=kubernetes \
--user=kubernetes \
--kubeconfig=kube-proxy.kubeconfig
kubectl config use-context kubernetes \
--kubeconfig=kube-proxy.kubeconfig
#Copy the kubeconfig to all nodes
node="master01 master02 master03 node01 node02 node03"
for i in $node;do
scp /opt/pki/kubernetes/kube-proxy/kube-proxy.kubeconfig $i:/etc/kubernetes
done
#Generate the service file on all nodes
cat > /etc/systemd/system/kube-proxy.service <<EOF
[Unit]
Description=Kubernetes Kube Proxy
Documentation=https://github.com/kubernetes/kubernetes
After=network.target
[Service]
ExecStart=/usr/bin/kube-proxy \
--config=/etc/kubernetes/kube-proxy.conf \
--v=2
Restart=always
RestartSec=10s
[Install]
WantedBy=multi-user.target
EOF
#Generate the kube-proxy configuration on all nodes (ens33 is environment-specific)
a=`ifconfig ens33 | awk 'NR==2{print $2}'`
cat > /etc/kubernetes/kube-proxy.conf <<EOF
apiVersion: kubeproxy.config.k8s.io/v1alpha1
bindAddress: $a
clientConnection:
acceptContentTypes: ""
burst: 10
contentType: application/vnd.kubernetes.protobuf
kubeconfig: /etc/kubernetes/kube-proxy.kubeconfig
qps: 5
clusterCIDR: 10.100.0.0/16
configSyncPeriod: 15m0s
conntrack:
max: null
maxPerCore: 32768
min: 131072
tcpCloseWaitTimeout: 1h0m0s
tcpEstablishedTimeout: 24h0m0s
enableProfiling: false
healthzBindAddress: 0.0.0.0:10256
hostnameOverride: "$a"
iptables:
masqueradeAll: false
masqueradeBit: 14
minSyncPeriod: 0s
syncPeriod: 30s
ipvs:
masqueradeAll: true
minSyncPeriod: 5s
scheduler: "rr"
syncPeriod: 30s
kind: KubeProxyConfiguration
metricsBindAddress: 127.0.0.1:10249
mode: "ipvs"
nodePortAddresses: null
oomScoreAdj: -999
portRange: ""
udpIdleTimeout: 250ms
EOF
#Start the service
systemctl daemon-reload
systemctl enable --now kube-proxy.service
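With mode "ipvs", kube-proxy programs IPVS virtual servers instead of iptables chains; a quick check on any node (ipvsadm was installed in section 2.6) should show the 10.200.0.1:443 virtual server pointing at the three apiservers:
ipvsadm -Ln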
root@master01:~# kubectl get nodes
NAME STATUS ROLES AGE VERSION
10.0.0.121 Ready <none> 27m v1.25.4
10.0.0.122 Ready <none> 27m v1.25.4
10.0.0.123 Ready <none> 27m v1.25.4
10.0.0.124 Ready <none> 27m v1.25.4
10.0.0.125 Ready <none> 27m v1.25.4
10.0.0.126 Ready <none> 27m v1.25.4
#Label the nodes
kubectl label nodes 10.0.0.121 node-role.kubernetes.io/master=master01
kubectl label nodes 10.0.0.122 node-role.kubernetes.io/master=master02
kubectl label nodes 10.0.0.123 node-role.kubernetes.io/master=master03
kubectl label nodes 10.0.0.124 node-role.kubernetes.io/node=node01
kubectl label nodes 10.0.0.125 node-role.kubernetes.io/node=node02
kubectl label nodes 10.0.0.126 node-role.kubernetes.io/node=node03
7. Install the Calico network plugin
https://github.com/projectcalico/calico/tree/v3.24.5/manifests
vim calico.yaml
#Edit the configuration: set the pod CIDR
- name: CALICO_IPV4POOL_CIDR
value: "10.100.0.0/16"
kubectl apply -f calico.yaml
root@master01:~# kubectl get pods -A -o wide
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
kube-system calico-kube-controllers-798cc86c47-7msbp 1/1 Running 0 8m11s 10.100.178.65 10.0.0.124 <none> <none>
kube-system calico-node-bt4hm 1/1 Running 0 8m11s 10.0.0.123 10.0.0.123 <none> <none>
kube-system calico-node-cng8w 1/1 Running 0 8m11s 10.0.0.125 10.0.0.125 <none> <none>
kube-system calico-node-f8cvd 1/1 Running 0 8m11s 10.0.0.126 10.0.0.126 <none> <none>
kube-system calico-node-frmhz 1/1 Running 0 8m11s 10.0.0.124 10.0.0.124 <none> <none>
kube-system calico-node-hbslc 1/1 Running 0 8m11s 10.0.0.121 10.0.0.121 <none> <none>
kube-system calico-node-zszns 1/1 Running 0 8m11s 10.0.0.122 10.0.0.122 <none> <none>
#Install the calicoctl client tool
#Create its configuration file
mkdir /etc/calico -p
cat >/etc/calico/calicoctl.cfg <<EOF
apiVersion: projectcalico.org/v3
kind: CalicoAPIConfig
metadata:
spec:
datastoreType: "kubernetes"
kubeconfig: "/root/.kube/config"
EOF
wget https://github.com/projectcalico/calicoctl/releases/download/v3.21.5/calicoctl-linux-amd64
chmod +x calicoctl-linux-amd64 && mv calicoctl-linux-amd64 /usr/bin/calicoctl
#Verify
root@master01:~# calicoctl node status
Calico process is running.
IPv4 BGP status
+--------------+-------------------+-------+----------+-------------+
| PEER ADDRESS | PEER TYPE | STATE | SINCE | INFO |
+--------------+-------------------+-------+----------+-------------+
| 10.0.0.123 | node-to-node mesh | up | 06:45:42 | Established |
| 10.0.0.124 | node-to-node mesh | up | 06:45:42 | Established |
| 10.0.0.125 | node-to-node mesh | up | 06:45:42 | Established |
| 10.0.0.126 | node-to-node mesh | up | 06:45:42 | Established |
| 10.0.0.122 | node-to-node mesh | up | 06:49:36 | Established |
+--------------+-------------------+-------+----------+-------------+
IPv6 BGP status
No IPv6 peers found.
8. Install CoreDNS
#Download: https://github.com/coredns/deployment/blob/master/kubernetes/coredns.yaml.sed
vim coredns.yaml
#Edit the configuration
Corefile: |
.:53 {
errors
health {
lameduck 5s
}
ready
kubernetes cluster.local in-addr.arpa ip6.arpa { #change this line: use the cluster domain
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
forward . 114.114.114.114 { #upstream DNS server for external names
max_concurrent 1000
}
cache 30
loop
reload
loadbalance
}
clusterIP: 10.200.0.2 #set this to the cluster DNS IP (must match clusterDNS in kubelet-config.yml)
#Deploy
kubectl apply -f coredns.yaml
root@master01:~# kubectl get pod -A |grep core
kube-system coredns-54b8c69d54-2rnms 1/1 Running 0 3h8m
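A throwaway pod is the simplest end-to-end DNS check (busybox:1.28 is only an example image with a well-behaved nslookup):
kubectl run dns-test --rm -it --restart=Never --image=busybox:1.28 -- nslookup kubernetes.default.svc.cluster.local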
9. Install metrics-server
#Download
wget https://github.com/kubernetes-sigs/metrics-server/releases/download/v0.6.1/components.yaml
#Edit the configuration
vim components.yaml
containers:
- args:
- --cert-dir=/tmp
- --secure-port=4443
- --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
- --kubelet-use-node-status-port
- --metric-resolution=15s
- --kubelet-insecure-tls
- --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.pem
- --requestheader-username-headers=X-Remote-User
- --requestheader-group-headers=X-Remote-Group
- --requestheader-extra-headers-prefix=X-Remote-Extra-
image: registry.cn-hangzhou.aliyuncs.com/google_containers/metrics-server:v0.6.1
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /tmp
name: tmp-dir
- mountPath: /etc/kubernetes/pki
name: ca-ssl
nodeSelector:
kubernetes.io/os: linux
priorityClassName: system-cluster-critical
serviceAccountName: metrics-server
volumes:
- emptyDir: {}
name: tmp-dir
- name: ca-ssl
hostPath:
path: /etc/kubernetes/pki
#Deploy
kubectl apply -f components.yaml
#Verify:
root@master01:~# kubectl top nodes
NAME CPU(cores) CPU% MEMORY(bytes) MEMORY%
10.0.0.121 390m 9% 1791Mi 22%
10.0.0.122 374m 9% 1158Mi 14%
10.0.0.123 317m 7% 1108Mi 14%
10.0.0.124 234m 5% 798Mi 10%
10.0.0.125 202m 5% 711Mi 9%
10.0.0.126 200m 5% 659Mi 8%