Base Environment
OS: Ubuntu 16.04
Hostname | IP | Role |
---|---|---|
node01 | 192.168.175.61 | master and etcd |
node02 | 192.168.175.62 | master and etcd |
node03 | 192.168.175.63 | master and etcd |
node04 | 192.168.175.64 | node |
VIP | 192.168.175.60 | |
Software versions:
- docker v18.05.0-ce
- kubelet v1.10.3
- kubectl v1.10.3
- kubeadm v1.10.3
- etcd v3.3.5
Environment Initialization
Set the hostname on each node
hostnamectl set-hostname node01
hostnamectl set-hostname node02
hostnamectl set-hostname node03
hostnamectl set-hostname node04
Configure host name resolution (/etc/hosts on all four nodes)
cat <<EOF > /etc/hosts
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
192.168.175.61 node01
192.168.175.62 node02
192.168.175.63 node03
192.168.175.64 node04
EOF
Passwordless SSH login
Configure passwordless SSH from node01
ssh-keygen #press Enter at every prompt
ssh-copy-id node02
ssh-copy-id node03
ssh-copy-id node04
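Optionally, verify that key-based login from node01 works; each command should print the remote hostname without asking for a password:
for h in node02 node03 node04; do ssh $h hostname; done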
Baseline configuration
On all four hosts, set the kernel parameters, add the Kubernetes apt source, disable swap, and configure NTP (reboot once after finishing).
swapoff -a
sed -i 's/.*swap.*/#&/' /etc/fstab #comment out the swap entry to disable swap permanently
modprobe br_netfilter
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl -p /etc/sysctl.d/k8s.conf
ls /proc/sys/net/bridge
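Optionally, confirm the bridge parameters are in effect (both should report 1):
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables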
#https://opsx.alibaba.com/mirror Aliyun mirror index
apt-get update && apt-get install -y apt-transport-https
curl https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF >/etc/apt/sources.list.d/kubernetes.list
deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main
EOF
apt-get update
apt-get install -y kubelet kubeadm kubectl
systemctl enable ntpdate.service
echo '*/30 * * * * /usr/sbin/ntpdate time7.aliyun.com >/dev/null 2>&1' > /tmp/crontab2.tmp
crontab /tmp/crontab2.tmp
systemctl start ntpdate.service
echo "* soft nofile 65536" >> /etc/security/limits.conf
echo "* hard nofile 65536" >> /etc/security/limits.conf
echo "* soft nproc 65536" >> /etc/security/limits.conf
echo "* hard nproc 65536" >> /etc/security/limits.conf
echo "* soft memlock unlimited" >> /etc/security/limits.conf
echo "* hard memlock unlimited" >> /etc/security/limits.conf
Install and configure keepalived
Install keepalived
apt install -y keepalived
systemctl enable keepalived
keepalived.conf on node01
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
router_id LVS_k8s
}
vrrp_script CheckK8sMaster {
script "curl -k https://192.168.175.60:6443"
interval 3
weight 3
timeout 9
}
vrrp_instance VI_1 {
state MASTER
interface ens160
virtual_router_id 61
priority 100
advert_int 1
mcast_src_ip 192.168.175.61
nopreempt
authentication {
auth_type PASS
auth_pass sqP05dQgMSlzrxHj
}
unicast_peer {
192.168.175.62
192.168.175.63
}
virtual_ipaddress {
192.168.175.60/24
}
track_script {
CheckK8sMaster
}
}
EOF
keepalived.conf on node02
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
router_id LVS_k8s
}
vrrp_script CheckK8sMaster {
script "curl -k https://192.168.175.60:6443"
interval 3
weight 3
timeout 9
}
vrrp_instance VI_1 {
state BACKUP
interface ens160
virtual_router_id 61
priority 90
advert_int 1
mcast_src_ip 192.168.175.62
nopreempt
authentication {
auth_type PASS
auth_pass sqP05dQgMSlzrxHj
}
unicast_peer {
192.168.175.61
192.168.175.63
}
virtual_ipaddress {
192.168.175.60/24
}
track_script {
CheckK8sMaster
}
}
EOF
keepalived.conf on node03
cat <<EOF > /etc/keepalived/keepalived.conf
global_defs {
router_id LVS_k8s
}
vrrp_script CheckK8sMaster {
script "curl -k https://192.168.175.60:6443"
interval 3
weight 3
timeout 9
}
vrrp_instance VI_1 {
state BACKUP
interface ens160
virtual_router_id 61
priority 80
advert_int 1
mcast_src_ip 192.168.175.63
nopreempt
authentication {
auth_type PASS
auth_pass sqP05dQgMSlzrxHj
}
unicast_peer {
192.168.175.61
192.168.175.62
}
virtual_ipaddress {
192.168.175.60/24
}
track_script {
CheckK8sMaster
}
}
EOF
Start keepalived
systemctl restart keepalived
You should now see the VIP bound to node01, and the VIP address should respond to ping:
root@node01:~# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens160: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP group default qlen 1000
link/ether 00:0c:29:0b:06:c2 brd ff:ff:ff:ff:ff:ff
inet 192.168.175.61/24 brd 192.168.175.255 scope global ens160
valid_lft forever preferred_lft forever
inet 192.168.175.60/24 scope global secondary ens160
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe0b:6c2/64 scope link
valid_lft forever preferred_lft forever
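A minimal reachability check for the VIP from any host on the network:
ping -c 3 192.168.175.60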
Create the etcd certificates
(run on node01 only)
Set up the cfssl environment
wget https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
wget https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64
chmod +x cfssl_linux-amd64
mv cfssl_linux-amd64 /usr/local/bin/cfssl
chmod +x cfssljson_linux-amd64
mv cfssljson_linux-amd64 /usr/local/bin/cfssljson
chmod +x cfssl-certinfo_linux-amd64
mv cfssl-certinfo_linux-amd64 /usr/local/bin/cfssl-certinfo
export PATH=/usr/local/bin:$PATH
Create the CA configuration files (the IPs configured below are the etcd node IPs)
mkdir /root/ssl
cd /root/ssl
cat > ca-config.json <<EOF
{
"signing": {
"default": {
"expiry": "8760h"
},
"profiles": {
"kubernetes-Soulmate": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth"
],
"expiry": "8760h"
}
}
}
}
EOF
cat > ca-csr.json <<EOF
{
"CN": "kubernetes-Soulmate",
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "shanghai",
"L": "shanghai",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cfssl gencert -initca ca-csr.json | cfssljson -bare ca
cat > etcd-csr.json <<EOF
{
"CN": "etcd",
"hosts": [
"127.0.0.1",
"192.168.175.61",
"192.168.175.62",
"192.168.175.63"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [
{
"C": "CN",
"ST": "shanghai",
"L": "shanghai",
"O": "k8s",
"OU": "System"
}
]
}
EOF
cfssl gencert -ca=ca.pem \
-ca-key=ca-key.pem \
-config=ca-config.json \
-profile=kubernetes-Soulmate etcd-csr.json | cfssljson -bare etcd
Distribute the etcd certificates from node01 to node02 and node03
mkdir -p /etc/etcd/ssl #run this on all three machines
cp etcd.pem etcd-key.pem ca.pem /etc/etcd/ssl/
scp -r /etc/etcd/ssl/*.pem node02:/etc/etcd/ssl/
scp -r /etc/etcd/ssl/*.pem node03:/etc/etcd/ssl/
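Optionally, inspect the issued certificate with cfssl-certinfo (installed earlier) to confirm the hosts field covers all three etcd nodes:
cfssl-certinfo -cert /etc/etcd/ssl/etcd.pem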
Install and configure etcd
(on all three master nodes)
Install etcd
apt install etcd -y
etcd.service on node01
cat <<EOF >/etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd --name node01 --cert-file=/etc/etcd/ssl/etcd.pem --key-file=/etc/etcd/ssl/etcd-key.pem --peer-cert-file=/etc/etcd/ssl/etcd.pem --peer-key-file=/etc/etcd/ssl/etcd-key.pem --trusted-ca-file=/etc/etcd/ssl/ca.pem --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem --initial-advertise-peer-urls https://192.168.175.61:2380 --listen-peer-urls https://192.168.175.61:2380 --listen-client-urls https://192.168.175.61:2379,http://127.0.0.1:2379 --advertise-client-urls https://192.168.175.61:2379 --initial-cluster-token etcd-cluster-0 --initial-cluster node01=https://192.168.175.61:2380,node02=https://192.168.175.62:2380,node03=https://192.168.175.63:2380 --initial-cluster-state new --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
etcd.service on node02
cat <<EOF >/etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd --name node02 --cert-file=/etc/etcd/ssl/etcd.pem --key-file=/etc/etcd/ssl/etcd-key.pem --peer-cert-file=/etc/etcd/ssl/etcd.pem --peer-key-file=/etc/etcd/ssl/etcd-key.pem --trusted-ca-file=/etc/etcd/ssl/ca.pem --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem --initial-advertise-peer-urls https://192.168.175.62:2380 --listen-peer-urls https://192.168.175.62:2380 --listen-client-urls https://192.168.175.62:2379,http://127.0.0.1:2379 --advertise-client-urls https://192.168.175.62:2379 --initial-cluster-token etcd-cluster-0 --initial-cluster node01=https://192.168.175.61:2380,node02=https://192.168.175.62:2380,node03=https://192.168.175.63:2380 --initial-cluster-state new --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
etcd.service on node03
cat <<EOF >/etc/systemd/system/etcd.service
[Unit]
Description=Etcd Server
After=network.target
After=network-online.target
Wants=network-online.target
Documentation=https://github.com/coreos
[Service]
Type=notify
WorkingDirectory=/var/lib/etcd/
ExecStart=/usr/bin/etcd --name node03 --cert-file=/etc/etcd/ssl/etcd.pem --key-file=/etc/etcd/ssl/etcd-key.pem --peer-cert-file=/etc/etcd/ssl/etcd.pem --peer-key-file=/etc/etcd/ssl/etcd-key.pem --trusted-ca-file=/etc/etcd/ssl/ca.pem --peer-trusted-ca-file=/etc/etcd/ssl/ca.pem --initial-advertise-peer-urls https://192.168.175.63:2380 --listen-peer-urls https://192.168.175.63:2380 --listen-client-urls https://192.168.175.63:2379,http://127.0.0.1:2379 --advertise-client-urls https://192.168.175.63:2379 --initial-cluster-token etcd-cluster-0 --initial-cluster node01=https://192.168.175.61:2380,node02=https://192.168.175.62:2380,node03=https://192.168.175.63:2380 --initial-cluster-state new --data-dir=/var/lib/etcd
Restart=on-failure
RestartSec=5
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target
EOF
Enable etcd to start on boot (the etcd cluster needs at least two members up before it starts successfully; if startup fails, check the system logs)
systemctl daemon-reload
systemctl enable etcd
systemctl start etcd
systemctl status etcd
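If etcd fails to start, the relevant logs can be checked, for example:
journalctl -u etcd --no-pager | tail -n 50
tail -n 50 /var/log/syslog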
Run the following command on each of the three etcd nodes to check cluster health
etcdctl --endpoints=https://192.168.175.61:2379,https://192.168.175.62:2379,https://192.168.175.63:2379 \
--ca-file=/etc/etcd/ssl/ca.pem \
--cert-file=/etc/etcd/ssl/etcd.pem \
--key-file=/etc/etcd/ssl/etcd-key.pem cluster-health
Upgrade etcd
(the version installed by apt is v2.2.5, while Kubernetes v1.10 requires at least etcd 3.1)
Download the latest release from the official site
wget https://github.com/coreos/etcd/releases/download/v3.3.5/etcd-v3.3.5-linux-amd64.tar.gz
tar zxf etcd-v3.3.5-linux-amd64.tar.gz
cp etcd-v3.3.5-linux-amd64/etcd /usr/bin/etcd
cp etcd-v3.3.5-linux-amd64/etcdctl /usr/bin/etcdctl
Add the following line to /etc/profile, then reboot the server
export ETCDCTL_API=3
Restart the etcd service and check the cluster status
root@k8s-n2:~/k8s# etcdctl member list
aa76456e260f7bd1, started, node02, https://192.168.175.62:2380, https://192.168.175.62:2379
d12950b45efa96da, started, node03, https://192.168.175.63:2380, https://192.168.175.63:2379
e598ba1c84356928, started, node01, https://192.168.175.61:2380, https://192.168.175.61:2379
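Note that with ETCDCTL_API=3 the TLS flags differ from the v2 command used above; a minimal equivalent health check, assuming the certificate paths and endpoints configured earlier, could be:
ETCDCTL_API=3 etcdctl --endpoints=https://192.168.175.61:2379,https://192.168.175.62:2379,https://192.168.175.63:2379 \
--cacert=/etc/etcd/ssl/ca.pem \
--cert=/etc/etcd/ssl/etcd.pem \
--key=/etc/etcd/ssl/etcd-key.pem endpoint health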
Install Docker
curl -fsSL "https://get.docker.com/" | sh
Configure a Docker registry mirror
Add the Aliyun registry mirror by modifying the configuration file /lib/systemd/system/docker.service
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2375 -H unix:///var/run/docker.sock --registry-mirror=https://ms3cfraz.mirror.aliyuncs.com
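Alternatively, the same mirror can be set through /etc/docker/daemon.json instead of editing the unit file (a sketch using the mirror address above):
cat <<EOF > /etc/docker/daemon.json
{
  "registry-mirrors": ["https://ms3cfraz.mirror.aliyuncs.com"]
}
EOF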
Start Docker
Enable it to start on boot
systemctl daemon-reload
systemctl restart docker
systemctl enable docker
systemctl status docker
Configure kubeadm
Modify the kubelet configuration file on all nodes
/etc/systemd/system/kubelet.service.d/10-kubeadm.conf
#add this line
Environment="KUBELET_CGROUP_ARGS=--cgroup-driver=cgroupfs"
#add this line
Environment="KUBELET_EXTRA_ARGS=--v=2 --fail-swap-on=false --pod-infra-container-image=registry.cn-hangzhou.aliyuncs.com/k8sth/pause-amd64:3.0"
After modifying the configuration file, be sure to reload the configuration on all nodes
systemctl daemon-reload
systemctl enable kubelet
systemctl restart kubelet
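To confirm the drop-in was picked up after the reload, for example:
systemctl cat kubelet | grep KUBELET_EXTRA_ARGS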
Initialize the cluster
Create the cluster initialization config file on node01, node02 and node03 (the same file on all three)
cat <<EOF > config.yaml
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
etcd:
endpoints:
- https://192.168.175.61:2379
- https://192.168.175.62:2379
- https://192.168.175.63:2379
caFile: /etc/etcd/ssl/ca.pem
certFile: /etc/etcd/ssl/etcd.pem
keyFile: /etc/etcd/ssl/etcd-key.pem
dataDir: /var/lib/etcd
networking:
podSubnet: 10.244.0.0/16
kubernetesVersion: 1.10.0
api:
advertiseAddress: "192.168.175.60"
token: "b99a00.a144ef80536d4344"
tokenTTL: "0s"
apiServerCertSANs:
- node01
- node02
- node03
- 192.168.175.61
- 192.168.175.62
- 192.168.175.63
- 192.168.175.64
- 192.168.175.60
featureGates:
CoreDNS: true
imageRepository: "registry.cn-hangzhou.aliyuncs.com/k8sth"
EOF
Initialize the cluster on node01 first.
The config file defines the pod network as 10.244.0.0/16.
As kubeadm init --help shows, the default service subnet is 10.96.0.0/12.
/etc/systemd/system/kubelet.service.d/10-kubeadm.conf uses the default DNS address cluster-dns=10.96.0.10.
kubeadm init --config config.yaml
If the initialization fails, clean up as follows
kubeadm reset
#or
rm -rf /etc/kubernetes/*.conf
rm -rf /etc/kubernetes/manifests/*.yaml
docker ps -a |awk '{print $1}' |xargs docker rm -f
systemctl stop kubelet
After kubeadm reset, the bridge device cni0 and the interface flannel.1 created earlier by flannel still exist. To restore the environment completely to its initial state, delete these two devices with the following commands:
# ifconfig cni0 down
# brctl delbr cni0
# ip link delete flannel.1
A successful initialization ends like this
Your Kubernetes master has initialized successfully!
To start using your cluster, you need to run the following as a regular user:
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
https://kubernetes.io/docs/concepts/cluster-administration/addons/
You can now join any number of machines by running the following on each node
as root:
kubeadm join 192.168.175.60:6443 --token b99a00.a144ef80536d4344 --discovery-token-ca-cert-hash sha256:a2551d730098fe59c8f0f9d77e07ab9e1ceb2d205678e4780826e8b7cc32aacf
Run the following commands on node01
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
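At this point a quick control-plane health check can be done, for example:
kubectl cluster-info
kubectl get componentstatuses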
Distribute the certificates and keys generated by kubeadm to node02 and node03
scp -r /etc/kubernetes/pki node03:/etc/kubernetes/
scp -r /etc/kubernetes/pki node02:/etc/kubernetes/
Deploy the flannel network; this only needs to be run on node01
wget https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
#image version: quay.io/coreos/flannel:v0.10.0-amd64
kubectl create -f kube-flannel.yml
Check the result
[root@node01 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
node01 Ready master 31m v1.10.3
[root@node01 ~]# kubectl get pods --all-namespaces
NAMESPACE NAME READY STATUS RESTARTS AGE
kube-system coredns-7997f8864c-4x7mg 1/1 Running 0 29m
kube-system coredns-7997f8864c-zfcck 1/1 Running 0 29m
kube-system kube-apiserver-node01 1/1 Running 0 29m
kube-system kube-controller-manager-node01 1/1 Running 0 30m
kube-system kube-flannel-ds-hw2xb 1/1 Running 0 1m
kube-system kube-proxy-s265b 1/1 Running 0 29m
kube-system kube-scheduler-node01 1/1 Running 0 30m
Deploy the dashboard
The contents of kubernetes-dashboard.yaml are as follows
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Configuration to deploy release version of the Dashboard UI compatible with
# Kubernetes 1.8.
#
# Example usage: kubectl create -f <this_file>
# ------------------- Dashboard Secret ------------------- #
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kube-system
type: Opaque
---
# ------------------- Dashboard Service Account ------------------- #
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Role & Role Binding ------------------- #
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
rules:
# Allow Dashboard to create 'kubernetes-dashboard-key-holder' secret.
- apiGroups: [""]
resources: ["secrets"]
verbs: ["create"]
# Allow Dashboard to create 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["create"]
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics from heapster.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:"]
verbs: ["get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: kubernetes-dashboard-minimal
namespace: kube-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard-minimal
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kube-system
---
# ------------------- Dashboard Deployment ------------------- #
kind: Deployment
apiVersion: apps/v1beta2
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
nodeSelector:
node-role.kubernetes.io/master: ""
containers:
- name: kubernetes-dashboard
image: registry.cn-hangzhou.aliyuncs.com/k8sth/kubernetes-dashboard-amd64:v1.8.3
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
# ------------------- Dashboard Service ------------------- #
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kube-system
spec:
type: NodePort
ports:
- port: 443
targetPort: 8443
nodePort: 30000
selector:
k8s-app: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
namespace: kube-system
Deploy it
kubectl create -f kubernetes-dashboard.yaml
Get the token and use it to log in
kubectl -n kube-system describe secret $(kubectl -n kube-system get secret | grep admin-user | awk '{print $1}')
Open the dashboard in Firefox and enter the token to log in
https://192.168.175.61:30000/#!/login
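Optionally, confirm the NodePort answers before opening the browser (-k skips certificate verification; any node IP works):
curl -k https://192.168.175.61:30000/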
Run the same initialization on node02 and node03
kubeadm init --config config.yaml
#the output is exactly the same as on node01
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
Check node and pod status
[root@node01 ~]# kubectl get nodes
NAME STATUS ROLES AGE VERSION
node01 Ready master 5h v1.10.3
node02 Ready master 2h v1.10.3
node03 Ready master 1h v1.10.3
[root@node01 ~]# kubectl get pods --all-namespaces -o wide
NAMESPACE NAME READY STATUS RESTARTS AGE IP NODE
kube-system coredns-7997f8864c-5bvlg 1/1 Running 0 6m 10.244.1.2 node02
kube-system coredns-7997f8864c-xbq2j 1/1 Running 0 6m 10.244.2.2 node03
kube-system kube-apiserver-node01 1/1 Running 3 5m 192.168.175.61 node01
kube-system kube-apiserver-node02 1/1 Running 0 1h 192.168.175.62 node02
kube-system kube-apiserver-node03 1/1 Running 0 1h 192.168.175.63 node03
kube-system kube-controller-manager-node01 1/1 Running 3 5m 192.168.175.61 node01
kube-system kube-controller-manager-node02 1/1 Running 0 1h 192.168.175.62 node02
kube-system kube-controller-manager-node03 1/1 Running 1 1h 192.168.175.63 node03
kube-system kube-flannel-ds-gwql9 1/1 Running 1 1h 192.168.175.61 node01
kube-system kube-flannel-ds-l8bfs 1/1 Running 1 1h 192.168.175.62 node02
kube-system kube-flannel-ds-xw5bv 1/1 Running 1 1h 192.168.175.63 node03
kube-system kube-proxy-cwlhw 1/1 Running 0 1h 192.168.175.63 node03
kube-system kube-proxy-jz9mk 1/1 Running 3 5h 192.168.175.61 node01
kube-system kube-proxy-zdbtc 1/1 Running 0 2h 192.168.175.62 node02
kube-system kube-scheduler-node01 1/1 Running 3 5m 192.168.175.61 node01
kube-system kube-scheduler-node02 1/1 Running 0 1h 192.168.175.62 node02
kube-system kube-scheduler-node03 1/1 Running 1 1h 192.168.175.63 node03
kube-system kubernetes-dashboard-7b44ff9b77-chdjp 1/1 Running 0 6m 10.244.2.3 node03
Allow the master nodes to run pods as well (by default masters do not run pods)
kubectl taint nodes --all node-role.kubernetes.io/master-
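If you later want the masters to stop scheduling ordinary pods again, the taint can be restored, for example on node01:
kubectl taint nodes node01 node-role.kubernetes.io/master=:NoSchedule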
Add a node
Add node04 to the cluster
Run the following command on node04 to join it to the cluster
root@node04:~# kubeadm join 192.168.175.60:6443 --token b99a00.a144ef80536d4344 --discovery-token-ca-cert-hash sha256:a2551d730098fe59c8f0f9d77e07ab9e1ceb2d205678e4780826e8b7cc32aacf
[preflight] Running pre-flight checks.
[WARNING SystemVerification]: docker version is greater than the most recently validated version. Docker version: 18.05.0-ce. Max validated version: 17.03
[WARNING FileExisting-crictl]: crictl not found in system path
Suggestion: go get github.com/kubernetes-incubator/cri-tools/cmd/crictl
[discovery] Trying to connect to API Server "192.168.175.60:6443"
[discovery] Created cluster-info discovery client, requesting info from "https://192.168.175.60:6443"
[discovery] Requesting info from "https://192.168.175.60:6443" again to validate TLS against the pinned public key
[discovery] Cluster info signature and contents are valid and TLS certificate validates against pinned roots, will use API Server "192.168.175.60:6443"
[discovery] Successfully established connection with API Server "192.168.175.60:6443"
This node has joined the cluster:
* Certificate signing request was sent to master and a response
was received.
* The Kubelet was informed of the new secure connection details.
Run 'kubectl get nodes' on the master to see this node join the cluster.
[root@node01 ~]# kubectl get node
NAME STATUS ROLES AGE VERSION
node01 Ready master 45m v1.10.0
node02 Ready master 15m v1.10.0
node03 Ready master 14m v1.10.0
node04 Ready <none> 13m v1.10.0