Steps:
- System environment: tune kernel parameters, load kernel modules, add the docker-ce and kubernetes package repositories, install packages
- Initialization configuration
- Cluster installation
- Network plugin installation
System environment preparation
OS: Ubuntu 18.04
cat > /etc/sysctl.d/66-k8s.conf <<EOF
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
vm.swappiness=0
EOF
sysctl -f /etc/sysctl.d/66-k8s.conf
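To confirm the kernel parameters took effect, you can read them back (an optional check, not part of the original steps). If the net.bridge.* keys are reported as unknown, the br_netfilter module may need to be loaded first.
# Optional check: the values should match the file written above
sysctl net.bridge.bridge-nf-call-iptables net.bridge.bridge-nf-call-ip6tables net.ipv4.ip_forward vm.swappiness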
# Load the IPVS and conntrack kernel modules
modprobe ip_vs
modprobe ip_vs_rr
modprobe ip_vs_wrr
modprobe ip_vs_sh
modprobe nf_conntrack
# On older kernels nf_conntrack_ipv4 is a separate module; load it if present and record the name for the modules-load config below
modinfo nf_conntrack_ipv4 > /dev/null 2>&1 && modprobe nf_conntrack_ipv4 && export nf_conntrack_ipv4="nf_conntrack_ipv4"
cat > /etc/modules-load.d/ip_vs.conf <<EOF
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
$nf_conntrack_ipv4
EOF
cat > /etc/modprobe.d/ip_vs.conf <<-"EOF"
options ip_vs conn_tab_bits=20
EOF
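An optional sanity check that the modules are actually loaded (not part of the original steps):
# Optional check: list the loaded IPVS and conntrack modules
lsmod | grep -E 'ip_vs|nf_conntrack'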
# Add the GPG keys for the Docker CE and Kubernetes mirror repositories
curl -s https://mirrors.ustc.edu.cn/docker-ce/linux/ubuntu/gpg | apt-key add -
curl -s https://mirrors.aliyun.com/kubernetes/apt/doc/apt-key.gpg | apt-key add -
# Add the apt source lists (Docker CE from the TUNA mirror, Kubernetes from the Aliyun mirror)
test -d /etc/apt/sources.list.d && echo "deb [arch=amd64] https://mirrors.tuna.tsinghua.edu.cn/docker-ce/linux/ubuntu/ $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list
test -d /etc/apt/sources.list.d && echo "deb https://mirrors.aliyun.com/kubernetes/apt/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
apt update
apt-get install -y ipvsadm docker-ce kubeadm ipset etcd-client
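Before continuing, it can help to confirm the installed versions (an optional check; the flags shown are standard docker/kubeadm options):
# Optional check: confirm tool versions
docker version
kubeadm version -o short
kubelet --version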
# Optional: pull the images manually in advance to speed up the installation later
s=$(kubeadm config images list)
n=$(echo $s | sed -r 's,k8s.gcr.io/,,g')
#for i in $n; do docker pull gcrxio/$i ; docker tag gcrxio/$i k8s.gcr.io/$i ; done
for i in $n; do docker pull gcr.azk8s.cn/google_containers/$i ; docker tag gcr.azk8s.cn/google_containers/$i k8s.gcr.io/$i ; done
#apt-mark hold kubelet kubeadm
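If you pre-pulled the images, an optional check confirms they are now tagged under k8s.gcr.io so kubeadm can use the local copies:
# Optional check: list the locally tagged control-plane images
docker images | grep k8s.gcr.io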
Configure the cluster initialization settings
NODEIPV4=$(ip -4 route get 8.8.8.8 | head -1 | awk '{print $7}')
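The command above takes the "src" field from a route lookup towards 8.8.8.8. Since this address ends up in the API server certificate SANs below, it is worth echoing it before generating the config (an optional check):
# Optional check: make sure the detected node IP is correct and not empty
echo "NODEIPV4=$NODEIPV4"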
cat > kube.yaml <<-EOF
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: InitConfiguration
# allow the master node to run pods
nodeRegistration:
  taints:
  - effect: PreferNoSchedule
    key: node-role.kubernetes.io/master
  kubeletExtraArgs:
    pod-infra-container-image: gcr.azk8s.cn/google_containers/pause-amd64:3.1
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
mode: "ipvs"
---
apiVersion: kubeadm.k8s.io/v1beta1
kind: ClusterConfiguration
#kubernetesVersion: stable #v1.15.1
controllerManager:
  extraArgs:
    address: 0.0.0.0
scheduler:
  extraArgs:
    address: 0.0.0.0
networking:
  podSubnet: 10.244.0.0/16
  serviceSubnet: 10.96.0.0/12
apiServer:
  certSANs:
  - k8s
  - kubernetes
  - kubernetes.default
  - kubernetes.default.svc
  - kubernetes.default.svc.cluster
  - kubernetes.default.svc.cluster.local
  - 10.96.0.1
  - 127.0.0.1
  - $NODEIPV4
imageRepository: "gcr.azk8s.cn/google_containers"
EOF
# Validate the configuration file with a dry run
kubeadm init --config kube.yaml --dry-run
Initialize the cluster
kubeadm init --config kube.yaml
# Configure the kubectl command-line tool
mkdir -p ~/.kube
ln -s /etc/kubernetes/admin.conf ~/.kube/config
# Test the command-line tool
kubectl get nodes -o wide
# The node shows NotReady because the CNI network plugin has not been installed yet
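You can also inspect the system pods at this point; typically the coredns pods stay Pending until a CNI plugin is installed (an optional check, not part of the original steps):
# Optional check: coredns remains Pending until a CNI plugin is installed
kubectl get pods -n kube-system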
CNI network plugin installation
Install any one of the following network plugins.
Reference: https://kubernetes.io/docs/setup/independent/create-cluster-kubeadm/
# flannel
curl -O https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml
POD_CIDR="10.244.0.0/16"
sed -i -e "s?10.244.0.0/16?$POD_CIDR?g" kube-flannel.yml
sed -i -e 's?quay.io?quay.azk8s.cn?g' -e 's?k8s.gcr.io?gcr.azk8s.cn/google-containers?g' kube-flannel.yml
kubectl create -f kube-flannel.yml
# cilium
cilium_version=v1.5
curl -O https://raw.githubusercontent.com/cilium/cilium/${cilium_version}/examples/kubernetes/1.15/cilium.yaml
kubectl create -f cilium.yaml
# calico
calico_version=v3.9
POD_CIDR="10.244.0.0/16"
curl -O https://docs.projectcalico.org/${calico_version}/manifests/calico-typha.yaml
sed -i -e "s?192.168.0.0/16?$POD_CIDR?g" calico-typha.yaml
#sed -i -e "s?replicas: 1?replicas: 3?g" calico-typha.yaml
kubectl apply -f calico-typha.yaml
kubectl get nodes -o wide
# The node now shows Ready; node initialization is complete
kubectl get pods -o wide --all-namespaces
# All pods show Running; everything is healthy and the installation is complete
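Because kube-proxy was configured with mode: "ipvs" and ipvsadm was installed earlier, you can also confirm that IPVS virtual servers have been created (an optional check):
# Optional check: kube-proxy should have programmed IPVS rules, e.g. for the kubernetes service at 10.96.0.1:443
ipvsadm -Ln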
Next steps:
You can test-deploy services such as nginx or redis, and go on to explore ingress, istio, and more.
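A minimal smoke test could look like the sketch below; the deployment name, image, and NodePort exposure are illustrative choices, not from the original text:
# Hypothetical smoke test: deploy nginx and expose it via a NodePort
kubectl create deployment nginx --image=nginx
kubectl expose deployment nginx --port=80 --type=NodePort
kubectl get pods,svc -l app=nginx -o wide
# then curl the node IP on the reported NodePort to verify nginx responds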