Installing Kubernetes

Author: 涮羊肉的灰太狼 | Published 2017-03-23 10:03

Environment:

192.168.10.101  node

192.168.10.102  master

192.168.10.103  node

1. Prepare the environment

The packages below are hosted on Google's servers, so you first need a way over the Great Firewall; follow this post: https://laod.cn/hosts/2017-google-hosts.html

2. Add the yum repository below

cat <<EOF > /etc/yum.repos.d/kubernetes.repo

[kubernetes]

name=Kubernetes

baseurl=http://yum.kubernetes.io/repos/kubernetes-el7-x86_64

enabled=1

gpgcheck=1

repo_gpgcheck=1

gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg

EOF
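Before installing anything, it is worth confirming that yum can actually see the new repo. A quick check with plain yum commands (nothing specific to this setup):

# yum makecache
# yum repolist | grep -i kubernetes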

3. Server side (master)

# yum install kubernetes-master etcd flannel -y

4. Client side (nodes)

# yum install kubernetes-node flannel etcd -y

5. Configure etcd

[root@node101 ~]# cat /etc/etcd/etcd.conf

ETCD_NAME=etcd101

ETCD_DATA_DIR="/var/lib/etcd/default.etcd"

ETCD_LISTEN_PEER_URLS="http://192.168.10.101:2380"

ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"

ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.10.101:2380"

ETCD_INITIAL_CLUSTER="etcd101=http://192.168.10.101:2380,etcd102=http://192.168.10.102:2380,etcd103=http://192.168.10.103:2380"

ETCD_INITIAL_CLUSTER_STATE="new"

ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"

ETCD_ADVERTISE_CLIENT_URLS="http://192.168.10.101:2379"

[root@node102 etcd]# cat /etc/etcd/etcd.conf

ETCD_NAME=etcd102

ETCD_DATA_DIR="/var/lib/etcd/default.etcd"

ETCD_LISTEN_PEER_URLS="http://192.168.10.102:2380"

ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"

ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.10.102:2380"

ETCD_INITIAL_CLUSTER="etcd101=http://192.168.10.101:2380,etcd102=http://192.168.10.102:2380,etcd103=http://192.168.10.103:2380"

ETCD_INITIAL_CLUSTER_STATE="new"

ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"

ETCD_ADVERTISE_CLIENT_URLS="http://192.168.10.102:2379"

[root@node3 ~]# cat /etc/etcd/etcd.conf

ETCD_NAME=etcd103

ETCD_DATA_DIR="/var/lib/etcd/default.etcd"

ETCD_LISTEN_PEER_URLS="http://192.168.10.103:2380"

ETCD_LISTEN_CLIENT_URLS="http://0.0.0.0:2379"

ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.10.103:2380"

ETCD_INITIAL_CLUSTER="etcd101=http://192.168.10.101:2380,etcd102=http://192.168.10.102:2380,etcd103=http://192.168.10.103:2380"

ETCD_INITIAL_CLUSTER_STATE="new"

ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"

ETCD_ADVERTISE_CLIENT_URLS="http://192.168.10.103:2379"
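etcd peers talk to each other on port 2380 and serve clients on 2379, so those ports must be reachable between all three machines. If firewalld is running (the CentOS 7 default), one way to open them on each node is:

# firewall-cmd --permanent --add-port=2379/tcp --add-port=2380/tcp
# firewall-cmd --reload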

Edit the etcd systemd unit file

# cat /usr/lib/systemd/system/etcd.service

[Unit]

Description=Etcd Server

After=network.target

After=network-online.target

Wants=network-online.target

[Service]

Type=notify

WorkingDirectory=/var/lib/etcd/

EnvironmentFile=-/etc/etcd/etcd.conf

User=etcd

# set GOMAXPROCS to number of processors

ExecStart=/bin/bash -c "GOMAXPROCS=$(nproc) /usr/bin/etcd \
  --name=\"${ETCD_NAME}\" \
  --data-dir=\"${ETCD_DATA_DIR}\" \
  --listen-peer-urls=\"${ETCD_LISTEN_PEER_URLS}\" \
  --listen-client-urls=\"${ETCD_LISTEN_CLIENT_URLS}\" \
  --advertise-client-urls=\"${ETCD_ADVERTISE_CLIENT_URLS}\" \
  --initial-cluster-token=\"${ETCD_INITIAL_CLUSTER_TOKEN}\" \
  --initial-cluster=\"${ETCD_INITIAL_CLUSTER}\" \
  --initial-cluster-state=\"${ETCD_INITIAL_CLUSTER_STATE}\""

Restart=on-failure

LimitNOFILE=65536

[Install]

WantedBy=multi-user.target

Start the etcd service

# systemctl start etcd
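Because ETCD_INITIAL_CLUSTER_STATE is "new", the three members bootstrap the cluster together, so start etcd on all three machines at roughly the same time; the first one will block waiting for its peers. To survive reboots, enable the unit as well:

# systemctl enable etcd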

Check the etcd cluster status

# etcdctl cluster-health

member 6404974f67850bd9 is healthy: got healthy result from http://192.168.10.102:2379

member 859390733a8165f3 is healthy: got healthy result from http://192.168.10.101:2379

member c280bee19c4540ef is healthy: got healthy result from http://192.168.10.103:2379

cluster is healthy

# etcdctl member list

6404974f67850bd9: name=etcd102 peerURLs=http://192.168.10.102:2380 clientURLs=http://192.168.10.102:2379 isLeader=false

859390733a8165f3: name=etcd101 peerURLs=http://192.168.10.101:2380 clientURLs=http://192.168.10.101:2379 isLeader=true

c280bee19c4540ef: name=etcd103 peerURLs=http://192.168.10.103:2380 clientURLs=http://192.168.10.103:2379 isLeader=false
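As a last sanity check, a key written through one member should be readable from the others. Using the same etcdctl v2 commands as above (/test is an arbitrary throwaway key):

[root@node101 ~]# etcdctl set /test hello
[root@node103 ~]# etcdctl get /test
[root@node101 ~]# etcdctl rm /test

Both the set and the get should print hello.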

6. Configure the master node

[root@node102 kubernetes]# cat /etc/kubernetes/apiserver | grep -v "^#" | grep -v "^$"

KUBE_API_ADDRESS="--insecure-bind-address=0.0.0.0"

KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.10.101:2379,http://192.168.10.102:2379,http://192.168.10.103:2379"

KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=172.17.0.0/16"

KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"

KUBE_API_ARGS=""

[root@node102 ~]# cat /etc/kubernetes/config | grep ^[^#]

KUBE_LOGTOSTDERR="--logtostderr=true"

KUBE_LOG_LEVEL="--v=0"

KUBE_ALLOW_PRIV="--allow-privileged=false"

KUBE_MASTER="--master=http://192.168.10.102:8080"

7. Configure the nodes

node101

[root@node101 ~ ]# cat /etc/kubernetes/config | grep ^[^#]

KUBE_LOGTOSTDERR="--logtostderr=true"

KUBE_LOG_LEVEL="--v=0"

KUBE_ALLOW_PRIV="--allow-privileged=false"

KUBE_MASTER="--master=http://192.168.10.102:8080"

[root@node101 ~]# cat /etc/kubernetes/kubelet | grep ^[^#]

KUBELET_ADDRESS="--address=0.0.0.0"

KUBELET_PORT="--port=10250"

KUBELET_HOSTNAME="--hostname-override=192.168.10.101"

KUBELET_API_SERVER="--api-servers=http://192.168.10.102:8080"

KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"

KUBELET_ARGS=""

node103

[root@node3 ~]# cat /etc/kubernetes/config | grep ^[^#]

KUBE_LOGTOSTDERR="--logtostderr=true"

KUBE_LOG_LEVEL="--v=0"

KUBE_ALLOW_PRIV="--allow-privileged=false"

KUBE_MASTER="--master=http://192.168.10.102:8080"

[root@node3 ~]#  cat /etc/kubernetes/kubelet | grep ^[^#]

KUBELET_ADDRESS="--address=0.0.0.0"

KUBELET_PORT="--port=10250"

KUBELET_HOSTNAME="--hostname-override=192.168.10.103"

KUBELET_API_SERVER="--api-servers=http://192.168.10.102:8080"

KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"

KUBELET_ARGS=""

8. Network configuration

After Docker starts it creates a docker0 interface, which defaults to the 172.17.0.0/16 network. This is a virtual network; for other machines in the cluster to reach the containers, flannel is needed. So once flannel and Docker are installed, always confirm that the docker0 subnet matches what is stored in etcd, and restart the Docker service if it does not; the flannel network must contain the docker0 subnet. The network restart script is given in Appendix 1 (a minimal sketch follows):
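A minimal sketch of the usual restart sequence, assuming the stock CentOS flannel/Docker integration, where Docker re-reads flannel's subnet settings when it restarts:

# systemctl restart flanneld
# systemctl restart docker
# ip addr show docker0   // should now sit inside the flannel network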

The configuration is identical on node101, node102, and node103

# cat /etc/sysconfig/flanneld | grep ^[^#]

FLANNEL_ETCD_ENDPOINTS="http://192.168.10.102:2379"

FLANNEL_ETCD_PREFIX="/kube/network"

Create the network configuration in etcd

# etcdctl mk /kube/network/config '{"Network":"172.17.0.0/16"}'  // the same subnet configured in the apiserver; run this on any one node

{"Network":"172.17.0.0/16"}

Start the flannel service

# systemctl start flanneld

Verify
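One way to check from the command line, assuming flannel's default udp backend and default subnet file location:

# cat /run/flannel/subnet.env   // FLANNEL_NETWORK should be 172.17.0.0/16
# ip addr show flannel0
# ip addr show docker0   // its subnet must fall inside FLANNEL_NETWORK; if not, restart docker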

9. Start the services

On the master

# systemctl start kube-apiserver

# systemctl start kube-controller-manager

# systemctl start kube-scheduler

On the nodes

# systemctl start kube-proxy

# systemctl start kubelet
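To have everything come back after a reboot, also enable the same units. On the master:

# systemctl enable kube-apiserver kube-controller-manager kube-scheduler

On the nodes:

# systemctl enable kubelet kube-proxy docker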

Verify, on the master

# kubectl get nodes  // list the nodes registered with the cluster

NAME            STATUS    AGE

192.168.10.101  Ready    4m

192.168.10.103  Ready    3m

# kubectl get namespace  // list the cluster's namespaces

NAME          STATUS    AGE

default      Active    5m

kube-system  Active    5m

Create a new namespace

# cat /etc/kubernetes/docker/kube-namespace.yaml

{
  "kind": "Namespace",
  "apiVersion": "v1",
  "metadata": {
    "name": "kube-zp"
  }
}

# kubectl create -f /etc/kubernetes/docker/kube-namespace.yaml

namespace "kube-zp" created

# kubectl get namespace

NAME          STATUS    AGE

default      Active    10m

kube-system  Active    10m

kube-zp      Active    21s
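To confirm the new namespace is usable, you can start a throwaway workload in it and tear it down again (kubectl run creates a Deployment in this release; nginx-test is an arbitrary name, and the nginx image must be pullable from the node):

# kubectl run nginx-test --image=nginx --namespace=kube-zp
# kubectl get pods --namespace=kube-zp -o wide
# kubectl delete deployment nginx-test --namespace=kube-zp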

Verify by visiting http://kube-apiserver:port

http://192.168.10.102:8080 lists all available API paths

http://192.168.10.102:8080/healthz/ping is a health check
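The same checks work from the command line; the bare /healthz endpoint simply answers ok:

# curl http://192.168.10.102:8080/healthz
ok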

10. Deploy the Kubernetes dashboard

# cat /etc/kubernetes/docker/kubernetes-dashboard.yaml | grep ^[^#]

kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  labels:
    app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      app: kubernetes-dashboard
  template:
    metadata:
      labels:
        app: kubernetes-dashboard
      # Comment the following annotation if Dashboard must not be deployed on master
      annotations:
        scheduler.alpha.kubernetes.io/tolerations: |
          [
            {
              "key": "dedicated",
              "operator": "Equal",
              "value": "master",
              "effect": "NoSchedule"
            }
          ]
    spec:
      containers:
      - name: kubernetes-dashboard
        image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.0
        imagePullPolicy: Always
        ports:
        - containerPort: 9090
          protocol: TCP
        args:
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          - --apiserver-host=http://192.168.10.102:8080
        livenessProbe:
          httpGet:
            path: /
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 9090
  selector:
    app: kubernetes-dashboard

Create the Deployment and its Service

# kubectl create -f /etc/kubernetes/docker/kubernetes-dashboard.yaml

deployment "kubernetes-dashboard" created

service "kubernetes-dashboard" created

Verify
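Check that the pod is running, then look up the NodePort assigned to the Service and browse to it on any node:

# kubectl get pods --namespace=kube-system
# kubectl get svc kubernetes-dashboard --namespace=kube-system   // the PORT(S) column shows the NodePort, e.g. 80:3xxxx/TCP

The dashboard is then reachable at http://<any-node-ip>:<nodeport>/.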
