Installing Kubernetes with yum

Author: namchern | Published 2019-10-26 21:28

    This article covers installing Kubernetes with the yum package manager.

    1. Environment preparation

      Prepare three Linux virtual machines with the following configuration:

    Node     Memory   CPU      Disk   IP Address       Hostname
    master   2G       1 core   35G    192.168.10.101   k8s_master
    node1    2G       1 core   35G    192.168.10.102   k8s_node1
    node2    2G       1 core   35G    192.168.10.103   k8s_node2

       On all three nodes, map the IP addresses to the hostnames: vim /etc/hosts

    192.168.10.101  k8s_master
    192.168.10.102  k8s_node1
    192.168.10.103  k8s_node2
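
    A quick sanity check (optional, not part of the original steps) is to resolve each hostname on every node:

    getent hosts k8s_master k8s_node1 k8s_node2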
    

    2. Pre-installation setup

      Before installing the K8S packages, configure and update the software environment on all nodes.

    2.1 Disable SELinux

      SELinux is the mandatory access control system shipped with Linux 2.6+ kernels. It hardens Linux considerably, but it interferes with some Kubernetes components, so it needs to be disabled.

    [root@k8s_master ~]# setenforce 0
    setenforce: SELinux is disabled
    

    To disable SELinux permanently, edit its configuration file:

    vim /etc/selinux/config
    

    Change SELINUX=enforcing to SELINUX=disabled:

    # This file controls the state of SELinux on the system.
    # SELINUX= can take one of these three values:
    #     enforcing - SELinux security policy is enforced.
    #     permissive - SELinux prints warnings instead of enforcing.
    #     disabled - No SELinux policy is loaded.
    #SELINUX=enforcing
    SELINUX=disabled
    # SELINUXTYPE= can take one of three two values:
    #     targeted - Targeted processes are protected,
    #     minimum - Modification of targeted policy. Only selected processes are protected. 
    #     mls - Multi Level Security protection.
    SELINUXTYPE=targeted
    

    2.2 Disable firewalld

      firewalld interferes with Docker's networking, so disable it before installation and deployment:

    systemctl stop firewalld
    systemctl disable firewalld
    

    2.3 Update packages with yum

    yum update -y
    

    2.4 Synchronize the system time across the three nodes

    ntpdate ntp1.aliyun.com
    

    Here ntp1.aliyun.com is one of Alibaba's public NTP servers.
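
    ntpdate performs a one-shot sync. To keep the clocks aligned over time, one simple option (an addition, not part of the original steps) is a periodic cron job on each node:

    # Re-sync against the Aliyun NTP server once an hour
    (crontab -l 2>/dev/null; echo '0 * * * * /usr/sbin/ntpdate ntp1.aliyun.com') | crontab -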

    3. etcd cluster configuration

      etcd is a highly available distributed key-value store that Kubernetes uses to persist cluster data. For availability, deploy etcd on all three servers to form a cluster.

    3.1 On the master node, run:

    yum -y install kubernetes-master kubernetes-client etcd
    

    3.1.1 Edit the etcd configuration file /etc/etcd/etcd.conf:

    #[Member]
    #ETCD_CORS=""
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    #ETCD_WAL_DIR=""
    ETCD_LISTEN_PEER_URLS="http://192.168.10.101:2380"
    ETCD_LISTEN_CLIENT_URLS="http://192.168.10.101:2379,http://127.0.0.1:2379"
    #ETCD_MAX_SNAPSHOTS="5"
    #ETCD_MAX_WALS="5"
    ETCD_NAME="etcd1"
    #ETCD_SNAPSHOT_COUNT="100000"
    #ETCD_HEARTBEAT_INTERVAL="100"
    #ETCD_ELECTION_TIMEOUT="1000"
    #ETCD_QUOTA_BACKEND_BYTES="0"
    #ETCD_MAX_REQUEST_BYTES="1572864"
    #ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
    #ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
    #ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
    #
    #[Clustering]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.10.101:2380"
    ETCD_ADVERTISE_CLIENT_URLS="http://192.168.10.101:2379"
    #ETCD_DISCOVERY=""
    #ETCD_DISCOVERY_FALLBACK="proxy"
    #ETCD_DISCOVERY_PROXY=""
    #ETCD_DISCOVERY_SRV=""
    ETCD_INITIAL_CLUSTER="etcd1=http://192.168.10.101:2380,etcd2=http://192.168.10.102:2380,etcd3=http://192.168.10.103:2380"
    #ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    #ETCD_INITIAL_CLUSTER_STATE="new"
    #ETCD_STRICT_RECONFIG_CHECK="true"
    #ETCD_ENABLE_V2="true"
    #
    #[Proxy]
    #ETCD_PROXY="off"
    #ETCD_PROXY_FAILURE_WAIT="5000"
    #ETCD_PROXY_REFRESH_INTERVAL="30000"
    #ETCD_PROXY_DIAL_TIMEOUT="1000"
    #ETCD_PROXY_WRITE_TIMEOUT="5000"
    #ETCD_PROXY_READ_TIMEOUT="0"
    #
    #[Security]
    #ETCD_CERT_FILE=""
    #ETCD_KEY_FILE=""
    #ETCD_CLIENT_CERT_AUTH="false"
    #ETCD_TRUSTED_CA_FILE=""
    #ETCD_AUTO_TLS="false"
    #ETCD_PEER_CERT_FILE=""
    #ETCD_PEER_KEY_FILE=""
    #ETCD_PEER_CLIENT_CERT_AUTH="false"
    #ETCD_PEER_TRUSTED_CA_FILE=""
    #ETCD_PEER_AUTO_TLS="false"
    #
    #[Logging]
    #ETCD_DEBUG="false"
    #ETCD_LOG_PACKAGE_LEVELS=""
    #ETCD_LOG_OUTPUT="default"
    #
    #[Unsafe]
    #ETCD_FORCE_NEW_CLUSTER="false"
    #
    #[Version]
    #ETCD_VERSION="false"
    #ETCD_AUTO_COMPACTION_RETENTION="0"
    #
    #[Profiling]
    #ETCD_ENABLE_PPROF="false"
    #ETCD_METRICS="basic"
    #
    #[Auth]
    #ETCD_AUTH_TOKEN="simple"
    

    3.2 On node1, run:

    yum -y install kubernetes-node etcd flannel docker
    

    3.2.1 Edit the /etc/etcd/etcd.conf configuration file:

    #[Member]
    #ETCD_CORS=""
    ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
    #ETCD_WAL_DIR=""
    ETCD_LISTEN_PEER_URLS="http://192.168.10.102:2380"
    ETCD_LISTEN_CLIENT_URLS="http://192.168.10.102:2379,http://127.0.0.1:2379"
    #ETCD_MAX_SNAPSHOTS="5"
    #ETCD_MAX_WALS="5"
    ETCD_NAME="etcd2"
    #ETCD_SNAPSHOT_COUNT="100000"
    #ETCD_HEARTBEAT_INTERVAL="100"
    #ETCD_ELECTION_TIMEOUT="1000"
    #ETCD_QUOTA_BACKEND_BYTES="0"
    #ETCD_MAX_REQUEST_BYTES="1572864"
    #ETCD_GRPC_KEEPALIVE_MIN_TIME="5s"
    #ETCD_GRPC_KEEPALIVE_INTERVAL="2h0m0s"
    #ETCD_GRPC_KEEPALIVE_TIMEOUT="20s"
    #
    #[Clustering]
    ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.10.102:2380"
    ETCD_ADVERTISE_CLIENT_URLS="http://192.168.10.102:2379"
    #ETCD_DISCOVERY=""
    #ETCD_DISCOVERY_FALLBACK="proxy"
    #ETCD_DISCOVERY_PROXY=""
    #ETCD_DISCOVERY_SRV=""
    ETCD_INITIAL_CLUSTER="etcd1=http://192.168.10.101:2380,etcd2=http://192.168.10.102:2380,etcd3=http://192.168.10.103:2380"
    #ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
    #ETCD_INITIAL_CLUSTER_STATE="new"
    #ETCD_STRICT_RECONFIG_CHECK="true"
    #ETCD_ENABLE_V2="true"
    #
    #[Proxy]
    #ETCD_PROXY="off"
    #ETCD_PROXY_FAILURE_WAIT="5000"
    #ETCD_PROXY_REFRESH_INTERVAL="30000"
    #ETCD_PROXY_DIAL_TIMEOUT="1000"
    #ETCD_PROXY_WRITE_TIMEOUT="5000"
    #ETCD_PROXY_READ_TIMEOUT="0"
    #
    #[Security]
    #ETCD_CERT_FILE=""
    #ETCD_KEY_FILE=""
    #ETCD_CLIENT_CERT_AUTH="false"
    #ETCD_TRUSTED_CA_FILE=""
    #ETCD_AUTO_TLS="false"
    #ETCD_PEER_CERT_FILE=""
    #ETCD_PEER_KEY_FILE=""
    #ETCD_PEER_CLIENT_CERT_AUTH="false"
    #ETCD_PEER_TRUSTED_CA_FILE=""
    #ETCD_PEER_AUTO_TLS="false"
    #
    #[Logging]
    #ETCD_DEBUG="false"
    #ETCD_LOG_PACKAGE_LEVELS=""
    #ETCD_LOG_OUTPUT="default"
    #
    #[Unsafe]
    #ETCD_FORCE_NEW_CLUSTER="false"
    #
    #[Version]
    #ETCD_VERSION="false"
    #ETCD_AUTO_COMPACTION_RETENTION="0"
    #
    #[Profiling]
    #ETCD_ENABLE_PPROF="false"
    #ETCD_METRICS="basic"
    #
    #[Auth]
    #ETCD_AUTH_TOKEN="simple"
    

    3.3 On node2, repeat the same steps as on node1

      Note that the node-specific settings in the configuration file must be changed to node2's values, as shown below.
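
    For reference, following the member names defined in ETCD_INITIAL_CLUSTER, the node2-specific lines become:

    ETCD_LISTEN_PEER_URLS="http://192.168.10.103:2380"
    ETCD_LISTEN_CLIENT_URLS="http://192.168.10.103:2379,http://127.0.0.1:2379"
    ETCD_NAME="etcd3"
    ETCD_INITIAL_ADVERTISE_PEER_URLS="http://192.168.10.103:2380"
    ETCD_ADVERTISE_CLIENT_URLS="http://192.168.10.103:2379"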

    3.4 On all three nodes, enable etcd at boot:

    systemctl enable etcd
    

    3.5 Start the etcd service on all three nodes at the same time; during the initial bootstrap each member waits for its peers, so a member started on its own will appear to hang:

    systemctl start etcd
    

    3.6 Check the etcd cluster health:

    etcdctl cluster-health
    
    [root@k8s_master kubernetes]# etcdctl cluster-health
    member 6404974f67850bd9 is healthy: got healthy result from http://192.168.10.102:2379
    member 859390733a8165f3 is healthy: got healthy result from http://192.168.10.101:2379
    member c280bee19c4540ef is healthy: got healthy result from http://192.168.10.103:2379
    cluster is healthy
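
    If a member had reported as unhealthy, etcdctl member list shows the peer and client URLs each member registered, which helps spot a mistyped address:

    etcdctl member list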
    

    4. Master node configuration

      The master node runs the main service processes: apiserver, controller-manager, and scheduler. Their configuration files all live in /etc/kubernetes.
      This section covers the configuration of the Kubernetes master node.

    vim /etc/kubernetes/apiserver
    
    ###
    # kubernetes system config
    #
    # The following values are used to configure the kube-apiserver
    #
    
    # The address on the local server to listen to.
    # KUBE_API_ADDRESS="--insecure-bind-address=127.0.0.1"
    # The IP address the api-server process binds to. `--address=0.0.0.0` binds all of the host's IP addresses.
    KUBE_API_ADDRESS="--address=0.0.0.0"
    
    # The port on the local server to listen on.
    # The port the api-server listens on
    KUBE_API_PORT="--port=8080"
    
    # Port minions listen on
    # The service port kubelet listens on.
    KUBELET_PORT="--kubelet-port=10250"
    
    # Comma separated list of nodes in the etcd cluster
    # The address of each node in the etcd cluster
    KUBE_ETCD_SERVERS="--etcd-servers=http://192.168.10.101:2379,http://192.168.10.102:2379,http://192.168.10.103:2379"
    
    # Address range to use for services
    # The IP address range for Kubernetes services
    KUBE_SERVICE_ADDRESSES="--service-cluster-ip-range=10.254.0.0/16"
    
    # default admission control policies
    # SecurityContextDeny and ServiceAccount relate to permissions; they can be dropped for testing
    #KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota"
    KUBE_ADMISSION_CONTROL="--admission-control=NamespaceLifecycle,NamespaceExists,LimitRanger,ResourceQuota"
    
    # Add your own!
    KUBE_API_ARGS=""
    

    Once configured, start the following three services on the master node:

    systemctl start kube-apiserver
    systemctl start kube-controller-manager
    systemctl start kube-scheduler
    

    And enable them at boot:

    systemctl enable kube-apiserver
    systemctl enable kube-controller-manager
    systemctl enable kube-scheduler
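
    With the services up, you can confirm from the master that the scheduler, controller-manager, and etcd members are healthy (on this host kubectl defaults to the local apiserver on port 8080):

    kubectl get componentstatuses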
    

    The interfaces exposed by the Kubernetes api-server are RESTful; you can reach port 8080 on the master node with a browser or any HTTP client, and the api-server returns the available API paths as JSON.

    [root@k8s_master kubernetes]# curl -XGET http://192.168.10.101:8080/
    {
      "paths": [
        "/api",
        "/api/v1",
        "/apis",
        "/apis/apps",
        "/apis/apps/v1beta1",
        "/apis/authentication.k8s.io",
        "/apis/authentication.k8s.io/v1beta1",
        "/apis/authorization.k8s.io",
        "/apis/authorization.k8s.io/v1beta1",
        "/apis/autoscaling",
        "/apis/autoscaling/v1",
        "/apis/batch",
        "/apis/batch/v1",
        "/apis/batch/v2alpha1",
        "/apis/certificates.k8s.io",
        "/apis/certificates.k8s.io/v1alpha1",
        "/apis/extensions",
        "/apis/extensions/v1beta1",
        "/apis/policy",
        "/apis/policy/v1beta1",
        "/apis/rbac.authorization.k8s.io",
        "/apis/rbac.authorization.k8s.io/v1alpha1",
        "/apis/storage.k8s.io",
        "/apis/storage.k8s.io/v1beta1",
        "/healthz",
        "/healthz/ping",
        "/healthz/poststarthook/bootstrap-controller",
        "/healthz/poststarthook/extensions/third-party-resources",
        "/healthz/poststarthook/rbac/bootstrap-roles",
        "/logs",
        "/metrics",
        "/swaggerapi/",
        "/ui/",
        "/version"
      ]
    }
    

    5. Node configuration

      The worker nodes mainly run the kube-proxy and kubelet processes. The files to edit are /etc/kubernetes/config, /etc/kubernetes/proxy, and /etc/kubernetes/kubelet: the Kubernetes global configuration, the kube-proxy configuration, and the kubelet configuration, respectively.

    5.1 Edit /etc/kubernetes/config

    The main change is KUBE_MASTER, which points the node's services at the apiserver:

    ###
    # kubernetes system config
    #
    # The following values are used to configure various aspects of all
    # kubernetes services, including
    #
    #   kube-apiserver.service
    #   kube-controller-manager.service
    #   kube-scheduler.service
    #   kubelet.service
    #   kube-proxy.service
    # logging to stderr means we get it in the systemd journal
    KUBE_LOGTOSTDERR="--logtostderr=true"
    
    # journal message level, 0 is debug
    KUBE_LOG_LEVEL="--v=0"
    
    # Should this cluster be allowed to run privileged docker containers
    KUBE_ALLOW_PRIV="--allow-privileged=false"
    
    # How the controller-manager, scheduler, and proxy find the apiserver
    # The address of the apiserver
    KUBE_MASTER="--master=http://192.168.10.101:8080"
    

    5.2 Edit /etc/kubernetes/kubelet (shown for node1; on node2, set KUBELET_HOSTNAME to node2's own IP)

    ###
    # kubernetes kubelet (minion) config
    
    # The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
    # Bind all of the host's network interfaces
    KUBELET_ADDRESS="--address=0.0.0.0"
    
    # The port for the info server to serve on
    # The port kubelet listens on
    KUBELET_PORT="--port=10250"
    
    # You may leave this blank to use the actual hostname
    # In testing this had to be an IP address; with a hostname the master node cannot find the node
    KUBELET_HOSTNAME="--hostname-override=192.168.10.102"
    
    # location of the api-server
    # The address of the api-server
    KUBELET_API_SERVER="--api-servers=http://192.168.10.101:8080"
    
    # pod infrastructure container
    KUBELET_POD_INFRA_CONTAINER="--pod-infra-container-image=registry.access.redhat.com/rhel7/pod-infrastructure:latest"
    
    # Add your own!
    KUBELET_ARGS=""
    

    5.3 修改 /etc/kubernetes/proxy

    ###
    # kubernetes proxy config
    
    # default config should be adequate
    
    # Add your own!
    KUBE_PROXY_ARGS="--bind-address=0.0.0.0"
    

    5.4 Enable the services at boot

    systemctl enable kube-proxy
    systemctl enable kubelet
    

    5.5 Start the kube-proxy and kubelet services

    systemctl restart kube-proxy
    systemctl restart kubelet
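
    Then check on each node that both services came up cleanly:

    systemctl status kubelet kube-proxy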
    

    5.6 Verification

    On the master node, check that both nodes have registered:

    [root@k8s_master kubernetes]# kubectl get nodes
    NAME             STATUS    AGE
    192.168.10.102   Ready     10s
    192.168.10.103   Ready     8s
    

    6. Network configuration

    6.1 Configure the Aliyun registry mirror for Docker

    vim /etc/docker/daemon.json
    

    with the following content:

    {
      "registry-mirrors": ["https://0o7m8o4u.mirror.aliyuncs.com"]
    }
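
    Restart Docker on each node afterwards so the mirror setting takes effect:

    systemctl restart docker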
    

    6.2 Flannel configuration

      Flannel is a network tool commonly used with Kubernetes to build a layer-3 (network layer) fabric. It runs an agent named flanneld on every machine in the cluster, which allocates each host a subnet out of a preconfigured address space. Flannel stores the network configuration, the allocated subnets, and any auxiliary data directly in the Kubernetes API or in etcd.

    6.2.1 Allocate the Docker address range

      Before configuring Flannel, pre-allocate the address range for the Docker network. On the master node, run:

    etcdctl mk /atomic.io/network/config '{"Network":"172.17.0.0/16","SubnetMin":"172.17.1.0","SubnetMax":"172.17.254.0"}'
    

      This adds a key named /atomic.io/network/config to etcd, which defines the network and the per-host subnet range handed out to Docker containers.
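
    You can read the key back to verify it was stored correctly:

    etcdctl get /atomic.io/network/config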

    6.2.2 Edit the flannel configuration file

      On the two worker nodes, node1 and node2, edit /etc/sysconfig/flanneld:

    vim /etc/sysconfig/flanneld
    
    # Flanneld configuration options  
    
    # etcd url location.  Point this to the server where etcd runs
    # The addresses of the etcd cluster members
    FLANNEL_ETCD_ENDPOINTS="http://192.168.10.101:2379,http://192.168.10.102:2379,http://192.168.10.103:2379"
    
    # etcd config key.  This is the configuration key that flannel queries
    # For address range assignment
    # The etcd key holding the network configuration; it must match the key set earlier exactly.
    FLANNEL_ETCD_PREFIX="/atomic.io/network"
    
    # Any additional options that you want to pass
    # --iface selects the network interface Flannel uses.
    FLANNEL_OPTIONS="--iface=eth0"
    

    6.2.3 Start flanneld

      On node1 and node2, enable and start flanneld:

    systemctl enable flanneld
    systemctl start flanneld
    

    Once it starts, list the network interfaces with ip address show; a new flannel0 interface appears:

    [root@k8s_node1 kubernetes]# ip address show
    1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
        link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
        inet 127.0.0.1/8 scope host lo
           valid_lft forever preferred_lft forever
        inet6 ::1/128 scope host 
           valid_lft forever preferred_lft forever
    2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
        link/ether 00:0c:29:01:26:69 brd ff:ff:ff:ff:ff:ff
        inet 192.168.10.102/24 brd 192.168.10.255 scope global noprefixroute eth0
           valid_lft forever preferred_lft forever
        inet6 fe80::135b:8559:71:4455/64 scope link noprefixroute 
           valid_lft forever preferred_lft forever
        inet6 fe80::da48:2273:79f5:65ce/64 scope link noprefixroute 
           valid_lft forever preferred_lft forever
        inet6 fe80::be93:9fe:a854:66da/64 scope link noprefixroute 
           valid_lft forever preferred_lft forever
    3: docker0: <NO-CARRIER,BROADCAST,MULTICAST,UP> mtu 1500 qdisc noqueue state DOWN group default 
        link/ether 02:42:98:0d:46:0a brd ff:ff:ff:ff:ff:ff
        inet 172.17.0.1/16 scope global docker0
           valid_lft forever preferred_lft forever
    4: flannel0: <POINTOPOINT,MULTICAST,NOARP,UP,LOWER_UP> mtu 1472 qdisc pfifo_fast state UNKNOWN group default qlen 500
        link/none 
        inet 172.17.92.0/16 scope global flannel0
           valid_lft forever preferred_lft forever
        inet6 fe80::265:5425:9539:94c/64 scope link flags 800 
           valid_lft forever preferred_lft forever
    

      In addition, flannel generates two configuration files:

    • /run/flannel/subnet.env

    FLANNEL_NETWORK=172.17.0.0/16
    FLANNEL_SUBNET=172.17.92.1/24
    FLANNEL_MTU=1472
    FLANNEL_IPMASQ=false
      
    • /run/flannel/docker

    DOCKER_OPT_BIP="--bip=172.17.92.1/24"
    DOCKER_OPT_IPMASQ="--ip-masq=true"
    DOCKER_OPT_MTU="--mtu=1472"
    DOCKER_NETWORK_OPTIONS=" --bip=172.17.92.1/24 --ip-masq=true --mtu=1472"
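
    On CentOS, the flannel and docker packages are wired together so that docker.service reads /run/flannel/docker (via a systemd drop-in, assuming the stock unit files). Restart Docker on each node so its containers land in the flannel-assigned subnet:

    systemctl restart docker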
    

    And that's it: Kubernetes is now installed.
