Day83 - k8s Service, Deployment resources, DNS

Author: 三德书生 | Published 2019-12-10 21:21

    3.3 Service resource
    3.4 Deployment resource
    3.5 tomcat + mysql exercise
    4: k8s add-on components
    4.1 DNS service

    Service resource

    • A Service exposes Pod ports and load-balances across the Pods it selects
      Create a Service:
    [root@k8s-master svc]# cat k8s_svc.yml 
    apiVersion: v1
    kind: Service
    metadata:
      name: myweb
    spec:
      type: NodePort  # or ClusterIP (the default, cluster-internal only)
      ports:
        - port: 80          # port on the cluster IP (VIP)
          nodePort: 30000   # port exposed on every node
          targetPort: 80    # container port inside the pod
      selector:
        app: myweb
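
    A minimal sketch of creating and checking this Service (assuming, as later in these notes, that 10.0.0.12 is one of the node IPs):

    kubectl create -f k8s_svc.yml
    kubectl get svc myweb                # shows the cluster IP and the 80:30000 NodePort mapping
    curl http://10.0.0.12:30000          # pods selected by app=myweb answer on every node at port 30000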
    

    kubectl scale rc nginx --replicas=2      # scale the backing RC

    kubectl exec -it pod_name /bin/bash      # open a shell inside a pod

    Modify the nodePort range:

    vim  /etc/kubernetes/apiserver
    KUBE_API_ARGS="--service-node-port-range=3000-50000"
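
    The new range only takes effect after the apiserver is restarted; on an install that uses /etc/kubernetes/apiserver this is usually:

    systemctl restart kube-apiserver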
    

    Create a Service resource from the command line:

    kubectl expose rc nginx --type=NodePort --port=80
    

    By default a Service load-balances with iptables rules maintained by kube-proxy; from k8s 1.8 onward the IPVS (LVS) proxy mode is recommended, which does layer-4 load balancing at the transport layer (TCP/UDP).
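
    To see what kube-proxy actually programs in iptables mode, the NAT rules for a Service can be listed on any node (a quick check, not part of the original notes):

    iptables-save -t nat | grep myweb    # KUBE-SERVICES / KUBE-NODEPORTS entries carry the Service name in their comments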

    Deployment resource

    • A rolling upgrade with an RC interrupts access to the service (the Service has to be repointed from the old RC's pods to the new ones), so k8s introduced the Deployment resource
      Create a Deployment:
    [root@k8s-master deploy]# cat k8s_deploy.yml 
    apiVersion: extensions/v1beta1
    kind: Deployment
    metadata:
      name: nginx-deployment
    spec:
      replicas: 3
      template:
        metadata:
          labels:
            app: nginx
        spec:
          containers:
          - name: nginx
            image: 10.0.0.11:5000/nginx:1.13
            ports:
            - containerPort: 80
            resources:  
              limits:
                cpu: 100m
              requests:
                cpu: 100m
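
    A short sketch of creating this Deployment and checking the object chain it produces (Deployment --> ReplicaSet --> Pods):

    kubectl create -f k8s_deploy.yml
    kubectl get deployment nginx-deployment
    kubectl get rs,pod -o wide           # the ReplicaSet and the 3 pods managed by the Deployment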
    
    Deployment upgrade and rollback
    • Create a Deployment from the command line
    kubectl run nginx --image=10.0.0.11:5000/nginx:1.13 --replicas=3 --record
    
    • Upgrade the image version from the command line
    kubectl set image deploy nginx nginx=10.0.0.11:5000/nginx:1.15
    
    • View the Deployment's revision history
    kubectl rollout history deployment nginx
    
    • Roll the Deployment back to the previous revision
    kubectl rollout undo deployment nginx
    
    • Roll the Deployment back to a specific revision
    kubectl rollout undo deployment nginx --to-revision=2
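
    • Watch an upgrade or rollback converge (an extra check, not in the original notes)
    kubectl rollout status deployment nginx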
    
    tomcat + mysql exercise

    In k8s, containers reach each other through the Service's VIP (cluster IP) address!

    [root@k8s-master tomcat_demo]# ls
    mysql-rc.yml  mysql-svc.yml  tomcat-rc.yml  tomcat-svc.yml
    [root@k8s-master tomcat_demo]# cat mysql-rc.yml 
    apiVersion: v1
    kind: ReplicationController
    metadata:
      name: mysql
    spec:
      replicas: 1
      selector:
        app: mysql
      template:
        metadata:
          labels:
            app: mysql
        spec:
          containers:
            - name: mysql
              image: 10.0.0.11:5000/mysql:5.7
              ports:
              - containerPort: 3306
              env:
              - name: MYSQL_ROOT_PASSWORD
                value: '123456'
    
    [root@k8s-master tomcat_demo]# cat mysql-svc.yml 
    apiVersion: v1
    kind: Service
    metadata:
      name: mysql
    spec:
      ports:
        - port: 3306
          targetPort: 3306
      selector:
        app: mysql
    
    [root@k8s-master tomcat_demo]# cat tomcat-rc.yml 
    apiVersion: v1
    kind: ReplicationController
    metadata:
      name: myweb
    spec:
      replicas: 2
      selector:
        app: myweb
      template:
        metadata:
          labels:
            app: myweb
        spec:
          containers:
            - name: myweb
              image: 10.0.0.11:5000/tomcat-app:v2
              ports:
              - containerPort: 8080
              env:
              - name: MYSQL_SERVICE_HOST
                value: 'mysql'
              - name: MYSQL_SERVICE_PORT
                value: '3306'
    
    [root@k8s-master tomcat_demo]# cat tomcat-svc.yml 
    apiVersion: v1
    kind: Service
    metadata:
      name: myweb
    spec:
      type: NodePort
      ports:
        - port: 8080
          nodePort: 30008
          targetPort: 8080
      selector:
        app: myweb
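
    Bringing the exercise up and checking it, as a sketch (the tomcat pods reach MySQL through the mysql Service, and 30008 is the NodePort defined above):

    kubectl create -f mysql-rc.yml -f mysql-svc.yml
    kubectl create -f tomcat-rc.yml -f tomcat-svc.yml
    kubectl get svc -o wide              # note the mysql cluster IP and the myweb 8080:30008 mapping
    # then open http://<any-node-ip>:30008 in a browser to reach the tomcat application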
    

    k8s add-on components

    • The DNS service in a k8s cluster resolves Service names into their VIP (cluster IP) addresses
    DNS service

    Install the DNS service

    1: Download the dns docker image package

    wget http://192.168.12.202/docker_image/docker_k8s_dns.tar.gz
    

    2: Import the dns docker image package (on the node1 node)

    docker load -i docker_k8s_dns.tar.gz
    

    3: Edit skydns-rc.yaml, then create the DNS service on node1

      spec:
        nodeName: 10.0.0.12
    
    [root@k8s-master dns]# ls
    skydns-rc.yaml  skydns-svc.yaml
    [root@k8s-master dns]# cat skydns-rc.yaml 
    # Copyright 2016 The Kubernetes Authors.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.
    
    # TODO - At some point, we need to rename all skydns-*.yaml.* files to kubedns-*.yaml.*
    # Should keep target in cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml
    # in sync with this file.
    
    # __MACHINE_GENERATED_WARNING__
    
    apiVersion: extensions/v1beta1
    kind: Deployment
    metadata:
      name: kube-dns
      namespace: kube-system
      labels:
        k8s-app: kube-dns
        kubernetes.io/cluster-service: "true"
    spec:
      replicas: 1
      # replicas: not specified here:
      # 1. In order to make Addon Manager do not reconcile this replicas parameter.
      # 2. Default is 1.
      # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
      strategy:
        rollingUpdate:
          maxSurge: 10%
          maxUnavailable: 0
      selector:
        matchLabels:
          k8s-app: kube-dns
      template:
        metadata:
          labels:
            k8s-app: kube-dns
          annotations:
            scheduler.alpha.kubernetes.io/critical-pod: ''
            scheduler.alpha.kubernetes.io/tolerations: '[{"key":"CriticalAddonsOnly", "operator":"Exists"}]'
        spec:
          nodeName: 10.0.0.13
          containers:
          - name: kubedns
            image: gcr.io/google_containers/kubedns-amd64:1.9
            resources:
              # TODO: Set memory limits when we've profiled the container for large
              # clusters, then set request = limit to keep this container in
              # guaranteed class. Currently, this container falls into the
              # "burstable" category so the kubelet doesn't backoff from restarting it.
              limits:
                memory: 170Mi
              requests:
                cpu: 100m
                memory: 70Mi
            livenessProbe:
              httpGet:
                path: /healthz-kubedns
                port: 8080
                scheme: HTTP
              initialDelaySeconds: 60
              timeoutSeconds: 5
              successThreshold: 1
              failureThreshold: 5
            readinessProbe:
              httpGet:
                path: /readiness
                port: 8081
                scheme: HTTP
              # we poll on pod startup for the Kubernetes master service and
              # only setup the /readiness HTTP server once that's available.
              initialDelaySeconds: 3
              timeoutSeconds: 5
            args:
            - --domain=cluster.local.
            - --dns-port=10053
            - --config-map=kube-dns
            - --kube-master-url=http://10.0.0.11:8080
            # This should be set to v=2 only after the new image (cut from 1.5) has
            # been released, otherwise we will flood the logs.
            - --v=0
            #__PILLAR__FEDERATIONS__DOMAIN__MAP__
            env:
            - name: PROMETHEUS_PORT
              value: "10055"
            ports:
            - containerPort: 10053
              name: dns-local
              protocol: UDP
            - containerPort: 10053
              name: dns-tcp-local
              protocol: TCP
            - containerPort: 10055
              name: metrics
              protocol: TCP
          - name: dnsmasq
            image: gcr.io/google_containers/kube-dnsmasq-amd64:1.4
            livenessProbe:
              httpGet:
                path: /healthz-dnsmasq
                port: 8080
                scheme: HTTP
              initialDelaySeconds: 60
              timeoutSeconds: 5
              successThreshold: 1
              failureThreshold: 5
            args:
            - --cache-size=1000
            - --no-resolv
            - --server=127.0.0.1#10053
            #- --log-facility=-
            ports:
            - containerPort: 53
              name: dns
              protocol: UDP
            - containerPort: 53
              name: dns-tcp
              protocol: TCP
            # see: https://github.com/kubernetes/kubernetes/issues/29055 for details
            resources:
              requests:
                cpu: 150m
                memory: 10Mi
          - name: dnsmasq-metrics
            image: gcr.io/google_containers/dnsmasq-metrics-amd64:1.0
            livenessProbe:
              httpGet:
                path: /metrics
                port: 10054
                scheme: HTTP
              initialDelaySeconds: 60
              timeoutSeconds: 5
              successThreshold: 1
              failureThreshold: 5
            args:
            - --v=2
            - --logtostderr
            ports:
            - containerPort: 10054
              name: metrics
              protocol: TCP
            resources:
              requests:
                memory: 10Mi
          - name: healthz
            image: gcr.io/google_containers/exechealthz-amd64:1.2
            resources:
              limits:
                memory: 50Mi
              requests:
                cpu: 10m
                # Note that this container shouldn't really need 50Mi of memory. The
                # limits are set higher than expected pending investigation on #29688.
                # The extra memory was stolen from the kubedns container to keep the
                # net memory requested by the pod constant.
                memory: 50Mi
            args:
            - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1 >/dev/null
            - --url=/healthz-dnsmasq
            - --cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1:10053 >/dev/null
            - --url=/healthz-kubedns
            - --port=8080
            - --quiet
            ports:
            - containerPort: 8080
              protocol: TCP
          dnsPolicy: Default  # Don't use cluster DNS.
    [root@k8s-master dns]# cat skydns-svc.yaml 
    # Copyright 2016 The Kubernetes Authors.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.
    
    # TODO - At some point, we need to rename all skydns-*.yaml.* files to kubedns-*.yaml.*
    
    # Warning: This is a file generated from the base underscore template file: skydns-svc.yaml.base
    
    apiVersion: v1
    kind: Service
    metadata:
      name: kube-dns
      namespace: kube-system
      labels:
        k8s-app: kube-dns
        kubernetes.io/cluster-service: "true"
        kubernetes.io/name: "KubeDNS"
    spec:
      selector:
        k8s-app: kube-dns
      clusterIP: 10.254.230.254
      ports:
      - name: dns
        port: 53
        protocol: UDP
      - name: dns-tcp
        port: 53
        protocol: TCP
    
    

    4: Create the DNS service

    kubectl create -f skydns-rc.yaml
    
    kubectl create -f skydns-svc.yaml
    

    5: Check

    kubectl get all --namespace=kube-system
    

    6: Modify the kubelet configuration file on every node

    vim  /etc/kubernetes/kubelet
    
    KUBELET_ARGS="--cluster_dns=10.254.230.254 --cluster_domain=cluster.local"
    
    systemctl   restart kubelet
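
    Pods started before this change keep their old /etc/resolv.conf, so re-create them; afterwards, resolution can be verified from inside any pod that ships DNS tools (the pod name below is a placeholder):

    kubectl exec -it <some-pod-name> -- nslookup mysql
    # 'mysql' should resolve to the mysql Service's cluster IP via the DNS server 10.254.230.254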
    
    Summary of key points

    3.3 Service resource

    • Exposes ports, load-balances, and provides automatic service discovery
      node IP: the NodePort range is configured on the api-server (--service-node-port-range)
      cluster IP: the Service VIP range is configured on the api-server
      pod IP: the pod network range is stored in etcd under /atomic.io/network/config (flannel's key)

    kubectl expose deployment nginx --port=80 --target-port=80 --type=NodePort

    3.4 Deployment resource

    • Solves the interruption of access during an RC rolling upgrade: rc1 (myweb1) ---> rc2 (myweb2), while svc myweb keeps serving
      deployment --> rs --> pod
      How do RS and RC relate? The ReplicaSet is the next-generation replication controller and supports set-based (wildcard-like) label selectors

    • Create a Deployment from the command line:
      kubectl run nginx --image=10.0.0.11:5000/nginx:1.13 --replicas=3 --record
      Update the pod image of a Deployment from the command line:
      kubectl set image deployment nginx nginx=10.0.0.11:5000/nginx:1.15
      View the Deployment's revision history from the command line:
      kubectl rollout history deployment nginx
      Roll the Deployment back to the previous revision from the command line:
      kubectl rollout undo deployment nginx
      Roll the Deployment back to a specific revision from the command line:
      kubectl rollout undo deployment nginx --to-revision=1

    3.5 tomcat + mysql exercise

    • Containers connect to each other through the Service VIP

    4: k8s add-on components
    4.1 DNS

    • Resolves a Service's name into its corresponding VIP

    • k8s resource operations:
      create:  kubectl create -f xxx.yml
      delete:  kubectl delete <resource-type> <resource-name>, or kubectl delete -f xxx.yml
      update:  kubectl edit <resource-type> <resource-name>
      list:    kubectl get <resource-type> -o wide
      inspect: kubectl describe <resource-type> <resource-name>
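
      A few concrete instances of these operations with the objects from this lesson (illustrative only):
      kubectl get svc -o wide
      kubectl describe deployment nginx-deployment
      kubectl delete -f k8s_svc.yml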
