美文网首页中台建设
k8s使用helm部署efk

k8s使用helm部署efk

作者: Firetheworld | 来源:发表于2020-11-09 11:14 被阅读0次

    目录:

    • 前提
    • 一 部署ES
    • 二 部署Filebeat
    • 三 部署 Kibana

    正文

    前提:

    k8s中部署容器日志采集系统。

    各组件介绍请查看以前文章:https://www.jianshu.com/writer#/notebooks/29770203/notes/41804428/preview

    这次我们使用的环境如下:
    k8s v1.15.3
    helm 2.14.3
    efk: v6.8.14(下文三个组件的镜像 tag 均为 6.8.14-SNAPSHOT)
    动态存储: StorageClass

    下载好官方的elastic,选择版本,
    elastic的helm仓库的github:https://github.com/elastic/helm-charts

    一 部署ES

    更改配置项:增加storageclass,对ES的数据持久化

    cd helm-charts/elasticsearch
    vim values.yaml  #更改配置
    helm install  . --name=elasticsearch  --namespace=kube-public #部署ES
    
    ---
    clusterName: "elasticsearch"
    nodeGroup: "master"
    
    # The service that non master groups will try to connect to when joining the cluster
    # This should be set to clusterName + "-" + nodeGroup for your master group
    masterService: ""
    
    # Elasticsearch roles that will be applied to this nodeGroup
    # These will be set as environment variables. E.g. node.master=true
    roles:
      master: "true"
      ingest: "true"
      data: "true"
    
    replicas: 3
    minimumMasterNodes: 2
    
    esMajorVersion: ""
    
    # Allows you to add any config files in /usr/share/elasticsearch/config/
    # such as elasticsearch.yml and log4j2.properties
    esConfig: {}
    #  elasticsearch.yml: |
    #    key:
    #      nestedkey: value
    #  log4j2.properties: |
    #    key = value
    
    # Extra environment variables to append to this nodeGroup
    # This will be appended to the current 'env:' key. You can use any of the kubernetes env
    # syntax here
    extraEnvs: []
    #  - name: MY_ENVIRONMENT_VAR
    #    value: the_value_goes_here
    
    # Allows you to load environment variables from kubernetes secret or config map
    envFrom: []
    # - secretRef:
    #     name: env-secret
    # - configMapRef:
    #     name: config-map
    
    # A list of secrets and their paths to mount inside the pod
    # This is useful for mounting certificates for security and for mounting
    # the X-Pack license
    secretMounts: []
    #  - name: elastic-certificates
    #    secretName: elastic-certificates
    #    path: /usr/share/elasticsearch/config/certs
    #    defaultMode: 0755
    
    image: "docker.in.zwxict.com/tools/elasticsearch/elasticsearch" #推送到私网仓库
    imageTag: "6.8.14-SNAPSHOT"
    imagePullPolicy: "IfNotPresent"
    
    podAnnotations: {}
      # iam.amazonaws.com/role: es-cluster
    
    # additionals labels
    labels: {}
    
    esJavaOpts: "-Xmx1g -Xms1g"
    
    resources:
      requests:
        cpu: "2000m"
        memory: "4Gi"
      limits:
        cpu: "2000m"
        memory: "4Gi"
    
    initResources: {}
      # limits:
      #   cpu: "25m"
      #   # memory: "128Mi"
      # requests:
      #   cpu: "25m"
      #   memory: "128Mi"
    
    sidecarResources: {}
      # limits:
      #   cpu: "25m"
      #   # memory: "128Mi"
      # requests:
      #   cpu: "25m"
      #   memory: "128Mi"
    
    networkHost: "0.0.0.0"
    
    volumeClaimTemplate:
      accessModes: [ "ReadWriteOnce" ]
      storageClassName: "pub-nfs-sc" # 增加动态存储,用于数据持久化
      resources:
        requests:
          storage: 50Gi
    
    rbac:
      create: false
      serviceAccountAnnotations: {}
      serviceAccountName: ""
    
    podSecurityPolicy:
      create: false
      name: ""
      spec:
        privileged: true
        fsGroup:
          rule: RunAsAny
        runAsUser:
          rule: RunAsAny
        seLinux:
          rule: RunAsAny
        supplementalGroups:
          rule: RunAsAny
        volumes:
          - secret
          - configMap
          - persistentVolumeClaim
    
    persistence:
      enabled: true
      labels:
        # Add default labels for the volumeClaimTemplate of the StatefulSet
        enabled: false
      annotations: {}
    
    extraVolumes: []
      # - name: extras
      #   emptyDir: {}
    
    extraVolumeMounts: []
      # - name: extras
      #   mountPath: /usr/share/extras
      #   readOnly: true
    
    extraContainers: []
      # - name: do-something
      #   image: busybox
      #   command: ['do', 'something']
    
    extraInitContainers: []
      # - name: do-something
      #   image: busybox
      #   command: ['do', 'something']
    
    # This is the PriorityClass settings as defined in
    # https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
    priorityClassName: ""
    
    # By default this will make sure two pods don't end up on the same node
    # Changing this to a region would allow you to spread pods across regions
    antiAffinityTopologyKey: "kubernetes.io/hostname"
    
    # Hard means that by default pods will only be scheduled if there are enough nodes for them
    # and that they will never end up on the same node. Setting this to soft will do this "best effort"
    antiAffinity: "hard"
    
    # This is the node affinity settings as defined in
    # https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#node-affinity-beta-feature
    nodeAffinity: {}
    
    # The default is to deploy all pods serially. By setting this to parallel all pods are started at
    # the same time when bootstrapping the cluster
    podManagementPolicy: "Parallel"
    
    # The environment variables injected by service links are not used, but can lead to slow Elasticsearch boot times when
    # there are many services in the current namespace.
    # If you experience slow pod startups you probably want to set this to `false`.
    enableServiceLinks: true
    
    protocol: http
    httpPort: 9200
    transportPort: 9300
    
    service:
      labels: {}
      labelsHeadless: {}
      type: ClusterIP
      nodePort: ""
      annotations: {}
      httpPortName: http
      transportPortName: transport
      loadBalancerIP: ""
      loadBalancerSourceRanges: []
      externalTrafficPolicy: ""
    
    updateStrategy: RollingUpdate
    
    # This is the max unavailable setting for the pod disruption budget
    # The default value of 1 will make sure that kubernetes won't allow more than 1
    # of your pods to be unavailable during maintenance
    maxUnavailable: 1
    
    podSecurityContext:
      fsGroup: 1000
      runAsUser: 1000
    
    securityContext:
      capabilities:
        drop:
        - ALL
      # readOnlyRootFilesystem: true
      runAsNonRoot: true
      runAsUser: 1000
    
    # How long to wait for elasticsearch to stop gracefully
    terminationGracePeriod: 120
    
    sysctlVmMaxMapCount: 262144
    
    readinessProbe:
      failureThreshold: 3
      initialDelaySeconds: 10
      periodSeconds: 10
      successThreshold: 3
      timeoutSeconds: 5
    
    # https://www.elastic.co/guide/en/elasticsearch/reference/6.8/cluster-health.html#request-params wait_for_status
    clusterHealthCheckParams: "wait_for_status=green&timeout=1s"
    
    ## Use an alternate scheduler.
    ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
    ##
    schedulerName: ""
    
    imagePullSecrets: []
    nodeSelector: {}
    tolerations: []
    
    # Enabling this will publically expose your Elasticsearch instance.
    # Only enable this if you have security enabled on your cluster
    ingress:
      enabled: false
      annotations: {}
        # kubernetes.io/ingress.class: nginx
        # kubernetes.io/tls-acme: "true"
      path: /
      hosts:
        - chart-example.local
      tls: []
      #  - secretName: chart-example-tls
      #    hosts:
      #      - chart-example.local
    
    nameOverride: ""
    fullnameOverride: ""
    
    # https://github.com/elastic/helm-charts/issues/63
    masterTerminationFix: false
    
    lifecycle: {}
      # preStop:
      #   exec:
      #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
      # postStart:
      #   exec:
      #     command:
      #       - bash
      #       - -c
      #       - |
      #         #!/bin/bash
      #         # Add a template to adjust number of shards/replicas
      #         TEMPLATE_NAME=my_template
      #         INDEX_PATTERN="logstash-*"
      #         SHARD_COUNT=8
      #         REPLICA_COUNT=1
      #         ES_URL=http://localhost:9200
      #         while [[ "$(curl -s -o /dev/null -w '%{http_code}\n' $ES_URL)" != "200" ]]; do sleep 1; done
      #         curl -XPUT "$ES_URL/_template/$TEMPLATE_NAME" -H 'Content-Type: application/json' -d'{"index_patterns":['\""$INDEX_PATTERN"\"'],"settings":{"number_of_shards":'$SHARD_COUNT',"number_of_replicas":'$REPLICA_COUNT'}}'
    
    sysctlInitContainer:
      enabled: true
    
    keystore: []
    
    # Deprecated
    # please use the above podSecurityContext.fsGroup instead
    fsGroup: ""
    

    如没有命名空间,请先进行命名空间创建:

    $ kubectl create ns kube-public
    

    二 部署Filebeat

    cd helm-charts/filebeat
    vim values.yaml  #更改配置
    helm install  . --name=filebeat  --namespace=kube-public #部署filebeat
    
    ---
    # Allows you to add any config files in /usr/share/filebeat
    # such as filebeat.yml
    filebeatConfig:
      filebeat.yml: |
        filebeat.inputs:
        - type: docker
          containers.ids:
            - '*'
          processors:
          - add_kubernetes_metadata:
              in_cluster: true
        output.elasticsearch:
          host: '${NODE_NAME}'
          hosts: '${ELASTICSEARCH_HOSTS:elasticsearch-master:9200}'
    
    # Extra environment variables to append to the DaemonSet pod spec.
    # This will be appended to the current 'env:' key. You can use any of the kubernetes env
    # syntax here
    extraEnvs: []
    #  - name: MY_ENVIRONMENT_VAR
    #    value: the_value_goes_here
    
    extraVolumeMounts: []
      # - name: extras
      #   mountPath: /usr/share/extras
      #   readOnly: true
    
    extraVolumes: []
      # - name: extras
      #   emptyDir: {}
    
    extraContainers: ""
    # - name: dummy-init
    #   image: busybox
    #   command: ['echo', 'hey']
    
    extraInitContainers: []
    # - name: dummy-init
    #   image: busybox
    #   command: ['echo', 'hey']
    
    envFrom: []
    # - configMapRef:
    #     name: configmap-name
    
    # Root directory where Filebeat will write data to in order to persist registry data across pod restarts (file position and other metadata).
    hostPathRoot: /var/lib
    hostNetworking: false
    dnsConfig: {}
    # options:
    #   - name: ndots
    #     value: "2"
    image: "docker.in.zwxict.com/tools/beats/filebeat" # 镜像从私网仓库拉取
    imageTag: "6.8.14-SNAPSHOT"
    imagePullPolicy: "IfNotPresent"
    imagePullSecrets: []
    
    livenessProbe:
      exec:
        command:
          - sh
          - -c
          - |
            #!/usr/bin/env bash -e
            curl --fail 127.0.0.1:5066
      failureThreshold: 3
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 5
    
    readinessProbe:
      exec:
        command:
          - sh
          - -c
          - |
            #!/usr/bin/env bash -e
            filebeat test output
      failureThreshold: 3
      initialDelaySeconds: 10
      periodSeconds: 10
      timeoutSeconds: 5
    
    # Whether this chart should self-manage its service account, role, and associated role binding.
    managedServiceAccount: true
    
    # additionals labels
    labels: {}
    
    podAnnotations: {}
      # iam.amazonaws.com/role: es-cluster
    
    # Various pod security context settings. Bear in mind that many of these have an impact on Filebeat functioning properly.
    #
    # - User that the container will execute as. Typically necessary to run as root (0) in order to properly collect host container logs.
    # - Whether to execute the Filebeat containers as privileged containers. Typically not necessary unless running within environments such as OpenShift.
    podSecurityContext:
      runAsUser: 0
      privileged: false
    
    resources:
      requests:
        cpu: "100m"
        memory: "100Mi"
      limits:
        cpu: "1000m"
        memory: "200Mi"
    
    # Custom service account override that the pod will use
    serviceAccount: ""
    
    # Annotations to add to the ServiceAccount that is created if the serviceAccount value isn't set.
    serviceAccountAnnotations: {}
      # eks.amazonaws.com/role-arn: arn:aws:iam::111111111111:role/k8s.clustername.namespace.serviceaccount
    
    # A list of secrets and their paths to mount inside the pod
    # This is useful for mounting certificates for security other sensitive values
    secretMounts: []
    #  - name: filebeat-certificates
    #    secretName: filebeat-certificates
    #    path: /usr/share/filebeat/certs
    
    # How long to wait for Filebeat pods to stop gracefully
    terminationGracePeriod: 30
    
    tolerations: []
    
    nodeSelector: {}
    
    affinity: {}
    
    # This is the PriorityClass settings as defined in
    # https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
    priorityClassName: ""
    
    updateStrategy: RollingUpdate
    
    # Override various naming aspects of this chart
    # Only edit these if you know what you're doing
    nameOverride: ""
    fullnameOverride: ""
    

    三 部署 Kibana

    cd helm-charts/kibana
    vim values.yaml  #更改配置
    helm install  . --name=kibana  --namespace=kube-public #部署Kibana
    
    ---
    elasticsearchHosts: "http://elasticsearch-master:9200"
    
    replicas: 1
    
    # Extra environment variables to append to this nodeGroup
    # This will be appended to the current 'env:' key. You can use any of the kubernetes env
    # syntax here
    extraEnvs:
      - name: "NODE_OPTIONS"
        value: "--max-old-space-size=1800"
    #  - name: MY_ENVIRONMENT_VAR
    #    value: the_value_goes_here
    
    # Allows you to load environment variables from kubernetes secret or config map
    envFrom: []
    # - secretRef:
    #     name: env-secret
    # - configMapRef:
    #     name: config-map
    
    # A list of secrets and their paths to mount inside the pod
    # This is useful for mounting certificates for security and for mounting
    # the X-Pack license
    secretMounts: []
    #  - name: kibana-keystore
    #    secretName: kibana-keystore
    #    path: /usr/share/kibana/data/kibana.keystore
    #    subPath: kibana.keystore # optional
    
    image: "docker.in.zwxict.com/tools/kibana/kibana"
    imageTag: "6.8.14-SNAPSHOT"
    imagePullPolicy: "IfNotPresent"
    
    # additionals labels
    labels: {}
    
    podAnnotations: {}
      # iam.amazonaws.com/role: es-cluster
    
    resources:
      requests:
        cpu: "1000m"
        memory: "2Gi"
      limits:
        cpu: "1000m"
        memory: "2Gi"
    
    protocol: http
    
    serverHost: "0.0.0.0"
    
    healthCheckPath: "/app/kibana"
    
    # Allows you to add any config files in /usr/share/kibana/config/
    # such as kibana.yml
    kibanaConfig: {}
    #   kibana.yml: |
    #     key:
    #       nestedkey: value
    
    # If Pod Security Policy in use it may be required to specify security context as well as service account
    
    podSecurityContext:
      fsGroup: 1000
    
    securityContext:
      capabilities:
        drop:
        - ALL
      # readOnlyRootFilesystem: true
      runAsNonRoot: true
      runAsUser: 1000
    
    serviceAccount: ""
    
    # This is the PriorityClass settings as defined in
    # https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass
    priorityClassName: ""
    
    httpPort: 5601
    
    extraContainers: ""
    # - name: dummy-init
    #   image: busybox
    #   command: ['echo', 'hey']
    
    extraInitContainers: ""
    # - name: dummy-init
    #   image: busybox
    #   command: ['echo', 'hey']
    
    updateStrategy:
      type: "Recreate"
    
    service:
      type: ClusterIP
      loadBalancerIP: ""
      port: 5601
      nodePort: ""
      labels: {}
      annotations: {}
        # cloud.google.com/load-balancer-type: "Internal"
        # service.beta.kubernetes.io/aws-load-balancer-internal: 0.0.0.0/0
        # service.beta.kubernetes.io/azure-load-balancer-internal: "true"
        # service.beta.kubernetes.io/openstack-internal-load-balancer: "true"
        # service.beta.kubernetes.io/cce-load-balancer-internal-vpc: "true"
      loadBalancerSourceRanges: []
        # 0.0.0.0/0
    
    ingress:
      enabled: true  #打开Ingress访问入口
      annotations: {}
        # kubernetes.io/ingress.class: nginx
        # kubernetes.io/tls-acme: "true"
      path: /
      hosts:
        - kb.ai.in.zwxict.com  #增加hosts
      tls: []
      #  - secretName: chart-example-tls
      #    hosts:
      #      - chart-example.local
    
    readinessProbe:
      failureThreshold: 3
      initialDelaySeconds: 10
      periodSeconds: 10
      successThreshold: 3
      timeoutSeconds: 5
    
    imagePullSecrets: []
    nodeSelector: {}
    tolerations: []
    affinity: {}
    
    nameOverride: ""
    fullnameOverride: ""
    
    lifecycle: {}
      # preStop:
      #   exec:
      #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
      # postStart:
      #   exec:
      #     command: ["/bin/sh", "-c", "echo Hello from the postStart handler > /usr/share/message"]
    
    # Deprecated - use only with versions < 6.6
    elasticsearchURL: "" # "http://elasticsearch-master:9200"
    

    相关文章

      网友评论

        本文标题:k8s使用helm部署efk

        本文链接:https://www.haomeiwen.com/subject/hxklbktx.html