k8s Deployment - 51 - How to present k8s log output in a clean web UI

Author: 运维家 | Published 2022-05-02 23:04

    How can we view k8s logs in a clean and straightforward way? Here we use the last of the approaches described in the previous article: log-pilot + ElasticSearch + Kibana. log-pilot collects the logs on each node and ships them to ElasticSearch for storage, and Kibana reads from ElasticSearch to present the k8s logs in a web UI. Let's go through it step by step.

    1. Installing ElasticSearch in k8s

    First, log in to the node1 node and create the yaml file.

    Note that for a production deployment, ElasticSearch needs three nodes to be highly available; if you do not have enough worker nodes, keep at least two ElasticSearch nodes. Since my servers are limited, I install two ElasticSearch nodes here.
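
    Before settling on the replica count, you can check how many schedulable worker nodes are available (a small sketch; it assumes the master nodes carry the node-role.kubernetes.io/master label, as the tolerations in the yaml below do):

    [root@node1 ~]# kubectl get nodes -l '!node-role.kubernetes.io/master'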

    (1) Create the yaml file

    [root@node1 ~]# mkdir namespace/logs
    [root@node1 ~]# cd namespace/logs/ 
    [root@node1 logs]# vim elasticsearch.yaml 
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: elasticsearch-api
      namespace: kube-system
      labels:
        name: elasticsearch
    spec:
      selector:
        app_name: es
      ports:
      - name: transport
        port: 9200
        protocol: TCP
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: elasticsearch-discovery
      namespace: kube-system
      labels:
        name: elasticsearch
    spec:
      selector:
        app_name: es
      ports:
      - name: transport
        port: 9300
        protocol: TCP
    ---
    apiVersion: apps/v1
    kind: StatefulSet
    metadata:
      name: elasticsearch
      namespace: kube-system
      labels:
        kubernetes.io/cluster-service: "true"
    spec:
      replicas: 2
      serviceName: "elasticsearch-service"
      selector:
        matchLabels:
          app_name: es
      template:
        metadata:
          labels:
            app_name: es
        spec:
          nodeSelector:
            app_name: es
          tolerations:
          - effect: NoSchedule
            key: node-role.kubernetes.io/master
          serviceAccountName: dashboard-admin
          initContainers:
          - name: init-sysctl
            image: busybox:1.27
            command:
            - sysctl
            - -w
            - vm.max_map_count=262144
            # command: ["/bin/sh", "-ce", "tail -f /dev/null"]   # debug alternative: keep the init container alive
            securityContext:
              privileged: true
          containers:
          - name: elasticsearch
            image: registry.cn-hangzhou.aliyuncs.com/imooc/elasticsearch:5.5.1
            ports:
            - containerPort: 9200
              protocol: TCP
            - containerPort: 9300
              protocol: TCP
            securityContext:
              capabilities:
                add:
                  - IPC_LOCK
                  - SYS_RESOURCE
            resources:
              limits:
                memory: 2000Mi
              requests:
                cpu: 100m
                memory: 2000Mi
            env:
              - name: "http.host"
                value: "0.0.0.0"
              - name: "network.host"
                value: "_eth0_"
              - name: "cluster.name"
                value: "docker-cluster"
              - name: "bootstrap.memory_lock"
                value: "false"
              - name: "discovery.zen.ping.unicast.hosts"
                value: "elasticsearch-discovery"
              - name: "discovery.zen.ping.unicast.hosts.resolve_timeout"
                value: "10s"
              - name: "discovery.zen.ping_timeout"
                value: "6s"
              - name: "discovery.zen.minimum_master_nodes"
                value: "2"
              - name: "discovery.zen.fd.ping_interval"
                value: "2s"
              - name: "discovery.zen.no_master_block"
                value: "write"
              - name: "gateway.expected_nodes"
                value: "2"
              - name: "gateway.expected_master_nodes"
                value: "1"
              - name: "transport.tcp.connect_timeout"
                value: "60s"
              - name: "ES_JAVA_OPTS"
                value: "-Xms2g -Xmx2g"
            livenessProbe:
              tcpSocket:
                port: transport
              initialDelaySeconds: 20
              periodSeconds: 10
            volumeMounts:
            - name: es-data
              mountPath: /data
          terminationGracePeriodSeconds: 30
          volumes:
          - name: es-data
            hostPath:
              path: /es-data
    [root@node1 logs]# 
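
    Before applying it, the manifest can be sanity-checked without touching the cluster (an optional step; --dry-run=client is available in reasonably recent kubectl releases):

    [root@node1 logs]# kubectl apply --dry-run=client -f elasticsearch.yaml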

    (2) Label the nodes

    As the yaml above intends, the ElasticSearch pods should run on worker nodes labeled app_name=es, so we first add that label to our worker nodes:

    [root@node1 logs]# kubectl label node node2 app_name=es
    [root@node1 logs]# kubectl label node node3 app_name=es
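
    To confirm that the labels landed on the intended nodes, you can list them by selector:

    [root@node1 logs]# kubectl get nodes -l app_name=es --show-labels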

    (3) Apply the yaml

    [root@node1 logs]# kubectl apply -f elasticsearch.yaml 
    service/elasticsearch-api unchanged
    service/elasticsearch-discovery unchanged
    statefulset.apps/elasticsearch created
    [root@node1 logs]#

    We can use the following command to check that everything is running properly:

    [root@node1 logs]# kubectl get all -n kube-system
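
    Once both elasticsearch pods show as Running, the cluster health can also be probed through the elasticsearch-api service from inside the cluster (an illustrative check; the temporary pod name is arbitrary):

    [root@node1 logs]# kubectl run es-check -n kube-system --rm -it --restart=Never \
        --image=busybox:1.27 -- wget -qO- http://elasticsearch-api:9200/_cluster/health

    A healthy two-node cluster should report "number_of_nodes":2 with a green or yellow status.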

    2. Installing log-pilot in k8s

    [root@node1 logs]# vim log-pilot.yaml 
    ---
    apiVersion: apps/v1
    kind: DaemonSet
    metadata:
      name: log-pilot
      namespace: kube-system
      labels:
        k8s-app: log-pilot
        kubernetes.io/cluster-service: "true"
    spec:
      selector:
        matchLabels:
          k8s-app: log-es
      template:
        metadata:
          labels:
            k8s-app: log-es
            kubernetes.io/cluster-service: "true"
            version: v1.22
        spec:
          tolerations:
          - key: node-role.kubernetes.io/master
            effect: NoSchedule
          serviceAccountName: dashboard-admin
          containers:
          - name: log-pilot
            image: registry.cn-hangzhou.aliyuncs.com/imooc/log-pilot:0.9-filebeat
            resources:
              limits:
                memory: 200Mi
              requests:
                cpu: 100m
                memory: 200Mi
            env:
              - name: "FILEBEAT_OUTPUT"
                value: "elasticsearch"
              - name: "ELASTICSEARCH_HOST"
                value: "elasticsearch-api"
              - name: "ELASTICSEARCH_PORT"
                value: "9200"
              - name: "ELASTICSEARCH_USER"
                value: "elastic"
              - name: "ELASTICSEARCH_PASSWORD"
                value: "changeme"
            volumeMounts:
            - name: sock
              mountPath: /var/run/containerd/containerd.sock
            - name: root
              mountPath: /host
              readOnly: true
            - name: varlib
              mountPath: /var/lib/filebeat
            - name: varlog
              mountPath: /var/log/filebeat
            securityContext:
              capabilities:
                add:
                - SYS_ADMIN
          terminationGracePeriodSeconds: 30
          volumes:
          - name: sock
            hostPath:
              path: /var/run/containerd/containerd.sock
          - name: root
            hostPath:
              path: /
          - name: varlib
            hostPath:
              path: /var/lib/filebeat
              type: DirectoryOrCreate
          - name: varlog
            hostPath:
              path: /var/log/filebeat
              type: DirectoryOrCreate
    [root@node1 logs]# kubectl apply -f log-pilot.yaml 
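
    Since log-pilot runs as a DaemonSet, you should end up with one pod per node; a quick way to confirm this, using the k8s-app=log-es pod label from the manifest above:

    [root@node1 logs]# kubectl get pods -n kube-system -l k8s-app=log-es -o wide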

    3. Installing Kibana in k8s

    (1) Create the yaml file

    [root@node1 logs]# vim kibana.yaml 
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: kibana
      namespace: kube-system
      labels:
        component: kibana
    spec:
      selector:
        component: kibana
      ports:
      - name: http
        port: 80
        targetPort: http
    ---
    #ingress
    apiVersion: networking.k8s.io/v1
    kind: Ingress
    metadata:
      name: kibana
      namespace: kube-system
    spec:
      rules:
      - host: kibana.mooc.com
        http:
          paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: kibana
                port:
                  number: 80
    ---
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: kibana
      namespace: kube-system
      labels:
        component: kibana
    spec:
      replicas: 1
      selector:
        matchLabels:
          component: kibana
      template:
        metadata:
          labels:
            component: kibana
        spec:
          containers:
          - name: kibana
            image: registry.cn-hangzhou.aliyuncs.com/imooc/kibana:5.5.1
            env:
            - name: CLUSTER_NAME
              value: docker-cluster
            - name: ELASTICSEARCH_URL
              value: http://elasticsearch-api:9200/
            resources:
              limits:
                cpu: 1000m
              requests:
                cpu: 100m
            ports:
            - containerPort: 5601
              name: http
    [root@node1 logs]# kubectl apply -f kibana.yaml
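
    Before moving on, you can confirm that the Deployment, Service, and Ingress were all created:

    [root@node1 logs]# kubectl get deploy,svc,ingress -n kube-system | grep kibana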

    (2) Verify access

    On the Windows client machine, edit the hosts file and add an entry mapping the ingress node's IP to the hostname defined in the Ingress above:

    192.168.112.131 kibana.mooc.com

    We can then reach the Kibana UI through the browser.
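
    If you want to check reachability before touching the hosts file, you can also point curl at the ingress node and pass the Host header explicitly (a sketch; it assumes 192.168.112.131 is the node where the ingress controller listens on port 80):

    curl -H 'Host: kibana.mooc.com' http://192.168.112.131/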

    We will not configure anything in Kibana just yet: no log collection has actually been set up, so there is nothing meaningful to configure here. Let's set up log collection next.

    4. Configuring log collection for a service

    (1) Create the yaml file

    Once the Kibana UI comes up, the log collection stack itself is in place. So how do we actually get application logs shown in the UI? Each workload's yaml still needs some extra configuration; an example is given below.
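
    The article's full example is in the follow-up referenced below; as a rough, illustrative sketch of log-pilot's usual convention (not the original article's example), a workload declares environment variables prefixed with aliyun_logs_<name>: a value of stdout collects the container's standard output, a file path or glob collects file logs (file logs must sit on a volume so log-pilot can reach them), and <name> becomes the index name in ElasticSearch. The hypothetical tomcat Deployment below is only a placeholder; image, paths, and names are assumptions:

    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: tomcat-demo
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: tomcat-demo
      template:
        metadata:
          labels:
            app: tomcat-demo
        spec:
          containers:
          - name: tomcat
            image: tomcat:8.5
            ports:
            - containerPort: 8080
            env:
            # collect this container's stdout into an index named "catalina"
            - name: aliyun_logs_catalina
              value: "stdout"
            # collect files matching this glob into an index named "access"
            - name: aliyun_logs_access
              value: "/usr/local/tomcat/logs/localhost_access_log.*.txt"
            volumeMounts:
            # file logs have to live on a volume for log-pilot to pick them up
            - name: tomcat-logs
              mountPath: /usr/local/tomcat/logs
          volumes:
          - name: tomcat-logs
            emptyDir: {}

    Once such a pod is running and producing traffic, the catalina and access indices should show up in ElasticSearch and can be added as index patterns in Kibana.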

    The remaining content is available on the WeChat official account "运维家"; reply "157" to view it.

