Building a Graylog 3 log server cluster

Author: 都市小木屋 | Published 2020-03-28 17:52

    Environment: CentOS 7.6, Graylog 3

    Architecture: nginx load balancing in front; the backend nodes run the Graylog 3 cluster, the Elasticsearch 6 cluster, and the MongoDB cluster.

    Graylog components:

    Graylog: provides the external API and the web UI (CPU-bound)
    Elasticsearch: persistent storage and retrieval of the log data (I/O-bound)
    MongoDB: stores Graylog configuration data
    Nginx: load balancing

    Figure: logical architecture (逻辑架构.png)

    Deployment steps:

    1. Disable the firewall and SELinux

    #Run the same steps on hosts 23.231-234:
    [root@graylog01 ~]# sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
    [root@graylog01 ~]# setenforce 0
    
    [root@graylog01 ~]# systemctl stop firewalld
    [root@graylog01 ~]# systemctl disable  firewalld
    
    #Set the hostname (adjust per node):
    [root@graylog01 ~]# hostnamectl set-hostname graylog01
    

    2. Install dependencies

    #Run the same steps on hosts 23.232-234:
    [root@graylog01 ~]# yum install epel-release pwgen  java-1.8.0-openjdk-headless.x86_64 -y
    

    3. Install MongoDB

    #Run the same steps on hosts 23.232-234:
    [root@graylog01 ~]# touch /etc/yum.repos.d/mongodb-org.repo
    
    [root@graylog01 ~]# cat << 'EOF' > /etc/yum.repos.d/mongodb-org.repo
    [mongodb-org-4.0]
    name=MongoDB Repository
    baseurl=https://repo.mongodb.org/yum/redhat/$releasever/mongodb-org/4.0/x86_64/
    gpgcheck=1
    enabled=1
    gpgkey=https://www.mongodb.org/static/pgp/server-4.0.asc
    EOF
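    #A quick sanity check that the repo file was written correctly (quoting the heredoc delimiter above keeps $releasever literal, so yum rather than the shell expands it):
    [root@graylog01 ~]# cat /etc/yum.repos.d/mongodb-org.repo
    [root@graylog01 ~]# yum repolist enabled | grep mongodb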
    
    [root@graylog01 ~]# yum install -y mongodb-org
    
    [root@graylog01 ~]# systemctl daemon-reload
    [root@graylog01 ~]# systemctl enable mongod.service
    [root@graylog01 ~]# systemctl start mongod.service
    
    #Run a quick check in the mongo shell
      [root@graylog01 ~]# mongo --host 127.0.0.1:27017
      > db
      > show dbs
    
    #Configure remote connections
    
    #Edit mongod.conf
      [root@graylog01 ~]# cp /etc/mongod.conf /etc/mongod.conf.bak
      [root@graylog01 ~]# vi /etc/mongod.conf
      #Change bindIp: 127.0.0.1 to bindIp: 0.0.0.0; mind the YAML spacing (spaces, not tabs)
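      #If you prefer a non-interactive edit, a sed one-liner does the same thing (a small sketch, assuming the stock two-space YAML indentation of the packaged mongod.conf):
      [root@graylog01 ~]# sed -i 's/^  bindIp: 127.0.0.1/  bindIp: 0.0.0.0/' /etc/mongod.conf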
    
    #Open the port (only needed if firewalld is actually running; it was disabled in step 1)
      [root@graylog01 ~]# firewall-cmd --zone=public --add-port=27017/tcp --permanent
      [root@graylog01 ~]# firewall-cmd --reload
      [root@graylog01 ~]# firewall-cmd --zone=public --query-port=27017/tcp
    
    #Restart the service
      [root@graylog01 ~]# systemctl restart mongod.service
    
    #[Cluster configuration]
    #On every MongoDB node, edit /etc/mongod.conf, add a replication section with replSetName: rs0, and restart the service.
    #The resulting configuration:
    [root@graylog01 ~]# cat /etc/mongod.conf |grep -Ev "^#|^$"
    systemLog:
      destination: file
      logAppend: true
      path: /data/mongo/log/mongod.log
      #path: /var/log/mongodb/mongod.log
    storage:
      dbPath: /data/mongo/data
      #dbPath: /var/lib/mongo
      journal:
        enabled: true
    processManagement:
      fork: true  # fork and run in background
      pidFilePath: /var/run/mongodb/mongod.pid  # location of pidfile
      timeZoneInfo: /usr/share/zoneinfo
    net:
      port: 27017
      bindIp: 0.0.0.0  # Enter 0.0.0.0,:: to bind to all IPv4 and IPv6 addresses or, alternatively, use the net.bindIpAll setting.
    replication:
      replSetName: rs0
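    
    #Because dbPath and the log path above were moved off the package defaults, those directories must exist and be owned by the mongod user before the restart (skip this if you keep the default paths):
    [root@graylog01 ~]# mkdir -p /data/mongo/data /data/mongo/log
    [root@graylog01 ~]# chown -R mongod:mongod /data/mongo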
    
    #Then restart the database
    [root@graylog01 ~]# systemctl restart mongod.service
    #Initialize the MongoDB replica set (on one node only), using that node's hostname or IP plus port; here it is run on 172.16.23.232:
    [root@graylog01 ~]# mongo
    > rs.initiate( {
       _id : "rs0",
       members: [ { _id : 0, host : "172.16.23.232:27017" } ]
    })
    #Verify the replica set configuration
    rs0:PRIMARY> rs.conf()
    {
       "_id" : "rs0",
       "version" : 3,
       "protocolVersion" : NumberLong(1),
       "members" : [
          {
             "_id" : 0,
             "host" : "172.16.23.232:27017",
             "arbiterOnly" : false,
             "buildIndexes" : true,
             "hidden" : false,
             "priority" : 1,
             "tags" : {
    
             },
             "slaveDelay" : NumberLong(0),
             "votes" : 1
          }
       ],
       "settings" : {
          "chainingAllowed" : true,
          "heartbeatIntervalMillis" : 2000,
          "heartbeatTimeoutSecs" : 10,
          "electionTimeoutMillis" : 10000,
          "catchUpTimeoutMillis" : 2000,
          "getLastErrorModes" : {
    
          },
          "getLastErrorDefaults" : {
             "w" : 1,
             "wtimeout" : 0
          },
          "replicaSetId" : ObjectId("5e182b012ef13614f3b11681")
       }
    }
    
    #Add the remaining nodes to the replica set and check the cluster status:
    
    rs0:PRIMARY> rs.add("172.16.23.233")
    rs0:PRIMARY> rs.add("172.16.23.234")
    rs0:PRIMARY> rs.status()
    {
        "set" : "rs0",
        "date" : ISODate("2020-03-28T07:27:08.144Z"),
        "myState" : 1,
        "term" : NumberLong(11),
        "syncingTo" : "",
        "syncSourceHost" : "",
        "syncSourceId" : -1,
        "heartbeatIntervalMillis" : NumberLong(2000),
        "optimes" : {
            "lastCommittedOpTime" : {
                "ts" : Timestamp(1585380427, 8),
                "t" : NumberLong(11)
            },
            "readConcernMajorityOpTime" : {
                "ts" : Timestamp(1585380427, 8),
                "t" : NumberLong(11)
            },
            "appliedOpTime" : {
                "ts" : Timestamp(1585380428, 1),
                "t" : NumberLong(11)
            },
            "durableOpTime" : {
                "ts" : Timestamp(1585380427, 8),
                "t" : NumberLong(11)
            }
        },
        "lastStableCheckpointTimestamp" : Timestamp(1585380393, 5),
        "electionCandidateMetrics" : {
            "lastElectionReason" : "electionTimeout",
            "lastElectionDate" : ISODate("2020-03-21T13:29:36.101Z"),
            "electionTerm" : NumberLong(11),
            "lastCommittedOpTimeAtElection" : {
                "ts" : Timestamp(1584795559, 2),
                "t" : NumberLong(10)
            },
            "lastSeenOpTimeAtElection" : {
                "ts" : Timestamp(1584795569, 1),
                "t" : NumberLong(10)
            },
            "numVotesNeeded" : 2,
            "priorityAtElection" : 1,
            "electionTimeoutMillis" : NumberLong(10000),
            "numCatchUpOps" : NumberLong(0),
            "newTermStartDate" : ISODate("2020-03-21T13:29:37.607Z"),
            "wMajorityWriteAvailabilityDate" : ISODate("2020-03-21T13:29:37.772Z")
        },
        "members" : [
            {
                "_id" : 0,
                "name" : "172.16.23.232:27017",
                "health" : 1,
                "state" : 1,
                "stateStr" : "PRIMARY",
                "uptime" : 4831953,
                "optime" : {
                    "ts" : Timestamp(1585380428, 1),
                    "t" : NumberLong(11)
                },
                "optimeDate" : ISODate("2020-03-28T07:27:08Z"),
                "syncingTo" : "",
                "syncSourceHost" : "",
                "syncSourceId" : -1,
                "infoMessage" : "",
                "electionTime" : Timestamp(1584797376, 1),
                "electionDate" : ISODate("2020-03-21T13:29:36Z"),
                "configVersion" : 3,
                "self" : true,
                "lastHeartbeatMessage" : ""
            },
            {
                "_id" : 1,
                "name" : "172.16.23.233:27017",
                "health" : 1,
                "state" : 2,
                "stateStr" : "SECONDARY",
                "uptime" : 583041,
                "optime" : {
                    "ts" : Timestamp(1585380426, 2),
                    "t" : NumberLong(11)
                },
                "optimeDurable" : {
                    "ts" : Timestamp(1585380426, 2),
                    "t" : NumberLong(11)
                },
                "optimeDate" : ISODate("2020-03-28T07:27:06Z"),
                "optimeDurableDate" : ISODate("2020-03-28T07:27:06Z"),
                "lastHeartbeat" : ISODate("2020-03-28T07:27:07.295Z"),
                "lastHeartbeatRecv" : ISODate("2020-03-28T07:27:06.814Z"),
                "pingMs" : NumberLong(4),
                "lastHeartbeatMessage" : "",
                "syncingTo" : "172.16.23.232:27017",
                "syncSourceHost" : "172.16.23.232:27017",
                "syncSourceId" : 0,
                "infoMessage" : "",
                "configVersion" : 3
            },
            {
                "_id" : 2,
                "name" : "172.16.23.234:27017",
                "health" : 1,
                "state" : 2,
                "stateStr" : "SECONDARY",
                "uptime" : 583054,
                "optime" : {
                    "ts" : Timestamp(1585380426, 1),
                    "t" : NumberLong(11)
                },
                "optimeDurable" : {
                    "ts" : Timestamp(1585380426, 1),
                    "t" : NumberLong(11)
                },
                "optimeDate" : ISODate("2020-03-28T07:27:06Z"),
                "optimeDurableDate" : ISODate("2020-03-28T07:27:06Z"),
                "lastHeartbeat" : ISODate("2020-03-28T07:27:06.826Z"),
                "lastHeartbeatRecv" : ISODate("2020-03-28T07:27:08.065Z"),
                "pingMs" : NumberLong(0),
                "lastHeartbeatMessage" : "",
                "syncingTo" : "172.16.23.232:27017",
                "syncSourceHost" : "172.16.23.232:27017",
                "syncSourceId" : 0,
                "infoMessage" : "",
                "configVersion" : 3
            }
        ],
        "ok" : 1,
        "operationTime" : Timestamp(1585380428, 1),
        "$clusterTime" : {
            "clusterTime" : Timestamp(1585380428, 1),
            "signature" : {
                "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                "keyId" : NumberLong(0)
            }
        }
    }
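    #The long status output boils down to one PRIMARY and two SECONDARY members; a quicker way to see just that in the mongo shell:
    rs0:PRIMARY> rs.status().members.forEach(function(m) { print(m.name, m.stateStr) })
    172.16.23.232:27017 PRIMARY
    172.16.23.233:27017 SECONDARY
    172.16.23.234:27017 SECONDARY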
    #Create the graylog database and a graylog user with the readWrite and dbAdmin roles:
    
    rs0:PRIMARY> use graylog
    switched to db graylog
    rs0:PRIMARY> db.createUser( {
        user: "graylog",
         pwd: "xxxxx",
         roles: [ { role: "readWrite", db: "graylog" } ]
       });
    rs0:PRIMARY> db.grantRolesToUser( "graylog" , [ { role: "dbAdmin", db: "graylog" } ])
    rs0:PRIMARY> show users
    rs0:PRIMARY> db.auth("graylog","xxxxx")
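    
    #If MongoDB authentication is enabled later, the mongodb_uri in Graylog's server.conf would need these credentials plus the replica set name, roughly as follows (xxxxx is the password chosen above):
    mongodb_uri = mongodb://graylog:xxxxx@172.16.23.232:27017,172.16.23.233:27017,172.16.23.234:27017/graylog?replicaSet=rs0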
    

    4. Deploy the Elasticsearch cluster

    # Graylog 3.0 requires Elasticsearch 5.6.13 or newer; 6.4.3 is used here (7.x is not supported, verified first-hand)
    #Run the same steps on hosts 23.232-234:
    #Install from the binary tarball:
    [root@graylog01 ~]# wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.4.3.tar.gz
    [root@graylog01 ~]# tar -zxf elasticsearch-6.4.3.tar.gz
    [root@graylog01 ~]# mv elasticsearch-6.4.3 /usr/local/elasticsearch
    
    [root@graylog01 ~]# mkdir -p /data/elasticsearch/data  #create the data directory
    [root@graylog01 ~]# mkdir -p /data/elasticsearch/logs  #create the log directory
        
    #Elasticsearch must not be started as root
    [root@graylog01 ~]# useradd elk
    [root@graylog01 ~]# chown -R elk:elk /usr/local/elasticsearch /data/elasticsearch  #grant ownership
        
    [root@graylog01 ~]# vim /etc/profile  #add the environment variables
    export JAVA_HOME=/usr/local/jdk
    export CLASSPATH=$CLASSPATH:$JAVA_HOME/lib:$JAVA_HOME/jre/lib
    export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin
        
    [root@graylog01 ~]# source /etc/profile  #apply the environment variables immediately
    [root@graylog01 ~]# java -version  #check the Java version (the JDK on this host was installed from a tarball)
    java version "1.8.0_181"
    Java(TM) SE Runtime Environment (build 1.8.0_181-b13)
    Java HotSpot(TM) 64-Bit Server VM (build 25.181-b13, mixed mode)
    
    #Per-node configuration:
    [root@graylog01 ~]# cat /usr/local/elasticsearch/config/elasticsearch.yml |grep -Ev "^#|^$"
    #graylog01:
    cluster.name: graylog3
    node.name: graylog01
    node.master: true
    node.data: true
    bootstrap.memory_lock: false
    bootstrap.system_call_filter: false
    path.data: /data/elasticsearch/data
    path.logs: /data/elasticsearch/logs
    network.host: 0.0.0.0
    http.port: 9200
    discovery.zen.ping.unicast.hosts: ["172.16.23.232:9300", "172.16.23.233:9300","172.16.23.234:9300"]
    
    
    #graylog02
    cluster.name: graylog3
    node.name: graylog02
    node.master: true
    node.data: true
    bootstrap.memory_lock: false
    bootstrap.system_call_filter: false
    path.data: /data/elasticsearch/data
    path.logs: /data/elasticsearch/logs
    network.host: 0.0.0.0
    http.port: 9200
    discovery.zen.ping.unicast.hosts: ["172.16.23.232:9300", "172.16.23.233:9300", "172.16.23.234:9300"]
    
    
    #graylog03
    cluster.name: graylog3
    node.name: graylog03
    node.master: true
    node.data: true
    bootstrap.memory_lock: false
    bootstrap.system_call_filter: false
    path.data: /data/elasticsearch/data
    path.logs: /data/elasticsearch/logs
    network.host: 0.0.0.0
    http.port: 9200
    discovery.zen.ping.unicast.hosts: ["172.16.23.232:9300", "172.16.23.233:9300", "172.16.23.234:9300"]
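    
    #Because network.host is not a loopback address, Elasticsearch 6.x enforces its bootstrap checks; on a stock CentOS 7 host that usually means raising vm.max_map_count and the elk user's open-file limit first (a minimal sketch):
    [root@graylog01 ~]# sysctl -w vm.max_map_count=262144
    [root@graylog01 ~]# echo "vm.max_map_count=262144" >> /etc/sysctl.conf
    [root@graylog01 ~]# cat >> /etc/security/limits.conf << 'EOF'
    elk soft nofile 65536
    elk hard nofile 65536
    EOF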
    
    #Start the Elasticsearch service:
    
    [Starting a tarball install]
    [root@graylog01 ~]# su - elk
    [elk@graylog01 ~]$ /usr/local/elasticsearch/bin/elasticsearch -d  # -d runs it in the background
    
    [Starting a yum install]
    [root@graylog01 ~]# chkconfig --add elasticsearch
    [root@graylog01 ~]# systemctl daemon-reload
    [root@graylog01 ~]# systemctl enable elasticsearch.service
    [root@graylog01 ~]# systemctl restart elasticsearch.service
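    
    #Once all three nodes are up, check the cluster from any node; "status" should be green and "number_of_nodes" should be 3:
    [root@graylog01 ~]# curl -s 'http://127.0.0.1:9200/_cluster/health?pretty'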
    

    5. Install Graylog

    [root@graylog01 ~]# rpm -Uvh https://packages.graylog2.org/repo/packages/graylog-3.0-repository_latest.rpm
    [root@graylog01 ~]# yum install graylog-server -y
    #Edit the configuration file. password_secret and root_password_sha2 are mandatory; the server will not start without them. Set them as follows:
    [root@graylog01 ~]# vim /etc/graylog/server/server.conf
    ---------------------------------------------------------------------------------
    # password_secret can be generated with: pwgen -N 1 -s 96; the value below was generated that way
    password_secret = PLb50jbamnyu3QdWH0o9vYxytEvAH7I6OCjIGxEcKJO08ZHmbYMi00vh1GdJYn5NBItg86hr58imlIUzdE8elVzKkML1oC2Y
     
    # The admin password hash is generated with: echo -n yourpassword | sha256sum
    # Remember the password you used (keep it identical on all three nodes)
    root_password_sha2 = ac9689e2272427085e35b9d3e3e8bed88cb3434828b43b86fc0596cad4c6e270
    # Email address of the admin user
    root_email = "root@example.com"
     
    # Time zone
    root_timezone = Asia/Shanghai
    
    # Enable search result highlighting
    allow_highlighting = true
     
    # Elasticsearch settings
    elasticsearch_hosts = http://graylog01:9200,http://graylog02:9200,http://graylog03:9200 #cluster mode
    elasticsearch_shards = 1
    elasticsearch_replicas = 0
     
    # MongoDB connection; here the local mongod is used directly, without authentication
    mongodb_uri = mongodb://localhost/graylog
     
    # SMTP email settings; point these at your own mail service
    transport_email_enabled = true
    transport_email_hostname = smtp.exmail.qq.com
    transport_email_port = 465
    transport_email_use_auth = true
    transport_email_use_tls = false
    transport_email_use_ssl = true
    transport_email_auth_username = root@example.com
    transport_email_auth_password = 123456
    transport_email_subject_prefix = [graylog]
    transport_email_from_email = root@example.com
    transport_email_web_interface_url = http://graylog.example.com
     
    # Network settings. Important: Graylog 3 trims the network configuration down a lot compared to 2.x; only http_bind_address needs to be set.
    http_bind_address = 0.0.0.0:9000
     
    # External address. A domain name plus an nginx reverse proxy is used here, hence the address below; without one, just use the public IP and port, e.g. http://<public-IP>:9000/
    http_publish_uri = http://172.16.23.232:9000/
     
    # http_external_uri = http://graylog.example.com/  For a single node this does not need to be set; it defaults to http_publish_uri
    ---------------------------------------------------------------------------------
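    #For reference, the two secrets can be generated like this; note that sha256sum appends a trailing "-" field which must not be pasted into the config:
    [root@graylog01 ~]# pwgen -N 1 -s 96
    [root@graylog01 ~]# echo -n YourPassword | sha256sum | awk '{print $1}'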
    #Reference: the server.conf used in the production environment (note that in a cluster only one node should have is_master = true; the others get is_master = false)
    [root@graylog01 data]# grep -Ev "^#|^$" /etc/graylog/server/server.conf
    is_master = true
    node_id_file = /etc/graylog/server/node-id
    password_secret = 7N84Dhanhy0bFwLscsDXcuWFxdFYg5ZBpYUEdIGR
    root_password_sha2 = ac9689e2272427085e35b9d3e3e8bed88cb3434828b43b86fc0596cad4c6e270
    root_timezone = Asia/Shanghai
    bin_dir = /usr/share/graylog-server/bin
    data_dir = /var/lib/graylog-server
    plugin_dir = /usr/share/graylog-server/plugin
    http_bind_address = 0.0.0.0:9000
    http_publish_uri = http://172.16.23.232:9000/
    elasticsearch_hosts = http://graylog01:9200,http://graylog02:9200,http://graylog03:9200
    rotation_strategy = count
    elasticsearch_max_docs_per_index = 20000000
    elasticsearch_max_number_of_indices = 20
    retention_strategy = delete
    elasticsearch_shards = 4
    elasticsearch_replicas = 0
    elasticsearch_index_prefix = graylog
    allow_leading_wildcard_searches = false
    allow_highlighting = true
    elasticsearch_analyzer = standard
    output_batch_size = 500
    output_flush_interval = 1
    output_fault_count_threshold = 5
    output_fault_penalty_seconds = 30
    processbuffer_processors = 5
    outputbuffer_processors = 3
    processor_wait_strategy = blocking
    ring_size = 65536
    inputbuffer_ring_size = 65536
    inputbuffer_processors = 2
    inputbuffer_wait_strategy = blocking
    message_journal_enabled = true
    message_journal_dir = /var/lib/graylog-server/journal
    lb_recognition_period_seconds = 3
    mongodb_uri = mongodb://172.16.23.232:27017,172.16.23.233:27017,172.16.23.234:27017/graylog
    mongodb_max_connections = 1000
    mongodb_threads_allowed_to_block_multiplier = 5
    proxied_requests_thread_pool_size = 32
     ---------------------------------------------------------------------------------
    # The Java path has to be set manually before starting
    [root@graylog01 ~]# vim /etc/sysconfig/graylog-server
    ---------------------------------------------------------------------------------
    JAVA=/usr/local/jdk1.8/bin/java #adjust to the actual Java path on your host
    ---------------------------------------------------------------------------------
     
    # Start the service
    [root@graylog01 ~]# chkconfig --add graylog-server
    [root@graylog01 ~]# systemctl daemon-reload
    [root@graylog01 ~]# systemctl enable graylog-server.service
    [root@graylog01 ~]# systemctl start graylog-server.service
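    
    #Startup takes a little while; the REST API should then answer locally, and the server log shows the startup progress:
    [root@graylog01 ~]# curl -s http://127.0.0.1:9000/api/
    [root@graylog01 ~]# tail -f /var/log/graylog-server/server.log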
    

    6. Log in to the web UI

    #Browse directly to a node's IP, e.g. http://172.16.23.232:9000
    
    Web UI.png

    7. Install Graylog Sidecar (formerly Graylog Collector Sidecar)

    Graylog Sidecar is a lightweight configuration management system for different log collectors, also called backends. The Graylog nodes act as a centralized hub that holds the collector configurations. On supported hosts the Sidecar runs as a service (Windows) or a daemon (Linux); it collects logs on each machine and ships them to the Graylog server. Before Graylog 3.0 it was called Graylog Collector Sidecar; in 3.0 it was renamed Graylog Sidecar. The official documentation has detailed installation instructions (http://docs.graylog.org/en/3.0/pages/sidecar.html), which this section follows. The version compatibility table is below; first download the matching rpm package from the official GitHub releases page (https://github.com/Graylog2/collector-sidecar/releases).

    Figure: sidecar logical diagram (逻辑图.png)

    Sidecar version    Graylog server version
    1.0.x              3.0.x
    0.1.x              2.2.x, 2.3.x, 2.4.x, 2.5.x, 3.0.x
    0.0.9              2.1.x
    # Install
    [root@graylog01 ~]# rpm -i graylog-sidecar-1.0.2-1.x86_64.rpm
     
    # Edit the configuration
    [root@graylog01 ~]# grep -Ev "^#|^$"  /etc/graylog/sidecar/sidecar.yml
    ----------------------------------------------------------------------------
    server_url: "http://172.16.23.231:9000/api/" # external API address; 23.231 is the front-end nginx load balancer, described below
    # The API token is required, otherwise the sidecar will not start; it has to be created manually in the web UI
    server_api_token: "cvm841q4odgofktsihn2t0jsuit0ulivdu5hpj8mh1vjph78t0e" # when collecting from many hosts, a shared API token per business line keeps management simple
    node_name: "graylog01" # custom node name
    update_interval: 10
    send_status: true
    ----------------------------------------------------------------------------
     
    # Install the system service
    [root@graylog01 ~]# graylog-sidecar -service install
    [root@graylog01 ~]# systemctl start graylog-sidecar
    #Create the server_api_token manually in the web UI (screenshots below).
    
    Figure: creating the server_api_token, step 1 (创建server_api_token01.png)
    Figure: creating the server_api_token, step 2 (创建server_api_token02.png)

    Now graylog-sidecar can be started. Once it is running, a node shows up in the web UI, and the following steps configure log collection for that node by hand: define a sidecar filebeat configuration, which the sidecar uses to launch filebeat, collect the logs, and send them to a beats input defined in Graylog. Note that in Graylog 3.0 the Linux sidecar no longer bundles filebeat (versions before 3.0 included it by default), so filebeat has to be downloaded and installed separately; it is a simple rpm install from the official download page (https://www.elastic.co/cn/downloads/beats/filebeat). A sketch of such a filebeat configuration follows the install command below. PS: filebeat is used for collection here; if you collect with nxlog instead, the nxlog program likewise has to be installed.

    #Install filebeat
    [root@graylog01 ~]# rpm -i filebeat-6.6.0-x86_64.rpm
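    
    The collector configuration itself is created in the web UI (under System / Sidecars) and pushed to the node by the sidecar. A filebeat configuration for this setup might look roughly like the sketch below, shipping to the beats input through the nginx load balancer on port 6004; the log path and the service field are placeholders to adjust for your own services:
    
    # which log files filebeat should ship
    filebeat.inputs:
    - type: log
      enabled: true
      paths:
        - /var/log/nginx/*.log        # example path, adjust per service
      fields:
        service: nginx                # example field used to tell services apart
    # send to the Graylog beats input via the nginx TCP load balancer
    output.logstash:
      hosts: ["172.16.23.231:6004"]
    # working directories used by the sidecar-managed filebeat
    path:
      data: /var/lib/graylog-sidecar/collectors/filebeat/data
      logs: /var/lib/graylog-sidecar/collectors/filebeat/log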
    

    8. Deploy the nginx load balancer

    For a quick deployment, see: installing nginx with yum on CentOS 7.

    #Add the load-balancing configuration for the web UI. These directives belong to the nginx stream module, so place the block inside the stream { } context of nginx.conf:
    upstream graylog_servers {
            least_conn;
            server 172.16.23.232:9000;
            server 172.16.23.233:9000;
            server 172.16.23.234:9000;
        }
    
        server {
            listen        9000;
            proxy_buffer_size 16k;
            proxy_timeout  10s;
            error_log     graylog_servers.log;
            proxy_pass graylog_servers;
        }
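    
    #Validate the configuration and reload nginx on the load-balancer host (23.231):
    nginx -t
    systemctl reload nginx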
    
    

    9. Graylog log collection in practice

    The client hosts need graylog-sidecar and filebeat configured and successfully communicating with the server.


    Figure: client successfully listening (客户端成功监听.png); defining the collector (定义收集器.png)
    Figure: defining the listening port (定义监听端口.png)
    Figure: configuring the collection policy (配置收集策略.png)
    Figure: policy details; 23.231 is the nginx load-balancer address (策略详细配置23.231为nginx负载端口.png)
    Figure: applying the policy (应用策略.png)
    #Finally, add the log-input load balancing to nginx (again inside the stream { } context):
        upstream graylog_server_log {
            least_conn;
            server 172.16.23.232:6004;
            server 172.16.23.233:6004;
            server 172.16.23.234:6004;
        }
    
        server {
            listen        6004;
            proxy_buffer_size 16k;
            proxy_timeout  10s;
            error_log     graylog_servers.log;
            proxy_pass graylog_server_log;
        }
    

    Search results in the web UI:


    preview.png

    To keep maintenance orderly, the collected logs can be organized into categories. In real work there are a great many service logs; without storage limits they would soon fill the disk and slow searches down, and very old history has little practical value anyway. Graylog has built-in controls for limiting log data, configured as follows:

    Figure: creating an index set (创建索引.png)
    Figure: index set details (索引详细.png)
    The detailed index settings limit how much log data is kept:
    1. Message count: once the limit is reached, the oldest data is deleted automatically to keep the volume constant
    2. Size: caps the total size of the index files on disk
    3. Time: deletes log data older than the configured age
    Choose whichever strategy fits your situation.

    With the index sets defined, streams can also be created on top of them to route different kinds of logs into their own categories:


    Figure: creating a stream (stream.png)
    Figure: setting stream rules (设置stream规则.png)

    10. Visualization

    1. Create a dashboard
    2. Search for the data you need
    3. Add the search results to the dashboard


    Figure: creating a dashboard (创建dashboard.png); searching data (检索数据.png); resulting view (效果图.png)

    The End

    #Note: this write-up is based on the originals at https://blog.csdn.net/weixin_41004350/article/details/87253316 and https://www.cnblogs.com/wayneiscoming/p/7635357.html, reorganized, then deployed to a production system and verified first-hand. Contact me for removal in case of infringement.
    
