搭建ELK日志管理(Elasticsearch,Logstash,Kibana)
搭建环境要求
docker,docker-compose,rabbitmq
搭建流程(以centos操作系统为例)
- 创建 docker-compose.yml
vi docker-compose.yml
version: '3'
services:
  elasticsearch:
    image: elasticsearch:7.3.1
    environment:
      # single-node discovery: skip production bootstrap checks (dev setup)
      discovery.type: single-node
    ports:
      - "9200:9200"  # HTTP REST API
      - "9300:9300"  # transport (node-to-node)
      # NOTE(review): 9100 is the elasticsearch-head UI port, not served by
      # this image — confirm it is actually needed before keeping this mapping
      - "9100:9100"
  logstash:
    image: logstash:7.3.1
    command: logstash -f /etc/logstash/conf.d/logstash.conf
    volumes:
      # mount the pipeline and settings files created in ./config below
      - ./config:/etc/logstash/conf.d
      - ./config/logstash.yml:/etc/logstash/logstash.yml
    ports:
      - "5000:5000"
  kibana:
    image: kibana:7.3.1
    environment:
      # Kibana 7.x reads ELASTICSEARCH_HOSTS (ELASTICSEARCH_URL was removed
      # in 7.0); the compose service name resolves on the default network
      - ELASTICSEARCH_HOSTS=http://elasticsearch:9200
    ports:
      - "5601:5601"
- 在docker-compose.yml文件所在目录下创建 config 文件夹并进入
mkdir config
cd config
- 创建logstash.conf配置文件和logstash.yml配置文件
vi logstash.conf
input {
  # consume application log events published to RabbitMQ
  rabbitmq {
    host => "rabbitmq_ip"  # change to your RabbitMQ host
    port => 5672           # change to your RabbitMQ port (must be a number, not a bare word)
    subscription_retry_interval_seconds => 5
    vhost => "/"
    # the settings below must match the RabbitMQ queue configuration in your project
    exchange => "rabbitmq_exchange"
    exchange_type => "fanout"
    queue => "rabbitmq_queue"
    durable => true
    auto_delete => false
    user => "rabbitmq_user"
    password => "rabbitmq_password"
  }
}
filter {
  grok {
    # parse Spring Cloud Sleuth style lines:
    # <timestamp> <level> [service,trace,span,exportable] <pid> --- [thread] <class> : <message>
    match => { "message" => "%{TIMESTAMP_ISO8601:timestamp}\s+%{LOGLEVEL:severity}\s+\[%{DATA:service},%{DATA:trace},%{DATA:span},%{DATA:exportable}\]\s+%{DATA:pid}\s+---\s+\[%{DATA:thread}\]\s+%{DATA:class}\s+:\s+%{GREEDYDATA:rest}" }
  }
}
output {
  elasticsearch {
    # compose service name resolves inside the compose network; adjust if
    # elasticsearch runs elsewhere
    hosts => ["http://elasticsearch:9200"]
  }
}
vi logstash.yml
# Logstash settings: bind the monitoring/HTTP API and point x-pack
# monitoring at the elasticsearch instance started by docker-compose
http.host: "0.0.0.0"
# 7.x setting name is .hosts (the .url form was the 6.x name)
xpack.monitoring.elasticsearch.hosts: ["http://elasticsearch:9200"]
xpack.monitoring.elasticsearch.username: elastic
xpack.monitoring.elasticsearch.password: changeme
- 启动项目
cd ..
docker-compose up -d
搭建完成后访问Kibana( http://yourip:5601/ )


然后输入条件即可分析日志
网友评论