If the log volume is very high, the pressure on Elasticsearch becomes significant; a message queue can be inserted in front of it to buffer the load.
Simplified architecture: Filebeat (collects /opt/elas.log) -> Kafka (topic "elas", acting as the buffer) -> Logstash (consumes and parses) -> Elasticsearch (daily index elas-YYYY-MM-dd)

-----Filebeat configuration (YAML)------
[root@demo001 filebeat-7.8.1-linux-x86_64]# cat filekafa.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /opt/elas.log

filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false

setup.template.settings:
  index.number_of_shards: 1

setup.kibana:

output.kafka:
  hosts: ["172.16.20.53:9092"]
  topic: "elas"

processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~
Note the topic field: it must match the topic that Logstash will consume from.
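To put this into effect, start Filebeat with the file and then confirm that events actually reach Kafka. A minimal sketch; the console-consumer invocation assumes you run it from a standard Kafka installation directory on the broker:

[root@demo001 filebeat-7.8.1-linux-x86_64]# ./filebeat -e -c filekafa.yml
# on the Kafka broker (172.16.20.53), watch the "elas" topic:
bin/kafka-console-consumer.sh --bootstrap-server 172.16.20.53:9092 --topic elas --from-beginning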
---------Logstash configuration (conf file)--------
[root@demo002 config]# cat file.conf
input {
  kafka {
    bootstrap_servers => "172.16.20.53:9092"
    topics => "elas"
    consumer_threads => 1
    decorate_events => true
    codec => "json"
    auto_offset_reset => "latest"
  }
}

filter {
  ruby {
    code => "event.timestamp.time.localtime"   # time zone adjustment
  }
  mutate {
    remove_field => ["beat"]                   # drop the Beats-added field
  }
  grok {
    match => { "message" => "\[(?<time>\d+-\d+-\d+\s\d+:\d+:\d+)\] \[(?<level>\w+)\] (?<thread>[\w|-]+) (?<class>[\w|\.]+) (?<lineNum>\d+):(?<msg>.+)" }
  }                                            # regex-based parsing
}

output {
  elasticsearch {
    hosts => ["172.16.20.53:9200","172.16.20.54:9200","172.16.20.56:9200"]
    index => "elas-%{+YYYY-MM-dd}"
  }
}
Again, note the topics setting: it must be the same topic that Filebeat writes to.
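Before starting the pipeline, the file can be syntax-checked with Logstash's standard flags (the relative paths assume you run from the Logstash installation directory):

[root@demo002 logstash]# bin/logstash -f config/file.conf --config.test_and_exit   # validate the config and exit
[root@demo002 logstash]# bin/logstash -f config/file.conf                          # start the pipeline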
Manually simulate some log lines:
[root@demo001 opt]# echo "hello my name is joy 004" >> elas.log
[root@demo001 opt]# echo "hello my name is joy 005" >> elas.log


