1. Approach
This is an optimization of the configuration from the earlier post: https://www.jianshu.com/p/6b5f38bb4610
The logs are split into two Kafka topics, and that classification is also reflected in the Elasticsearch index names.
2. filebeat.yml
filebeat.inputs:
# The raw log inputs stay disabled; collection is handled by the system module loaded below.
- type: log
  enabled: false
  paths:
    - /var/log/*.log
- type: filestream
  enabled: false
  paths:
    - /var/log/*.log

filebeat.config.modules:
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false

setup.template.settings:
  index.number_of_shards: 1

setup.kibana:
  host: "192.168.18.13:5601"

setup.template.overwrite: true
setup.template.enabled: true
setup.ilm.enabled: false

output.kafka:
  hosts: ["192.168.18.15:9092","192.168.18.16:9092"]
  # Route each event to a topic according to the dataset assigned by the system module.
  topics:
    - topic: "linux_system_auth"
      when.equals:
        event:
          dataset: "system.auth"
    - topic: "linux_system_syslog"
      when.equals:
        event:
          dataset: "system.syslog"

processors:
  - add_host_metadata:
      when.not.contains.tags: forwarded
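The event.dataset values used in the topic conditions ("system.auth" and "system.syslog") are produced by Filebeat's system module, so that module must be enabled (e.g. with filebeat modules enable system). As a minimal sketch, assuming the default log paths, modules.d/system.yml would look roughly like this:

# modules.d/system.yml - sketch, enable both datasets with their default paths
- module: system
  syslog:
    enabled: true
  auth:
    enabled: true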
3. logstash
1) input.conf
input {
  kafka {
    bootstrap_servers => "192.168.18.15:9092,192.168.18.16:9092"
    topics => ["linux_system_auth"]
    # Tag events from this topic so the output can route them to the right index.
    add_field => { "type_name" => "linux_system_auth" }
    consumer_threads => 5
    codec => json
  }
  kafka {
    bootstrap_servers => "192.168.18.15:9092,192.168.18.16:9092"
    topics => ["linux_system_syslog"]
    add_field => { "type_name" => "linux_system_syslog" }
    consumer_threads => 5
    codec => json
  }
}
2) linux_system_out.conf
output {
  if "linux_system_auth" in [type_name] {
    elasticsearch {
      hosts => ["192.168.18.13:9200"]
      index => "os-linux-auth-%{+YYYY.MM.dd}"
    }
  }
  if "linux_system_syslog" in [type_name] {
    elasticsearch {
      hosts => ["192.168.18.13:9200"]
      index => "os-linux-syslog-%{+YYYY.MM.dd}"
    }
  }
}
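Because the input and output live in separate .conf files, they must be loaded into the same Logstash pipeline; otherwise the pipeline reading the Kafka topics has no output, and the conditionals on type_name never see any events. One way to do this is to point a single pipeline at the whole configuration directory in pipelines.yml; the pipeline id and path below are assumptions, adjust them to where the files actually live:

# pipelines.yml - sketch, load all .conf files into one pipeline
- pipeline.id: linux_system
  path.config: "/etc/logstash/conf.d/*.conf"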
This collection method has some drawbacks; see the readers' comments at https://www.jianshu.com/p/26ddc38fcb2d for details.