taildirToKafka.conf
a1.sources = r1
a1.sinks = k1
a1.channels = c1
# Describe the source
a1.sources.r1.type = org.apache.flume.source.taildir.TaildirSource
a1.sources.r1.positionFile = /var/log/flume/taildir_position.json
a1.sources.r1.filegroups = f1 f2
a1.sources.r1.filegroups.f1 = /Log/a/^.*\.log(.*)$
a1.sources.r1.headers.f1.headerKey1 = value1
a1.sources.r1.filegroups.f2 = /Log/b/^.*\.log.[0-9](.*)$
a1.sources.r1.headers.f2.headerKey2 = value2
a1.sources.r1.fileHeader = true
a1.sources.r1.maxBatchCount = 50
a1.sources.r1.batchSize = 1000
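# Note: the positionFile above is maintained by the Taildir source itself. It is a
# JSON array recording the inode, byte offset, and absolute path of every tracked
# file; an entry looks roughly like this (values are illustrative only):
#   [{"inode":2496272,"pos":166,"file":"/Log/a/app.log"}]
# If the position file is deleted, matched files are re-read from the beginning
# (skipToEnd defaults to false).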
# Describe the sink
a1.sinks.k1.type = org.apache.flume.sink.kafka.KafkaSink
a1.sinks.k1.kafka.topic = test
a1.sinks.k1.kafka.bootstrap.servers = 99.xx.xx.xxx:9092,99.xx.xx.xxx:9092
a1.sinks.k1.kafka.flumeBatchSize = 1000
a1.sinks.k1.kafka.producer.max.request.size = 104857600
a1.sinks.k1.kafka.producer.acks = 1
a1.sinks.k1.kafka.producer.linger.ms = 1
a1.sinks.k1.kafka.producer.compression.type = snappy
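# Keys under kafka.producer.* are passed straight through to the Kafka producer, so
# max.request.size (100 MB here) only helps if the brokers/topic also accept messages
# that large (message.max.bytes on the broker, max.message.bytes on the topic).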
# Use a channel which buffers events in memory
a1.channels.c1.type = memory
a1.channels.c1.capacity = 100000
a1.channels.c1.transactionCapacity = 1000
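# transactionCapacity should be at least as large as the source batchSize and the
# sink flumeBatchSize (all 1000 here); capacity bounds how many events the memory
# channel can buffer before the source's puts start failing and are retried.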
# Bind the source and sink to the channel
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
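With the source, channel, and sink wired together, the agent can be launched with the standard flume-ng command and the pipeline verified with Kafka's console consumer. The paths below are a sketch: they assume the config file is saved under Flume's conf directory and that each command is run from the Flume and Kafka installation directories respectively, with the broker address taken from the config above.

bin/flume-ng agent --conf conf --conf-file conf/taildirToKafka.conf --name a1 -Dflume.root.logger=INFO,console
bin/kafka-console-consumer.sh --bootstrap-server 99.xx.xx.xxx:9092 --topic test --from-beginning

Once the agent is running, appending lines to files matching the filegroups (for example under /Log/a/) should make the events show up in the test topic.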