Getting Started with Kafka

Author: Jerry_Hao | Published 2018-01-03 10:32

    Installing and Running Kafka

    • Download version 0.10.1.0 from the official website (Java 1.8 required)
    • Start ZooKeeper
      bin/zookeeper-server-start.sh config/zookeeper.properties
    • Start the Kafka broker
      bin/kafka-server-start.sh config/server.properties
    • Create a topic
      bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 1 --topic test
    • Start a console consumer
      bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic test --from-beginning
    • Start a console producer
      bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test
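    • Verify the setup: a line typed into the producer console should appear in the consumer console. Listing the topics is another quick sanity check:
      bin/kafka-topics.sh --list --zookeeper localhost:2181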

    Testing with Java Code

    pom.xml dependencies

    <!-- Java producer/consumer client API (kafka_2.10 is the Scala broker
         artifact; the client classes used below live in kafka-clients) -->
    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-clients</artifactId>
        <version>0.10.1.0</version>
    </dependency>
    <!-- Kafka Streams DSL, used by the streaming demo at the end -->
    <dependency>
        <groupId>org.apache.kafka</groupId>
        <artifactId>kafka-streams</artifactId>
        <version>0.10.1.0</version>
    </dependency>
    

    producer-demo1

    import java.util.Properties;
    
    import org.apache.kafka.clients.producer.Producer;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    
    public class KafkaProducerDemo1 {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");
            props.put("acks", "all");              // wait for all in-sync replicas to acknowledge
            props.put("retries", 0);               // do not retry failed sends
            props.put("batch.size", 16384);        // per-partition batch size in bytes
            props.put("linger.ms", 1);             // wait up to 1 ms for batches to fill
            props.put("buffer.memory", 33554432);  // 32 MB of total buffer memory
            props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
    
            Producer<String, String> producer = new KafkaProducer<>(props);
            // Send five records with keys "a".."e" and values "aa".."ee".
            char c = 'a';
            for (int i = 0; i < 5; i++) {
                String k = String.valueOf(c);
                String v = k + k;
                producer.send(new ProducerRecord<String, String>("test", k, v));
                c++;
            }
            producer.close();
        }
    }
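
    send() is asynchronous, so the loop above never learns whether a record was actually delivered. A minimal sketch of the same send with a Callback (part of the 0.10.x producer API) that logs the outcome; drop it in place of the producer.send(...) line above:

    // Requires two extra imports:
    //   import org.apache.kafka.clients.producer.Callback;
    //   import org.apache.kafka.clients.producer.RecordMetadata;
    producer.send(new ProducerRecord<String, String>("test", k, v), new Callback() {
        @Override
        public void onCompletion(RecordMetadata metadata, Exception exception) {
            if (exception != null) {
                exception.printStackTrace();  // delivery failed
            } else {
                System.out.println("sent to partition " + metadata.partition()
                        + " at offset " + metadata.offset());
            }
        }
    });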
    

    consumer-demo1

    A fresh group.id is generated on every run, so there are no stored offsets for the group and auto.offset.reset makes the consumer read messages from the earliest offset each time.

    import java.util.Properties;
    import java.util.Arrays;
    import java.util.UUID;
    
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    
    
    public class KafkaConsumerDemo1 {
        public static void main(String[] args) throws InterruptedException {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");
            // Random group.id per run: no committed offsets exist for the group,
            // so auto.offset.reset kicks in and consumption starts from the beginning.
            props.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString());
            props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
            props.put("enable.auto.commit", "true");       // commit offsets automatically
            props.put("auto.commit.interval.ms", "1000");  // once per second
            props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
    
            try {
                consumer.subscribe(Arrays.asList("test"));
    
                while (true) {
                    ConsumerRecords<String, String> records = consumer.poll(100);
                    for (ConsumerRecord<String, String> record : records) {
                        System.out.println(record.offset() + ": " + record.value());
                    }
                    Thread.sleep(1000);
                }
            } finally {
                consumer.close();
            }
        }
    }
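
    The while(true) loop above has no clean exit. KafkaConsumer.wakeup() is the one consumer method that is safe to call from another thread; it makes a blocked poll() throw a WakeupException. A minimal shutdown-hook sketch around the same poll loop (WakeupException lives in org.apache.kafka.common.errors):

    // Requires: import org.apache.kafka.common.errors.WakeupException;
    final Thread mainThread = Thread.currentThread();
    Runtime.getRuntime().addShutdownHook(new Thread() {
        @Override
        public void run() {
            consumer.wakeup();      // interrupts a blocked poll()
            try {
                mainThread.join();  // wait for the poll loop to finish
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    });

    try {
        consumer.subscribe(Arrays.asList("test"));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(100);
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record.offset() + ": " + record.value());
            }
        }
    } catch (WakeupException e) {
        // expected on shutdown; fall through to close()
    } finally {
        consumer.close();
    }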
    

    consumer-demo2

    Uses a fixed group.id and reads messages starting from a specified offset via assign() and seek().

    import java.util.Properties;
    import java.util.Arrays;
    
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.common.TopicPartition;
    
    public class KafkaConsumerDemo2 {
        public static void main(String[] args) throws InterruptedException {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "group_test_1");
            props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
            props.put("enable.auto.commit", "false");  // commit manually via commitSync()
            props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
    
            try {
                String topic = "test";
                long offset = 10;
                // assign() takes partitions directly, bypassing group management,
                // which is what allows the explicit seek() below.
                TopicPartition partition0 = new TopicPartition(topic, 0);
                consumer.assign(Arrays.asList(partition0));
                consumer.seek(partition0, offset);
    
                while (true) {
                    ConsumerRecords<String, String> records = consumer.poll(100);
                    for (ConsumerRecord<String, String> record : records) {
                        System.out.println(record.offset() + ": " + record.value());
                    }
                    consumer.commitSync();
                    Thread.sleep(1000);
                }
            } finally {
                consumer.close();
            }
        }
    }
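
    If you want the beginning or end of a partition rather than a hard-coded offset, the consumer also provides seekToBeginning() and seekToEnd(); in the 0.10.x API both take a collection of partitions:

    // Alternatives to consumer.seek(partition0, offset):
    consumer.seekToBeginning(Arrays.asList(partition0));  // replay from the first offset
    consumer.seekToEnd(Arrays.asList(partition0));        // skip ahead; only new messages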
    

    consumer-demo3

    Uses a fixed group.id and manages the offset positions itself, committing per partition after processing.

    import java.util.Properties;
    import java.util.Arrays;
    import java.util.List;
    import java.util.Collections;
    
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.clients.consumer.ConsumerConfig;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.OffsetAndMetadata;
    import org.apache.kafka.common.TopicPartition;
    
    public class KafkaConsumerDemo3 {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "localhost:9092");
            props.put(ConsumerConfig.GROUP_ID_CONFIG, "group_test_1");
            props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
            props.put("enable.auto.commit", "false");  // offsets committed explicitly below
            props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
    
            try {
                consumer.subscribe(Arrays.asList("test"));
    
                while (true) {
                    ConsumerRecords<String, String> records = consumer.poll(100);
                    for (TopicPartition partition : records.partitions()) {
                        List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);
                        for (ConsumerRecord<String, String> record : partitionRecords) {
                            System.out.println(record.offset() + ": " + record.value());
                        }
                        // Commit the offset of the next record to consume, i.e. last + 1.
                        long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
                        consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));
                    }
                }
            } finally {
                consumer.close();
            }
        }
    }
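
    Going one step further, the committed position can live outside Kafka entirely, e.g. in the same database transaction that stores the processed results. On a rebalance you then seek each newly assigned partition to the stored position. A sketch, where readOffsetFromStore() is a hypothetical helper you would implement against your own storage:

    // Requires: import org.apache.kafka.clients.consumer.ConsumerRebalanceListener;
    //           import java.util.Collection;
    consumer.subscribe(Arrays.asList("test"), new ConsumerRebalanceListener() {
        @Override
        public void onPartitionsAssigned(Collection<TopicPartition> partitions) {
            for (TopicPartition partition : partitions) {
                // hypothetical lookup of the last position saved by your application
                consumer.seek(partition, readOffsetFromStore(partition));
            }
        }

        @Override
        public void onPartitionsRevoked(Collection<TopicPartition> partitions) {
            // flush processed results together with their offsets to your store here
        }
    });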
    

    kafka-streaming-demo1

    import java.util.Map;
    import java.util.HashMap;
    
    import org.apache.kafka.streams.StreamsConfig;
    import org.apache.kafka.streams.KafkaStreams;
    import org.apache.kafka.streams.kstream.KStream;
    import org.apache.kafka.streams.kstream.KStreamBuilder;
    import org.apache.kafka.common.serialization.Serdes;
    
    public class KafkaStreamingDemo1 {
        public static void main(String[] args) {
            Map<String, Object> props = new HashMap<>();
            props.put(StreamsConfig.APPLICATION_ID_CONFIG, "my-stream-processing-application");
            props.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
            // Default serdes for record keys and values (0.10.x config keys)
            props.put(StreamsConfig.KEY_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
            props.put(StreamsConfig.VALUE_SERDE_CLASS_CONFIG, Serdes.String().getClass().getName());
            StreamsConfig config = new StreamsConfig(props);
    
            KStreamBuilder builder = new KStreamBuilder();
            // Keep only records whose value is "b" and print them to stdout.
            KStream<String, String> stream = builder.stream("test");
            stream.filter((k, v) -> v.equals("b")).print();
    
            KafkaStreams streams = new KafkaStreams(builder, config);
            streams.start();
    
            // Close the streams instance cleanly on JVM shutdown.
            Runtime.getRuntime().addShutdownHook(new Thread(streams::close));
        }
    }
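
    print() is handy for debugging, but a real topology usually writes its output back to Kafka with to(). A one-line variant of the filter above, assuming a target topic named test-filtered exists (or broker-side topic auto-creation is enabled):

    // Route matching records to a downstream topic instead of stdout:
    stream.filter((k, v) -> v.equals("b")).to("test-filtered");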
    
