
Kafka Setup

Author: 狼牙战士 | Published: 2018-07-12 09:49

    Kafka in Practice

    Prerequisites:

    • Install Java
    • Install ZooKeeper

    Part 1: Installing the Kafka cluster

    Download and extract the Kafka package on each of the three machines h1, h2, and h3, then edit the broker configuration file on each one. Every broker needs a unique broker.id, its own listeners address, and the shared zookeeper.connect string. A sketch for checking that all three brokers joined the cluster follows the three configurations below.

    1. On h1: vi /home/vagrant/kafka_2.11-1.1.0/config/server.properties

    broker.id=1
    listeners=PLAINTEXT://10.0.34.112:9092
    zookeeper.connect=h1:2181,h2:2181,h3:2181
    

    2. On h2: vi /home/vagrant/kafka_2.11-1.1.0/config/server.properties

    broker.id=2
    listeners=PLAINTEXT://10.0.34.111:9092
    zookeeper.connect=h1:2181,h2:2181,h3:2181
    

    3. On h3: vi /home/vagrant/kafka_2.11-1.1.0/config/server.properties

    broker.id=3
    listeners=PLAINTEXT://10.0.34.110:9092
    zookeeper.connect=h1:2181,h2:2181,h3:2181
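
    Once all three brokers are configured and started (see Part 2), it is worth checking that they all registered with the cluster. The following is a minimal sketch using the AdminClient shipped with this Kafka client version; the class name VerifyCluster and the choice of bootstrap address are assumptions, not part of the original setup.

    package testKafka;

    import java.util.Properties;

    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.common.Node;

    public class VerifyCluster {

        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            // Any one reachable broker is enough to bootstrap the client.
            props.put("bootstrap.servers", "10.0.34.112:9092");

            try (AdminClient admin = AdminClient.create(props)) {
                // describeCluster() reports the brokers currently registered in the cluster;
                // with the configuration above this should list broker ids 1, 2 and 3.
                for (Node node : admin.describeCluster().nodes().get()) {
                    System.out.println("broker " + node.id() + " at " + node.host() + ":" + node.port());
                }
            }
        }
    }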
    

    Part 2: Starting the cluster and operating Kafka

    Relevant commands:

    Start ZooKeeper:
    zkServer.sh start
    
    Stop ZooKeeper:
    zkServer.sh stop
    
    Start Kafka (on each broker):
    ./bin/kafka-server-start.sh config/server.properties
    
    Create a topic, specifying the number of partitions and the replication factor:
    ./bin/kafka-topics.sh --create --zookeeper h1:2181,h2:2181,h3:2181 --replication-factor 2 --partitions 3 --topic first-topic
    
    List all topics:
    ./bin/kafka-topics.sh --list --zookeeper localhost:2181
    
    Describe a specific topic:
    ./bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic first-topic
    
    Increase a topic's partition count (Kafka only allows increasing it, so use a value above the current 3):
    ./bin/kafka-topics.sh --zookeeper localhost:2181 --alter --topic first-topic --partitions 6
    
    Delete a topic:
    ./bin/kafka-topics.sh --zookeeper localhost:2181 --delete --topic first-topic
    
    Start a console producer:
    ./bin/kafka-console-producer.sh --broker-list h1:9092,h2:9092,h3:9092 --topic first-topic
    
    Start a console consumer (reading from the beginning of the topic):
    ./bin/kafka-console-consumer.sh --bootstrap-server h1:9092,h2:9092,h3:9092 --from-beginning --topic first-topic
    
    Stop Kafka:
    ./bin/kafka-server-stop.sh
    

    After Kafka is up, start a producer and a consumer to demonstrate sending and receiving messages.


    Connect to ZooKeeper and check the topics:

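    The topics registered in ZooKeeper can also be listed programmatically. Below is a minimal sketch using the same ZkUtils helper that Part 3 relies on; the class name ListTopics is an assumption.

    package testKafka;

    import org.apache.kafka.common.security.JaasUtils;

    import kafka.utils.ZkUtils;
    import scala.collection.JavaConversions;

    public class ListTopics {

        public static void main(String[] args) {
            // Connect to the ZooKeeper ensemble that the brokers register with.
            ZkUtils zkUtils = ZkUtils.apply("h1:2181,h2:2181,h3:2181", 30000, 30000, JaasUtils.isZkSecurityEnabled());
            try {
                // Kafka keeps topic metadata under the /brokers/topics znode.
                for (String topic : JavaConversions.seqAsJavaList(zkUtils.getAllTopics())) {
                    System.out.println(topic);
                }
            } finally {
                zkUtils.close();
            }
        }
    }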

    Part 3: Operating Kafka with the Java API

    1. Creating and deleting topics

    package testKafka;
    
    import java.util.Properties;
    
    import org.apache.kafka.common.security.JaasUtils;
    import kafka.admin.AdminUtils;
    import kafka.admin.RackAwareMode;
    import kafka.utils.ZkUtils;
    
    public class CreateTopic {

        public static void main(String[] args) {
            // Connect to the ZooKeeper ensemble (session and connection timeouts of 30 s each).
            ZkUtils zkUtils = ZkUtils.apply("10.0.34.112:2181,10.0.34.111:2181,10.0.34.110:2181", 30000, 30000, JaasUtils.isZkSecurityEnabled());
            // Create "second-topic" with 1 partition and a replication factor of 1.
            AdminUtils.createTopic(zkUtils, "second-topic", 1, 1, new Properties(), RackAwareMode.Enforced$.MODULE$);

            // To delete the topic instead, uncomment the following line:
    //      AdminUtils.deleteTopic(zkUtils, "second-topic");
            zkUtils.close();
        }

    }
    

    Run the code and check the console; one more topic appears.

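    ZkUtils and AdminUtils talk to ZooKeeper directly. The same client version also provides a broker-side AdminClient API that can create and delete topics without touching ZooKeeper. A minimal sketch follows; the class name and the topic name "third-topic" are assumptions.

    package testKafka;

    import java.util.Collections;
    import java.util.Properties;

    import org.apache.kafka.clients.admin.AdminClient;
    import org.apache.kafka.clients.admin.NewTopic;

    public class CreateTopicWithAdminClient {

        public static void main(String[] args) throws Exception {
            Properties props = new Properties();
            props.put("bootstrap.servers", "10.0.34.112:9092,10.0.34.111:9092,10.0.34.110:9092");

            try (AdminClient admin = AdminClient.create(props)) {
                // 1 partition, replication factor 1 -- the same parameters as the ZkUtils example above.
                NewTopic topic = new NewTopic("third-topic", 1, (short) 1);
                admin.createTopics(Collections.singletonList(topic)).all().get();

                // To delete it instead:
                // admin.deleteTopics(Collections.singletonList("third-topic")).all().get();
            }
        }
    }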

    2. Creating a producer and sending messages

    package testKafka;
    
    import java.util.Properties;
    
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.Producer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    
    public class ProducerDemo {
    
        public static void main(String[] args) {
            Properties properties = new Properties();
            properties.put("bootstrap.servers", "10.0.34.112:9092,10.0.34.111:9092,10.0.34.110:9092");
            properties.put("acks", "all");
            properties.put("retries", 10);
            properties.put("batch.size", 16384);
            properties.put("linger.ms", 1);
            properties.put("buffer.memory", 33554432);
            properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            
            Producer<String, String> producer = null;
            try {
                producer = new KafkaProducer<String,String>(properties);
                for(int i =0;i<10;i++) {
                    String msg = "Message" + i;
                    producer.send(new ProducerRecord<String, String>("first-topic", msg));
                    System.out.println("Sent: "+msg);
                }
                
            } catch (Exception e) {
                e.printStackTrace();
            } finally {
                // If the constructor threw, producer is still null, so guard the close.
                if (producer != null) {
                    producer.close();
                }
            }
        }
    }
    

    Check the console output.

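    The send loop in ProducerDemo is fire-and-forget: send() returns immediately, so the "Sent:" lines only confirm that records were handed to the client, not that the brokers accepted them. The sketch below shows two ways to confirm delivery, blocking on the Future returned by send() and registering a Callback; the class name ProducerAckDemo and the message contents are assumptions.

    package testKafka;

    import java.util.Properties;

    import org.apache.kafka.clients.producer.Callback;
    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.Producer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.clients.producer.RecordMetadata;

    public class ProducerAckDemo {

        public static void main(String[] args) throws Exception {
            Properties properties = new Properties();
            properties.put("bootstrap.servers", "10.0.34.112:9092,10.0.34.111:9092,10.0.34.110:9092");
            properties.put("acks", "all");
            properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
            properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

            Producer<String, String> producer = new KafkaProducer<>(properties);
            try {
                // Synchronous: block on the Future until the broker acknowledges the record.
                RecordMetadata meta = producer.send(new ProducerRecord<String, String>("first-topic", "sync message")).get();
                System.out.println("Written to partition " + meta.partition() + ", offset " + meta.offset());

                // Asynchronous: the Callback fires once the send succeeds or fails.
                producer.send(new ProducerRecord<String, String>("first-topic", "async message"), new Callback() {
                    public void onCompletion(RecordMetadata metadata, Exception exception) {
                        if (exception != null) {
                            exception.printStackTrace();   // delivery failed after any retries
                        } else {
                            System.out.println("Callback: offset " + metadata.offset());
                        }
                    }
                });
            } finally {
                producer.close();   // flushes buffered records before closing
            }
        }
    }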

    3. Creating a consumer and receiving data

    package testKafka;
    
    import java.util.Arrays;
    import java.util.Properties;
    
    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    
    public class ConsumerDemo {
    
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "10.0.34.112:9092,10.0.34.111:9092,10.0.34.110:9092");
            props.put("group.id", "test");
            props.put("enable.auto.commit", "true");
            props.put("auto.commit.interval.ms", "1000");
            props.put("session.timeout.ms", "30000");
            props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
            consumer.subscribe(Arrays.asList("first-topic"));
            while (true) {
              ConsumerRecords<String, String> records = consumer.poll(100);
              
              for (ConsumerRecord<String, String> record : records)
                System.out.printf("offset = %d, key = %s, value = %s\n", record.offset(), record.key(), record.value());
            }
        }
    }
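
    The loop above never returns, so consumer.close() is never called and the broker only notices the consumer is gone after its session times out. A sketch of a cleaner shutdown using consumer.wakeup() from a JVM shutdown hook follows; the class name ConsumerShutdownDemo is an assumption, and the other settings mirror ConsumerDemo.

    package testKafka;

    import java.util.Arrays;
    import java.util.Properties;

    import org.apache.kafka.clients.consumer.ConsumerRecord;
    import org.apache.kafka.clients.consumer.ConsumerRecords;
    import org.apache.kafka.clients.consumer.KafkaConsumer;
    import org.apache.kafka.common.errors.WakeupException;

    public class ConsumerShutdownDemo {

        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "10.0.34.112:9092,10.0.34.111:9092,10.0.34.110:9092");
            props.put("group.id", "test");
            props.put("enable.auto.commit", "true");
            props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
            props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

            final KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
            final Thread mainThread = Thread.currentThread();

            // wakeup() is the one consumer method that is safe to call from another thread;
            // it makes a blocked poll() throw WakeupException so the loop can exit cleanly.
            Runtime.getRuntime().addShutdownHook(new Thread() {
                public void run() {
                    consumer.wakeup();
                    try {
                        mainThread.join();   // wait until the consumer has been closed
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                }
            });

            try {
                consumer.subscribe(Arrays.asList("first-topic"));
                while (true) {
                    ConsumerRecords<String, String> records = consumer.poll(100);
                    for (ConsumerRecord<String, String> record : records) {
                        System.out.printf("offset = %d, key = %s, value = %s%n", record.offset(), record.key(), record.value());
                    }
                }
            } catch (WakeupException e) {
                // expected on shutdown; fall through to close()
            } finally {
                consumer.close();   // commits the latest offsets and leaves the group cleanly
            }
        }
    }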
    

