kafka-stream Streaming System Design and Implementation (demo)

Author: 伊丽莎白菜 | Published 2022-07-30 22:59

Requirements Analysis

Goal

Refactor a system currently driven by scheduled jobs and upgrade it to a streaming system.

Technology Stack

kafka-stream 2.7.0

kafka 2.7.0

Overall Flow

  1. Consume order data from source-topic
  2. Window aggregation: windowedBy, aggregate
  3. A number of intermediate processors: map, filter, ..., which together build the task
  4. Flatten into multiple records: flatMap
  5. Send the task records downstream to sink-topic (a minimal sketch of this topology follows the figure below)
(figure: stream-system.png)
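
A minimal sketch of that topology, assuming the JSON serde defined in section 2 below; the topic names, grouping key and window size are only illustrative, and the full demo later adds suppression, branching and error handling:

    Serde<String> stringSerde = Serdes.String();
    Serde<List<String>> jsonSerde = Serdes.serdeFrom(new JSONSerializer<>(), new JSONDeserializer<>());
    StreamsBuilder builder = new StreamsBuilder();
    builder.stream("source-topic", Consumed.with(stringSerde, stringSerde))   // 1. consume order data
        .groupBy((k, v) -> "defaultKey")
        .windowedBy(TimeWindows.of(Duration.ofSeconds(10)))                   // 2. window aggregation
        .aggregate(ArrayList::new, (k, v, agg) -> { agg.add(v); return agg; },
            Materialized.with(stringSerde, jsonSerde))
        .toStream()
        .filter((k, v) -> !v.isEmpty())                                       // 3. intermediate processors
        .flatMap((k, v) -> v.stream()                                         // 4. flatten into task records
            .map(task -> KeyValue.pair(k.key(), task))
            .collect(Collectors.toList()))
        .to("sink-topic", Produced.with(stringSerde, stringSerde));           // 5. send tasks downstream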

Implementation (demo)

  1. Basic kafka configuration

    private static Properties buildConfigProps() {
      Properties props = new Properties();
      String applicationId = "test_33333";
      // broker address and the Streams application id
      props.put("bootstrap.servers", "192.168.10.152:9092");
      props.put("application.id", applicationId);
      // consumer-side settings, also reused for the plain KafkaProducer later in the demo
      props.put("auto.commit.interval.ms", "1000");
      props.put("session.timeout.ms", "30000");
      props.put("max.poll.records", 1000);
      props.put("auto.offset.reset", "earliest");
      // String (de)serializers for the plain consumer/producer configs
      props.put("key.deserializer", StringDeserializer.class.getName());
      props.put("value.deserializer", StringDeserializer.class.getName());
      props.put("key.serializer", StringSerializer.class.getName());
      props.put("value.serializer", StringSerializer.class.getName());
      return props;
    }
    
  2. JSON serializer and deserializer implemented with fastjson

    import com.alibaba.fastjson2.JSON;
    import org.apache.kafka.common.serialization.Serializer;
    
    public class JSONSerializer<T> implements Serializer<T> {
    
      @Override
      public byte[] serialize(String topic, T data) {
        if (data == null) {
          return null;
        }
        return JSON.toJSONBytes(data);
      }
    
    }
    
    import com.alibaba.fastjson2.JSON;
    import org.apache.kafka.common.serialization.Deserializer;
    
    public class JSONDeserializer<T> implements Deserializer<T> {
    
      @Override
      @SuppressWarnings("unchecked")
      public T deserialize(String topic, byte[] data) {
        if (data == null || data.length == 0) {
          return null;
        }
        // JSON.parse yields a JSONObject/JSONArray; the unchecked cast relies on the caller
        // expecting a compatible type (e.g. List<String> backed by a JSONArray)
        return (T) JSON.parse(data);
      }
    
    }
    
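    A quick round-trip check of the two classes, combined into a Serde as the core logic below does ("any-topic" is only a placeholder here; the topic name is ignored by the JSON logic):

        Serde<List<String>> jsonSerde = Serdes.serdeFrom(new JSONSerializer<>(), new JSONDeserializer<>());
        byte[] bytes = jsonSerde.serializer()
            .serialize("any-topic", Arrays.asList("{\"index\":1}", "{\"index\":2}"));
        List<String> restored = jsonSerde.deserializer().deserialize("any-topic", bytes);
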
  3. Exception handling

public abstract class RetryExceptionHandler {

  public static final String SOURCE_TOPIC_KEY = "sourceTopic";
  public static final String PRODUCER_KEY = "producer";

  protected String sourceTopic;
  protected KafkaProducer<String, String> producer;

  public void configure(Map<String, ?> config) {
    this.sourceTopic = (String) config.get(SOURCE_TOPIC_KEY);
    this.producer = (KafkaProducer<String, String>) config.get(PRODUCER_KEY);
  }

}
@Slf4j
public class RetryDeserializationExceptionHandler extends RetryExceptionHandler implements DeserializationExceptionHandler {

  @Override
  public DeserializationHandlerResponse handle(ProcessorContext context,
      ConsumerRecord<byte[], byte[]> record, Exception exception) {
    log.error("Exception caught during Deserialization, sending to the source topic, " +
            "taskId: {}, topic: {}, partition: {}, offset: {}",
        context.taskId(), record.topic(), record.partition(), record.offset(),
        exception);
    byte[] value = record.value();
    producer.send(new ProducerRecord<>(sourceTopic, new String(value, StandardCharsets.UTF_8)));
    return DeserializationHandlerResponse.CONTINUE;
  }

}
@Slf4j
public class RetryProductionExceptionHandler extends RetryExceptionHandler implements
    ProductionExceptionHandler {

  @Override
  public ProductionExceptionHandlerResponse handle(ProducerRecord<byte[], byte[]> record,
      Exception exception) {
    log.error("Exception caught during Production, sending to the source topic, " +
        "topic: {}, partition: {}", record.topic(), record.partition(), exception);
    // the two-argument ProducerRecord constructor is (topic, value), so the key must not be
    // passed in the topic position; rebuild the record explicitly against the source topic
    String key = record.key() == null ? null : new String(record.key(), StandardCharsets.UTF_8);
    ProducerRecord<String, String> producerRecord = new ProducerRecord<>(
        sourceTopic, key, new String(record.value(), StandardCharsets.UTF_8));
    producer.send(producerRecord);
    return ProductionExceptionHandlerResponse.CONTINUE;
  }

}
@Slf4j
public class RestartUncaughtExceptionHandler implements Thread.UncaughtExceptionHandler {

  public static final int MAX_AGE = 3;
  private StreamsBuilder streamsBuilder;
  private Properties props;

  private AtomicInteger age;

  public RestartUncaughtExceptionHandler(StreamsBuilder streamsBuilder, Properties props) {
    this.streamsBuilder = streamsBuilder;
    this.props = props;
    this.age = new AtomicInteger();
  }

  @Override
  public void uncaughtException(Thread t, Throwable e) {
    log.error("thread: {} process failed. age: {}", t.getName(), age, e);
    if (age.get() > MAX_AGE) {
      log.info("stop the stream application after retry times: {}", age);
      return;
    }
    age.incrementAndGet();
    KafkaStreams kafkaStreams = new KafkaStreams(streamsBuilder.build(), props);
    kafkaStreams.setUncaughtExceptionHandler(this);
    kafkaStreams.start();
  }

}
  4. kafka-stream core logic

      private static final String SOURCE_TOPIC = "sourceTopic";
      private static final String SINK_TOPIC = "sinkTopic";
    
      @Test
      void helloWorld() {
        // kafka config
        Properties props = buildConfigProps();
        Serde<String> stringSerde = Serdes.String();
        props.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, stringSerde.getClass().getName());
        props.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, stringSerde.getClass().getName());
        props.put(StreamsConfig.PROCESSING_GUARANTEE_CONFIG, StreamsConfig.EXACTLY_ONCE_BETA);
        props.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, 10000);
    
        // plain producer used by the retry handlers (and for back-flow / dummy records below);
        // it must be created before it is put into the Streams config
        KafkaProducer<String, String> producer = new KafkaProducer<>(buildConfigProps());
        props.put(StreamsConfig.DEFAULT_DESERIALIZATION_EXCEPTION_HANDLER_CLASS_CONFIG,
            RetryDeserializationExceptionHandler.class.getName());
        props.put(StreamsConfig.DEFAULT_PRODUCTION_EXCEPTION_HANDLER_CLASS_CONFIG,
            RetryProductionExceptionHandler.class.getName());
        props.put(RetryExceptionHandler.PRODUCER_KEY, producer);
        props.put(RetryExceptionHandler.SOURCE_TOPIC_KEY, SOURCE_TOPIC);

        Serde<List<String>> jsonSerde = Serdes.serdeFrom(new JSONSerializer<>(),
            new JSONDeserializer<>());
        StreamsBuilder streamsBuilder = new StreamsBuilder();
    
        KStream<String, String> kStream = streamsBuilder.stream(SOURCE_TOPIC,
            Consumed.with(stringSerde, stringSerde));
    
        Duration windowSize = Duration.ofSeconds(10);
        Materialized<String, List<String>, WindowStore<Bytes, byte[]>> storeMaterialized = Materialized.<String, List<String>, WindowStore<Bytes, byte[]>>as(
                "time-windowed-aggregated-stream-store").withKeySerde(stringSerde).withValueSerde(jsonSerde)
            .withRetention(Duration.ofMinutes(5));
    
        ConcurrentHashMap<String, Long> aggRecordMap = new ConcurrentHashMap<>();
        String lastMsgTimeKey = "lastMsgTimeKey";
        String signal = "signal";
        KTable<Windowed<String>, List<String>> kTable = kStream.groupBy((k, v) -> "defaultKey")
            .windowedBy(TimeWindows.of(windowSize).grace(Duration.ZERO))
            .aggregate(() -> new ArrayList<>(), (k, v, agg) -> {
              System.out.println("========== aggregate record ==========");
              log.info("k: {}, v: {}, agg: {}", k, v, agg);
              if (!signal.equals(v)) {
                agg.add(v);
              }
              aggRecordMap.put(lastMsgTimeKey, System.currentTimeMillis());
              return agg;
            }, storeMaterialized).suppress(Suppressed.untilWindowCloses(BufferConfig.unbounded()));
    
        String backFlow = "backFlow";
        KStream<String, JSONObject>[] branches = kTable.toStream().flatMap(
                (k, v) -> {
                  List<KeyValue<String, JSONObject>> keyValues = new ArrayList<>(v.size());
                  System.out.println("========== flatMap record ==========");
                  log.info("k: {}, v: {}", k, v);
                  v.stream().forEach(str -> {
                    JSONObject jsonObject = JSON.parseObject(str);
                    int index = jsonObject.getIntValue("index");
                    boolean backFlowFlag = jsonObject.getBooleanValue(backFlow);
                    if (!backFlowFlag && index % 2 == 0) {
                      jsonObject.put(backFlow, true);
                    } else {
                      jsonObject.remove(backFlow);
                    }
                    keyValues.add(new KeyValue<>(String.valueOf(index), jsonObject));
                  });
                  log.info("keyValues: {}", keyValues);
                  return keyValues;
                })
            .branch((k, v) -> !v.getBooleanValue(backFlow), (k, v) -> v.getBooleanValue(backFlow));
    
        branches[0].mapValues(v -> v.toJSONString())
            .to(SINK_TOPIC, Produced.with(stringSerde, stringSerde));
    
        // back-flow records are re-sent to the source topic with the plain producer created above,
        // because writing them back with to() would not re-enter the stream (see pitfall 2 below)
        branches[1].map((k, v) -> new KeyValue<>(k, new ProducerRecord<>(SOURCE_TOPIC, k, v.toJSONString())))
            .foreach((k, v) -> producer.send(v));
    
        KafkaStreams kafkaStreams = new KafkaStreams(streamsBuilder.build(), props);
        kafkaStreams.setUncaughtExceptionHandler(
         new RestartUncaughtExceptionHandler(streamsBuilder, props));
        kafkaStreams.start();
        while (true) {
          System.out.println("running......");
          // if no message has arrived for longer than one window, send a dummy record so that
          // stream time keeps advancing and the last window can close (see pitfall 1 below)
          Long lastMsgTime = aggRecordMap.getOrDefault(lastMsgTimeKey, 0L);
          if (lastMsgTime > 0 && System.currentTimeMillis() - lastMsgTime > windowSize.toMillis()) {
            producer.send(new ProducerRecord<>(SOURCE_TOPIC, lastMsgTime.toString(), signal));
          }
          try {
            TimeUnit.SECONDS.sleep(2);
          } catch (InterruptedException e) {
            throw new RuntimeException(e);
          }
        }
      }
    
    

Pitfalls Encountered

  1. Experiments showed that the TimeWindow works as expected while the producer keeps producing messages, but once the producer stops sending, the last window never closes until the producer sends again.

    I tried all sorts of changes without success and began to suspect kafka-stream is simply designed this way: the data is unbounded, so the input is never expected to stop...
    After asking on the Kafka user mailing list users@kafka.apache.org, I got an answer from John Roesler (vvcephei@apache.org): Kafka's event time is driven by the producers, so when the producers stop, the clock stops as well.
    The only workaround is a polling task that periodically sends dummy records (a sketch follows below).
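
    A minimal sketch of such a polling task, reusing the producer, SOURCE_TOPIC, aggRecordMap, lastMsgTimeKey, windowSize and signal from the demo above (a ScheduledExecutorService instead of the demo's while loop):

        // send a dummy record whenever no real message has arrived within one window,
        // so stream time keeps advancing and the last open window can close
        ScheduledExecutorService heartbeat = Executors.newSingleThreadScheduledExecutor();
        heartbeat.scheduleAtFixedRate(() -> {
          Long lastMsgTime = aggRecordMap.getOrDefault(lastMsgTimeKey, 0L);
          if (lastMsgTime > 0 && System.currentTimeMillis() - lastMsgTime > windowSize.toMillis()) {
            producer.send(new ProducerRecord<>(SOURCE_TOPIC, lastMsgTime.toString(), signal));
          }
        }, 0, 2, TimeUnit.SECONDS);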

  2. In some scenarios part of the records have to flow back to the source and be reprocessed in the next cycle, which is why the demo uses the branch operation.
    Experiments showed that messages written back to the source topic with to() never re-enter the stream, presumably some mechanism in kafka to avoid endless loops. Sending them back to the source with a plain Producer, however, works.

  3. What flows through kafka is the orderId rather than the whole order, because an order can be very large on the business side, may exceed kafka's single-message limit, and would congest the network.
    For now this is a semi-streaming system that only passes orderId; the order structure will be refactored later.
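
    The limits in question are the standard Kafka size settings; the snippet below only names where they live (the values shown are roughly the defaults, about 1 MB, not recommendations):

        // broker-level limit:  message.max.bytes
        // topic-level limit:   max.message.bytes
        // producer-side limit on a single request, set on the producer properties:
        props.put("max.request.size", 1048576);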
