Flink Real-Time ETL Practice Project


Author: 羋学僧 | Published 2020-12-07 14:05

    1. Requirements Background

    Clean and split the log data produced by the algorithm service.

    • 1. The log data produced by the algorithm is nested JSON and needs to be split and flattened
    • 2. The country field in each record must be converted to its region (area) code
    • 3. The result is written back to Kafka

    2. Data Format

    Format of the algorithm log data in Kafka

    {"dt":"2019-11-19 20:33:39","countryCode":"TW","data":[{"type":"s1","score":0.8,"level":"D"},{"type":"s2","score":0.1,"level":"B"}]}
    
    {"dt":"2019-11-19 20:33:41","countryCode":"KW","data":[{"type":"s2","score":0.2,"level":"A"},{"type":"s1","score":0.2,"level":"D"}]}
    
    {"dt":"2019-11-19 20:33:43","countryCode":"HK","data":[{"type":"s5","score":0.5,"level":"C"},{"type":"s2","score":0.8,"level":"B"}]}
    
    {"dt":"2019-11-19 20:33:39","countryCode":"TW","data":[{"type":"s1","score":0.8,"level":"D"},{"type":"s2","score":0.1,"level":"B"}]}
    

    Format of the ETL output produced by Flink

    {"dt":"2019-11-19 20:33:39","countryCode":"AREA_CT","type":"s1","score":0.8,"level":"D"}
    {"dt":"2019-11-19 20:33:39","countryCode":"AREA_CT","type":"s2","score":0.1,"level":"B"} 
    

    Country-to-region mapping data
    Stored in Redis

    3. Generating the Data

    Redis commands to load the country-to-region mapping data

    hset areas AREA_US US
    hset areas AREA_CT TW,HK
    hset areas AREA_AR PK,KW,SA
    hset areas AREA_IN IN
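
    After loading the hash, the entries can be spot-checked with redis-cli (assuming Redis runs on bigdata02:6379, the host and port used by the RedisSource later in the post):

    redis-cli -h bigdata02 -p 6379 hgetall areas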
    

    Generating the algorithm log data

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.serialization.StringSerializer;

    import java.text.SimpleDateFormat;
    import java.util.Date;
    import java.util.Properties;
    import java.util.Random;

    /**
     * Simulated data source
     */
    public class kafkaProducer {
    
        public static void main(String[] args) throws Exception{
            Properties prop = new Properties();
            // Kafka broker address
            prop.put("bootstrap.servers", "bigdata03:9092");
            // key/value serializers
            prop.put("key.serializer", StringSerializer.class.getName());
            prop.put("value.serializer", StringSerializer.class.getName());
            // topic name
            String topic = "data";
    
            // create the Kafka producer
            KafkaProducer<String, String> producer = new KafkaProducer<String,String>(prop);
    
            //{"dt":"2018-01-01 10:11:11","countryCode":"US","data":[{"type":"s1","score":0.3,"level":"A"},{"type":"s2","score":0.2,"level":"B"}]}
    
    
            while(true){
                String message = "{\"dt\":\""+getCurrentTime()+"\",\"countryCode\":\""+getCountryCode()+"\",\"data\":[{\"type\":\""+getRandomType()+"\",\"score\":"+getRandomScore()+",\"level\":\""+getRandomLevel()+"\"},{\"type\":\""+getRandomType()+"\",\"score\":"+getRandomScore()+",\"level\":\""+getRandomLevel()+"\"}]}";
                System.out.println(message);
                // send the record to Kafka
                producer.send(new ProducerRecord<String, String>(topic,message));
                Thread.sleep(2000);
            }
            // close the producer (never reached because of the infinite loop)
            //producer.close();
        }
    
        public static String getCurrentTime(){
            SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
            return sdf.format(new Date());
        }
    
        public static String getCountryCode(){
            String[] types = {"US","TW","HK","PK","KW","SA","IN"};
            Random random = new Random();
            int i = random.nextInt(types.length);
            return types[i];
        }
    
    
        public static String getRandomType(){
            String[] types = {"s1","s2","s3","s4","s5"};
            Random random = new Random();
            int i = random.nextInt(types.length);
            return types[i];
        }
    
        public static double getRandomScore(){
            double[] types = {0.3,0.2,0.1,0.5,0.8};
            Random random = new Random();
            int i = random.nextInt(types.length);
            return types[i];
        }
    
        public static String getRandomLevel(){
            String[] types = {"A","A+","B","C","D"};
            Random random = new Random();
            int i = random.nextInt(types.length);
            return types[i];
        }
    
    
    }
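
    Before running the producer and the ETL job, the data and etldata topics should exist; the DataClean job below sets its parallelism to 3 on the assumption that the data topic has 3 partitions. A sketch of the topic creation commands (the ZooKeeper address bigdata02:2181 is an assumption and is not given in the post; on Kafka 2.2+ the topics can be created with --bootstrap-server bigdata03:9092 instead of --zookeeper):

    kafka-topics.sh --create --zookeeper bigdata02:2181 --replication-factor 1 --partitions 3 --topic data
    kafka-topics.sh --create --zookeeper bigdata02:2181 --replication-factor 1 --partitions 3 --topic etldata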
    
    

    4. Reading the Data from Redis

    The areas hash in Redis (area → list of countries) is inverted into a country → area map:
    key:US value:AREA_US
    key:TW value:AREA_CT
    key:HK value:AREA_CT
    
    import org.apache.flink.streaming.api.functions.source.SourceFunction;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;
    import redis.clients.jedis.Jedis;
    import redis.clients.jedis.exceptions.JedisConnectionException;

    import java.util.HashMap;
    import java.util.Map;

    public class RedisSource implements SourceFunction<HashMap<String,String>> {
    
        private Logger logger=LoggerFactory.getLogger(RedisSource.class);
    
    
        private Jedis jedis;
        private boolean isRunning=true;
    
        @Override
        public void run(SourceContext<HashMap<String, String>> sourceContext) throws Exception {
            this.jedis = new Jedis("bigdata02",6379);
            HashMap<String, String> map = new HashMap<>();
            while(isRunning){
               try{
                   map.clear();
                   Map<String, String> areas = jedis.hgetAll("areas");
                   for(Map.Entry<String,String> entry:areas.entrySet()){
                       String area = entry.getKey();
                       String value = entry.getValue();
                       String[] fields = value.split(",");
                       for (String country:fields){
                           map.put(country,area);
                       }
                   }
                   if(map.size() > 0){
                       sourceContext.collect(map);
                   }
               }catch (JedisConnectionException e){
                   logger.error("redis连接一场:"+ e.getCause());
               }catch (Exception e){
                   logger.error("数据源发生了异常!!");
               }
               // refresh the mapping once per minute to avoid hammering Redis in a tight loop
               Thread.sleep(60000);
            }
        }
    
        @Override
        public void cancel() {
            isRunning = false;
            if(jedis != null){
                jedis.close();
            }
    
        }
    }
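
    The source can be verified on its own before wiring it into the ETL job. A minimal sketch (the class name RedisSourceTest is made up for this example):

    import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;

    public class RedisSourceTest {
        public static void main(String[] args) throws Exception {
            StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
            // print the country -> area map emitted by the source
            env.addSource(new RedisSource()).print();
            env.execute("redis source test");
        }
    }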
    
    

    5. Data Processing

    To submit the job with flink run -m yarn-cluster, add the following dependency:

                <dependency>
                    <groupId>org.apache.flink</groupId>
                    <artifactId>flink-shaded-hadoop-2-uber</artifactId>
                    <version>2.8.3-10.0</version>
                </dependency>
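
    The commands in section 6 submit etl-1.0-SNAPSHOT-jar-with-dependencies.jar; a fat jar with that suffix is typically produced by the maven-assembly-plugin, whose configuration is not shown in the post. A minimal sketch:

                <plugin>
                    <groupId>org.apache.maven.plugins</groupId>
                    <artifactId>maven-assembly-plugin</artifactId>
                    <configuration>
                        <descriptorRefs>
                            <descriptorRef>jar-with-dependencies</descriptorRef>
                        </descriptorRefs>
                    </configuration>
                    <executions>
                        <execution>
                            <phase>package</phase>
                            <goals>
                                <goal>single</goal>
                            </goals>
                        </execution>
                    </executions>
                </plugin>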
    
    import com.alibaba.fastjson.JSONArray;
    import com.alibaba.fastjson.JSONObject;
    import org.apache.flink.api.common.serialization.SimpleStringSchema;
    import org.apache.flink.contrib.streaming.state.RocksDBStateBackend;
    import org.apache.flink.streaming.api.CheckpointingMode;
    import org.apache.flink.streaming.api.datastream.DataStream;
    import org.apache.flink.streaming.api.datastream.DataStreamSource;
    import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
    import org.apache.flink.streaming.api.environment.CheckpointConfig;
    import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
    import org.apache.flink.streaming.api.functions.co.CoFlatMapFunction;
    import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer011;
    import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer011;
    import org.apache.flink.util.Collector;

    import java.util.HashMap;
    import java.util.Properties;

    /**
     * Real-time ETL job
     */
    public class DataClean {
        public static void main(String[] args) throws Exception{
            System.setProperty("HADOOP_USER_NAME", "bigdata");
            StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
            env.setParallelism(3); // assume the Kafka topic has 3 partitions
            // checkpoint configuration
            env.enableCheckpointing(60000);
            env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
            env.getCheckpointConfig().setMinPauseBetweenCheckpoints(10000);
            env.getCheckpointConfig().setMaxConcurrentCheckpoints(1);
            // keep the checkpoint data when the job is cancelled
            env.getCheckpointConfig().enableExternalizedCheckpoints(
                    CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
            env.setStateBackend(new RocksDBStateBackend("hdfs://bigdata02:9000/FlinkETL/checkpoint"));
    
            // Kafka source
            String topic="data";
            Properties properties = new Properties();
            properties.put("bootstrap.servers","bigdata03:9092");
            properties.put("group.id","dataclean_consumer");
            properties.put("enable.auto.commit","false");
            properties.put("auto.offset.reset","earliest");
    
            FlinkKafkaConsumer011<String> consumer = new FlinkKafkaConsumer011<>(
                    topic,
                    new SimpleStringSchema(),
                    properties
            );
            DataStreamSource<String> allData = env.addSource(consumer);
    
            // redis
            DataStream<HashMap<String, String>> mapData = env.addSource(new RedisSource()).broadcast();
    
            SingleOutputStreamOperator<String> etlDataStream = allData.connect(mapData).flatMap(new CoFlatMapFunction<String, HashMap<String, String>, String>() {
                // strictly speaking, this map does not have to be initialized here
                HashMap<String, String> allMap = new HashMap<String, String>();
    
                // at startup, allMap still needs some initial data before the first update from Redis arrives
    
    
                // flatMap1 handles the Kafka stream (allData)
                @Override
                public void flatMap1(String line, Collector<String> collector) throws Exception {
    //{"dt":"2019-11-19 20:33:39","countryCode":"TW","data":[{"type":"s1","score":0.8,"level":"D"},{"type":"s2","score":0.1,"level":"B"}]}
                    JSONObject jsonObject = JSONObject.parseObject(line);
                    String dt = jsonObject.getString("dt");
                    String countryCode = jsonObject.getString("countryCode");
                    // look up the area for the country code
                    String area = allMap.get(countryCode);
                    JSONArray data = jsonObject.getJSONArray("data");
                    for (int i = 0; i < data.size(); i++) {
                        //0 {"type":"s1","score":0.8,"level":"D"}
                        //1 {"type":"s2","score":0.1,"level":"B"}
                        JSONObject dataJSONObject = data.getJSONObject(i);
                        // add the date
                        dataJSONObject.put("dt", dt);
                        // add the area
                        dataJSONObject.put("area", area);
                        collector.collect(dataJSONObject.toString());
                    }
    
                }
    
                // flatMap2 handles the Redis broadcast stream (mapData)
                @Override
                public void flatMap2(HashMap<String, String> map, Collector<String> collector) throws Exception {
                    allMap = map;
                }
            });
    
            // etlDataStream.print().setParallelism(1);
            String etltopic="etldata";
            Properties sinkProperties = new Properties();
            sinkProperties.put("bootstrap.servers","bigdata03:9092");
            FlinkKafkaProducer011<String> kafkaSink = new FlinkKafkaProducer011<>(etltopic,
                    new SimpleStringSchema(),
                    sinkProperties);
    
    
            etlDataStream.addSink(kafkaSink);
    
            /**
             * source: Kafka
             * sink: Kafka
             *
             * The goal is exactly-once processing of every record:
             *
             * 1: checkpoint (Kafka offsets)
             * 2: write the result data to Kafka
             *          These two steps must be transactionally consistent: either both succeed or both fail.
             */
    
    
    
            env.execute("data clean");
    
        }
    }
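
    Note that the producer constructor used above defaults to at-least-once delivery. For the end-to-end exactly-once behaviour described in the closing comment, the sink would have to be created with Semantic.EXACTLY_ONCE, and the producer's transaction timeout must not exceed the broker's transaction.max.timeout.ms (15 minutes by default). A sketch of that variant:

    // requires org.apache.flink.streaming.util.serialization.KeyedSerializationSchemaWrapper
    Properties sinkProperties = new Properties();
    sinkProperties.put("bootstrap.servers", "bigdata03:9092");
    // must stay within the broker's transaction.max.timeout.ms
    sinkProperties.put("transaction.timeout.ms", "900000");

    FlinkKafkaProducer011<String> exactlyOnceSink = new FlinkKafkaProducer011<>(
            "etldata",
            new KeyedSerializationSchemaWrapper<>(new SimpleStringSchema()),
            sinkProperties,
            FlinkKafkaProducer011.Semantic.EXACTLY_ONCE);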
    
    

    6. Running on the Cluster

    flink run -m yarn-cluster (allocates the resources and submits the job)


    Add the flink-shaded-hadoop-2-uber jar to Flink's lib directory

    flink-shaded-hadoop-2-uber download link

    flink-shaded-hadoop-2-uber build guide

    cd /home/bigdata/data/
    
    java -cp etl-1.0-SNAPSHOT-jar-with-dependencies.jar com.nx.flink.producer.kafkaProducer
    
    flink run -m yarn-cluster -yqu default -ynm etl -ys 2 -yjm 1024 -ytm 1024 -c com.nx.flink.core.DataClean etl-1.0-SNAPSHOT-jar-with-dependencies.jar
    

    The job did not run successfully because the virtual machines did not have enough memory.
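
    If the job does come up, the ETL output can be spot-checked by reading the etldata topic with the Kafka console consumer:

    kafka-console-consumer.sh --bootstrap-server bigdata03:9092 --topic etldata --from-beginning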
