Flink Series - Real-Time Data Warehouse: Loading Data into Elasticsearch

Author: 小飞牛_666 | Published 2021-03-09 10:48

      Getting a real-time stream into Elasticsearch with Flink is more fiddly than it sounds. Flink does ship an Elasticsearch sink connector, but most examples only show a bare insert; material on tuning the sink and on authenticating with a username and password is scattered across the official docs and random blog posts. Experienced users can piece it together, but for a beginner like me it reads like a foreign language. A recent project needed exactly this, and after a lot of trial and error I finally got it working, so here is a write-up to keep other newcomers out of the same pits.
      Enough talk, let's get started.

    1. Start the servers
    [syy@nfdw elasticsearch-7.6.1]$ pwd
    /opt/modules/elasticsearch-7.6.1
    [syy@nfdw elasticsearch-7.6.1]$ bin/elasticsearch
    
    [syy@nfdw kibana-7.6.1-linux-x86_64]$ pwd
    /opt/modules/kibana-7.6.1-linux-x86_64
    [syy@nfdw kibana-7.6.1-linux-x86_64]$ bin/kibana
    

    Open the Kibana console at http://IP:5601/app/kibana#/dev_tools/console . If the Dev Tools page loads, the login succeeded and Kibana can reach the cluster.

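    To confirm that Elasticsearch itself is up, you can run a quick sanity check from Dev Tools (this check is an addition, not part of the original article):

    GET _cluster/health
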
    2. Code implementation

    2.1 Add the dependencies

            <!-- https://mvnrepository.com/artifact/org.apache.flink/flink-connector-elasticsearch7 -->
            <dependency>
                <groupId>org.apache.flink</groupId>
                <artifactId>flink-connector-elasticsearch7_2.11</artifactId>
                <version>${flink.version}</version>
            </dependency>
            <!-- https://mvnrepository.com/artifact/com.google.code.gson/gson -->
            <dependency>
                <groupId>com.google.code.gson</groupId>
                <artifactId>gson</artifactId>
                <version>2.8.6</version>
            </dependency>
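
    The main program and the failure handler below also rely on the Flink Kafka 0.11 connector and on Lombok (for the @Slf4j annotation); a sketch of the extra dependencies you would likely need (the Lombok version and scope are assumptions):

            <!-- Kafka 0.11 connector used by FlinkKafkaConsumer011 in the main program -->
            <dependency>
                <groupId>org.apache.flink</groupId>
                <artifactId>flink-connector-kafka-0.11_2.11</artifactId>
                <version>${flink.version}</version>
            </dependency>
            <!-- Lombok, used for @Slf4j in RetryRequestFailureHandler -->
            <dependency>
                <groupId>org.projectlombok</groupId>
                <artifactId>lombok</artifactId>
                <version>1.18.12</version>
                <scope>provided</scope>
            </dependency>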
    
    

    2.2 Main program

    public class App {
    
        public static void main(String[] args) throws Exception {
    
            // Obtain the execution environment (GetStreamExecutionEnvironment is a helper class from this project)
            StreamExecutionEnvironment env = GetStreamExecutionEnvironment.getEnv();

            // Configure the Kafka consumer
            // (key/value deserialization is handled by SimpleStringSchema below, so no deserializer properties are needed)
            Properties prop = new Properties();
            prop.setProperty("bootstrap.servers", "cdh101:9092");
            prop.setProperty("group.id", "cloudera_mirrormaker");
            FlinkKafkaConsumer011<String> myConsumer =
                    new FlinkKafkaConsumer011<>("luchangyin", new SimpleStringSchema(), prop);
            myConsumer.setStartFromLatest();  // start from the latest offsets
    
            // Consume the Kafka topic as a DataStream
            DataStreamSource<String> dataStream = env.addSource(myConsumer);
            //dataStream.print();   // {"id":"226","name":"tang tang - 226","sal":280751,"dept":"美女部","ts":1615191802523}
    
            // Parse each JSON record into an Employees POJO and derive its event-time fields
            SingleOutputStreamOperator<Employees> result = dataStream.map(new MapFunction<String, Employees>() {
    
                @Override
                public Employees map(String s) throws Exception {
                    Employees emp = MyJsonUtils.str2JsonObj(s);
                    emp.setEmpStartTime(new Date(emp.getTs()));
                    emp.setDt(MyDateUtils.getDate2Second(emp.getEmpStartTime()));
                    return emp;
                }
            });
    
            //result.print();
            // Employees(eId=257, eName=fei fei - 257, eSal=97674.0, eDept=美女部, ts=1615251002894, empStartTime=Tue Mar 09 08:50:02 GMT+08:00 2021, dt=2021-03-09)
    
            // Elasticsearch server addresses (comma-separated host:port list)
            List<HttpHost> esAddresses = null;
            try {
                esAddresses = ESSinkUtil.getEsAddresses("10.122.1.115:9200");
            } catch (MalformedURLException e) {
                e.printStackTrace();
            }
    
            // The three bulk-flush parameters and the parallelism below can be tuned for throughput
            ESSinkUtil.addSink(esAddresses, "elastic", "123456", 100,100, 1,
                    5, result, new ElasticsearchSinkFunction<Employees>() {
                        @Override
                        public void process(Employees emp, RuntimeContext runtimeContext, RequestIndexer requestIndexer) {
                            // Daily index, e.g. employee_20210309
                            String indexStr = "employee_" + MyDateUtils.getTime2Day(emp.getEmpStartTime()).replaceAll("-", "");
                            System.out.println("index -> " + indexStr);
                            requestIndexer.add(Requests.indexRequest()
                                    .index(indexStr)
                                    .source(GsonUtil.toJSONBytes(emp), XContentType.JSON));
                        }
                    });
    
    
            env.execute("wo xi huan ni");
    
        }
    
    }
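
    The Employees POJO and the MyJsonUtils / MyDateUtils helpers are referenced above but were not included in the post. A minimal sketch of what the POJO might look like (field names are inferred from the sample records printed above; the Gson @SerializedName mappings and the Lombok @Data annotation are assumptions):

    package com.nfdw.entity;

    import com.google.gson.annotations.SerializedName;
    import lombok.Data;

    import java.util.Date;

    @Data
    public class Employees {
        @SerializedName("id")   private String eId;
        @SerializedName("name") private String eName;
        @SerializedName("sal")  private Double eSal;
        @SerializedName("dept") private String eDept;
        private Long ts;            // event timestamp in milliseconds, from the Kafka record
        private Date empStartTime;  // derived from ts in the map() function
        private String dt;          // event date as yyyy-MM-dd, derived from empStartTime
    }

    Under the same assumptions, MyJsonUtils.str2JsonObj(s) is essentially new Gson().fromJson(s, Employees.class), and MyDateUtils.getDate2Second / getTime2Day are SimpleDateFormat wrappers that format a Date as a yyyy-MM-dd string (as seen in the sample output above).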
    
    

    2.3 The ES sink utility class ESSinkUtil:

    package com.nfdw.utils;
    
    import org.apache.commons.lang.StringUtils;
    import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
    import org.apache.flink.streaming.connectors.elasticsearch.ElasticsearchSinkFunction;
    import org.apache.flink.streaming.connectors.elasticsearch7.ElasticsearchSink;
    import org.apache.http.HttpHost;
    
    import java.net.MalformedURLException;
    import java.net.URL;
    import java.util.ArrayList;
    import java.util.List;
    
    public class ESSinkUtil {
    
        /**
         * Build an Elasticsearch sink and attach it to the stream.
         *
         * @param esAddresses         es hosts
         * @param userName            es user name (may be empty when security is disabled)
         * @param passwd              es password
         * @param bulkFlushMaxActions max number of buffered actions before a bulk flush
         * @param bulkFlushMaxSizeMb  max buffered size in MB before a bulk flush
         * @param bulkFlushInterval   bulk flush interval in milliseconds
         * @param parallelism         sink parallelism
         * @param data                the stream to write
         * @param func                turns records into index requests
         * @param <T>                 record type
         */
        public static <T> void addSink(List<HttpHost> esAddresses, String userName, String passwd, int bulkFlushMaxActions,
                                       int bulkFlushMaxSizeMb, long bulkFlushInterval, int parallelism,
                                       SingleOutputStreamOperator<T> data, ElasticsearchSinkFunction<T> func) {
            //todo:xpack security
            ElasticsearchSink.Builder<T> esSinkBuilder = new ElasticsearchSink.Builder<>(esAddresses, func);
    
            // Authentication, for clusters that require a username and password to write to es
            if(StringUtils.isNotEmpty(userName) && StringUtils.isNotEmpty(passwd)){
                esSinkBuilder.setRestClientFactory(new HDRestClientFactory(userName,passwd));
            }
    
            // Failure handling strategy
            esSinkBuilder.setFailureHandler(new RetryRequestFailureHandler());
    
            // Bulk flush settings
            esSinkBuilder.setBulkFlushMaxActions(bulkFlushMaxActions);
            esSinkBuilder.setBulkFlushMaxSizeMb(bulkFlushMaxSizeMb);
            esSinkBuilder.setBulkFlushInterval(bulkFlushInterval);
    
            //-----------------------------------
            data.addSink(esSinkBuilder.build()).setParallelism(parallelism);
        }
    
        /**
         * Parse a comma-separated es hosts string into HttpHost instances.
         *
         * @param hosts e.g. "10.122.1.115:9200,host2:9200" or "http://host1:9200"
         * @return list of HttpHost addresses
         * @throws MalformedURLException if a host entry cannot be parsed
         */
        public static List<HttpHost> getEsAddresses(String hosts) throws MalformedURLException {
            String[] hostList = hosts.split(",");
            List<HttpHost> addresses = new ArrayList<>();
            for (String host : hostList) {
                if (host.startsWith("http")) {
                    URL url = new URL(host);
                    addresses.add(new HttpHost(url.getHost(), url.getPort()));
                } else {
                    String[] parts = host.split(":", 2);
                    if (parts.length > 1) {
                        addresses.add(new HttpHost(parts[0], Integer.parseInt(parts[1])));
                    } else {
                        throw new MalformedURLException("invalid elasticsearch hosts format");
                    }
                }
            }
            return addresses;
        }
    
    }
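
    Because getEsAddresses splits on commas, a multi-node cluster can be configured with a single string; a small usage sketch (the second and third host names are placeholders, and the call must handle MalformedURLException as in the main program):

        List<HttpHost> addresses = ESSinkUtil.getEsAddresses("10.122.1.115:9200,es-node2:9200,http://es-node3:9200");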
    
    

    2.4 The authentication class HDRestClientFactory:

    package com.nfdw.utils;
    
    import org.apache.flink.streaming.connectors.elasticsearch7.RestClientFactory;
    import org.apache.http.auth.AuthScope;
    import org.apache.http.auth.UsernamePasswordCredentials;
    import org.apache.http.client.CredentialsProvider;
    import org.apache.http.client.config.RequestConfig;
    import org.apache.http.impl.client.BasicCredentialsProvider;
    import org.apache.http.impl.nio.client.HttpAsyncClientBuilder;
    import org.elasticsearch.client.RestClientBuilder;
    
    public class HDRestClientFactory implements RestClientFactory {
    
        private String userName;
        private String password;
        transient CredentialsProvider credentialsProvider;
    
        public HDRestClientFactory(String userName, String password) {
            this.userName = userName;
            this.password = password;
        }
    
        @Override
        public void configureRestClientBuilder(RestClientBuilder restClientBuilder) {
            if (credentialsProvider == null) {
                credentialsProvider = new BasicCredentialsProvider();
                credentialsProvider.setCredentials(AuthScope.ANY, new UsernamePasswordCredentials(userName, password));
            }
            restClientBuilder.setHttpClientConfigCallback(new RestClientBuilder.HttpClientConfigCallback() {
                @Override
                public HttpAsyncClientBuilder customizeHttpClient(HttpAsyncClientBuilder httpAsyncClientBuilder) {
                    return httpAsyncClientBuilder.setDefaultCredentialsProvider(credentialsProvider);
                }
            }).setRequestConfigCallback(new RestClientBuilder.RequestConfigCallback() {
                @Override
                public RequestConfig.Builder customizeRequestConfig(RequestConfig.Builder builder) {
                    builder.setConnectTimeout(5000);            // connection timeout: 5 s
                    builder.setSocketTimeout(60000);            // socket timeout: 60 s
                    builder.setConnectionRequestTimeout(2000);  // wait for a connection from the pool: 2 s
                    return builder;
                }
            });
        }
    }
    
    

    2.5 The failure handling class RetryRequestFailureHandler:

    package com.nfdw.utils;
    
    import lombok.extern.slf4j.Slf4j;
    import org.apache.flink.streaming.connectors.elasticsearch.ActionRequestFailureHandler;
    import org.apache.flink.streaming.connectors.elasticsearch.RequestIndexer;
    import org.apache.flink.util.ExceptionUtils;
    import org.elasticsearch.action.ActionRequest;
    import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
    import java.io.IOException;
    import java.net.SocketTimeoutException;
    import java.util.Optional;
    
    @Slf4j
    public class RetryRequestFailureHandler implements ActionRequestFailureHandler {
    
        public RetryRequestFailureHandler() {
        }
    
        @Override
        public void onFailure(ActionRequest actionRequest, Throwable throwable, int i, RequestIndexer requestIndexer) throws Throwable {
            if (ExceptionUtils.findThrowable(throwable, EsRejectedExecutionException.class).isPresent()) {
                // The bulk queue on the ES side was full: re-add the request so it is retried
                requestIndexer.add(new ActionRequest[]{actionRequest});
            } else {
                if (ExceptionUtils.findThrowable(throwable, SocketTimeoutException.class).isPresent()) {
                    // Socket timeouts are swallowed here; the request is not re-added
                    return;
                } else {
                    Optional<IOException> exp = ExceptionUtils.findThrowable(throwable, IOException.class);
                    if (exp.isPresent()) {
                        IOException ioExp = exp.get();
                        if (ioExp != null && ioExp.getMessage() != null && ioExp.getMessage().contains("max retry timeout")) {
                            log.error(ioExp.getMessage());
                            return;
                        }
                    }
                }
                throw throwable;
            }
        }
    }
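
    Note that re-adding failed requests only gives at-least-once delivery when checkpointing is enabled, because the Elasticsearch sink flushes its pending bulk requests on each checkpoint. A minimal sketch (the interval is an assumption, and the project's GetStreamExecutionEnvironment helper may already enable this):

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Buffered bulk requests are flushed and acknowledged at every checkpoint
        env.enableCheckpointing(60_000); // checkpoint every 60 seconds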
    
    

    2.6 A Gson helper class GsonUtil:

    package com.nfdw.utils;
    
    import com.google.gson.Gson;
    import com.google.gson.GsonBuilder;
    
    import java.lang.reflect.Type;
    import java.nio.charset.StandardCharsets;
    
    public class GsonUtil {
    
        private final static Gson gson = new Gson();
    
        private final static Gson disableHtmlEscapingGson = new GsonBuilder().disableHtmlEscaping().create();
    
        public static <T> T fromJson(String value, Class<T> type) {
            return gson.fromJson(value, type);
        }
    
        public static <T> T fromJson(String value, Type type) {
            return gson.fromJson(value, type);
        }
    
        public static String toJson(Object value) {
            return gson.toJson(value);
        }
    
        public static String toJsonDisableHtmlEscaping(Object value) {
            return disableHtmlEscapingGson.toJson(value);
        }
    
        public static byte[] toJSONBytes(Object value) {
            return gson.toJson(value).getBytes(StandardCharsets.UTF_8);
        }
    
    }
    
    
    3. Run the program and query the results
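
    The sink creates one index per day following the employee_yyyyMMdd pattern built in the sink function, so the data can be inspected from the Kibana console; for example (the date here matches the sample records above):

    GET employee_20210309/_search
    {
      "size": 5
    }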

      One thing to watch out for: the stream handed to the ES sink must be a SingleOutputStreamOperator. Tuning then comes down to adjusting the numeric parameters of the utility class. That wraps up writing to Elasticsearch from Flink; I hope it helps.
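
      For example, tuning only means changing the numbers in the addSink call of the main program; a sketch with more production-like bulk settings (the exact values are assumptions and should be sized to your cluster; sinkFunction stands for the ElasticsearchSinkFunction defined above):

        // flush after 1000 buffered actions, or 5 MB of data, or every 5000 ms, with sink parallelism 2
        ESSinkUtil.addSink(esAddresses, "elastic", "123456", 1000, 5, 5000, 2, result, sinkFunction);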
