flink upsert kafka sql

Author: loukey_j | Published 2021-04-19 15:56

    pom:
    <?xml version="1.0" encoding="UTF-8"?>
    <project xmlns="http://maven.apache.org/POM/4.0.0"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>

    <groupId>com.sf.bdp</groupId>
    <artifactId>kafka-wide-table</artifactId>
    <version>1.0-SNAPSHOT</version>
    <properties>
        <maven.compiler.source>1.8</maven.compiler.source>
        <maven.compiler.target>1.8</maven.compiler.target>
        <encoding>UTF-8</encoding>
        <scala.version>2.11.12</scala.version>
        <scala.binary.version>2.11</scala.binary.version>
        <hadoop.version>2.7.3</hadoop.version>
        <protostuff.version>1.0.5</protostuff.version>
        <flink.version>1.12.2</flink.version>
        <kafka.version>2.3.1</kafka.version>
    </properties>
    
    <dependencies>
        <!-- flink-table is a pom-packaging aggregator module, so it has to be
             pulled in with <type>pom</type>; the concrete table modules are
             declared individually below. -->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table</artifactId>
            <version>${flink.version}</version>
            <type>pom</type>
        </dependency>
    
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-connector-kafka_2.11</artifactId>
            <version>${flink.version}</version>
        </dependency>
    
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-streaming-java_2.11</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-clients_2.11</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-runtime_2.11</artifactId>
            <version>${flink.version}</version>
        </dependency>
    
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-java</artifactId>
            <version>${flink.version}</version>
        </dependency>
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-api-java-bridge_2.11</artifactId>
            <version>${flink.version}</version>
        </dependency>
    
        <!--<dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-planner_2.11</artifactId>
            <version>${flink.version}</version>
        </dependency>-->
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-table-planner-blink_2.11</artifactId>
            <version>${flink.version}</version>
        </dependency>
    
        <dependency>
            <groupId>org.apache.flink</groupId>
            <artifactId>flink-json</artifactId>
            <version>${flink.version}</version>
        </dependency>
    </dependencies>
    

    </project>

    code:

    import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
    import org.apache.flink.table.api.TableResult;
    import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

    public class KafkaSourceTest {
        public static void main(String[] args) throws Exception {
            // Since Flink 1.12 the Blink planner is the default, so the explicit
            // EnvironmentSettings setup is no longer needed:
            // EnvironmentSettings bsSettings = EnvironmentSettings.newInstance().useBlinkPlanner().inStreamingMode().build();
            // StreamTableEnvironment tEnv = StreamTableEnvironment.create(streamEnv, bsSettings);
            StreamExecutionEnvironment streamEnv = StreamExecutionEnvironment.getExecutionEnvironment();
            StreamTableEnvironment tEnv = StreamTableEnvironment.create(streamEnv);

            // upsert-kafka requires a PRIMARY KEY and a key format. It always
            // starts reading from the earliest offset, so 'scan.startup.mode'
            // is not a supported option for this connector and is left out.
            tEnv.executeSql("CREATE TABLE source1 (\n" +
                    "  id STRING,\n" +
                    "  pv BIGINT,\n" +
                    "  uv BIGINT,\n" +
                    "  PRIMARY KEY (id) NOT ENFORCED\n" +
                    ") WITH (\n" +
                    "  'connector' = 'upsert-kafka',\n" +
                    "  'topic' = 'source1_pvuv',\n" +
                    "  'properties.bootstrap.servers' = '10.202.116.43:9092',\n" +
                    "  'key.json.ignore-parse-errors' = 'true',\n" +
                    "  'value.json.fail-on-missing-field' = 'false',\n" +
                    "  'value.json.ignore-parse-errors' = 'true',\n" +
                    "  'key.format' = 'json',\n" +
                    "  'value.format' = 'json',\n" +
                    "  'value.fields-include' = 'EXCEPT_KEY'\n" +
                    ")");
            tEnv.executeSql("CREATE TABLE sink1 (\n" +
                    "  id STRING,\n" +
                    "  pv BIGINT,\n" +
                    "  uv BIGINT,\n" +
                    "  PRIMARY KEY (id) NOT ENFORCED\n" +
                    ") WITH (\n" +
                    "  'connector' = 'upsert-kafka',\n" +
                    "  'topic' = 'sink1_pvuv',\n" +
                    "  'properties.bootstrap.servers' = '10.202.116.43:9092',\n" +
                    "  'key.json.ignore-parse-errors' = 'true',\n" +
                    "  'value.json.fail-on-missing-field' = 'false',\n" +
                    "  'value.json.ignore-parse-errors' = 'true',\n" +
                    "  'key.format' = 'json',\n" +
                    "  'value.format' = 'json',\n" +
                    "  'value.fields-include' = 'EXCEPT_KEY'\n" +
                    ")");

            // executeSql submits each job asynchronously; the returned
            // TableResult can be awaited or inspected if needed.
            TableResult insertResult = tEnv.executeSql("INSERT INTO sink1 SELECT * FROM source1");
            // print() consumes the sink topic's changelog and blocks while the
            // streaming query runs.
            tEnv.executeSql("SELECT * FROM sink1").print();
            // No tEnv.execute(...) call is needed: executeSql already triggers execution.
        }
    }
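
    The class above only copies source1 into sink1; the results below, however, come from a join variant (KafkaSourceTest10, which is not included in the post). The following is a minimal sketch of what that job might look like, assuming two upsert-kafka tables product(productId, name) and goods(goodsId, userName) whose names and topics are inferred from the sample messages further down:

    import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
    import org.apache.flink.table.api.Table;
    import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
    import org.apache.flink.types.Row;

    // Hypothetical reconstruction of KafkaSourceTest10; table and topic names
    // are assumptions, not taken from the post.
    public class KafkaJoinSketch {
        public static void main(String[] args) throws Exception {
            StreamExecutionEnvironment streamEnv = StreamExecutionEnvironment.getExecutionEnvironment();
            StreamTableEnvironment tEnv = StreamTableEnvironment.create(streamEnv);
            tEnv.executeSql("CREATE TABLE product (\n" +
                    "  productId STRING,\n" +
                    "  name STRING,\n" +
                    "  PRIMARY KEY (productId) NOT ENFORCED\n" +
                    ") WITH (\n" +
                    "  'connector' = 'upsert-kafka',\n" +
                    "  'topic' = 'product',\n" +
                    "  'properties.bootstrap.servers' = '10.202.116.43:9092',\n" +
                    "  'key.format' = 'json',\n" +
                    "  'value.format' = 'json'\n" +
                    ")");
            tEnv.executeSql("CREATE TABLE goods (\n" +
                    "  goodsId STRING,\n" +
                    "  userName STRING,\n" +
                    "  PRIMARY KEY (goodsId) NOT ENFORCED\n" +
                    ") WITH (\n" +
                    "  'connector' = 'upsert-kafka',\n" +
                    "  'topic' = 'goods',\n" +
                    "  'properties.bootstrap.servers' = '10.202.116.43:9092',\n" +
                    "  'key.format' = 'json',\n" +
                    "  'value.format' = 'json'\n" +
                    ")");
            // The four columns of this left join match the printed rows below.
            Table joined = tEnv.sqlQuery("SELECT p.productId, p.name, g.goodsId, g.userName " +
                    "FROM product p LEFT JOIN goods g ON p.productId = g.goodsId");
            // toRetractStream emits (true, row) / (false, row) pairs, which is
            // the "12> (true,1,apple,...)" format seen in the append-mode log.
            tEnv.toRetractStream(joined, Row.class).print();
            streamEnv.execute("kafka wide table join");
        }
    }

    The "| +I |" tables in the upsert-mode section would instead come from calling TableResult.print() on the same query.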

    Append mode (KafkaSourceTest10) output. The "12>" prefix is the printing subtask index, and the leading boolean marks an accumulate (true) or retract (false) message:
    12> (true,1,apple,null,null)
    12> (false,1,apple,null,null)
    12> (true,1,apple,1,liujie)
    12> (true,1,apple9,1,liujie)
    12> (true,1,apple9,1,zhangshan)
    12> (true,1,apple,1,zhangshan)
    12> (true,1,apple10,1,zhangshan)
    12> (true,1,apple10,1,liujie)
    12> (true,1,apple10,1,wangwu)
    12> (true,1,apple9,1,wangwu)
    12> (true,1,apple,1,wangwu)


    {"productId":"1","name":"apple"}
    {"productId":"1","name":"apple9"}
    {"productId":"1","name":"apple10"}

    {"goodsId":"1","userName":"liujie"}
    {"goodsId":"1","userName":"zhangshan"}
    {"goodsId":"1","userName":"wangwu"}
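
    The two groups of messages above feed the product topic and the goods topic respectively. With upsert-kafka, each record's key must carry the primary-key fields, so the producer has to send keyed records. A minimal keyed-producer sketch (topic names again assumed):

    import org.apache.kafka.clients.producer.KafkaProducer;
    import org.apache.kafka.clients.producer.ProducerRecord;
    import org.apache.kafka.common.serialization.StringSerializer;

    import java.util.Properties;

    public class TestDataProducer {
        public static void main(String[] args) {
            Properties props = new Properties();
            props.put("bootstrap.servers", "10.202.116.43:9092");
            props.put("key.serializer", StringSerializer.class.getName());
            props.put("value.serializer", StringSerializer.class.getName());
            try (KafkaProducer<String, String> producer = new KafkaProducer<>(props)) {
                // Key and value are both JSON, matching 'key.format'/'value.format' = 'json'.
                producer.send(new ProducerRecord<>("product",
                        "{\"productId\":\"1\"}", "{\"productId\":\"1\",\"name\":\"apple\"}"));
                producer.send(new ProducerRecord<>("goods",
                        "{\"goodsId\":\"1\"}", "{\"goodsId\":\"1\",\"userName\":\"liujie\"}"));
                // A record with a null value is a tombstone; upsert-kafka reads it as a DELETE.
                producer.send(new ProducerRecord<>("product", "{\"productId\":\"1\"}", null));
            }
        }
    }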

    Upsert mode. Each input line shows the record key followed by the record value; underneath are the changelog rows the query prints (+I insert, -U update-before, +U update-after, -D delete):

    {"productId":"2"} {"productId":"2","name":"apple"}
    | +I | 2 | apple | (NULL) | (NULL) |
    {"goodsId":"2"} {"goodsId":"2","userName":"name1"}
    | -D | 2 | apple | (NULL) | (NULL) |
    | +I | 2 | apple | 2 | name1 |
    {"productId":"2"} {"productId":"2","name":"apple10"}
    | -D | 2 | apple | 2 | name1 |
    | +I | 2 | apple10 | 2 | name1 |
    {"goodsId":"2"} {"goodsId":"2","userName":"name2"}
    | -U | 2 | apple10 | 2 | name1 |
    | +I | 2 | apple10 | (NULL) | (NULL) |
    | -D | 2 | apple10 | (NULL) | (NULL) |
    | +I | 2 | apple10 | 2 | name2 |
    {"productId":"2"} {}
    | -D | 2 | apple10 | 2 | name2 |
    | +I | 2 | (NULL) | 2 | name2 |
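
    Note the last input pair: the value {} is an empty JSON object, not a tombstone. Because the DDL sets 'value.json.fail-on-missing-field' = 'false', the missing name field simply decodes to NULL, so the row is replaced with (2, NULL, 2, name2) rather than deleted; an actual DELETE would require a Kafka record whose value is null.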
