Spark 3.0.0: importing a CSV file into ClickHouse

Author: 二十赶朝暮__ | Published 2020-09-04 17:34

pom.xml:
Spark bundles its own jackson/guava, which conflict with the versions needed here, so the shade plugin relocates them to keep the two sets apart.

<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <modelVersion>4.0.0</modelVersion>
    <groupId>spark_clickhouse</groupId>
    <artifactId>spark_clickhouse</artifactId>
    <version>1.0-SNAPSHOT</version>
    <dependencies>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_2.12</artifactId>
            <version>3.0.0</version>
            <scope>compile</scope>
        </dependency>
        <dependency>
            <groupId>ru.yandex.clickhouse</groupId>
            <artifactId>clickhouse-jdbc</artifactId>
            <version>0.2.4</version>
            <scope>compile</scope>
        </dependency>

        <dependency>
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-core</artifactId>
            <version>2.10.2</version>
        </dependency>
        <dependency>
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-databind</artifactId>
            <version>2.10.2</version>
        </dependency>
        <dependency>
            <groupId>com.fasterxml.jackson.core</groupId>
            <artifactId>jackson-annotations</artifactId>
            <version>2.10.2</version>
        </dependency>
        <dependency>
            <groupId>com.fasterxml.jackson.module</groupId>
            <artifactId>jackson-module-scala_2.12</artifactId>
            <version>2.10.2</version>
        </dependency>

    </dependencies>

    <build>
        <plugins>
            <plugin>
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-shade-plugin</artifactId>
                <version>3.1.0</version>
                <executions>
                    <execution>
                        <phase>package</phase>
                        <goals>
                            <goal>shade</goal>
                        </goals>
                        <configuration>
                            <relocations>
                                <relocation>
                                    <pattern>com.fasterxml.jackson</pattern>
                                    <shadedPattern>noc.com.fasterxml.jackson</shadedPattern>
                                </relocation>
                                <relocation>
                                    <pattern>com.google.guava</pattern>
                                    <shadedPattern>noc.com.google.guava</shadedPattern>
                                </relocation>
                            </relocations>
                        </configuration>
                    </execution>
                </executions>
            </plugin>
        </plugins>
    </build>

</project>

There is also a net.jpountz.lz4:lz4:1.3.0 jar that conflicts with org.lz4:lz4-java:1.7.1; the 1.3.0 jar was removed.
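If the conflicting lz4 artifact arrives transitively (mvn dependency:tree shows where), a Maven exclusion is one way to drop it. A minimal sketch, assuming it is pulled in via clickhouse-jdbc — adjust the host dependency to whatever your tree actually shows:

        <dependency>
            <groupId>ru.yandex.clickhouse</groupId>
            <artifactId>clickhouse-jdbc</artifactId>
            <version>0.2.4</version>
            <exclusions>
                <!-- assumption: net.jpountz.lz4 comes in through clickhouse-jdbc -->
                <exclusion>
                    <groupId>net.jpountz.lz4</groupId>
                    <artifactId>lz4</artifactId>
                </exclusion>
            </exclusions>
        </dependency>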

import java.util.Properties

import org.apache.spark.SparkConf
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import org.apache.spark.sql.{SaveMode, SparkSession}
import org.apache.spark.storage.StorageLevel

object SparkWriteCk {
  // JDBC connection properties for the ClickHouse driver
  val properties = new Properties()
  properties.put("driver", "ru.yandex.clickhouse.ClickHouseDriver")
  properties.put("user", "default")
  properties.put("password", "*****")
  properties.put("batchsize", "100000")      // rows per JDBC batch insert
  properties.put("socket_timeout", "300000") // ms; large batches need a generous timeout
  properties.put("numPartitions", "8")       // max parallel JDBC connections on write
  properties.put("rewriteBatchedStatements", "true")
  val url = "jdbc:clickhouse://<server-ip>:8123/default" // replace <server-ip> with your ClickHouse host
  val table = "fact_customer_qty"

  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    val session = SparkSession.builder().master("local[*]").config(conf).appName("write-to-ck").getOrCreate()

    // Explicit schema for the CSV file; column order must match the file
    val columns = StructType(
      List(
        StructField("ymd", StringType, false),
        StructField("sup_name", StringType, false),
        StructField("item_name", StringType, false),
        StructField("need_qty", IntegerType, false),
        StructField("qty", IntegerType, false),
        StructField("unitcode", StringType, false)
      )
    )

    val df = session.read.format("csv")
      .option("header", false)
      .option("sep", ",")
      .schema(columns) // with an explicit schema, inferSchema is unnecessary and ignored
      .load("C:\\Users\\86136\\IdeaProjects\\spark_learning\\spark_scala\\resources\\fact_customer_qty.csv")
      .persist(StorageLevel.MEMORY_ONLY_SER_2) // cache serialized, replicated on 2 nodes

    println(df.schema)
    df.write.mode(SaveMode.Append).jdbc(url, table, properties)
    println("write done")
    df.unpersist(true)
    session.stop()
  }
}
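The write appends via generic JDBC, and Spark's auto-generated DDL carries no ClickHouse ENGINE clause, so it is safest to create fact_customer_qty up front. A minimal sketch of doing that over the same JDBC driver — the MergeTree engine and ORDER BY ymd are assumptions, not from the original setup:

import java.sql.DriverManager

object CreateCkTable {
  def main(args: Array[String]): Unit = {
    // Same driver and credentials as the Spark job above
    Class.forName("ru.yandex.clickhouse.ClickHouseDriver")
    val conn = DriverManager.getConnection(
      "jdbc:clickhouse://<server-ip>:8123/default", "default", "*****")
    try {
      val stmt = conn.createStatement()
      // Column types mirror the Spark schema: StringType -> String, IntegerType -> Int32
      stmt.execute(
        """CREATE TABLE IF NOT EXISTS default.fact_customer_qty (
          |  ymd       String,
          |  sup_name  String,
          |  item_name String,
          |  need_qty  Int32,
          |  qty       Int32,
          |  unitcode  String
          |) ENGINE = MergeTree() ORDER BY ymd""".stripMargin)
      stmt.close()
    } finally {
      conn.close()
    }
  }
}

With the table in place, the job can be run directly from the IDE, since the master is local[*].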

[Screenshot of the CSV file omitted: image.png]
