# Launch the spark-sql CLI on YARN (client mode) with tuned executor/driver
# resources and Hive dynamic-partition settings. Building the argument list
# as an array keeps each option documentable and avoids fragile backslash
# continuations; the resulting argv is identical to a hand-written command.
spark_sql_args=(
  # Cluster placement and sizing.
  --master yarn
  --deploy-mode client
  --num-executors 20
  --executor-cores 2
  --executor-memory 6g
  --driver-memory 6g

  # Driver/serialization limits for large collect() results and wide rows.
  --conf spark.driver.maxResultSize=4g
  --conf spark.kryoserializer.buffer.max=1024m
  --conf spark.debug.maxToStringFields=999

  # Generous timeouts (seconds) for slow broadcasts / congested networks.
  --conf spark.sql.broadcastTimeout=2600
  --conf spark.network.timeout=1200
  --conf spark.rpc.askTimeout=1200
  --conf spark.rpc.lookupTimeout=360
  --conf spark.locality.wait=10

  # Execution-memory share and SQL behavior toggles.
  --conf spark.memory.fraction=0.80
  --conf spark.sql.parquet.writeLegacyFormat=true
  --conf spark.sql.crossJoin.enabled=true

  # Hive metastore / dynamic-partition configuration.
  --hiveconf hive.metastore.execute.setugi=true
  --hiveconf hive.exec.dynamic.partition=true
  --hiveconf hive.exec.dynamic.partition.mode=nonstrict
  --hiveconf hive.exec.max.dynamic.partitions=1000000
  --hiveconf hive.exec.max.dynamic.partitions.pernode=100000
  --hiveconf hive.mapred.supports.subdirectories=true
  --hiveconf mapreduce.input.fileinputformat.input.dir.recursive=true

  # Silent mode: suppress informational output from the CLI.
  -S
)

spark-sql "${spark_sql_args[@]}"
# NOTE: stray web-page text ("网友评论" = "user comments") — not part of the
# script; commented out so the file executes cleanly.