美文网首页
2021-04-20

2021-04-20

作者: sydt2011 | 来源:发表于2021-04-20 16:56 被阅读0次

    历史节点的配置
    druid.service=druid/historical
    druid.port=8083

    HTTP server threads

    druid.server.http.numThreads=100

    Processing threads and buffers

    druid.processing.buffer.sizeBytes=1073741824
    druid.query.groupBy.maxOnDiskStorage=4294967296
    druid.processing.numMergeBuffers=16
    druid.processing.numThreads=60
    druid.processing.tmpDir=var/druid/processing

    Segment storage

    druid.segmentCache.locations=[{"path":"/data0/var/druid/segment-cache","maxSize":11000000000000},{"path":"/data1/var/druid/segment-cache","maxSize":11000000000000},{"path":"/data2/var/druid/segment-cache","maxSize":11000000000000},{"path":"/data3/var/druid/segment-cache","maxSize":11000000000000},{"path":"/data4/var/druid/segment-cache","maxSize":11000000000000},{"path":"/data5/var/druid/segment-cache","maxSize":11000000000000},{"path":"/data6/var/druid/segment-cache","maxSize":11000000000000},{"path":"/data7/var/druid/segment-cache","maxSize":11000000000000}]
    druid.server.maxSize=88000000000000

    Query cache

    druid.historical.cache.useCache=true
    druid.historical.cache.populateCache=true
    druid.cache.type=caffeine
    druid.cache.sizeInBytes=40000000000

    druid.server.tier=tier_1
    历史节点的内存配置:

    -server
    -Xms120g
    -Xmx120g
    -XX:MaxDirectMemorySize=120g
    -XX:+PrintGCDetails
    -Xloggc:var/sv/gchistorical.log
    -XX:+PrintGCDateStamps
    -Duser.timezone=UTC+0800
    -Dfile.encoding=UTF-8
    -Djava.io.tmpdir=var/tmp
    -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager

    broker的配置:
    druid.service=druid/broker
    druid.port=8082

    HTTP server threads

    druid.broker.http.numConnections=80
    druid.server.http.numThreads=200
    druid.broker.http.readTimeout=PT1M
    druid.server.http.defaultQueryTimeout=60000

    Processing threads and buffers

    druid.processing.buffer.sizeBytes=1
    druid.processing.numMergeBuffers=4
    druid.processing.numThreads=15
    druid.processing.tmpDir=var/druid/processing
    druid.server.http.maxScatterGatherBytes=6000000000

    Query cache disabled -- push down caching and merging instead

    druid.cache.type=redis

    druid.broker.cache.useCache=true
    druid.broker.cache.populateCache=true
    druid.cache.sizeInBytes=60000000000

    Query cache --- redis cache

    druid.cache.host=

    druid.cache.password=

    druid.cache.port=6379

    SQL

    druid.sql.enable=true

    druid.extensions.loadList=["druid-caffeine-cache", "druid-hdfs-storage", "mysql-metadata-storage", "druid-datasketches", "druid-histogram", "druid-kerberos", "materialized-view-selection"]

    broker节点的内存配置:

    -server
    -Xms230g
    -Xmx230g
    -XX:NewSize=40g
    -XX:MaxNewSize=40g
    -XX:MaxDirectMemorySize=10g
    -XX:+PrintGCDetails
    -Xloggc:var/sv/gcbroker.log
    -XX:+PrintGCDateStamps
    -Duser.timezone=UTC+0800
    -Dfile.encoding=UTF-8
    -Djava.io.tmpdir=var/tmp
    -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager

    middle节点:
    druid.service=druid/middlemanager
    druid.port=8091

    Number of tasks per middleManager

    druid.worker.capacity=20

    Task launch parameters

    druid.indexer.runner.javaOpts=-server -Xmx6g -XX:MaxDirectMemorySize=12g -XX:+UseG1GC -XX:MaxGCPauseMillis=100 -Duser.timezone=GMT+8 -Dfile.encoding=UTF-8 -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager -Dhdp.version=2.6.1.0-129 -Dhadoop.mapred.job.queue.name=olap
    druid.indexer.task.baseTaskDir=var/druid/task
    druid.indexer.task.restoreTasksOnRestart=true

    HTTP server threads

    druid.server.http.numThreads=50

    Processing threads and buffers

    druid.processing.buffer.sizeBytes=100000000

    druid.processing.buffer.sizeBytes=1073741824

    druid.processing.numMergeBuffers=2
    druid.indexer.fork.property.druid.processing.buffer.sizeBytes=268435456
    druid.indexer.fork.property.druid.query.groupBy.maxOnDiskStorage=4294967296
    druid.indexer.fork.property.druid.processing.numThreads=6

    druid.processing.numThreads=15

    druid.processing.tmpDir=var/druid/processing

    Hadoop indexing

    druid.indexer.task.hadoopWorkingPath=var/druid/hadoop-tmp
    druid.indexer.task.defaultHadoopCoordinates=["org.apache.hadoop:hadoop-client:2.7.3"]

    middle节点的内存配置:
    -server
    -Xms256m
    -Xmx256m
    -Duser.timezone=UTC+0800
    -Dfile.encoding=UTF-8
    -Djava.io.tmpdir=var/tmp
    -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager

    协调节点:
    druid.service=druid/coordinator
    druid.port=8081

    druid.coordinator.startDelay=PT90S
    druid.coordinator.period=PT10S

    druid.coordinator.balancer.strategy=cachingCost
    协调节点的内存配置

    -server
    -Xms60g
    -Xmx90g
    -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:ParallelGCThreads=16 -XX:ConcGCThreads=4 -XX:InitiatingHeapOccupancyPercent=45 -XX:G1ReservePercent=25 -XX:-OmitStackTraceInFastThrow
    -Duser.timezone=UTC+0800
    -Dfile.encoding=UTF-8
    -XX:+PrintGCDetails
    -Xloggc:var/sv/gccoor.log
    -XX:+PrintGCDateStamps
    -Djava.io.tmpdir=var/tmp
    -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager
    -Dderby.stream.error.file=var/druid/derby.log

    overlord节点:
    druid.service=druid/overlord
    druid.port=8090

    druid.indexer.queue.startDelay=PT30S

    druid.indexer.runner.type=remote
    druid.indexer.storage.type=metadata

    druid.indexer.runner.maxZnodeBytes=1048576

    -server
    -Xms12g
    -Xmx20g
    -Duser.timezone=UTC+0800
    -Dfile.encoding=UTF-8
    -XX:+PrintGCDetails
    -Xloggc:var/sv/gcover.log
    -XX:+PrintGCDateStamps
    -Djava.io.tmpdir=var/tmp
    -Djava.util.logging.manager=org.apache.logging.log4j.jul.LogManager

    启动脚本:

    [Unit]
    Description=Imply-historical
    Documentation=https://imply.io
    Wants=network-online.target
    After=network-online.target
    
    [Service]
    Environment=IMPLY_HOME=/usr/local/imply-2.7.8
    Environment=CONF_DIR=/usr/local/imply-2.7.8/conf
    Environment=VAR_DIR=/usr/local/imply-2.7.8/var
    #EnvironmentFile=-/etc/sysconfig/imply
    
    WorkingDirectory=/usr/local/imply-2.7.8
    
    #User=imply
    #Group=imply
    
    #ExecStartPre=
    
    ExecStart=/usr/local/imply-2.7.8/bin/supervise -c conf/supervise/coordinator-overlord.conf
    
    # StandardOutput is configured to redirect to journalctl since
    # some error messages may be logged in standard output before
    # imply logging system is initialized. Imply-historical
    # stores its logs in /var/log/imply and does not use
    # journalctl by default. If you also want to enable journalctl
    # logging, you can simply remove the "quiet" option from ExecStart.
    StandardOutput=journal
    StandardError=inherit
    
    # Specifies the maximum file descriptor number that can be opened by this process
    LimitNOFILE=655350
    
    # Specifies the maximum number of processes
    LimitNPROC=655350
    
    # Specifies the maximum size of virtual memory
    LimitMEMLOCK=infinity
    
    # Specifies the maximum file size
    LimitFSIZE=infinity
    
    # Disable timeout logic and wait until process is stopped
    TimeoutStopSec=0
    
    # SIGTERM signal is used to stop the Java process
    KillSignal=SIGTERM
    
    # Send the signal only to the JVM rather than its control group
    KillMode=process
    
    # Java process is never killed
    SendSIGKILL=no
    
    # When a JVM receives a SIGTERM signal it exits with code 143
    SuccessExitStatus=143
    
    [Install]
    WantedBy=multi-user.target
    

    相关文章

      网友评论

          本文标题:2021-04-20

          本文链接:https://www.haomeiwen.com/subject/kvvnlltx.html