MongoDB Sharded Replica Set Setup


Author: cubotudo | Published 2018-08-09 18:18

    Table of Contents

    1  Deployment Plan
    2  Kernel Parameter Tuning
    2.1  sysctl.conf
    2.2  limits.conf
    2.3  Other Settings
    2.4  Enable NTP Time Synchronization
    3  MongoDB Installation
    3.1  Add Environment Variables
    3.2  Node Configuration
    3.2.1  Mongod Data Nodes
    3.2.2  Config Nodes
    3.2.3  Route Nodes
    3.3  Start mongod
    3.4  Config Replica Set Configuration
    3.5  Shard Replica Set Configuration
    3.5.1  Shard1
    3.5.2  Shard2
    3.5.3  Shard3
    3.6  Add Users
    3.6.1  Add Global Users
    3.6.2  Add Shard Replica Set Users
    3.7  Sharding Configuration
    3.8  Check Nodes
    4  Stop the Application
    4.1  Replace the Application war/jar Packages
    4.2  Modify the Application Mongo Configuration
    4.3  Disable the Registration API
    4.3.1  Remove registration-related API configuration from Mongo
    4.3.2  Remove registration-related API cache from Redis
    5  Data Migration
    5.1  Disable the Balancer on the New Cluster
    5.2  Export Data from the Old Cluster
    5.3  Import Data into the New Cluster
    5.4  Create Indexes on the New Cluster
    5.5  Handle Sequences on the New Cluster
    5.6  Enable the Balancer on the New Cluster
    5.7  Check Data Sharding Status
    6  Start the Application
    7  Re-enable the Registration API
    8  Testing and Verification

    1  Deployment Plan

    The cluster is divided into 3 shards. Each shard is a replica set, and each replica set contains two data nodes and one arbiter (Arb) node.
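    For reference, the host layout implied by the replica-set configurations in sections 3.4 and 3.5 is roughly as follows (mongos placement beyond 10.95.216.87 is not spelled out in this document):

    10.95.216.87: shard1 data (27021), shard3 data (27023), config (4000), mongos (4010)
    10.95.216.88: shard1 data (27021), shard2 data (27022)
    10.95.216.89: shard2 data (27022), shard3 data (27023), config (4000)
    10.95.216.90: shard1/shard2/shard3 arbiters (27021/27022/27023), config (4000)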

    2  Kernel Parameter Tuning

    2.1  sysctl.conf

    vi /etc/sysctl.conf

    # Add the following:

    vm.nr_hugepages= 0

    vm.overcommit_memory=1

    vm.zone_reclaim_mode=0

    net.ipv4.tcp_syncookies = 1

    net.ipv4.tcp_tw_reuse = 1

    net.ipv4.tcp_tw_recycle = 1

    net.ipv4.tcp_max_tw_buckets = 5000

    net.ipv4.tcp_synack_retries = 2

    net.ipv4.tcp_syn_retries = 2

    net.ipv4.tcp_keepalive_time = 1800

    net.core.rmem_default = 256960

    net.core.rmem_max = 513920

    net.core.wmem_default = 256960

    net.core.wmem_max = 513920

    net.ipv4.tcp_wmem = 8192 256960 16777216

    net.ipv4.tcp_rmem = 32768 256960 16777216

    net.ipv4.tcp_max_orphans = 3276800

    net.ipv4.tcp_fin_timeout = 30

    net.core.somaxconn = 16384

    net.core.netdev_max_backlog = 16384

    kernel.msgmnb = 655360000

    kernel.msgmax = 6553600

    kernel.msgmni = 4096

    # Apply the changes

    sysctl -p

    2.2  limits.conf

    vi /etc/security/limits.conf

    # Add the following:

    *    soft    nproc     65536

    *    hard    nproc     65536

    *    soft    nofile    65536

    *    hard    nofile    65536
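    After opening a new session, the raised limits can be verified with ulimit (a quick check, not part of the original steps):

    # nofile (open files) and nproc (max user processes)
    ulimit -n
    ulimit -u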

    2.3  Other Settings

    # Disable transparent huge pages (THP)

    echo 'never' > /sys/kernel/mm/transparent_hugepage/enabled

    echo 'never' > /sys/kernel/mm/transparent_hugepage/defrag

    cat /sys/kernel/mm/transparent_hugepage/enabled

    cat /sys/kernel/mm/transparent_hugepage/defrag

    # Remove the virtual memory limit

    vi /etc/profile

    # Add:

    ulimit -v unlimited

    . /etc/profile

    # Mount filesystems with noatime,nodiratime

    /dev/md2 / ext4 defaults,noatime 0 0

    mount -o remount /

    # Set a smaller disk readahead value of 32 sectors (16 KB)

    blockdev --getra /dev/sda

    blockdev --setra 32 /dev/sda
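    Note that blockdev --setra does not survive a reboot. One way to make it persistent (a sketch, assuming /etc/rc.local is executed at boot on these hosts):

    echo 'blockdev --setra 32 /dev/sda' >> /etc/rc.local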

    2.4  Enable NTP Time Synchronization

    vi /usr/local/sbin/ntpddate.sh

    #!/bin/bash

    killall ntpd

    /usr/local/ntp-dev-4.2.7p26/bin/ntpdate 10.95.99.56
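    The cron entry below invokes the script directly, so it needs to be executable (not shown in the original; a minimal addition):

    chmod +x /usr/local/sbin/ntpddate.sh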

    crontab -e

    1 * * * * /usr/local/sbin/ntpddate.sh

    3  MongoDB Installation

    3.1  Add Environment Variables

    MONGODB_HOME=/home/mongodb/mongodb-3.2

    export PATH=$MONGODB_HOME/bin:$PATH
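    To make these variables available in every new session, one option is to append the two lines above to /etc/profile (as was done for ulimit in section 2.3) and re-source it; the profile location is an assumption:

    vi /etc/profile
    # add the MONGODB_HOME and PATH lines above, then:
    . /etc/profile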

    3.2  Node Configuration

    Create the MongoDB directories on all 4 servers:

    mkdir -p /home/mongodb/{etc,mongoLog,mongoData}/

    mkdir -p /home/mongodb/mongoData/{shard1,shard2,shard3,config}
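    Every configuration below references a shared key file at /home/mongodb/etc/mongodbkeyfile, but its creation is not shown. A minimal sketch, assuming openssl is available and the identical file is copied to all four servers with 600 permissions:

    openssl rand -base64 756 > /home/mongodb/etc/mongodbkeyfile
    chmod 600 /home/mongodb/etc/mongodbkeyfile
    # copy the same file to the other hosts, e.g.:
    scp /home/mongodb/etc/mongodbkeyfile 10.95.216.88:/home/mongodb/etc/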

    3.2.1  Mongod Data Nodes

    The shard1, shard2, and shard3 configuration files differ only in the port, data directory, log file, and replica set name: shard1 uses port 27021, shard2 uses 27022, and shard3 uses 27023. (A sketch for deriving the other two files from shard1.conf follows the listing below.)

    #shard1.conf

    systemLog:

     quiet: true

     logAppend: true

     path: /home/mongodb/mongoLog/shard1.log

     destination: file

     traceAllExceptions: true

    storage:

     engine: wiredTiger

     dbPath: /home/mongodb/mongoData/shard1

     directoryPerDB: true

     journal:

      enabled: true

     wiredTiger:

      engineConfig:

       directoryForIndexes: true

      collectionConfig:

       blockCompressor: snappy

      indexConfig:

       prefixCompression: true

    processManagement:

     fork: true

    net:

     port: 27021

     maxIncomingConnections: 50000

     http:

      enabled: false

    operationProfiling:

     slowOpThresholdMs: 500

     mode: slowOp

    security:

     keyFile: /home/mongodb/etc/mongodbkeyfile

     javascriptEnabled: false

    replication:

     replSetName: shard1

     oplogSizeMB: 1000

    sharding:

     clusterRole: shardsvr
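    Since shard2.conf and shard3.conf change only the name, port, and paths, one possible way to derive them from shard1.conf (a sketch, assuming the file layout above):

    for i in 2 3; do
      sed -e "s/shard1/shard${i}/g" -e "s/27021/2702${i}/g" \
        /home/mongodb/etc/shard1.conf > /home/mongodb/etc/shard${i}.conf
    done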

    3.2.2  Config Nodes

    All three config server configurations are identical.

    #config.conf

    systemLog:

     quiet: true

     logAppend: true

     path: /home/mongodb/mongoLog/config.log

     destination: file

     traceAllExceptions: true

    storage:

     engine: wiredTiger

     dbPath: /home/mongodb/mongoData/config

     directoryPerDB: true

     journal:

      enabled: true

     wiredTiger:

      engineConfig:

       directoryForIndexes: true

      collectionConfig:

       blockCompressor: snappy

      indexConfig:

       prefixCompression: true

    processManagement:

     fork: true

    net:

     port: 4000

     http:

      enabled: false

    operationProfiling:

     slowOpThresholdMs: 500

     mode: slowOp

    security:

     keyFile: /home/mongodb/etc/mongodbkeyfile

     javascriptEnabled: false

    replication:

     replSetName: config

     oplogSizeMB: 1000

    sharding:

     clusterRole: configsvr

    3.2.3  Route Nodes

    All three route (mongos) configurations are identical.

    #route.conf

    systemLog:

     quiet: true

     logAppend: true

     path: /home/mongodb/mongoLog/route.log

     destination: file

     traceAllExceptions: true

    processManagement:

     fork: true

    net:

     port: 4010

     http:

      enabled: false

    security:

     keyFile: /home/mongodb/etc/mongodbkeyfile

    sharding:

     configDB: config/10.95.216.87:4000,10.95.216.89:4000,10.95.216.90:4000

     chunkSize: 64

    3.3  Start mongod

    Log in to 87, 88, 89, and 90 and start the processes required on each host.

    cd /home/mongodb/mongodb-3.2/bin

    # Start with numactl --interleave=all to avoid NUMA-related performance problems

    numactl --interleave=all mongod -f /home/mongodb/etc/shard1.conf

    numactl --interleave=all mongod -f /home/mongodb/etc/shard2.conf

    numactl --interleave=all mongod -f /home/mongodb/etc/shard3.conf

    numactl --interleave=all mongod -f /home/mongodb/etc/config.conf

    numactl --interleave=all mongos -f /home/mongodb/etc/route.conf
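    To confirm the daemons started and are listening on the expected ports, a quick check such as the following can be used (not part of the original steps; ss can replace netstat):

    ps -ef | grep '[m]ongo'
    netstat -lntp | grep -E ':(27021|27022|27023|4000|4010)'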

    3.4  Config Replica Set Configuration

    # Log in to 10.95.216.87

    cd /home/mongodb/mongodb-3.2/bin

    ./mongo --port 4000

    config = {_id: 'config', configsvr:true,

    members: [

    {_id: 0, host: '10.95.216.87:4000',priority:1},

    {_id: 1, host: '10.95.216.89:4000'},

    {_id: 2, host: '10.95.216.90:4000'}]

    };

    rs.initiate(config);

    rs.status();

    3.5  Shard Replica Set Configuration

    3.5.1  Shard1

    # Log in to 10.95.216.87

    cd /home/mongodb/mongodb-3.2/bin

    ./mongo --port 27021

    config = {_id: 'shard1', members: [

    {_id: 0, host: '10.95.216.87:27021',priority:1},

    {_id: 1, host: '10.95.216.88:27021'},

    {_id: 2, host: '10.95.216.90:27021', arbiterOnly: true}]

    };

    rs.initiate(config);

    rs.status();

    3.5.2  Shard2

    # Log in to 10.95.216.88

    cd /home/mongodb/mongodb-3.2/bin

    ./mongo --port 27022

    config = {_id: 'shard2', members: [

    {_id: 0, host: '10.95.216.88:27022',priority:1},

    {_id: 1, host: '10.95.216.89:27022'},

    {_id: 2, host: '10.95.216.90:27022', arbiterOnly: true}]

    };

    rs.initiate(config);

    rs.status();

    3.5.3  Shard3

    # Log in to 10.95.216.89

    cd /home/mongodb/mongodb-3.2/bin

    ./mongo --port 27023

    config = {_id: 'shard3', members: [

    {_id: 0, host: '10.95.216.89:27023',priority:1},

    {_id: 1, host: '10.95.216.87:27023'},

    {_id: 2, host: '10.95.216.90:27023', arbiterOnly: true}]

    };

    rs.initiate(config);

    rs.status();
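    Before adding users it is worth confirming that each replica set has elected a primary (a quick check, not in the original; run it against each shard's port):

    rs.isMaster().ismaster   // true when connected to the primary
    rs.status().members.forEach(function(m) { print(m.name + "  " + m.stateStr); })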

    3.6  Add Users

    3.6.1  Add Global Users

    # Log in to 87

    cd /home/mongodb/mongodb-3.2/bin

    ./mongo --port 4010

    # Add the super user

    use admin

    db.createUser( { user: "super",pwd: "Vip1@RfvUjm", roles: [ "root"] } );

    db.auth("super","Vip1@RfvUjm");

    # Add the admin user

    db.createUser({user:"admin",pwd:"Lvs1@RfvUjm",

             roles:["clusterAdmin","dbAdmin","dbAdminAnyDatabase",

                        "userAdminAnyDatabase","readWriteAnyDatabase",

                       {role:"readWrite",db:"config"}]

             });

    # Add the database user

    use db_uus

    db.createUser( { user:"acctMongoUser", pwd: "7B6Yzxn8eh", roles: ["readWrite"] } );

    3.6.2  Add Shard Replica Set Users

    # Log in to 87, 88, and 89 (create each user on that shard's primary: 27021 on 87, 27022 on 88, 27023 on 89)

    cd /home/mongodb/mongodb-3.2/bin

    ./mongo --port 27021

    # Add the super user

    use admin

    db.createUser( { user: "super01",pwd: "Vip1@RfvUjm", roles: [ "root"] } );

    db.auth("super01","Vip1@RfvUjm");

    rs.status();

    ./mongo --port 27022

    # Add the super user

    use admin

    db.createUser( { user: "super02",pwd: "Vip1@RfvUjm", roles: [ "root"] } );

    db.auth("super02","Vip1@RfvUjm");

    rs.status();

    ./mongo --port 27023

    # Add the super user

    use admin

    db.createUser( { user: "super03",pwd: "Vip1@RfvUjm", roles: [ "root"] } );

    db.auth("super03","Vip1@RfvUjm");

    rs.status();

    3.7  Sharding Configuration

    # Log in to 10.95.216.87

    cd /home/mongodb/mongodb-3.2/bin

    ./mongo --port 4010

    use admin

    db.auth("super","Vip1@RfvUjm");

    db.runCommand({addshard:"shard1/10.95.216.87:27021,10.95.216.88:27021"});

    db.runCommand({addshard:"shard2/10.95.216.88:27022,10.95.216.89:27022"});

    db.runCommand({addshard:"shard3/10.95.216.89:27023,10.95.216.87:27023"});

    sh.status();

    use admin;
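    Adding the shards by itself does not distribute any data; sharding still has to be enabled per database and per collection. That step is not shown in this document, so the following is only an illustrative sketch using the db_uus database from section 3.6.1, with a hypothetical collection name and shard key:

    sh.enableSharding("db_uus");
    // "account" and the hashed _id key are placeholders -- use the real collection and shard key
    sh.shardCollection("db_uus.account", { _id: "hashed" });
    sh.status();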

    5.6  Enable the Balancer on the New Cluster

    # Log in to 10.95.216.87

    cd /home/mongodb/mongodb-3.2/bin

    mongo --port 4010

    use admin

    db.auth("super","Vip1@RfvUjm");

    sh.startBalancer();

    sh.getBalancerState();

    sh.isBalancerRunning();

    # Log in to 10.95.196.191

    mongo --port 4010

    use admin;

    db.auth("admin","Lvs1@RfvUjm");

    # Enable the balancer

    sh.startBalancer();

    5.7  Check Data Sharding Status

    # Log in to 10.95.216.87

    cd /home/mongodb/mongodb-3.2/bin

    ./mongo --port 4010

    use admin

    db.auth("super","Vip1@RfvUjm");

    sh.status();
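    For a per-collection view of how data and chunks are spread across the shards, getShardDistribution() complements sh.status() (a sketch with a hypothetical collection name):

    use db_uus
    db.account.getShardDistribution()   // replace "account" with an actual sharded collection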
