美文网首页Linux科技
MongoDB分片集群搭建

MongoDB分片集群搭建

作者: Miracle001 | 来源:发表于2019-03-13 09:26 被阅读3次

    架构图

    准备

    centos7 1804
    网络NAT+仅主机
    3台主机
    192.168.25.11--3个config server、1个shard、1个router
    192.168.25.12--1个shard、1个router
    192.168.25.13 --1个shard
    
    /etc/hosts
    192.168.25.11 node1.fgq.com node1
    192.168.25.12 node2.fgq.com node2
    192.168.25.13 node3.fgq.com node3
    192.168.25.14 node4.fgq.com node4
    192.168.25.15 node5.fgq.com node5
    
    下载mongodb包
    https://www.mongodb.com/download-center/community
    mongodb-linux-x86_64-rhel70-4.0.6.tgz
    
    注:config server 使用复制集时不能包含arbiter节点。3.4版本以后config server必须部署为复制集
    注:在做分片的时候,要避免Shard在同一台主机上,这样就没办法实现分片
    
    
    3个节点都操作,以node1为例
    
    # 创建目录
    [root@node1 ~]# mkdir -p /fgq/{base-env,data/mongodb/{conf,data,logs,socket}}
    [root@node1 ~]# cd /fgq/base-env/
    [root@node1 base-env]# rz  上传mongodb包
    [root@node1 base-env]# tar zxf mongodb-linux-x86_64-rhel70-4.0.6.tgz 
    [root@node1 base-env]# ln -s mongodb-linux-x86_64-rhel70-4.0.6 mongodb
    # 环境变量
    [root@node1 base-env]# vim /etc/profile.d/mongodb.sh 
    export PATH=$PATH:/fgq/base-env/mongodb/bin
    [root@node1 base-env]# source /etc/profile.d/mongodb.sh
    # 添加用户
    [root@node1 base-env]# useradd mongod;echo 123456|passwd --stdin mongod
    
    

    shard集群配置

    node1
    [root@node1 base-env]# cd ../data/mongodb/conf/
    [root@node1 conf]# vim mongod.conf
    systemLog:
      destination: file
      path: /fgq/data/mongodb/logs/mongod.log
      logAppend: true
    storage:
      journal:
        enabled: true
      dbPath: /fgq/data/mongodb/data/mongod
      directoryPerDB: true
      #engine: wiredTiger
      wiredTiger:
        engineConfig:
          # cacheSizeGB: 1
          directoryForIndexes: true
        collectionConfig:
          blockCompressor: zlib
        indexConfig:
          prefixCompression: true
    processManagement:
      fork: true
      pidFilePath: /fgq/data/mongodb/socket/mongod.pid
    net:
    #  port:   默认端口27018
      bindIp: 192.168.25.11
    replication:
      oplogSizeMB: 50
      replSetName: rs1
    sharding:
      clusterRole: shardsvr
    # 创建shard存储目录
    [root@node1 ~]# mkdir /fgq/data/mongodb/data/mongod
    # 启动
    [root@node1 ~]# mongod -f /fgq/data/mongodb/conf/mongod.conf
    about to fork child process, waiting until server is ready for connections.
    forked process: 1384
    child process started successfully, parent exiting
    [root@node1 ~]# netstat -ntlup |grep mongod
    tcp        0      0 192.168.25.11:27018     0.0.0.0:*               LISTEN      1384/mongod 
    
    把配置文件复制到其他节点上
    [root@node1 conf]# scp mongod.conf node2:/fgq/data/mongodb/conf/
    [root@node1 conf]# scp mongod.conf node3:/fgq/data/mongodb/conf/
    
    
    node2
    [root@node2 ~]# vim /fgq/data/mongodb/conf/mongod.conf
      把bindIp改为192.168.25.12即可
      bindIp: 192.168.25.12
    [root@node2 ~]# mkdir /fgq/data/mongodb/data/mongod
    [root@node2 ~]# mongod -f /fgq/data/mongodb/conf/mongod.conf
    about to fork child process, waiting until server is ready for connections.
    forked process: 4929
    child process started successfully, parent exiting
    [root@node2 ~]# netstat -ntlup |grep mongod
    tcp        0      0 192.168.25.12:27018     0.0.0.0:*               LISTEN      4929/mongod
    
    
    node3
    [root@node3 ~]# vim /fgq/data/mongodb/conf/mongod.conf
      把bindIp改为192.168.25.13即可
      bindIp: 192.168.25.13
    [root@node3 ~]# mkdir /fgq/data/mongodb/data/mongod
    [root@node3 ~]# mongod -f /fgq/data/mongodb/conf/mongod.conf 
    about to fork child process, waiting until server is ready for connections.
    forked process: 1472
    child process started successfully, parent exiting
    [root@node3 ~]# netstat -ntlup |grep mongod
    tcp        0      0 192.168.25.13:27018     0.0.0.0:*               LISTEN      1472/mongod
    
    配置复制集
    [root@node1 ~]# mongo --host 192.168.25.11 --port 27018
    > config = {_id: 'rs1', members: [{_id: 0, host: '192.168.25.11:27018'},{_id: 1, host: '192.168.25.12:27018'},{_id: 2, host: '192.168.25.13:27018'}]}
    {
        "_id" : "rs1",
        "members" : [
            {
                "_id" : 0,
                "host" : "192.168.25.11:27018"
            },
            {
                "_id" : 1,
                "host" : "192.168.25.12:27018"
            },
            {
                "_id" : 2,
                "host" : "192.168.25.13:27018"
            }
        ]
    }
    > rs.initiate(config)
    {
        "ok" : 1,
        "operationTime" : Timestamp(1552382083, 1),
        "$clusterTime" : {
            "clusterTime" : Timestamp(1552382083, 1),
            "signature" : {
                "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                "keyId" : NumberLong(0)
            }
        }
    }
    rs1:SECONDARY>   再次回车就变成了主节点
    rs1:PRIMARY>
    现在只配置一个shard集群,按理说应该配置2个shard集群
    

    config集群配置(仅node1)

    node1
    [root@node1 ~]# cd /fgq/data/mongodb/conf/
    [root@node1 conf]# vim mongosvr-20000.conf
    systemLog:
     destination: file
    ###日志存储位置
     path: /fgq/data/mongodb/logs/mongosvr-20000.log
     logAppend: true
    storage:
    ##journal配置
     journal:
      enabled: true
    ##数据文件存储位置
     dbPath: /fgq/data/mongodb/data/mongosvr-20000
    ##是否一个库一个文件夹
     directoryPerDB: true
    ##数据引擎
    # engine: wiredTiger
    ##WT引擎配置
     wiredTiger:
      engineConfig:
    ##WT最大使用cache(根据服务器实际情况调节)
       cacheSizeGB: 1
    ##是否将索引也按数据库名单独存储
       directoryForIndexes: true
    ##表压缩配置
      collectionConfig:
       blockCompressor: zlib
    ##索引配置
      indexConfig:
       prefixCompression: true
    processManagement:
     fork: true  # fork and run in background
     pidFilePath: /fgq/data/mongodb/socket/mongosvr-20000.pid
    ##端口配置
    net:
     port: 20000
     bindIp: 192.168.25.11
    replication:
     oplogSizeMB: 50
     replSetName: configReplSet
    sharding:
     clusterRole: configsvr
    [root@node1 conf]# cp mongosvr-20000.conf mongosvr-21000.conf 
    [root@node1 conf]# cp mongosvr-20000.conf mongosvr-22000.conf
    [root@node1 conf]# vim mongosvr-21000.conf
    :%s/20000/21000/g
    [root@node1 conf]# vim mongosvr-22000.conf
    :%s/20000/22000/g
    # 创建数据文件存储目录
    [root@node1 conf]# mkdir /fgq/data/mongodb/data/{mongosvr-20000,mongosvr-21000,mongosvr-22000}
    
    # 启动config server集群
    [root@node1 ~]# mongod -f /fgq/data/mongodb/conf/mongosvr-20000.conf 
    about to fork child process, waiting until server is ready for connections.
    forked process: 1601
    child process started successfully, parent exiting
    [root@node1 ~]# mongod -f /fgq/data/mongodb/conf/mongosvr-21000.conf 
    about to fork child process, waiting until server is ready for connections.
    forked process: 1642
    child process started successfully, parent exiting
    [root@node1 ~]# mongod -f /fgq/data/mongodb/conf/mongosvr-22000.conf 
    about to fork child process, waiting until server is ready for connections.
    forked process: 1680
    child process started successfully, parent exiting
    [root@node1 ~]# ss -ntlup|grep mongod
    tcp    LISTEN     0      128    192.168.25.11:20000                 *:*                   users:(("mongod",pid=1601,fd=11))
    tcp    LISTEN     0      128    192.168.25.11:21000                 *:*                   users:(("mongod",pid=1642,fd=11))
    tcp    LISTEN     0      128    192.168.25.11:27018                 *:*                   users:(("mongod",pid=1384,fd=11))
    tcp    LISTEN     0      128    192.168.25.11:22000                 *:*                   users:(("mongod",pid=1680,fd=11))
    
    # 配置config server复制集
    [root@node1 ~]# mongo --host 192.168.25.11 --port 20000 admin
    # 配置复制集信息
    > config = {_id: 'configReplSet', members: [{_id: 0, host: '192.168.25.11:20000'},{_id: 1, host: '192.168.25.11:21000'},{_id: 2, host: '192.168.25.11:22000'}]}
    {
        "_id" : "configReplSet",
        "members" : [
            {
                "_id" : 0,
                "host" : "192.168.25.11:20000"
            },
            {
                "_id" : 1,
                "host" : "192.168.25.11:21000"
            },
            {
                "_id" : 2,
                "host" : "192.168.25.11:22000"
            }
        ]
    }
    # 初始化配置
    > rs.initiate(config)
    {
        "ok" : 1,
        "operationTime" : Timestamp(1552383358, 1),
        "$gleStats" : {
            "lastOpTime" : Timestamp(1552383358, 1),
            "electionId" : ObjectId("000000000000000000000000")
        },
        "lastCommittedOpTime" : Timestamp(0, 0),
        "$clusterTime" : {
            "clusterTime" : Timestamp(1552383358, 1),
            "signature" : {
                "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                "keyId" : NumberLong(0)
            }
        }
    }
    configReplSet:SECONDARY> 一会就变成主节点了
    configReplSet:PRIMARY>
    

    mongos节点配置(node1、node2)

    默认端口27017
    node1
    [root@node1 ~]# cd /fgq/data/mongodb/conf/
    [root@node1 conf]# vim mongos.conf
    systemLog:
     destination: file
    ###日志存储位置
     path: /fgq/data/mongodb/logs/mongos.log
     logAppend: true
    processManagement:
     fork: true  # fork and run in background
     pidFilePath: /fgq/data/mongodb/socket/mongos.pid
    ##端口配置
    net:
    # port: 
     bindIp: 192.168.25.11
    
    ## 将config server 添加到路由
    sharding:
     configDB: configReplSet/192.168.25.11:20000,192.168.25.11:21000,192.168.25.11:22000
    # 启动mongos
    [root@node1 ~]# mongos -f /fgq/data/mongodb/conf/mongos.conf 
    about to fork child process, waiting until server is ready for connections.
    forked process: 4452
    child process started successfully, parent exiting
    [root@node1 ~]# ss -ntlup|grep mongos
    tcp    LISTEN     0      128    192.168.25.11:27017                 *:*                   users:(("mongos",pid=4452,fd=10))
    
    
    
    node2
    [root@node2 ~]# cd /fgq/data/mongodb/conf/
    [root@node2 conf]# vim mongos.conf 
    systemLog:
     destination: file
    ###日志存储位置
     path: /fgq/data/mongodb/logs/mongos.log
     logAppend: true
    processManagement:
     fork: true  # fork and run in background
     pidFilePath: /fgq/data/mongodb/socket/mongos.pid
    ##端口配置
    net:
    # port: 
     bindIp: 192.168.25.12
    
    ## 将config server 添加到路由
    sharding:
     configDB: configReplSet/192.168.25.11:20000,192.168.25.11:21000,192.168.25.11:22000
    # 启动mongos
    [root@node2 ~]# mongos -f /fgq/data/mongodb/conf/mongos.conf
    about to fork child process, waiting until server is ready for connections.
    forked process: 5121
    child process started successfully, parent exiting
    [root@node2 ~]# ss -ntlup|grep mongos
    tcp    LISTEN     0      128    192.168.25.12:27017                 *:*                   users:(("mongos",pid=5121,fd=10))
    
    # 登陆到mongos
    [root@node1 ~]# mongo --host 192.168.25.11
    mongos> sh.status()
    --- Sharding Status --- 
      sharding version: {
        "_id" : 1,
        "minCompatibleVersion" : 5,
        "currentVersion" : 6,
        "clusterId" : ObjectId("5c877d8a1cb917fff631a208")
      }
      shards:  ## 目前还没有节点加入到分片中
      active mongoses:
      autosplit:
            Currently enabled: yes
      balancer:
            Currently enabled:  yes
            Currently running:  no
            Failed balancer rounds in last 5 attempts:  0
            Migration Results for the last 24 hours: 
                    No recent migrations
      databases:
            {  "_id" : "config",  "primary" : "config",  "partitioned" : true }
    
    # 添加分片节点
    mongos> sh.addShard("rs1/192.168.25.11:27018,192.168.25.12:27018,192.168.25.13:27018")
    {
        "shardAdded" : "rs1",
        "ok" : 1,
        "operationTime" : Timestamp(1552385323, 4),
        "$clusterTime" : {
            "clusterTime" : Timestamp(1552385323, 4),
            "signature" : {
                "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                "keyId" : NumberLong(0)
            }
        }
    }
    
    
    mongos> sh.status()
    --- Sharding Status --- 
      sharding version: {
        "_id" : 1,
        "minCompatibleVersion" : 5,
        "currentVersion" : 6,
        "clusterId" : ObjectId("5c877d8a1cb917fff631a208")
      }
      shards:  # 添加的一个Shard集群
            {  "_id" : "rs1",  "host" : "rs1/192.168.25.11:27018,192.168.25.12:27018,192.168.25.13:27018",  "state" : 1 }
      active mongoses:
            "4.0.6" : 2
      autosplit:
            Currently enabled: yes
      balancer:
            Currently enabled:  yes
            Currently running:  no
            Failed balancer rounds in last 5 attempts:  0
            Migration Results for the last 24 hours: 
                    No recent migrations
      databases:
            {  "_id" : "config",  "primary" : "config",  "partitioned" : true }
    
    sh.enableSharding("testdb")
        指明在哪个数据库上启用shard功能
        一个库内并非所有的collection都需要启用shard
    mongos> sh.enableSharding("testdb")
    {
        "ok" : 1,
        "operationTime" : Timestamp(1552385817, 7),
        "$clusterTime" : {
            "clusterTime" : Timestamp(1552385817, 7),
            "signature" : {
                "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                "keyId" : NumberLong(0)
            }
        }
    }
    mongos> sh.status()
    --- Sharding Status --- 
      sharding version: {
        "_id" : 1,
        "minCompatibleVersion" : 5,
        "currentVersion" : 6,
        "clusterId" : ObjectId("5c877d8a1cb917fff631a208")
      }
      shards:
            {  "_id" : "rs1",  "host" : "rs1/192.168.25.11:27018,192.168.25.12:27018,192.168.25.13:27018",  "state" : 1 }
      active mongoses:
            "4.0.6" : 2
      autosplit:
            Currently enabled: yes
      balancer:
            Currently enabled:  yes
            Currently running:  no
            Failed balancer rounds in last 5 attempts:  0
            Migration Results for the last 24 hours: 
                    No recent migrations
      databases:
            {  "_id" : "config",  "primary" : "config",  "partitioned" : true }
                    config.system.sessions
                            shard key: { "_id" : 1 }
                            unique: false
                            balancing: true
                            chunks:
                                    rs1 1
                            { "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : rs1 Timestamp(1, 0) 
            {  "_id" : "testdb",  "primary" : "rs1",  "partitioned" : true,  "version" : {  "uuid" : UUID("7e85f1cf-953a-43a0-8b01-dcee0a67923d"),  "lastMod" : 1 } }
    
    
    基于年龄做分片
    sh.shardCollection("testdb.students",{"age":1})
        指明哪个数据库的collection做shard--testdb.students
        指明对哪个collection的哪个字段做索引--age:1  升序排列
        还可以指明unique唯一
    mongos> sh.shardCollection("testdb.students",{"age":1})
    {
        "collectionsharded" : "testdb.students",
        "collectionUUID" : UUID("8182737b-054e-48ce-b0df-9a9bb76d35e7"),
        "ok" : 1,
        "operationTime" : Timestamp(1552385979, 14),
        "$clusterTime" : {
            "clusterTime" : Timestamp(1552385979, 14),
            "signature" : {
                "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                "keyId" : NumberLong(0)
            }
        }
    }
    
    
    mongos> sh.status()
    --- Sharding Status --- 
      sharding version: {
        "_id" : 1,
        "minCompatibleVersion" : 5,
        "currentVersion" : 6,
        "clusterId" : ObjectId("5c877d8a1cb917fff631a208")
      }
      shards:
            {  "_id" : "rs1",  "host" : "rs1/192.168.25.11:27018,192.168.25.12:27018,192.168.25.13:27018",  "state" : 1 }
      active mongoses:
            "4.0.6" : 2
      autosplit:
            Currently enabled: yes
      balancer:
            Currently enabled:  yes
            Currently running:  no
            Failed balancer rounds in last 5 attempts:  0
            Migration Results for the last 24 hours: 
                    No recent migrations
      databases:
            {  "_id" : "config",  "primary" : "config",  "partitioned" : true }
                    config.system.sessions
                            shard key: { "_id" : 1 }
                            unique: false
                            balancing: true
                            chunks:
                                    rs1 1
                            { "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : rs1 Timestamp(1, 0) 
            {  "_id" : "testdb",  "primary" : "rs1",  "partitioned" : true,  "version" : {  "uuid" : UUID("7e85f1cf-953a-43a0-8b01-dcee0a67923d"),  "lastMod" : 1 } }
                    testdb.students
                            shard key: { "age" : 1 }
                            unique: false
                            balancing: true
                            chunks:
                                    rs1 1
                            { "age" : { "$minKey" : 1 } } -->> { "age" : { "$maxKey" : 1 } } on : rs1 Timestamp(1, 0) 
    
    
    mongos> use testdb
    switched to db testdb
    mongos> for (i=1;i<=100000;i++) db.students.insert({name:"student"+i,age:(i%120),class:"class"+(i%10),address:"#25 Lianyun Road,Zhengzhou,China"})
    数据插入过程中,再开一个窗口
    
    [root@node1 ~]# mongo --host 192.168.25.11
    mongos> use testdb
    switched to db testdb
    mongos> db.students.find().count()
    17013
    mongos> sh.status()
    --- Sharding Status --- 
      sharding version: {
        "_id" : 1,
        "minCompatibleVersion" : 5,
        "currentVersion" : 6,
        "clusterId" : ObjectId("5c877d8a1cb917fff631a208")
      }
      shards:
            {  "_id" : "rs1",  "host" : "rs1/192.168.25.11:27018,192.168.25.12:27018,192.168.25.13:27018",  "state" : 1 }
      active mongoses:
            "4.0.6" : 2
      autosplit:
            Currently enabled: yes
      balancer:
            Currently enabled:  yes
            Currently running:  no
            Failed balancer rounds in last 5 attempts:  0
            Migration Results for the last 24 hours: 
                    No recent migrations
      databases:
            {  "_id" : "config",  "primary" : "config",  "partitioned" : true }
                    config.system.sessions
                            shard key: { "_id" : 1 }
                            unique: false
                            balancing: true
                            chunks:
                                    rs1 1
                            { "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : rs1 Timestamp(1, 0) 
            {  "_id" : "testdb",  "primary" : "rs1",  "partitioned" : true,  "version" : {  "uuid" : UUID("7e85f1cf-953a-43a0-8b01-dcee0a67923d"),  "lastMod" : 1 } }
                    testdb.students
                            shard key: { "age" : 1 }
                            unique: false
                            balancing: true
                            chunks:
                                    rs1 1
                            { "age" : { "$minKey" : 1 } } -->> { "age" : { "$maxKey" : 1 } } on : rs1 Timestamp(1, 0) 
    
    数据生成后,sh.status()输出仍然没有变化。原因:当前集群只有rs1这一个shard,balancer没有其他shard可以迁移chunk,所以chunks始终只分布在rs1上;只有加入第二个shard后才能观察到chunk的分裂与迁移
    mongos> sh.status()
    --- Sharding Status --- 
      sharding version: {
        "_id" : 1,
        "minCompatibleVersion" : 5,
        "currentVersion" : 6,
        "clusterId" : ObjectId("5c877d8a1cb917fff631a208")
      }
      shards:
            {  "_id" : "rs1",  "host" : "rs1/192.168.25.11:27018,192.168.25.12:27018,192.168.25.13:27018",  "state" : 1 }
      active mongoses:
            "4.0.6" : 2
      autosplit:
            Currently enabled: yes
      balancer:
            Currently enabled:  yes
            Currently running:  no
            Failed balancer rounds in last 5 attempts:  0
            Migration Results for the last 24 hours: 
                    No recent migrations
      databases:
            {  "_id" : "config",  "primary" : "config",  "partitioned" : true }
                    config.system.sessions
                            shard key: { "_id" : 1 }
                            unique: false
                            balancing: true
                            chunks:
                                    rs1 1
                            { "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : rs1 Timestamp(1, 0) 
            {  "_id" : "testdb",  "primary" : "rs1",  "partitioned" : true,  "version" : {  "uuid" : UUID("7e85f1cf-953a-43a0-8b01-dcee0a67923d"),  "lastMod" : 1 } }
                    testdb.students
                            shard key: { "age" : 1 }
                            unique: false
                            balancing: true
                            chunks:
                                    rs1 1
                            { "age" : { "$minKey" : 1 } } -->> { "age" : { "$maxKey" : 1 } } on : rs1 Timestamp(1, 0)
    
    
    此时登录到shard的集群rs1上
    [root@node1 ~]# mongo --host 192.168.25.11 --port 27018
    rs1:PRIMARY> use testdb
    switched to db testdb
    rs1:PRIMARY> db.students.find().count()
    100000
    数据存储到这里了
    
    
    # 列出shard集群中所有的shard
    mongos> use admin
    switched to db admin
    mongos> db.runCommand('listshards')
    {
        "shards" : [
            {
                "_id" : "rs1",
                "host" : "rs1/192.168.25.11:27018,192.168.25.12:27018,192.168.25.13:27018",
                "state" : 1
            }
        ],
        "ok" : 1,
        "operationTime" : Timestamp(1552386594, 1),
        "$clusterTime" : {
            "clusterTime" : Timestamp(1552386594, 1),
            "signature" : {
                "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                "keyId" : NumberLong(0)
            }
        }
    }
    db.printShardingStatus()    功能等于sh.status()
    
    mongos> sh.isBalancerRunning()
    false
    数据均衡时才会运行,此时数据是均衡的,所以没有运行,无需管它,强大的自我管理功能
    mongos> sh.getBalancerState()
    true
    是否启用了均衡器
    
    sh.setBalancerState()   是否让它自动均衡 false--不自动均衡
        对整个业务业务影响不大的时候,是可以关掉自动均衡的
    sh.moveChunk()  移动chunk到其他shard上,config server会自动更新节点元数据,影响全局的,不到万不得已不要操作
    
    
    注意:
    此处上面搭建的集群中,有shard做了集群,其实只需要config server 做集群即可,shard集群可以不做
    

    不做shard集群的分片搭建

    ## 架构
    mongos  路由  一般是2个--高可用,此处是1个
    config server 一般是3个
    shard1  一般是3个,此处1个
    shard2  一般是3个,此处1个
    冗余  备份集群
    
    192.168.25.11--3个config server、1个router(mongos)、1个shard
    192.168.25.12--1个shard
    
    2个节点都操作,以node1为例
    [root@node1 ~]# mkdir -p /fgq/{base-env,data/mongodb/{conf,data,logs,socket}}
    [root@node1 ~]# cd /fgq/base-env/
    [root@node1 base-env]# rz  上传包 mongodb-linux-x86_64-rhel70-4.0.6.tgz
    [root@node1 base-env]# tar zxf mongodb-linux-x86_64-rhel70-4.0.6.tgz
    [root@node1 base-env]# ln -s mongodb-linux-x86_64-rhel70-4.0.6 mongodb
    [root@node1 base-env]# vim /etc/profile.d/mongodb.sh
    export PATH=$PATH:/fgq/base-env/mongodb/bin
    [root@node1 base-env]# source /etc/profile.d/mongodb.sh
    [root@node1 base-env]# mongo -h
    
    node1操作
    # config集群配置(仅node1)
    [root@node1 ~]# cd /fgq/data/mongodb/conf/
    [root@node1 conf]# vim mongosvr-20000.conf
    systemLog:
     destination: file
    ###日志存储位置
     path: /fgq/data/mongodb/logs/mongosvr-20000.log
     logAppend: true
    storage:
    ##journal配置
     journal:
      enabled: true
    ##数据文件存储位置
     dbPath: /fgq/data/mongodb/data/mongosvr-20000
    ##是否一个库一个文件夹
     directoryPerDB: true
    ##数据引擎
    # engine: wiredTiger
    ##WT引擎配置
     wiredTiger:
      engineConfig:
    ##WT最大使用cache(根据服务器实际情况调节)
       cacheSizeGB: 1
    ##是否将索引也按数据库名单独存储
       directoryForIndexes: true
    ##表压缩配置
      collectionConfig:
       blockCompressor: zlib
    ##索引配置
      indexConfig:
       prefixCompression: true
    processManagement:
     fork: true  # fork and run in background
     pidFilePath: /fgq/data/mongodb/socket/mongosvr-20000.pid
    ##端口配置
    net:
     port: 20000
     bindIp: 192.168.25.11
    replication:
     oplogSizeMB: 50
     replSetName: configReplSet
    sharding:
     clusterRole: configsvr
    [root@node1 conf]# cp mongosvr-20000.conf mongosvr-21000.conf 
    [root@node1 conf]# cp mongosvr-20000.conf mongosvr-22000.conf
    [root@node1 conf]# vim mongosvr-22000.conf
    :%s/20000/22000/g 
    [root@node1 conf]# vim mongosvr-21000.conf
    :%s/20000/21000/g 
    
    # 创建数据文件存储目录
    [root@node1 conf]# cd /fgq/data/mongodb/data/
    [root@node1 data]# mkdir mongosvr-20000
    [root@node1 data]# mkdir mongosvr-21000
    [root@node1 data]# mkdir mongosvr-22000
    
    # 启动config server集群
    [root@node1 ~]# mongod -f /fgq/data/mongodb/conf/mongosvr-20000.conf 
    about to fork child process, waiting until server is ready for connections.
    forked process: 1390
    child process started successfully, parent exiting
    [root@node1 ~]# mongod -f /fgq/data/mongodb/conf/mongosvr-21000.conf 
    about to fork child process, waiting until server is ready for connections.
    forked process: 1428
    child process started successfully, parent exiting
    [root@node1 ~]# mongod -f /fgq/data/mongodb/conf/mongosvr-22000.conf 
    about to fork child process, waiting until server is ready for connections.
    forked process: 1466
    child process started successfully, parent exiting
    [root@node1 ~]# ss -ntlup|grep mongo
    tcp    LISTEN     0      128    192.168.25.11:20000                 *:*                   users:(("mongod",pid=1390,fd=11))
    tcp    LISTEN     0      128    192.168.25.11:21000                 *:*                   users:(("mongod",pid=1428,fd=11))
    tcp    LISTEN     0      128    192.168.25.11:22000                 *:*                   users:(("mongod",pid=1466,fd=11))
    
    [root@node1 ~]# mongo --host 192.168.25.11 --port 20000 admin
    # 配置config server复制集
    > config = {_id: 'configReplSet', members: [{_id: 0, host: '192.168.25.11:20000'},{_id: 1, host: '192.168.25.11:21000'},{_id: 2, host: '192.168.25.11:22000'}]}
    {
        "_id" : "configReplSet",
        "members" : [
            {
                "_id" : 0,
                "host" : "192.168.25.11:20000"
            },
            {
                "_id" : 1,
                "host" : "192.168.25.11:21000"
            },
            {
                "_id" : 2,
                "host" : "192.168.25.11:22000"
            }
        ]
    }
    # 初始化配置
    > rs.initiate(config)
    {
        "ok" : 1,
        "operationTime" : Timestamp(1552448175, 1),
        "$gleStats" : {
            "lastOpTime" : Timestamp(1552448175, 1),
            "electionId" : ObjectId("000000000000000000000000")
        },
        "lastCommittedOpTime" : Timestamp(0, 0),
        "$clusterTime" : {
            "clusterTime" : Timestamp(1552448175, 1),
            "signature" : {
                "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                "keyId" : NumberLong(0)
            }
        }
    }
    configReplSet:SECONDARY> 
    configReplSet:PRIMARY> rs.status()
    {
        "set" : "configReplSet",
        "date" : ISODate("2019-03-13T03:36:31.371Z"),
        "myState" : 1,
        "term" : NumberLong(1),
        "syncingTo" : "",
        "syncSourceHost" : "",
        "syncSourceId" : -1,
        "configsvr" : true,
        "heartbeatIntervalMillis" : NumberLong(2000),
        "optimes" : {
            "lastCommittedOpTime" : {
                "ts" : Timestamp(1552448188, 3),
                "t" : NumberLong(1)
            },
            "readConcernMajorityOpTime" : {
                "ts" : Timestamp(1552448188, 3),
                "t" : NumberLong(1)
            },
            "appliedOpTime" : {
                "ts" : Timestamp(1552448188, 3),
                "t" : NumberLong(1)
            },
            "durableOpTime" : {
                "ts" : Timestamp(1552448188, 3),
                "t" : NumberLong(1)
            }
        },
        "lastStableCheckpointTimestamp" : Timestamp(1552448187, 1),
        "members" : [
            {
                "_id" : 0,
                "name" : "192.168.25.11:20000",
                "health" : 1,
                "state" : 1,
                "stateStr" : "PRIMARY",
                "uptime" : 187,
                "optime" : {
                    "ts" : Timestamp(1552448188, 3),
                    "t" : NumberLong(1)
                },
                "optimeDate" : ISODate("2019-03-13T03:36:28Z"),
                "syncingTo" : "",
                "syncSourceHost" : "",
                "syncSourceId" : -1,
                "infoMessage" : "could not find member to sync from",
                "electionTime" : Timestamp(1552448185, 1),
                "electionDate" : ISODate("2019-03-13T03:36:25Z"),
                "configVersion" : 1,
                "self" : true,
                "lastHeartbeatMessage" : ""
            },
            {
                "_id" : 1,
                "name" : "192.168.25.11:21000",
                "health" : 1,
                "state" : 2,
                "stateStr" : "SECONDARY",
                "uptime" : 16,
                "optime" : {
                    "ts" : Timestamp(1552448188, 3),
                    "t" : NumberLong(1)
                },
                "optimeDurable" : {
                    "ts" : Timestamp(1552448188, 3),
                    "t" : NumberLong(1)
                },
                "optimeDate" : ISODate("2019-03-13T03:36:28Z"),
                "optimeDurableDate" : ISODate("2019-03-13T03:36:28Z"),
                "lastHeartbeat" : ISODate("2019-03-13T03:36:29.918Z"),
                "lastHeartbeatRecv" : ISODate("2019-03-13T03:36:30.525Z"),
                "pingMs" : NumberLong(0),
                "lastHeartbeatMessage" : "",
                "syncingTo" : "192.168.25.11:20000",
                "syncSourceHost" : "192.168.25.11:20000",
                "syncSourceId" : 0,
                "infoMessage" : "",
                "configVersion" : 1
            },
            {
                "_id" : 2,
                "name" : "192.168.25.11:22000",
                "health" : 1,
                "state" : 2,
                "stateStr" : "SECONDARY",
                "uptime" : 16,
                "optime" : {
                    "ts" : Timestamp(1552448188, 3),
                    "t" : NumberLong(1)
                },
                "optimeDurable" : {
                    "ts" : Timestamp(1552448188, 3),
                    "t" : NumberLong(1)
                },
                "optimeDate" : ISODate("2019-03-13T03:36:28Z"),
                "optimeDurableDate" : ISODate("2019-03-13T03:36:28Z"),
                "lastHeartbeat" : ISODate("2019-03-13T03:36:29.907Z"),
                "lastHeartbeatRecv" : ISODate("2019-03-13T03:36:30.494Z"),
                "pingMs" : NumberLong(0),
                "lastHeartbeatMessage" : "",
                "syncingTo" : "192.168.25.11:20000",
                "syncSourceHost" : "192.168.25.11:20000",
                "syncSourceId" : 0,
                "infoMessage" : "",
                "configVersion" : 1
            }
        ],
        "ok" : 1,
        "operationTime" : Timestamp(1552448188, 3),
        "$gleStats" : {
            "lastOpTime" : Timestamp(1552448175, 1),
            "electionId" : ObjectId("7fffffff0000000000000001")
        },
        "lastCommittedOpTime" : Timestamp(1552448188, 3),
        "$clusterTime" : {
            "clusterTime" : Timestamp(1552448188, 3),
            "signature" : {
                "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                "keyId" : NumberLong(0)
            }
        }
    }
    
    # node1的shard配置
    [root@node1 ~]# vim /fgq/data/mongodb/conf/mongod.conf
    systemLog:
      destination: file
      path: /fgq/data/mongodb/logs/mongod.log
      logAppend: true
    storage:
      journal:
        enabled: true
      dbPath: /fgq/data/mongodb/data/mongod
      directoryPerDB: true
      #engine: wiredTiger
      wiredTiger:
        engineConfig:
          # cacheSizeGB: 1
          directoryForIndexes: true
        collectionConfig:
          blockCompressor: zlib
        indexConfig:
          prefixCompression: true
    processManagement:
      fork: true
      pidFilePath: /fgq/data/mongodb/socket/mongod.pid
    net:
      port:   40000
      bindIp: 192.168.25.11
    #replication:
    #  oplogSizeMB: 50
    #  replSetName: rs1
    sharding:
      clusterRole: shardsvr
    
    [root@node1 ~]# mkdir /fgq/data/mongodb/data/mongod
    [root@node1 ~]# mongod -f /fgq/data/mongodb/conf/mongod.conf 
    about to fork child process, waiting until server is ready for connections.
    forked process: 1689
    child process started successfully, parent exiting
    [root@node1 ~]# ss -ntlup|grep mongo
    tcp    LISTEN     0      128    192.168.25.11:40000                 *:*                   users:(("mongod",pid=1689,fd=11))
    tcp    LISTEN     0      128    192.168.25.11:20000                 *:*                   users:(("mongod",pid=1390,fd=11))
    tcp    LISTEN     0      128    192.168.25.11:21000                 *:*                   users:(("mongod",pid=1428,fd=11))
    tcp    LISTEN     0      128    192.168.25.11:22000                 *:*                   users:(("mongod",pid=1466,fd=11))
    [root@node1 ~]# scp /fgq/data/mongodb/conf/mongod.conf node2:/fgq/data/mongodb/conf/
    
    
    # node1的mongos配置
    [root@node1 ~]# vim /fgq/data/mongodb/conf/mongos.conf
    systemLog:
     destination: file
    ###日志存储位置
     path: /fgq/data/mongodb/logs/mongos.log
     logAppend: true
    processManagement:
     fork: true  # fork and run in background
     pidFilePath: /fgq/data/mongodb/socket/mongos.pid
    ##端口配置
    net:
     port: 30000
     bindIp: 192.168.25.11
    
    ## 将confige server 添加到路由
    sharding:
     configDB: configReplSet/192.168.25.11:20000,192.168.25.11:21000,192.168.25.11:22000
    
    [root@node1 ~]# mongos -f /fgq/data/mongodb/conf/mongos.conf 
    about to fork child process, waiting until server is ready for connections.
    forked process: 1728
    child process started successfully, parent exiting
    [root@node1 ~]# ss -ntlup|grep mongo
    tcp    LISTEN     0      128    192.168.25.11:40000                 *:*                   users:(("mongod",pid=1689,fd=11))
    tcp    LISTEN     0      128    192.168.25.11:20000                 *:*                   users:(("mongod",pid=1390,fd=11))
    tcp    LISTEN     0      128    192.168.25.11:21000                 *:*                   users:(("mongod",pid=1428,fd=11))
    tcp    LISTEN     0      128    192.168.25.11:30000                 *:*                   users:(("mongos",pid=1728,fd=10))
    tcp    LISTEN     0      128    192.168.25.11:22000                 *:*                   users:(("mongod",pid=1466,fd=11))
    
    # 连接mongos
    [root@node1 ~]# mongo --host 192.168.25.11 --port 30000
    mongos> help
    mongos> sh.help()
    mongos> sh.status()
    --- Sharding Status --- 
      sharding version: {
        "_id" : 1,
        "minCompatibleVersion" : 5,
        "currentVersion" : 6,
        "clusterId" : ObjectId("5c887abb5ed79590bf60002c")
      }
      shards:  # 没有节点加入到分片中
      active mongoses:
      autosplit:
            Currently enabled: yes
      balancer:
            Currently enabled:  yes
            Currently running:  no
            Failed balancer rounds in last 5 attempts:  0
            Migration Results for the last 24 hours: 
                    No recent migrations
      databases:
            {  "_id" : "config",  "primary" : "config",  "partitioned" : true }
    
    # 添加分片节点
    mongos> sh.addShard("192.168.25.11:40000")
    {
        "shardAdded" : "shard0000",
        "ok" : 1,
        "operationTime" : Timestamp(1552448682, 4),
        "$clusterTime" : {
            "clusterTime" : Timestamp(1552448682, 4),
            "signature" : {
                "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                "keyId" : NumberLong(0)
            }
        }
    }
    mongos> sh.addShard("192.168.25.12:40000")
    {
        "shardAdded" : "shard0001",
        "ok" : 1,
        "operationTime" : Timestamp(1552448689, 2),
        "$clusterTime" : {
            "clusterTime" : Timestamp(1552448689, 2),
            "signature" : {
                "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                "keyId" : NumberLong(0)
            }
        }
    }
    mongos> sh.status()
    --- Sharding Status --- 
      sharding version: {
        "_id" : 1,
        "minCompatibleVersion" : 5,
        "currentVersion" : 6,
        "clusterId" : ObjectId("5c887abb5ed79590bf60002c")
      }
      shards:  # 成功添加节点到分片中
            {  "_id" : "shard0000",  "host" : "192.168.25.11:40000",  "state" : 1 }
            {  "_id" : "shard0001",  "host" : "192.168.25.12:40000",  "state" : 1 }
      active mongoses:
            "4.0.6" : 1
      autosplit:
            Currently enabled: yes
      balancer:
            Currently enabled:  yes
            Currently running:  no
            Failed balancer rounds in last 5 attempts:  0
            Migration Results for the last 24 hours: 
                    No recent migrations
      databases:
            {  "_id" : "config",  "primary" : "config",  "partitioned" : true }
    
    
    sh.enableSharding("testdb")
        指明在哪个数据库上启用shard功能
        一个库内并非所有的collection都需要启用shard
    sh.status()  
        查看databases,此处是支持shard的数据库有哪些,config是默认的
    mongos> sh.enableSharding("testdb")
    {
        "ok" : 1,
        "operationTime" : Timestamp(1552448725, 6),
        "$clusterTime" : {
            "clusterTime" : Timestamp(1552448725, 6),
            "signature" : {
                "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                "keyId" : NumberLong(0)
            }
        }
    }
    mongos> sh.status()
    --- Sharding Status --- 
      sharding version: {
        "_id" : 1,
        "minCompatibleVersion" : 5,
        "currentVersion" : 6,
        "clusterId" : ObjectId("5c887abb5ed79590bf60002c")
      }
      shards:
            {  "_id" : "shard0000",  "host" : "192.168.25.11:40000",  "state" : 1 }
            {  "_id" : "shard0001",  "host" : "192.168.25.12:40000",  "state" : 1 }
      active mongoses:
            "4.0.6" : 1
      autosplit:
            Currently enabled: yes
      balancer:
            Currently enabled:  yes
            Currently running:  no
            Failed balancer rounds in last 5 attempts:  0
            Migration Results for the last 24 hours: 
                    No recent migrations
      databases:
            {  "_id" : "config",  "primary" : "config",  "partitioned" : true }
            {  "_id" : "testdb",  "primary" : "shard0000",  "partitioned" : true,  "version" : {  "uuid" : UUID("47a9fc65-adcf-48f0-8eb8-a67bfd7be796"),  "lastMod" : 1 } }
    
    
    基于年龄做分片
    sh.shardCollection("testdb.students",{"age":1})
        指明哪个数据库的collection做shard--testdb.students
        指明对哪个collection的哪个字段做索引--age:1  升序排列
        还可以指明unique唯一
    mongos> sh.shardCollection("testdb.students",{"age":1})
    {
        "collectionsharded" : "testdb.students",
        "collectionUUID" : UUID("71b9d628-312f-4536-b6a1-08305f7d9080"),
        "ok" : 1,
        "operationTime" : Timestamp(1552448788, 9),
        "$clusterTime" : {
            "clusterTime" : Timestamp(1552448788, 9),
            "signature" : {
                "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                "keyId" : NumberLong(0)
            }
        }
    }
    mongos> sh.status()
    --- Sharding Status --- 
      sharding version: {
        "_id" : 1,
        "minCompatibleVersion" : 5,
        "currentVersion" : 6,
        "clusterId" : ObjectId("5c887abb5ed79590bf60002c")
      }
      shards:
            {  "_id" : "shard0000",  "host" : "192.168.25.11:40000",  "state" : 1 }
            {  "_id" : "shard0001",  "host" : "192.168.25.12:40000",  "state" : 1 }
      active mongoses:
            "4.0.6" : 1
      autosplit:
            Currently enabled: yes
      balancer:
            Currently enabled:  yes
            Currently running:  no
            Failed balancer rounds in last 5 attempts:  0
            Migration Results for the last 24 hours: 
                    No recent migrations
      databases:
            {  "_id" : "config",  "primary" : "config",  "partitioned" : true }
            {  "_id" : "testdb",  "primary" : "shard0000",  "partitioned" : true,  "version" : {  "uuid" : UUID("47a9fc65-adcf-48f0-8eb8-a67bfd7be796"),  "lastMod" : 1 } }
                    testdb.students
                            shard key: { "age" : 1 }
                            unique: false
                            balancing: true
                            chunks:
                                    shard0000   1
                            { "age" : { "$minKey" : 1 } } -->> { "age" : { "$maxKey" : 1 } } on : shard0000 Timestamp(1, 0) 
    
    mongos> use testdb
    switched to db testdb
    数据插入过程中,再开一个窗口,查看是否均衡
    mongos> for (i=1;i<=100000;i++) db.students.insert({name:"student"+i,age:(i%120),class:"class"+(i%10),address:"#25 Lianyun Road,Zhengzhou,China"})
    WriteResult({ "nInserted" : 1 })
    
    [root@node1 ~]# mongo --host 192.168.25.11 --port 30000
    mongos> use testdb
    switched to db testdb
    mongos> db.students.find().count()
    35289
    mongos> sh.status()
    --- Sharding Status --- 
      sharding version: {
        "_id" : 1,
        "minCompatibleVersion" : 5,
        "currentVersion" : 6,
        "clusterId" : ObjectId("5c887abb5ed79590bf60002c")
      }
      shards:
            {  "_id" : "shard0000",  "host" : "192.168.25.11:40000",  "state" : 1 }
            {  "_id" : "shard0001",  "host" : "192.168.25.12:40000",  "state" : 1 }
      active mongoses:
            "4.0.6" : 1
      autosplit:
            Currently enabled: yes
      balancer:
            Currently enabled:  yes
            Currently running:  no
            Failed balancer rounds in last 5 attempts:  0
            Migration Results for the last 24 hours: 
                    No recent migrations
      databases:
            {  "_id" : "config",  "primary" : "config",  "partitioned" : true }
            {  "_id" : "testdb",  "primary" : "shard0000",  "partitioned" : true,  "version" : {  "uuid" : UUID("47a9fc65-adcf-48f0-8eb8-a67bfd7be796"),  "lastMod" : 1 } }
                    testdb.students
                            shard key: { "age" : 1 }
                            unique: false
                            balancing: true
                            chunks:
                                    shard0000   1
                            { "age" : { "$minKey" : 1 } } -->> { "age" : { "$maxKey" : 1 } } on : shard0000 Timestamp(1, 0) 
    
    正常情况下,插入大量数据后 sh.status() 会出现类似下面的 chunk 分裂与迁移情况(本次实验中未复现,具体原因待查):
        chunks 
            shard0000  1  1个分片-->1个-->2个
            shard0001  2  2个分片-->过一会变为3个分片--2个
        age  1-->>119   1-->>45
        age  119-->>    45-->>119
                        119-->>
        正在进行chunk均衡操作
    过一会再查看一下,chunk就均衡了,100000数据都生成了
    
    
    数据全部生成后,sh.status() 结果和第一次执行的不同之处在于databases的"config"
    mongos> sh.status()
    --- Sharding Status --- 
      sharding version: {
        "_id" : 1,
        "minCompatibleVersion" : 5,
        "currentVersion" : 6,
        "clusterId" : ObjectId("5c887abb5ed79590bf60002c")
      }
      shards:
            {  "_id" : "shard0000",  "host" : "192.168.25.11:40000",  "state" : 1 }
            {  "_id" : "shard0001",  "host" : "192.168.25.12:40000",  "state" : 1 }
      active mongoses:
            "4.0.6" : 1
      autosplit:
            Currently enabled: yes
      balancer:
            Currently enabled:  yes
            Currently running:  no
            Failed balancer rounds in last 5 attempts:  0
            Migration Results for the last 24 hours: 
                    No recent migrations
      databases:
            {  "_id" : "config",  "primary" : "config",  "partitioned" : true }
                    config.system.sessions
                            shard key: { "_id" : 1 }
                            unique: false
                            balancing: true
                            chunks:
                                    shard0000   1
                            { "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : shard0000 Timestamp(1, 0) 
            {  "_id" : "testdb",  "primary" : "shard0000",  "partitioned" : true,  "version" : {  "uuid" : UUID("47a9fc65-adcf-48f0-8eb8-a67bfd7be796"),  "lastMod" : 1 } }
                    testdb.students
                            shard key: { "age" : 1 }
                            unique: false
                            balancing: true
                            chunks:
                                    shard0000   1
                            { "age" : { "$minKey" : 1 } } -->> { "age" : { "$maxKey" : 1 } } on : shard0000 Timestamp(1, 0) 
    
    use admin 
    db.runCommand("listShards")  列出shard集群中所有的shard
    此处就是admin,所以不用切换admin
    mongos> db.runCommand('listshards')
    {
        "shards" : [
            {
                "_id" : "shard0000",
                "host" : "192.168.25.11:40000",
                "state" : 1
            },
            {
                "_id" : "shard0001",
                "host" : "192.168.25.12:40000",
                "state" : 1
            }
        ],
        "ok" : 1,
        "operationTime" : Timestamp(1552448995, 1),
        "$clusterTime" : {
            "clusterTime" : Timestamp(1552448995, 1),
            "signature" : {
                "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
                "keyId" : NumberLong(0)
            }
        }
    }
    mongos> sh.isBalancerRunning()
    false
    均衡器只有在需要进行数据均衡(即各shard间chunk分布不均)时才会运行;此时数据已经均衡,所以没有运行,无需干预,mongodb具有强大的自我管理功能
    mongos> sh.getBalancerState()
    true
    是否启用了均衡器
    
    sh.setBalancerState(true|false)   
        设置是否开启自动均衡:true--开启自动均衡,false--关闭自动均衡
        对整个业务影响不大的时候,是可以关掉自动均衡的
    sh.moveChunk()  
        移动chunk到其他shard上,config server会自动更新节点元数据,影响全局的,不到万不得已不要操作
    
    db.printShardingStatus()    功能等于sh.status()
    
    mongodb的框架
    

    相关文章

      网友评论

        本文标题:MongoDB分片集群搭建

        本文链接:https://www.haomeiwen.com/subject/vcmbpqtx.html