
Deploying a MongoDB Sharding Cluster

Author: awker | Published 2018-07-18 20:43

    I. configsvr configuration (10.3.20.14:27017, 10.3.20.51:27017, 10.3.20.54:27017)

    Walkthrough: 10.3.20.14:27017 is used as the example; the other two nodes are configured the same way.

    1. Install and configure MongoDB

    # wget https://mirrors.aliyun.com/mongodb/yum/redhat/6Server/mongodb-org/3.6/x86_64/RPMS/mongodb-org-3.6.6-1.el6.x86_64.rpm
    # wget https://mirrors.aliyun.com/mongodb/yum/redhat/6Server/mongodb-org/3.6/x86_64/RPMS/mongodb-org-mongos-3.6.6-1.el6.x86_64.rpm
    # wget https://mirrors.aliyun.com/mongodb/yum/redhat/6Server/mongodb-org/3.6/x86_64/RPMS/mongodb-org-server-3.6.6-1.el6.x86_64.rpm
    # wget https://mirrors.aliyun.com/mongodb/yum/redhat/6Server/mongodb-org/3.6/x86_64/RPMS/mongodb-org-shell-3.6.6-1.el6.x86_64.rpm
    # wget https://mirrors.aliyun.com/mongodb/yum/redhat/6Server/mongodb-org/3.6/x86_64/RPMS/mongodb-org-tools-3.6.6-1.el6.x86_64.rpm
    
    # ls
    mongodb-org-3.6.6-1.el6.x86_64.rpm         mongodb-org-server-3.6.6-1.el6.x86_64.rpm  mongodb-org-tools-3.6.6-1.el6.x86_64.rpm
    mongodb-org-mongos-3.6.6-1.el6.x86_64.rpm  mongodb-org-shell-3.6.6-1.el6.x86_64.rpm
    
    # rpm -ivh *
    warning: mongodb-org-3.6.6-1.el6.x86_64.rpm: Header V3 RSA/SHA1 Signature, key ID 91fa4ad5: NOKEY
    Preparing...                ########################################### [100%]
       1:mongodb-org-tools      ########################################### [ 20%]
       2:mongodb-org-shell      ########################################### [ 40%]
       3:mongodb-org-server     ########################################### [ 60%]
       4:mongodb-org-mongos     ########################################### [ 80%]
       5:mongodb-org            ########################################### [100%]
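    The NOKEY warning above only means the package signatures could not be verified because the MongoDB release public key is not imported. Optionally silence it (a sketch, assuming the standard MongoDB 3.6 release key URL):

    # rpm --import https://www.mongodb.org/static/pgp/server-3.6.asc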
       
       
    # egrep -v "^$|^#" /etc/mongod.conf 
    systemLog:
      destination: file
      logAppend: true
      path: /var/log/mongodb/mongod.log
    storage:
      dbPath: /var/lib/mongo
      journal:
        enabled: true
    processManagement:
      fork: true  # fork and run in background
      pidFilePath: /var/run/mongodb/mongod.pid  # location of pidfile
      timeZoneInfo: /usr/share/zoneinfo
    net:
      port: 27017
      bindIp: 127.0.0.1,10.3.20.14  # Listen to local interface only, comment to listen on all interfaces.
    
    

    2. Start MongoDB

    # /etc/init.d/mongod start
    Starting mongod:                                           [  OK  ]
    # ps -ef|grep mongod
    mongod     4074      1 32 18:19 ?        00:00:01 /usr/bin/mongod -f /etc/mongod.conf
    root       4098   1421  0 18:20 pts/0    00:00:00 grep mongod
    # netstat -nltup|grep 4074
    tcp        0      0 10.3.20.14:27017            0.0.0.0:*                   LISTEN      4074/mongod         
    tcp        0      0 127.0.0.1:27017             0.0.0.0:*                   LISTEN      4074/mongod   
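    Optionally, have mongod start on boot; the EL6 package ships a SysV init script (used above), so chkconfig applies:

    # chkconfig mongod on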
    

    3. Configure MongoDB authentication

    # mongo
    MongoDB shell version v3.6.6
    connecting to: mongodb://127.0.0.1:27017
    MongoDB server version: 3.6.6
    Server has startup warnings: 
    2018-07-16T18:28:34.139+0800 I STORAGE  [initandlisten] 
    2018-07-16T18:28:34.139+0800 I STORAGE  [initandlisten] ** WARNING: Using the XFS filesystem is strongly recommended with the WiredTiger storage engine
    2018-07-16T18:28:34.139+0800 I STORAGE  [initandlisten] **          See http://dochub.mongodb.org/core/prodnotes-filesystem
    2018-07-16T18:28:35.074+0800 I CONTROL  [initandlisten] 
    2018-07-16T18:28:35.074+0800 I CONTROL  [initandlisten] ** WARNING: Access control is not enabled for the database.
    2018-07-16T18:28:35.074+0800 I CONTROL  [initandlisten] **          Read and write access to data and configuration is unrestricted.
    2018-07-16T18:28:35.074+0800 I CONTROL  [initandlisten] 
    > show databases
    admin  0.000GB
    local  0.000GB
    > use admin
    switched to db admin
    
    // Create the superuser
    > db.createUser({user: 'root', pwd: '123456', roles: ['root']})
    Successfully added user: { "user" : "root", "roles" : [ "root" ] }
    > 
    
    // Require authentication for logins (add the security: authorization: 'enabled' setting)
    # egrep -v "^$|^#" /etc/mongod.conf 
    systemLog:
      destination: file
      logAppend: true
      path: /var/log/mongodb/mongod.log
    storage:
      dbPath: /var/lib/mongo
      journal:
        enabled: true
    processManagement:
      fork: true  # fork and run in background
      pidFilePath: /var/run/mongodb/mongod.pid  # location of pidfile
      timeZoneInfo: /usr/share/zoneinfo
    net:
      port: 27017
      bindIp: 127.0.0.1,10.3.20.14  # Listen to local interface only, comment to listen on all interfaces.
    security:
      authorization: 'enabled'
    
    
    # /etc/init.d/mongod restart
    
    // Login method 1: pass credentials on the command line (see method 2 below)
    # mongo -u root -p 123456 --authenticationDatabase admin
    > show databases;
    admin   0.000GB
    config  0.000GB
    local   0.000GB
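    Login method 2 (a sketch, mirroring the db.auth() call used later with mongos): connect without credentials, then authenticate from the shell; db.auth() returns 1 on success.

    # mongo --host 10.3.20.14
    > use admin
    > db.auth('root', '123456')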
    

    4. Configure the configsvr replica set

    Set up the cluster keyfile (all three machines must share the same key)

    # openssl rand -base64 756 > mongo.key
    # mv mongo.key /var/lib/mongo/
    # chown mongod:mongod /var/lib/mongo/mongo.key
    # chmod 600 /var/lib/mongo/mongo.key
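    The identical key file, with the same ownership and mode, must be distributed to the other two config servers, and later to every shard member and mongos host; a sketch using scp:

    # scp /var/lib/mongo/mongo.key 10.3.20.51:/var/lib/mongo/
    # scp /var/lib/mongo/mongo.key 10.3.20.54:/var/lib/mongo/
    # ssh 10.3.20.51 "chown mongod:mongod /var/lib/mongo/mongo.key; chmod 600 /var/lib/mongo/mongo.key"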
    
    # egrep -v "^#|^$" /etc/mongod.conf
    systemLog:
      destination: file
      logAppend: true
      path: /var/log/mongodb/mongod.log
    storage:
      dbPath: /var/lib/mongo
      journal:
        enabled: true
    processManagement:
      fork: true  # fork and run in background
      pidFilePath: /var/run/mongodb/mongod.pid  # location of pidfile
      timeZoneInfo: /usr/share/zoneinfo
    net:
      port: 27017
      bindIp: 127.0.0.1,10.3.20.14  # Listen to local interface only, comment to listen on all interfaces.
    security:
      authorization: 'enabled'
      keyFile: /var/lib/mongo/mongo.key
    replication:
      oplogSizeMB: 500
      replSetName: repdata
    sharding:
      clusterRole: configsvr
    
    # /etc/init.d/mongod restart
    
    // Run this step on 10.3.20.14 only; it initializes the configsvr replica set
    # mongo -u root -p 123456 10.3.20.14/admin
    >  cfg={ _id:"repdata", configsvr: true, members:[ {_id:0,host:'10.3.20.14:27017',priority:2}, {_id:1,host:'10.3.20.51:27017',priority:1}, {_id:2,host:'10.3.20.54:27017',priority:1}] }
    > rs.initiate(cfg)
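    To confirm the replica set came up, a quick check of member states (a sketch; 10.3.20.14 should report PRIMARY thanks to its higher priority):

    > rs.status().members.forEach(function(m) { print(m.name + " " + m.stateStr) })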
    

    II. sharding01 configuration (10.3.20.102:27017, 10.3.20.105:27017, 10.3.20.132:27017)

    1. Install MongoDB following steps 1-3 of Part I

    2. Reuse the mongo.key from Part I, step 4, as the shared cluster key

    3. Configure the sharding01 replica set

    # egrep -v "^$|^#" /etc/mongod.conf 
    systemLog:
      destination: file
      logAppend: true
      path: /var/log/mongodb/mongod.log
    storage:
      dbPath: /var/lib/mongo
      journal:
        enabled: true
    processManagement:
      fork: true  # fork and run in background
      pidFilePath: /var/run/mongodb/mongod.pid  # location of pidfile
      timeZoneInfo: /usr/share/zoneinfo
    net:
      port: 27017
      bindIp: 127.0.0.1,10.3.20.102  # Listen to local interface only, comment to listen on all interfaces.
    security:
      authorization: 'enabled'
      keyFile: /var/lib/mongo/mongo.key
    replication:
      replSetName: sharding01
    sharding:
      clusterRole: shardsvr
    
    # /etc/init.d/mongod restart
    
    // Run this step on 10.3.20.102 only; it initializes the sharding01 replica set
    # mongo -u root -p 123456 10.3.20.102/admin
    MongoDB shell version v3.6.6
    connecting to: mongodb://10.3.20.102:27017/admin
    MongoDB server version: 3.6.6
    Server has startup warnings: 
    2018-07-18T15:16:32.731+0800 I CONTROL  [initandlisten] 
    2018-07-18T15:16:32.731+0800 I CONTROL  [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is 'always'.
    2018-07-18T15:16:32.731+0800 I CONTROL  [initandlisten] **        We suggest setting it to 'never'
    2018-07-18T15:16:32.731+0800 I CONTROL  [initandlisten] 
    >  cfg={ _id:"sharding01", members:[ {_id:0,host:'10.3.20.102:27017',priority:2}, {_id:1,host:'10.3.20.105:27017',priority:1}, {_id:2,host:'10.3.20.132:27017',priority:1}] }
    {
        "_id" : "sharding01",
        "members" : [
            {
                "_id" : 0,
                "host" : "10.3.20.102:27017",
                "priority" : 2
            },
            {
                "_id" : 1,
                "host" : "10.3.20.105:27017",
                "priority" : 1
            },
            {
                "_id" : 2,
                "host" : "10.3.20.132:27017",
                "priority" : 1
            }
        ]
    }
    > rs.initiate(cfg)
    { "ok" : 1 }
    
    > rs.status()
    {
        "set" : "sharding01",
        "date" : ISODate("2018-07-18T07:53:31.202Z"),
        "myState" : 1,
        "term" : NumberLong(1),
        "syncingTo" : "",
        "syncSourceHost" : "",
        "syncSourceId" : -1,
        "heartbeatIntervalMillis" : NumberLong(2000),
        "optimes" : {
            "lastCommittedOpTime" : {
                "ts" : Timestamp(1531900403, 1),
                "t" : NumberLong(1)
            },
            "readConcernMajorityOpTime" : {
                "ts" : Timestamp(1531900403, 1),
                "t" : NumberLong(1)
            },
            "appliedOpTime" : {
                "ts" : Timestamp(1531900403, 1),
                "t" : NumberLong(1)
            },
            "durableOpTime" : {
                "ts" : Timestamp(1531900403, 1),
                "t" : NumberLong(1)
            }
        },
        "members" : [
            {
                "_id" : 0,
                "name" : "10.3.20.102:27017",
                "health" : 1,
                "state" : 1,
                "stateStr" : "PRIMARY",
                "uptime" : 2223,
                "optime" : {
                    "ts" : Timestamp(1531900403, 1),
                    "t" : NumberLong(1)
                },
                "optimeDate" : ISODate("2018-07-18T07:53:23Z"),
                "syncingTo" : "",
                "syncSourceHost" : "",
                "syncSourceId" : -1,
                "infoMessage" : "",
                "electionTime" : Timestamp(1531899882, 1),
                "electionDate" : ISODate("2018-07-18T07:44:42Z"),
                "configVersion" : 1,
                "self" : true,
                "lastHeartbeatMessage" : ""
            },
            {
                "_id" : 1,
                "name" : "10.3.20.105:27017",
                "health" : 1,
                "state" : 2,
                "stateStr" : "SECONDARY",
                "uptime" : 539,
                "optime" : {
                    "ts" : Timestamp(1531900403, 1),
                    "t" : NumberLong(1)
                },
                "optimeDurable" : {
                    "ts" : Timestamp(1531900403, 1),
                    "t" : NumberLong(1)
                },
                "optimeDate" : ISODate("2018-07-18T07:53:23Z"),
                "optimeDurableDate" : ISODate("2018-07-18T07:53:23Z"),
                "lastHeartbeat" : ISODate("2018-07-18T07:53:30.991Z"),
                "lastHeartbeatRecv" : ISODate("2018-07-18T07:53:31.120Z"),
                "pingMs" : NumberLong(0),
                "lastHeartbeatMessage" : "",
                "syncingTo" : "10.3.20.102:27017",
                "syncSourceHost" : "10.3.20.102:27017",
                "syncSourceId" : 0,
                "infoMessage" : "",
                "configVersion" : 1
            },
            {
                "_id" : 2,
                "name" : "10.3.20.132:27017",
                "health" : 1,
                "state" : 2,
                "stateStr" : "SECONDARY",
                "uptime" : 539,
                "optime" : {
                    "ts" : Timestamp(1531900403, 1),
                    "t" : NumberLong(1)
                },
                "optimeDurable" : {
                    "ts" : Timestamp(1531900403, 1),
                    "t" : NumberLong(1)
                },
                "optimeDate" : ISODate("2018-07-18T07:53:23Z"),
                "optimeDurableDate" : ISODate("2018-07-18T07:53:23Z"),
                "lastHeartbeat" : ISODate("2018-07-18T07:53:31.035Z"),
                "lastHeartbeatRecv" : ISODate("2018-07-18T07:53:29.620Z"),
                "pingMs" : NumberLong(0),
                "lastHeartbeatMessage" : "",
                "syncingTo" : "10.3.20.102:27017",
                "syncSourceHost" : "10.3.20.102:27017",
                "syncSourceId" : 0,
                "infoMessage" : "",
                "configVersion" : 1
            }
        ],
        "ok" : 1
    }
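    A quick replication check (a sketch; the database and collection names are illustrative): write on the primary, then read the document back on a secondary, which in 3.6 first needs rs.slaveOk().

    // on the primary (10.3.20.102), after authenticating as above
    > use repltest
    > db.t.insert({x: 1})

    // on a secondary, e.g. 10.3.20.105, after authenticating
    > rs.slaveOk()
    > use repltest
    > db.t.find()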
    
    

    III. sharding02 configuration (10.3.20.141:27017, 10.3.20.142:27017, 10.3.20.143:27017)

    1. Install MongoDB following steps 1-3 of Part I

    2. Reuse the mongo.key from Part I, step 4, as the shared cluster key

    3. Configure the sharding02 replica set

    # egrep -v "^$|^#" /etc/mongod.conf 
    systemLog:
      destination: file
      logAppend: true
      path: /var/log/mongodb/mongod.log
    storage:
      dbPath: /var/lib/mongo
      journal:
        enabled: true
    processManagement:
      fork: true  # fork and run in background
      pidFilePath: /var/run/mongodb/mongod.pid  # location of pidfile
      timeZoneInfo: /usr/share/zoneinfo
    net:
      port: 27017
      bindIp: 127.0.0.1,10.3.20.141  # Listen to local interface only, comment to listen on all interfaces.
    security:
      authorization: 'enabled'
      keyFile: /var/lib/mongo/mongo.key
    replication:
      replSetName: sharding02
    sharding:
      clusterRole: shardsvr
    
    # /etc/init.d/mongod restart
    
    // Run this step on 10.3.20.141 only; it initializes the sharding02 replica set
    # mongo -u root -p 123456 10.3.20.141/admin
    > cfg={ _id:"sharding02", members:[ {_id:0,host:'10.3.20.141:27017',priority:2}, {_id:1,host:'10.3.20.142:27017',priority:1}, {_id:2,host:'10.3.20.143:27017',priority:1}] }
    {
        "_id" : "sharding02",
        "members" : [
            {
                "_id" : 0,
                "host" : "10.3.20.141:27017",
                "priority" : 2
            },
            {
                "_id" : 1,
                "host" : "10.3.20.142:27017",
                "priority" : 1
            },
            {
                "_id" : 2,
                "host" : "10.3.20.143:27017",
                "priority" : 1
            }
        ]
    }
    > rs.initiate(cfg)
    { "ok" : 1 }
    
    > rs.status()
    {
        "set" : "sharding02",
        "date" : ISODate("2018-07-18T08:23:04.320Z"),
        "myState" : 2,
        "term" : NumberLong(0),
        "syncingTo" : "",
        "syncSourceHost" : "",
        "syncSourceId" : -1,
        "heartbeatIntervalMillis" : NumberLong(2000),
        "optimes" : {
            "lastCommittedOpTime" : {
                "ts" : Timestamp(0, 0),
                "t" : NumberLong(-1)
            },
            "appliedOpTime" : {
                "ts" : Timestamp(1531902177, 1),
                "t" : NumberLong(-1)
            },
            "durableOpTime" : {
                "ts" : Timestamp(1531902177, 1),
                "t" : NumberLong(-1)
            }
        },
        "members" : [
            {
                "_id" : 0,
                "name" : "10.3.20.141:27017",
                "health" : 1,
                "state" : 2,
                "stateStr" : "SECONDARY",
                "uptime" : 133,
                "optime" : {
                    "ts" : Timestamp(1531902177, 1),
                    "t" : NumberLong(-1)
                },
                "optimeDate" : ISODate("2018-07-18T08:22:57Z"),
                "syncingTo" : "",
                "syncSourceHost" : "",
                "syncSourceId" : -1,
                "infoMessage" : "could not find member to sync from",
                "configVersion" : 1,
                "self" : true,
                "lastHeartbeatMessage" : ""
            },
            {
                "_id" : 1,
                "name" : "10.3.20.142:27017",
                "health" : 1,
                "state" : 2,
                "stateStr" : "SECONDARY",
                "uptime" : 6,
                "optime" : {
                    "ts" : Timestamp(1531902177, 1),
                    "t" : NumberLong(-1)
                },
                "optimeDurable" : {
                    "ts" : Timestamp(1531902177, 1),
                    "t" : NumberLong(-1)
                },
                "optimeDate" : ISODate("2018-07-18T08:22:57Z"),
                "optimeDurableDate" : ISODate("2018-07-18T08:22:57Z"),
                "lastHeartbeat" : ISODate("2018-07-18T08:23:03.949Z"),
                "lastHeartbeatRecv" : ISODate("2018-07-18T08:23:04.102Z"),
                "pingMs" : NumberLong(0),
                "lastHeartbeatMessage" : "",
                "syncingTo" : "",
                "syncSourceHost" : "",
                "syncSourceId" : -1,
                "infoMessage" : "",
                "configVersion" : 1
            },
            {
                "_id" : 2,
                "name" : "10.3.20.143:27017",
                "health" : 1,
                "state" : 2,
                "stateStr" : "SECONDARY",
                "uptime" : 6,
                "optime" : {
                    "ts" : Timestamp(1531902177, 1),
                    "t" : NumberLong(-1)
                },
                "optimeDurable" : {
                    "ts" : Timestamp(1531902177, 1),
                    "t" : NumberLong(-1)
                },
                "optimeDate" : ISODate("2018-07-18T08:22:57Z"),
                "optimeDurableDate" : ISODate("2018-07-18T08:22:57Z"),
                "lastHeartbeat" : ISODate("2018-07-18T08:23:03.949Z"),
                "lastHeartbeatRecv" : ISODate("2018-07-18T08:23:04.103Z"),
                "pingMs" : NumberLong(0),
                "lastHeartbeatMessage" : "",
                "syncingTo" : "",
                "syncSourceHost" : "",
                "syncSourceId" : -1,
                "infoMessage" : "",
                "configVersion" : 1
            }
        ],
        "ok" : 1
    }
    
    

    IV. mongos configuration (10.3.20.14:27018, 10.3.20.51:27018, 10.3.20.54:27018)

    1. Install MongoDB following steps 1-3 of Part I

    2. Reuse the mongo.key from Part I, step 4, as the shared cluster key

    3. Configure mongos

    # egrep -v "^$|^#" /etc/mongos.conf 
    systemLog:
      destination: file
      path: /var/log/mongodb/mongos.log
      logAppend: true
    processManagement:
      fork: true
      pidFilePath: /var/run/mongodb/mongos.pid
    net:
      bindIp: 127.0.0.1,10.3.20.14
      port: 27018
    security:
      keyFile: /var/lib/mongo/mongo.key
    sharding:
      configDB: repdata/10.3.20.14:27017,10.3.20.51:27017,10.3.20.54:27017
    
    # mongos -f /etc/mongos.conf
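    mongos has no storage section because it holds no data of its own; it only routes. After starting it, the process and listener can be verified the same way as with mongod:

    # ps -ef | grep mongos
    # netstat -nltup | grep 27018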
    
    # mongo --host 10.3.20.14 --port 27018
    mongos> use admin
    // Which cluster's credentials are these (configsvr, sharding01, sharding02)?
    // mongos stores no users itself; authentication is handled by the config
    // server replica set, so this is the root user created on the configsvr
    // nodes in Part I.
    mongos> db.auth('root','123456')
    1
    mongos> sh.addShard("sharding01/10.3.20.102:27017,10.3.20.105:27017,10.3.20.132:27017")
    {
        "shardAdded" : "sharding01",
        "ok" : 1,
        "$clusterTime" : {
            "clusterTime" : Timestamp(1531904052, 8),
            "signature" : {
                "hash" : BinData(0,"3jHS/99+ya4pASBp7Pm0JNOBMX8="),
                "keyId" : NumberLong("6579439166424088595")
            }
        },
        "operationTime" : Timestamp(1531904052, 8)
    }
    mongos> sh.addShard("sharding02/10.3.20.141:27017,10.3.20.142:27017,10.3.20.143:27017")
    {
        "shardAdded" : "sharding02",
        "ok" : 1,
        "$clusterTime" : {
            "clusterTime" : Timestamp(1531904104, 4),
            "signature" : {
                "hash" : BinData(0,"XiVKlJFOagVCUpd1C/coLrjG2JY="),
                "keyId" : NumberLong("6579439166424088595")
            }
        },
        "operationTime" : Timestamp(1531904104, 2)
    }
    mongos> sh.status()
    --- Sharding Status --- 
      sharding version: {
        "_id" : 1,
        "minCompatibleVersion" : 5,
        "currentVersion" : 6,
        "clusterId" : ObjectId("5b4edd10f9eb70d9ba075cb4")
      }
      shards:
            {  "_id" : "sharding01",  "host" : "sharding01/10.3.20.102:27017,10.3.20.105:27017,10.3.20.132:27017",  "state" : 1 }
            {  "_id" : "sharding02",  "host" : "sharding02/10.3.20.141:27017,10.3.20.142:27017,10.3.20.143:27017",  "state" : 1 }
      active mongoses:
            "3.6.6" : 3
      autosplit:
            Currently enabled: yes
      balancer:
            Currently enabled:  yes
            Currently running:  no
            Failed balancer rounds in last 5 attempts:  0
            Migration Results for the last 24 hours: 
                    No recent migrations
      databases:
            {  "_id" : "config",  "primary" : "config",  "partitioned" : true }
    
    
    
    

    V. Testing the sharded cluster

    # mongo --host 10.3.20.14 --port 27018
    MongoDB shell version v3.6.6
    connecting to: mongodb://10.3.20.14:27018/
    MongoDB server version: 3.6.6
    mongos> show dbs
    2018-07-18T17:40:02.938+0800 E QUERY    [thread1] Error: listDatabases failed:{
        "ok" : 0,
        "errmsg" : "not authorized on admin to execute command { listDatabases: 1.0, $clusterTime: { clusterTime: Timestamp(1531906797, 2), signature: { hash: BinData(0, 6FCC8A5814C82C67692A8AE43619C749DAD936D2), keyId: 6579439166424088595 } }, $db: \"admin\" }",
        "code" : 13,
        "codeName" : "Unauthorized",
        "$clusterTime" : {
            "clusterTime" : Timestamp(1531906797, 2),
            "signature" : {
                "hash" : BinData(0,"b8yKWBTILGdpKorkNhnHSdrZNtI="),
                "keyId" : NumberLong("6579439166424088595")
            }
        },
        "operationTime" : Timestamp(1531906797, 2)
    } :
    _getErrorWithCode@src/mongo/shell/utils.js:25:13
    Mongo.prototype.getDBs@src/mongo/shell/mongo.js:65:1
    shellHelper.show@src/mongo/shell/utils.js:849:19
    shellHelper@src/mongo/shell/utils.js:739:15
    @(shellhelp2):1:1
    mongos> use admin
    switched to db admin
    mongos> db.auth('root','123456')
    1
    mongos> show dbs
    admin   0.000GB
    config  0.001GB
    testdb  0.000GB
    
    mongos> db.runCommand({enablesharding: "testdb"})
    {
        "ok" : 1,
        "$clusterTime" : {
            "clusterTime" : Timestamp(1531904238, 9),
            "signature" : {
                "hash" : BinData(0,"vKr+hcrTWpHzUwTqvJJzV8OOFNs="),
                "keyId" : NumberLong("6579439166424088595")
            }
        },
        "operationTime" : Timestamp(1531904238, 9)
    }
    mongos> db.runCommand({shardcollection: "testdb.table1", key: {id: 1}})
    {
        "collectionsharded" : "testdb.table1",
        "collectionUUID" : UUID("baec305a-3609-4a41-87dc-36c60dd840ae"),
        "ok" : 1,
        "$clusterTime" : {
            "clusterTime" : Timestamp(1531904291, 14),
            "signature" : {
                "hash" : BinData(0,"DgOlbRTK1vRlmPYaimfbYLB8GtU="),
                "keyId" : NumberLong("6579439166424088595")
            }
        },
        "operationTime" : Timestamp(1531904291, 14)
    }
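    Note that { id: 1 } is a ranged shard key: if id grows monotonically, all new inserts land in the highest chunk on one shard until splits catch up. A hashed shard key spreads inserts immediately; a sketch, using a hypothetical collection testdb.table2:

    mongos> sh.shardCollection("testdb.table2", { id: "hashed" })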
    
    mongos> db.runCommand({"enablesharding" : "shardb"})
    {
        "ok" : 1,
        "$clusterTime" : {
            "clusterTime" : Timestamp(1531907043, 6),
            "signature" : {
                "hash" : BinData(0,"fz2aCI/h/mej6FJptpndtZzb8FY="),
                "keyId" : NumberLong("6579439166424088595")
            }
        },
        "operationTime" : Timestamp(1531907043, 6)
    }
    mongos> db.runCommand({"shardcollection" : "shardb.coll",key : {_id: 1}})
    {
        "collectionsharded" : "shardb.coll",
        "collectionUUID" : UUID("5a377dde-a98e-4362-95af-8f01d076c5bf"),
        "ok" : 1,
        "$clusterTime" : {
            "clusterTime" : Timestamp(1531907053, 13),
            "signature" : {
                "hash" : BinData(0,"/CyVOPY7x8ZkvlR/6rc+jJ7YetY="),
                "keyId" : NumberLong("6579439166424088595")
            }
        },
        "operationTime" : Timestamp(1531907053, 13)
    }
    
    // Note: switch to the target database first; run from the admin database as
    // in the original transcript, these documents would land in admin.coll
    // rather than the sharded shardb.coll
    mongos> use shardb
    mongos> for(var i=0;i<10000;i++){db.coll.insert({_id:i+1,content:'test'+i})}
    WriteResult({ "nInserted" : 1 })
    mongos> sh.status()
    --- Sharding Status --- 
      sharding version: {
        "_id" : 1,
        "minCompatibleVersion" : 5,
        "currentVersion" : 6,
        "clusterId" : ObjectId("5b4edd10f9eb70d9ba075cb4")
      }
      shards:
            {  "_id" : "sharding01",  "host" : "sharding01/10.3.20.102:27017,10.3.20.105:27017,10.3.20.132:27017",  "state" : 1 }
            {  "_id" : "sharding02",  "host" : "sharding02/10.3.20.141:27017,10.3.20.142:27017,10.3.20.143:27017",  "state" : 1 }
      active mongoses:
            "3.6.6" : 3
      autosplit:
            Currently enabled: yes
      balancer:
            Currently enabled:  yes
            Currently running:  no
            Failed balancer rounds in last 5 attempts:  0
            Migration Results for the last 24 hours: 
                    No recent migrations
      databases:
            {  "_id" : "config",  "primary" : "config",  "partitioned" : true }
                    config.system.sessions
                            shard key: { "_id" : 1 }
                            unique: false
                            balancing: true
                            chunks:
                                    sharding01  1
                            { "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : sharding01 Timestamp(1, 0) 
            {  "_id" : "shardb",  "primary" : "sharding01",  "partitioned" : true }
                    shardb.coll
                            shard key: { "_id" : 1 }
                            unique: false
                            balancing: true
                            chunks:
                                    sharding01  1
                            { "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : sharding01 Timestamp(1, 0) 
            {  "_id" : "testdb",  "primary" : "sharding02",  "partitioned" : true }
                    testdb.table1
                            shard key: { "id" : 1 }
                            unique: false
                            balancing: true
                            chunks:
                                    sharding02  1
                            { "id" : { "$minKey" : 1 } } -->> { "id" : { "$maxKey" : 1 } } on : sharding02 Timestamp(1, 0) 
    
    
    // Note: likewise switch to testdb first, and include the shard key field
    // "id" -- 3.6 requires the shard key in every insert into a sharded collection
    mongos> use testdb
    mongos> for(var i=0;i<10000;i++){db.table1.insert({_id:i+1,id:i+1,content:'test'+i})}
    WriteResult({ "nInserted" : 1 })
    mongos> sh.status()
    --- Sharding Status --- 
      sharding version: {
        "_id" : 1,
        "minCompatibleVersion" : 5,
        "currentVersion" : 6,
        "clusterId" : ObjectId("5b4edd10f9eb70d9ba075cb4")
      }
      shards:
            {  "_id" : "sharding01",  "host" : "sharding01/10.3.20.102:27017,10.3.20.105:27017,10.3.20.132:27017",  "state" : 1 }
            {  "_id" : "sharding02",  "host" : "sharding02/10.3.20.141:27017,10.3.20.142:27017,10.3.20.143:27017",  "state" : 1 }
      active mongoses:
            "3.6.6" : 3
      autosplit:
            Currently enabled: yes
      balancer:
            Currently enabled:  yes
            Currently running:  no
            Failed balancer rounds in last 5 attempts:  0
            Migration Results for the last 24 hours: 
                    No recent migrations
      databases:
            {  "_id" : "config",  "primary" : "config",  "partitioned" : true }
                    config.system.sessions
                            shard key: { "_id" : 1 }
                            unique: false
                            balancing: true
                            chunks:
                                    sharding01  1
                            { "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : sharding01 Timestamp(1, 0) 
            {  "_id" : "shardb",  "primary" : "sharding01",  "partitioned" : true }
                    shardb.coll
                            shard key: { "_id" : 1 }
                            unique: false
                            balancing: true
                            chunks:
                                    sharding01  1
                            { "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : sharding01 Timestamp(1, 0) 
            {  "_id" : "testdb",  "primary" : "sharding02",  "partitioned" : true }
                    testdb.table1
                            shard key: { "id" : 1 }
                            unique: false
                            balancing: true
                            chunks:
                                    sharding02  1
                            { "id" : { "$minKey" : 1 } } -->> { "id" : { "$maxKey" : 1 } } on : sharding02 Timestamp(1, 0) 
    
    
    
