1. Concepts
A MongoDB replica set is a way of building a database cluster out of multiple nodes, with one primary and several secondaries.
1.1 Purpose
The primary goal is to quickly replicate the oplog from the primary to the secondaries, and to hold an election when the primary fails, keeping the database highly available.
Beyond that, a replica set also enables:
- Data distribution: copy data from one region to another, reducing read latency in the remote region
- Read/write separation: different kinds of load are served by different nodes (see the read-preference sketch after this list)
- Off-site disaster recovery: fail over quickly to another site when a data center goes down
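As a concrete illustration of read/write separation, here is a minimal mongo shell sketch; it assumes the shell is connected through a replica-set connection string to the rs0 set deployed later in this article, and the test collection is just an example:
//connect with a replica-set-aware connection, e.g.
//  mongo "mongodb://localhost:28017,localhost:28018,localhost:28019/test?replicaSet=rs0"
db.getMongo().setReadPref("secondaryPreferred")   //route reads to a secondary when one is available
db.test.find()                                    //writes still always go to the primary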
1.2 Data replication process
When a write operation (an insert, update, or delete) reaches the primary, the change it makes to the data is recorded (after some necessary transformation); these records form the oplog.
A secondary opens a tailable cursor on the primary to continuously fetch new oplog entries as they arrive and replays them against its own data, keeping itself consistent with the primary.
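To make the mechanism concrete, here is a minimal sketch of tailing the oplog by hand in the mongo shell, run against the primary. The local.oplog.rs collection and the cursor options are real, but the starting point and the print loop are only illustrative; the server's internal replication logic does much more than this.
//the oplog lives in the local database as a capped collection
var oplog = db.getSiblingDB("local").oplog.rs
//start from the newest existing entry (an arbitrary choice for this sketch)
var lastTs = oplog.find().sort({ $natural: -1 }).limit(1).next().ts
//tailable + awaitData keeps the cursor open and waits for new entries
var cur = oplog.find({ ts: { $gt: lastTs } })
               .addOption(DBQuery.Option.tailable)
               .addOption(DBQuery.Option.awaitData)
while (cur.hasNext()) {
    printjson(cur.next())   //each document is one oplog entry (op, ns, o, ts, ...)
}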
1.3 Election process
- Voting members exchange heartbeats with each other pairwise (see the settings sketch after this list);
- A member is considered unreachable after 5 missed heartbeats;
- If the unreachable member is the primary, the secondaries start an election to choose a new primary;
- If the unreachable member is a secondary, no election is triggered;
- Elections are based on the Raft consensus algorithm; a successful election requires that a majority of the voting members are alive;
- A replica set can contain up to 50 members, but at most 7 of them can vote.
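The heartbeat interval and election timeout behind these rules are part of the replica set configuration; a minimal mongo shell sketch for inspecting them (the defaults noted in the comments match the rs.status() output shown later in this article):
var cfg = rs.conf()
printjson(cfg.settings.heartbeatIntervalMillis)   //2000 ms by default
printjson(cfg.settings.electionTimeoutMillis)     //10000 ms by default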
Factors that affect an election:
- A majority of the members in the cluster must be alive;
- The member elected as primary must:
  1. be able to connect to a majority of the members
  2. have a sufficiently recent oplog
  3. have a higher priority, if priorities are configured (see the sketch after this list)
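A minimal sketch of configuring member priorities in the mongo shell, assuming the three-member set rs0 deployed in the next section (the priority values themselves are only an example):
cfg = rs.conf()
cfg.members[0].priority = 2     //preferred primary
cfg.members[1].priority = 1
cfg.members[2].priority = 0.5   //least preferred, but still electable
rs.reconfig(cfg)                //run this on the current primary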
2. Deployment
Prepare 3 configuration files: mongo1.conf, mongo2.conf, mongo3.conf
Set their ports to 28017/28018/28019 respectively
3 data directories:
/root/mongo/data/db1
/root/mongo/data/db2
/root/mongo/data/db3
3 log file paths:
/root/mongo/data/db1/mongod.log
/root/mongo/data/db2/mongod.log
/root/mongo/data/db3/mongod.log
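The data directories must exist before mongod starts; one way to create them from the shell, using the paths above:
mkdir -p /root/mongo/data/db1 /root/mongo/data/db2 /root/mongo/data/db3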
Adjust the settings below in each file accordingly (the values shown are for the first node):
systemLog:
  destination: file
  path: /root/mongo/data/db1/mongod.log # log path
  logAppend: true
storage:
  dbPath: /root/mongo/data/db1 # data directory
net:
  bindIp: 0.0.0.0
  port: 28017 # port
replication:
  replSetName: rs0
processManagement:
  fork: true
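For the other two nodes only the log path, data directory, and port differ; for example, mongo2.conf would be:
systemLog:
  destination: file
  path: /root/mongo/data/db2/mongod.log # log path
  logAppend: true
storage:
  dbPath: /root/mongo/data/db2 # data directory
net:
  bindIp: 0.0.0.0
  port: 28018 # port
replication:
  replSetName: rs0
processManagement:
  fork: true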
Start the three mongod instances:
mongod -f mongo1.conf
mongod -f mongo2.conf
mongod -f mongo3.conf
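Because fork: true makes mongod daemonize, it is worth confirming that all three processes actually came up; one possible check:
ps -ef | grep mongod   # should show three mongod processes (plus the grep itself)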
//connect to the first mongod
mongo --port 28017
//in the mongo shell, initialize the replica set and add the three members
rs.initiate({
  _id: "rs0",
  members: [{
    _id: 0,
    host: "localhost:28017"
  },{
    _id: 1,
    host: "localhost:28018"
  },{
    _id: 2,
    host: "localhost:28019"
  }]
})
//still in the mongo shell, check the status of the whole replica set
rs.status()
//sample output; field meanings: https://docs.mongodb.com/manual/reference/command/replSetGetStatus/#dbcmd.replSetGetStatus
{
"set" : "rs0",
"date" : ISODate("2021-01-19T17:57:06.050Z"),//复制集当前时间,可以通过和members[n].lastHeartbeat比对来确定节点延迟
"myState" : 1,//0-10来代表节点不同状态,具体意思:https://docs.mongodb.com/manual/reference/replica-states/
"term" : NumberLong(1),//投票计数器
"syncSourceHost" : "",
"syncSourceId" : -1,
"heartbeatIntervalMillis" : NumberLong(2000),
"majorityVoteCount" : 2,
"writeMajorityCount" : 2,
"votingMembersCount" : 3,
"writableVotingMembersCount" : 3,
"optimes" : {
"lastCommittedOpTime" : {//当前节点视角,最后已写入大多数副本集成员的最新操作的信息
"ts" : Timestamp(1611079016, 1),
"t" : NumberLong(1)
},
"lastCommittedWallTime" : ISODate("2021-01-19T17:56:56.332Z"),
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1611079016, 1),
"t" : NumberLong(1)
},
"readConcernMajorityWallTime" : ISODate("2021-01-19T17:56:56.332Z"),
"appliedOpTime" : {//最后应用于当前节点操作信息
"ts" : Timestamp(1611079016, 1),
"t" : NumberLong(1)
},
"durableOpTime" : {//最后写入当前节点日志操作信息
"ts" : Timestamp(1611079016, 1),
"t" : NumberLong(1)
},
"lastAppliedWallTime" : ISODate("2021-01-19T17:56:56.332Z"),
"lastDurableWallTime" : ISODate("2021-01-19T17:56:56.332Z")
},
"lastStableRecoveryTimestamp" : Timestamp(1611078996, 1),
"electionCandidateMetrics" : {
"lastElectionReason" : "electionTimeout",
"lastElectionDate" : ISODate("2021-01-19T17:51:36.306Z"),
"electionTerm" : NumberLong(1),
"lastCommittedOpTimeAtElection" : {
"ts" : Timestamp(0, 0),
"t" : NumberLong(-1)
},
"lastSeenOpTimeAtElection" : {
"ts" : Timestamp(1611078684, 1),
"t" : NumberLong(-1)
},
"numVotesNeeded" : 2,
"priorityAtElection" : 1,
"electionTimeoutMillis" : NumberLong(10000),
"numCatchUpOps" : NumberLong(0),
"newTermStartDate" : ISODate("2021-01-19T17:51:36.319Z"),
"wMajorityWriteAvailabilityDate" : ISODate("2021-01-19T17:51:37.461Z")
},
"members" : [
{
"_id" : 0,
"name" : "localhost:28017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 783,
"optime" : {
"ts" : Timestamp(1611079016, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2021-01-19T17:56:56Z"),
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"electionTime" : Timestamp(1611078696, 1),
"electionDate" : ISODate("2021-01-19T17:51:36Z"),
"configVersion" : 1,
"configTerm" : 1,
"self" : true,
"lastHeartbeatMessage" : ""
},
{
"_id" : 1,
"name" : "localhost:28018",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 341,
"optime" : {
"ts" : Timestamp(1611079016, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1611079016, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2021-01-19T17:56:56Z"),
"optimeDurableDate" : ISODate("2021-01-19T17:56:56Z"),
"lastHeartbeat" : ISODate("2021-01-19T17:57:04.311Z"),
"lastHeartbeatRecv" : ISODate("2021-01-19T17:57:05.814Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncSourceHost" : "localhost:28017",
"syncSourceId" : 0,
"infoMessage" : "",
"configVersion" : 1,
"configTerm" : 1
},
{
"_id" : 2,
"name" : "localhost:28019",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 341,
"optime" : {
"ts" : Timestamp(1611079016, 1),
"t" : NumberLong(1)
},
"optimeDurable" : {
"ts" : Timestamp(1611079016, 1),
"t" : NumberLong(1)
},
"optimeDate" : ISODate("2021-01-19T17:56:56Z"),
"optimeDurableDate" : ISODate("2021-01-19T17:56:56Z"),
"lastHeartbeat" : ISODate("2021-01-19T17:57:04.312Z"),
"lastHeartbeatRecv" : ISODate("2021-01-19T17:57:05.815Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncSourceHost" : "localhost:28017",
"syncSourceId" : 0,
"infoMessage" : "",
"configVersion" : 1,
"configTerm" : 1
}
],
"ok" : 1,
"$clusterTime" : {
"clusterTime" : Timestamp(1611079016, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
},
"operationTime" : Timestamp(1611079016, 1)
}
By default a secondary acts only as a backup and rejects read operations; to make it readable, run on the secondary:
rs.slaveOk()      //older versions
rs.secondaryOk()  //newer versions
Try writing on the primary and then reading from a secondary:
//primary
mongo localhost:28017
db.test.insert({ a:1 })
//secondary
mongo localhost:28018
db.test.find()
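As an optional follow-up, a sketch of inserting with a majority write concern, so the write is acknowledged only after it has replicated to a majority of the members (the document and the 5-second timeout are illustrative):
db.test.insert({ a: 2 }, { writeConcern: { w: "majority", wtimeout: 5000 } })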