Overview
raftexample is a sample program shipped with etcd; reading it helps a great deal in understanding how etcd works as a whole. It is built around three core components: httpKVAPI, raftNode, and kvstore.
Data Structures
httpKVAPI
type httpKVAPI struct {
    store       *kvstore
    confChangeC chan<- raftpb.ConfChange
}
The struct contains two fields:
- store: the underlying kvstore where the data ultimately lands
- confChangeC: a channel used to pass cluster-membership change requests
The rest is straightforward: the handler serves HTTP, accepting requests and forwarding them toward the raftNode. The PUT branch looks like this:
case r.Method == "PUT":
    v, err := ioutil.ReadAll(r.Body)
    if err != nil {
        log.Printf("Failed to read on PUT (%v)\n", err)
        http.Error(w, "Failed on PUT", http.StatusBadRequest)
        return
    }
    // forward the received key-value update into the propose channel
    h.store.Propose(key, string(v))
    // Optimistic-- no waiting for ack from raft. Value is not yet
    // committed so a subsequent GET on the key may return old value
    w.WriteHeader(http.StatusNoContent)
Propose gob-encodes the key-value pair and pushes it onto the proposeC channel that the raftNode consumes:
func (s *kvstore) Propose(k string, v string) {
    var buf bytes.Buffer
    if err := gob.NewEncoder(&buf).Encode(kv{k, v}); err != nil {
        log.Fatal(err)
    }
    s.proposeC <- buf.String()
}
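To exercise this API end to end, here is a minimal, self-contained client sketch. This is my own example, not part of raftexample; it assumes a node is serving on 127.0.0.1:9121, the default -port in main:
package main

import (
    "fmt"
    "io/ioutil"
    "net/http"
    "strings"
)

func main() {
    // PUT proposes the value through raft; the handler acknowledges
    // before the entry is committed (see the "Optimistic" comment above).
    req, err := http.NewRequest(http.MethodPut, "http://127.0.0.1:9121/my-key", strings.NewReader("hello"))
    if err != nil {
        panic(err)
    }
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    resp.Body.Close()

    // GET reads from the node's local kvstore; because the PUT is
    // acknowledged optimistically, this can briefly return a stale value.
    resp, err = http.Get("http://127.0.0.1:9121/my-key")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    val, _ := ioutil.ReadAll(resp.Body)
    fmt.Println(string(val))
}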
raftNode
raftNode is the central component, bridging the layers above and below. It receives the requests coming from the HTTP layer, and because it wraps the etcd-raft module, the upper layer never has to talk to raft directly. It also manages snapshots and the WAL, and finally publishes committed entries on the commit channel so that the data ends up in the kvstore. Its definition:
type raftNode struct {
    proposeC    <-chan string            // proposed messages (k,v)
    confChangeC <-chan raftpb.ConfChange // proposed cluster config changes
    commitC     chan<- *string           // entries committed to log (k,v)
    errorC      chan<- error             // errors from raft session

    id          int      // client ID for raft session
    peers       []string // raft peer URLs
    join        bool     // node is joining an existing cluster
    waldir      string   // path to WAL directory
    snapdir     string   // path to snapshot directory
    getSnapshot func() ([]byte, error)
    lastIndex   uint64 // index of log at start

    confState     raftpb.ConfState
    snapshotIndex uint64
    appliedIndex  uint64

    // raft backing for the commit/error channel
    node        raft.Node
    raftStorage *raft.MemoryStorage
    wal         *wal.WAL

    snapshotter      *snap.Snapshotter
    snapshotterReady chan *snap.Snapshotter // signals when snapshotter is ready

    snapCount uint64
    transport *rafthttp.Transport
    stopc     chan struct{} // signals proposal channel closed
    httpstopc chan struct{} // signals http server to shutdown
    httpdonec chan struct{} // signals http server shutdown complete
}
Startup Flow
func main() {
    cluster := flag.String("cluster", "http://127.0.0.1:9021", "comma separated cluster peers")
    id := flag.Int("id", 1, "node ID")
    kvport := flag.Int("port", 9121, "key-value server port")
    join := flag.Bool("join", false, "join an existing cluster")
    flag.Parse()

    proposeC := make(chan string)
    defer close(proposeC)
    confChangeC := make(chan raftpb.ConfChange)
    defer close(confChangeC)

    // raft provides a commit stream for the proposals from the http api
    var kvs *kvstore
    getSnapshot := func() ([]byte, error) { return kvs.getSnapshot() }
    commitC, errorC, snapshotterReady := newRaftNode(*id, strings.Split(*cluster, ","), *join, getSnapshot, proposeC, confChangeC)

    kvs = newKVStore(<-snapshotterReady, proposeC, commitC, errorC)

    // the key-value http handler will propose updates to raft
    serveHttpKVAPI(kvs, *kvport, confChangeC, errorC)
}
There are three core steps:
1. Start a raftNode
newRaftNode constructs the raftNode instance and then launches its startRaft method in a goroutine.
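For context, here is a paraphrased sketch of newRaftNode (reconstructed from the example source, not verbatim): it wires the channels together, derives the WAL and snapshot directories from the node ID, and defers the rest of the setup to startRaft.
func newRaftNode(id int, peers []string, join bool, getSnapshot func() ([]byte, error),
    proposeC <-chan string, confChangeC <-chan raftpb.ConfChange,
) (<-chan *string, <-chan error, <-chan *snap.Snapshotter) {
    commitC := make(chan *string)
    errorC := make(chan error)

    rc := &raftNode{
        proposeC:         proposeC,
        confChangeC:      confChangeC,
        commitC:          commitC,
        errorC:           errorC,
        id:               id,
        peers:            peers,
        join:             join,
        waldir:           fmt.Sprintf("raftexample-%d", id),
        snapdir:          fmt.Sprintf("raftexample-%d-snap", id),
        getSnapshot:      getSnapshot,
        snapCount:        defaultSnapshotCount,
        stopc:            make(chan struct{}),
        httpstopc:        make(chan struct{}),
        httpdonec:        make(chan struct{}),
        snapshotterReady: make(chan *snap.Snapshotter, 1),
        // raftStorage, wal, snapshotter, etc. are populated in startRaft
    }
    go rc.startRaft()
    return commitC, errorC, rc.snapshotterReady
}
startRaft itself: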
func (rc *raftNode) startRaft() {
    if !fileutil.Exist(rc.snapdir) {
        if err := os.Mkdir(rc.snapdir, 0750); err != nil {
            log.Fatalf("raftexample: cannot create dir for snapshot (%v)", err)
        }
    }
    rc.snapshotter = snap.New(zap.NewExample(), rc.snapdir)
    rc.snapshotterReady <- rc.snapshotter

    oldwal := wal.Exist(rc.waldir)
    rc.wal = rc.replayWAL()

    rpeers := make([]raft.Peer, len(rc.peers))
    for i := range rpeers {
        rpeers[i] = raft.Peer{ID: uint64(i + 1)}
    }
    c := &raft.Config{
        ID:                        uint64(rc.id),
        ElectionTick:              10,
        HeartbeatTick:             1,
        Storage:                   rc.raftStorage,
        MaxSizePerMsg:             1024 * 1024,
        MaxInflightMsgs:           256,
        MaxUncommittedEntriesSize: 1 << 30,
    }

    if oldwal {
        rc.node = raft.RestartNode(c)
    } else {
        startPeers := rpeers
        if rc.join {
            startPeers = nil
        }
        rc.node = raft.StartNode(c, startPeers)
    }

    rc.transport = &rafthttp.Transport{
        Logger:      zap.NewExample(),
        ID:          types.ID(rc.id),
        ClusterID:   0x1000,
        Raft:        rc,
        ServerStats: stats.NewServerStats("", ""),
        LeaderStats: stats.NewLeaderStats(strconv.Itoa(rc.id)),
        ErrorC:      make(chan error),
    }

    rc.transport.Start()
    for i := range rc.peers {
        if i+1 != rc.id {
            rc.transport.AddPeer(types.ID(i+1), []string{rc.peers[i]})
        }
    }

    // listens on this node's address and handles communication with the other nodes
    go rc.serveRaft()
    // 1. forwards proposals from the upper layer into the etcd-raft module
    // 2. processes the data and related operations etcd-raft returns to the upper layer
    go rc.serveChannels()
}
startRaft in turn launches two more goroutines, running serveRaft and serveChannels:
- serveRaft: mainly responsible for the communication with the other nodes; a sketch follows this list
- serveChannels: bridges the proposal and commit channels between the upper layer and etcd-raft; its full body is shown after the serveRaft sketch
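For reference, a paraphrased sketch of serveRaft (reconstructed from the example, not verbatim; newStoppableListener is a small raftexample helper that stops accepting connections when httpstopc is closed):
func (rc *raftNode) serveRaft() {
    url, err := url.Parse(rc.peers[rc.id-1])
    if err != nil {
        log.Fatalf("raftexample: Failed parsing URL (%v)", err)
    }
    // serve the rafthttp transport's handler on this node's peer address
    ln, err := newStoppableListener(url.Host, rc.httpstopc)
    if err != nil {
        log.Fatalf("raftexample: Failed to listen rafthttp (%v)", err)
    }
    err = (&http.Server{Handler: rc.transport.Handler()}).Serve(ln)
    select {
    case <-rc.httpstopc:
    default:
        log.Fatalf("raftexample: Failed to serve rafthttp (%v)", err)
    }
    close(rc.httpdonec)
}
serveChannels does most of the work: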
func (rc *raftNode) serveChannels() {
    snap, err := rc.raftStorage.Snapshot()
    if err != nil {
        panic(err)
    }
    rc.confState = snap.Metadata.ConfState
    rc.snapshotIndex = snap.Metadata.Index
    rc.appliedIndex = snap.Metadata.Index

    defer rc.wal.Close()

    ticker := time.NewTicker(100 * time.Millisecond)
    defer ticker.Stop()

    // send proposals over raft
    go func() {
        confChangeCount := uint64(0)
        for rc.proposeC != nil && rc.confChangeC != nil {
            select {
            case prop, ok := <-rc.proposeC:
                fmt.Println("prop value is:", prop)
                if !ok {
                    rc.proposeC = nil
                } else {
                    // blocks until accepted by raft state machine
                    rc.node.Propose(context.TODO(), []byte(prop))
                }
            case cc, ok := <-rc.confChangeC:
                if !ok {
                    rc.confChangeC = nil
                } else {
                    confChangeCount++
                    cc.ID = confChangeCount
                    rc.node.ProposeConfChange(context.TODO(), cc)
                }
            }
        }
        // client closed channel; shutdown raft if not already
        close(rc.stopc)
    }()

    // event loop on raft state machine updates
    for {
        select {
        case <-ticker.C:
            rc.node.Tick()

        // store raft entries to wal, then publish over commit channel
        case rd := <-rc.node.Ready():
            fmt.Println("node is ready, rd is ", rd.Entries)
            rc.wal.Save(rd.HardState, rd.Entries)
            if !raft.IsEmptySnap(rd.Snapshot) {
                rc.saveSnap(rd.Snapshot)                  // write the new snapshot to the snapshot file
                rc.raftStorage.ApplySnapshot(rd.Snapshot) // apply the new snapshot to the in-memory raft storage
                rc.publishSnapshot(rd.Snapshot)           // notify the upper layer to load the new snapshot
            }
            rc.raftStorage.Append(rd.Entries)
            rc.transport.Send(rd.Messages) // send the pending messages to their target nodes
            // apply the committed, not-yet-applied entries to the upper layer's state machine
            if ok := rc.publishEntries(rc.entriesToApply(rd.CommittedEntries)); !ok {
                rc.stop()
                return
            }
            // possibly trigger log compaction
            rc.maybeTriggerSnapshot()
            // the upper layer is done with this Ready; tell etcd-raft to prepare the next one
            rc.node.Advance()
        case err := <-rc.transport.ErrorC:
            rc.writeError(err)
            return
        case <-rc.stopc:
            rc.stop()
            return
        }
    }
}
This method does two jobs:
1. It carries communication from the upper layer down into the etcd-raft module.
2. It handles the data etcd-raft returns to the upper layer, along with the related bookkeeping.
The second job is the event loop:
for {
    select {
    case <-ticker.C:
        rc.node.Tick()

    // store raft entries to wal, then publish over commit channel
    case rd := <-rc.node.Ready():
        fmt.Println("node is ready, rd is ", rd.Entries)
        rc.wal.Save(rd.HardState, rd.Entries)
        if !raft.IsEmptySnap(rd.Snapshot) {
            rc.saveSnap(rd.Snapshot)                  // write the new snapshot to the snapshot file
            rc.raftStorage.ApplySnapshot(rd.Snapshot) // apply the new snapshot to the in-memory raft storage
            rc.publishSnapshot(rd.Snapshot)           // notify the upper layer to load the new snapshot
        }
        rc.raftStorage.Append(rd.Entries)
        rc.transport.Send(rd.Messages) // send the pending messages to their target nodes
        // apply the committed, not-yet-applied entries to the upper layer's state machine
        if ok := rc.publishEntries(rc.entriesToApply(rd.CommittedEntries)); !ok {
            rc.stop()
            return
        }
        // possibly trigger log compaction
        rc.maybeTriggerSnapshot()
        // the upper layer is done with this Ready; tell etcd-raft to prepare the next one
        rc.node.Advance()
    case err := <-rc.transport.ErrorC:
        rc.writeError(err)
        return
    case <-rc.stopc:
        rc.stop()
        return
    }
}
Looking at this loop, it services two channels: ticker.C and node.Ready().
Ready() delivers a batch of pending work whenever the etcd-raft state machine has updates. For each Ready, the node saves the hard state and new entries to the WAL, appends the entries to the in-memory storage, sends the outgoing messages to the other peers, and calls publishEntries so the committed entries are applied to the upper-layer state machine. Finally it calls Advance to tell the underlying etcd-raft module that this Ready has been consumed, so the corresponding unstable records can be dropped.
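For orientation, these are the Ready fields the loop consumes, abridged from etcd-raft's raft.Ready type (the comments are mine; fields not used here, such as SoftState and ReadStates, are omitted):
type Ready struct {
    raftpb.HardState                  // term/vote/commit state that must be persisted before messages go out
    Entries          []raftpb.Entry   // unstable entries to append to the WAL and MemoryStorage
    Snapshot         raftpb.Snapshot  // snapshot to persist and apply, if non-empty
    CommittedEntries []raftpb.Entry   // entries known to be committed, ready to apply to the state machine
    Messages         []raftpb.Message // messages to send to peers once Entries are persisted
}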
The core of publishEntries:
s := string(ents[i].Data)
select {
case rc.commitC <- &s:
case <-rc.stopc:
    return false
}

// special nil commit to signal replay has finished
if ents[i].Index == rc.lastIndex {
    select {
    case rc.commitC <- nil:
    case <-rc.stopc:
        return false
    }
}
After the entries have been saved to the WAL, each committed value is sent on the commit channel so the kvstore can store it; a nil value tells the kvstore that log replay has finished, or that a snapshot should be loaded and replayed. The kvstore side of this logic is covered below.
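Note that the event loop filters rd.CommittedEntries through entriesToApply before publishing. Paraphrased from the example (not verbatim), it drops entries at or below appliedIndex so nothing is applied twice:
func (rc *raftNode) entriesToApply(ents []raftpb.Entry) (nents []raftpb.Entry) {
    if len(ents) == 0 {
        return ents
    }
    firstIdx := ents[0].Index
    if firstIdx > rc.appliedIndex+1 {
        // a gap between what we applied and what raft hands us is fatal
        log.Fatalf("first index of committed entry[%d] should <= progress.appliedIndex[%d]+1", firstIdx, rc.appliedIndex)
    }
    // keep only the suffix that has not been applied yet
    if rc.appliedIndex-firstIdx+1 < uint64(len(ents)) {
        nents = ents[rc.appliedIndex-firstIdx+1:]
    }
    return nents
}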
The ticker fires every 100ms and drives rc.node.Tick(), which advances raft's logical clock; election and heartbeat timeouts are counted in these ticks. With the Config shown earlier (ElectionTick: 10, HeartbeatTick: 1), that works out to an election timeout of roughly 10 × 100ms = 1s and a leader heartbeat every 100ms.
The other half of serveChannels is the proposal-forwarding goroutine:
// send proposals over raft
go func() {
    confChangeCount := uint64(0)
    for rc.proposeC != nil && rc.confChangeC != nil {
        select {
        case prop, ok := <-rc.proposeC:
            if !ok {
                rc.proposeC = nil
            } else {
                // blocks until accepted by raft state machine
                rc.node.Propose(context.TODO(), []byte(prop))
            }
        case cc, ok := <-rc.confChangeC:
            if !ok {
                rc.confChangeC = nil
            } else {
                confChangeCount++
                cc.ID = confChangeCount
                rc.node.ProposeConfChange(context.TODO(), cc)
            }
        }
    }
    // client closed channel; shutdown raft if not already
    close(rc.stopc)
}()
This goroutine consumes the requests that the httpKVAPI described earlier produced: key-value proposals arrive on proposeC and membership changes on confChangeC, and they are handed to etcd-raft via Propose and ProposeConfChange respectively.
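For completeness, this is roughly how httpKVAPI feeds confChangeC; a paraphrased sketch of the handler's POST branch (not verbatim; the request path carries the node ID and the body carries the new peer's URL):
case r.Method == "POST":
    url, err := ioutil.ReadAll(r.Body)
    if err != nil {
        log.Printf("Failed to read on POST (%v)\n", err)
        http.Error(w, "Failed on POST", http.StatusBadRequest)
        return
    }
    nodeId, err := strconv.ParseUint(key[1:], 0, 64)
    if err != nil {
        log.Printf("Failed to convert ID for conf change (%v)\n", err)
        http.Error(w, "Failed on POST", http.StatusBadRequest)
        return
    }
    cc := raftpb.ConfChange{
        Type:    raftpb.ConfChangeAddNode,
        NodeID:  nodeId,
        Context: url,
    }
    h.confChangeC <- cc
    // as with PUT, optimistic: no waiting for the conf change to commit
    w.WriteHeader(http.StatusNoContent)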
2. Create a kvstore (newKVStore hands the snapshotter and the propose, commit, and error channels to the store and starts readCommits)
3. Start the HTTP API server (serveHttpKVAPI) to accept client requests
kvstore
type kvstore struct {
    proposeC    chan<- string // channel for proposing updates
    mu          sync.RWMutex
    kvStore     map[string]string // current committed key-value pairs
    snapshotter *snap.Snapshotter
}
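Reads go through Lookup, which the GET branch of the HTTP handler calls; it is a plain read-locked map access (paraphrased from the example):
func (s *kvstore) Lookup(key string) (string, bool) {
    s.mu.RLock()
    defer s.mu.RUnlock()
    v, ok := s.kvStore[key]
    return v, ok
}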
kvstore's core method is readCommits():
func (s *kvstore) readCommits(commitC <-chan *string, errorC <-chan error) {
    for data := range commitC {
        // 1. a nil data signals that log replay has finished, or that a
        //    snapshot should be loaded
        if data == nil {
            // done replaying log; new data incoming
            // OR signaled to load snapshot
            snapshot, err := s.snapshotter.Load()
            if err == snap.ErrNoSnapshot {
                return
            }
            if err != nil {
                log.Panic(err)
            }
            log.Printf("loading snapshot at term %d and index %d", snapshot.Metadata.Term, snapshot.Metadata.Index)
            if err := s.recoverFromSnapshot(snapshot.Data); err != nil {
                log.Panic(err)
            }
            continue
        }

        // 2. a non-nil data is gob-decoded and stored in kvStore
        var dataKv kv
        dec := gob.NewDecoder(bytes.NewBufferString(*data))
        if err := dec.Decode(&dataKv); err != nil {
            log.Fatalf("raftexample: could not decode message (%v)", err)
        }
        fmt.Println("start save data:", dataKv)
        s.mu.Lock()
        s.kvStore[dataKv.Key] = dataKv.Val
        s.mu.Unlock()
    }
    if err, ok := <-errorC; ok {
        log.Fatal(err)
    }
}
As mentioned above, this is where values from the commitC channel land: when data is nil, the snapshot is loaded and replayed into the store; when data is non-nil, it is gob-decoded and written into the kvStore map.
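The two snapshot hooks used here and in main's getSnapshot closure are simple JSON round-trips of the map; paraphrased from the example (not verbatim):
func (s *kvstore) getSnapshot() ([]byte, error) {
    s.mu.RLock()
    defer s.mu.RUnlock()
    // the whole map is the snapshot payload
    return json.Marshal(s.kvStore)
}

func (s *kvstore) recoverFromSnapshot(snapshot []byte) error {
    var store map[string]string
    if err := json.Unmarshal(snapshot, &store); err != nil {
        return err
    }
    s.mu.Lock()
    defer s.mu.Unlock()
    s.kvStore = store
    return nil
}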
Workflow
[figure: etcd-raft-example.jpg]
1. httpKVAPI sends the request
2. raftNode receives it, reads the data, and notifies the other nodes through etcd-raft (I still have a question here: exactly what data is sent to the other nodes, and what do they do with it?)
3. When data arrives on the ready channel, the node saves the WAL, persists any snapshot, stores the entries in memory, then sends the outgoing messages to the other nodes, and finally applies the committed entries to the state machine
4. When the kvstore receives a commitC message, it stores the value
5. Log compaction is triggered once enough entries have been applied (see the maybeTriggerSnapshot sketch after this list)
6. Once the upper layer has processed the Ready instance, it notifies etcd-raft to prepare the next Ready
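Step 5 is handled by maybeTriggerSnapshot; a paraphrased sketch (not verbatim; snapCount and snapshotCatchUpEntriesN are package-level values in the example):
func (rc *raftNode) maybeTriggerSnapshot() {
    // only snapshot once enough new entries have been applied
    if rc.appliedIndex-rc.snapshotIndex <= rc.snapCount {
        return
    }
    log.Printf("start snapshot [applied index: %d | last snapshot index: %d]", rc.appliedIndex, rc.snapshotIndex)
    data, err := rc.getSnapshot()
    if err != nil {
        log.Panic(err)
    }
    snap, err := rc.raftStorage.CreateSnapshot(rc.appliedIndex, &rc.confState, data)
    if err != nil {
        panic(err)
    }
    if err := rc.saveSnap(snap); err != nil {
        panic(err)
    }

    // compact the in-memory log, keeping a tail of entries so that slow
    // followers can still catch up without a full snapshot transfer
    compactIndex := uint64(1)
    if rc.appliedIndex > snapshotCatchUpEntriesN {
        compactIndex = rc.appliedIndex - snapshotCatchUpEntriesN
    }
    if err := rc.raftStorage.Compact(compactIndex); err != nil {
        panic(err)
    }
    log.Printf("compacted log at index %d", compactIndex)
    rc.snapshotIndex = rc.appliedIndex
}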
Client Request Flow
1. After a client sends a request to the leader, the leader appends a log entry to its own log and replicates it to the other nodes using Raft's AppendEntries RPC (in this example that traffic is carried by the rafthttp transport set up in startRaft, not gRPC)
2. Once the leader determines that the log entry has been written to the logs of a majority of the nodes, it applies the entry to its state machine and returns the result to the client