简介
go 内存分配
1 检查GC是否在工作中,如果是而且当前G分配了内存则协助GC做工作
这个机制叫GC Assist, 用于防止分配内存太快导致GC回收跟不上的情况发生.
2 从 mcache 取,本地缓存,无锁
3 从 mcentral 取,全局,需要加锁
4 从 mheap 取,全局,需要加锁
go 进行堆内存分配时,会做相应的检查,如果满足gc条件,则
1 stop the world
2 等待 sweep 结束
3 设置 gc 阶段为 _GCmark,启动写屏障,辅助 GC,添加根标记任务
4 start the world
5 开始并发标记,从根对象查找可达的对象,标记为灰色并加入任务队列;扫描灰色对象时,把它引用的对象标记为灰色,扫描完成后自身标记为黑色
6 stop the world
7 设置 gc 阶段为 _GCmarktermination,禁止 workers 和 assists,flushing mcaches,设置 spans 为需要清理
8 设置 gc 阶段为 \_GCoff,设置 sweep 状态和禁用写屏障
9 start the world
10 开启后台并发清除
所以 go 的标记和清除主要是并发进行的,不需要全程 stop the world,只在标记开始前和标记结束时(上面第 1、6 步)需要短暂的 stop the world
go 堆上内存分配是通过 newobject 来分配的
runtime/malloc.go 中
// newobject is the runtime entry point for heap allocation of one value
// of type typ (compiler-generated new(T) calls land here). Zeroed
// memory is always requested (needzero = true).
func newobject(typ *_type) unsafe.Pointer {
	size := typ.size
	return mallocgc(size, typ, true)
}
// mallocgc is the core heap allocator (excerpted; "..." marks elided
// runtime code). The visible parts: charge the allocation to the
// current G for GC-assist accounting, take the per-P mcache fast path
// for small objects or the large-allocation path otherwise, mark the
// new object during an active GC, and possibly trigger a new cycle.
func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
...
var assistG *g
// gcBlackenEnabled != 0 means concurrent marking is active, so this
// allocation must help pay for marking work (GC assist).
if gcBlackenEnabled != 0 {
// Charge the current user G for this allocation.
assistG = getg()
if assistG.m.curg != nil {
assistG = assistG.m.curg
}
// Charge the allocation against the G. We'll account
// for internal fragmentation at the end of mallocgc.
assistG.gcAssistBytes -= int64(size)
if assistG.gcAssistBytes < 0 {
// This G is in debt. Assist the GC to correct
// this before allocating. This must happen
// before disabling preemption.
gcAssistAlloc(assistG)
}
}
...
// maxSmallSize is 32KB: sizes up to this use the size-class allocator.
if size <= maxSmallSize {
...
spc := makeSpanClass(sizeclass, noscan)
// Fast path: bump-pointer allocation from the cached span (no lock).
span := c.alloc[spc]
v := nextFreeFast(span)
if v == 0 {
// Slow path: refill from mcentral/mheap; may flag a GC check below.
v, span, shouldhelpgc = c.nextFree(spc)
}
...
} else {
// Large objects (> 32KB) are allocated straight from the heap on the
// system stack; a large allocation always warrants a GC trigger check.
var s *mspan
shouldhelpgc = true
systemstack(func() {
s = largeAlloc(size, needzero, noscan)
})
}
...
publicationBarrier()
// Allocate black during GC.
// All slots hold nil so no scanning is needed.
// This may be racing with GC so do it atomically if there can be
// a race marking the bit.
if gcphase != _GCoff {
gcmarknewobject(uintptr(x), size, scanSize)
}
...
// Check whether this allocation pushed the heap over the GC trigger.
if shouldhelpgc {
if t := (gcTrigger{kind: gcTriggerHeap}); t.test() {
gcStart(t)
}
}
...
}
runtime/mgc.go 中
// test reports whether the condition described by trigger t currently
// holds. It always reports false while GC is disabled, a panic is in
// progress, or a cycle is already underway (gcphase != _GCoff).
func (t gcTrigger) test() bool {
	// GC can never start while disabled, panicking, or mid-cycle.
	blocked := !memstats.enablegc || panicking != 0 || gcphase != _GCoff
	if blocked {
		return false
	}
	if t.kind == gcTriggerHeap {
		// Non-atomic read of heap_live for performance: if this thread is
		// the one that pushed it past the trigger, it just wrote heap_live
		// atomically and will observe its own write.
		return memstats.heap_live >= memstats.gc_trigger
	}
	if t.kind == gcTriggerTime {
		if gcpercent < 0 {
			return false
		}
		lastgc := int64(atomic.Load64(&memstats.last_gc_nanotime))
		return lastgc != 0 && t.now-lastgc > forcegcperiod
	}
	if t.kind == gcTriggerCycle {
		// Effectively t.n > work.cycles, written to tolerate wraparound.
		return int32(t.n-work.cycles) > 0
	}
	return true
}
// gcStart transitions from _GCoff into the mark phase (excerpted; "..."
// marks elided runtime code, including the stop-the-world that precedes
// the phase switch). Visible steps: start mark workers, finish any
// leftover sweeping, switch to _GCmark (enabling the write barrier),
// prepare assists and root-mark jobs, then restart the world so marking
// runs concurrently.
func gcStart(trigger gcTrigger) {
...
gcBgMarkStartWorkers()
...
systemstack(func() {
finishsweep_m()
})
// Drop cached/pooled objects before marking — NOTE(review): see
// clearpools in runtime/mgc.go for exactly what is cleared.
clearpools()
...
// In STW mode, disable scheduling of user Gs. This may also
// disable scheduling of this goroutine, so it may block as
// soon as we start the world again.
if mode != gcBackgroundMode {
schedEnableUser(false)
}
// Entering _GCmark turns on the write barrier.
setGCPhase(_GCmark)
gcBgMarkPrepare() // Must happen before assist enable.
gcMarkRootPrepare()
// Mark all active tinyalloc blocks. Since we're
// allocating from these, they need to be black like
// other allocations. The alternative is to blacken
// the tiny block on every allocation from it, which
// would slow down the tiny allocator.
gcMarkTinyAllocs()
// Concurrent mark.
systemstack(func() {
now = startTheWorldWithSema(trace.enabled)
work.pauseNS += now - work.pauseStart
work.tMark = now
})
if mode != gcBackgroundMode {
Gosched()
}
}
// finishsweep_m drains any spans left unswept from the previous cycle,
// since sweeping must be complete before marking commences. For a
// normal concurrent GC nothing remains and the loop exits at once; a
// forced GC before concurrent sweep finished may still find work here.
func finishsweep_m() {
	for {
		if sweepone() == ^uintptr(0) {
			break // no unswept spans remain
		}
		sweep.npausesweep++
	}
	nextMarkBitArenaEpoch()
}
// gcBgMarkWorker is the per-P background mark worker (excerpted). It
// drains the P's local mark work queue and, when marking finishes,
// calls gcMarkDone to advance the cycle.
func gcBgMarkWorker(_p_ *p) {
...
gcDrain(&_p_.gcw, gcDrainFlushBgCredit)
...
gcMarkDone()
...
}
// gcMarkDone concludes the concurrent mark phase (excerpted) and hands
// off to gcMarkTermination.
func gcMarkDone() {
...
gcMarkTermination()
...
}
// gcMarkTermination runs the _GCmarktermination phase (excerpted) and
// then kicks off sweeping for the finished cycle via gcSweep.
func gcMarkTermination(nextTriggerRatio float64) {
...
gcSweep(work.mode)
...
}
// gcSweep begins the sweep phase (excerpted). The visible part wakes
// the parked background sweeper goroutine so sweeping proceeds
// concurrently with the mutator.
func gcSweep(mode gcMode) {
...
lock(&sweep.lock)
if sweep.parked {
sweep.parked = false
// ready puts the sweeper G back on a run queue.
ready(sweep.g, 0, true)
}
unlock(&sweep.lock)
...
}
// gcenable launches the background sweeper and scavenger goroutines,
// waits for both to signal readiness on the shared channel, and then
// flips the flag that permits GC now that the runtime is initialized.
func gcenable() {
	ready := make(chan int, 2)
	go bgsweep(ready)
	go bgscavenge(ready)
	for i := 0; i < 2; i++ {
		<-ready
	}
	memstats.enablegc = true
}
runtime/mgcmark.go 中
// markroot executes the i'th root marking job (excerpted); the visible
// part scans a goroutine's stack.
func markroot(gcw *gcWork, i uint32) {
...
scanstack(gp, gcw)
...
}
// scanstack scans gp's stack for heap pointers (excerpted), delegating
// the actual pointer scan to scanblock.
func scanstack(gp *g, gcw *gcWork) {
...
scanblock(b, t.ptrdata, gcdata, gcw, &state)
...
}
// scanblock scans the memory range [b0, b0+n0) using ptrmask to locate
// pointers (excerpted). Each value that resolves to a heap object is
// greyed: marked and queued on gcw for later scanning.
func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork, stk *stackScanState) {
...
// findObject resolves p to an object base address (0 if p does not
// point into the heap).
if obj, span, objIndex := findObject(p, b, i); obj != 0 {
greyobject(obj, b, i, span, gcw, objIndex)
}
...
}
// gcDrain drains the mark work queue gcw (excerpted), scanning each
// queued (grey) object in turn.
func gcDrain(gcw *gcWork, flags gcDrainFlags) {
...
scanobject(b, gcw)
...
}
// scanobject scans the object at b for pointers (excerpted), greying
// every heap object it references; once fully scanned, b itself counts
// as black in tri-color terms.
func scanobject(b uintptr, gcw *gcWork) {
...
if obj, span, objIndex := findObject(obj, b, i); obj != 0 {
greyobject(obj, b, i, span, gcw, objIndex)
}
...
}
// main is the runtime's main goroutine (excerpted); among other
// initialization it enables GC via gcenable.
func main() {
...
gcenable()
...
}
runtime/mgcsweep.go 中
// bgsweep is the background sweeper goroutine (excerpted; the initial
// readiness handshake on c is elided). It repeatedly sweeps spans and
// frees workbufs, yielding between units of work, and parks itself when
// sweeping is done until the next GC cycle readies it again.
func bgsweep(c chan int) {
...
for {
// sweepone reports ^0 when no unswept spans remain.
for sweepone() != ^uintptr(0) {
sweep.nbgsweep++
Gosched()
}
for freeSomeWbufs(true) {
Gosched()
}
lock(&sweep.lock)
if !isSweepDone() {
// This can happen if a GC runs between
// gosweepone returning ^0 above
// and the lock being acquired.
unlock(&sweep.lock)
continue
}
// Park while holding sweep.lock; gcSweep clears parked and wakes us.
sweep.parked = true
goparkunlock(&sweep.lock, waitReasonGCSweepWait, traceEvGoBlock, 1)
}
}
// sweepone sweeps a single span (heavily excerpted — the span lookup
// and the return statement are elided here). Callers treat a return of
// ^uintptr(0) as "nothing left to sweep"; NOTE(review): see
// runtime/mgcsweep.go for the full implementation.
func sweepone() uintptr {
s.sweep(false)
}
// sweep performs the actual per-span sweeping (body entirely elided in
// this excerpt). NOTE(review): semantics of preserve and the boolean
// result are not visible here — consult runtime/mgcsweep.go.
func (s *mspan) sweep(preserve bool) bool {
}
网友评论