src/runtime/malloc.go | 10 ++++++++++ src/runtime/mbitmap.go | 2 +- src/runtime/mgcsweep.go | 1 + src/runtime/mheap.go | 10 ++++++++++ diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go index eb24fdb0e8f57b2c897112fa4293a17b538424f1..c89e0bfe9b908404425e0a0e0114e14869379151 100644 --- a/src/runtime/malloc.go +++ b/src/runtime/malloc.go @@ -1068,6 +1068,16 @@ // collector. Otherwise, on weakly ordered machines, // the garbage collector could follow a pointer to x, // but see uninitialized memory or stale heap bits. publicationBarrier() + // As x and the heap bits are initialized, update + // freeIndexForScan now so x is seen by the GC + // (including conservative scan) as an allocated object. + // While this pointer can't escape into user code as a + // _live_ pointer until we return, conservative scanning + // may find a dead pointer that happens to point into this + // object. Delaying this update until now ensures that + // conservative scanning considers this pointer dead until + // this point. + span.freeIndexForScan = span.freeindex // Allocate black during GC. // All slots hold nil so no scanning is needed. diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go index a3a6590d65f17d6aa36a705bca924b7db2973402..91daa31559d836f0370f7f9890bff4a2f40c91c9 100644 --- a/src/runtime/mbitmap.go +++ b/src/runtime/mbitmap.go @@ -224,7 +224,7 @@ // The caller must ensure s.state is mSpanInUse, and there must have // been no preemption points since ensuring this (which could allow a // GC transition, which would allow the state to change). 
func (s *mspan) isFree(index uintptr) bool { - if index < s.freeindex { + if index < s.freeIndexForScan { return false } bytep, mask := s.allocBits.bitp(index) diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go index de57f18c4ff6073c34988c22611ac171fea212ef..5f8f024b829f3ac70369c80f706fa51476b627f9 100644 --- a/src/runtime/mgcsweep.go +++ b/src/runtime/mgcsweep.go @@ -625,6 +625,7 @@ } s.allocCount = nalloc s.freeindex = 0 // reset allocation index to start of span. + s.freeIndexForScan = 0 if trace.enabled { getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize } diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go index b19a2ff4088c51b66e257b31554d1fc63a01bc79..d655f2da6e021d2958b50410a6950541c13b56ad 100644 --- a/src/runtime/mheap.go +++ b/src/runtime/mheap.go @@ -456,6 +456,14 @@ elemsize uintptr // computed from sizeclass or from npages limit uintptr // end of data in span speciallock mutex // guards specials list specials *special // linked list of special records sorted by offset. + + // freeIndexForScan is like freeindex, except that freeindex is + // used by the allocator whereas freeIndexForScan is used by the + // GC scanner. They are two fields so that the GC sees the object + // is allocated only when the object and the heap bits are + // initialized (see also the assignment of freeIndexForScan in + // mallocgc, and issue 54596). + freeIndexForScan uintptr } func (s *mspan) base() uintptr { @@ -1235,6 +1243,7 @@ } // Initialize mark and allocation structures. s.freeindex = 0 + s.freeIndexForScan = 0 s.allocCache = ^uint64(0) // all 1s indicating all free. s.gcmarkBits = newMarkBits(s.nelems) s.allocBits = newAllocBits(s.nelems) @@ -1602,6 +1611,7 @@ span.speciallock.key = 0 span.specials = nil span.needzero = 0 span.freeindex = 0 + span.freeIndexForScan = 0 span.allocBits = nil span.gcmarkBits = nil span.state.set(mSpanDead)