 src/runtime/arena.go    | 10 +++++++++-
 src/runtime/mcache.go   |  8 ++++++++
 src/runtime/mcentral.go |  7 -------
 src/runtime/mheap.go    |  5 ++++-
diff --git a/src/runtime/arena.go b/src/runtime/arena.go
index cd9a9dfae10abc240ccbd7783344cd1acdbaafb1..ab81a8dd704d327b7fe642837ee05f3669634c3e 100644
--- a/src/runtime/arena.go
+++ b/src/runtime/arena.go
@@ -1063,9 +1063,17 @@ spc := makeSpanClass(0, false)
 	h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages)
 	s.isUserArenaChunk = true
 	s.elemsize -= userArenaChunkReserveBytes()
-	s.limit = s.base() + s.elemsize
 	s.freeindex = 1
 	s.allocCount = 1
+
+	// Adjust s.limit down to the object-containing part of the span.
+	//
+	// This is just to create a slightly tighter bound on the limit.
+	// It's totally OK if the garbage collector, in particular
+	// conservative scanning, can temporarily observe an inflated
+	// limit. It will simply mark the whole chunk or just skip it
+	// since we're in the mark phase anyway.
+	s.limit = s.base() + s.elemsize
 
 	// Account for this new arena chunk memory.
 	gcController.heapInUse.add(int64(userArenaChunkBytes))
diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go
index e8da133a69490e1f274c581f667c493d43d694c5..e28dbb020193322d836c4dd10753f26f2175bf9d 100644
--- a/src/runtime/mcache.go
+++ b/src/runtime/mcache.go
@@ -251,6 +251,14 @@
 	// Put the large span in the mcentral swept list so that it's
 	// visible to the background sweeper.
 	mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s)
+
+	// Adjust s.limit down to the object-containing part of the span.
+	//
+	// This is just to create a slightly tighter bound on the limit.
+	// It's totally OK if the garbage collector, in particular
+	// conservative scanning, can temporarily observe an inflated
+	// limit. It will simply mark the whole object or just skip it
+	// since we're in the mark phase anyway.
 	s.limit = s.base() + size
 	s.initHeapBits(false)
 	return s
diff --git a/src/runtime/mcentral.go b/src/runtime/mcentral.go
index bf597e1936da49575cb71f2017f5a042ff035d58..28c57eb30b86724430a87a28ef216e3d3e7bd7be 100644
--- a/src/runtime/mcentral.go
+++ b/src/runtime/mcentral.go
@@ -249,17 +249,10 @@
 // grow allocates a new empty span from the heap and initializes it for c's size class.
 func (c *mcentral) grow() *mspan {
 	npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
-	size := uintptr(class_to_size[c.spanclass.sizeclass()])
-
 	s := mheap_.alloc(npages, c.spanclass)
 	if s == nil {
 		return nil
 	}
-
-	// Use division by multiplication and shifts to quickly compute:
-	// n := (npages << _PageShift) / size
-	n := s.divideByElemSize(npages << _PageShift)
-	s.limit = s.base() + size*n
 	s.initHeapBits(false)
 	return s
 }
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index bfca2d105b742656d9bd3d7aed1281112ab5211d..b27901cedc155787e3064da0f71ef0a03deaa420 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -1390,7 +1390,6 @@ nbytes := npages * pageSize
 	if typ.manual() {
 		s.manualFreeList = 0
 		s.nelems = 0
-		s.limit = s.base() + s.npages*pageSize
 		s.state.set(mSpanManual)
 	} else {
 		// We must set span properties before the span is published anywhere
@@ -1417,6 +1416,9 @@ s.freeIndexForScan = 0
 	s.allocCache = ^uint64(0) // all 1s indicating all free.
 	s.gcmarkBits = newMarkBits(uintptr(s.nelems))
 	s.allocBits = newAllocBits(uintptr(s.nelems))
+
+	// Adjust s.limit down to the object-containing part of the span.
+	s.limit = s.base() + uintptr(s.elemsize)*uintptr(s.nelems)
 
 	// It's safe to access h.sweepgen without the heap lock because it's
 	// only ever updated with the world stopped and we run on the
@@ -1701,6 +1703,7 @@ span.prev = nil
 	span.list = nil
 	span.startAddr = base
 	span.npages = npages
+	span.limit = base + npages*pageSize // see go.dev/issue/74288; adjusted later for heap spans
 	span.allocCount = 0
 	span.spanclass = 0
 	span.elemsize = 0