src/runtime/arena.go | 10 +++++++++- src/runtime/mcache.go | 8 ++++++++ src/runtime/mcentral.go | 7 ------- src/runtime/mheap.go | 5 ++++- diff --git a/src/runtime/arena.go b/src/runtime/arena.go index 0ffc74e8720550a64870b7129aa1b9688bc779d0..63a0824fc43880b6a44cf59dc4c6daae705cb22a 100644 --- a/src/runtime/arena.go +++ b/src/runtime/arena.go @@ -1049,9 +1049,17 @@ spc := makeSpanClass(0, false) h.initSpan(s, spanAllocHeap, spc, base, userArenaChunkPages) s.isUserArenaChunk = true s.elemsize -= userArenaChunkReserveBytes() - s.limit = s.base() + s.elemsize s.freeindex = 1 s.allocCount = 1 + + // Adjust s.limit down to the object-containing part of the span. + // + // This is just to create a slightly tighter bound on the limit. + // It's totally OK if the garbage collector, in particular + // conservative scanning, can temporarily observe an inflated + // limit. It will simply mark the whole chunk or just skip it + // since we're in the mark phase anyway. + s.limit = s.base() + s.elemsize // Adjust size to include redzone. if asanenabled { diff --git a/src/runtime/mcache.go b/src/runtime/mcache.go index 44d737b19cf7da187db6cefd58b1f64c7e0a283c..fe4d0a7cf74bdca3fcb8d76b221e9385465df712 100644 --- a/src/runtime/mcache.go +++ b/src/runtime/mcache.go @@ -252,6 +252,14 @@ // Put the large span in the mcentral swept list so that it's // visible to the background sweeper. mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s) + + // Adjust s.limit down to the object-containing part of the span. + // + // This is just to create a slightly tighter bound on the limit. + // It's totally OK if the garbage collector, in particular + // conservative scanning, can temporarily observe an inflated + // limit. It will simply mark the whole object or just skip it + // since we're in the mark phase anyway. 
s.limit = s.base() + size s.initHeapBits() return s diff --git a/src/runtime/mcentral.go b/src/runtime/mcentral.go index 08ff0a5c5d0f072f4106d4a7593a88c2b0532fd0..d798de4d524b1653b53d0b947e77086cfb5e2fbf 100644 --- a/src/runtime/mcentral.go +++ b/src/runtime/mcentral.go @@ -249,17 +249,10 @@ // grow allocates a new empty span from the heap and initializes it for c's size class. func (c *mcentral) grow() *mspan { npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) - size := uintptr(class_to_size[c.spanclass.sizeclass()]) - s := mheap_.alloc(npages, c.spanclass) if s == nil { return nil } - - // Use division by multiplication and shifts to quickly compute: - // n := (npages << _PageShift) / size - n := s.divideByElemSize(npages << _PageShift) - s.limit = s.base() + size*n s.initHeapBits() return s } diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go index e058dd848925a496e9d7c92952f688fff60286d7..b073487c01dd5ade3a82236c466aacc84ffaccf1 100644 --- a/src/runtime/mheap.go +++ b/src/runtime/mheap.go @@ -1398,7 +1398,6 @@ nbytes := npages * pageSize if typ.manual() { s.manualFreeList = 0 s.nelems = 0 - s.limit = s.base() + s.npages*pageSize s.state.set(mSpanManual) } else { // We must set span properties before the span is published anywhere @@ -1425,6 +1424,9 @@ s.freeIndexForScan = 0 s.allocCache = ^uint64(0) // all 1s indicating all free. s.gcmarkBits = newMarkBits(uintptr(s.nelems)) s.allocBits = newAllocBits(uintptr(s.nelems)) + + // Adjust s.limit down to the object-containing part of the span. + s.limit = s.base() + uintptr(s.elemsize)*uintptr(s.nelems) // It's safe to access h.sweepgen without the heap lock because it's // only ever updated with the world stopped and we run on the @@ -1709,6 +1711,7 @@ span.prev = nil span.list = nil span.startAddr = base span.npages = npages + span.limit = base + npages*pageSize // see go.dev/issue/74288; adjusted later for heap spans span.allocCount = 0 span.spanclass = 0 span.elemsize = 0