runtime: allocate physical-page-aligned memory differently
Currently, physical-page-aligned allocations for stacks (where the
physical page size is greater than the runtime page size) first
over-allocate some memory, then free the unaligned portions back to the
heap. However, because allocating via h.pages.alloc causes scavenged
bits to get cleared, we need to account for that memory correctly in
heapFree and heapReleased. Currently that is not the case, leading to
throws at runtime.

Trying to get that accounting right is complicated, because information
about exactly which pages were scavenged needs to be plumbed up.
Instead, find the oversized region first, and then allocate only the
aligned part. This avoids any accounting issues.

However, this does come with some performance cost, because we don't
update searchAddr (which is safe, it just means the next allocation
potentially must look harder) and, for simplicity, we skip the fast
path that h.pages.alloc has.

Fixes #52682.

Change-Id: Iefa68317584d73b187634979d730eb30db770bb6
Reviewed-on: https://go-review.googlesource.com/c/go/+/407502
Run-TryBot: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Cherry Mui <cherryyz@google.com>
parent 7ec6ef432a
commit b58067013e
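The key arithmetic, relied on by both the old and the new approach, is
that requesting physPageSize/pageSize extra runtime pages guarantees a
physical-page-aligned run of npages pages somewhere inside the found
region. A minimal standalone sketch of that invariant (hypothetical
sizes and addresses, not the runtime's code; only alignUp mirrors an
actual runtime helper):

package main

import "fmt"

const (
	pageSize     uintptr = 8192  // runtime page size (8 KiB)
	physPageSize uintptr = 65536 // e.g. a 64 KiB physical page size
)

// alignUp rounds n up to a multiple of a, which must be a power of
// two. This mirrors the runtime helper of the same name.
func alignUp(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

func main() {
	npages := uintptr(4)                  // pages the caller actually wants
	extraPages := physPageSize / pageSize // 8 extra runtime pages

	// Suppose a hypothetical find returned this runtime-page-aligned,
	// but not physical-page-aligned, base address.
	base := uintptr(0x7f0000002000)
	regionEnd := base + (npages+extraPages)*pageSize

	aligned := alignUp(base, physPageSize)
	// Aligning up skips at most physPageSize-pageSize bytes, so the
	// aligned npages-page allocation always fits in the found region.
	fmt.Printf("base=%#x aligned=%#x fits=%v\n",
		base, aligned, aligned+npages*pageSize <= regionEnd)
}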
src/runtime/mheap.go
@@ -1157,9 +1157,32 @@ func (h *mheap) allocSpan(npages uintptr, typ spanAllocType, spanclass spanClass
 	if needPhysPageAlign {
 		// Overallocate by a physical page to allow for later alignment.
-		npages += physPageSize / pageSize
+		extraPages := physPageSize / pageSize
+
+		// Find a big enough region first, but then only allocate the
+		// aligned portion. We can't just allocate and then free the
+		// edges because we need to account for scavenged memory, and
+		// that's difficult with alloc.
+		//
+		// Note that we skip updates to searchAddr here. It's OK if
+		// it's stale and higher than normal; it'll operate correctly,
+		// just come with a performance cost.
+		base, _ = h.pages.find(npages + extraPages)
+		if base == 0 {
+			var ok bool
+			growth, ok = h.grow(npages + extraPages)
+			if !ok {
+				unlock(&h.lock)
+				return nil
+			}
+			base, _ = h.pages.find(npages + extraPages)
+			if base == 0 {
+				throw("grew heap, but no adequate free space found")
+			}
+		}
+		base = alignUp(base, physPageSize)
+		scav = h.pages.allocRange(base, npages)
 	}
 
 	if base == 0 {
 		// Try to acquire a base address.
 		base, scav = h.pages.alloc(npages)
@@ -1181,23 +1204,6 @@ func (h *mheap) allocSpan(npages uintptr, typ spanAllocType, spanclass spanClass
 		// one now that we have the heap lock.
 		s = h.allocMSpanLocked()
 	}
 
-	if needPhysPageAlign {
-		allocBase, allocPages := base, npages
-		base = alignUp(allocBase, physPageSize)
-		npages -= physPageSize / pageSize
-
-		// Return memory around the aligned allocation.
-		spaceBefore := base - allocBase
-		if spaceBefore > 0 {
-			h.pages.free(allocBase, spaceBefore/pageSize, false)
-		}
-		spaceAfter := (allocPages-npages)*pageSize - spaceBefore
-		if spaceAfter > 0 {
-			h.pages.free(base+npages*pageSize, spaceAfter/pageSize, false)
-		}
-	}
-
 	unlock(&h.lock)
 
 HaveSpan:
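For contrast, here is a standalone sketch of the edge-trimming
arithmetic in the block deleted above (hypothetical values, not runtime
code). The old path aligned the base up inside the over-allocation and
freed the space before and after; those freed edges are exactly where
the scavenge accounting went wrong, since h.pages.alloc had already
cleared the scavenged bits for them.

package main

import "fmt"

const (
	pageSize     uintptr = 8192
	physPageSize uintptr = 65536
)

func alignUp(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

func main() {
	// The caller asked for 4 pages; the old code bumped npages by
	// physPageSize/pageSize = 8 before allocating, so allocPages = 12.
	allocBase := uintptr(0x7f0000002000) // hypothetical unaligned base
	allocPages := uintptr(12)
	npages := allocPages - physPageSize/pageSize // back to 4

	base := alignUp(allocBase, physPageSize)
	spaceBefore := base - allocBase
	spaceAfter := (allocPages-npages)*pageSize - spaceBefore

	// The two freed edges plus the aligned allocation exactly cover
	// the original over-allocation.
	fmt.Println(spaceBefore/pageSize, "page(s) freed before,",
		spaceAfter/pageSize, "page(s) freed after")
	fmt.Println("covers over-allocation:",
		spaceBefore+npages*pageSize+spaceAfter == allocPages*pageSize)
}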