Mirror of https://github.com/golang/go.git
runtime: move sizeclass defs to new package internal/runtime/gc
We will want to reference these definitions from new generator programs, and this is a good opportunity to clean up all these old C-style names.

Change-Id: Ifb06f0afc381e2697e7877f038eca786610c96de
Reviewed-on: https://go-review.googlesource.com/c/go/+/655275
Auto-Submit: Michael Knyszek <mknyszek@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Cherry Mui <cherryyz@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
Parent: ecdd429a3b
Commit: 528bafa049
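For orientation before the diff: a minimal standalone sketch (not part of the change) of the naming scheme it introduces. The constant values are copied from the generated sizeclasses.go below; divRoundUp stands in for the runtime helper of the same name.

```go
package main

import "fmt"

// Exported values from internal/runtime/gc as generated below; the old
// C-style names were _MaxSmallSize, smallSizeDiv, smallSizeMax,
// largeSizeDiv, and _PageShift.
const (
	MaxSmallSize = 32768
	SmallSizeDiv = 8
	SmallSizeMax = 1024
	LargeSizeDiv = 128
	PageShift    = 13
)

// divRoundUp mirrors the runtime helper used at the call sites in this diff.
func divRoundUp(n, a uintptr) uintptr { return (n + a - 1) / a }

func main() {
	fmt.Println("page size:", 1<<PageShift)                                            // 8192
	fmt.Println("8-byte lookup buckets:", SmallSizeMax/SmallSizeDiv+1)                  // 129
	fmt.Println("128-byte lookup buckets:", (MaxSmallSize-SmallSizeMax)/LargeSizeDiv+1) // 249
}
```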
@@ -50,6 +50,7 @@ var runtimePkgs = []string{
 	"internal/runtime/atomic",
 	"internal/runtime/exithook",
+	"internal/runtime/gc",
 	"internal/runtime/maps",
 	"internal/runtime/math",
 	"internal/runtime/sys",
@@ -94,6 +94,7 @@ var depsRules = `
 	< internal/runtime/syscall
 	< internal/runtime/atomic
 	< internal/runtime/exithook
+	< internal/runtime/gc
 	< internal/runtime/math
 	< internal/runtime/maps
 	< runtime
@@ -289,29 +289,29 @@ func maxObjsPerSpan(classes []class) int {
 
 func printClasses(w io.Writer, classes []class) {
 	fmt.Fprintln(w, "const (")
-	fmt.Fprintf(w, "minHeapAlign = %d\n", minHeapAlign)
-	fmt.Fprintf(w, "_MaxSmallSize = %d\n", maxSmallSize)
-	fmt.Fprintf(w, "smallSizeDiv = %d\n", smallSizeDiv)
-	fmt.Fprintf(w, "smallSizeMax = %d\n", smallSizeMax)
-	fmt.Fprintf(w, "largeSizeDiv = %d\n", largeSizeDiv)
-	fmt.Fprintf(w, "_NumSizeClasses = %d\n", len(classes))
-	fmt.Fprintf(w, "_PageShift = %d\n", pageShift)
-	fmt.Fprintf(w, "maxObjsPerSpan = %d\n", maxObjsPerSpan(classes))
+	fmt.Fprintf(w, "MinHeapAlign = %d\n", minHeapAlign)
+	fmt.Fprintf(w, "MaxSmallSize = %d\n", maxSmallSize)
+	fmt.Fprintf(w, "SmallSizeDiv = %d\n", smallSizeDiv)
+	fmt.Fprintf(w, "SmallSizeMax = %d\n", smallSizeMax)
+	fmt.Fprintf(w, "LargeSizeDiv = %d\n", largeSizeDiv)
+	fmt.Fprintf(w, "NumSizeClasses = %d\n", len(classes))
+	fmt.Fprintf(w, "PageShift = %d\n", pageShift)
+	fmt.Fprintf(w, "MaxObjsPerSpan = %d\n", maxObjsPerSpan(classes))
 	fmt.Fprintln(w, ")")
 
-	fmt.Fprint(w, "var class_to_size = [_NumSizeClasses]uint16 {")
+	fmt.Fprint(w, "var SizeClassToSize = [NumSizeClasses]uint16 {")
 	for _, c := range classes {
 		fmt.Fprintf(w, "%d,", c.size)
 	}
 	fmt.Fprintln(w, "}")
 
-	fmt.Fprint(w, "var class_to_allocnpages = [_NumSizeClasses]uint8 {")
+	fmt.Fprint(w, "var SizeClassToNPages = [NumSizeClasses]uint8 {")
 	for _, c := range classes {
 		fmt.Fprintf(w, "%d,", c.npages)
 	}
 	fmt.Fprintln(w, "}")
 
-	fmt.Fprint(w, "var class_to_divmagic = [_NumSizeClasses]uint32 {")
+	fmt.Fprint(w, "var SizeClassToDivMagic = [NumSizeClasses]uint32 {")
 	for _, c := range classes {
 		if c.size == 0 {
 			fmt.Fprintf(w, "0,")
@@ -332,7 +332,7 @@ func printClasses(w io.Writer, classes []class) {
 			}
 		}
 	}
-	fmt.Fprint(w, "var size_to_class8 = [smallSizeMax/smallSizeDiv+1]uint8 {")
+	fmt.Fprint(w, "var SizeToSizeClass8 = [SmallSizeMax/SmallSizeDiv+1]uint8 {")
 	for _, v := range sc {
 		fmt.Fprintf(w, "%d,", v)
 	}
@@ -349,7 +349,7 @@ func printClasses(w io.Writer, classes []class) {
 			}
 		}
 	}
-	fmt.Fprint(w, "var size_to_class128 = [(_MaxSmallSize-smallSizeMax)/largeSizeDiv+1]uint8 {")
+	fmt.Fprint(w, "var SizeToSizeClass128 = [(MaxSmallSize-SmallSizeMax)/LargeSizeDiv+1]uint8 {")
 	for _, v := range sc {
 		fmt.Fprintf(w, "%d,", v)
 	}
@@ -1,7 +1,7 @@
 // Code generated by mksizeclasses.go; DO NOT EDIT.
 //go:generate go run mksizeclasses.go
 
-package runtime
+package gc
 
 // class  bytes/obj  bytes/span  objects  tail waste  max waste  min align
 //     1          8        8192     1024           0     87.50%          8
@@ -82,18 +82,18 @@ package runtime
 //      8192    13     32768
 
 const (
-	minHeapAlign = 8
-	_MaxSmallSize = 32768
-	smallSizeDiv = 8
-	smallSizeMax = 1024
-	largeSizeDiv = 128
-	_NumSizeClasses = 68
-	_PageShift = 13
-	maxObjsPerSpan = 1024
+	MinHeapAlign = 8
+	MaxSmallSize = 32768
+	SmallSizeDiv = 8
+	SmallSizeMax = 1024
+	LargeSizeDiv = 128
+	NumSizeClasses = 68
+	PageShift = 13
+	MaxObjsPerSpan = 1024
 )
 
-var class_to_size = [_NumSizeClasses]uint16{0, 8, 16, 24, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 256, 288, 320, 352, 384, 416, 448, 480, 512, 576, 640, 704, 768, 896, 1024, 1152, 1280, 1408, 1536, 1792, 2048, 2304, 2688, 3072, 3200, 3456, 4096, 4864, 5376, 6144, 6528, 6784, 6912, 8192, 9472, 9728, 10240, 10880, 12288, 13568, 14336, 16384, 18432, 19072, 20480, 21760, 24576, 27264, 28672, 32768}
-var class_to_allocnpages = [_NumSizeClasses]uint8{0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 2, 3, 1, 3, 2, 3, 4, 5, 6, 1, 7, 6, 5, 4, 3, 5, 7, 2, 9, 7, 5, 8, 3, 10, 7, 4}
-var class_to_divmagic = [_NumSizeClasses]uint32{0, ^uint32(0)/8 + 1, ^uint32(0)/16 + 1, ^uint32(0)/24 + 1, ^uint32(0)/32 + 1, ^uint32(0)/48 + 1, ^uint32(0)/64 + 1, ^uint32(0)/80 + 1, ^uint32(0)/96 + 1, ^uint32(0)/112 + 1, ^uint32(0)/128 + 1, ^uint32(0)/144 + 1, ^uint32(0)/160 + 1, ^uint32(0)/176 + 1, ^uint32(0)/192 + 1, ^uint32(0)/208 + 1, ^uint32(0)/224 + 1, ^uint32(0)/240 + 1, ^uint32(0)/256 + 1, ^uint32(0)/288 + 1, ^uint32(0)/320 + 1, ^uint32(0)/352 + 1, ^uint32(0)/384 + 1, ^uint32(0)/416 + 1, ^uint32(0)/448 + 1, ^uint32(0)/480 + 1, ^uint32(0)/512 + 1, ^uint32(0)/576 + 1, ^uint32(0)/640 + 1, ^uint32(0)/704 + 1, ^uint32(0)/768 + 1, ^uint32(0)/896 + 1, ^uint32(0)/1024 + 1, ^uint32(0)/1152 + 1, ^uint32(0)/1280 + 1, ^uint32(0)/1408 + 1, ^uint32(0)/1536 + 1, ^uint32(0)/1792 + 1, ^uint32(0)/2048 + 1, ^uint32(0)/2304 + 1, ^uint32(0)/2688 + 1, ^uint32(0)/3072 + 1, ^uint32(0)/3200 + 1, ^uint32(0)/3456 + 1, ^uint32(0)/4096 + 1, ^uint32(0)/4864 + 1, ^uint32(0)/5376 + 1, ^uint32(0)/6144 + 1, ^uint32(0)/6528 + 1, ^uint32(0)/6784 + 1, ^uint32(0)/6912 + 1, ^uint32(0)/8192 + 1, ^uint32(0)/9472 + 1, ^uint32(0)/9728 + 1, ^uint32(0)/10240 + 1, ^uint32(0)/10880 + 1, ^uint32(0)/12288 + 1, ^uint32(0)/13568 + 1, ^uint32(0)/14336 + 1, ^uint32(0)/16384 + 1, ^uint32(0)/18432 + 1, ^uint32(0)/19072 + 1, ^uint32(0)/20480 + 1, ^uint32(0)/21760 + 1, ^uint32(0)/24576 + 1, ^uint32(0)/27264 + 1, ^uint32(0)/28672 + 1, ^uint32(0)/32768 + 1}
-var size_to_class8 = [smallSizeMax/smallSizeDiv + 1]uint8{0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 25, 25, 25, 25, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 30, 30, 30, 30, 30, 30, 30, 30, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32}
-var size_to_class128 = [(_MaxSmallSize-smallSizeMax)/largeSizeDiv + 1]uint8{32, 33, 34, 35, 36, 37, 37, 38, 38, 39, 39, 40, 40, 40, 41, 41, 41, 42, 43, 43, 44, 44, 44, 44, 44, 45, 45, 45, 45, 45, 45, 46, 46, 46, 46, 47, 47, 47, 47, 47, 47, 48, 48, 48, 49, 49, 50, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 53, 53, 54, 54, 54, 54, 55, 55, 55, 55, 55, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 58, 58, 58, 58, 58, 58, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 61, 61, 61, 61, 61, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67}
+var SizeClassToSize = [NumSizeClasses]uint16{0, 8, 16, 24, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, 256, 288, 320, 352, 384, 416, 448, 480, 512, 576, 640, 704, 768, 896, 1024, 1152, 1280, 1408, 1536, 1792, 2048, 2304, 2688, 3072, 3200, 3456, 4096, 4864, 5376, 6144, 6528, 6784, 6912, 8192, 9472, 9728, 10240, 10880, 12288, 13568, 14336, 16384, 18432, 19072, 20480, 21760, 24576, 27264, 28672, 32768}
+var SizeClassToNPages = [NumSizeClasses]uint8{0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 2, 1, 2, 1, 3, 2, 3, 1, 3, 2, 3, 4, 5, 6, 1, 7, 6, 5, 4, 3, 5, 7, 2, 9, 7, 5, 8, 3, 10, 7, 4}
+var SizeClassToDivMagic = [NumSizeClasses]uint32{0, ^uint32(0)/8 + 1, ^uint32(0)/16 + 1, ^uint32(0)/24 + 1, ^uint32(0)/32 + 1, ^uint32(0)/48 + 1, ^uint32(0)/64 + 1, ^uint32(0)/80 + 1, ^uint32(0)/96 + 1, ^uint32(0)/112 + 1, ^uint32(0)/128 + 1, ^uint32(0)/144 + 1, ^uint32(0)/160 + 1, ^uint32(0)/176 + 1, ^uint32(0)/192 + 1, ^uint32(0)/208 + 1, ^uint32(0)/224 + 1, ^uint32(0)/240 + 1, ^uint32(0)/256 + 1, ^uint32(0)/288 + 1, ^uint32(0)/320 + 1, ^uint32(0)/352 + 1, ^uint32(0)/384 + 1, ^uint32(0)/416 + 1, ^uint32(0)/448 + 1, ^uint32(0)/480 + 1, ^uint32(0)/512 + 1, ^uint32(0)/576 + 1, ^uint32(0)/640 + 1, ^uint32(0)/704 + 1, ^uint32(0)/768 + 1, ^uint32(0)/896 + 1, ^uint32(0)/1024 + 1, ^uint32(0)/1152 + 1, ^uint32(0)/1280 + 1, ^uint32(0)/1408 + 1, ^uint32(0)/1536 + 1, ^uint32(0)/1792 + 1, ^uint32(0)/2048 + 1, ^uint32(0)/2304 + 1, ^uint32(0)/2688 + 1, ^uint32(0)/3072 + 1, ^uint32(0)/3200 + 1, ^uint32(0)/3456 + 1, ^uint32(0)/4096 + 1, ^uint32(0)/4864 + 1, ^uint32(0)/5376 + 1, ^uint32(0)/6144 + 1, ^uint32(0)/6528 + 1, ^uint32(0)/6784 + 1, ^uint32(0)/6912 + 1, ^uint32(0)/8192 + 1, ^uint32(0)/9472 + 1, ^uint32(0)/9728 + 1, ^uint32(0)/10240 + 1, ^uint32(0)/10880 + 1, ^uint32(0)/12288 + 1, ^uint32(0)/13568 + 1, ^uint32(0)/14336 + 1, ^uint32(0)/16384 + 1, ^uint32(0)/18432 + 1, ^uint32(0)/19072 + 1, ^uint32(0)/20480 + 1, ^uint32(0)/21760 + 1, ^uint32(0)/24576 + 1, ^uint32(0)/27264 + 1, ^uint32(0)/28672 + 1, ^uint32(0)/32768 + 1}
+var SizeToSizeClass8 = [SmallSizeMax/SmallSizeDiv + 1]uint8{0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 25, 25, 25, 25, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 30, 30, 30, 30, 30, 30, 30, 30, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32}
+var SizeToSizeClass128 = [(MaxSmallSize-SmallSizeMax)/LargeSizeDiv + 1]uint8{32, 33, 34, 35, 36, 37, 37, 38, 38, 39, 39, 40, 40, 40, 41, 41, 41, 42, 43, 43, 44, 44, 44, 44, 44, 45, 45, 45, 45, 45, 45, 46, 46, 46, 46, 47, 47, 47, 47, 47, 47, 48, 48, 48, 49, 49, 50, 51, 51, 51, 51, 51, 51, 51, 51, 51, 51, 52, 52, 52, 52, 52, 52, 52, 52, 52, 52, 53, 53, 54, 54, 54, 54, 55, 55, 55, 55, 55, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 56, 57, 57, 57, 57, 57, 57, 57, 57, 57, 57, 58, 58, 58, 58, 58, 58, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 59, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 60, 61, 61, 61, 61, 61, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 62, 63, 63, 63, 63, 63, 63, 63, 63, 63, 63, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 64, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 65, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 66, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67, 67}
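To see how the generated tables compose, here is a small self-contained sketch using truncated excerpts of the arrays above (an illustration, not the real package): a 33-byte request indexes SizeToSizeClass8 at divRoundUp(33, 8) = 5, landing in size class 5, whose object size is 48 bytes.

```go
package main

import "fmt"

// Truncated excerpts of the generated tables above; enough for small sizes.
var sizeClassToSize = []uintptr{0, 8, 16, 24, 32, 48, 64, 80, 96, 112, 128}
var sizeToSizeClass8 = []uint8{0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8}

const smallSizeDiv = 8 // gc.SmallSizeDiv

func divRoundUp(n, a uintptr) uintptr { return (n + a - 1) / a }

func main() {
	size := uintptr(33)
	sc := sizeToSizeClass8[divRoundUp(size, smallSizeDiv)]
	fmt.Printf("%d bytes -> class %d -> %d bytes\n", size, sc, sizeClassToSize[sc])
	// Output: 33 bytes -> class 5 -> 48 bytes
}
```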
@@ -11,6 +11,7 @@ import (
 	"internal/goarch"
 	"internal/goos"
 	"internal/runtime/atomic"
+	"internal/runtime/gc"
 	"internal/runtime/sys"
 	"unsafe"
 )
@@ -363,7 +364,7 @@ func ReadMemStatsSlow() (base, slow MemStats) {
 		slow.Mallocs = 0
 		slow.Frees = 0
 		slow.HeapReleased = 0
-		var bySize [_NumSizeClasses]struct {
+		var bySize [gc.NumSizeClasses]struct {
 			Mallocs, Frees uint64
 		}
 
@@ -391,11 +392,11 @@ func ReadMemStatsSlow() (base, slow MemStats) {
 
 		// Collect per-sizeclass free stats.
 		var smallFree uint64
-		for i := 0; i < _NumSizeClasses; i++ {
+		for i := 0; i < gc.NumSizeClasses; i++ {
 			slow.Frees += m.smallFreeCount[i]
 			bySize[i].Frees += m.smallFreeCount[i]
 			bySize[i].Mallocs += m.smallFreeCount[i]
-			smallFree += m.smallFreeCount[i] * uint64(class_to_size[i])
+			smallFree += m.smallFreeCount[i] * uint64(gc.SizeClassToSize[i])
 		}
 		slow.Frees += m.tinyAllocCount + m.largeFreeCount
 		slow.Mallocs += slow.Frees
@@ -14,6 +14,7 @@ package runtime
 import (
 	"internal/abi"
 	"internal/goarch"
+	"internal/runtime/gc"
 	"unsafe"
 )
 
@@ -471,7 +472,7 @@ func dumproots() {
 
 // Bit vector of free marks.
 // Needs to be as big as the largest number of objects per span.
-var freemark [_PageSize / 8]bool
+var freemark [pageSize / 8]bool
 
 func dumpobjs() {
 	// To protect mheap_.allspans.
@@ -483,7 +484,7 @@ func dumpobjs() {
 		}
 		p := s.base()
 		size := s.elemsize
-		n := (s.npages << _PageShift) / size
+		n := (s.npages << gc.PageShift) / size
 		if n > uintptr(len(freemark)) {
 			throw("freemark array doesn't have enough entries")
 		}
@@ -104,6 +104,7 @@ import (
 	"internal/goarch"
 	"internal/goos"
 	"internal/runtime/atomic"
+	"internal/runtime/gc"
 	"internal/runtime/math"
 	"internal/runtime/sys"
 	"unsafe"
@@ -112,13 +113,10 @@ import (
 const (
 	maxTinySize   = _TinySize
 	tinySizeClass = _TinySizeClass
-	maxSmallSize  = _MaxSmallSize
-
-	pageShift = _PageShift
-	pageSize  = _PageSize
-
-	_PageSize = 1 << _PageShift
-	_PageMask = _PageSize - 1
+	maxSmallSize  = gc.MaxSmallSize
+	pageSize      = 1 << gc.PageShift
+	pageMask      = pageSize - 1
+	_PageSize     = pageSize // Unused. Left for viewcore.
 
 	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
 	_64bit = 1 << (^uintptr(0) >> 63) / 2
@@ -371,7 +369,7 @@ var (
 )
 
 func mallocinit() {
-	if class_to_size[_TinySizeClass] != _TinySize {
+	if gc.SizeClassToSize[tinySizeClass] != maxTinySize {
 		throw("bad TinySizeClass")
 	}
 
@@ -432,11 +430,11 @@ func mallocinit() {
 	// span sizes are one page. Some code relies on this.
 	minSizeForMallocHeaderIsSizeClass := false
 	sizeClassesUpToMinSizeForMallocHeaderAreOnePage := true
-	for i := 0; i < len(class_to_size); i++ {
-		if class_to_allocnpages[i] > 1 {
+	for i := 0; i < len(gc.SizeClassToSize); i++ {
+		if gc.SizeClassToNPages[i] > 1 {
 			sizeClassesUpToMinSizeForMallocHeaderAreOnePage = false
 		}
-		if minSizeForMallocHeader == uintptr(class_to_size[i]) {
+		if minSizeForMallocHeader == uintptr(gc.SizeClassToSize[i]) {
 			minSizeForMallocHeaderIsSizeClass = true
 			break
 		}
@@ -1272,12 +1270,12 @@ func mallocgcSmallNoscan(size uintptr, typ *_type, needzero bool) (unsafe.Pointe
 	checkGCTrigger := false
 	c := getMCache(mp)
 	var sizeclass uint8
-	if size <= smallSizeMax-8 {
-		sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
+	if size <= gc.SmallSizeMax-8 {
+		sizeclass = gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)]
 	} else {
-		sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]
+		sizeclass = gc.SizeToSizeClass128[divRoundUp(size-gc.SmallSizeMax, gc.LargeSizeDiv)]
 	}
-	size = uintptr(class_to_size[sizeclass])
+	size = uintptr(gc.SizeClassToSize[sizeclass])
 	spc := makeSpanClass(sizeclass, true)
 	span := c.alloc[spc]
 	v := nextFreeFast(span)
@@ -1360,7 +1358,7 @@ func mallocgcSmallScanNoHeader(size uintptr, typ *_type) (unsafe.Pointer, uintpt
 
 	checkGCTrigger := false
 	c := getMCache(mp)
-	sizeclass := size_to_class8[divRoundUp(size, smallSizeDiv)]
+	sizeclass := gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)]
 	spc := makeSpanClass(sizeclass, false)
 	span := c.alloc[spc]
 	v := nextFreeFast(span)
@@ -1378,7 +1376,7 @@ func mallocgcSmallScanNoHeader(size uintptr, typ *_type) (unsafe.Pointer, uintpt
 	} else {
 		c.scanAlloc += heapSetTypeNoHeader(uintptr(x), size, typ, span)
 	}
-	size = uintptr(class_to_size[sizeclass])
+	size = uintptr(gc.SizeClassToSize[sizeclass])
 
 	// Ensure that the stores above that initialize x to
 	// type-safe memory and set the heap bits occur before
@@ -1453,12 +1451,12 @@ func mallocgcSmallScanHeader(size uintptr, typ *_type) (unsafe.Pointer, uintptr)
 	c := getMCache(mp)
 	size += mallocHeaderSize
 	var sizeclass uint8
-	if size <= smallSizeMax-8 {
-		sizeclass = size_to_class8[divRoundUp(size, smallSizeDiv)]
+	if size <= gc.SmallSizeMax-8 {
+		sizeclass = gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)]
 	} else {
-		sizeclass = size_to_class128[divRoundUp(size-smallSizeMax, largeSizeDiv)]
+		sizeclass = gc.SizeToSizeClass128[divRoundUp(size-gc.SmallSizeMax, gc.LargeSizeDiv)]
 	}
-	size = uintptr(class_to_size[sizeclass])
+	size = uintptr(gc.SizeClassToSize[sizeclass])
 	spc := makeSpanClass(sizeclass, false)
 	span := c.alloc[spc]
 	v := nextFreeFast(span)
@@ -1909,7 +1907,7 @@ func persistentalloc1(size, align uintptr, sysStat *sysMemStat) *notInHeap {
 		if align&(align-1) != 0 {
 			throw("persistentalloc: align is not a power of 2")
 		}
-		if align > _PageSize {
+		if align > pageSize {
 			throw("persistentalloc: align is too large")
 		}
 	} else {
@@ -6,6 +6,7 @@ package runtime
 
 import (
 	"internal/runtime/atomic"
+	"internal/runtime/gc"
 	"internal/runtime/sys"
 	"unsafe"
 )
@@ -218,18 +219,18 @@ func (c *mcache) refill(spc spanClass) {
 
 // allocLarge allocates a span for a large object.
 func (c *mcache) allocLarge(size uintptr, noscan bool) *mspan {
-	if size+_PageSize < size {
+	if size+pageSize < size {
 		throw("out of memory")
 	}
-	npages := size >> _PageShift
-	if size&_PageMask != 0 {
+	npages := size >> gc.PageShift
+	if size&pageMask != 0 {
 		npages++
 	}
 
 	// Deduct credit for this span allocation and sweep if
 	// necessary. mHeap_Alloc will also sweep npages, so this only
 	// pays the debt down to npage pages.
-	deductSweepCredit(npages*_PageSize, npages)
+	deductSweepCredit(npages*pageSize, npages)
 
 	spc := makeSpanClass(0, noscan)
 	s := mheap_.alloc(npages, spc)
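The shift-and-test in allocLarge above is a ceiling division by the page size; a quick standalone check of that equivalence, assuming gc.PageShift = 13 as in the generated file:

```go
package main

import "fmt"

const (
	pageShift = 13 // gc.PageShift
	pageSize  = 1 << pageShift
	pageMask  = pageSize - 1
)

func main() {
	for _, size := range []uintptr{1, 8192, 8193, 40000} {
		npages := size >> pageShift // same computation as allocLarge
		if size&pageMask != 0 {
			npages++
		}
		fmt.Printf("size %5d -> %d pages (ceil division gives %d)\n",
			size, npages, (size+pageSize-1)/pageSize)
	}
}
```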
@@ -14,6 +14,7 @@ package runtime
 
 import (
 	"internal/runtime/atomic"
+	"internal/runtime/gc"
 	"internal/runtime/sys"
 )
 
@@ -80,7 +81,7 @@ func (c *mcentral) fullSwept(sweepgen uint32) *spanSet {
 // Allocate a span to use in an mcache.
 func (c *mcentral) cacheSpan() *mspan {
 	// Deduct credit for this span allocation and sweep if necessary.
-	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
+	spanBytes := uintptr(gc.SizeClassToNPages[c.spanclass.sizeclass()]) * pageSize
 	deductSweepCredit(spanBytes, 0)
 
 	traceDone := false
@@ -248,8 +249,8 @@ func (c *mcentral) uncacheSpan(s *mspan) {
 
 // grow allocates a new empty span from the heap and initializes it for c's size class.
 func (c *mcentral) grow() *mspan {
-	npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
-	size := uintptr(class_to_size[c.spanclass.sizeclass()])
+	npages := uintptr(gc.SizeClassToNPages[c.spanclass.sizeclass()])
+	size := uintptr(gc.SizeClassToSize[c.spanclass.sizeclass()])
 
 	s := mheap_.alloc(npages, c.spanclass)
 	if s == nil {
@@ -257,8 +258,8 @@ func (c *mcentral) grow() *mspan {
 	}
 
 	// Use division by multiplication and shifts to quickly compute:
-	// n := (npages << _PageShift) / size
-	n := s.divideByElemSize(npages << _PageShift)
+	// n := (npages << gc.PageShift) / size
+	n := s.divideByElemSize(npages << gc.PageShift)
 	s.limit = s.base() + size*n
 	s.initHeapBits()
 	return s
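divideByElemSize relies on the SizeClassToDivMagic table generated earlier: for the numerators that occur within a span, n/size equals (n*magic)>>32, where magic = ^uint32(0)/size + 1, i.e. ceil(2^32/size). A standalone spot-check of that identity (a sketch of the trick, not the runtime's code):

```go
package main

import "fmt"

func main() {
	const size = 48                   // a real size class from SizeClassToSize
	const magic = ^uint32(0)/size + 1 // matches SizeClassToDivMagic's entry for 48
	for _, n := range []uint64{0, 47, 48, 96, 8191, 16384} {
		q := (n * uint64(magic)) >> 32 // divideByElemSize-style division
		fmt.Printf("%5d / %d = %3d; magic form gives %3d\n", n, size, n/size, q)
	}
}
```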
@@ -8,6 +8,7 @@ package runtime
 
 import (
 	"internal/godebugs"
+	"internal/runtime/gc"
 	"unsafe"
 )
 
@@ -62,12 +63,12 @@ func initMetrics() {
 		return
 	}
 
-	sizeClassBuckets = make([]float64, _NumSizeClasses, _NumSizeClasses+1)
+	sizeClassBuckets = make([]float64, gc.NumSizeClasses, gc.NumSizeClasses+1)
 	// Skip size class 0 which is a stand-in for large objects, but large
 	// objects are tracked separately (and they actually get placed in
 	// the last bucket, not the first).
 	sizeClassBuckets[0] = 1 // The smallest allocation is 1 byte in size.
-	for i := 1; i < _NumSizeClasses; i++ {
+	for i := 1; i < gc.NumSizeClasses; i++ {
 		// Size classes have an inclusive upper-bound
 		// and exclusive lower bound (e.g. 48-byte size class is
 		// (32, 48]) whereas we want and inclusive lower-bound
@@ -79,7 +80,7 @@ func initMetrics() {
 		// value up to 2^53 and size classes are relatively small
 		// (nowhere near 2^48 even) so this will give us exact
 		// boundaries.
-		sizeClassBuckets[i] = float64(class_to_size[i] + 1)
+		sizeClassBuckets[i] = float64(gc.SizeClassToSize[i] + 1)
 	}
 	sizeClassBuckets = append(sizeClassBuckets, float64Inf())
 
@@ -615,8 +616,8 @@ func (a *heapStatsAggregate) compute() {
 		nf := a.smallFreeCount[i]
 		a.totalAllocs += na
 		a.totalFrees += nf
-		a.totalAllocated += na * uint64(class_to_size[i])
-		a.totalFreed += nf * uint64(class_to_size[i])
+		a.totalAllocated += na * uint64(gc.SizeClassToSize[i])
+		a.totalFreed += nf * uint64(gc.SizeClassToSize[i])
 	}
 	a.inObjects = a.totalAllocated - a.totalFreed
 	a.numObjects = a.totalAllocs - a.totalFrees
@@ -517,7 +517,7 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
 
 	trace := traceAcquire()
 	if trace.ok() {
-		trace.GCSweepSpan(s.npages * _PageSize)
+		trace.GCSweepSpan(s.npages * pageSize)
 		traceRelease(trace)
 	}
 
@@ -981,9 +981,9 @@ func gcPaceSweeper(trigger uint64) {
 		// concurrent sweep are less likely to leave pages
 		// unswept when GC starts.
 		heapDistance -= 1024 * 1024
-		if heapDistance < _PageSize {
+		if heapDistance < pageSize {
 			// Avoid setting the sweep ratio extremely high
-			heapDistance = _PageSize
+			heapDistance = pageSize
 		}
 		pagesSwept := mheap_.pagesSwept.Load()
 		pagesInUse := mheap_.pagesInUse.Load()
@@ -13,6 +13,7 @@ import (
 	"internal/cpu"
 	"internal/goarch"
 	"internal/runtime/atomic"
+	"internal/runtime/gc"
 	"internal/runtime/sys"
 	"unsafe"
 )
@@ -514,7 +515,7 @@ func (s *mspan) base() uintptr {
 }
 
 func (s *mspan) layout() (size, n, total uintptr) {
-	total = s.npages << _PageShift
+	total = s.npages << gc.PageShift
 	size = s.elemsize
 	if size > 0 {
 		n = total / size
@@ -576,7 +577,7 @@ func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
 type spanClass uint8
 
 const (
-	numSpanClasses = _NumSizeClasses << 1
+	numSpanClasses = gc.NumSizeClasses << 1
 	tinySpanClass  = spanClass(tinySizeClass<<1 | 1)
 )
 
@@ -1423,14 +1424,14 @@ func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base,
 		s.nelems = 1
 		s.divMul = 0
 	} else {
-		s.elemsize = uintptr(class_to_size[sizeclass])
+		s.elemsize = uintptr(gc.SizeClassToSize[sizeclass])
 		if !s.spanclass.noscan() && heapBitsInSpan(s.elemsize) {
 			// Reserve space for the pointer/scan bitmap at the end.
 			s.nelems = uint16((nbytes - (nbytes / goarch.PtrSize / 8)) / s.elemsize)
 		} else {
 			s.nelems = uint16(nbytes / s.elemsize)
 		}
-		s.divMul = class_to_divmagic[sizeclass]
+		s.divMul = gc.SizeClassToDivMagic[sizeclass]
 	}
 
 	// Initialize mark and allocation structures.
@@ -1589,13 +1590,13 @@ func (h *mheap) freeSpan(s *mspan) {
 		if msanenabled {
 			// Tell msan that this entire span is no longer in use.
 			base := unsafe.Pointer(s.base())
-			bytes := s.npages << _PageShift
+			bytes := s.npages << gc.PageShift
 			msanfree(base, bytes)
 		}
 		if asanenabled {
 			// Tell asan that this entire span is no longer in use.
 			base := unsafe.Pointer(s.base())
-			bytes := s.npages << _PageShift
+			bytes := s.npages << gc.PageShift
 			asanpoison(base, bytes)
 		}
 		h.freeSpanLocked(s, spanAllocHeap)
@@ -49,6 +49,7 @@ package runtime
 
 import (
 	"internal/runtime/atomic"
+	"internal/runtime/gc"
 	"unsafe"
 )
 
@@ -58,7 +59,7 @@ const (
 	pallocChunkPages    = 1 << logPallocChunkPages
 	pallocChunkBytes    = pallocChunkPages * pageSize
 	logPallocChunkPages = 9
-	logPallocChunkBytes = logPallocChunkPages + pageShift
+	logPallocChunkBytes = logPallocChunkPages + gc.PageShift
 
 	// The number of radix bits for each level.
 	//
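As a sanity check of the chunk constants above, with gc.PageShift = 13 from the generated file: logPallocChunkBytes = 9 + 13 = 22, so each palloc chunk spans 512 pages, or 4 MiB.

```go
package main

import "fmt"

func main() {
	const logPallocChunkPages = 9
	const pageShift = 13 // gc.PageShift
	fmt.Println(1<<logPallocChunkPages, "pages =",
		1<<(logPallocChunkPages+pageShift), "bytes") // 512 pages = 4194304 bytes
}
```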
@@ -9,6 +9,8 @@
 
 package runtime
 
+import "internal/runtime/gc"
+
 // Returns size of the memory block that mallocgc will allocate if you ask for the size,
 // minus any inline space for metadata.
 func roundupsize(size uintptr, noscan bool) (reqSize uintptr) {
@@ -20,10 +22,10 @@ func roundupsize(size uintptr, noscan bool) (reqSize uintptr) {
 		}
 		// (reqSize - size) is either mallocHeaderSize or 0. We need to subtract mallocHeaderSize
 		// from the result if we have one, since mallocgc will add it back in.
-		if reqSize <= smallSizeMax-8 {
-			return uintptr(class_to_size[size_to_class8[divRoundUp(reqSize, smallSizeDiv)]]) - (reqSize - size)
+		if reqSize <= gc.SmallSizeMax-8 {
+			return uintptr(gc.SizeClassToSize[gc.SizeToSizeClass8[divRoundUp(reqSize, gc.SmallSizeDiv)]]) - (reqSize - size)
 		}
-		return uintptr(class_to_size[size_to_class128[divRoundUp(reqSize-smallSizeMax, largeSizeDiv)]]) - (reqSize - size)
+		return uintptr(gc.SizeClassToSize[gc.SizeToSizeClass128[divRoundUp(reqSize-gc.SmallSizeMax, gc.LargeSizeDiv)]]) - (reqSize - size)
 	}
 	// Large object. Align reqSize up to the next page. Check for overflow.
 	reqSize += pageSize - 1
@@ -8,6 +8,7 @@ package runtime
 
 import (
 	"internal/runtime/atomic"
+	"internal/runtime/gc"
 	"unsafe"
 )
 
@@ -397,23 +398,23 @@ func readmemstats_m(stats *MemStats) {
 	nFree := consStats.largeFreeCount
 
 	// Collect per-sizeclass stats.
-	var bySize [_NumSizeClasses]struct {
+	var bySize [gc.NumSizeClasses]struct {
 		Size    uint32
 		Mallocs uint64
 		Frees   uint64
 	}
 	for i := range bySize {
-		bySize[i].Size = uint32(class_to_size[i])
+		bySize[i].Size = uint32(gc.SizeClassToSize[i])
 
 		// Malloc stats.
 		a := consStats.smallAllocCount[i]
-		totalAlloc += a * uint64(class_to_size[i])
+		totalAlloc += a * uint64(gc.SizeClassToSize[i])
 		nMalloc += a
 		bySize[i].Mallocs = a
 
 		// Free stats.
 		f := consStats.smallFreeCount[i]
-		totalFree += f * uint64(class_to_size[i])
+		totalFree += f * uint64(gc.SizeClassToSize[i])
 		nFree += f
 		bySize[i].Frees = f
 	}
@@ -678,13 +679,13 @@ type heapStatsDelta struct {
 	//
 	// These are all uint64 because they're cumulative, and could quickly wrap
 	// around otherwise.
-	tinyAllocCount  uint64                  // number of tiny allocations
-	largeAlloc      uint64                  // bytes allocated for large objects
-	largeAllocCount uint64                  // number of large object allocations
-	smallAllocCount [_NumSizeClasses]uint64 // number of allocs for small objects
-	largeFree       uint64                  // bytes freed for large objects (>maxSmallSize)
-	largeFreeCount  uint64                  // number of frees for large objects (>maxSmallSize)
-	smallFreeCount  [_NumSizeClasses]uint64 // number of frees for small objects (<=maxSmallSize)
+	tinyAllocCount  uint64                    // number of tiny allocations
+	largeAlloc      uint64                    // bytes allocated for large objects
+	largeAllocCount uint64                    // number of large object allocations
+	smallAllocCount [gc.NumSizeClasses]uint64 // number of allocs for small objects
+	largeFree       uint64                    // bytes freed for large objects (>maxSmallSize)
+	largeFreeCount  uint64                    // number of frees for large objects (>maxSmallSize)
+	smallFreeCount  [gc.NumSizeClasses]uint64 // number of frees for small objects (<=maxSmallSize)
 
 	// NOTE: This struct must be a multiple of 8 bytes in size because it
 	// is stored in an array. If it's not, atomic accesses to the above
|
@ -10,6 +10,7 @@ import (
|
||||
"internal/goarch"
|
||||
"internal/goos"
|
||||
"internal/runtime/atomic"
|
||||
"internal/runtime/gc"
|
||||
"internal/runtime/sys"
|
||||
"unsafe"
|
||||
)
|
||||
@@ -161,11 +162,11 @@ type stackpoolItem struct {
 // Global pool of large stack spans.
 var stackLarge struct {
 	lock mutex
-	free [heapAddrBits - pageShift]mSpanList // free lists by log_2(s.npages)
+	free [heapAddrBits - gc.PageShift]mSpanList // free lists by log_2(s.npages)
 }
 
 func stackinit() {
-	if _StackCacheSize&_PageMask != 0 {
+	if _StackCacheSize&pageMask != 0 {
 		throw("cache size must be a multiple of page size")
 	}
 	for i := range stackpool {
@@ -196,7 +197,7 @@ func stackpoolalloc(order uint8) gclinkptr {
 	lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
 	if s == nil {
 		// no free stacks. Allocate another span worth.
-		s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
+		s = mheap_.allocManual(_StackCacheSize>>gc.PageShift, spanAllocStack)
 		if s == nil {
 			throw("out of memory")
 		}
@@ -390,7 +391,7 @@ func stackalloc(n uint32) stack {
 		v = unsafe.Pointer(x)
 	} else {
 		var s *mspan
-		npage := uintptr(n) >> _PageShift
+		npage := uintptr(n) >> gc.PageShift
 		log2npage := stacklog2(npage)
 
 		// Try to get a stack from the large stack cache.
@@ -8,6 +8,7 @@ package runtime
 
 import (
 	"internal/abi"
+	"internal/runtime/gc"
 	"internal/runtime/sys"
 	"internal/trace/tracev2"
 )
@@ -38,7 +39,7 @@ func traceSnapshotMemory(gen uintptr) {
 	// Emit info.
 	w.varint(uint64(trace.minPageHeapAddr))
 	w.varint(uint64(pageSize))
-	w.varint(uint64(minHeapAlign))
+	w.varint(uint64(gc.MinHeapAlign))
 	w.varint(uint64(fixedStack))
 
 	// Finish writing the batch.
@@ -129,7 +130,7 @@ func (tl traceLocker) HeapObjectFree(addr uintptr) {
 
 // traceHeapObjectID creates a trace ID for a heap object at address addr.
 func traceHeapObjectID(addr uintptr) traceArg {
-	return traceArg(uint64(addr)-trace.minPageHeapAddr) / minHeapAlign
+	return traceArg(uint64(addr)-trace.minPageHeapAddr) / gc.MinHeapAlign
 }
 
 // GoroutineStackExists records that a goroutine stack already exists at address base with the provided size.
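Heap objects are at least gc.MinHeapAlign (8) bytes apart, so dividing an object's offset from the heap base by that alignment yields a dense, collision-free trace ID. A hedged sketch of the mapping; the base address here is illustrative, not a real trace value:

```go
package main

import "fmt"

func main() {
	const minHeapAlign = 8                  // gc.MinHeapAlign
	minPageHeapAddr := uint64(0xc000000000) // illustrative heap base
	for _, addr := range []uint64{0xc000000000, 0xc000000008, 0xc000000040} {
		fmt.Printf("addr %#x -> object ID %d\n", addr, (addr-minPageHeapAddr)/minHeapAlign)
	}
	// IDs: 0, 1, 8
}
```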