runtime: rename gothrow to throw
Rename "gothrow" to "throw" now that the C version of "throw" is no longer needed. This change is purely mechanical except in panic.go where the old version of "throw" has been deleted. sed -i "" 's/[[:<:]]gothrow[[:>:]]/throw/g' runtime/*.go Change-Id: Icf0752299c35958b92870a97111c67bcd9159dc3 Reviewed-on: https://go-review.googlesource.com/2150 Reviewed-by: Minux Ma <minux@golang.org> Reviewed-by: Dave Cheney <dave@cheney.net>
This commit is contained in:
parent ddef2d27fe
commit b2a950bb73
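Every hunk below is a call-site rename; behavior is unchanged. What the rename preserves is the runtime's split between throw, a fatal error that cannot be intercepted, and panic, which a deferred recover can stop (compare throw("chansend: spurious wakeup") with panic("send on closed channel") in the chansend hunk). A minimal user-level sketch of that difference, assuming a current toolchain and using sync.Mutex misuse as a stand-in for reaching the runtime's fatal path:

package main

import (
	"fmt"
	"sync"
)

func main() {
	// A panic unwinds the stack and can be stopped by a deferred recover.
	func() {
		defer func() { fmt.Println("recovered:", recover()) }()
		panic("send on closed channel") // the recoverable path
	}()

	// A runtime fatal error (a throw) cannot be recovered: the process
	// dies before the deferred recover below ever runs.
	defer func() { fmt.Println("never printed:", recover()) }()
	var mu sync.Mutex
	mu.Unlock() // fatal error: sync: unlock of unlocked mutex
}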
@@ -90,11 +90,11 @@ func cgocall(fn, arg unsafe.Pointer) {
 //go:nosplit
 func cgocall_errno(fn, arg unsafe.Pointer) int32 {
 	if !iscgo && GOOS != "solaris" && GOOS != "windows" {
-		gothrow("cgocall unavailable")
+		throw("cgocall unavailable")
 	}

 	if fn == nil {
-		gothrow("cgocall nil")
+		throw("cgocall nil")
 	}

 	if raceenabled {
@@ -161,7 +161,7 @@ func cmalloc(n uintptr) unsafe.Pointer {
 	args.n = uint64(n)
 	cgocall(_cgo_malloc, unsafe.Pointer(&args))
 	if args.ret == nil {
-		gothrow("C malloc failed")
+		throw("C malloc failed")
 	}
 	return args.ret
 }
@@ -218,7 +218,7 @@ func cgocallbackg1() {
 	sp := gp.m.g0.sched.sp
 	switch GOARCH {
 	default:
-		gothrow("cgocallbackg is unimplemented on arch")
+		throw("cgocallbackg is unimplemented on arch")
 	case "arm":
 		// On arm, stack frame is two words and there's a saved LR between
 		// SP and the stack frame and between the stack frame and the arguments.
@@ -253,7 +253,7 @@ func unwindm(restore *bool) {
 	sched := &mp.g0.sched
 	switch GOARCH {
 	default:
-		gothrow("unwindm not implemented")
+		throw("unwindm not implemented")
 	case "386", "amd64":
 		sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp))
 	case "arm":
@@ -264,12 +264,12 @@ func unwindm(restore *bool) {

 // called from assembly
 func badcgocallback() {
-	gothrow("misaligned stack in cgocallback")
+	throw("misaligned stack in cgocallback")
 }

 // called from (incomplete) assembly
 func cgounimpl() {
-	gothrow("cgo not implemented")
+	throw("cgo not implemented")
 }

 var racecgosync uint64 // represents possible synchronization in C code
@@ -26,10 +26,10 @@ func makechan(t *chantype, size int64) *hchan {

 	// compiler checks this but be safe.
 	if elem.size >= 1<<16 {
-		gothrow("makechan: invalid channel element type")
+		throw("makechan: invalid channel element type")
 	}
 	if hchanSize%maxAlign != 0 || elem.align > maxAlign {
-		gothrow("makechan: bad alignment")
+		throw("makechan: bad alignment")
 	}
 	if size < 0 || int64(uintptr(size)) != size || (elem.size > 0 && uintptr(size) > (_MaxMem-hchanSize)/uintptr(elem.size)) {
 		panic("makechan: size out of range")
@@ -95,7 +95,7 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin
 			return false
 		}
 		gopark(nil, nil, "chan send (nil chan)")
-		gothrow("unreachable")
+		throw("unreachable")
 	}

 	if debugChan {
@@ -180,12 +180,12 @@ func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uin

 	// someone woke us up.
 	if mysg != gp.waiting {
-		gothrow("G waiting list is corrupted!")
+		throw("G waiting list is corrupted!")
 	}
 	gp.waiting = nil
 	if gp.param == nil {
 		if c.closed == 0 {
-			gothrow("chansend: spurious wakeup")
+			throw("chansend: spurious wakeup")
 		}
 		panic("send on closed channel")
 	}
@@ -339,7 +339,7 @@ func chanrecv(t *chantype, c *hchan, ep unsafe.Pointer, block bool) (selected, r
 			return
 		}
 		gopark(nil, nil, "chan receive (nil chan)")
-		gothrow("unreachable")
+		throw("unreachable")
 	}

 	// Fast path: check for failed non-blocking operation without acquiring the lock.
@@ -416,7 +416,7 @@ func chanrecv(t *chantype, c *hchan, ep unsafe.Pointer, block bool) (selected, r

 	// someone woke us up
 	if mysg != gp.waiting {
-		gothrow("G waiting list is corrupted!")
+		throw("G waiting list is corrupted!")
 	}
 	gp.waiting = nil
 	if mysg.releasetime > 0 {
@@ -435,7 +435,7 @@ func chanrecv(t *chantype, c *hchan, ep unsafe.Pointer, block bool) (selected, r

 		lock(&c.lock)
 		if c.closed == 0 {
-			gothrow("chanrecv: spurious wakeup")
+			throw("chanrecv: spurious wakeup")
 		}
 		return recvclosed(c, ep)
 	}
@@ -20,7 +20,7 @@ func getenv(s *byte) *byte {
 func gogetenv(key string) string {
 	env := environ()
 	if env == nil {
-		gothrow("getenv before env init")
+		throw("getenv before env init")
 	}
 	for _, s := range environ() {
 		if len(s) > len(key) && s[len(key)] == '=' && s[:len(key)] == key {
@@ -158,7 +158,7 @@ func (b *bmap) setoverflow(t *maptype, ovf *bmap) {

 func makemap(t *maptype, hint int64) *hmap {
 	if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != uintptr(t.hmap.size) {
-		gothrow("bad hmap size")
+		throw("bad hmap size")
 	}

 	if hint < 0 || int64(int32(hint)) != hint {
@@ -167,41 +167,41 @@ func makemap(t *maptype, hint int64) *hmap {
 	}

 	if !ismapkey(t.key) {
-		gothrow("runtime.makemap: unsupported map key type")
+		throw("runtime.makemap: unsupported map key type")
 	}

 	// check compiler's and reflect's math
 	if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(ptrSize)) ||
 		t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
-		gothrow("key size wrong")
+		throw("key size wrong")
 	}
 	if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(ptrSize)) ||
 		t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
-		gothrow("value size wrong")
+		throw("value size wrong")
 	}

 	// invariants we depend on. We should probably check these at compile time
 	// somewhere, but for now we'll do it here.
 	if t.key.align > bucketCnt {
-		gothrow("key align too big")
+		throw("key align too big")
 	}
 	if t.elem.align > bucketCnt {
-		gothrow("value align too big")
+		throw("value align too big")
 	}
 	if uintptr(t.key.size)%uintptr(t.key.align) != 0 {
-		gothrow("key size not a multiple of key align")
+		throw("key size not a multiple of key align")
 	}
 	if uintptr(t.elem.size)%uintptr(t.elem.align) != 0 {
-		gothrow("value size not a multiple of value align")
+		throw("value size not a multiple of value align")
 	}
 	if bucketCnt < 8 {
-		gothrow("bucketsize too small for proper alignment")
+		throw("bucketsize too small for proper alignment")
 	}
 	if dataOffset%uintptr(t.key.align) != 0 {
-		gothrow("need padding in bucket (key)")
+		throw("need padding in bucket (key)")
 	}
 	if dataOffset%uintptr(t.elem.align) != 0 {
-		gothrow("need padding in bucket (value)")
+		throw("need padding in bucket (value)")
 	}

 	// find size parameter which will hold the requested # of elements
@@ -561,7 +561,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
 	}

 	if unsafe.Sizeof(hiter{})/ptrSize != 10 {
-		gothrow("hash_iter size incorrect") // see ../../cmd/gc/reflect.c
+		throw("hash_iter size incorrect") // see ../../cmd/gc/reflect.c
 	}
 	it.t = t
 	it.h = h
@@ -735,7 +735,7 @@ next:

 func hashGrow(t *maptype, h *hmap) {
 	if h.oldbuckets != nil {
-		gothrow("evacuation not done in time")
+		throw("evacuation not done in time")
 	}
 	oldbuckets := h.buckets
 	if checkgc {
@@ -796,7 +796,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
 				continue
 			}
 			if top < minTopHash {
-				gothrow("bad map state")
+				throw("bad map state")
 			}
 			k2 := k
 			if t.indirectkey {
@@ -222,7 +222,7 @@ func dumpbv(cbv *bitvector, offset uintptr) {
 	for i := uintptr(0); i < uintptr(bv.n); i += bitsPerPointer {
 		switch bv.bytedata[i/8] >> (i % 8) & 3 {
 		default:
-			gothrow("unexpected pointer bits")
+			throw("unexpected pointer bits")
 		case _BitsDead:
 			// BitsDead has already been processed in makeheapobjbv.
 			// We should only see it in stack maps, in which case we should continue processing.
@@ -392,7 +392,7 @@ func dumpgs() {
 		switch status {
 		default:
 			print("runtime: unexpected G.status ", hex(status), "\n")
-			gothrow("dumpgs in STW - bad status")
+			throw("dumpgs in STW - bad status")
 		case _Gdead:
 			// ok
 		case _Grunnable,
@@ -462,7 +462,7 @@ func dumpobjs() {
 		size := s.elemsize
 		n := (s.npages << _PageShift) / size
 		if n > uintptr(len(freemark)) {
-			gothrow("freemark array doesn't have enough entries")
+			throw("freemark array doesn't have enough entries")
 		}
 		for l := s.freelist; l.ptr() != nil; l = l.ptr().next {
 			freemark[(uintptr(l)-p)/size] = true
@@ -708,7 +708,7 @@ func makeheapobjbv(p uintptr, size uintptr) bitvector {
 		n := nptr*_BitsPerPointer/8 + 1
 		p := sysAlloc(n, &memstats.other_sys)
 		if p == nil {
-			gothrow("heapdump: out of memory")
+			throw("heapdump: out of memory")
 		}
 		tmpbuf = (*[1 << 30]byte)(p)[:n]
 	}
@@ -26,7 +26,7 @@ type fInterface interface {

 func getitab(inter *interfacetype, typ *_type, canfail bool) *itab {
 	if len(inter.mhdr) == 0 {
-		gothrow("internal error - misuse of itab")
+		throw("internal error - misuse of itab")
 	}

 	// easy case
@@ -114,7 +114,7 @@ search:
 	nextimethod:
 	}
 	if locked == 0 {
-		gothrow("invalid itab locking")
+		throw("invalid itab locking")
 	}
 	m.link = hash[h]
 	atomicstorep(unsafe.Pointer(&hash[h]), unsafe.Pointer(m))
@@ -14,7 +14,7 @@ func lfstackpush(head *uint64, node *lfnode) {
 	new := lfstackPack(node, node.pushcnt)
 	if node1, _ := lfstackUnpack(new); node1 != node {
 		println("runtime: lfstackpush invalid packing: node=", node, " cnt=", hex(node.pushcnt), " packed=", hex(new), " -> node=", node1, "\n")
-		gothrow("lfstackpush")
+		throw("lfstackpush")
 	}
 	for {
 		old := atomicload64(head)
@@ -43,7 +43,7 @@ func lock(l *mutex) {
 	gp := getg()

 	if gp.m.locks < 0 {
-		gothrow("runtime·lock: lock count")
+		throw("runtime·lock: lock count")
 	}
 	gp.m.locks++

@@ -102,7 +102,7 @@ func lock(l *mutex) {
 func unlock(l *mutex) {
 	v := xchg(key32(&l.key), mutex_unlocked)
 	if v == mutex_unlocked {
-		gothrow("unlock of unlocked lock")
+		throw("unlock of unlocked lock")
 	}
 	if v == mutex_sleeping {
 		futexwakeup(key32(&l.key), 1)
@@ -111,7 +111,7 @@ func unlock(l *mutex) {
 	gp := getg()
 	gp.m.locks--
 	if gp.m.locks < 0 {
-		gothrow("runtime·unlock: lock count")
+		throw("runtime·unlock: lock count")
 	}
 	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
 		gp.stackguard0 = stackPreempt
@@ -127,7 +127,7 @@ func notewakeup(n *note) {
 	old := xchg(key32(&n.key), 1)
 	if old != 0 {
 		print("notewakeup - double wakeup (", old, ")\n")
-		gothrow("notewakeup - double wakeup")
+		throw("notewakeup - double wakeup")
 	}
 	futexwakeup(key32(&n.key), 1)
 }
@@ -135,7 +135,7 @@ func notewakeup(n *note) {
 func notesleep(n *note) {
 	gp := getg()
 	if gp != gp.m.g0 {
-		gothrow("notesleep not on g0")
+		throw("notesleep not on g0")
 	}
 	for atomicload(key32(&n.key)) == 0 {
 		gp.m.blocked = true
@@ -181,7 +181,7 @@ func notetsleep_internal(n *note, ns int64) bool {
 func notetsleep(n *note, ns int64) bool {
 	gp := getg()
 	if gp != gp.m.g0 && gp.m.gcing == 0 {
-		gothrow("notetsleep not on g0")
+		throw("notetsleep not on g0")
 	}

 	return notetsleep_internal(n, ns)
@@ -192,7 +192,7 @@ func notetsleep(n *note, ns int64) bool {
 func notetsleepg(n *note, ns int64) bool {
 	gp := getg()
 	if gp == gp.m.g0 {
-		gothrow("notetsleepg on g0")
+		throw("notetsleepg on g0")
 	}

 	entersyscallblock(0)
@@ -34,7 +34,7 @@ const (
 func lock(l *mutex) {
 	gp := getg()
 	if gp.m.locks < 0 {
-		gothrow("runtime·lock: lock count")
+		throw("runtime·lock: lock count")
 	}
 	gp.m.locks++

@@ -112,7 +112,7 @@ func unlock(l *mutex) {
 	}
 	gp.m.locks--
 	if gp.m.locks < 0 {
-		gothrow("runtime·unlock: lock count")
+		throw("runtime·unlock: lock count")
 	}
 	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack
 		gp.stackguard0 = stackPreempt
@@ -140,7 +140,7 @@ func notewakeup(n *note) {
 		// Nothing was waiting. Done.
 	case v == locked:
 		// Two notewakeups! Not allowed.
-		gothrow("notewakeup - double wakeup")
+		throw("notewakeup - double wakeup")
 	default:
 		// Must be the waiting m. Wake it up.
 		semawakeup((*m)(unsafe.Pointer(v)))
@@ -150,7 +150,7 @@ func notewakeup(n *note) {
 func notesleep(n *note) {
 	gp := getg()
 	if gp != gp.m.g0 {
-		gothrow("notesleep not on g0")
+		throw("notesleep not on g0")
 	}
 	if gp.m.waitsema == 0 {
 		gp.m.waitsema = semacreate()
@@ -158,7 +158,7 @@ func notesleep(n *note) {
 	if !casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
 		// Must be locked (got wakeup).
 		if n.key != locked {
-			gothrow("notesleep - waitm out of sync")
+			throw("notesleep - waitm out of sync")
 		}
 		return
 	}
@@ -180,7 +180,7 @@ func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
 	if !casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
 		// Must be locked (got wakeup).
 		if n.key != locked {
-			gothrow("notetsleep - waitm out of sync")
+			throw("notetsleep - waitm out of sync")
 		}
 		return true
 	}
@@ -228,12 +228,12 @@ func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
 			// Grab it to avoid getting out of sync.
 			gp.m.blocked = true
 			if semasleep(-1) < 0 {
-				gothrow("runtime: unable to acquire - semaphore out of sync")
+				throw("runtime: unable to acquire - semaphore out of sync")
 			}
 			gp.m.blocked = false
 			return true
 		default:
-			gothrow("runtime: unexpected waitm - semaphore out of sync")
+			throw("runtime: unexpected waitm - semaphore out of sync")
 		}
 	}
 }
@@ -241,7 +241,7 @@ func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
 func notetsleep(n *note, ns int64) bool {
 	gp := getg()
 	if gp != gp.m.g0 && gp.m.gcing == 0 {
-		gothrow("notetsleep not on g0")
+		throw("notetsleep not on g0")
 	}
 	if gp.m.waitsema == 0 {
 		gp.m.waitsema = semacreate()
@@ -254,7 +254,7 @@ func notetsleep(n *note, ns int64) bool {
 func notetsleepg(n *note, ns int64) bool {
 	gp := getg()
 	if gp == gp.m.g0 {
-		gothrow("notetsleepg on g0")
+		throw("notetsleepg on g0")
 	}
 	if gp.m.waitsema == 0 {
 		gp.m.waitsema = semacreate()
@@ -49,7 +49,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
 	size0 := size

 	if flags&flagNoScan == 0 && typ == nil {
-		gothrow("malloc missing type")
+		throw("malloc missing type")
 	}

 	// This function must be atomic wrt GC, but for performance reasons
@@ -60,7 +60,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
 	if debugMalloc {
 		mp := acquirem()
 		if mp.mallocing != 0 {
-			gothrow("malloc deadlock")
+			throw("malloc deadlock")
 		}
 		mp.mallocing = 1
 		if mp.curg != nil {
@@ -123,7 +123,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
 			if debugMalloc {
 				mp := acquirem()
 				if mp.mallocing == 0 {
-					gothrow("bad malloc")
+					throw("bad malloc")
 				}
 				mp.mallocing = 0
 				if mp.curg != nil {
@@ -222,7 +222,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
 	shift := (off % wordsPerBitmapByte) * gcBits
 	if debugMalloc && ((*xbits>>shift)&(bitMask|bitPtrMask)) != bitBoundary {
 		println("runtime: bits =", (*xbits>>shift)&(bitMask|bitPtrMask))
-		gothrow("bad bits in markallocated")
+		throw("bad bits in markallocated")
 	}

 	var ti, te uintptr
@@ -242,7 +242,7 @@ func mallocgc(size uintptr, typ *_type, flags uint32) unsafe.Pointer {
 		masksize++ // unroll flag in the beginning
 		if masksize > maxGCMask && typ.gc[1] != 0 {
 			// write barriers have not been updated to deal with this case yet.
-			gothrow("maxGCMask too small for now")
+			throw("maxGCMask too small for now")
 			// If the mask is too large, unroll the program directly
 			// into the GC bitmap. It's 7 times slower than copying
 			// from the pre-unrolled mask, but saves 1/16 of type size
@@ -315,7 +315,7 @@ marked:
 	if debugMalloc {
 		mp := acquirem()
 		if mp.mallocing == 0 {
-			gothrow("bad malloc")
+			throw("bad malloc")
 		}
 		mp.mallocing = 0
 		if mp.curg != nil {
@@ -360,7 +360,7 @@ func loadPtrMask(typ *_type) []uint8 {
 	masksize++ // unroll flag in the beginning
 	if masksize > maxGCMask && typ.gc[1] != 0 {
 		// write barriers have not been updated to deal with this case yet.
-		gothrow("maxGCMask too small for now")
+		throw("maxGCMask too small for now")
 	}
 	ptrmask = (*uint8)(unsafe.Pointer(uintptr(typ.gc[0])))
 	// Check whether the program is already unrolled
@@ -519,7 +519,7 @@ func gogc(force int32) {
 	}

 	if mp != acquirem() {
-		gothrow("gogc: rescheduled")
+		throw("gogc: rescheduled")
 	}

 	clearpools()
@@ -738,14 +738,14 @@ func SetFinalizer(obj interface{}, finalizer interface{}) {
 	e := (*eface)(unsafe.Pointer(&obj))
 	etyp := e._type
 	if etyp == nil {
-		gothrow("runtime.SetFinalizer: first argument is nil")
+		throw("runtime.SetFinalizer: first argument is nil")
 	}
 	if etyp.kind&kindMask != kindPtr {
-		gothrow("runtime.SetFinalizer: first argument is " + *etyp._string + ", not pointer")
+		throw("runtime.SetFinalizer: first argument is " + *etyp._string + ", not pointer")
 	}
 	ot := (*ptrtype)(unsafe.Pointer(etyp))
 	if ot.elem == nil {
-		gothrow("nil elem type!")
+		throw("nil elem type!")
 	}

 	// find the containing object
@@ -771,14 +771,14 @@ func SetFinalizer(obj interface{}, finalizer interface{}) {
 			uintptr(unsafe.Pointer(&noptrbss)) <= uintptr(e.data) && uintptr(e.data) < uintptr(unsafe.Pointer(&enoptrbss)) {
 			return
 		}
-		gothrow("runtime.SetFinalizer: pointer not in allocated block")
+		throw("runtime.SetFinalizer: pointer not in allocated block")
 	}

 	if e.data != base {
 		// As an implementation detail we allow to set finalizers for an inner byte
 		// of an object if it could come from tiny alloc (see mallocgc for details).
 		if ot.elem == nil || ot.elem.kind&kindNoPointers == 0 || ot.elem.size >= maxTinySize {
-			gothrow("runtime.SetFinalizer: pointer not at beginning of allocated block")
+			throw("runtime.SetFinalizer: pointer not at beginning of allocated block")
 		}
 	}

@@ -793,12 +793,12 @@ func SetFinalizer(obj interface{}, finalizer interface{}) {
 	}

 	if ftyp.kind&kindMask != kindFunc {
-		gothrow("runtime.SetFinalizer: second argument is " + *ftyp._string + ", not a function")
+		throw("runtime.SetFinalizer: second argument is " + *ftyp._string + ", not a function")
 	}
 	ft := (*functype)(unsafe.Pointer(ftyp))
 	ins := *(*[]*_type)(unsafe.Pointer(&ft.in))
 	if ft.dotdotdot || len(ins) != 1 {
-		gothrow("runtime.SetFinalizer: cannot pass " + *etyp._string + " to finalizer " + *ftyp._string)
+		throw("runtime.SetFinalizer: cannot pass " + *etyp._string + " to finalizer " + *ftyp._string)
 	}
 	fint := ins[0]
 	switch {
@@ -821,7 +821,7 @@ func SetFinalizer(obj interface{}, finalizer interface{}) {
 			goto okarg
 		}
 	}
-	gothrow("runtime.SetFinalizer: cannot pass " + *etyp._string + " to finalizer " + *ftyp._string)
+	throw("runtime.SetFinalizer: cannot pass " + *etyp._string + " to finalizer " + *ftyp._string)
 okarg:
 	// compute size needed for return parameters
 	nret := uintptr(0)
@@ -835,7 +835,7 @@ okarg:

 	systemstack(func() {
 		if !addfinalizer(e.data, (*funcval)(f.data), nret, fint, ot) {
-			gothrow("runtime.SetFinalizer: finalizer already set")
+			throw("runtime.SetFinalizer: finalizer already set")
 		}
 	})
 }
@@ -933,7 +933,7 @@ func runfinq() {
 				}

 				if f.fint == nil {
-					gothrow("missing type in runfinq")
+					throw("missing type in runfinq")
 				}
 				switch f.fint.kind & kindMask {
 				case kindPtr:
@@ -950,7 +950,7 @@ func runfinq() {
 					*(*fInterface)(frame) = assertE2I(ityp, *(*interface{})(frame))
 				}
 				default:
-					gothrow("bad kind in runfinq")
+					throw("bad kind in runfinq")
 				}
 				reflectcall(unsafe.Pointer(f.fn), frame, uint32(framesz), uint32(framesz))

@@ -988,10 +988,10 @@ func persistentalloc(size, align uintptr, stat *uint64) unsafe.Pointer {

 	if align != 0 {
 		if align&(align-1) != 0 {
-			gothrow("persistentalloc: align is not a power of 2")
+			throw("persistentalloc: align is not a power of 2")
 		}
 		if align > _PageSize {
-			gothrow("persistentalloc: align is too large")
+			throw("persistentalloc: align is too large")
 		}
 	} else {
 		align = 8
@@ -1007,7 +1007,7 @@ func persistentalloc(size, align uintptr, stat *uint64) unsafe.Pointer {
 		persistent.pos = sysAlloc(chunk, &memstats.other_sys)
 		if persistent.pos == nil {
 			unlock(&persistent.lock)
-			gothrow("runtime: cannot allocate memory")
+			throw("runtime: cannot allocate memory")
 		}
 		persistent.end = add(persistent.pos, chunk)
 	}
@@ -89,7 +89,7 @@ func mallocinit() {
 	initSizes()

 	if class_to_size[_TinySizeClass] != _TinySize {
-		gothrow("bad TinySizeClass")
+		throw("bad TinySizeClass")
 	}

 	var p, bitmapSize, spansSize, pSize, limit uintptr
@@ -196,7 +196,7 @@ func mallocinit() {
 			}
 		}
 		if p == 0 {
-			gothrow("runtime: cannot reserve arena virtual address space")
+			throw("runtime: cannot reserve arena virtual address space")
 		}
 	}

@@ -214,7 +214,7 @@ func mallocinit() {

 	if mheap_.arena_start&(_PageSize-1) != 0 {
 		println("bad pagesize", hex(p), hex(p1), hex(spansSize), hex(bitmapSize), hex(_PageSize), "start", hex(mheap_.arena_start))
-		gothrow("misrounded allocation in mallocinit")
+		throw("misrounded allocation in mallocinit")
 	}

 	// Initialize the rest of the allocator.
@@ -262,7 +262,7 @@ func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
 		}

 		if uintptr(p)&(_PageSize-1) != 0 {
-			gothrow("misrounded allocation in MHeap_SysAlloc")
+			throw("misrounded allocation in MHeap_SysAlloc")
 		}
 		return (unsafe.Pointer)(p)
 	}
@@ -302,7 +302,7 @@ func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
 	}

 	if uintptr(p)&(_PageSize-1) != 0 {
-		gothrow("misrounded allocation in MHeap_SysAlloc")
+		throw("misrounded allocation in MHeap_SysAlloc")
 	}
 	return (unsafe.Pointer)(p)
 }
@@ -313,7 +313,7 @@ func largeAlloc(size uintptr, flag uint32) *mspan {
 	// print("largeAlloc size=", size, "\n")

 	if size+_PageSize < size {
-		gothrow("out of memory")
+		throw("out of memory")
 	}
 	npages := size >> _PageShift
 	if size&_PageMask != 0 {
@@ -321,7 +321,7 @@ func largeAlloc(size uintptr, flag uint32) *mspan {
 	}
 	s := mHeap_Alloc(&mheap_, npages, 0, true, flag&_FlagNoZero == 0)
 	if s == nil {
-		gothrow("out of memory")
+		throw("out of memory")
 	}
 	s.limit = uintptr(s.start)<<_PageShift + size
 	v := unsafe.Pointer(uintptr(s.start) << _PageShift)
@@ -60,7 +60,7 @@ func mCache_Refill(c *mcache, sizeclass int32) *mspan {
 	// Return the current cached span to the central lists.
 	s := c.alloc[sizeclass]
 	if s.freelist.ptr() != nil {
-		gothrow("refill on a nonempty span")
+		throw("refill on a nonempty span")
 	}
 	if s != &emptymspan {
 		s.incache = false
@@ -69,11 +69,11 @@ func mCache_Refill(c *mcache, sizeclass int32) *mspan {
 	// Get a new cached span from the central lists.
 	s = mCentral_CacheSpan(&mheap_.central[sizeclass].mcentral)
 	if s == nil {
-		gothrow("out of memory")
+		throw("out of memory")
 	}
 	if s.freelist.ptr() == nil {
 		println(s.ref, (s.npages<<_PageShift)/s.elemsize)
-		gothrow("empty span")
+		throw("empty span")
 	}
 	c.alloc[sizeclass] = s
 	_g_.m.locks--
@@ -88,10 +88,10 @@ havespan:
 	cap := int32((s.npages << _PageShift) / s.elemsize)
 	n := cap - int32(s.ref)
 	if n == 0 {
-		gothrow("empty span")
+		throw("empty span")
 	}
 	if s.freelist.ptr() == nil {
-		gothrow("freelist empty")
+		throw("freelist empty")
 	}
 	s.incache = true
 	return s
@@ -104,7 +104,7 @@ func mCentral_UncacheSpan(c *mcentral, s *mspan) {
 	s.incache = false

 	if s.ref == 0 {
-		gothrow("uncaching full span")
+		throw("uncaching full span")
 	}

 	cap := int32((s.npages << _PageShift) / s.elemsize)
@@ -124,7 +124,7 @@ func mCentral_UncacheSpan(c *mcentral, s *mspan) {
 // caller takes care of it.
 func mCentral_FreeSpan(c *mcentral, s *mspan, n int32, start gclinkptr, end gclinkptr, preserve bool) bool {
 	if s.incache {
-		gothrow("freespan into cached span")
+		throw("freespan into cached span")
 	}

 	// Add the objects back to s's free list.
@@ -137,7 +137,7 @@ func mCentral_FreeSpan(c *mcentral, s *mspan, n int32, start gclinkptr, end gcli
 		// preserve is set only when called from MCentral_CacheSpan above,
 		// the span must be in the empty list.
 		if s.next == nil {
-			gothrow("can't preserve unlinked span")
+			throw("can't preserve unlinked span")
 		}
 		atomicstore(&s.sweepgen, mheap_.sweepgen)
 		return false
@@ -194,7 +194,7 @@ func mCentral_Grow(c *mcentral) *mspan {
 		tail = gclinkptr(p)
 	}
 	if s.freelist.ptr() != nil {
-		gothrow("freelist not empty")
+		throw("freelist not empty")
 	}
 	tail.ptr().next = 0
 	s.freelist = head
@@ -69,7 +69,7 @@ func init() {
 	var memStats MemStats
 	if sizeof_C_MStats != unsafe.Sizeof(memStats) {
 		println(sizeof_C_MStats, unsafe.Sizeof(memStats))
-		gothrow("MStats vs MemStatsType size mismatch")
+		throw("MStats vs MemStatsType size mismatch")
 	}
 }

@@ -69,20 +69,20 @@ func sysMap(v unsafe.Pointer, n uintptr, reserved bool, stat *uint64) {
 		}
 		p := mmap(v, n, _PROT_READ|_PROT_WRITE, flags, -1, 0)
 		if uintptr(p) == _ENOMEM {
-			gothrow("runtime: out of memory")
+			throw("runtime: out of memory")
 		}
 		if p != v {
 			print("runtime: address space conflict: map(", v, ") = ", p, "\n")
-			gothrow("runtime: address space conflict")
+			throw("runtime: address space conflict")
 		}
 		return
 	}

 	p := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
 	if uintptr(p) == _ENOMEM {
-		gothrow("runtime: out of memory")
+		throw("runtime: out of memory")
 	}
 	if p != v {
-		gothrow("runtime: cannot map pages in arena address space")
+		throw("runtime: cannot map pages in arena address space")
 	}
 }
@@ -50,9 +50,9 @@ func sysMap(v unsafe.Pointer, n uintptr, reserved bool, stat *uint64) {
 	xadd64(stat, int64(n))
 	p := (unsafe.Pointer)(mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0))
 	if uintptr(p) == _ENOMEM {
-		gothrow("runtime: out of memory")
+		throw("runtime: out of memory")
 	}
 	if p != v {
-		gothrow("runtime: cannot map pages in arena address space")
+		throw("runtime: cannot map pages in arena address space")
 	}
 }
@@ -116,20 +116,20 @@ func sysMap(v unsafe.Pointer, n uintptr, reserved bool, stat *uint64) {
 	if !reserved {
 		p := mmap_fixed(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)
 		if uintptr(p) == _ENOMEM {
-			gothrow("runtime: out of memory")
+			throw("runtime: out of memory")
 		}
 		if p != v {
 			print("runtime: address space conflict: map(", v, ") = ", p, "\n")
-			gothrow("runtime: address space conflict")
+			throw("runtime: address space conflict")
 		}
 		return
 	}

 	p := mmap(v, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_FIXED|_MAP_PRIVATE, -1, 0)
 	if uintptr(p) == _ENOMEM {
-		gothrow("runtime: out of memory")
+		throw("runtime: out of memory")
 	}
 	if p != v {
-		gothrow("runtime: cannot map pages in arena address space")
+		throw("runtime: cannot map pages in arena address space")
 	}
 }
@@ -56,7 +56,7 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 			small &^= 4096 - 1
 		}
 		if small < 4096 {
-			gothrow("runtime: failed to decommit pages")
+			throw("runtime: failed to decommit pages")
 		}
 		v = add(v, small)
 		n -= small
@@ -66,7 +66,7 @@ func sysUnused(v unsafe.Pointer, n uintptr) {
 func sysUsed(v unsafe.Pointer, n uintptr) {
 	r := stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_COMMIT, _PAGE_READWRITE)
 	if r != uintptr(v) {
-		gothrow("runtime: failed to commit pages")
+		throw("runtime: failed to commit pages")
 	}

 	// Commit failed. See SysUnused.
@@ -77,7 +77,7 @@ func sysUsed(v unsafe.Pointer, n uintptr) {
 			small &^= 4096 - 1
 		}
 		if small < 4096 {
-			gothrow("runtime: failed to decommit pages")
+			throw("runtime: failed to decommit pages")
 		}
 		v = add(v, small)
 		n -= small
@@ -88,7 +88,7 @@ func sysFree(v unsafe.Pointer, n uintptr, stat *uint64) {
 	xadd64(stat, -int64(n))
 	r := stdcall3(_VirtualFree, uintptr(v), 0, _MEM_RELEASE)
 	if r == 0 {
-		gothrow("runtime: failed to release pages")
+		throw("runtime: failed to release pages")
 	}
 }

@@ -114,6 +114,6 @@ func sysMap(v unsafe.Pointer, n uintptr, reserved bool, stat *uint64) {
 	xadd64(stat, int64(n))
 	p := stdcall4(_VirtualAlloc, uintptr(v), n, _MEM_COMMIT, _PAGE_READWRITE)
 	if p != uintptr(v) {
-		gothrow("runtime: cannot map pages in arena address space")
+		throw("runtime: cannot map pages in arena address space")
 	}
 }
@@ -26,7 +26,7 @@ func fixAlloc_Init(f *fixalloc, size uintptr, first func(unsafe.Pointer, unsafe.
 func fixAlloc_Alloc(f *fixalloc) unsafe.Pointer {
 	if f.size == 0 {
 		print("runtime: use of FixAlloc_Alloc before FixAlloc_Init\n")
-		gothrow("runtime: internal error")
+		throw("runtime: internal error")
 	}

 	if f.list != nil {
@@ -307,7 +307,7 @@ func objectstart(b uintptr, mbits *markbits) uintptr {
 				print(" s.start=", hex(s.start<<_PageShift), " s.limit=", hex(s.limit), " s.state=", s.state, "\n")
 			}
 			printunlock()
-			gothrow("objectstart: bad pointer in unexpected span")
+			throw("objectstart: bad pointer in unexpected span")
 		}
 		return 0
 	}
@@ -320,7 +320,7 @@ func objectstart(b uintptr, mbits *markbits) uintptr {
 		}
 		if p == obj {
 			print("runtime: failed to find block beginning for ", hex(p), " s=", hex(s.start*_PageSize), " s.limit=", hex(s.limit), "\n")
-			gothrow("failed to find block beginning")
+			throw("failed to find block beginning")
 		}
 		obj = p
 	}
@@ -363,7 +363,7 @@ func docheckmark(mbits *markbits) {
 //go:nowritebarrier
 func ismarked(mbits *markbits) bool {
 	if mbits.bits&bitBoundary != bitBoundary {
-		gothrow("ismarked: bits should have boundary bit set")
+		throw("ismarked: bits should have boundary bit set")
 	}
 	return mbits.bits&bitMarked == bitMarked
 }
@@ -372,7 +372,7 @@ func ismarked(mbits *markbits) bool {
 //go:nowritebarrier
 func ischeckmarked(mbits *markbits) bool {
 	if mbits.bits&bitBoundary != bitBoundary {
-		gothrow("ischeckmarked: bits should have boundary bit set")
+		throw("ischeckmarked: bits should have boundary bit set")
 	}
 	return mbits.tbits == _BitsScalarMarked || mbits.tbits == _BitsPointerMarked
 }
@@ -381,10 +381,10 @@ func ischeckmarked(mbits *markbits) bool {
 //go:nowritebarrier
 func gcmarknewobject_m(obj uintptr) {
 	if gcphase != _GCmarktermination {
-		gothrow("marking new object while not in mark termination phase")
+		throw("marking new object while not in mark termination phase")
 	}
 	if checkmark { // The world should be stopped so this should not happen.
-		gothrow("gcmarknewobject called while doing checkmark")
+		throw("gcmarknewobject called while doing checkmark")
 	}

 	var mbits markbits
@@ -412,7 +412,7 @@ func gcmarknewobject_m(obj uintptr) {
 func greyobject(obj uintptr, mbits *markbits, wbuf *workbuf) *workbuf {
 	// obj should be start of allocation, and so must be at least pointer-aligned.
 	if obj&(ptrSize-1) != 0 {
-		gothrow("greyobject: obj not pointer-aligned")
+		throw("greyobject: obj not pointer-aligned")
 	}

 	if checkmark {
@@ -435,7 +435,7 @@ func greyobject(obj uintptr, mbits *markbits, wbuf *workbuf) *workbuf {
 					print(" *(obj+", i*ptrSize, ") = ", hex(*(*uintptr)(unsafe.Pointer(obj + uintptr(i)*ptrSize))), "\n")
 				}
 			}
-			gothrow("checkmark found unmarked object")
+			throw("checkmark found unmarked object")
 		}
 		if ischeckmarked(mbits) {
 			return wbuf
@@ -443,7 +443,7 @@ func greyobject(obj uintptr, mbits *markbits, wbuf *workbuf) *workbuf {
 		docheckmark(mbits)
 		if !ischeckmarked(mbits) {
 			print("mbits xbits=", hex(mbits.xbits), " bits=", hex(mbits.bits), " tbits=", hex(mbits.tbits), " shift=", mbits.shift, "\n")
-			gothrow("docheckmark and ischeckmarked disagree")
+			throw("docheckmark and ischeckmarked disagree")
 		}
 	} else {
 		// If marked we have nothing to do.
@@ -521,7 +521,7 @@ func scanobject(b, n uintptr, ptrmask *uint8, wbuf *workbuf) *workbuf {
 			// Consult GC bitmap.
 			bits = uintptr(*(*byte)(ptrbitp))
 			if wordsPerBitmapByte != 2 {
-				gothrow("alg doesn't work for wordsPerBitmapByte != 2")
+				throw("alg doesn't work for wordsPerBitmapByte != 2")
 			}
 			j := (uintptr(b) + i) / ptrSize & 1 // j indicates upper nibble or lower nibble
 			bits >>= gcBits * j
@@ -546,7 +546,7 @@ func scanobject(b, n uintptr, ptrmask *uint8, wbuf *workbuf) *workbuf {

 		if bits&_BitsPointer != _BitsPointer {
 			print("gc checkmark=", checkmark, " b=", hex(b), " ptrmask=", ptrmask, " mbits.bitp=", mbits.bitp, " mbits.xbits=", hex(mbits.xbits), " bits=", hex(bits), "\n")
-			gothrow("unexpected garbage collection bits")
+			throw("unexpected garbage collection bits")
 		}

 		obj := *(*uintptr)(unsafe.Pointer(b + i))
@@ -582,7 +582,7 @@ func scanblock(b, n uintptr, ptrmask *uint8) {
 	if gcphase == _GCscan {
 		if inheap(b) && ptrmask == nil {
 			// b is in heap, we are in GCscan so there should be a ptrmask.
-			gothrow("scanblock: In GCscan phase and inheap is true.")
+			throw("scanblock: In GCscan phase and inheap is true.")
 		}
 		// GCscan only goes one level deep since mark wb not turned on.
 		putpartial(wbuf)
@@ -590,7 +590,7 @@ func scanblock(b, n uintptr, ptrmask *uint8) {
 		}
 	}
 	if gcphase == _GCscan {
-		gothrow("scanblock: In GCscan phase but no b passed in.")
+		throw("scanblock: In GCscan phase but no b passed in.")
 	}

 	keepworking := b == 0
@@ -611,7 +611,7 @@ func scanblock(b, n uintptr, ptrmask *uint8) {
 		}

 		if wbuf.nobj <= 0 {
-			gothrow("runtime:scanblock getfull returns empty buffer")
+			throw("runtime:scanblock getfull returns empty buffer")
 		}
 	}

@@ -656,7 +656,7 @@ func markroot(desc *parfor, i uint32) {
 		if !checkmark && s.sweepgen != sg {
 			// sweepgen was updated (+2) during non-checkmark GC pass
 			print("sweep ", s.sweepgen, " ", sg, "\n")
-			gothrow("gc: unswept span")
+			throw("gc: unswept span")
 		}
 		for sp := s.specials; sp != nil; sp = sp.next {
 			if sp.kind != _KindSpecialFinalizer {
@@ -682,7 +682,7 @@ func markroot(desc *parfor, i uint32) {
 	default:
 		// the rest is scanning goroutine stacks
 		if uintptr(i-_RootCount) >= allglen {
-			gothrow("markroot: bad index")
+			throw("markroot: bad index")
 		}
 		gp := allgs[i-_RootCount]

@@ -743,7 +743,7 @@ func getempty(b *workbuf) *workbuf {
 	if b != nil && b.nobj != 0 {
 		_g_ := getg()
 		print("m", _g_.m.id, ": getempty: popped b=", b, " with non-zero b.nobj=", b.nobj, "\n")
-		gothrow("getempty: workbuffer not empty, b->nobj not 0")
+		throw("getempty: workbuffer not empty, b->nobj not 0")
 	}
 	if b == nil {
 		b = (*workbuf)(persistentalloc(unsafe.Sizeof(*b), _CacheLineSize, &memstats.gc_sys))
@@ -755,7 +755,7 @@ func getempty(b *workbuf) *workbuf {
 //go:nowritebarrier
 func putempty(b *workbuf) {
 	if b.nobj != 0 {
-		gothrow("putempty: b->nobj not 0")
+		throw("putempty: b->nobj not 0")
 	}
 	lfstackpush(&work.empty, &b.node)
 }
@@ -763,7 +763,7 @@ func putempty(b *workbuf) {
 //go:nowritebarrier
 func putfull(b *workbuf) {
 	if b.nobj <= 0 {
-		gothrow("putfull: b->nobj <= 0")
+		throw("putfull: b->nobj <= 0")
 	}
 	lfstackpush(&work.full, &b.node)
 }
@@ -789,7 +789,7 @@ func putpartial(b *workbuf) {
 		lfstackpush(&work.full, &b.node)
 	} else {
 		print("b=", b, " b.nobj=", b.nobj, " len(b.obj)=", len(b.obj), "\n")
-		gothrow("putpartial: bad Workbuf b.nobj")
+		throw("putpartial: bad Workbuf b.nobj")
 	}
 }

@@ -869,7 +869,7 @@ func handoff(b *workbuf) *workbuf {
 //go:nowritebarrier
 func stackmapdata(stkmap *stackmap, n int32) bitvector {
 	if n < 0 || n >= stkmap.n {
-		gothrow("stackmapdata: index out of range")
+		throw("stackmapdata: index out of range")
 	}
 	return bitvector{stkmap.nbit, (*byte)(add(unsafe.Pointer(&stkmap.bytedata), uintptr(n*((stkmap.nbit+31)/32*4))))}
 }
@@ -910,14 +910,14 @@ func scanframe(frame *stkframe, unused unsafe.Pointer) bool {
 		stkmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
 		if stkmap == nil || stkmap.n <= 0 {
 			print("runtime: frame ", gofuncname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
-			gothrow("missing stackmap")
+			throw("missing stackmap")
 		}

 		// Locals bitmap information, scan just the pointers in locals.
 		if pcdata < 0 || pcdata >= stkmap.n {
 			// don't know where we are
 			print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " locals stack map entries for ", gofuncname(f), " (targetpc=", targetpc, ")\n")
-			gothrow("scanframe: bad symbol table")
+			throw("scanframe: bad symbol table")
 		}
 		bv := stackmapdata(stkmap, pcdata)
 		size = (uintptr(bv.n) * ptrSize) / bitsPerPointer
size = (uintptr(bv.n) * ptrSize) / bitsPerPointer
|
||||||
@ -933,12 +933,12 @@ func scanframe(frame *stkframe, unused unsafe.Pointer) bool {
|
|||||||
stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
|
stkmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
|
||||||
if stkmap == nil || stkmap.n <= 0 {
|
if stkmap == nil || stkmap.n <= 0 {
|
||||||
print("runtime: frame ", gofuncname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
|
print("runtime: frame ", gofuncname(f), " untyped args ", hex(frame.argp), "+", hex(frame.arglen), "\n")
|
||||||
gothrow("missing stackmap")
|
throw("missing stackmap")
|
||||||
}
|
}
|
||||||
if pcdata < 0 || pcdata >= stkmap.n {
|
if pcdata < 0 || pcdata >= stkmap.n {
|
||||||
// don't know where we are
|
// don't know where we are
|
||||||
print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " args stack map entries for ", gofuncname(f), " (targetpc=", targetpc, ")\n")
|
print("runtime: pcdata is ", pcdata, " and ", stkmap.n, " args stack map entries for ", gofuncname(f), " (targetpc=", targetpc, ")\n")
|
||||||
gothrow("scanframe: bad symbol table")
|
throw("scanframe: bad symbol table")
|
||||||
}
|
}
|
||||||
bv = stackmapdata(stkmap, pcdata)
|
bv = stackmapdata(stkmap, pcdata)
|
||||||
}
|
}
|
||||||
@ -952,28 +952,28 @@ func scanstack(gp *g) {
|
|||||||
|
|
||||||
if readgstatus(gp)&_Gscan == 0 {
|
if readgstatus(gp)&_Gscan == 0 {
|
||||||
print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
|
print("runtime:scanstack: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", hex(readgstatus(gp)), "\n")
|
||||||
gothrow("scanstack - bad status")
|
throw("scanstack - bad status")
|
||||||
}
|
}
|
||||||
|
|
||||||
switch readgstatus(gp) &^ _Gscan {
|
switch readgstatus(gp) &^ _Gscan {
|
||||||
default:
|
default:
|
||||||
print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
|
print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
|
||||||
gothrow("mark - bad status")
|
throw("mark - bad status")
|
||||||
case _Gdead:
|
case _Gdead:
|
||||||
return
|
return
|
||||||
case _Grunning:
|
case _Grunning:
|
||||||
print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
|
print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
|
||||||
gothrow("scanstack: goroutine not stopped")
|
throw("scanstack: goroutine not stopped")
|
||||||
case _Grunnable, _Gsyscall, _Gwaiting:
|
case _Grunnable, _Gsyscall, _Gwaiting:
|
||||||
// ok
|
// ok
|
||||||
}
|
}
|
||||||
|
|
||||||
if gp == getg() {
|
if gp == getg() {
|
||||||
gothrow("can't scan our own stack")
|
throw("can't scan our own stack")
|
||||||
}
|
}
|
||||||
mp := gp.m
|
mp := gp.m
|
||||||
if mp != nil && mp.helpgc != 0 {
|
if mp != nil && mp.helpgc != 0 {
|
||||||
gothrow("can't scan gchelper stack")
|
throw("can't scan gchelper stack")
|
||||||
}
|
}
|
||||||
|
|
||||||
gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)
|
gentraceback(^uintptr(0), ^uintptr(0), 0, gp, 0, nil, 0x7fffffff, scanframe, nil, 0)
|
||||||
@ -1010,7 +1010,7 @@ func shaded(slot uintptr) bool {
|
|||||||
//go:nowritebarrier
|
//go:nowritebarrier
|
||||||
func shade(b uintptr) {
|
func shade(b uintptr) {
|
||||||
if !inheap(b) {
|
if !inheap(b) {
|
||||||
gothrow("shade: passed an address not in the heap")
|
throw("shade: passed an address not in the heap")
|
||||||
}
|
}
|
||||||
|
|
||||||
wbuf := getpartialorempty()
|
wbuf := getpartialorempty()
|
||||||
@ -1062,7 +1062,7 @@ func shade(b uintptr) {
|
|||||||
func gcmarkwb_m(slot *uintptr, ptr uintptr) {
|
func gcmarkwb_m(slot *uintptr, ptr uintptr) {
|
||||||
switch gcphase {
|
switch gcphase {
|
||||||
default:
|
default:
|
||||||
gothrow("gcphasework in bad gcphase")
|
throw("gcphasework in bad gcphase")
|
||||||
|
|
||||||
case _GCoff, _GCquiesce, _GCstw, _GCsweep, _GCscan:
|
case _GCoff, _GCquiesce, _GCstw, _GCsweep, _GCscan:
|
||||||
// ok
|
// ok
|
||||||
@ -1080,7 +1080,7 @@ func gcmarkwb_m(slot *uintptr, ptr uintptr) {
|
|||||||
func gcphasework(gp *g) {
|
func gcphasework(gp *g) {
|
||||||
switch gcphase {
|
switch gcphase {
|
||||||
default:
|
default:
|
||||||
gothrow("gcphasework in bad gcphase")
|
throw("gcphasework in bad gcphase")
|
||||||
case _GCoff, _GCquiesce, _GCstw, _GCsweep:
|
case _GCoff, _GCquiesce, _GCstw, _GCsweep:
|
||||||
// No work.
|
// No work.
|
||||||
case _GCscan:
|
case _GCscan:
|
||||||
@ -1136,7 +1136,7 @@ func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot
|
|||||||
unsafe.Offsetof(finalizer{}.fint) != 3*ptrSize ||
|
unsafe.Offsetof(finalizer{}.fint) != 3*ptrSize ||
|
||||||
unsafe.Offsetof(finalizer{}.ot) != 4*ptrSize ||
|
unsafe.Offsetof(finalizer{}.ot) != 4*ptrSize ||
|
||||||
bitsPerPointer != 2) {
|
bitsPerPointer != 2) {
|
||||||
gothrow("finalizer out of sync")
|
throw("finalizer out of sync")
|
||||||
}
|
}
|
||||||
for i := range finptrmask {
|
for i := range finptrmask {
|
||||||
finptrmask[i] = finalizer1[i%len(finalizer1)]
|
finptrmask[i] = finalizer1[i%len(finalizer1)]
|
||||||
@ -1177,7 +1177,7 @@ func mSpan_EnsureSwept(s *mspan) {
|
|||||||
// (if GC is triggered on another goroutine).
|
// (if GC is triggered on another goroutine).
|
||||||
_g_ := getg()
|
_g_ := getg()
|
||||||
if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
|
if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
|
||||||
gothrow("MSpan_EnsureSwept: m is not locked")
|
throw("MSpan_EnsureSwept: m is not locked")
|
||||||
}
|
}
|
||||||
|
|
||||||
sg := mheap_.sweepgen
|
sg := mheap_.sweepgen
|
||||||
@ -1203,19 +1203,19 @@ func mSpan_EnsureSwept(s *mspan) {
|
|||||||
//TODO go:nowritebarrier
|
//TODO go:nowritebarrier
|
||||||
func mSpan_Sweep(s *mspan, preserve bool) bool {
|
func mSpan_Sweep(s *mspan, preserve bool) bool {
|
||||||
if checkmark {
|
if checkmark {
|
||||||
gothrow("MSpan_Sweep: checkmark only runs in STW and after the sweep")
|
throw("MSpan_Sweep: checkmark only runs in STW and after the sweep")
|
||||||
}
|
}
|
||||||
|
|
||||||
// It's critical that we enter this function with preemption disabled,
|
// It's critical that we enter this function with preemption disabled,
|
||||||
// GC must not start while we are in the middle of this function.
|
// GC must not start while we are in the middle of this function.
|
||||||
_g_ := getg()
|
_g_ := getg()
|
||||||
if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
|
if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
|
||||||
gothrow("MSpan_Sweep: m is not locked")
|
throw("MSpan_Sweep: m is not locked")
|
||||||
}
|
}
|
||||||
sweepgen := mheap_.sweepgen
|
sweepgen := mheap_.sweepgen
|
||||||
if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
|
if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
|
||||||
print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
|
print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
|
||||||
gothrow("MSpan_Sweep: bad span state")
|
throw("MSpan_Sweep: bad span state")
|
||||||
}
|
}
|
||||||
arena_start := mheap_.arena_start
|
arena_start := mheap_.arena_start
|
||||||
cl := s.sizeclass
|
cl := s.sizeclass
|
||||||
@ -1319,7 +1319,7 @@ func mSpan_Sweep(s *mspan, preserve bool) bool {
|
|||||||
if cl == 0 {
|
if cl == 0 {
|
||||||
// Free large span.
|
// Free large span.
|
||||||
if preserve {
|
if preserve {
|
||||||
gothrow("can't preserve large span")
|
throw("can't preserve large span")
|
||||||
}
|
}
|
||||||
unmarkspan(p, s.npages<<_PageShift)
|
unmarkspan(p, s.npages<<_PageShift)
|
||||||
s.needzero = 1
|
s.needzero = 1
|
||||||
@ -1380,7 +1380,7 @@ func mSpan_Sweep(s *mspan, preserve bool) bool {
|
|||||||
// check for potential races.
|
// check for potential races.
|
||||||
if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
|
if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
|
||||||
print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
|
print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
|
||||||
gothrow("MSpan_Sweep: bad span state after sweep")
|
throw("MSpan_Sweep: bad span state after sweep")
|
||||||
}
|
}
|
||||||
atomicstore(&s.sweepgen, sweepgen)
|
atomicstore(&s.sweepgen, sweepgen)
|
||||||
}
|
}
|
||||||
@ -1588,7 +1588,7 @@ func updatememstats(stats *gcstats) {
|
|||||||
|
|
||||||
func gcinit() {
|
func gcinit() {
|
||||||
if unsafe.Sizeof(workbuf{}) != _WorkbufSize {
|
if unsafe.Sizeof(workbuf{}) != _WorkbufSize {
|
||||||
gothrow("runtime: size of Workbuf is suboptimal")
|
throw("runtime: size of Workbuf is suboptimal")
|
||||||
}
|
}
|
||||||
|
|
||||||
work.markfor = parforalloc(_MaxGcproc)
|
work.markfor = parforalloc(_MaxGcproc)
|
||||||
@ -1622,7 +1622,7 @@ func gc_m(start_time int64, eagersweep bool) {
|
|||||||
func clearcheckmarkbitsspan(s *mspan) {
|
func clearcheckmarkbitsspan(s *mspan) {
|
||||||
if s.state != _MSpanInUse {
|
if s.state != _MSpanInUse {
|
||||||
print("runtime:clearcheckmarkbitsspan: state=", s.state, "\n")
|
print("runtime:clearcheckmarkbitsspan: state=", s.state, "\n")
|
||||||
gothrow("clearcheckmarkbitsspan: bad span state")
|
throw("clearcheckmarkbitsspan: bad span state")
|
||||||
}
|
}
|
||||||
|
|
||||||
arena_start := mheap_.arena_start
|
arena_start := mheap_.arena_start
|
||||||
@ -1695,7 +1695,7 @@ func clearcheckmarkbitsspan(s *mspan) {
|
|||||||
// updating top and bottom nibbles, all boundaries
|
// updating top and bottom nibbles, all boundaries
|
||||||
for i := int32(0); i < n/2; i, bitp = i+1, addb(bitp, uintptrMask&-1) {
|
for i := int32(0); i < n/2; i, bitp = i+1, addb(bitp, uintptrMask&-1) {
|
||||||
if *bitp&bitBoundary == 0 {
|
if *bitp&bitBoundary == 0 {
|
||||||
gothrow("missing bitBoundary")
|
throw("missing bitBoundary")
|
||||||
}
|
}
|
||||||
b := (*bitp & bitPtrMask) >> 2
|
b := (*bitp & bitPtrMask) >> 2
|
||||||
if !checkmark && (b == _BitsScalar || b == _BitsScalarMarked) {
|
if !checkmark && (b == _BitsScalar || b == _BitsScalarMarked) {
|
||||||
@ -1705,7 +1705,7 @@ func clearcheckmarkbitsspan(s *mspan) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (*bitp>>gcBits)&bitBoundary == 0 {
|
if (*bitp>>gcBits)&bitBoundary == 0 {
|
||||||
gothrow("missing bitBoundary")
|
throw("missing bitBoundary")
|
||||||
}
|
}
|
||||||
b = ((*bitp >> gcBits) & bitPtrMask) >> 2
|
b = ((*bitp >> gcBits) & bitPtrMask) >> 2
|
||||||
if !checkmark && (b == _BitsScalar || b == _BitsScalarMarked) {
|
if !checkmark && (b == _BitsScalar || b == _BitsScalarMarked) {
|
||||||
@ -1718,7 +1718,7 @@ func clearcheckmarkbitsspan(s *mspan) {
|
|||||||
// updating bottom nibble for first word of each object
|
// updating bottom nibble for first word of each object
|
||||||
for i := int32(0); i < n; i, bitp = i+1, addb(bitp, -step) {
|
for i := int32(0); i < n; i, bitp = i+1, addb(bitp, -step) {
|
||||||
if *bitp&bitBoundary == 0 {
|
if *bitp&bitBoundary == 0 {
|
||||||
gothrow("missing bitBoundary")
|
throw("missing bitBoundary")
|
||||||
}
|
}
|
||||||
b := (*bitp & bitPtrMask) >> 2
|
b := (*bitp & bitPtrMask) >> 2
|
||||||
|
|
||||||
@ -1768,7 +1768,7 @@ func gccheckmark_m(startTime int64, eagersweep bool) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if checkmark {
|
if checkmark {
|
||||||
gothrow("gccheckmark_m, entered with checkmark already true")
|
throw("gccheckmark_m, entered with checkmark already true")
|
||||||
}
|
}
|
||||||
|
|
||||||
checkmark = true
|
checkmark = true
|
||||||
@ -1848,7 +1848,7 @@ func gcscan_m() {
|
|||||||
for i := uintptr(0); i < local_allglen; i++ {
|
for i := uintptr(0); i < local_allglen; i++ {
|
||||||
gp := allgs[i]
|
gp := allgs[i]
|
||||||
if !gp.gcworkdone {
|
if !gp.gcworkdone {
|
||||||
gothrow("scan missed a g")
|
throw("scan missed a g")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
unlock(&allglock)
|
unlock(&allglock)
|
||||||
@ -1948,10 +1948,10 @@ func gc(start_time int64, eagersweep bool) {
|
|||||||
scanblock(0, 0, nil)
|
scanblock(0, 0, nil)
|
||||||
|
|
||||||
if work.full != 0 {
|
if work.full != 0 {
|
||||||
gothrow("work.full != 0")
|
throw("work.full != 0")
|
||||||
}
|
}
|
||||||
if work.partial != 0 {
|
if work.partial != 0 {
|
||||||
gothrow("work.partial != 0")
|
throw("work.partial != 0")
|
||||||
}
|
}
|
||||||
|
|
||||||
gcphase = oldphase
|
gcphase = oldphase
|
||||||
@ -1990,7 +1990,7 @@ func gc(start_time int64, eagersweep bool) {
|
|||||||
updatememstats(&stats)
|
updatememstats(&stats)
|
||||||
if heap1 != memstats.heap_alloc {
|
if heap1 != memstats.heap_alloc {
|
||||||
print("runtime: mstats skew: heap=", heap1, "/", memstats.heap_alloc, "\n")
|
print("runtime: mstats skew: heap=", heap1, "/", memstats.heap_alloc, "\n")
|
||||||
gothrow("mstats skew")
|
throw("mstats skew")
|
||||||
}
|
}
|
||||||
obj := memstats.nmalloc - memstats.nfree
|
obj := memstats.nmalloc - memstats.nfree
|
||||||
|
|
||||||
@ -2087,7 +2087,7 @@ func readGCStats_m(pauses *[]uint64) {
|
|||||||
p := *pauses
|
p := *pauses
|
||||||
// Calling code in runtime/debug should make the slice large enough.
|
// Calling code in runtime/debug should make the slice large enough.
|
||||||
if cap(p) < len(memstats.pause_ns)+3 {
|
if cap(p) < len(memstats.pause_ns)+3 {
|
||||||
gothrow("runtime: short slice passed to readGCStats")
|
throw("runtime: short slice passed to readGCStats")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Pass back: pauses, pause ends, last gc (absolute time), number of gc, total pause ns.
|
// Pass back: pauses, pause ends, last gc (absolute time), number of gc, total pause ns.
|
||||||
@ -2131,10 +2131,10 @@ func gchelperstart() {
|
|||||||
_g_ := getg()
|
_g_ := getg()
|
||||||
|
|
||||||
if _g_.m.helpgc < 0 || _g_.m.helpgc >= _MaxGcproc {
|
if _g_.m.helpgc < 0 || _g_.m.helpgc >= _MaxGcproc {
|
||||||
gothrow("gchelperstart: bad m->helpgc")
|
throw("gchelperstart: bad m->helpgc")
|
||||||
}
|
}
|
||||||
if _g_ != _g_.m.g0 {
|
if _g_ != _g_.m.g0 {
|
||||||
gothrow("gchelper not running on g0 stack")
|
throw("gchelper not running on g0 stack")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2167,7 +2167,7 @@ func unrollgcprog1(maskp *byte, prog *byte, ppos *uintptr, inplace, sparse bool)
|
|||||||
for {
|
for {
|
||||||
switch *prog {
|
switch *prog {
|
||||||
default:
|
default:
|
||||||
gothrow("unrollgcprog: unknown instruction")
|
throw("unrollgcprog: unknown instruction")
|
||||||
|
|
||||||
case insData:
|
case insData:
|
||||||
prog = addb(prog, 1)
|
prog = addb(prog, 1)
|
||||||
@ -2214,7 +2214,7 @@ func unrollgcprog1(maskp *byte, prog *byte, ppos *uintptr, inplace, sparse bool)
|
|||||||
prog1 = unrollgcprog1(&mask[0], prog, &pos, inplace, sparse)
|
prog1 = unrollgcprog1(&mask[0], prog, &pos, inplace, sparse)
|
||||||
}
|
}
|
||||||
if *prog1 != insArrayEnd {
|
if *prog1 != insArrayEnd {
|
||||||
gothrow("unrollgcprog: array does not end with insArrayEnd")
|
throw("unrollgcprog: array does not end with insArrayEnd")
|
||||||
}
|
}
|
||||||
prog = (*byte)(add(unsafe.Pointer(prog1), 1))
|
prog = (*byte)(add(unsafe.Pointer(prog1), 1))
|
||||||
|
|
||||||
@ -2234,13 +2234,13 @@ func unrollglobgcprog(prog *byte, size uintptr) bitvector {
|
|||||||
prog = unrollgcprog1(&mask[0], prog, &pos, false, false)
|
prog = unrollgcprog1(&mask[0], prog, &pos, false, false)
|
||||||
if pos != size/ptrSize*bitsPerPointer {
|
if pos != size/ptrSize*bitsPerPointer {
|
||||||
print("unrollglobgcprog: bad program size, got ", pos, ", expect ", size/ptrSize*bitsPerPointer, "\n")
|
print("unrollglobgcprog: bad program size, got ", pos, ", expect ", size/ptrSize*bitsPerPointer, "\n")
|
||||||
gothrow("unrollglobgcprog: bad program size")
|
throw("unrollglobgcprog: bad program size")
|
||||||
}
|
}
|
||||||
if *prog != insEnd {
|
if *prog != insEnd {
|
||||||
gothrow("unrollglobgcprog: program does not end with insEnd")
|
throw("unrollglobgcprog: program does not end with insEnd")
|
||||||
}
|
}
|
||||||
if mask[masksize] != 0xa1 {
|
if mask[masksize] != 0xa1 {
|
||||||
gothrow("unrollglobgcprog: overflow")
|
throw("unrollglobgcprog: overflow")
|
||||||
}
|
}
|
||||||
return bitvector{int32(masksize * 8), &mask[0]}
|
return bitvector{int32(masksize * 8), &mask[0]}
|
||||||
}
|
}
|
||||||
@ -2280,7 +2280,7 @@ func unrollgcprog_m(typ *_type) {
|
|||||||
prog := (*byte)(unsafe.Pointer(uintptr(typ.gc[1])))
|
prog := (*byte)(unsafe.Pointer(uintptr(typ.gc[1])))
|
||||||
prog = unrollgcprog1(mask, prog, &pos, false, true)
|
prog = unrollgcprog1(mask, prog, &pos, false, true)
|
||||||
if *prog != insEnd {
|
if *prog != insEnd {
|
||||||
gothrow("unrollgcprog: program does not end with insEnd")
|
throw("unrollgcprog: program does not end with insEnd")
|
||||||
}
|
}
|
||||||
if typ.size/ptrSize%2 != 0 {
|
if typ.size/ptrSize%2 != 0 {
|
||||||
// repeat the program
|
// repeat the program
|
||||||
@ -2299,13 +2299,13 @@ func unrollgcprog_m(typ *_type) {
|
|||||||
//go:nowritebarrier
|
//go:nowritebarrier
|
||||||
func markspan(v unsafe.Pointer, size uintptr, n uintptr, leftover bool) {
|
func markspan(v unsafe.Pointer, size uintptr, n uintptr, leftover bool) {
|
||||||
if uintptr(v)+size*n > mheap_.arena_used || uintptr(v) < mheap_.arena_start {
|
if uintptr(v)+size*n > mheap_.arena_used || uintptr(v) < mheap_.arena_start {
|
||||||
gothrow("markspan: bad pointer")
|
throw("markspan: bad pointer")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Find bits of the beginning of the span.
|
// Find bits of the beginning of the span.
|
||||||
off := (uintptr(v) - uintptr(mheap_.arena_start)) / ptrSize
|
off := (uintptr(v) - uintptr(mheap_.arena_start)) / ptrSize
|
||||||
if off%wordsPerBitmapByte != 0 {
|
if off%wordsPerBitmapByte != 0 {
|
||||||
gothrow("markspan: unaligned length")
|
throw("markspan: unaligned length")
|
||||||
}
|
}
|
||||||
b := mheap_.arena_start - off/wordsPerBitmapByte - 1
|
b := mheap_.arena_start - off/wordsPerBitmapByte - 1
|
||||||
|
|
||||||
@ -2317,14 +2317,14 @@ func markspan(v unsafe.Pointer, size uintptr, n uintptr, leftover bool) {
|
|||||||
// Possible only on 64-bits (minimal size class is 8 bytes).
|
// Possible only on 64-bits (minimal size class is 8 bytes).
|
||||||
// Set memory to 0x11.
|
// Set memory to 0x11.
|
||||||
if (bitBoundary|bitsDead)<<gcBits|bitBoundary|bitsDead != 0x11 {
|
if (bitBoundary|bitsDead)<<gcBits|bitBoundary|bitsDead != 0x11 {
|
||||||
gothrow("markspan: bad bits")
|
throw("markspan: bad bits")
|
||||||
}
|
}
|
||||||
if n%(wordsPerBitmapByte*ptrSize) != 0 {
|
if n%(wordsPerBitmapByte*ptrSize) != 0 {
|
||||||
gothrow("markspan: unaligned length")
|
throw("markspan: unaligned length")
|
||||||
}
|
}
|
||||||
b = b - n/wordsPerBitmapByte + 1 // find first byte
|
b = b - n/wordsPerBitmapByte + 1 // find first byte
|
||||||
if b%ptrSize != 0 {
|
if b%ptrSize != 0 {
|
||||||
gothrow("markspan: unaligned pointer")
|
throw("markspan: unaligned pointer")
|
||||||
}
|
}
|
||||||
for i := uintptr(0); i < n; i, b = i+wordsPerBitmapByte*ptrSize, b+ptrSize {
|
for i := uintptr(0); i < n; i, b = i+wordsPerBitmapByte*ptrSize, b+ptrSize {
|
||||||
*(*uintptr)(unsafe.Pointer(b)) = uintptrMask & 0x1111111111111111 // bitBoundary | bitsDead, repeated
|
*(*uintptr)(unsafe.Pointer(b)) = uintptrMask & 0x1111111111111111 // bitBoundary | bitsDead, repeated
|
||||||
@ -2345,18 +2345,18 @@ func markspan(v unsafe.Pointer, size uintptr, n uintptr, leftover bool) {
|
|||||||
//go:nowritebarrier
|
//go:nowritebarrier
|
||||||
func unmarkspan(v, n uintptr) {
|
func unmarkspan(v, n uintptr) {
|
||||||
if v+n > mheap_.arena_used || v < mheap_.arena_start {
|
if v+n > mheap_.arena_used || v < mheap_.arena_start {
|
||||||
gothrow("markspan: bad pointer")
|
throw("markspan: bad pointer")
|
||||||
}
|
}
|
||||||
|
|
||||||
off := (v - mheap_.arena_start) / ptrSize // word offset
|
off := (v - mheap_.arena_start) / ptrSize // word offset
|
||||||
if off%(ptrSize*wordsPerBitmapByte) != 0 {
|
if off%(ptrSize*wordsPerBitmapByte) != 0 {
|
||||||
gothrow("markspan: unaligned pointer")
|
throw("markspan: unaligned pointer")
|
||||||
}
|
}
|
||||||
|
|
||||||
b := mheap_.arena_start - off/wordsPerBitmapByte - 1
|
b := mheap_.arena_start - off/wordsPerBitmapByte - 1
|
||||||
n /= ptrSize
|
n /= ptrSize
|
||||||
if n%(ptrSize*wordsPerBitmapByte) != 0 {
|
if n%(ptrSize*wordsPerBitmapByte) != 0 {
|
||||||
gothrow("unmarkspan: unaligned length")
|
throw("unmarkspan: unaligned length")
|
||||||
}
|
}
|
||||||
|
|
||||||
// Okay to use non-atomic ops here, because we control
|
// Okay to use non-atomic ops here, because we control
|
||||||
|
@@ -127,7 +127,7 @@ func writebarrierptr_nostore(dst *uintptr, src uintptr) {
 	}

 	if src != 0 && (src < _PageSize || src == _PoisonGC || src == _PoisonStack) {
-		systemstack(func() { gothrow("bad pointer in write barrier") })
+		systemstack(func() { throw("bad pointer in write barrier") })
 	}

 	mp := acquirem()
@@ -31,7 +31,7 @@ func recordspan(vh unsafe.Pointer, p unsafe.Pointer) {
 		sp := (*slice)(unsafe.Pointer(&new))
 		sp.array = (*byte)(sysAlloc(uintptr(n)*ptrSize, &memstats.other_sys))
 		if sp.array == nil {
-			gothrow("runtime: cannot allocate memory")
+			throw("runtime: cannot allocate memory")
 		}
 		sp.len = uint(len(h_allspans))
 		sp.cap = uint(n)
@@ -174,7 +174,7 @@ func mHeap_Reclaim(h *mheap, npage uintptr) {
 func mHeap_Alloc_m(h *mheap, npage uintptr, sizeclass int32, large bool) *mspan {
 	_g_ := getg()
 	if _g_ != _g_.m.g0 {
-		gothrow("_mheap_alloc not on g0 stack")
+		throw("_mheap_alloc not on g0 stack")
 	}
 	lock(&h.lock)

@@ -242,7 +242,7 @@ func mHeap_Alloc(h *mheap, npage uintptr, sizeclass int32, large bool, needzero
 func mHeap_AllocStack(h *mheap, npage uintptr) *mspan {
 	_g_ := getg()
 	if _g_ != _g_.m.g0 {
-		gothrow("mheap_allocstack not on g0 stack")
+		throw("mheap_allocstack not on g0 stack")
 	}
 	lock(&h.lock)
 	s := mHeap_AllocSpanLocked(h, npage)
@@ -285,14 +285,14 @@ func mHeap_AllocSpanLocked(h *mheap, npage uintptr) *mspan {
 HaveSpan:
 	// Mark span in use.
 	if s.state != _MSpanFree {
-		gothrow("MHeap_AllocLocked - MSpan not free")
+		throw("MHeap_AllocLocked - MSpan not free")
 	}
 	if s.npages < npage {
-		gothrow("MHeap_AllocLocked - bad npages")
+		throw("MHeap_AllocLocked - bad npages")
 	}
 	mSpanList_Remove(s)
 	if s.next != nil || s.prev != nil {
-		gothrow("still in list")
+		throw("still in list")
 	}
 	if s.npreleased > 0 {
 		sysUsed((unsafe.Pointer)(s.start<<_PageShift), s.npages<<_PageShift)
@@ -332,7 +332,7 @@ HaveSpan:

 	//println("spanalloc", hex(s.start<<_PageShift))
 	if s.next != nil || s.prev != nil {
-		gothrow("still in list")
+		throw("still in list")
 	}
 	return s
 }
@@ -447,7 +447,7 @@ func mHeap_Free(h *mheap, s *mspan, acct int32) {
 func mHeap_FreeStack(h *mheap, s *mspan) {
 	_g_ := getg()
 	if _g_ != _g_.m.g0 {
-		gothrow("mheap_freestack not on g0 stack")
+		throw("mheap_freestack not on g0 stack")
 	}
 	s.needzero = 1
 	lock(&h.lock)
@@ -460,15 +460,15 @@ func mHeap_FreeSpanLocked(h *mheap, s *mspan, acctinuse, acctidle bool) {
 	switch s.state {
 	case _MSpanStack:
 		if s.ref != 0 {
-			gothrow("MHeap_FreeSpanLocked - invalid stack free")
+			throw("MHeap_FreeSpanLocked - invalid stack free")
 		}
 	case _MSpanInUse:
 		if s.ref != 0 || s.sweepgen != h.sweepgen {
 			print("MHeap_FreeSpanLocked - span ", s, " ptr ", hex(s.start<<_PageShift), " ref ", s.ref, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n")
-			gothrow("MHeap_FreeSpanLocked - invalid free")
+			throw("MHeap_FreeSpanLocked - invalid free")
 		}
 	default:
-		gothrow("MHeap_FreeSpanLocked - invalid span state")
+		throw("MHeap_FreeSpanLocked - invalid span state")
 	}

 	if acctinuse {
@@ -608,7 +608,7 @@ func mSpanList_IsEmpty(list *mspan) bool {
 func mSpanList_Insert(list *mspan, span *mspan) {
 	if span.next != nil || span.prev != nil {
 		println("failed MSpanList_Insert", span, span.next, span.prev)
-		gothrow("MSpanList_Insert")
+		throw("MSpanList_Insert")
 	}
 	span.next = list.next
 	span.prev = list
@@ -619,7 +619,7 @@ func mSpanList_Insert(list *mspan, span *mspan) {
 func mSpanList_InsertBack(list *mspan, span *mspan) {
 	if span.next != nil || span.prev != nil {
 		println("failed MSpanList_InsertBack", span, span.next, span.prev)
-		gothrow("MSpanList_InsertBack")
+		throw("MSpanList_InsertBack")
 	}
 	span.next = list
 	span.prev = list.prev
@@ -636,7 +636,7 @@ func mSpanList_InsertBack(list *mspan, span *mspan) {
 func addspecial(p unsafe.Pointer, s *special) bool {
 	span := mHeap_LookupMaybe(&mheap_, p)
 	if span == nil {
-		gothrow("addspecial on invalid pointer")
+		throw("addspecial on invalid pointer")
 	}

 	// Ensure that the span is swept.
@@ -683,7 +683,7 @@ func addspecial(p unsafe.Pointer, s *special) bool {
 func removespecial(p unsafe.Pointer, kind uint8) *special {
 	span := mHeap_LookupMaybe(&mheap_, p)
 	if span == nil {
-		gothrow("removespecial on invalid pointer")
+		throw("removespecial on invalid pointer")
 	}

 	// Ensure that the span is swept.
@@ -755,7 +755,7 @@ func setprofilebucket(p unsafe.Pointer, b *bucket) {
 	s.special.kind = _KindSpecialProfile
 	s.b = b
 	if !addspecial(p, &s.special) {
-		gothrow("setprofilebucket: profile already set")
+		throw("setprofilebucket: profile already set")
 	}
 }

@@ -779,7 +779,7 @@ func freespecial(s *special, p unsafe.Pointer, size uintptr, freed bool) bool {
 		unlock(&mheap_.speciallock)
 		return true
 	default:
-		gothrow("bad special kind")
+		throw("bad special kind")
 		panic("not reached")
 	}
 }
@@ -100,7 +100,7 @@ func newBucket(typ bucketType, nstk int) *bucket {
 	size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
 	switch typ {
 	default:
-		gothrow("invalid profile bucket type")
+		throw("invalid profile bucket type")
 	case memProfile:
 		size += unsafe.Sizeof(memRecord{})
 	case blockProfile:
@@ -123,7 +123,7 @@ func (b *bucket) stk() []uintptr {
 // mp returns the memRecord associated with the memProfile bucket b.
 func (b *bucket) mp() *memRecord {
 	if b.typ != memProfile {
-		gothrow("bad use of bucket.mp")
+		throw("bad use of bucket.mp")
 	}
 	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
 	return (*memRecord)(data)
@@ -132,7 +132,7 @@ func (b *bucket) mp() *memRecord {
 // bp returns the blockRecord associated with the blockProfile bucket b.
 func (b *bucket) bp() *blockRecord {
 	if b.typ != blockProfile {
-		gothrow("bad use of bucket.bp")
+		throw("bad use of bucket.bp")
 	}
 	data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
 	return (*blockRecord)(data)
@@ -143,7 +143,7 @@ func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket
 	if buckhash == nil {
 		buckhash = (*[buckHashSize]*bucket)(sysAlloc(unsafe.Sizeof(*buckhash), &memstats.buckhash_sys))
 		if buckhash == nil {
-			gothrow("runtime: cannot allocate memory")
+			throw("runtime: cannot allocate memory")
 		}
 	}

@@ -43,7 +43,7 @@ package runtime

 func sizeToClass(size int32) int32 {
 	if size > _MaxSmallSize {
-		gothrow("SizeToClass - invalid size")
+		throw("SizeToClass - invalid size")
 	}
 	if size > 1024-8 {
 		return int32(size_to_class128[(size-1024+127)>>7])
@@ -67,7 +67,7 @@ func initSizes() {
 			}
 		}
 		if align&(align-1) != 0 {
-			gothrow("InitSizes - bug")
+			throw("InitSizes - bug")
 		}

 		// Make the allocnpages big enough that
@@ -95,7 +95,7 @@ func initSizes() {
 	}
 	if sizeclass != _NumSizeClasses {
 		print("sizeclass=", sizeclass, " NumSizeClasses=", _NumSizeClasses, "\n")
-		gothrow("InitSizes - bad NumSizeClasses")
+		throw("InitSizes - bad NumSizeClasses")
 	}

 	// Initialize the size_to_class tables.
@@ -155,7 +155,7 @@ dump:
 		}
 		print("\n")
 	}
-	gothrow("InitSizes failed")
+	throw("InitSizes failed")
 }

 // Returns size of the memory block that mallocgc will allocate if you ask for the size.
@@ -81,10 +81,10 @@ func net_runtime_pollOpen(fd uintptr) (*pollDesc, int) {
 	pd := pollcache.alloc()
 	lock(&pd.lock)
 	if pd.wg != 0 && pd.wg != pdReady {
-		gothrow("netpollOpen: blocked write on free descriptor")
+		throw("netpollOpen: blocked write on free descriptor")
 	}
 	if pd.rg != 0 && pd.rg != pdReady {
-		gothrow("netpollOpen: blocked read on free descriptor")
+		throw("netpollOpen: blocked read on free descriptor")
 	}
 	pd.fd = fd
 	pd.closing = false
@@ -103,13 +103,13 @@ func net_runtime_pollOpen(fd uintptr) (*pollDesc, int) {
 //go:linkname net_runtime_pollClose net.runtime_pollClose
 func net_runtime_pollClose(pd *pollDesc) {
 	if !pd.closing {
-		gothrow("netpollClose: close w/o unblock")
+		throw("netpollClose: close w/o unblock")
 	}
 	if pd.wg != 0 && pd.wg != pdReady {
-		gothrow("netpollClose: blocked write on closing descriptor")
+		throw("netpollClose: blocked write on closing descriptor")
 	}
 	if pd.rg != 0 && pd.rg != pdReady {
-		gothrow("netpollClose: blocked read on closing descriptor")
+		throw("netpollClose: blocked read on closing descriptor")
 	}
 	netpollclose(uintptr(pd.fd))
 	pollcache.free(pd)
@@ -240,7 +240,7 @@ func net_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) {
 func net_runtime_pollUnblock(pd *pollDesc) {
 	lock(&pd.lock)
 	if pd.closing {
-		gothrow("netpollUnblock: already closing")
+		throw("netpollUnblock: already closing")
 	}
 	pd.closing = true
 	pd.seq++
@@ -314,7 +314,7 @@ func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
 			return true
 		}
 		if old != 0 {
-			gothrow("netpollblock: double wait")
+			throw("netpollblock: double wait")
 		}
 		if casuintptr(gpp, 0, pdWait) {
 			break
@@ -330,7 +330,7 @@ func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
 	// be careful to not lose concurrent READY notification
 	old := xchguintptr(gpp, 0)
 	if old > pdWait {
-		gothrow("netpollblock: corrupted state")
+		throw("netpollblock: corrupted state")
 	}
 	return old == pdReady
 }
@@ -376,7 +376,7 @@ func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
 	var rg *g
 	if read {
 		if pd.rd <= 0 || pd.rt.f == nil {
-			gothrow("netpolldeadlineimpl: inconsistent read deadline")
+			throw("netpolldeadlineimpl: inconsistent read deadline")
 		}
 		pd.rd = -1
 		atomicstorep(unsafe.Pointer(&pd.rt.f), nil) // full memory barrier between store to rd and load of rg in netpollunblock
@@ -385,7 +385,7 @@ func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
 	var wg *g
 	if write {
 		if pd.wd <= 0 || pd.wt.f == nil && !read {
-			gothrow("netpolldeadlineimpl: inconsistent write deadline")
+			throw("netpolldeadlineimpl: inconsistent write deadline")
 		}
 		pd.wd = -1
 		atomicstorep(unsafe.Pointer(&pd.wt.f), nil) // full memory barrier between store to wd and load of wg in netpollunblock
@@ -34,7 +34,7 @@ func netpollinit() {
 		return
 	}
 	println("netpollinit: failed to create epoll descriptor", -epfd)
-	gothrow("netpollinit: failed to create descriptor")
+	throw("netpollinit: failed to create descriptor")
 }

 func netpollopen(fd uintptr, pd *pollDesc) int32 {
@@ -50,7 +50,7 @@ func netpollclose(fd uintptr) int32 {
 }

 func netpollarm(pd *pollDesc, mode int) {
-	gothrow("unused")
+	throw("unused")
 }

 // polls for ready network connections
@@ -25,7 +25,7 @@ func netpollinit() {
 	kq = kqueue()
 	if kq < 0 {
 		println("netpollinit: kqueue failed with", -kq)
-		gothrow("netpollinit: kqueue failed")
+		throw("netpollinit: kqueue failed")
 	}
 	closeonexec(kq)
 }
@@ -57,7 +57,7 @@ func netpollclose(fd uintptr) int32 {
 }

 func netpollarm(pd *pollDesc, mode int) {
-	gothrow("unused")
+	throw("unused")
 }

 // Polls for ready network connections.
@@ -118,7 +118,7 @@ func netpollinit() {
 	}

 	print("netpollinit: failed to create port (", errno(), ")\n")
-	gothrow("netpollinit: failed to create port")
+	throw("netpollinit: failed to create port")
 }

 func netpollopen(fd uintptr, pd *pollDesc) int32 {
@@ -155,7 +155,7 @@ func netpollupdate(pd *pollDesc, set, clear uint32) {

 	if events != 0 && port_associate(portfd, _PORT_SOURCE_FD, pd.fd, events, uintptr(unsafe.Pointer(pd))) != 0 {
 		print("netpollupdate: failed to associate (", errno(), ")\n")
-		gothrow("netpollupdate: failed to associate")
+		throw("netpollupdate: failed to associate")
 	}
 	pd.user = events
 }
@@ -169,7 +169,7 @@ func netpollarm(pd *pollDesc, mode int) {
 	case 'w':
 		netpollupdate(pd, _POLLOUT, 0)
 	default:
-		gothrow("netpollarm: bad mode")
+		throw("netpollarm: bad mode")
 	}
 	unlock(&pd.lock)
 }
@@ -46,7 +46,7 @@ func netpollinit() {
 	iocphandle = uintptr(stdcall4(_CreateIoCompletionPort, _INVALID_HANDLE_VALUE, 0, 0, _DWORD_MAX))
 	if iocphandle == 0 {
 		println("netpoll: failed to create iocp handle (errno=", getlasterror(), ")")
-		gothrow("netpoll: failed to create iocp handle")
+		throw("netpoll: failed to create iocp handle")
 	}
 }

@@ -63,7 +63,7 @@ func netpollclose(fd uintptr) int32 {
 }

 func netpollarm(pd *pollDesc, mode int) {
-	gothrow("unused")
+	throw("unused")
 }

 // Polls for completed network IO.
@@ -101,7 +101,7 @@ retry:
 			return nil
 		}
 		println("netpoll: GetQueuedCompletionStatusEx failed (errno=", errno, ")")
-		gothrow("netpoll: GetQueuedCompletionStatusEx failed")
+		throw("netpoll: GetQueuedCompletionStatusEx failed")
 	}
 	mp.blocked = false
 	for i = 0; i < n; i++ {
@@ -128,7 +128,7 @@ retry:
 		}
 		if op == nil {
 			println("netpoll: GetQueuedCompletionStatus failed (errno=", errno, ")")
-			gothrow("netpoll: GetQueuedCompletionStatus failed")
+			throw("netpoll: GetQueuedCompletionStatus failed")
 		}
 		// dequeued failed IO packet, so report that
 	}
@@ -143,12 +143,12 @@ retry:

 func handlecompletion(gpp **g, op *net_op, errno int32, qty uint32) {
 	if op == nil {
-		gothrow("netpoll: GetQueuedCompletionStatus returned op == nil")
+		throw("netpoll: GetQueuedCompletionStatus returned op == nil")
 	}
 	mode := op.mode
 	if mode != 'r' && mode != 'w' {
 		println("netpoll: GetQueuedCompletionStatus returned invalid mode=", mode)
-		gothrow("netpoll: GetQueuedCompletionStatus returned invalid mode")
+		throw("netpoll: GetQueuedCompletionStatus returned invalid mode")
 	}
 	op.errno = errno
 	op.qty = qty
@@ -64,9 +64,9 @@ func goenvs() {
 	if !iscgo {
 		if bsdthread_register() != 0 {
 			if gogetenv("DYLD_INSERT_LIBRARIES") != "" {
-				gothrow("runtime: bsdthread_register error (unset DYLD_INSERT_LIBRARIES)")
+				throw("runtime: bsdthread_register error (unset DYLD_INSERT_LIBRARIES)")
 			}
-			gothrow("runtime: bsdthread_register error")
+			throw("runtime: bsdthread_register error")
 		}
 	}
 }
@@ -84,7 +84,7 @@ func newosproc(mp *m, stk unsafe.Pointer) {

 	if errno < 0 {
 		print("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", -errno, ")\n")
-		gothrow("runtime.newosproc")
+		throw("runtime.newosproc")
 	}
 }

@@ -114,7 +114,7 @@ func unminit() {

 func macherror(r int32, fn string) {
 	print("mach error ", fn, ": ", r, "\n")
-	gothrow("mach error")
+	throw("mach error")
 }

 const _DebugMach = false
@@ -395,7 +395,7 @@ func setsig(i int32, fn uintptr, restart bool) {
 }

 func setsigstack(i int32) {
-	gothrow("setsigstack")
+	throw("setsigstack")
 }

 func getsig(i int32) uintptr {
@@ -190,7 +190,7 @@ func setsig(i int32, fn uintptr, restart bool) {
 }

 func setsigstack(i int32) {
-	gothrow("setsigstack")
+	throw("setsigstack")
 }

 func getsig(i int32) uintptr {
@@ -192,7 +192,7 @@ func setsig(i int32, fn uintptr, restart bool) {
 }

 func setsigstack(i int32) {
-	gothrow("setsigstack")
+	throw("setsigstack")
 }

 func getsig(i int32) uintptr {
@@ -137,7 +137,7 @@ func newosproc(mp *m, stk unsafe.Pointer) {

 	if ret < 0 {
 		print("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", -ret, ")\n")
-		gothrow("newosproc")
+		throw("newosproc")
 	}
 }

@@ -242,21 +242,21 @@ func setsig(i int32, fn uintptr, restart bool) {
 	}
 	sa.sa_handler = fn
 	if rt_sigaction(uintptr(i), &sa, nil, unsafe.Sizeof(sa.sa_mask)) != 0 {
-		gothrow("rt_sigaction failure")
+		throw("rt_sigaction failure")
 	}
 }

 func setsigstack(i int32) {
 	var sa sigactiont
 	if rt_sigaction(uintptr(i), nil, &sa, unsafe.Sizeof(sa.sa_mask)) != 0 {
-		gothrow("rt_sigaction failure")
+		throw("rt_sigaction failure")
 	}
 	if sa.sa_handler == 0 || sa.sa_handler == _SIG_DFL || sa.sa_handler == _SIG_IGN || sa.sa_flags&_SA_ONSTACK != 0 {
 		return
 	}
 	sa.sa_flags |= _SA_ONSTACK
 	if rt_sigaction(uintptr(i), &sa, nil, unsafe.Sizeof(sa.sa_mask)) != 0 {
-		gothrow("rt_sigaction failure")
+		throw("rt_sigaction failure")
 	}
 }

@@ -265,7 +265,7 @@ func getsig(i int32) uintptr {

 	memclr(unsafe.Pointer(&sa), unsafe.Sizeof(sa))
 	if rt_sigaction(uintptr(i), nil, &sa, unsafe.Sizeof(sa.sa_mask)) != 0 {
-		gothrow("rt_sigaction read failure")
+		throw("rt_sigaction read failure")
 	}
 	if sa.sa_handler == funcPC(sigtramp) {
 		return funcPC(sighandler)
@@ -77,7 +77,7 @@ func newosproc(mp *m, stk unsafe.Pointer) {
 	ret := nacl_thread_create(funcPC(mstart_nacl), stk, unsafe.Pointer(&tls[2]), nil)
 	if ret < 0 {
 		print("nacl_thread_create: error ", -ret, "\n")
-		gothrow("newosproc")
+		throw("newosproc")
 	}
 }

@@ -88,12 +88,12 @@ func semacreate() uintptr {
 	mu := nacl_mutex_create(0)
 	if mu < 0 {
 		print("nacl_mutex_create: error ", -mu, "\n")
-		gothrow("semacreate")
+		throw("semacreate")
 	}
 	c := nacl_cond_create(0)
 	if c < 0 {
 		print("nacl_cond_create: error ", -cond, "\n")
-		gothrow("semacreate")
+		throw("semacreate")
 	}
 	cond = uintptr(c)
 	_g_ := getg()
@@ -109,13 +109,13 @@ func semasleep(ns int64) int32 {
 	systemstack(func() {
 		_g_ := getg()
 		if nacl_mutex_lock(int32(_g_.m.waitsemalock)) < 0 {
-			gothrow("semasleep")
+			throw("semasleep")
 		}

 		for _g_.m.waitsemacount == 0 {
 			if ns < 0 {
 				if nacl_cond_wait(int32(_g_.m.waitsema), int32(_g_.m.waitsemalock)) < 0 {
-					gothrow("semasleep")
+					throw("semasleep")
 				}
 			} else {
 				var ts timespec
@@ -129,7 +129,7 @@ func semasleep(ns int64) int32 {
 					return
 				}
 				if r < 0 {
-					gothrow("semasleep")
+					throw("semasleep")
 				}
 			}
 		}
@@ -145,10 +145,10 @@ func semasleep(ns int64) int32 {
 func semawakeup(mp *m) {
 	systemstack(func() {
 		if nacl_mutex_lock(int32(mp.waitsemalock)) < 0 {
-			gothrow("semawakeup")
+			throw("semawakeup")
 		}
 		if mp.waitsemacount != 0 {
-			gothrow("semawakeup")
+			throw("semawakeup")
 		}
 		mp.waitsemacount = 1
 		nacl_cond_signal(int32(mp.waitsema))
@@ -162,7 +162,7 @@ func newosproc(mp *m, stk unsafe.Pointer) {
 	ret := lwp_create(unsafe.Pointer(&uc), 0, unsafe.Pointer(&mp.procid))
 	if ret < 0 {
 		print("runtime: failed to create new OS thread (have ", mcount()-1, " already; errno=", -ret, ")\n")
-		gothrow("runtime.newosproc")
+		throw("runtime.newosproc")
 	}
 }

@@ -234,7 +234,7 @@ func setsig(i int32, fn uintptr, restart bool) {
 }

 func setsigstack(i int32) {
-	gothrow("setsigstack")
+	throw("setsigstack")
 }

 func getsig(i int32) uintptr {
@@ -130,7 +130,7 @@ func newosproc(mp *m, stk unsafe.Pointer) {
 		if ret == -ENOTSUP {
 			print("runtime: is kern.rthreads disabled?\n")
 		}
-		gothrow("runtime.newosproc")
+		throw("runtime.newosproc")
 	}
 }

@@ -204,7 +204,7 @@ func setsig(i int32, fn uintptr, restart bool) {
 }

 func setsigstack(i int32) {
-	gothrow("setsigstack")
+	throw("setsigstack")
 }

 func getsig(i int32) uintptr {
@@ -194,7 +194,7 @@ func newosproc(mp *m, stk unsafe.Pointer) {
 	}
 	pid := rfork(_RFPROC | _RFMEM | _RFNOWAIT)
 	if pid < 0 {
-		gothrow("newosproc: rfork failed")
+		throw("newosproc: rfork failed")
 	}
 	if pid == 0 {
 		tstart_plan9(mp)
@ -246,7 +246,7 @@ func newosproc(mp *m, stk unsafe.Pointer) {
|
|||||||
_STACK_SIZE_PARAM_IS_A_RESERVATION, 0)
|
_STACK_SIZE_PARAM_IS_A_RESERVATION, 0)
|
||||||
if thandle == 0 {
|
if thandle == 0 {
|
||||||
println("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", getlasterror(), ")")
|
println("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", getlasterror(), ")")
|
||||||
gothrow("runtime.newosproc")
|
throw("runtime.newosproc")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -300,7 +300,7 @@ func systime(addr uintptr) int64 {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
systemstack(func() {
|
systemstack(func() {
|
||||||
gothrow("interrupt/system time is changing too fast")
|
throw("interrupt/system time is changing too fast")
|
||||||
})
|
})
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
|
@@ -141,17 +141,17 @@ func newosproc(mp *m, _ unsafe.Pointer) {
 	)
 
 	if pthread_attr_init(&attr) != 0 {
-		gothrow("pthread_attr_init")
+		throw("pthread_attr_init")
 	}
 	if pthread_attr_setstack(&attr, 0, 0x200000) != 0 {
-		gothrow("pthread_attr_setstack")
+		throw("pthread_attr_setstack")
 	}
 	if pthread_attr_getstack(&attr, unsafe.Pointer(&mp.g0.stack.hi), &size) != 0 {
-		gothrow("pthread_attr_getstack")
+		throw("pthread_attr_getstack")
 	}
 	mp.g0.stack.lo = mp.g0.stack.hi - uintptr(size)
 	if pthread_attr_setdetachstate(&attr, _PTHREAD_CREATE_DETACHED) != 0 {
-		gothrow("pthread_attr_setdetachstate")
+		throw("pthread_attr_setdetachstate")
 	}
 
 	// Disable signals during create, so that the new thread starts
@@ -161,7 +161,7 @@ func newosproc(mp *m, _ unsafe.Pointer) {
 	sigprocmask(_SIG_SETMASK, &oset, nil)
 	if ret != 0 {
 		print("runtime: failed to create new OS thread (have ", mcount(), " already; errno=", ret, ")\n")
-		gothrow("newosproc")
+		throw("newosproc")
 	}
 }
 
@@ -253,7 +253,7 @@ func setsig(i int32, fn uintptr, restart bool) {
 }
 
 func setsigstack(i int32) {
-	gothrow("setsigstack")
+	throw("setsigstack")
 }
 
 func getsig(i int32) uintptr {
@@ -296,7 +296,7 @@ func semacreate() uintptr {
 	asmcgocall(unsafe.Pointer(&asmsysvicall6), unsafe.Pointer(&_g_.m.libcall))
 	sem = (*semt)(unsafe.Pointer(_g_.m.libcall.r1))
 	if sem_init(sem, 0, 0) != 0 {
-		gothrow("sem_init")
+		throw("sem_init")
 	}
 	return uintptr(unsafe.Pointer(sem))
 }
@@ -319,7 +319,7 @@ func semasleep(ns int64) int32 {
 			if *_m_.perrno == _ETIMEDOUT || *_m_.perrno == _EAGAIN || *_m_.perrno == _EINTR {
 				return -1
 			}
-			gothrow("sem_reltimedwait_np")
+			throw("sem_reltimedwait_np")
 		}
 		return 0
 	}
@@ -336,7 +336,7 @@ func semasleep(ns int64) int32 {
 		if *_m_.perrno == _EINTR {
 			continue
 		}
-		gothrow("sem_wait")
+		throw("sem_wait")
 	}
 	return 0
 }
@@ -344,7 +344,7 @@ func semasleep(ns int64) int32 {
 //go:nosplit
 func semawakeup(mp *m) {
 	if sem_post((*semt)(unsafe.Pointer(mp.waitsema))) != 0 {
-		gothrow("sem_post")
+		throw("sem_post")
 	}
 }
 
@@ -36,13 +36,13 @@ func write(fd uintptr, p unsafe.Pointer, n int32) int32
 
 //go:linkname os_sigpipe os.sigpipe
 func os_sigpipe() {
-	gothrow("too many writes on closed pipe")
+	throw("too many writes on closed pipe")
 }
 
 func sigpanic() {
 	g := getg()
 	if !canpanic(g) {
-		gothrow("unexpected signal during runtime execution")
+		throw("unexpected signal during runtime execution")
 	}
 
 	// Native Client only invokes the exception handler for memory faults.
@@ -55,13 +55,13 @@ type _Plink uintptr
 
 //go:linkname os_sigpipe os.sigpipe
 func os_sigpipe() {
-	gothrow("too many writes on closed pipe")
+	throw("too many writes on closed pipe")
 }
 
 func sigpanic() {
 	g := getg()
 	if !canpanic(g) {
-		gothrow("unexpected signal during runtime execution")
+		throw("unexpected signal during runtime execution")
 	}
 
 	note := gostringnocopy((*byte)(unsafe.Pointer(g.m.notesig)))
@@ -73,12 +73,12 @@ func sigpanic() {
 			panicmem()
 		}
 		print("unexpected fault address ", hex(g.sigcode1), "\n")
-		gothrow("fault")
+		throw("fault")
 	case _SIGTRAP:
 		if g.paniconfault {
 			panicmem()
 		}
-		gothrow(note)
+		throw(note)
 	case _SIGINTDIV:
 		panicdivide()
 	case _SIGFLOAT:
@@ -10,13 +10,13 @@ type stdFunction *byte
 
 //go:linkname os_sigpipe os.sigpipe
 func os_sigpipe() {
-	gothrow("too many writes on closed pipe")
+	throw("too many writes on closed pipe")
 }
 
 func sigpanic() {
 	g := getg()
 	if !canpanic(g) {
-		gothrow("unexpected signal during runtime execution")
+		throw("unexpected signal during runtime execution")
 	}
 
 	switch uint32(g.sig) {
@@ -25,7 +25,7 @@ func sigpanic() {
 			panicmem()
 		}
 		print("unexpected fault address ", hex(g.sigcode1), "\n")
-		gothrow("fault")
+		throw("fault")
 	case _EXCEPTION_INT_DIVIDE_BY_ZERO:
 		panicdivide()
 	case _EXCEPTION_INT_OVERFLOW:
@@ -37,5 +37,5 @@ func sigpanic() {
 		_EXCEPTION_FLT_UNDERFLOW:
 		panicfloat()
 	}
-	gothrow("fault")
+	throw("fault")
 }
@@ -43,11 +43,11 @@ func panicmem() {
 }
 
 func throwreturn() {
-	gothrow("no return at end of a typed function - compiler is broken")
+	throw("no return at end of a typed function - compiler is broken")
 }
 
 func throwinit() {
-	gothrow("recursive call during initialization - linker skew")
+	throw("recursive call during initialization - linker skew")
 }
 
 // Create a new deferred function fn with siz bytes of arguments.
@@ -56,7 +56,7 @@ func throwinit() {
 func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
 	if getg().m.curg != getg() {
 		// go code on the system stack can't defer
-		gothrow("defer on system stack")
+		throw("defer on system stack")
 	}
 
 	// the arguments of fn are in a perilous state. The stack map
@@ -71,7 +71,7 @@ func deferproc(siz int32, fn *funcval) { // arguments of fn follow fn
 	systemstack(func() {
 		d := newdefer(siz)
 		if d._panic != nil {
-			gothrow("deferproc: d.panic != nil after newdefer")
+			throw("deferproc: d.panic != nil after newdefer")
 		}
 		d.fn = fn
 		d.pc = callerpc
@@ -137,7 +137,7 @@ func testdefersizes() {
 		}
 		if m[defersc] != int32(siz) {
 			print("bad defer size class: i=", i, " siz=", siz, " defersc=", defersc, "\n")
-			gothrow("bad defer size class")
+			throw("bad defer size class")
 		}
 	}
 }
@@ -209,12 +209,12 @@ func freedefer(d *_defer) {
 // Windows otherwise runs out of stack space.
 func freedeferpanic() {
 	// _panic must be cleared before d is unlinked from gp.
-	gothrow("freedefer with d._panic != nil")
+	throw("freedefer with d._panic != nil")
 }
 
 func freedeferfn() {
 	// fn must be cleared before d is unlinked from gp.
-	gothrow("freedefer with d.fn != nil")
+	throw("freedefer with d.fn != nil")
 }
 
 // Run a deferred function if there is one.
@@ -287,7 +287,7 @@ func Goexit() {
 		d.started = true
 		reflectcall(unsafe.Pointer(d.fn), deferArgs(d), uint32(d.siz), uint32(d.siz))
 		if gp._defer != d {
-			gothrow("bad defer entry in Goexit")
+			throw("bad defer entry in Goexit")
 		}
 		d._panic = nil
 		d.fn = nil
@@ -319,7 +319,7 @@ func gopanic(e interface{}) {
 		print("panic: ")
 		printany(e)
 		print("\n")
-		gothrow("panic on system stack")
+		throw("panic on system stack")
 	}
 
 	// m.softfloat is set during software floating point.
@@ -329,25 +329,25 @@ func gopanic(e interface{}) {
 	if gp.m.softfloat != 0 {
 		gp.m.locks--
 		gp.m.softfloat = 0
-		gothrow("panic during softfloat")
+		throw("panic during softfloat")
 	}
 	if gp.m.mallocing != 0 {
 		print("panic: ")
 		printany(e)
 		print("\n")
-		gothrow("panic during malloc")
+		throw("panic during malloc")
 	}
 	if gp.m.gcing != 0 {
 		print("panic: ")
 		printany(e)
 		print("\n")
-		gothrow("panic during gc")
+		throw("panic during gc")
 	}
 	if gp.m.locks != 0 {
 		print("panic: ")
 		printany(e)
 		print("\n")
-		gothrow("panic holding locks")
+		throw("panic holding locks")
 	}
 
 	var p _panic
@@ -390,7 +390,7 @@ func gopanic(e interface{}) {
 
 		// reflectcall did not panic. Remove d.
 		if gp._defer != d {
-			gothrow("bad defer entry in panic")
+			throw("bad defer entry in panic")
 		}
 		d._panic = nil
 		d.fn = nil
@@ -416,7 +416,7 @@ func gopanic(e interface{}) {
 			gp.sigcode0 = uintptr(sp)
 			gp.sigcode1 = pc
 			mcall(recovery)
-			gothrow("recovery failed") // mcall should not return
+			throw("recovery failed") // mcall should not return
 		}
 	}
@@ -482,19 +482,7 @@ func dopanic(unused int) {
 }
 
 //go:nosplit
-func throw(s *byte) {
-	gp := getg()
-	if gp.m.throwing == 0 {
-		gp.m.throwing = 1
-	}
-	startpanic()
-	print("fatal error: ", gostringnocopy(s), "\n")
-	dopanic(0)
-	*(*int)(nil) = 0 // not reached
-}
-
-//go:nosplit
-func gothrow(s string) {
+func throw(s string) {
 	print("fatal error: ", s, "\n")
 	gp := getg()
 	if gp.m.throwing == 0 {
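This is the one hunk that is not a pure rename: the old C-string variant of throw is deleted and the Go-string gothrow takes over its name. For reference, a sketch of the consolidated function after this hunk — the first four lines are the visible context above, and the tail is assumed to mirror the deleted variant (startpanic, dopanic(0), and the deliberate nil store that guarantees the function never returns):

	//go:nosplit
	func throw(s string) {
		print("fatal error: ", s, "\n")
		gp := getg()
		if gp.m.throwing == 0 {
			gp.m.throwing = 1
		}
		startpanic()
		dopanic(0)
		*(*int)(nil) = 0 // not reached
	}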
@@ -23,7 +23,7 @@ func recovery(gp *g) {
 	// d's arguments need to be in the stack.
 	if sp != 0 && (sp < gp.stack.lo || gp.stack.hi < sp) {
 		print("recover: ", hex(sp), " not in [", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n")
-		gothrow("bad recovery")
+		throw("bad recovery")
 	}
 
 	// Make the deferproc for this d return again,
@@ -27,7 +27,7 @@ func desc_thr_index(desc *parfor, i uint32) *parforthread {
 func parforsetup(desc *parfor, nthr, n uint32, ctx unsafe.Pointer, wait bool, body func(*parfor, uint32)) {
 	if desc == nil || nthr == 0 || nthr > desc.nthrmax || body == nil {
 		print("desc=", desc, " nthr=", nthr, " count=", n, " body=", body, "\n")
-		gothrow("parfor: invalid args")
+		throw("parfor: invalid args")
 	}
 
 	desc.body = *(*unsafe.Pointer)(unsafe.Pointer(&body))
@@ -48,7 +48,7 @@ func parforsetup(desc *parfor, nthr, n uint32, ctx unsafe.Pointer, wait bool, bo
 		end := uint32(uint64(n) * uint64(i+1) / uint64(nthr))
 		pos := &desc_thr_index(desc, i).pos
 		if uintptr(unsafe.Pointer(pos))&7 != 0 {
-			gothrow("parforsetup: pos is not aligned")
+			throw("parforsetup: pos is not aligned")
 		}
 		*pos = uint64(begin) | uint64(end)<<32
 	}
@@ -59,7 +59,7 @@ func parfordo(desc *parfor) {
 	tid := xadd(&desc.thrseq, 1) - 1
 	if tid >= desc.nthr {
 		print("tid=", tid, " nthr=", desc.nthr, "\n")
-		gothrow("parfor: invalid tid")
+		throw("parfor: invalid tid")
 	}
 
 	// If single-threaded, just execute the for serially.
@@ -141,7 +141,7 @@ func parfordo(desc *parfor) {
 			if begin < end {
 				// Has successfully stolen some work.
 				if idle {
-					gothrow("parfor: should not be idle")
+					throw("parfor: should not be idle")
 				}
 				atomicstore64(mypos, uint64(begin)|uint64(end)<<32)
 				me.nsteal++
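The parfor hunks above pack a thread's [begin, end) work interval into a single uint64 (*pos = uint64(begin) | uint64(end)<<32) so that both bounds can be published or stolen with one 64-bit atomic. A minimal standalone sketch of that encoding — the helper names pack and unpack are illustrative, not from the runtime:

	// pack mirrors *pos = uint64(begin) | uint64(end)<<32 in parforsetup:
	// begin occupies the low 32 bits, end the high 32 bits.
	func pack(begin, end uint32) uint64 {
		return uint64(begin) | uint64(end)<<32
	}

	// unpack recovers both bounds from a value read with atomicload64.
	func unpack(pos uint64) (begin, end uint32) {
		return uint32(pos), uint32(pos >> 32)
	}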
@@ -43,7 +43,7 @@ func main() {
 	lockOSThread()
 
 	if g.m != &m0 {
-		gothrow("runtime.main not on m0")
+		throw("runtime.main not on m0")
 	}
 
 	runtime_init() // must be before defer
@@ -60,20 +60,20 @@ func main() {
 
 	if iscgo {
 		if _cgo_thread_start == nil {
-			gothrow("_cgo_thread_start missing")
+			throw("_cgo_thread_start missing")
 		}
 		if _cgo_malloc == nil {
-			gothrow("_cgo_malloc missing")
+			throw("_cgo_malloc missing")
 		}
 		if _cgo_free == nil {
-			gothrow("_cgo_free missing")
+			throw("_cgo_free missing")
 		}
 		if GOOS != "windows" {
 			if _cgo_setenv == nil {
-				gothrow("_cgo_setenv missing")
+				throw("_cgo_setenv missing")
 			}
 			if _cgo_unsetenv == nil {
-				gothrow("_cgo_unsetenv missing")
+				throw("_cgo_unsetenv missing")
 			}
 		}
 	}
@@ -114,7 +114,7 @@ func forcegchelper() {
 	for {
 		lock(&forcegc.lock)
 		if forcegc.idle != 0 {
-			gothrow("forcegc: phase error")
+			throw("forcegc: phase error")
 		}
 		atomicstore(&forcegc.idle, 1)
 		goparkunlock(&forcegc.lock, "force gc (idle)")
@@ -141,7 +141,7 @@ func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason s
 	gp := mp.curg
 	status := readgstatus(gp)
 	if status != _Grunning && status != _Gscanrunning {
-		gothrow("gopark: bad g status")
+		throw("gopark: bad g status")
 	}
 	mp.waitlock = lock
 	mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
@@ -169,7 +169,7 @@ func acquireSudog() *sudog {
 	s := c.sudogcache
 	if s != nil {
 		if s.elem != nil {
-			gothrow("acquireSudog: found s.elem != nil in cache")
+			throw("acquireSudog: found s.elem != nil in cache")
 		}
 		c.sudogcache = s.next
 		s.next = nil
@@ -187,7 +187,7 @@ func acquireSudog() *sudog {
 	mp := acquirem()
 	p := new(sudog)
 	if p.elem != nil {
-		gothrow("acquireSudog: found p.elem != nil after new")
+		throw("acquireSudog: found p.elem != nil after new")
 	}
 	releasem(mp)
 	return p
@@ -196,23 +196,23 @@ func acquireSudog() *sudog {
 //go:nosplit
 func releaseSudog(s *sudog) {
 	if s.elem != nil {
-		gothrow("runtime: sudog with non-nil elem")
+		throw("runtime: sudog with non-nil elem")
 	}
 	if s.selectdone != nil {
-		gothrow("runtime: sudog with non-nil selectdone")
+		throw("runtime: sudog with non-nil selectdone")
 	}
 	if s.next != nil {
-		gothrow("runtime: sudog with non-nil next")
+		throw("runtime: sudog with non-nil next")
 	}
 	if s.prev != nil {
-		gothrow("runtime: sudog with non-nil prev")
+		throw("runtime: sudog with non-nil prev")
 	}
 	if s.waitlink != nil {
-		gothrow("runtime: sudog with non-nil waitlink")
+		throw("runtime: sudog with non-nil waitlink")
 	}
 	gp := getg()
 	if gp.param != nil {
-		gothrow("runtime: releaseSudog with non-nil gp.param")
+		throw("runtime: releaseSudog with non-nil gp.param")
 	}
 	c := gomcache()
 	s.next = c.sudogcache
@@ -228,11 +228,11 @@ func funcPC(f interface{}) uintptr {
 
 // called from assembly
 func badmcall(fn func(*g)) {
-	gothrow("runtime: mcall called on m->g0 stack")
+	throw("runtime: mcall called on m->g0 stack")
 }
 
 func badmcall2(fn func(*g)) {
-	gothrow("runtime: mcall function returned")
+	throw("runtime: mcall function returned")
 }
 
 func badreflectcall() {
@@ -263,7 +263,7 @@ var (
 
 func allgadd(gp *g) {
 	if readgstatus(gp) == _Gidle {
-		gothrow("allgadd: bad status Gidle")
+		throw("allgadd: bad status Gidle")
 	}
 
 	lock(&allglock)
@@ -133,7 +133,7 @@ func schedinit() {
 		procs = n
 	}
 	if procresize(int32(procs)) != nil {
-		gothrow("unknown runnable goroutine during bootstrap")
+		throw("unknown runnable goroutine during bootstrap")
 	}
 
 	if buildVersion == "" {
@@ -157,7 +157,7 @@ func checkmcount() {
 	// sched lock is held
 	if sched.mcount > sched.maxmcount {
 		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
-		gothrow("thread exhaustion")
+		throw("thread exhaustion")
 	}
 }
 
@@ -202,7 +202,7 @@ func ready(gp *g) {
 	_g_.m.locks++ // disable preemption because it can be holding p in a local var
 	if status&^_Gscan != _Gwaiting {
 		dumpgstatus(gp)
-		gothrow("bad g->status in ready")
+		throw("bad g->status in ready")
 	}
 
 	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
@@ -259,7 +259,7 @@ func helpgc(nproc int32) {
 		}
 		mp := mget()
 		if mp == nil {
-			gothrow("gcprocs inconsistency")
+			throw("gcprocs inconsistency")
 		}
 		mp.helpgc = n
 		mp.mcache = allp[pos].mcache
@@ -297,7 +297,7 @@ func freezetheworld() {
 
 func isscanstatus(status uint32) bool {
 	if status == _Gscan {
-		gothrow("isscanstatus: Bad status Gscan")
+		throw("isscanstatus: Bad status Gscan")
 	}
 	return status&_Gscan == _Gscan
 }
@@ -321,7 +321,7 @@ func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
 	default:
 		print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
 		dumpgstatus(gp)
-		gothrow("casfrom_Gscanstatus:top gp->status is not in scan state")
+		throw("casfrom_Gscanstatus:top gp->status is not in scan state")
 	case _Gscanrunnable,
 		_Gscanwaiting,
 		_Gscanrunning,
@@ -337,7 +337,7 @@ func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
 	if !success {
 		print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
 		dumpgstatus(gp)
-		gothrow("casfrom_Gscanstatus: gp->status is not in scan state")
+		throw("casfrom_Gscanstatus: gp->status is not in scan state")
 	}
 }
 
@@ -357,7 +357,7 @@ func castogscanstatus(gp *g, oldval, newval uint32) bool {
 		}
 	}
 	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
-	gothrow("castogscanstatus")
+	throw("castogscanstatus")
 	panic("not reached")
 }
 
@@ -370,7 +370,7 @@ func casgstatus(gp *g, oldval, newval uint32) {
 	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
 		systemstack(func() {
 			print("casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
-			gothrow("casgstatus: bad incoming values")
+			throw("casgstatus: bad incoming values")
 		})
 	}
 
@@ -379,7 +379,7 @@ func casgstatus(gp *g, oldval, newval uint32) {
 	for !cas(&gp.atomicstatus, oldval, newval) {
 		if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
 			systemstack(func() {
-				gothrow("casgstatus: waiting for Gwaiting but is Grunnable")
+				throw("casgstatus: waiting for Gwaiting but is Grunnable")
 			})
 		}
 		// Help GC if needed.
@@ -402,7 +402,7 @@ func casgcopystack(gp *g) uint32 {
 	for {
 		oldstatus := readgstatus(gp) &^ _Gscan
 		if oldstatus != _Gwaiting && oldstatus != _Grunnable {
-			gothrow("copystack: bad status, not Gwaiting or Grunnable")
+			throw("copystack: bad status, not Gwaiting or Grunnable")
 		}
 		if cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
 			return oldstatus
@@ -427,7 +427,7 @@ func stopg(gp *g) bool {
 	switch s := readgstatus(gp); s {
 	default:
 		dumpgstatus(gp)
		throw("stopg: gp->atomicstatus is not valid")
 
 	case _Gdead:
 		return false
@@ -479,7 +479,7 @@ func restartg(gp *g) {
 	switch s {
 	default:
 		dumpgstatus(gp)
-		gothrow("restartg: unexpected status")
+		throw("restartg: unexpected status")
 
 	case _Gdead:
 		// ok
@@ -495,7 +495,7 @@ func restartg(gp *g) {
 	case _Gscanenqueue:
 		casfrom_Gscanstatus(gp, _Gscanenqueue, _Gwaiting)
 		if gp != getg().m.curg {
-			gothrow("processing Gscanenqueue on wrong m")
+			throw("processing Gscanenqueue on wrong m")
 		}
 		dropg()
 		ready(gp)
@@ -505,12 +505,12 @@ func restartg(gp *g) {
 func stopscanstart(gp *g) {
 	_g_ := getg()
 	if _g_ == gp {
-		gothrow("GC not moved to G0")
+		throw("GC not moved to G0")
 	}
 	if stopg(gp) {
 		if !isscanstatus(readgstatus(gp)) {
 			dumpgstatus(gp)
-			gothrow("GC not in scan state")
+			throw("GC not in scan state")
 		}
 		restartg(gp)
 	}
@@ -588,7 +588,7 @@ func stoptheworld() {
 	// If we hold a lock, then we won't be able to stop another M
 	// that is blocked trying to acquire the lock.
 	if _g_.m.locks > 0 {
-		gothrow("stoptheworld: holding locks")
+		throw("stoptheworld: holding locks")
 	}
 
 	lock(&sched.lock)
@@ -630,12 +630,12 @@ func stoptheworld() {
 		}
 	}
 	if sched.stopwait != 0 {
-		gothrow("stoptheworld: not stopped")
+		throw("stoptheworld: not stopped")
 	}
 	for i := 0; i < int(gomaxprocs); i++ {
 		p := allp[i]
 		if p.status != _Pgcstop {
-			gothrow("stoptheworld: not stopped")
+			throw("stoptheworld: not stopped")
 		}
 	}
 }
@@ -674,7 +674,7 @@ func starttheworld() {
 		mp := p.m
 		p.m = nil
 		if mp.nextp != nil {
-			gothrow("starttheworld: inconsistent mp->nextp")
+			throw("starttheworld: inconsistent mp->nextp")
 		}
 		mp.nextp = p
 		notewakeup(&mp.park)
@@ -734,7 +734,7 @@ func mstart1() {
 	_g_ := getg()
 
 	if _g_ != _g_.m.g0 {
-		gothrow("bad runtime·mstart")
+		throw("bad runtime·mstart")
 	}
 
 	// Record top of stack for use by mcall.
@@ -1013,7 +1013,7 @@ func _newm(fn func(), _p_ *p) {
 	if iscgo {
 		var ts cgothreadstart
 		if _cgo_thread_start == nil {
-			gothrow("_cgo_thread_start missing")
+			throw("_cgo_thread_start missing")
 		}
 		ts.g = mp.g0
 		ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
@@ -1030,10 +1030,10 @@ func stopm() {
 	_g_ := getg()
 
 	if _g_.m.locks != 0 {
-		gothrow("stopm holding locks")
+		throw("stopm holding locks")
 	}
 	if _g_.m.p != nil {
-		gothrow("stopm holding p")
+		throw("stopm holding p")
 	}
 	if _g_.m.spinning {
 		_g_.m.spinning = false
@@ -1085,10 +1085,10 @@ func startm(_p_ *p, spinning bool) {
 		return
 	}
 	if mp.spinning {
-		gothrow("startm: m is spinning")
+		throw("startm: m is spinning")
 	}
 	if mp.nextp != nil {
-		gothrow("startm: m has p")
+		throw("startm: m has p")
 	}
 	mp.spinning = spinning
 	mp.nextp = _p_
@@ -1150,7 +1150,7 @@ func stoplockedm() {
 	_g_ := getg()
 
 	if _g_.m.lockedg == nil || _g_.m.lockedg.lockedm != _g_.m {
-		gothrow("stoplockedm: inconsistent locking")
+		throw("stoplockedm: inconsistent locking")
 	}
 	if _g_.m.p != nil {
 		// Schedule another M to run this p.
@@ -1165,7 +1165,7 @@ func stoplockedm() {
 	if status&^_Gscan != _Grunnable {
 		print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n")
 		dumpgstatus(_g_)
-		gothrow("stoplockedm: not runnable")
+		throw("stoplockedm: not runnable")
 	}
 	acquirep(_g_.m.nextp)
 	_g_.m.nextp = nil
@@ -1177,10 +1177,10 @@ func startlockedm(gp *g) {
 
 	mp := gp.lockedm
 	if mp == _g_.m {
-		gothrow("startlockedm: locked to me")
+		throw("startlockedm: locked to me")
 	}
 	if mp.nextp != nil {
-		gothrow("startlockedm: m has p")
+		throw("startlockedm: m has p")
 	}
 	// directly handoff current P to the locked m
 	incidlelocked(-1)
@@ -1196,7 +1196,7 @@ func gcstopm() {
 	_g_ := getg()
 
 	if sched.gcwaiting == 0 {
-		gothrow("gcstopm: not waiting for gc")
+		throw("gcstopm: not waiting for gc")
 	}
 	if _g_.m.spinning {
 		_g_.m.spinning = false
@@ -1338,10 +1338,10 @@ stop:
 	// poll network
 	if xchg64(&sched.lastpoll, 0) != 0 {
 		if _g_.m.p != nil {
-			gothrow("findrunnable: netpoll with p")
+			throw("findrunnable: netpoll with p")
 		}
 		if _g_.m.spinning {
-			gothrow("findrunnable: netpoll with spinning")
+			throw("findrunnable: netpoll with spinning")
 		}
 		gp := netpoll(true) // block until new work is available
 		atomicstore64(&sched.lastpoll, uint64(nanotime()))
@@ -1370,7 +1370,7 @@ func resetspinning() {
 		_g_.m.spinning = false
 		nmspinning = xadd(&sched.nmspinning, -1)
 		if nmspinning < 0 {
-			gothrow("findrunnable: negative nmspinning")
+			throw("findrunnable: negative nmspinning")
 		}
 	} else {
 		nmspinning = atomicload(&sched.nmspinning)
@@ -1409,7 +1409,7 @@ func schedule() {
 	_g_ := getg()
 
 	if _g_.m.locks != 0 {
-		gothrow("schedule: holding locks")
+		throw("schedule: holding locks")
 	}
 
 	if _g_.m.lockedg != nil {
@@ -1441,7 +1441,7 @@ top:
 	if gp == nil {
 		gp = runqget(_g_.m.p)
 		if gp != nil && _g_.m.spinning {
-			gothrow("schedule: spinning with local work")
+			throw("schedule: spinning with local work")
 		}
 	}
 	if gp == nil {
@@ -1522,7 +1522,7 @@ func gosched_m(gp *g) {
 	status := readgstatus(gp)
 	if status&^_Gscan != _Grunning {
 		dumpgstatus(gp)
-		gothrow("bad g status")
+		throw("bad g status")
 	}
 	casgstatus(gp, _Grunning, _Grunnable)
 	dropg()
@@ -1562,7 +1562,7 @@ func goexit0(gp *g) {
 
 	if _g_.m.locked&^_LockExternal != 0 {
 		print("invalid m->locked = ", _g_.m.locked, "\n")
-		gothrow("internal lockOSThread error")
+		throw("internal lockOSThread error")
 	}
 	_g_.m.locked = 0
 	gfput(_g_.m.p, gp)
@@ -1628,7 +1628,7 @@ func reentersyscall(pc, sp uintptr) {
 	if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
 		systemstack(func() {
 			print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
-			gothrow("entersyscall")
+			throw("entersyscall")
 		})
 	}
 
@@ -1700,14 +1700,14 @@ func entersyscallblock(dummy int32) {
 		sp3 := _g_.syscallsp
 		systemstack(func() {
 			print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
-			gothrow("entersyscallblock")
+			throw("entersyscallblock")
 		})
 	}
 	casgstatus(_g_, _Grunning, _Gsyscall)
 	if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
 		systemstack(func() {
 			print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
-			gothrow("entersyscallblock")
+			throw("entersyscallblock")
 		})
 	}
 
@@ -1733,13 +1733,13 @@ func exitsyscall(dummy int32) {
 
 	_g_.m.locks++ // see comment in entersyscall
 	if getcallersp(unsafe.Pointer(&dummy)) > _g_.syscallsp {
-		gothrow("exitsyscall: syscall frame is no longer valid")
+		throw("exitsyscall: syscall frame is no longer valid")
 	}
 
 	_g_.waitsince = 0
 	if exitsyscallfast() {
 		if _g_.m.mcache == nil {
-			gothrow("lost mcache")
+			throw("lost mcache")
 		}
 		// There's a cpu for us, so we can run.
 		_g_.m.p.syscalltick++
@@ -1767,7 +1767,7 @@ func exitsyscall(dummy int32) {
 	mcall(exitsyscall0)
 
 	if _g_.m.mcache == nil {
-		gothrow("lost mcache")
+		throw("lost mcache")
 	}
 
 	// Scheduler returned, so we're allowed to run now.
@@ -1941,7 +1941,7 @@ func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr
 
 	if fn == nil {
 		_g_.m.throwing = -1 // do not dump full stacks
-		gothrow("go of nil func value")
+		throw("go of nil func value")
 	}
 	_g_.m.locks++ // disable preemption because it can be holding p in a local var
 	siz := narg + nret
@@ -1952,7 +1952,7 @@ func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr
 	// 4*sizeof(uintreg): extra space added below
 	// sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall).
 	if siz >= _StackMin-4*regSize-regSize {
-		gothrow("newproc: function arguments too large for new goroutine")
+		throw("newproc: function arguments too large for new goroutine")
 	}
 
 	_p_ := _g_.m.p
@@ -1963,11 +1963,11 @@ func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr
 		allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
 	}
 	if newg.stack.hi == 0 {
-		gothrow("newproc1: newg missing stack")
+		throw("newproc1: newg missing stack")
 	}
 
 	if readgstatus(newg) != _Gdead {
-		gothrow("newproc1: new g is not Gdead")
+		throw("newproc1: new g is not Gdead")
 	}
 
 	sp := newg.stack.hi
@@ -2017,7 +2017,7 @@ func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr
 // If local list is too long, transfer a batch to the global list.
 func gfput(_p_ *p, gp *g) {
 	if readgstatus(gp) != _Gdead {
-		gothrow("gfput: bad status (not Gdead)")
+		throw("gfput: bad status (not Gdead)")
 	}
 
 	stksize := gp.stack.hi - gp.stack.lo
@@ -2161,7 +2161,7 @@ func unlockOSThread() {
 }
 
 func badunlockosthread() {
-	gothrow("runtime: internal error: misuse of lockOSThread/unlockOSThread")
+	throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
 }
 
 func gcount() int32 {
@@ -2380,7 +2380,7 @@ func setcpuprofilerate_m(hz int32) {
 func procresize(new int32) *p {
 	old := gomaxprocs
 	if old < 0 || old > _MaxGomaxprocs || new <= 0 || new > _MaxGomaxprocs {
-		gothrow("procresize: invalid arg")
+		throw("procresize: invalid arg")
 	}
 
 	// initialize new P's
@@ -2395,7 +2395,7 @@ func procresize(new int32) *p {
 		if p.mcache == nil {
 			if old == 0 && i == 0 {
 				if getg().m.mcache == nil {
-					gothrow("missing mcache?")
+					throw("missing mcache?")
 				}
 				p.mcache = getg().m.mcache // bootstrap
 			} else {
@@ -2468,7 +2468,7 @@ func acquirep(_p_ *p) {
 	_g_ := getg()
 
 	if _g_.m.p != nil || _g_.m.mcache != nil {
-		gothrow("acquirep: already in go")
+		throw("acquirep: already in go")
 	}
 	if _p_.m != nil || _p_.status != _Pidle {
 		id := int32(0)
@@ -2476,7 +2476,7 @@ func acquirep(_p_ *p) {
 			id = _p_.m.id
 		}
 		print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
-		gothrow("acquirep: invalid p state")
+		throw("acquirep: invalid p state")
 	}
 	_g_.m.mcache = _p_.mcache
 	_g_.m.p = _p_
@@ -2489,12 +2489,12 @@ func releasep() *p {
 	_g_ := getg()
 
 	if _g_.m.p == nil || _g_.m.mcache == nil {
-		gothrow("releasep: invalid arg")
+		throw("releasep: invalid arg")
 	}
 	_p_ := _g_.m.p
 	if _p_.m != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning {
 		print("releasep: m=", _g_.m, " m->p=", _g_.m.p, " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n")
-		gothrow("releasep: invalid p state")
+		throw("releasep: invalid p state")
 	}
 	_g_.m.p = nil
 	_g_.m.mcache = nil
@@ -2530,7 +2530,7 @@ func checkdead() {
 	}
 	if run < 0 {
 		print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", sched.mcount, "\n")
-		gothrow("checkdead: inconsistent counts")
+		throw("checkdead: inconsistent counts")
 	}
 
 	grunning := 0
@@ -2549,12 +2549,12 @@ func checkdead() {
 			_Gsyscall:
 			unlock(&allglock)
 			print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
-			gothrow("checkdead: runnable g")
+			throw("checkdead: runnable g")
 		}
 	}
 	unlock(&allglock)
 	if grunning == 0 { // possible if main goroutine calls runtime·Goexit()
-		gothrow("no goroutines (main called runtime.Goexit) - deadlock!")
+		throw("no goroutines (main called runtime.Goexit) - deadlock!")
 	}
 
 	// Maybe jump time forward for playground.
@@ -2564,7 +2564,7 @@ func checkdead() {
 		globrunqput(gp)
 		_p_ := pidleget()
 		if _p_ == nil {
-			gothrow("checkdead: no p for timer")
+			throw("checkdead: no p for timer")
 		}
 		mp := mget()
 		if mp == nil {
@@ -2577,7 +2577,7 @@ func checkdead() {
 	}
 
 	getg().m.throwing = -1 // do not dump full stacks
-	gothrow("all goroutines are asleep - deadlock!")
+	throw("all goroutines are asleep - deadlock!")
 }
 
 func sysmon() {
@@ -2999,7 +2999,7 @@ func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
 	n := t - h
 	n = n / 2
 	if n != uint32(len(_p_.runq)/2) {
-		gothrow("runqputslow: queue is not full")
+		throw("runqputslow: queue is not full")
 	}
 	for i := uint32(0); i < n; i++ {
 		batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))]
@@ -3079,7 +3079,7 @@ func runqsteal(_p_, p2 *p) *g {
 	h := atomicload(&_p_.runqhead) // load-acquire, synchronize with consumers
 	t := _p_.runqtail
 	if t-h+n >= uint32(len(_p_.runq)) {
-		gothrow("runqsteal: runq overflow")
+		throw("runqsteal: runq overflow")
 	}
 	for i := uint32(0); i < n; i++ {
 		_p_.runq[(t+i)%uint32(len(_p_.runq))] = batch[i]
@@ -3093,7 +3093,7 @@ func testSchedLocalQueue() {
 	gs := make([]g, len(_p_.runq))
 	for i := 0; i < len(_p_.runq); i++ {
 		if runqget(_p_) != nil {
-			gothrow("runq is not empty initially")
+			throw("runq is not empty initially")
 		}
 		for j := 0; j < i; j++ {
 			runqput(_p_, &gs[i])
@@ -3101,11 +3101,11 @@ func testSchedLocalQueue() {
 		for j := 0; j < i; j++ {
 			if runqget(_p_) != &gs[i] {
 				print("bad element at iter ", i, "/", j, "\n")
-				gothrow("bad element")
+				throw("bad element")
 			}
 		}
 		if runqget(_p_) != nil {
-			gothrow("runq is not empty afterwards")
+			throw("runq is not empty afterwards")
 		}
 	}
 }
@@ -3143,12 +3143,12 @@ func testSchedLocalQueueSteal() {
 		for j := 0; j < i; j++ {
 			if gs[j].sig != 1 {
 				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
-				gothrow("bad element")
+				throw("bad element")
 			}
 		}
 		if s != i/2 && s != i/2+1 {
 			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
-			gothrow("bad steal")
+			throw("bad steal")
 		}
 	}
 }
@@ -16,22 +16,22 @@ const raceenabled = false
 
 // Because raceenabled is false, none of these functions should be called.
 
-func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") }
-func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") }
-func raceinit() uintptr { gothrow("race"); return 0 }
-func racefini() { gothrow("race") }
-func racemapshadow(addr unsafe.Pointer, size uintptr) { gothrow("race") }
-func racewritepc(addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") }
-func racereadpc(addr unsafe.Pointer, callerpc, pc uintptr) { gothrow("race") }
-func racereadrangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr) { gothrow("race") }
-func racewriterangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr) { gothrow("race") }
-func raceacquire(addr unsafe.Pointer) { gothrow("race") }
-func raceacquireg(gp *g, addr unsafe.Pointer) { gothrow("race") }
-func racerelease(addr unsafe.Pointer) { gothrow("race") }
-func racereleaseg(gp *g, addr unsafe.Pointer) { gothrow("race") }
-func racereleasemerge(addr unsafe.Pointer) { gothrow("race") }
-func racereleasemergeg(gp *g, addr unsafe.Pointer) { gothrow("race") }
-func racefingo() { gothrow("race") }
-func racemalloc(p unsafe.Pointer, sz uintptr) { gothrow("race") }
-func racegostart(pc uintptr) uintptr { gothrow("race"); return 0 }
-func racegoend() { gothrow("race") }
+func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { throw("race") }
+func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) { throw("race") }
+func raceinit() uintptr { throw("race"); return 0 }
+func racefini() { throw("race") }
+func racemapshadow(addr unsafe.Pointer, size uintptr) { throw("race") }
+func racewritepc(addr unsafe.Pointer, callerpc, pc uintptr) { throw("race") }
+func racereadpc(addr unsafe.Pointer, callerpc, pc uintptr) { throw("race") }
+func racereadrangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr) { throw("race") }
+func racewriterangepc(addr unsafe.Pointer, sz, callerpc, pc uintptr) { throw("race") }
+func raceacquire(addr unsafe.Pointer) { throw("race") }
+func raceacquireg(gp *g, addr unsafe.Pointer) { throw("race") }
+func racerelease(addr unsafe.Pointer) { throw("race") }
+func racereleaseg(gp *g, addr unsafe.Pointer) { throw("race") }
+func racereleasemerge(addr unsafe.Pointer) { throw("race") }
+func racereleasemergeg(gp *g, addr unsafe.Pointer) { throw("race") }
+func racefingo() { throw("race") }
+func racemalloc(p unsafe.Pointer, sz uintptr) { throw("race") }
+func racegostart(pc uintptr) uintptr { throw("race"); return 0 }
+func racegoend() { throw("race") }
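These one-line stubs belong to the build with the race detector disabled: raceenabled is the compile-time constant false, so every call site is expected to be guarded and the stubs are unreachable; reaching one means a guard was missing, hence throw("race"). A minimal sketch of the guard pattern — noteAccess is a hypothetical caller, not from the source:

	const raceenabled = false // the race-off build, as in the hunk above

	func noteAccess(addr unsafe.Pointer) {
		// The guard is a compile-time constant: in non-race builds this
		// branch is dead code and raceacquire can never be reached.
		if raceenabled {
			raceacquire(addr)
		}
	}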
@ -110,7 +110,7 @@ func isvalidaddr(addr unsafe.Pointer) bool {
|
|||||||
func raceinit() uintptr {
|
func raceinit() uintptr {
|
||||||
// cgo is required to initialize libc, which is used by race runtime
|
// cgo is required to initialize libc, which is used by race runtime
|
||||||
if !iscgo {
|
if !iscgo {
|
||||||
gothrow("raceinit: race build must use cgo")
|
throw("raceinit: race build must use cgo")
|
||||||
}
|
}
|
||||||
|
|
||||||
var racectx uintptr
|
var racectx uintptr
|
||||||
|
@@ -102,36 +102,36 @@ func testAtomic64() {
 	prefetcht2(uintptr(unsafe.Pointer(&z64)))
 	prefetchnta(uintptr(unsafe.Pointer(&z64)))
 	if cas64(&z64, x64, 1) {
-		gothrow("cas64 failed")
+		throw("cas64 failed")
 	}
 	if x64 != 0 {
-		gothrow("cas64 failed")
+		throw("cas64 failed")
 	}
 	x64 = 42
 	if !cas64(&z64, x64, 1) {
-		gothrow("cas64 failed")
+		throw("cas64 failed")
 	}
 	if x64 != 42 || z64 != 1 {
-		gothrow("cas64 failed")
+		throw("cas64 failed")
 	}
 	if atomicload64(&z64) != 1 {
-		gothrow("load64 failed")
+		throw("load64 failed")
 	}
 	atomicstore64(&z64, (1<<40)+1)
 	if atomicload64(&z64) != (1<<40)+1 {
-		gothrow("store64 failed")
+		throw("store64 failed")
 	}
 	if xadd64(&z64, (1<<40)+1) != (2<<40)+2 {
-		gothrow("xadd64 failed")
+		throw("xadd64 failed")
 	}
 	if atomicload64(&z64) != (2<<40)+2 {
-		gothrow("xadd64 failed")
+		throw("xadd64 failed")
 	}
 	if xchg64(&z64, (3<<40)+3) != (2<<40)+2 {
-		gothrow("xchg64 failed")
+		throw("xchg64 failed")
 	}
 	if atomicload64(&z64) != (3<<40)+3 {
-		gothrow("xchg64 failed")
+		throw("xchg64 failed")
 	}
 }
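The runtime primitives exercised above (cas64, atomicload64, atomicstore64, xadd64, xchg64) have exported counterparts in sync/atomic. A minimal self-contained sketch mirroring the same checks with the public API, by analogy rather than from the commit itself:

package main

import "sync/atomic"

func main() {
	var z64, x64 uint64 = 42, 0
	if atomic.CompareAndSwapUint64(&z64, x64, 1) {
		panic("cas64 should have failed") // z64 is 42, not 0
	}
	x64 = 42
	if !atomic.CompareAndSwapUint64(&z64, x64, 1) {
		panic("cas64 should have succeeded")
	}
	atomic.StoreUint64(&z64, (1<<40)+1)
	if atomic.AddUint64(&z64, (1<<40)+1) != (2<<40)+2 { // Add returns the new value, like xadd64
		panic("add64 failed")
	}
	if atomic.SwapUint64(&z64, (3<<40)+3) != (2<<40)+2 { // Swap returns the old value, like xchg64
		panic("swap64 failed")
	}
	if atomic.LoadUint64(&z64) != (3<<40)+3 {
		panic("load64 failed")
	}
}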
@@ -162,78 +162,78 @@ func check() {
 	var y1 y1t

 	if unsafe.Sizeof(a) != 1 {
-		gothrow("bad a")
+		throw("bad a")
 	}
 	if unsafe.Sizeof(b) != 1 {
-		gothrow("bad b")
+		throw("bad b")
 	}
 	if unsafe.Sizeof(c) != 2 {
-		gothrow("bad c")
+		throw("bad c")
 	}
 	if unsafe.Sizeof(d) != 2 {
-		gothrow("bad d")
+		throw("bad d")
 	}
 	if unsafe.Sizeof(e) != 4 {
-		gothrow("bad e")
+		throw("bad e")
 	}
 	if unsafe.Sizeof(f) != 4 {
-		gothrow("bad f")
+		throw("bad f")
 	}
 	if unsafe.Sizeof(g) != 8 {
-		gothrow("bad g")
+		throw("bad g")
 	}
 	if unsafe.Sizeof(h) != 8 {
-		gothrow("bad h")
+		throw("bad h")
 	}
 	if unsafe.Sizeof(i) != 4 {
-		gothrow("bad i")
+		throw("bad i")
 	}
 	if unsafe.Sizeof(j) != 8 {
-		gothrow("bad j")
+		throw("bad j")
 	}
 	if unsafe.Sizeof(k) != ptrSize {
-		gothrow("bad k")
+		throw("bad k")
 	}
 	if unsafe.Sizeof(l) != ptrSize {
-		gothrow("bad l")
+		throw("bad l")
 	}
 	if unsafe.Sizeof(x1) != 1 {
-		gothrow("bad unsafe.Sizeof x1")
+		throw("bad unsafe.Sizeof x1")
 	}
 	if unsafe.Offsetof(y1.y) != 1 {
-		gothrow("bad offsetof y1.y")
+		throw("bad offsetof y1.y")
 	}
 	if unsafe.Sizeof(y1) != 2 {
-		gothrow("bad unsafe.Sizeof y1")
+		throw("bad unsafe.Sizeof y1")
 	}

 	if timediv(12345*1000000000+54321, 1000000000, &e) != 12345 || e != 54321 {
-		gothrow("bad timediv")
+		throw("bad timediv")
 	}

 	var z uint32
 	z = 1
 	if !cas(&z, 1, 2) {
-		gothrow("cas1")
+		throw("cas1")
 	}
 	if z != 2 {
-		gothrow("cas2")
+		throw("cas2")
 	}

 	z = 4
 	if cas(&z, 5, 6) {
-		gothrow("cas3")
+		throw("cas3")
 	}
 	if z != 4 {
-		gothrow("cas4")
+		throw("cas4")
 	}

 	z = 0xffffffff
 	if !cas(&z, 0xffffffff, 0xfffffffe) {
-		gothrow("cas5")
+		throw("cas5")
 	}
 	if z != 0xfffffffe {
-		gothrow("cas6")
+		throw("cas6")
 	}

 	k = unsafe.Pointer(uintptr(0xfedcb123))
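The cas checks above pin down compare-and-swap semantics: the swap happens, and the call reports true, only when the current value equals the expected old value. The exported counterpart is sync/atomic.CompareAndSwapUint32; a runnable illustration of the same contract (mine, not from the commit):

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var z uint32 = 1
	fmt.Println(atomic.CompareAndSwapUint32(&z, 1, 2), z) // true 2: expected value matched
	z = 4
	fmt.Println(atomic.CompareAndSwapUint32(&z, 5, 6), z) // false 4: mismatch, z untouched
}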
@@ -241,58 +241,58 @@ func check() {
 		k = unsafe.Pointer(uintptr(unsafe.Pointer(k)) << 10)
 	}
 	if casp(&k, nil, nil) {
-		gothrow("casp1")
+		throw("casp1")
 	}
 	k1 = add(k, 1)
 	if !casp(&k, k, k1) {
-		gothrow("casp2")
+		throw("casp2")
 	}
 	if k != k1 {
-		gothrow("casp3")
+		throw("casp3")
 	}

 	m = [4]byte{1, 1, 1, 1}
 	atomicor8(&m[1], 0xf0)
 	if m[0] != 1 || m[1] != 0xf1 || m[2] != 1 || m[3] != 1 {
-		gothrow("atomicor8")
+		throw("atomicor8")
 	}

 	*(*uint64)(unsafe.Pointer(&j)) = ^uint64(0)
 	if j == j {
-		gothrow("float64nan")
+		throw("float64nan")
 	}
 	if !(j != j) {
-		gothrow("float64nan1")
+		throw("float64nan1")
 	}

 	*(*uint64)(unsafe.Pointer(&j1)) = ^uint64(1)
 	if j == j1 {
-		gothrow("float64nan2")
+		throw("float64nan2")
 	}
 	if !(j != j1) {
-		gothrow("float64nan3")
+		throw("float64nan3")
 	}

 	*(*uint32)(unsafe.Pointer(&i)) = ^uint32(0)
 	if i == i {
-		gothrow("float32nan")
+		throw("float32nan")
 	}
 	if !(i != i) {
-		gothrow("float32nan1")
+		throw("float32nan1")
 	}

 	*(*uint32)(unsafe.Pointer(&i1)) = ^uint32(1)
 	if i == i1 {
-		gothrow("float32nan2")
+		throw("float32nan2")
 	}
 	if !(i != i1) {
-		gothrow("float32nan3")
+		throw("float32nan3")
 	}

 	testAtomic64()

 	if _FixedStack != round2(_FixedStack) {
-		gothrow("FixedStack is not power-of-2")
+		throw("FixedStack is not power-of-2")
 	}
 }
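The NaN checks work because an all-ones bit pattern decodes to a NaN, and IEEE 754 defines every comparison with a NaN as false, including equality with itself. A runnable illustration using the exported bit-casting helpers in math (my example, not the runtime's code):

package main

import (
	"fmt"
	"math"
)

func main() {
	j := math.Float64frombits(^uint64(0)) // all-ones bit pattern is a NaN
	fmt.Println(j == j, j != j)           // false true
	i := math.Float32frombits(^uint32(0)) // same story for float32
	fmt.Println(i == i, i != i)           // false true
}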
@@ -28,7 +28,7 @@ func selectsize(size uintptr) uintptr {
 func newselect(sel *_select, selsize int64, size int32) {
 	if selsize != int64(selectsize(uintptr(size))) {
 		print("runtime: bad select size ", selsize, ", want ", selectsize(uintptr(size)), "\n")
-		gothrow("bad select size")
+		throw("bad select size")
 	}
 	sel.tcase = uint16(size)
 	sel.ncase = 0
@@ -53,7 +53,7 @@ func selectsend(sel *_select, c *hchan, elem unsafe.Pointer) (selected bool) {
 func selectsendImpl(sel *_select, c *hchan, pc uintptr, elem unsafe.Pointer, so uintptr) {
 	i := sel.ncase
 	if i >= sel.tcase {
-		gothrow("selectsend: too many cases")
+		throw("selectsend: too many cases")
 	}
 	sel.ncase = i + 1
 	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
@@ -90,7 +90,7 @@ func selectrecv2(sel *_select, c *hchan, elem unsafe.Pointer, received *bool) (s
 func selectrecvImpl(sel *_select, c *hchan, pc uintptr, elem unsafe.Pointer, received *bool, so uintptr) {
 	i := sel.ncase
 	if i >= sel.tcase {
-		gothrow("selectrecv: too many cases")
+		throw("selectrecv: too many cases")
 	}
 	sel.ncase = i + 1
 	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
@@ -115,7 +115,7 @@ func selectdefault(sel *_select) (selected bool) {
 func selectdefaultImpl(sel *_select, callerpc uintptr, so uintptr) {
 	i := sel.ncase
 	if i >= sel.tcase {
-		gothrow("selectdefault: too many cases")
+		throw("selectdefault: too many cases")
 	}
 	sel.ncase = i + 1
 	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
@@ -263,7 +263,7 @@ func selectgoImpl(sel *_select) (uintptr, uint16) {
 		for i := 0; i+1 < int(sel.ncase); i++ {
 			if lockorder[i].sortkey() > lockorder[i+1].sortkey() {
 				print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
-				gothrow("select: broken sort")
+				throw("select: broken sort")
 			}
 		}
 	*/
@@ -412,7 +412,7 @@ loop:
 		c = cas._chan

 		if c.dataqsiz > 0 {
-			gothrow("selectgo: shouldn't happen")
+			throw("selectgo: shouldn't happen")
 		}

 		if debugSelect {
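The broken-sort check in the commented-out block above guards the invariant selectgo relies on: the channels in a select are locked in one consistent order (sorted by a per-channel sortkey), so two selects over overlapping channel sets cannot deadlock against each other. A standalone sketch of that idea using plain mutexes sorted by address; an assumption drawn from the check above, not the runtime's actual code:

package main

import (
	"sort"
	"sync"
	"unsafe"
)

// lockAll acquires every mutex in a single global order (here, by
// address), so concurrent callers with overlapping sets cannot deadlock.
func lockAll(mus []*sync.Mutex) {
	sort.Slice(mus, func(i, j int) bool {
		return uintptr(unsafe.Pointer(mus[i])) < uintptr(unsafe.Pointer(mus[j]))
	})
	for _, mu := range mus {
		mu.Lock()
	}
}

func main() {
	var a, b sync.Mutex
	lockAll([]*sync.Mutex{&b, &a}) // order of the argument slice does not matter
	a.Unlock()
	b.Unlock()
}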
@@ -62,7 +62,7 @@ func net_runtime_Semrelease(addr *uint32) {
 func semacquire(addr *uint32, profile bool) {
 	gp := getg()
 	if gp != gp.m.curg {
-		gothrow("semacquire not on the G stack")
+		throw("semacquire not on the G stack")
 	}

 	// Easy case.
@@ -284,6 +284,6 @@ func syncsemrelease(s *syncSema, n uint32) {
 func syncsemcheck(sz uintptr) {
 	if sz != unsafe.Sizeof(syncSema{}) {
 		print("runtime: bad syncSema size - sync=", sz, " runtime=", unsafe.Sizeof(syncSema{}), "\n")
-		gothrow("bad syncSema size")
+		throw("bad syncSema size")
 	}
 }
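syncsemcheck is a cross-package layout assertion: the sync package mirrors the runtime's syncSema definition and passes its own unsafe.Sizeof at startup so a drift between the two copies fails fast instead of corrupting memory. A hedged sketch of the pattern; the struct fields below are placeholders, not the real syncSema:

package main

import "unsafe"

type mirrored struct { // stand-in for one package's copy of a shared type
	head, tail unsafe.Pointer
}

type original struct { // stand-in for the other package's copy
	head, tail unsafe.Pointer
}

func main() {
	// The check cannot prove the layouts match field by field, but a
	// size mismatch is a cheap, reliable tripwire.
	if unsafe.Sizeof(mirrored{}) != unsafe.Sizeof(original{}) {
		panic("bad mirrored struct size")
	}
}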
@@ -16,7 +16,7 @@ func initsig() {
 	// sigtable should describe what to do for all the possible signals.
 	if len(sigtable) != _NSIG {
 		print("runtime: len(sigtable)=", len(sigtable), " _NSIG=", _NSIG, "\n")
-		gothrow("initsig")
+		throw("initsig")
 	}

 	// First call: basic setup.
@@ -9,7 +9,7 @@ package runtime
 func sigpanic() {
 	g := getg()
 	if !canpanic(g) {
-		gothrow("unexpected signal during runtime execution")
+		throw("unexpected signal during runtime execution")
 	}

 	switch g.sig {
@@ -18,13 +18,13 @@ func sigpanic() {
 			panicmem()
 		}
 		print("unexpected fault address ", hex(g.sigcode1), "\n")
-		gothrow("fault")
+		throw("fault")
 	case _SIGSEGV:
 		if (g.sigcode0 == 0 || g.sigcode0 == _SEGV_MAPERR || g.sigcode0 == _SEGV_ACCERR) && g.sigcode1 < 0x1000 || g.paniconfault {
 			panicmem()
 		}
 		print("unexpected fault address ", hex(g.sigcode1), "\n")
-		gothrow("fault")
+		throw("fault")
 	case _SIGFPE:
 		switch g.sigcode0 {
 		case _FPE_INTDIV:
@@ -37,7 +37,7 @@ func sigpanic() {

 	if g.sig >= uint32(len(sigtable)) {
 		// can't happen: we looked up g.sig in sigtable to decide to call sigpanic
-		gothrow("unexpected signal value")
+		throw("unexpected signal value")
 	}
 	panic(errorString(sigtable[g.sig].name))
 }
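The _SIGSEGV arm above is what makes a nil dereference recoverable: a fault at a low address (below 0x1000) is routed through panicmem into an ordinary Go panic rather than a hard throw. A runnable demo of the user-visible effect; my example, not part of the commit:

package main

import "fmt"

func main() {
	defer func() {
		// Prints: recovered: runtime error: invalid memory address or
		// nil pointer dereference
		fmt.Println("recovered:", recover())
	}()
	var p *int
	_ = *p // faults at address 0; sigpanic turns the SIGSEGV into a panic
}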
@@ -67,7 +67,7 @@ Send:
 	for {
 		switch atomicload(&sig.state) {
 		default:
-			gothrow("sigsend: inconsistent state")
+			throw("sigsend: inconsistent state")
 		case sigIdle:
 			if cas(&sig.state, sigIdle, sigSending) {
 				break Send
@@ -103,7 +103,7 @@ func signal_recv() uint32 {
 	for {
 		switch atomicload(&sig.state) {
 		default:
-			gothrow("signal_recv: inconsistent state")
+			throw("signal_recv: inconsistent state")
 		case sigIdle:
 			if cas(&sig.state, sigIdle, sigReceiving) {
 				notetsleepg(&sig.note, -1)
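sigsend and signal_recv coordinate through a tiny lock-free state machine: load the state, and advance it with a compare-and-swap, retrying if another party won the race; any state neither side expects is a bug and throws. A simplified standalone sketch of that pattern (the state names mirror the diff, the transitions are illustrative only):

package main

import (
	"fmt"
	"sync/atomic"
)

const (
	sigIdle uint32 = iota
	sigSending
	sigReceiving
)

func main() {
	var state uint32 = sigIdle
	for {
		switch atomic.LoadUint32(&state) {
		case sigIdle:
			if atomic.CompareAndSwapUint32(&state, sigIdle, sigSending) {
				fmt.Println("idle -> sending")
				return
			}
			// lost the race; re-read the state and try again
		default:
			panic("inconsistent state")
		}
	}
}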
@@ -21,7 +21,7 @@ const (
 var fptrace = 0

 func fabort() {
-	gothrow("unsupported floating point instruction")
+	throw("unsupported floating point instruction")
 }

 func fputf(reg uint32, val uint32) {
@@ -49,7 +49,7 @@ var stackfreequeue stack

 func stackinit() {
 	if _StackCacheSize&_PageMask != 0 {
-		gothrow("cache size must be a multiple of page size")
+		throw("cache size must be a multiple of page size")
 	}
 	for i := range stackpool {
 		mSpanList_Init(&stackpool[i])
@@ -65,13 +65,13 @@ func stackpoolalloc(order uint8) gclinkptr {
 		// no free stacks. Allocate another span worth.
 		s = mHeap_AllocStack(&mheap_, _StackCacheSize>>_PageShift)
 		if s == nil {
-			gothrow("out of memory")
+			throw("out of memory")
 		}
 		if s.ref != 0 {
-			gothrow("bad ref")
+			throw("bad ref")
 		}
 		if s.freelist.ptr() != nil {
-			gothrow("bad freelist")
+			throw("bad freelist")
 		}
 		for i := uintptr(0); i < _StackCacheSize; i += _FixedStack << order {
 			x := gclinkptr(uintptr(s.start)<<_PageShift + i)
@@ -82,7 +82,7 @@ func stackpoolalloc(order uint8) gclinkptr {
 	}
 	x := s.freelist
 	if x.ptr() == nil {
-		gothrow("span has no free stacks")
+		throw("span has no free stacks")
 	}
 	s.freelist = x.ptr().next
 	s.ref++
@@ -97,7 +97,7 @@ func stackpoolalloc(order uint8) gclinkptr {
 func stackpoolfree(x gclinkptr, order uint8) {
 	s := mHeap_Lookup(&mheap_, (unsafe.Pointer)(x))
 	if s.state != _MSpanStack {
-		gothrow("freeing stack not in a stack span")
+		throw("freeing stack not in a stack span")
 	}
 	if s.freelist.ptr() == nil {
 		// s will now have a free stack
@@ -179,10 +179,10 @@ func stackalloc(n uint32) stack {
 	// Doing so would cause a deadlock (issue 1547).
 	thisg := getg()
 	if thisg != thisg.m.g0 {
-		gothrow("stackalloc not on scheduler stack")
+		throw("stackalloc not on scheduler stack")
 	}
 	if n&(n-1) != 0 {
-		gothrow("stack size not a power of 2")
+		throw("stack size not a power of 2")
 	}
 	if stackDebug >= 1 {
 		print("stackalloc ", n, "\n")
@@ -191,7 +191,7 @@ func stackalloc(n uint32) stack {
 	if debug.efence != 0 || stackFromSystem != 0 {
 		v := sysAlloc(round(uintptr(n), _PageSize), &memstats.stacks_sys)
 		if v == nil {
-			gothrow("out of memory (stackalloc)")
+			throw("out of memory (stackalloc)")
 		}
 		return stack{uintptr(v), uintptr(v) + uintptr(n)}
 	}
@@ -230,7 +230,7 @@ func stackalloc(n uint32) stack {
 	} else {
 		s := mHeap_AllocStack(&mheap_, round(uintptr(n), _PageSize)>>_PageShift)
 		if s == nil {
-			gothrow("out of memory")
+			throw("out of memory")
 		}
 		v = (unsafe.Pointer)(s.start << _PageShift)
 	}
@@ -249,7 +249,7 @@ func stackfree(stk stack) {
 	n := stk.hi - stk.lo
 	v := (unsafe.Pointer)(stk.lo)
 	if n&(n-1) != 0 {
-		gothrow("stack not a power of 2")
+		throw("stack not a power of 2")
 	}
 	if stackDebug >= 1 {
 		println("stackfree", v, n)
@@ -288,7 +288,7 @@ func stackfree(stk stack) {
 		s := mHeap_Lookup(&mheap_, v)
 		if s.state != _MSpanStack {
 			println(hex(s.start<<_PageShift), v)
-			gothrow("bad span state")
+			throw("bad span state")
 		}
 		mHeap_FreeStack(&mheap_, s)
 	}
@@ -379,7 +379,7 @@ func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f
 		}
 		switch ptrbits(&bv, i) {
 		default:
-			gothrow("unexpected pointer bits")
+			throw("unexpected pointer bits")
 		case _BitsDead:
 			if debug.gcdead != 0 {
 				*(*unsafe.Pointer)(add(scanp, i*ptrSize)) = unsafe.Pointer(uintptr(poisonStack))
@@ -394,7 +394,7 @@ func adjustpointers(scanp unsafe.Pointer, cbv *bitvector, adjinfo *adjustinfo, f
 				// Live analysis wrong?
 				getg().m.traceback = 2
 				print("runtime: bad pointer in frame ", gofuncname(f), " at ", add(scanp, i*ptrSize), ": ", p, "\n")
-				gothrow("invalid stack pointer")
+				throw("invalid stack pointer")
 			}
 			if minp <= up && up < maxp {
 				if stackDebug >= 3 {
@@ -445,13 +445,13 @@ func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
 		stackmap := (*stackmap)(funcdata(f, _FUNCDATA_LocalsPointerMaps))
 		if stackmap == nil || stackmap.n <= 0 {
 			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n")
-			gothrow("missing stackmap")
+			throw("missing stackmap")
 		}
 		// Locals bitmap information, scan just the pointers in locals.
 		if pcdata < 0 || pcdata >= stackmap.n {
 			// don't know where we are
 			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
-			gothrow("bad symbol table")
+			throw("bad symbol table")
 		}
 		bv = stackmapdata(stackmap, pcdata)
 		size = (uintptr(bv.n) * ptrSize) / _BitsPerPointer
@@ -470,12 +470,12 @@ func adjustframe(frame *stkframe, arg unsafe.Pointer) bool {
 			stackmap := (*stackmap)(funcdata(f, _FUNCDATA_ArgsPointerMaps))
 			if stackmap == nil || stackmap.n <= 0 {
 				print("runtime: frame ", funcname(f), " untyped args ", frame.argp, "+", uintptr(frame.arglen), "\n")
-				gothrow("missing stackmap")
+				throw("missing stackmap")
 			}
 			if pcdata < 0 || pcdata >= stackmap.n {
 				// don't know where we are
 				print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", targetpc, ")\n")
-				gothrow("bad symbol table")
+				throw("bad symbol table")
 			}
 			bv = stackmapdata(stackmap, pcdata)
 		}
@@ -529,11 +529,11 @@ func fillstack(stk stack, b byte) {
 // Caller must have changed gp status to Gcopystack.
 func copystack(gp *g, newsize uintptr) {
 	if gp.syscallsp != 0 {
-		gothrow("stack growth not allowed in system call")
+		throw("stack growth not allowed in system call")
 	}
 	old := gp.stack
 	if old.lo == 0 {
-		gothrow("nil stackbase")
+		throw("nil stackbase")
 	}
 	used := old.hi - gp.sched.sp

@@ -612,13 +612,13 @@ func newstack() {
 	thisg := getg()
 	// TODO: double check all gp. shouldn't be getg().
 	if thisg.m.morebuf.g.stackguard0 == stackFork {
-		gothrow("stack growth after fork")
+		throw("stack growth after fork")
 	}
 	if thisg.m.morebuf.g != thisg.m.curg {
 		print("runtime: newstack called from g=", thisg.m.morebuf.g, "\n"+"\tm=", thisg.m, " m->curg=", thisg.m.curg, " m->g0=", thisg.m.g0, " m->gsignal=", thisg.m.gsignal, "\n")
 		morebuf := thisg.m.morebuf
 		traceback(morebuf.pc, morebuf.sp, morebuf.lr, morebuf.g)
-		gothrow("runtime: wrong goroutine in newstack")
+		throw("runtime: wrong goroutine in newstack")
 	}
 	if thisg.m.curg.throwsplit {
 		gp := thisg.m.curg
@@ -629,7 +629,7 @@ func newstack() {
 		print("runtime: newstack sp=", hex(gp.sched.sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n",
 			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n",
 			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n")
-		gothrow("runtime: stack split at bad time")
+		throw("runtime: stack split at bad time")
 	}

 	// The goroutine must be executing in order to call newstack,
@@ -648,7 +648,7 @@ func newstack() {
 	rewindmorestack(&gp.sched)

 	if gp.stack.lo == 0 {
-		gothrow("missing stack in newstack")
+		throw("missing stack in newstack")
 	}
 	sp := gp.sched.sp
 	if thechar == '6' || thechar == '8' {
@@ -663,7 +663,7 @@ func newstack() {
 	if sp < gp.stack.lo {
 		print("runtime: gp=", gp, ", gp->status=", hex(readgstatus(gp)), "\n ")
 		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n")
-		gothrow("runtime: split stack overflow")
+		throw("runtime: split stack overflow")
 	}

 	if gp.sched.ctxt != nil {
@@ -676,10 +676,10 @@ func newstack() {

 	if gp.stackguard0 == stackPreempt {
 		if gp == thisg.m.g0 {
-			gothrow("runtime: preempt g0")
+			throw("runtime: preempt g0")
 		}
 		if thisg.m.p == nil && thisg.m.locks == 0 {
-			gothrow("runtime: g is running but p is not")
+			throw("runtime: g is running but p is not")
 		}
 		if gp.preemptscan {
 			for !castogscanstatus(gp, _Gwaiting, _Gscanwaiting) {
@@ -715,7 +715,7 @@ func newstack() {
 	newsize := oldsize * 2
 	if uintptr(newsize) > maxstacksize {
 		print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n")
-		gothrow("stack overflow")
+		throw("stack overflow")
 	}

 	casgstatus(gp, _Gwaiting, _Gcopystack)
@@ -761,7 +761,7 @@ func shrinkstack(gp *g) {
 		return
 	}
 	if gp.stack.lo == 0 {
-		gothrow("missing stack in shrinkstack")
+		throw("missing stack in shrinkstack")
 	}

 	oldsize := gp.stack.hi - gp.stack.lo
@@ -808,6 +808,6 @@ func shrinkfinish() {
 //go:nosplit
 func morestackc() {
 	systemstack(func() {
-		gothrow("attempt to execute C code on Go stack")
+		throw("attempt to execute C code on Go stack")
 	})
}
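stackalloc and stackfree both insist that stack sizes are powers of two, using the classic n&(n-1) test: a power of two has exactly one set bit, so clearing the lowest set bit leaves zero. A quick runnable illustration (mine, not from the commit):

package main

import "fmt"

// isPow2 reports whether n is a power of two: n & (n-1) clears the
// lowest set bit, which leaves zero only for single-bit values.
func isPow2(n uint32) bool { return n != 0 && n&(n-1) == 0 }

func main() {
	fmt.Println(isPow2(2048), isPow2(3000)) // true false
}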
@@ -395,3 +395,21 @@ func TestStackPanic(t *testing.T) {
 	useStack(32)
 	panic("test panic")
 }
+
+func BenchmarkStackCopy(b *testing.B) {
+	c := make(chan bool)
+	for i := 0; i < b.N; i++ {
+		go func() {
+			count(1000000)
+			c <- true
+		}()
+		<-c
+	}
+}
+
+func count(n int) int {
+	if n == 0 {
+		return 0
+	}
+	return 1 + count(n-1)
+}
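To try the new benchmark in isolation, something like "go test -run=NONE -bench=StackCopy runtime" should work: each iteration spawns a goroutine that recurses a million frames deep, so the measurement is dominated by stack growth and stack copying rather than by the channel handoff.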
@@ -18,7 +18,7 @@ func concatstrings(a []string) string {
 			continue
 		}
 		if l+n < l {
-			gothrow("string concatenation too long")
+			throw("string concatenation too long")
 		}
 		l += n
 		count++
@@ -222,7 +222,7 @@ func rawbyteslice(size int) (b []byte) {
 // rawruneslice allocates a new rune slice. The rune slice is not zeroed.
 func rawruneslice(size int) (b []rune) {
 	if uintptr(size) > _MaxMem/4 {
-		gothrow("out of memory")
+		throw("out of memory")
 	}
 	mem := goroundupsize(uintptr(size) * 4)
 	p := mallocgc(mem, nil, flagNoScan|flagNoZero)
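The "l+n < l" test in concatstrings is the standard wraparound check: for non-negative operands, a sum smaller than either operand means the addition overflowed. Go defines integer overflow as two's-complement wraparound, which a short runnable note can demonstrate (my example, not the runtime's):

package main

import (
	"fmt"
	"math"
)

func main() {
	var l, n int64 = math.MaxInt64, 1
	sum := l + n         // wraps around to a large negative value
	fmt.Println(sum < l) // true, so "l+n < l" detects the overflow
}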
@@ -59,7 +59,7 @@ func mcall(fn func(*g))
 func systemstack(fn func())

 func badsystemstack() {
-	gothrow("systemstack called from unexpected goroutine")
+	throw("systemstack called from unexpected goroutine")
 }

 // memclr clears n bytes starting at ptr.
@@ -50,7 +50,7 @@ func symtabinit() {
 	pcln32 := (*[2]uint32)(unsafe.Pointer(&pclntab))
 	if pcln32[0] != 0xfffffffb || pcln[4] != 0 || pcln[5] != 0 || pcln[6] != _PCQuantum || pcln[7] != ptrSize {
 		println("runtime: function symbol table header:", hex(pcln32[0]), hex(pcln[4]), hex(pcln[5]), hex(pcln[6]), hex(pcln[7]))
-		gothrow("invalid function symbol table\n")
+		throw("invalid function symbol table\n")
 	}

 	// pclntable is all bytes of pclntab symbol.
@@ -79,7 +79,7 @@ func symtabinit() {
 			for j := 0; j <= i; j++ {
 				print("\t", hex(ftab[j].entry), " ", gofuncname((*_func)(unsafe.Pointer(&pclntable[ftab[j].funcoff]))), "\n")
 			}
-			gothrow("invalid runtime symbol table")
+			throw("invalid runtime symbol table")
 		}
 	}

@@ -150,7 +150,7 @@ func findfunc(pc uintptr) *_func {
 		}
 	}

-	gothrow("findfunc: binary search failed")
+	throw("findfunc: binary search failed")
 	return nil
 }

@@ -192,7 +192,7 @@ func pcvalue(f *_func, off int32, targetpc uintptr, strict bool) int32 {
 		print("\tvalue=", val, " until pc=", hex(pc), "\n")
 	}

-	gothrow("invalid runtime symbol table")
+	throw("invalid runtime symbol table")
 	return -1
 }
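findfunc maps a program counter back to function metadata by binary search over ftab, whose entries are sorted by function entry PC; the throw above fires only if the table is corrupt. A hedged sketch of the same lookup shape using the standard library (mine, not the runtime's code):

package main

import (
	"fmt"
	"sort"
)

// findIndex returns the index of the function whose entry PC is the
// largest one not exceeding pc, or -1 if pc is below every entry.
func findIndex(entries []uintptr, pc uintptr) int {
	i := sort.Search(len(entries), func(i int) bool { return entries[i] > pc })
	return i - 1
}

func main() {
	entries := []uintptr{0x1000, 0x1400, 0x2000} // sorted entry PCs
	fmt.Println(findIndex(entries, 0x1450))      // 1
	fmt.Println(findIndex(entries, 0x0800))      // -1
}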
@@ -10,7 +10,7 @@ import "unsafe"
 // and then did an immediate Gosave.
 func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
 	if buf.lr != 0 {
-		gothrow("invalid use of gostartcall")
+		throw("invalid use of gostartcall")
 	}
 	buf.lr = buf.pc
 	buf.pc = uintptr(fn)
@@ -31,5 +31,5 @@ func rewindmorestack(buf *gobuf) {
 	}

 	print("runtime: pc=", hex(buf.pc), " ", hex(inst), "\n")
-	gothrow("runtime: misuse of rewindmorestack")
+	throw("runtime: misuse of rewindmorestack")
 }
@@ -12,7 +12,7 @@ import "unsafe"
 // and then did an immediate Gosave.
 func gostartcall(buf *gobuf, fn, ctxt unsafe.Pointer) {
 	if buf.lr != 0 {
-		gothrow("invalid use of gostartcall")
+		throw("invalid use of gostartcall")
 	}
 	buf.lr = buf.pc
 	buf.pc = uintptr(fn)
@@ -33,5 +33,5 @@ func rewindmorestack(buf *gobuf) {
 		}
 	}
 	print("runtime: pc=", hex(buf.pc), " ", hex(inst), "\n")
-	gothrow("runtime: misuse of rewindmorestack")
+	throw("runtime: misuse of rewindmorestack")
 }
@@ -50,5 +50,5 @@ func rewindmorestack(buf *gobuf) {
 		return
 	}
 	print("runtime: pc=", pc, " ", hex(pc[0]), " ", hex(pc[1]), " ", hex(pc[2]), " ", hex(pc[3]), " ", hex(pc[4]), "\n")
-	gothrow("runtime: misuse of rewindmorestack")
+	throw("runtime: misuse of rewindmorestack")
 }
@@ -70,7 +70,7 @@ func compileCallback(fn eface, cleanstack bool) (code uintptr) {
 		}
 	}
 	if n >= cb_max {
-		gothrow("too many callback functions")
+		throw("too many callback functions")
 	}

 	c := new(wincallbackcontext)
@@ -77,7 +77,7 @@ func tracebackdefers(gp *g, callback func(*stkframe, unsafe.Pointer) bool, v uns
 			f := findfunc(frame.pc)
 			if f == nil {
 				print("runtime: unknown pc in defer ", hex(frame.pc), "\n")
-				gothrow("unknown pc")
+				throw("unknown pc")
 			}
 			frame.fn = f
 			frame.argp = uintptr(deferArgs(d))
@@ -96,7 +96,7 @@ func tracebackdefers(gp *g, callback func(*stkframe, unsafe.Pointer) bool, v uns
 // duplicating the code and all its subtlety.
 func gentraceback(pc0 uintptr, sp0 uintptr, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max int, callback func(*stkframe, unsafe.Pointer) bool, v unsafe.Pointer, flags uint) int {
 	if goexitPC == 0 {
-		gothrow("gentraceback before goexitPC initialization")
+		throw("gentraceback before goexitPC initialization")
 	}
 	g := getg()
 	if g == gp && g == g.m.curg {
@@ -113,7 +113,7 @@ func gentraceback(pc0 uintptr, sp0 uintptr, lr0 uintptr, gp *g, skip int, pcbuf
 		// accepts an sp for the current goroutine (typically obtained by
 		// calling getcallersp) must not run on that goroutine's stack but
 		// instead on the g0 stack.
-		gothrow("gentraceback cannot trace user goroutine on its own stack")
+		throw("gentraceback cannot trace user goroutine on its own stack")
 	}
 	gotraceback := gotraceback(nil)
 	if pc0 == ^uintptr(0) && sp0 == ^uintptr(0) { // Signal to fetch saved values from gp.
@@ -163,7 +163,7 @@ func gentraceback(pc0 uintptr, sp0 uintptr, lr0 uintptr, gp *g, skip int, pcbuf
 		if f == nil {
 			if callback != nil {
 				print("runtime: unknown pc ", hex(frame.pc), "\n")
-				gothrow("unknown pc")
+				throw("unknown pc")
 			}
 			return 0
 		}
@@ -200,7 +200,7 @@ func gentraceback(pc0 uintptr, sp0 uintptr, lr0 uintptr, gp *g, skip int, pcbuf
 				// to avoid that confusion.
 				// See golang.org/issue/8153.
 				if callback != nil {
-					gothrow("traceback_arm: found jmpdefer when tracing with callback")
+					throw("traceback_arm: found jmpdefer when tracing with callback")
 				}
 				frame.lr = 0
 			} else {
@@ -221,7 +221,7 @@ func gentraceback(pc0 uintptr, sp0 uintptr, lr0 uintptr, gp *g, skip int, pcbuf
 				// get everything, so crash loudly.
 				if callback != nil {
 					print("runtime: unexpected return pc for ", gofuncname(f), " called from ", hex(frame.lr), "\n")
-					gothrow("unknown caller pc")
+					throw("unknown caller pc")
 				}
 			}
 		}
@@ -411,7 +411,7 @@ func gentraceback(pc0 uintptr, sp0 uintptr, lr0 uintptr, gp *g, skip int, pcbuf
 		for _defer = gp._defer; _defer != nil; _defer = _defer.link {
 			print("\tdefer ", _defer, " sp=", hex(_defer.sp), " pc=", hex(_defer.pc), "\n")
 		}
-		gothrow("traceback has leftover defers")
+		throw("traceback has leftover defers")
 	}

 	return n
@@ -430,7 +430,7 @@ func setArgInfo(frame *stkframe, f *_func, needArgMap bool) {
 		fn := *(**[2]uintptr)(unsafe.Pointer(arg0))
 		if fn[0] != f.entry {
 			print("runtime: confused by ", gofuncname(f), "\n")
-			gothrow("reflect mismatch")
+			throw("reflect mismatch")
 		}
 		bv := (*bitvector)(unsafe.Pointer(fn[1]))
 		frame.arglen = uintptr(bv.n / 2 * ptrSize)
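gentraceback is the single stack walker behind crash tracebacks, profiling, and GC stack scans, which is why so many of its failure paths throw. User code reaches the same machinery through exported helpers; an illustrative walk of the current goroutine's stack using runtime.Callers and runtime.FuncForPC (my example, not part of the commit):

package main

import (
	"fmt"
	"runtime"
)

func main() {
	pcs := make([]uintptr, 8)
	n := runtime.Callers(1, pcs) // skip the runtime.Callers frame itself
	for _, pc := range pcs[:n] {
		if f := runtime.FuncForPC(pc); f != nil {
			file, line := f.FileLine(pc)
			fmt.Printf("%s (%s:%d)\n", f.Name(), file, line)
		}
	}
}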