commit ed15e82413 (parent 14bdcc76fd)

runtime: panic on uncomparable map key, even if map is empty

Reorg map flags a bit so we don't need any extra space for the extra flag.

Fixes #23734

Change-Id: I436812156240ae90de53d0943fe1aabf3ea37417
Reviewed-on: https://go-review.googlesource.com/c/155918
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
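For illustration (this program is not part of the CL), the user-visible behavior being enforced: looking up an uncomparable key must panic even when the map is empty, just as it always has for non-empty maps.

package main

import "fmt"

func main() {
	empty := map[interface{}]int{}
	key := []int{} // slices are not comparable, so hashing this key panics

	defer func() {
		// With this change the lookup below panics with
		// "hash of unhashable type []int" even though the map is empty;
		// previously the empty-map fast path returned the zero value silently.
		fmt.Println("recovered:", recover())
	}()
	_ = empty[key]
}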
src/cmd/compile/internal/gc/reflect.go
@@ -1095,6 +1095,28 @@ func needkeyupdate(t *types.Type) bool {
 	}
 }
 
+// hashMightPanic reports whether the hash of a map key of type t might panic.
+func hashMightPanic(t *types.Type) bool {
+	switch t.Etype {
+	case TINTER:
+		return true
+
+	case TARRAY:
+		return hashMightPanic(t.Elem())
+
+	case TSTRUCT:
+		for _, t1 := range t.Fields().Slice() {
+			if hashMightPanic(t1.Type) {
+				return true
+			}
+		}
+		return false
+
+	default:
+		return false
+	}
+}
+
 // formalType replaces byte and rune aliases with real types.
 // They've been separate internally to make error messages
 // better, but we have to merge them in the reflect tables.
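For reference, a sketch (illustrative, not code from the CL) of the key types the cases above classify:

package p // illustrative only

var (
	_ map[interface{}]int             // TINTER key: dynamic type may be uncomparable
	_ map[[4]interface{}]int          // TARRAY key: an element's hash might panic
	_ map[struct{ X interface{} }]int // TSTRUCT key: a field's hash might panic
	_ map[string]int                  // default: hashing a string never panics
)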
src/cmd/compile/internal/gc/reflect.go (continued)
@@ -1257,25 +1279,33 @@ func dtypesym(t *types.Type) *obj.LSym {
 		ot = dsymptr(lsym, ot, s1, 0)
 		ot = dsymptr(lsym, ot, s2, 0)
 		ot = dsymptr(lsym, ot, s3, 0)
+		var flags uint32
+		// Note: flags must match maptype accessors in ../../../../runtime/type.go
+		// and maptype builder in ../../../../reflect/type.go:MapOf.
 		if t.Key().Width > MAXKEYSIZE {
 			ot = duint8(lsym, ot, uint8(Widthptr))
-			ot = duint8(lsym, ot, 1) // indirect
+			flags |= 1 // indirect key
 		} else {
 			ot = duint8(lsym, ot, uint8(t.Key().Width))
-			ot = duint8(lsym, ot, 0) // not indirect
 		}
 
 		if t.Elem().Width > MAXVALSIZE {
 			ot = duint8(lsym, ot, uint8(Widthptr))
-			ot = duint8(lsym, ot, 1) // indirect
+			flags |= 2 // indirect value
 		} else {
 			ot = duint8(lsym, ot, uint8(t.Elem().Width))
-			ot = duint8(lsym, ot, 0) // not indirect
 		}
 
 		ot = duint16(lsym, ot, uint16(bmap(t).Width))
-		ot = duint8(lsym, ot, uint8(obj.Bool2int(isreflexive(t.Key()))))
-		ot = duint8(lsym, ot, uint8(obj.Bool2int(needkeyupdate(t.Key()))))
+		if isreflexive(t.Key()) {
+			flags |= 4 // reflexive key
+		}
+		if needkeyupdate(t.Key()) {
+			flags |= 8 // need key update
+		}
+		if hashMightPanic(t.Key()) {
+			flags |= 16 // hash might panic
+		}
+		ot = duint32(lsym, ot, flags)
 		ot = dextratype(lsym, ot, t, 0)
 
 	case TPTR:
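The same five flag bits are shared by the compiler code above, reflect.MapOf, and the runtime's maptype accessors. A sketch with illustrative constant names (the sources write the literal values; these names are not in the CL):

package p // illustrative only

const (
	mapFlagIndirectKey    = 1 << 0 // key slot holds a pointer to the key
	mapFlagIndirectValue  = 1 << 1 // value slot holds a pointer to the value
	mapFlagReflexiveKey   = 1 << 2 // k == k holds for every key of this type
	mapFlagNeedKeyUpdate  = 1 << 3 // overwriting an entry must also update the stored key
	mapFlagHashMightPanic = 1 << 4 // hashing a key can panic (issue 23734)
)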
src/reflect/type.go
@@ -398,12 +398,9 @@ type mapType struct {
 	elem          *rtype // map element (value) type
 	bucket        *rtype // internal bucket structure
 	keysize       uint8  // size of key slot
-	indirectkey   uint8  // store ptr to key instead of key itself
 	valuesize     uint8  // size of value slot
-	indirectvalue uint8  // store ptr to value instead of value itself
 	bucketsize    uint16 // size of bucket
-	reflexivekey  bool   // true if k==k for all keys
-	needkeyupdate bool   // true if we need to update key on an overwrite
+	flags         uint32
 }
 
 // ptrType represents a pointer type.
@@ -1859,6 +1856,8 @@ func MapOf(key, elem Type) Type {
 	}
 
 	// Make a map type.
+	// Note: flag values must match those used in the TMAP case
+	// in ../cmd/compile/internal/gc/reflect.go:dtypesym.
 	var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil)
 	mt := **(**mapType)(unsafe.Pointer(&imap))
 	mt.str = resolveReflectName(newName(s, "", false))
@@ -1867,23 +1866,29 @@ func MapOf(key, elem Type) Type {
 	mt.key = ktyp
 	mt.elem = etyp
 	mt.bucket = bucketOf(ktyp, etyp)
+	mt.flags = 0
 	if ktyp.size > maxKeySize {
 		mt.keysize = uint8(ptrSize)
-		mt.indirectkey = 1
+		mt.flags |= 1 // indirect key
 	} else {
 		mt.keysize = uint8(ktyp.size)
-		mt.indirectkey = 0
 	}
 	if etyp.size > maxValSize {
 		mt.valuesize = uint8(ptrSize)
-		mt.indirectvalue = 1
+		mt.flags |= 2 // indirect value
 	} else {
 		mt.valuesize = uint8(etyp.size)
-		mt.indirectvalue = 0
 	}
 	mt.bucketsize = uint16(mt.bucket.size)
-	mt.reflexivekey = isReflexive(ktyp)
-	mt.needkeyupdate = needKeyUpdate(ktyp)
+	if isReflexive(ktyp) {
+		mt.flags |= 4
+	}
+	if needKeyUpdate(ktyp) {
+		mt.flags |= 8
+	}
+	if hashMightPanic(ktyp) {
+		mt.flags |= 16
+	}
 	mt.ptrToThis = 0
 
 	ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype)
@@ -2122,6 +2127,27 @@ func needKeyUpdate(t *rtype) bool {
 	}
 }
 
+// hashMightPanic reports whether the hash of a map key of type t might panic.
+func hashMightPanic(t *rtype) bool {
+	switch t.Kind() {
+	case Interface:
+		return true
+	case Array:
+		tt := (*arrayType)(unsafe.Pointer(t))
+		return hashMightPanic(tt.elem)
+	case Struct:
+		tt := (*structType)(unsafe.Pointer(t))
+		for _, f := range tt.fields {
+			if hashMightPanic(f.typ) {
+				return true
+			}
+		}
+		return false
+	default:
+		return false
+	}
+}
+
 // Make sure these routines stay in sync with ../../runtime/map.go!
 // These types exist only for GC, so we only fill out GC relevant info.
 // Currently, that's just size and the GC program. We also fill in string
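Since a map type built with reflect.MapOf carries the same packed flags, a lookup through the reflect API should panic the same way on an uncomparable key even when the map is empty. A hedged sketch, not part of the CL:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	keyType := reflect.TypeOf((*interface{})(nil)).Elem() // interface{}
	mapType := reflect.MapOf(keyType, reflect.TypeOf(0))  // map[interface{}]int
	m := reflect.MakeMap(mapType)                         // empty map

	defer func() { fmt.Println("recovered:", recover()) }()
	m.MapIndex(reflect.ValueOf([]int{})) // hashing the slice key panics
}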
src/runtime/map.go
@@ -404,6 +404,9 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
 		msanread(key, t.key.size)
 	}
 	if h == nil || h.count == 0 {
+		if t.hashMightPanic() {
+			t.key.alg.hash(key, 0) // see issue 23734
+		}
 		return unsafe.Pointer(&zeroVal[0])
 	}
 	if h.flags&hashWriting != 0 {
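Only the operations that hash a key (lookup, comma-ok lookup, delete) gain this check; operations that never hash a key, such as len and range, still cannot panic. A small sketch, not part of the CL:

package main

import "fmt"

func main() {
	m := map[interface{}]int{}
	k := []int{} // uncomparable dynamic type

	fmt.Println(len(m)) // fine: no key is hashed
	for range m {       // fine: iteration hashes no lookup key
	}

	defer func() { fmt.Println("recovered:", recover()) }()
	_ = m[k] // panics: the lookup hashes k even though the map is empty
}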
src/runtime/map.go (continued)
@@ -434,12 +437,12 @@ bucketloop:
 				continue
 			}
 			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
-			if t.indirectkey {
+			if t.indirectkey() {
 				k = *((*unsafe.Pointer)(k))
 			}
 			if alg.equal(key, k) {
 				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
-				if t.indirectvalue {
+				if t.indirectvalue() {
 					v = *((*unsafe.Pointer)(v))
 				}
 				return v
@@ -460,6 +463,9 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
 		msanread(key, t.key.size)
 	}
 	if h == nil || h.count == 0 {
+		if t.hashMightPanic() {
+			t.key.alg.hash(key, 0) // see issue 23734
+		}
 		return unsafe.Pointer(&zeroVal[0]), false
 	}
 	if h.flags&hashWriting != 0 {
@@ -490,12 +496,12 @@ bucketloop:
 				continue
 			}
 			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
-			if t.indirectkey {
+			if t.indirectkey() {
 				k = *((*unsafe.Pointer)(k))
 			}
 			if alg.equal(key, k) {
 				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
-				if t.indirectvalue {
+				if t.indirectvalue() {
 					v = *((*unsafe.Pointer)(v))
 				}
 				return v, true
@@ -535,12 +541,12 @@ bucketloop:
 				continue
 			}
 			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
-			if t.indirectkey {
+			if t.indirectkey() {
 				k = *((*unsafe.Pointer)(k))
 			}
 			if alg.equal(key, k) {
 				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
-				if t.indirectvalue {
+				if t.indirectvalue() {
 					v = *((*unsafe.Pointer)(v))
 				}
 				return k, v
@@ -620,14 +626,14 @@ bucketloop:
 				continue
 			}
 			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
-			if t.indirectkey {
+			if t.indirectkey() {
 				k = *((*unsafe.Pointer)(k))
 			}
 			if !alg.equal(key, k) {
 				continue
 			}
 			// already have a mapping for key. Update it.
-			if t.needkeyupdate {
+			if t.needkeyupdate() {
 				typedmemmove(t.key, k, key)
 			}
 			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
@@ -658,12 +664,12 @@ bucketloop:
 	}
 
 	// store new key/value at insert position
-	if t.indirectkey {
+	if t.indirectkey() {
 		kmem := newobject(t.key)
 		*(*unsafe.Pointer)(insertk) = kmem
 		insertk = kmem
 	}
-	if t.indirectvalue {
+	if t.indirectvalue() {
 		vmem := newobject(t.elem)
 		*(*unsafe.Pointer)(val) = vmem
 	}
@@ -676,7 +682,7 @@ done:
 		throw("concurrent map writes")
 	}
 	h.flags &^= hashWriting
-	if t.indirectvalue {
+	if t.indirectvalue() {
 		val = *((*unsafe.Pointer)(val))
 	}
 	return val
@@ -693,6 +699,9 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
 		msanread(key, t.key.size)
 	}
 	if h == nil || h.count == 0 {
+		if t.hashMightPanic() {
+			t.key.alg.hash(key, 0) // see issue 23734
+		}
 		return
 	}
 	if h.flags&hashWriting != 0 {
@@ -724,20 +733,20 @@ search:
 			}
 			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
 			k2 := k
-			if t.indirectkey {
+			if t.indirectkey() {
 				k2 = *((*unsafe.Pointer)(k2))
 			}
 			if !alg.equal(key, k2) {
 				continue
 			}
 			// Only clear key if there are pointers in it.
-			if t.indirectkey {
+			if t.indirectkey() {
 				*(*unsafe.Pointer)(k) = nil
 			} else if t.key.kind&kindNoPointers == 0 {
 				memclrHasPointers(k, t.key.size)
 			}
 			v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
-			if t.indirectvalue {
+			if t.indirectvalue() {
 				*(*unsafe.Pointer)(v) = nil
 			} else if t.elem.kind&kindNoPointers == 0 {
 				memclrHasPointers(v, t.elem.size)
@@ -897,7 +906,7 @@ next:
 			continue
 		}
 		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
-		if t.indirectkey {
+		if t.indirectkey() {
 			k = *((*unsafe.Pointer)(k))
 		}
 		v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
@@ -909,7 +918,7 @@ next:
 			// through the oldbucket, skipping any keys that will go
 			// to the other new bucket (each oldbucket expands to two
 			// buckets during a grow).
-			if t.reflexivekey || alg.equal(k, k) {
+			if t.reflexivekey() || alg.equal(k, k) {
 				// If the item in the oldbucket is not destined for
 				// the current new bucket in the iteration, skip it.
 				hash := alg.hash(k, uintptr(h.hash0))
@@ -930,13 +939,13 @@ next:
 			}
 		}
 		if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
-			!(t.reflexivekey || alg.equal(k, k)) {
+			!(t.reflexivekey() || alg.equal(k, k)) {
 			// This is the golden data, we can return it.
 			// OR
 			// key!=key, so the entry can't be deleted or updated, so we can just return it.
 			// That's lucky for us because when key!=key we can't look it up successfully.
 			it.key = k
-			if t.indirectvalue {
+			if t.indirectvalue() {
 				v = *((*unsafe.Pointer)(v))
 			}
 			it.value = v
@@ -1160,7 +1169,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
 					throw("bad map state")
 				}
 				k2 := k
-				if t.indirectkey {
+				if t.indirectkey() {
 					k2 = *((*unsafe.Pointer)(k2))
 				}
 				var useY uint8
@@ -1168,7 +1177,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
 					// Compute hash to make our evacuation decision (whether we need
 					// to send this key/value to bucket x or bucket y).
 					hash := t.key.alg.hash(k2, uintptr(h.hash0))
-					if h.flags&iterator != 0 && !t.reflexivekey && !t.key.alg.equal(k2, k2) {
+					if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.alg.equal(k2, k2) {
 						// If key != key (NaNs), then the hash could be (and probably
 						// will be) entirely different from the old hash. Moreover,
 						// it isn't reproducible. Reproducibility is required in the
@@ -1203,12 +1212,12 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
 					dst.v = add(dst.k, bucketCnt*uintptr(t.keysize))
 				}
 				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
-				if t.indirectkey {
+				if t.indirectkey() {
 					*(*unsafe.Pointer)(dst.k) = k2 // copy pointer
 				} else {
 					typedmemmove(t.key, dst.k, k) // copy value
 				}
-				if t.indirectvalue {
+				if t.indirectvalue() {
 					*(*unsafe.Pointer)(dst.v) = *(*unsafe.Pointer)(v)
 				} else {
 					typedmemmove(t.elem, dst.v, v)
@@ -1274,12 +1283,12 @@ func reflect_makemap(t *maptype, cap int) *hmap {
 	if !ismapkey(t.key) {
 		throw("runtime.reflect_makemap: unsupported map key type")
 	}
-	if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(sys.PtrSize)) ||
-		t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
+	if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(sys.PtrSize)) ||
+		t.key.size <= maxKeySize && (t.indirectkey() || t.keysize != uint8(t.key.size)) {
 		throw("key size wrong")
 	}
-	if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(sys.PtrSize)) ||
-		t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
+	if t.elem.size > maxValueSize && (!t.indirectvalue() || t.valuesize != uint8(sys.PtrSize)) ||
+		t.elem.size <= maxValueSize && (t.indirectvalue() || t.valuesize != uint8(t.elem.size)) {
 		throw("value size wrong")
 	}
 	if t.key.align > bucketCnt {
src/runtime/type.go
@@ -366,12 +366,27 @@ type maptype struct {
 	elem          *_type
 	bucket        *_type // internal type representing a hash bucket
 	keysize       uint8  // size of key slot
-	indirectkey   bool   // store ptr to key instead of key itself
 	valuesize     uint8  // size of value slot
-	indirectvalue bool   // store ptr to value instead of value itself
 	bucketsize    uint16 // size of bucket
-	reflexivekey  bool   // true if k==k for all keys
-	needkeyupdate bool   // true if we need to update key on an overwrite
+	flags         uint32
 }
+
+// Note: flag values must match those used in the TMAP case
+// in ../cmd/compile/internal/gc/reflect.go:dtypesym.
+func (mt *maptype) indirectkey() bool { // store ptr to key instead of key itself
+	return mt.flags&1 != 0
+}
+func (mt *maptype) indirectvalue() bool { // store ptr to value instead of value itself
+	return mt.flags&2 != 0
+}
+func (mt *maptype) reflexivekey() bool { // true if k==k for all keys
+	return mt.flags&4 != 0
+}
+func (mt *maptype) needkeyupdate() bool { // true if we need to update key on an overwrite
+	return mt.flags&8 != 0
+}
+func (mt *maptype) hashMightPanic() bool { // true if hash function might panic
+	return mt.flags&16 != 0
+}
 
 type arraytype struct {
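Callers in runtime/map.go that previously read boolean fields now test these bits through the accessors; the correspondence is:

old field          new accessor          bit
t.indirectkey      t.indirectkey()       flags & 1
t.indirectvalue    t.indirectvalue()     flags & 2
t.reflexivekey     t.reflexivekey()      flags & 4
t.needkeyupdate    t.needkeyupdate()     flags & 8
(new)              t.hashMightPanic()    flags & 16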
test/fixedbugs/issue23734.go (new file, 32 lines)
@@ -0,0 +1,32 @@
+// run
+
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func main() {
+	m := map[interface{}]int{}
+	k := []int{}
+
+	mustPanic(func() {
+		_ = m[k]
+	})
+	mustPanic(func() {
+		_, _ = m[k]
+	})
+	mustPanic(func() {
+		delete(m, k)
+	})
+}
+
+func mustPanic(f func()) {
+	defer func() {
+		r := recover()
+		if r == nil {
+			panic("didn't panic")
+		}
+	}()
+	f()
+}