reflect: audit and explain safety of all unsafe.Pointer additions

It's not safe to do p+x with unsafe if that would point past the end of
the object. (Valid in C, not safe in Go.) Pass a "whySafe" reason
(compiled away) to explain at each call site why it's safe.

Fixes #21733.

Change-Id: I5da8c25bde66f5c9beac232f2135dcab8e8bf3b1
Reviewed-on: https://go-review.googlesource.com/80738
Reviewed-by: Austin Clements <austin@google.com>
commit 8bb51a73e9
parent 662938850b
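
The rule being enforced can be illustrated outside the reflect package. A minimal sketch (illustrative only, not code from this commit; the add helper here merely mirrors the signature introduced in the diff below):

package main

import (
	"fmt"
	"unsafe"
)

// add returns p+x. The whySafe argument is ignored at run time; it exists so
// that every call site states why p+x cannot point one past the end of p's
// allocation, where it could be mistaken for a pointer to the next object.
func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + x)
}

func main() {
	var buf [4]byte
	buf[2] = 7
	p := unsafe.Pointer(&buf[0])

	// Safe: 2 < len(buf), so the result still points into buf.
	b := (*byte)(add(p, 2, "2 < len(buf)"))
	fmt.Println(*b) // 7

	// add(p, 4, ...) would be the C-style "one past the end" pointer.
	// That is legal in C but not safe in Go: the garbage collector may
	// treat it as a pointer to whatever object follows buf in memory.
}

Because whySafe is an unused parameter receiving a constant string, it costs nothing at run time, which is what the commit message means by "compiled away".
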
@@ -93,7 +93,7 @@ func FirstMethodNameBytes(t Type) *byte {
 	}
 	m := ut.methods()[0]
 	mname := t.(*rtype).nameOff(m.name)
-	if *mname.data(0)&(1<<2) == 0 {
+	if *mname.data(0, "name flag field")&(1<<2) == 0 {
 		panic("method name does not have pkgPath *string")
 	}
 	return mname.bytes
@@ -65,8 +65,8 @@ func Swapper(slice interface{}) func(i, j int) {
 		if uint(i) >= uint(s.Len) || uint(j) >= uint(s.Len) {
 			panic("reflect: slice index out of range")
 		}
-		val1 := arrayAt(s.Data, i, size)
-		val2 := arrayAt(s.Data, j, size)
+		val1 := arrayAt(s.Data, i, size, "i < s.Len")
+		val2 := arrayAt(s.Data, j, size, "j < s.Len")
 		typedmemmove(typ, tmp, val1)
 		typedmemmove(typ, val1, val2)
 		typedmemmove(typ, val2, tmp)
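
The Swapper hunk above is typical of the whole change: the bounds check that already panics on bad indices is exactly what justifies the pointer arithmetic that follows, so the new reason strings simply restate it. A standalone sketch of the same shape, using plain typed pointers instead of reflect's arrayAt and typedmemmove (illustrative, not the reflect implementation):

package main

import (
	"fmt"
	"unsafe"
)

// swap32 swaps s[i] and s[j] through raw pointers. The explicit bounds check
// is what makes the address computations below safe to annotate as
// "i < len(s)" and "j < len(s)".
func swap32(s []int32, i, j int) {
	if uint(i) >= uint(len(s)) || uint(j) >= uint(len(s)) {
		panic("index out of range")
	}
	base := unsafe.Pointer(&s[0])
	size := unsafe.Sizeof(s[0])
	p := (*int32)(unsafe.Pointer(uintptr(base) + uintptr(i)*size)) // i < len(s)
	q := (*int32)(unsafe.Pointer(uintptr(base) + uintptr(j)*size)) // j < len(s)
	*p, *q = *q, *p
}

func main() {
	s := []int32{1, 2, 3}
	swap32(s, 0, 2)
	fmt.Println(s) // [3 2 1]
}
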
@@ -468,8 +468,8 @@ type name struct {
 	bytes *byte
 }
 
-func (n name) data(off int) *byte {
-	return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off)))
+func (n name) data(off int, whySafe string) *byte {
+	return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off), whySafe))
 }
 
 func (n name) isExported() bool {
@@ -477,15 +477,15 @@ func (n name) isExported() bool {
 }
 
 func (n name) nameLen() int {
-	return int(uint16(*n.data(1))<<8 | uint16(*n.data(2)))
+	return int(uint16(*n.data(1, "name len field"))<<8 | uint16(*n.data(2, "name len field")))
 }
 
 func (n name) tagLen() int {
-	if *n.data(0)&(1<<1) == 0 {
+	if *n.data(0, "name flag field")&(1<<1) == 0 {
 		return 0
 	}
 	off := 3 + n.nameLen()
-	return int(uint16(*n.data(off))<<8 | uint16(*n.data(off + 1)))
+	return int(uint16(*n.data(off, "name taglen field"))<<8 | uint16(*n.data(off+1, "name taglen field")))
 }
 
 func (n name) name() (s string) {
@@ -507,13 +507,13 @@ func (n name) tag() (s string) {
 	}
 	nl := n.nameLen()
 	hdr := (*stringHeader)(unsafe.Pointer(&s))
-	hdr.Data = unsafe.Pointer(n.data(3 + nl + 2))
+	hdr.Data = unsafe.Pointer(n.data(3+nl+2, "non-empty string"))
 	hdr.Len = tl
 	return s
 }
 
 func (n name) pkgPath() string {
-	if n.bytes == nil || *n.data(0)&(1<<2) == 0 {
+	if n.bytes == nil || *n.data(0, "name flag field")&(1<<2) == 0 {
 		return ""
 	}
 	off := 3 + n.nameLen()
@@ -521,7 +521,9 @@ func (n name) pkgPath() string {
 		off += 2 + tl
 	}
 	var nameOff int32
-	copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off)))[:])
+	// Note that this field may not be aligned in memory,
+	// so we cannot use a direct int32 assignment here.
+	copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off, "name offset field")))[:])
 	pkgPathName := name{(*byte)(resolveTypeOff(unsafe.Pointer(n.bytes), nameOff))}
 	return pkgPathName.name()
 }
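
The new comment in pkgPath records a separate subtlety: the 4-byte offset stored in the name data need not be 4-byte aligned, so it is read by copying bytes rather than by dereferencing a *int32. A standalone sketch of that unaligned read (illustrative; reflect's name encoding is not reproduced here):

package names

import "unsafe"

// readInt32Unaligned reads a native-endian int32 that may live at an
// unaligned address. Copying through [4]byte views avoids a direct *int32
// load, which would be an unaligned access on some architectures.
// The caller must guarantee that at least 4 valid bytes live at p.
func readInt32Unaligned(p unsafe.Pointer) int32 {
	var v int32
	copy((*[4]byte)(unsafe.Pointer(&v))[:], (*[4]byte)(p)[:])
	return v
}
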
@@ -630,7 +632,10 @@ var kindNames = []string{
 }
 
 func (t *uncommonType) methods() []method {
-	return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff)))[:t.mcount:t.mcount]
+	if t.mcount == 0 {
+		return nil
+	}
+	return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.mcount > 0"))[:t.mcount:t.mcount]
 }
 
 // resolveNameOff resolves a name offset from a base pointer.
@@ -1045,7 +1050,10 @@ func (t *funcType) in() []*rtype {
 	if t.tflag&tflagUncommon != 0 {
 		uadd += unsafe.Sizeof(uncommonType{})
 	}
-	return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd))[:t.inCount]
+	if t.inCount == 0 {
+		return nil
+	}
+	return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "t.inCount > 0"))[:t.inCount]
 }
 
 func (t *funcType) out() []*rtype {
@@ -1054,10 +1062,20 @@ func (t *funcType) out() []*rtype {
 		uadd += unsafe.Sizeof(uncommonType{})
 	}
 	outCount := t.outCount & (1<<15 - 1)
-	return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd))[t.inCount : t.inCount+outCount]
+	if outCount == 0 {
+		return nil
+	}
+	return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "outCount > 0"))[t.inCount : t.inCount+outCount]
 }
 
-func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
+// add returns p+x.
+//
+// The whySafe string is ignored, so that the function still inlines
+// as efficiently as p+x, but all call sites should use the string to
+// record why the addition is safe, which is to say why the addition
+// does not cause x to advance to the very end of p's allocation
+// and therefore point incorrectly at the next block in memory.
+func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
 	return unsafe.Pointer(uintptr(p) + x)
 }
 
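
The three hunks above share one fix: when the count is zero, base+offset may already be one past the end of the allocation, so each accessor now returns nil before any pointer is formed. A hedged standalone sketch of the same guard (record and its layout are invented for illustration; only the shape of the check mirrors the diff):

package records

import "unsafe"

type record struct{ a, b uint32 }

// view returns a slice over count records laid out off bytes past base.
// When count == 0, base+off could equal the one-past-the-end address of
// base's allocation, so return nil without ever doing the addition,
// matching the guard added to methods, in, and out above.
func view(base unsafe.Pointer, off uintptr, count int) []record {
	if count == 0 {
		return nil
	}
	p := unsafe.Pointer(uintptr(base) + off) // count > 0, so a record lives at base+off
	return (*[1 << 16]record)(p)[:count:count]
}

The (*[1 << 16]record) conversion is the pre-Go 1.17 idiom for viewing a counted region as a slice; unsafe.Slice now expresses the same thing.
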
@@ -1721,7 +1739,7 @@ func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
 func typelinks() (sections []unsafe.Pointer, offset [][]int32)
 
 func rtypeOff(section unsafe.Pointer, off int32) *rtype {
-	return (*rtype)(add(section, uintptr(off)))
+	return (*rtype)(add(section, uintptr(off), "sizeof(rtype) > 0"))
 }
 
 // typesByString returns the subslice of typelinks() whose elements have
@@ -2747,7 +2765,7 @@ func StructOf(fields []StructField) Type {
 		typ.alg.hash = func(p unsafe.Pointer, seed uintptr) uintptr {
 			o := seed
 			for _, ft := range typ.fields {
-				pi := unsafe.Pointer(uintptr(p) + ft.offset())
+				pi := add(p, ft.offset(), "&x.field safe")
 				o = ft.typ.alg.hash(pi, o)
 			}
 			return o
@@ -2757,8 +2775,8 @@ func StructOf(fields []StructField) Type {
 	if comparable {
 		typ.alg.equal = func(p, q unsafe.Pointer) bool {
 			for _, ft := range typ.fields {
-				pi := unsafe.Pointer(uintptr(p) + ft.offset())
-				qi := unsafe.Pointer(uintptr(q) + ft.offset())
+				pi := add(p, ft.offset(), "&x.field safe")
+				qi := add(q, ft.offset(), "&x.field safe")
 				if !ft.typ.alg.equal(pi, qi) {
 					return false
 				}
@@ -2972,8 +2990,8 @@ func ArrayOf(count int, elem Type) Type {
 		eequal := ealg.equal
 		array.alg.equal = func(p, q unsafe.Pointer) bool {
 			for i := 0; i < count; i++ {
-				pi := arrayAt(p, i, esize)
-				qi := arrayAt(q, i, esize)
+				pi := arrayAt(p, i, esize, "i < count")
+				qi := arrayAt(q, i, esize, "i < count")
 				if !eequal(pi, qi) {
 					return false
 				}
@@ -2987,7 +3005,7 @@ func ArrayOf(count int, elem Type) Type {
 		array.alg.hash = func(ptr unsafe.Pointer, seed uintptr) uintptr {
 			o := seed
 			for i := 0; i < count; i++ {
-				o = ehash(arrayAt(ptr, i, esize), o)
+				o = ehash(arrayAt(ptr, i, esize, "i < count"), o)
 			}
 			return o
 		}
@@ -426,7 +426,14 @@ func (v Value) call(op string, in []Value) []Value {
 		a := uintptr(targ.align)
 		off = (off + a - 1) &^ (a - 1)
 		n := targ.size
-		addr := unsafe.Pointer(uintptr(args) + off)
+		if n == 0 {
+			// Not safe to compute args+off pointing at 0 bytes,
+			// because that might point beyond the end of the frame,
+			// but we still need to call assignTo to check assignability.
+			v.assignTo("reflect.Value.Call", targ, nil)
+			continue
+		}
+		addr := add(args, off, "n > 0")
 		v = v.assignTo("reflect.Value.Call", targ, addr)
 		if v.flag&flagIndir != 0 {
 			typedmemmove(targ, addr, v.ptr)
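
Zero-sized arguments get the same treatment in call above: when n == 0, args+off may equal the end of the argument frame, so the code checks assignability with a nil address and never forms the pointer. A reduced sketch of that control flow (writeArg and its frame layout are hypothetical; only the size == 0 branch mirrors the diff):

package callframe

import "unsafe"

// writeArg copies size bytes of src into the argument frame at offset off.
// For a zero-sized value there is nothing to copy, and computing frame+off
// is exactly the past-the-end addition being audited, so return before
// forming the pointer.
func writeArg(frame unsafe.Pointer, off, size uintptr, src []byte) {
	if size == 0 {
		return
	}
	dst := unsafe.Pointer(uintptr(frame) + off) // size > 0, so off is strictly inside the frame
	copy(unsafe.Slice((*byte)(dst), size), src)
}
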
@@ -464,7 +471,7 @@ func (v Value) call(op string, in []Value) []Value {
 		off = (off + a - 1) &^ (a - 1)
 		if tv.Size() != 0 {
 			fl := flagIndir | flag(tv.Kind())
-			ret[i] = Value{tv.common(), unsafe.Pointer(uintptr(args) + off), fl}
+			ret[i] = Value{tv.common(), add(args, off, "tv.Size() != 0"), fl}
 		} else {
 			// For zero-sized return value, args+off may point to the next object.
 			// In this case, return the zero value instead.
@@ -499,7 +506,6 @@ func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer) {
 	in := make([]Value, 0, int(ftyp.inCount))
 	for _, typ := range ftyp.in() {
 		off += -off & uintptr(typ.align-1)
-		addr := unsafe.Pointer(uintptr(ptr) + off)
 		v := Value{typ, nil, flag(typ.Kind())}
 		if ifaceIndir(typ) {
 			// value cannot be inlined in interface data.
@@ -507,10 +513,12 @@ func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer) {
 			// and we cannot let f keep a reference to the stack frame
 			// after this function returns, not even a read-only reference.
 			v.ptr = unsafe_New(typ)
-			typedmemmove(typ, v.ptr, addr)
+			if typ.size > 0 {
+				typedmemmove(typ, v.ptr, add(ptr, off, "typ.size > 0"))
+			}
 			v.flag |= flagIndir
 		} else {
-			v.ptr = *(*unsafe.Pointer)(addr)
+			v.ptr = *(*unsafe.Pointer)(add(ptr, off, "1-ptr"))
 		}
 		in = append(in, v)
 		off += typ.size
@@ -541,7 +549,10 @@ func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer) {
 				" returned value obtained from unexported field")
 		}
 		off += -off & uintptr(typ.align-1)
-		addr := unsafe.Pointer(uintptr(ptr) + off)
+		if typ.size == 0 {
+			continue
+		}
+		addr := add(ptr, off, "typ.size > 0")
 		if v.flag&flagIndir != 0 {
 			typedmemmove(typ, addr, v.ptr)
 		} else {
@@ -645,7 +656,7 @@ func callMethod(ctxt *methodValue, frame unsafe.Pointer) {
 	// Avoid constructing out-of-bounds pointers if there are no args.
 	storeRcvr(rcvr, args)
 	if argSize-ptrSize > 0 {
-		typedmemmovepartial(frametype, unsafe.Pointer(uintptr(args)+ptrSize), frame, ptrSize, argSize-ptrSize)
+		typedmemmovepartial(frametype, add(args, ptrSize, "argSize > ptrSize"), frame, ptrSize, argSize-ptrSize)
 	}
 
 	// Call.
@@ -663,8 +674,8 @@ func callMethod(ctxt *methodValue, frame unsafe.Pointer) {
 		callerRetOffset = align(argSize-ptrSize, 8)
 	}
 	typedmemmovepartial(frametype,
-		unsafe.Pointer(uintptr(frame)+callerRetOffset),
-		unsafe.Pointer(uintptr(args)+retOffset),
+		add(frame, callerRetOffset, "frametype.size > retOffset"),
+		add(args, retOffset, "frametype.size > retOffset"),
 		retOffset,
 		frametype.size-retOffset)
 }
@@ -791,8 +802,8 @@ func (v Value) Field(i int) Value {
 	// or flagIndir is not set and v.ptr is the actual struct data.
 	// In the former case, we want v.ptr + offset.
 	// In the latter case, we must have field.offset = 0,
-	// so v.ptr + field.offset is still okay.
-	ptr := unsafe.Pointer(uintptr(v.ptr) + field.offset())
+	// so v.ptr + field.offset is still the correct address.
+	ptr := add(v.ptr, field.offset(), "same as non-reflect &v.field")
 	return Value{typ, ptr, fl}
 }
 
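
The reason string in Field ("same as non-reflect &v.field") leans on the fact that computing the field address from the struct base and the field's offset gives exactly the address &x.field would, so the addition is as safe as ordinary field selection. A small standalone check of that equivalence (illustrative only; the pair type is invented):

package main

import (
	"fmt"
	"unsafe"
)

type pair struct {
	A int32
	B int64
}

func main() {
	var x pair
	// base + Offsetof(B) and &x.B are the same address, so annotating the
	// addition with "same as non-reflect &x.B" just names the guarantee the
	// language already provides for ordinary field selection.
	byOffset := unsafe.Pointer(uintptr(unsafe.Pointer(&x)) + unsafe.Offsetof(x.B))
	fmt.Println(byOffset == unsafe.Pointer(&x.B)) // true
}
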
@@ -870,8 +881,8 @@ func (v Value) Index(i int) Value {
 		// or flagIndir is not set and v.ptr is the actual array data.
 		// In the former case, we want v.ptr + offset.
 		// In the latter case, we must be doing Index(0), so offset = 0,
-		// so v.ptr + offset is still okay.
-		val := unsafe.Pointer(uintptr(v.ptr) + offset)
+		// so v.ptr + offset is still the correct address.
+		val := add(v.ptr, offset, "same as &v[i], i < tt.len")
 		fl := v.flag&(flagIndir|flagAddr) | v.flag.ro() | flag(typ.Kind()) // bits same as overall array
 		return Value{typ, val, fl}
 
@@ -884,7 +895,7 @@ func (v Value) Index(i int) Value {
 		}
 		tt := (*sliceType)(unsafe.Pointer(v.typ))
 		typ := tt.elem
-		val := arrayAt(s.Data, i, typ.size)
+		val := arrayAt(s.Data, i, typ.size, "i < s.Len")
 		fl := flagAddr | flagIndir | v.flag.ro() | flag(typ.Kind())
 		return Value{typ, val, fl}
 
@@ -893,7 +904,7 @@ func (v Value) Index(i int) Value {
 		if uint(i) >= uint(s.Len) {
 			panic("reflect: string index out of range")
 		}
-		p := arrayAt(s.Data, i, 1)
+		p := arrayAt(s.Data, i, 1, "i < s.Len")
 		fl := v.flag.ro() | flag(Uint8) | flagIndir
 		return Value{uint8Type, p, fl}
 	}
@@ -1575,7 +1586,10 @@ func (v Value) Slice(i, j int) Value {
 		if i < 0 || j < i || j > s.Len {
 			panic("reflect.Value.Slice: string slice index out of bounds")
 		}
-		t := stringHeader{arrayAt(s.Data, i, 1), j - i}
+		var t stringHeader
+		if i < s.Len {
+			t = stringHeader{arrayAt(s.Data, i, 1, "i < s.Len"), j - i}
+		}
 		return Value{v.typ, unsafe.Pointer(&t), v.flag}
 	}
 
@@ -1591,7 +1605,7 @@ func (v Value) Slice(i, j int) Value {
 	s.Len = j - i
 	s.Cap = cap - i
 	if cap-i > 0 {
-		s.Data = arrayAt(base, i, typ.elem.Size())
+		s.Data = arrayAt(base, i, typ.elem.Size(), "i < cap")
 	} else {
 		// do not advance pointer, to avoid pointing beyond end of slice
 		s.Data = base
@@ -1643,7 +1657,7 @@ func (v Value) Slice3(i, j, k int) Value {
 	s.Len = j - i
 	s.Cap = k - i
 	if k-i > 0 {
-		s.Data = arrayAt(base, i, typ.elem.Size())
+		s.Data = arrayAt(base, i, typ.elem.Size(), "i < k <= cap")
 	} else {
 		// do not advance pointer, to avoid pointing beyond end of slice
 		s.Data = base
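
The Slice and Slice3 hunks above, like the string case before them, apply the same rule to empty results: when the new length and capacity are zero, the data pointer is left at base instead of being advanced, because base+i*size could point one past the end of the backing array. A sketch of that rule in isolation (subSliceData is a hypothetical helper, not part of reflect):

package slices

import "unsafe"

// subSliceData returns the Data pointer for the sub-slice starting at
// element i of a backing array holding n elements of eltSize bytes each.
// When the sub-slice would be empty (i == n), it returns base unchanged:
// advancing to base+i*eltSize could produce a past-the-end pointer.
func subSliceData(base unsafe.Pointer, i, n int, eltSize uintptr) unsafe.Pointer {
	if n-i > 0 {
		return unsafe.Pointer(uintptr(base) + uintptr(i)*eltSize) // i < n
	}
	return base // empty sub-slice: do not advance the pointer
}
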
@@ -1802,10 +1816,15 @@ func typesMustMatch(what string, t1, t2 Type) {
 	}
 }
 
-// arrayAt returns the i-th element of p, a C-array whose elements are
-// eltSize wide (in bytes).
-func arrayAt(p unsafe.Pointer, i int, eltSize uintptr) unsafe.Pointer {
-	return unsafe.Pointer(uintptr(p) + uintptr(i)*eltSize)
+// arrayAt returns the i-th element of p,
+// an array whose elements are eltSize bytes wide.
+// The array pointed at by p must have at least i+1 elements:
+// it is invalid (but impossible to check here) to pass i >= len,
+// because then the result will point outside the array.
+// whySafe must explain why i < len. (Passing "i < len" is fine;
+// the benefit is to surface this assumption at the call site.)
+func arrayAt(p unsafe.Pointer, i int, eltSize uintptr, whySafe string) unsafe.Pointer {
+	return add(p, uintptr(i)*eltSize, "i < len")
 }
 
 // grow grows the slice s so that it can hold extra more values, allocating
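
The rewritten arrayAt comment is the clearest statement of the convention: the helper cannot verify i < len itself, so the whySafe argument forces each caller to say why it holds. A standalone usage sketch over an ordinary slice (again illustrative, not the reflect code):

package main

import (
	"fmt"
	"unsafe"
)

// arrayAt mirrors the helper documented above: the address of element i of an
// array at p whose elements are eltSize bytes wide. It cannot check i < len,
// so whySafe records the caller's argument for why it is true.
func arrayAt(p unsafe.Pointer, i int, eltSize uintptr, whySafe string) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + uintptr(i)*eltSize)
}

func main() {
	xs := []int16{10, 20, 30, 40}
	i := 3
	if i >= len(xs) {
		panic("index out of range")
	}
	p := (*int16)(arrayAt(unsafe.Pointer(&xs[0]), i, unsafe.Sizeof(xs[0]), "i < len(xs)"))
	fmt.Println(*p) // 40
}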