Mirror of https://github.com/golang/go.git
test: make codegen tests work with both ABIs
Some codegen tests were written with the assumption that arguments
and results are in memory, and with a specific stack layout. With the
register ABI, that assumption no longer holds. Adjust the tests to
work in both cases.

- For tests expecting in-memory arguments/results, switch to global
  variables or memory-assigned arguments/results.

- Allow more registers. E.g. some tests expect register names that
  contain only letters (e.g. AX), but a name can also contain digits
  (e.g. R10).

- Instruction selection changes when operating on registers vs.
  memory, e.g. ADDQ vs. LEAQ, MOVB vs. MOVL. Accept both.

TODO: mathbits.go and memops.go still need fixing.

Change-Id: Ic5932b4b5dd3f5d30ed078d296476b641420c4c5
Reviewed-on: https://go-review.googlesource.com/c/go/+/309335
Trust: Cherry Zhang <cherryyz@google.com>
Run-TryBot: Cherry Zhang <cherryyz@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: David Chase <drchase@google.com>
commit 263e13d1f7 (parent 3d5e3a15f6)
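The first bullet in the message is the key technique. As a minimal sketch of what an adjusted test looks like (hypothetical function and constants, not code from this commit): storing into a package-level variable forces a memory write under either ABI, and the asm check accepts either instruction selection via regex alternation.

package codegen

var sink int // a store to a global is a memory write under both ABIs

func addConst(n int) {
	// Accept both selections: ADDQ when adding into a memory operand,
	// LEAQ when the argument arrives in a register.
	// amd64:"(ADDQ\t[$]29)|(LEAQ\t29)"
	sink = n + 29
}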
@@ -139,7 +139,7 @@ func MergeMuls1(n int) int {
 }
 
 func MergeMuls2(n int) int {
-	// amd64:"IMUL3Q\t[$]23","ADDQ\t[$]29"
+	// amd64:"IMUL3Q\t[$]23","(ADDQ\t[$]29)|(LEAQ\t29)"
 	// 386:"IMUL3L\t[$]23","ADDL\t[$]29"
 	return 5*n + 7*(n+1) + 11*(n+2) // 23n + 29
 }
@@ -264,23 +264,23 @@ func bitcompl32(a, b uint32) (n uint32) {
 
 // check direct operation on memory with constant and shifted constant sources
 func bitOpOnMem(a []uint32, b, c, d uint32) {
-	// amd64:`ANDL\s[$]200,\s\([A-Z]+\)`
+	// amd64:`ANDL\s[$]200,\s\([A-Z][A-Z0-9]+\)`
 	a[0] &= 200
-	// amd64:`ORL\s[$]220,\s4\([A-Z]+\)`
+	// amd64:`ORL\s[$]220,\s4\([A-Z][A-Z0-9]+\)`
 	a[1] |= 220
-	// amd64:`XORL\s[$]240,\s8\([A-Z]+\)`
+	// amd64:`XORL\s[$]240,\s8\([A-Z][A-Z0-9]+\)`
 	a[2] ^= 240
-	// amd64:`BTRL\s[$]15,\s12\([A-Z]+\)`,-`ANDL`
+	// amd64:`BTRL\s[$]15,\s12\([A-Z][A-Z0-9]+\)`,-`ANDL`
 	a[3] &= 0xffff7fff
-	// amd64:`BTSL\s[$]14,\s16\([A-Z]+\)`,-`ORL`
+	// amd64:`BTSL\s[$]14,\s16\([A-Z][A-Z0-9]+\)`,-`ORL`
 	a[4] |= 0x4000
-	// amd64:`BTCL\s[$]13,\s20\([A-Z]+\)`,-`XORL`
+	// amd64:`BTCL\s[$]13,\s20\([A-Z][A-Z0-9]+\)`,-`XORL`
 	a[5] ^= 0x2000
-	// amd64:`BTRL\s[A-Z]+,\s24\([A-Z]+\)`
+	// amd64:`BTRL\s[A-Z][A-Z0-9]+,\s24\([A-Z][A-Z0-9]+\)`
 	a[6] &^= 1 << (b & 31)
-	// amd64:`BTSL\s[A-Z]+,\s28\([A-Z]+\)`
+	// amd64:`BTSL\s[A-Z][A-Z0-9]+,\s28\([A-Z][A-Z0-9]+\)`
 	a[7] |= 1 << (c & 31)
-	// amd64:`BTCL\s[A-Z]+,\s32\([A-Z]+\)`
+	// amd64:`BTCL\s[A-Z][A-Z0-9]+,\s32\([A-Z][A-Z0-9]+\)`
 	a[8] ^= 1 << (d & 31)
 }
 
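A note on the pattern change above (my illustration, not part of the commit): [A-Z]+ matches only letter-named registers such as AX, while the register ABI may place the slice header in a numbered register such as R10; [A-Z][A-Z0-9]+ accepts both. A quick standalone check:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	old := regexp.MustCompile(`ANDL\s\$200,\s\([A-Z]+\)`)
	updated := regexp.MustCompile(`ANDL\s\$200,\s\([A-Z][A-Z0-9]+\)`)
	asm := "ANDL $200, (R10)" // plausible output with the base pointer in R10
	fmt.Println(old.MatchString(asm))     // false: the "10" breaks [A-Z]+
	fmt.Println(updated.MatchString(asm)) // true
}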
@@ -19,14 +19,14 @@ func F(a, b, c int, d S) {
 	// amd64:`MOVQ\t\$-2401018187971961171, R8`, `MOVQ\t\$-2401018187971961171, R9`, `MOVQ\t\$-2401018187971961171, R10`
 	// amd64:`MOVQ\t\$-2401018187971961171, R11`, `MOVQ\t\$-2401018187971961171, R12`, `MOVQ\t\$-2401018187971961171, R13`
 	// amd64:-`MOVQ\t\$-2401018187971961171, BP` // frame pointer is not clobbered
-	StackArgsCall(a, b, c, d)
+	StackArgsCall([10]int{a, b, c})
 	// amd64:`MOVQ\t\$-2401018187971961171, R12`, `MOVQ\t\$-2401018187971961171, R13`, `MOVQ\t\$-2401018187971961171, DX`
 	// amd64:-`MOVQ\t\$-2401018187971961171, AX`, -`MOVQ\t\$-2401018187971961171, R11` // register args are not clobbered
 	RegArgsCall(a, b, c, d)
 }
 
 //go:noinline
-func StackArgsCall(int, int, int, S) {}
+func StackArgsCall([10]int) {}
 
 //go:noinline
 //go:registerparams
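For context on the magic number in these checks (my note, not from the commit): -2401018187971961171 is the poison pattern 0xdeaddeaddeaddead interpreted as a signed 64-bit integer, which the compiler's clobberdead mode writes over dead stack slots and registers. A quick check:

package main

import "fmt"

func main() {
	var poison uint64 = 0xdeaddeaddeaddead
	fmt.Println(int64(poison)) // -2401018187971961171, the constant in the checks
}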
@@ -90,9 +90,11 @@ func CompareArray6(a, b unsafe.Pointer) bool {
 
 // Test that LEAQ/ADDQconst are folded into SETx ops
 
-func CmpFold(x uint32) bool {
-	// amd64:`SETHI\t.*\(SP\)`
-	return x > 4
+var r bool
+
+func CmpFold(x uint32) {
+	// amd64:`SETHI\t.*\(SB\)`
+	r = x > 4
 }
 
 // Test that direct comparisons with memory are generated when
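Why CmpFold had to change (a sketch with hypothetical names): with results returned in registers, return x > 4 compiles to SETHI into a register, so there is no (SP) store left to match. Assigning the result to a package-level variable guarantees a store the test can anchor on, at a static (SB) address:

var done bool // hypothetical global; the store to it is memory under both ABIs

func cmpToGlobal(x uint32) {
	// amd64:`SETHI\t.*\(SB\)`
	done = x > 4 // the SETcc result is written straight to the global
}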
@@ -13,10 +13,10 @@ var wsp = [256]bool{
 	'\r': true,
 }
 
-func zeroExtArgByte(ch byte) bool {
-	return wsp[ch] // amd64:-"MOVBLZX\t..,.."
+func zeroExtArgByte(ch [2]byte) bool {
+	return wsp[ch[0]] // amd64:-"MOVBLZX\t..,.."
 }
 
-func zeroExtArgUint16(ch uint16) bool {
-	return wsp[ch] // amd64:-"MOVWLZX\t..,.."
+func zeroExtArgUint16(ch [2]uint16) bool {
+	return wsp[ch[0]] // amd64:-"MOVWLZX\t..,.."
 }
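The parameter rewrite above relies on a detail of the register ABI (my reading, stated as an assumption): arrays with more than one element are passed in memory, so wrapping a scalar in a two-element array keeps the argument memory-assigned. A hypothetical example of the same trick:

func lookup(ch [2]byte) bool {
	// Loading ch[0] from its argument slot zero-extends as part of the
	// load, so no separate MOVBLZX reg, reg should appear; a byte passed
	// in a register might need one, which would break the negative check.
	return wsp[ch[0]]
}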
@@ -16,12 +16,12 @@ package codegen
 // Direct use of constants in fast map access calls (Issue #19015).
 
 func AccessInt1(m map[int]int) int {
-	// amd64:"MOVQ\t[$]5"
+	// amd64:"MOV[LQ]\t[$]5"
 	return m[5]
 }
 
 func AccessInt2(m map[int]int) bool {
-	// amd64:"MOVQ\t[$]5"
+	// amd64:"MOV[LQ]\t[$]5"
 	_, ok := m[5]
 	return ok
 }
@@ -160,13 +160,13 @@ func toFloat32(u32 uint32) float32 {
 // are evaluated at compile-time
 
 func constantCheck64() bool {
-	// amd64:"MOVB\t[$]0",-"FCMP",-"MOVB\t[$]1"
+	// amd64:"(MOVB\t[$]0)|(XORL\t[A-Z][A-Z0-9]+, [A-Z][A-Z0-9]+)",-"FCMP",-"MOVB\t[$]1"
 	// s390x:"MOV(B|BZ|D)\t[$]0,",-"FCMPU",-"MOV(B|BZ|D)\t[$]1,"
 	return 0.5 == float64(uint32(1)) || 1.5 > float64(uint64(1<<63))
 }
 
 func constantCheck32() bool {
-	// amd64:"MOVB\t[$]1",-"FCMP",-"MOVB\t[$]0"
+	// amd64:"MOV(B|L)\t[$]1",-"FCMP",-"MOV(B|L)\t[$]0"
 	// s390x:"MOV(B|BZ|D)\t[$]1,",-"FCMPU",-"MOV(B|BZ|D)\t[$]0,"
 	return float32(0.5) <= float32(int64(1)) && float32(1.5) >= float32(int32(-1<<31))
 }
@@ -306,16 +306,16 @@ func load_be_byte8_uint64_idx8(s []byte, idx int) uint64 {
 
 // Check load combining across function calls.
 
-func fcall_byte(a, b byte) (byte, byte) {
-	return fcall_byte(fcall_byte(a, b)) // amd64:`MOVW`
+func fcall_byte(a [2]byte) [2]byte {
+	return fcall_byte(fcall_byte(a)) // amd64:`MOVW`
 }
 
-func fcall_uint16(a, b uint16) (uint16, uint16) {
-	return fcall_uint16(fcall_uint16(a, b)) // amd64:`MOVL`
+func fcall_uint16(a [2]uint16) [2]uint16 {
+	return fcall_uint16(fcall_uint16(a)) // amd64:`MOVL`
 }
 
-func fcall_uint32(a, b uint32) (uint32, uint32) {
-	return fcall_uint32(fcall_uint32(a, b)) // amd64:`MOVQ`
+func fcall_uint32(a [2]uint32) [2]uint32 {
+	return fcall_uint32(fcall_uint32(a)) // amd64:`MOVQ`
 }
 
 // We want to merge load+op in the first function, but not in the
@@ -307,7 +307,7 @@ func InitSmallSliceLiteral() []int {
 }
 
 func InitNotSmallSliceLiteral() []int {
-	// amd64:`MOVQ\t.*autotmp_`
+	// amd64:`LEAQ\t.*stmp_`
 	return []int{
 		42,
 		42,
|
@ -92,11 +92,11 @@ func ArrayInit(i, j int) [4]int {
|
|||||||
// Check that assembly output has matching offset and base register
|
// Check that assembly output has matching offset and base register
|
||||||
// (issue #21064).
|
// (issue #21064).
|
||||||
|
|
||||||
func check_asmout(a, b int) int {
|
func check_asmout(b [2]int) int {
|
||||||
runtime.GC() // use some frame
|
runtime.GC() // use some frame
|
||||||
// amd64:`.*b\+24\(SP\)`
|
// amd64:`.*b\+24\(SP\)`
|
||||||
// arm:`.*b\+4\(FP\)`
|
// arm:`.*b\+4\(FP\)`
|
||||||
return b
|
return b[1]
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check that simple functions get promoted to nosplit, even when
|
// Check that simple functions get promoted to nosplit, even when
|
||||||
|
@@ -12,14 +12,14 @@ package codegen
 
 func zeroSize() {
 	c := make(chan struct{})
-	// amd64:`MOVQ\t\$0, ""\.s\+32\(SP\)`
+	// amd64:`MOVQ\t\$0, ""\.s\+56\(SP\)`
 	var s *int
-	g(&s) // force s to be a stack object
+	// force s to be a stack object, also use some (fixed) stack space
+	g(&s, 1, 2, 3, 4, 5)
 
-	// amd64:`LEAQ\t""\..*\+31\(SP\)`
+	// amd64:`LEAQ\t""\..*\+55\(SP\)`
 	c <- struct{}{}
 }
 
 //go:noinline
-func g(p **int) {
-}
+func g(**int, int, int, int, int, int) {}