mirror of https://github.com/golang/go.git
Use a small Python script to consolidate duplicate ppc64/ppc64le tests into a single ppc64x codegen test. This makes the small assumption that any time two tests for different arch/variant combos exist, those tests can be combined into a single ppc64x test. E.g.:

// ppc64le: foo
// ppc64le/power9: foo

into

// ppc64x: foo

or

// ppc64: foo
// ppc64le: foo

into

// ppc64x: foo

import glob
import re

files = glob.glob("codegen/*.go")
for file in files:
    with open(file) as f:
        text = [l for l in f]

    i = 0
    while i < len(text):
        first = re.match(r"\s*// ?ppc64(le)?(/power[89])?:(.*)", text[i])
        if first:
            j = i + 1
            while j < len(text):
                second = re.match(r"\s*// ?ppc64(le)?(/power[89])?:(.*)", text[j])
                if not second:
                    break
                if (not first.group(2) or first.group(2) == second.group(2)) and first.group(3) == second.group(3):
                    text[i] = re.sub(" ?ppc64(le|x)?", " ppc64x", text[i])
                    text = text[:j] + text[j+1:]
                else:
                    j += 1
        i += 1

    with open(file, 'w') as f:
        f.write("".join(text))

Change-Id: Ic6b009b54eacaadc5a23db9c5a3bf7331b595821
Reviewed-on: https://go-review.googlesource.com/c/go/+/463220
Reviewed-by: Cherry Mui <cherryyz@google.com>
Reviewed-by: Lynn Boger <laboger@linux.vnet.ibm.com>
Reviewed-by: Bryan Mills <bcmills@google.com>
Run-TryBot: Paul Murphy <murp@ibm.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
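For reference, here is a minimal sketch of the merge test the script applies to a pair of adjacent directives. The two sample comment lines are illustrative, not taken from any particular codegen file:

import re

# The same directive pattern used by the consolidation script above.
DIRECTIVE = re.compile(r'\s*// ?ppc64(le)?(/power[89])?:(.*)')

a = DIRECTIVE.match('\t// ppc64:"LXVD2X"')
b = DIRECTIVE.match('\t// ppc64le:"LXVD2X"')

# The pair is mergeable when the first line does not pin a conflicting
# /power8 or /power9 variant (group 2) and both check the same regexp (group 3).
mergeable = (not a.group(2) or a.group(2) == b.group(2)) and a.group(3) == b.group(3)
print(mergeable)  # True: the two lines collapse into a single // ppc64x: directive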
154 lines
3.0 KiB
Go
// asmcheck

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package codegen

import "runtime"

// Check small copies are replaced with moves.

func movesmall4() {
	x := [...]byte{1, 2, 3, 4}
	// 386:-".*memmove"
	// amd64:-".*memmove"
	// arm:-".*memmove"
	// arm64:-".*memmove"
	// ppc64x:-".*memmove"
	copy(x[1:], x[:])
}

func movesmall7() {
	x := [...]byte{1, 2, 3, 4, 5, 6, 7}
	// 386:-".*memmove"
	// amd64:-".*memmove"
	// arm64:-".*memmove"
	// ppc64x:-".*memmove"
	copy(x[1:], x[:])
}

func movesmall16() {
	x := [...]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}
	// amd64:-".*memmove"
	// ppc64x:".*memmove"
	copy(x[1:], x[:])
}

var x [256]byte

// Check that large disjoint copies are replaced with moves.

func moveDisjointStack32() {
	var s [32]byte
	// ppc64x:-".*memmove"
	// ppc64le/power8:"LXVD2X",-"ADD",-"BC"
	// ppc64le/power9:"LXV",-"LXVD2X",-"ADD",-"BC"
	copy(s[:], x[:32])
	runtime.KeepAlive(&s)
}

func moveDisjointStack64() {
	var s [96]byte
	// ppc64x:-".*memmove"
	// ppc64le/power8:"LXVD2X","ADD","BC"
	// ppc64le/power9:"LXV",-"LXVD2X",-"ADD",-"BC"
	copy(s[:], x[:96])
	runtime.KeepAlive(&s)
}

func moveDisjointStack() {
	var s [256]byte
	// s390x:-".*memmove"
	// amd64:-".*memmove"
	// ppc64x:-".*memmove"
	// ppc64le/power8:"LXVD2X"
	// ppc64le/power9:"LXV",-"LXVD2X"
	copy(s[:], x[:])
	runtime.KeepAlive(&s)
}

func moveDisjointArg(b *[256]byte) {
	var s [256]byte
	// s390x:-".*memmove"
	// amd64:-".*memmove"
	// ppc64x:-".*memmove"
	// ppc64le/power8:"LXVD2X"
	// ppc64le/power9:"LXV",-"LXVD2X"
	copy(s[:], b[:])
	runtime.KeepAlive(&s)
}

func moveDisjointNoOverlap(a *[256]byte) {
	// s390x:-".*memmove"
	// amd64:-".*memmove"
	// ppc64x:-".*memmove"
	// ppc64le/power8:"LXVD2X"
	// ppc64le/power9:"LXV",-"LXVD2X"
	copy(a[:], a[128:])
}

// Check arch-specific memmove lowering. See issue 41662 for details.

func moveArchLowering1(b []byte, x *[1]byte) {
	_ = b[1]
	// amd64:-".*memmove"
	// arm64:-".*memmove"
	// ppc64x:-".*memmove"
	copy(b, x[:])
}

func moveArchLowering2(b []byte, x *[2]byte) {
	_ = b[2]
	// amd64:-".*memmove"
	// arm64:-".*memmove"
	// ppc64x:-".*memmove"
	copy(b, x[:])
}

func moveArchLowering4(b []byte, x *[4]byte) {
	_ = b[4]
	// amd64:-".*memmove"
	// arm64:-".*memmove"
	// ppc64x:-".*memmove"
	copy(b, x[:])
}

func moveArchLowering8(b []byte, x *[8]byte) {
	_ = b[8]
	// amd64:-".*memmove"
	// arm64:-".*memmove"
	// ppc64x:-".*memmove"
	copy(b, x[:])
}

func moveArchLowering16(b []byte, x *[16]byte) {
	_ = b[16]
	// amd64:-".*memmove"
	copy(b, x[:])
}

// Check that no branches are generated when the pointers are [not] equal.

func ptrEqual() {
	// amd64:-"JEQ",-"JNE"
	// ppc64x:-"BEQ",-"BNE"
	// s390x:-"BEQ",-"BNE"
	copy(x[:], x[:])
}

func ptrOneOffset() {
	// amd64:-"JEQ",-"JNE"
	// ppc64x:-"BEQ",-"BNE"
	// s390x:-"BEQ",-"BNE"
	copy(x[1:], x[:])
}

func ptrBothOffset() {
	// amd64:-"JEQ",-"JNE"
	// ppc64x:-"BEQ",-"BNE"
	// s390x:-"BEQ",-"BNE"
	copy(x[1:], x[2:])
}
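The comments in the file above are asmcheck directives: each "// arch[/variant]:" line lists quoted regexps that are checked against the assembly the compiler generates for the surrounding code, and a leading "-" marks a regexp that must not match (the -".*memmove" checks assert that no memmove call is emitted). Below is a loose, hypothetical illustration of that negative-check idea; the assembly text is made up for the example, and this is not the real test harness:

import re

# Hypothetical amd64 assembly for one of the small-copy functions above;
# real compiler output will differ, this only illustrates the check.
asm = """
MOVL "".x+12(SP), AX
MOVL AX, "".x+13(SP)
RET
"""

# A negative directive such as // amd64:-".*memmove" asserts that the
# regexp does not match the generated assembly.
check = re.compile(r".*memmove")
assert not check.search(asm), "unexpected memmove call"
print("negative check passed")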