cmd/internal/obj/arm64: return a bit shift from movcon

Return the shift in bits from movcon, rather than returning an index.
This allows a number of multiplications to be removed, making the code
more readable. Scale down to an index only when encoding.
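For illustration only (the example value is mine, not part of the change):
movcon previously returned the field index 1 for 0xABCD0000 and callers
multiplied by 16; it now returns the shift 16 directly, and only the encoder
scales back down with s>>4. A minimal standalone sketch, with movcon copied
from the patch below:

	package main

	import "fmt"

	// movcon, as changed by this CL: it now returns the shift in bits
	// (0, 16, 32 or 48) rather than the field index (0 to 3).
	func movcon(v int64) int {
		for s := 0; s < 64; s += 16 {
			if uint64(v)&^(uint64(0xFFFF)<<uint(s)) == 0 {
				return s
			}
		}
		return -1
	}

	func main() {
		s := movcon(0xABCD0000)
		fmt.Println(s, s>>4) // 16 1
	}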

Change-Id: I1be91eb526ad95d389e2f8ce97212311551790df
Reviewed-on: https://go-review.googlesource.com/c/go/+/650939
Auto-Submit: Joel Sing <joel@sing.id.au>
Reviewed-by: Dmitri Shuralyov <dmitshur@google.com>
Reviewed-by: Cherry Mui <cherryyz@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Joel Sing 2025-02-20 22:55:37 +11:00 committed by Gopher Robot
parent 4464a7d94f
commit 06f82af183


@@ -1863,15 +1863,14 @@ func (c *ctxt7) offsetshift(p *obj.Prog, v int64, cls int) int64 {
 	return vs
 }
 
-/*
- * if v contains a single 16-bit value aligned
- * on a 16-bit field, and thus suitable for movk/movn,
- * return the field index 0 to 3; otherwise return -1.
- */
+// movcon checks if v contains a single 16 bit value that is aligned on
+// a 16 bit boundary, suitable for use with a movk/movn instruction. The
+// field offset in bits is returned (being a multiple of 16), otherwise
+// -1 is returned indicating an unsuitable value.
 func movcon(v int64) int {
 	for s := 0; s < 64; s += 16 {
 		if (uint64(v) &^ (uint64(0xFFFF) << uint(s))) == 0 {
-			return s / 16
+			return s
 		}
 	}
 	return -1
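A quick worked check of the mask test above (illustrative values of mine):
each aligned 16-bit window is cleared in turn, and v is suitable only if
nothing remains outside one window.

	package main

	import "fmt"

	func main() {
		for _, v := range []uint64{0xFFFF, 0xFFFF0000, 0x10001} {
			shift := -1
			for s := 0; s < 64; s += 16 {
				// Clear the 16-bit window at bit s; if the rest
				// of v is zero, v fits entirely in that window.
				if v&^(uint64(0xFFFF)<<uint(s)) == 0 {
					shift = s
					break
				}
			}
			// 0x10001 straddles two windows, hence -1.
			fmt.Printf("movcon(%#x) = %d\n", v, shift)
		}
	}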
@@ -4120,18 +4119,18 @@ func (c *ctxt7) asmout(p *obj.Prog, out []uint32) (count int) {
 			c.ctxt.Diag("zero shifts cannot be handled correctly: %v", p)
 		}
 		s := movcon(d)
-		if s < 0 || s >= 4 {
+		if s < 0 || s >= 64 {
 			c.ctxt.Diag("bad constant for MOVK: %#x\n%v", uint64(d), p)
 		}
-		if (o1&S64) == 0 && s >= 2 {
+		if (o1&S64) == 0 && s >= 32 {
 			c.ctxt.Diag("illegal bit position\n%v", p)
 		}
-		if ((uint64(d) >> uint(s*16)) >> 16) != 0 {
+		if ((uint64(d) >> uint(s)) >> 16) != 0 {
 			c.ctxt.Diag("requires uimm16\n%v", p)
 		}
 		rt := int(p.To.Reg)
-		o1 |= uint32((((d >> uint(s*16)) & 0xFFFF) << 5) | int64((uint32(s)&3)<<21) | int64(rt&31))
+		o1 |= uint32((((d >> uint(s)) & 0xFFFF) << 5) | int64((uint32(s>>4)&3)<<21) | int64(rt&31))
 
 	case 34: /* mov $lacon,R */
 		rt, r, rf := p.To.Reg, p.From.Reg, int16(REGTMP)
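The MOVK arm above now scales the shift down only while packing the two-bit
hw field (bits 21-22 of the ARM64 move wide encoding). A small sketch of just
that packing, mirroring uint32(s>>4)&3 from the patch; the remaining opcode
bits are omitted:

	package main

	import "fmt"

	// hwField packs a shift expressed in bits into the hw field.
	func hwField(s int) uint32 {
		return (uint32(s>>4) & 3) << 21
	}

	func main() {
		for _, s := range []int{0, 16, 32, 48} {
			fmt.Printf("shift %2d bits -> hw=%d, %#x\n", s, s>>4, hwField(s))
		}
	}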
@@ -7423,32 +7422,32 @@ func (c *ctxt7) omovconst(as obj.As, p *obj.Prog, a *obj.Addr, rt int) (o1 uint32) {
 	if as == AMOVW {
 		d := uint32(a.Offset)
 		s := movcon(int64(d))
-		if s < 0 || 16*s >= 32 {
+		if s < 0 || s >= 32 {
 			d = ^d
 			s = movcon(int64(d))
-			if s < 0 || 16*s >= 32 {
+			if s < 0 || s >= 32 {
 				c.ctxt.Diag("impossible 32-bit move wide: %#x\n%v", uint32(a.Offset), p)
 			}
 			o1 = c.opirr(p, AMOVNW)
 		} else {
 			o1 = c.opirr(p, AMOVZW)
 		}
-		o1 |= MOVCONST(int64(d), s, rt)
+		o1 |= MOVCONST(int64(d), s>>4, rt)
 	}
 	if as == AMOVD {
 		d := a.Offset
 		s := movcon(d)
-		if s < 0 || 16*s >= 64 {
+		if s < 0 || s >= 64 {
 			d = ^d
 			s = movcon(d)
-			if s < 0 || 16*s >= 64 {
+			if s < 0 || s >= 64 {
 				c.ctxt.Diag("impossible 64-bit move wide: %#x\n%v", uint64(a.Offset), p)
 			}
 			o1 = c.opirr(p, AMOVN)
 		} else {
 			o1 = c.opirr(p, AMOVZ)
 		}
-		o1 |= MOVCONST(d, s, rt)
+		o1 |= MOVCONST(d, s>>4, rt)
 	}
 	return o1
 }
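Finally, a condensed sketch of the 64-bit selection logic in omovconst:
prefer MOVZ on d, and fall back to MOVN on ^d when d has no single aligned
16-bit field. chooseMoveWide is a hypothetical stand-in of mine; the real
code emits opirr/MOVCONST encodings and Diags on failure rather than
returning strings.

	package main

	import "fmt"

	// movcon as above: bit offset of the single aligned 16-bit field, or -1.
	func movcon(v int64) int {
		for s := 0; s < 64; s += 16 {
			if uint64(v)&^(uint64(0xFFFF)<<uint(s)) == 0 {
				return s
			}
		}
		return -1
	}

	// chooseMoveWide mirrors the AMOVD branch: it returns the mnemonic,
	// the 16-bit payload and the shift in bits.
	func chooseMoveWide(d int64) (op string, imm16 uint64, shift int) {
		if s := movcon(d); s >= 0 {
			return "MOVZ", (uint64(d) >> uint(s)) & 0xFFFF, s
		}
		if s := movcon(^d); s >= 0 {
			return "MOVN", (uint64(^d) >> uint(s)) & 0xFFFF, s
		}
		return "", 0, -1 // needs more than one move wide instruction
	}

	func main() {
		fmt.Println(chooseMoveWide(0x12340000)) // MOVZ 4660 16
		fmt.Println(chooseMoveWide(-2))         // MOVN 1 0 (^-2 == 1)
	}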