diff --git a/src/cmd/compile/internal/gc/inl_test.go b/src/cmd/compile/internal/gc/inl_test.go index cfe7f6f546..c8521e584d 100644 --- a/src/cmd/compile/internal/gc/inl_test.go +++ b/src/cmd/compile/internal/gc/inl_test.go @@ -175,7 +175,7 @@ func TestIntendedInlining(t *testing.T) { } switch runtime.GOARCH { - case "386", "wasm", "arm": + case "386", "wasm", "arm", "riscv64": default: // TODO(mvdan): As explained in /test/inline_sync.go, some // architectures don't have atomic intrinsics, so these go over diff --git a/src/cmd/compile/internal/gc/plive.go b/src/cmd/compile/internal/gc/plive.go index 5b9f31426d..d406780a79 100644 --- a/src/cmd/compile/internal/gc/plive.go +++ b/src/cmd/compile/internal/gc/plive.go @@ -705,6 +705,12 @@ func (lv *Liveness) markUnsafePoints() { v = v.Args[0] continue } + case ssa.OpRISCV64SUB: + // RISCV64 lowers Neq32 to include a SUB with multiple arguments. + // TODO(jsing): it would be preferable not to use Neq32 for + // writeBuffer.enabled checks on this platform. + v = v.Args[0] + continue case ssa.Op386MOVLload, ssa.OpARM64MOVWUload, ssa.OpPPC64MOVWZload, ssa.OpWasmI64Load32U: // Args[0] is the address of the write // barrier control. Ignore Args[1], diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index efbfa32f42..43ef33bdfe 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -6533,7 +6533,7 @@ func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog { } else { // TODO(mdempsky): Can these differences be eliminated? switch thearch.LinkArch.Family { - case sys.AMD64, sys.I386, sys.PPC64, sys.S390X, sys.Wasm: + case sys.AMD64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm: p.To.Type = obj.TYPE_REG case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64: p.To.Type = obj.TYPE_MEM diff --git a/src/cmd/compile/internal/riscv64/galign.go b/src/cmd/compile/internal/riscv64/galign.go new file mode 100644 index 0000000000..4db0fac52e --- /dev/null +++ b/src/cmd/compile/internal/riscv64/galign.go @@ -0,0 +1,25 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package riscv64 + +import ( + "cmd/compile/internal/gc" + "cmd/internal/obj/riscv" +) + +func Init(arch *gc.Arch) { + arch.LinkArch = &riscv.LinkRISCV64 + + arch.REGSP = riscv.REG_SP + arch.MAXWIDTH = 1 << 50 + + arch.Ginsnop = ginsnop + arch.Ginsnopdefer = ginsnop + arch.ZeroRange = zeroRange + + arch.SSAMarkMoves = ssaMarkMoves + arch.SSAGenValue = ssaGenValue + arch.SSAGenBlock = ssaGenBlock +} diff --git a/src/cmd/compile/internal/riscv64/ggen.go b/src/cmd/compile/internal/riscv64/ggen.go new file mode 100644 index 0000000000..be31fad441 --- /dev/null +++ b/src/cmd/compile/internal/riscv64/ggen.go @@ -0,0 +1,48 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package riscv64 + +import ( + "cmd/compile/internal/gc" + "cmd/internal/obj" + "cmd/internal/obj/riscv" +) + +func zeroRange(pp *gc.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog { + if cnt == 0 { + return p + } + + // Adjust the frame to account for LR. 
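+	// The fixed frame area holds the saved return address (LR) below the
+	// locals on riscv64, so offsets from SP must skip past that slot before
+	// any zeroing stores are issued.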
+ off += gc.Ctxt.FixedFrameSize() + + if cnt < int64(4*gc.Widthptr) { + for i := int64(0); i < cnt; i += int64(gc.Widthptr) { + p = pp.Appendpp(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_SP, off+i) + } + return p + } + + // TODO(jsing): Add a duff zero implementation for medium sized ranges. + + // Loop, zeroing pointer width bytes at a time. + // ADD $(off), SP, T0 + // ADD $(cnt), T0, T1 + // loop: + // MOV ZERO, (T0) + // ADD $Widthptr, T0 + // BNE T0, T1, loop + p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_T0, 0) + p.Reg = riscv.REG_SP + p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, riscv.REG_T1, 0) + p.Reg = riscv.REG_T0 + p = pp.Appendpp(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_T0, 0) + loop := p + p = pp.Appendpp(p, riscv.AADD, obj.TYPE_CONST, 0, int64(gc.Widthptr), obj.TYPE_REG, riscv.REG_T0, 0) + p = pp.Appendpp(p, riscv.ABNE, obj.TYPE_REG, riscv.REG_T0, 0, obj.TYPE_BRANCH, 0, 0) + p.Reg = riscv.REG_T1 + gc.Patch(p, loop) + return p +} diff --git a/src/cmd/compile/internal/riscv64/gsubr.go b/src/cmd/compile/internal/riscv64/gsubr.go new file mode 100644 index 0000000000..d40bdf7a1d --- /dev/null +++ b/src/cmd/compile/internal/riscv64/gsubr.go @@ -0,0 +1,20 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package riscv64 + +import ( + "cmd/compile/internal/gc" + "cmd/internal/obj" + "cmd/internal/obj/riscv" +) + +func ginsnop(pp *gc.Progs) *obj.Prog { + // Hardware nop is ADD $0, ZERO + p := pp.Prog(riscv.AADD) + p.From.Type = obj.TYPE_CONST + p.Reg = riscv.REG_ZERO + p.To = obj.Addr{Type: obj.TYPE_REG, Reg: riscv.REG_ZERO} + return p +} diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go new file mode 100644 index 0000000000..d33240351f --- /dev/null +++ b/src/cmd/compile/internal/riscv64/ssa.go @@ -0,0 +1,496 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package riscv64 + +import ( + "cmd/compile/internal/gc" + "cmd/compile/internal/ssa" + "cmd/compile/internal/types" + "cmd/internal/obj" + "cmd/internal/obj/riscv" +) + +// ssaRegToReg maps ssa register numbers to obj register numbers. 
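+// The index is the dense register number used by the register allocator
+// (the order constructed in gen/RISCV64Ops.go): X1 (LR) is skipped there,
+// so it has no entry here, and the final slot belongs to the SB
+// pseudo-register.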
+var ssaRegToReg = []int16{ + riscv.REG_X0, + // X1 (LR): unused + riscv.REG_X2, + riscv.REG_X3, + riscv.REG_X4, + riscv.REG_X5, + riscv.REG_X6, + riscv.REG_X7, + riscv.REG_X8, + riscv.REG_X9, + riscv.REG_X10, + riscv.REG_X11, + riscv.REG_X12, + riscv.REG_X13, + riscv.REG_X14, + riscv.REG_X15, + riscv.REG_X16, + riscv.REG_X17, + riscv.REG_X18, + riscv.REG_X19, + riscv.REG_X20, + riscv.REG_X21, + riscv.REG_X22, + riscv.REG_X23, + riscv.REG_X24, + riscv.REG_X25, + riscv.REG_X26, + riscv.REG_X27, + riscv.REG_X28, + riscv.REG_X29, + riscv.REG_X30, + riscv.REG_X31, + riscv.REG_F0, + riscv.REG_F1, + riscv.REG_F2, + riscv.REG_F3, + riscv.REG_F4, + riscv.REG_F5, + riscv.REG_F6, + riscv.REG_F7, + riscv.REG_F8, + riscv.REG_F9, + riscv.REG_F10, + riscv.REG_F11, + riscv.REG_F12, + riscv.REG_F13, + riscv.REG_F14, + riscv.REG_F15, + riscv.REG_F16, + riscv.REG_F17, + riscv.REG_F18, + riscv.REG_F19, + riscv.REG_F20, + riscv.REG_F21, + riscv.REG_F22, + riscv.REG_F23, + riscv.REG_F24, + riscv.REG_F25, + riscv.REG_F26, + riscv.REG_F27, + riscv.REG_F28, + riscv.REG_F29, + riscv.REG_F30, + riscv.REG_F31, + 0, // SB isn't a real register. We fill an Addr.Reg field with 0 in this case. +} + +func loadByType(t *types.Type) obj.As { + width := t.Size() + + if t.IsFloat() { + switch width { + case 4: + return riscv.AMOVF + case 8: + return riscv.AMOVD + default: + gc.Fatalf("unknown float width for load %d in type %v", width, t) + return 0 + } + } + + switch width { + case 1: + if t.IsSigned() { + return riscv.AMOVB + } else { + return riscv.AMOVBU + } + case 2: + if t.IsSigned() { + return riscv.AMOVH + } else { + return riscv.AMOVHU + } + case 4: + if t.IsSigned() { + return riscv.AMOVW + } else { + return riscv.AMOVWU + } + case 8: + return riscv.AMOV + default: + gc.Fatalf("unknown width for load %d in type %v", width, t) + return 0 + } +} + +// storeByType returns the store instruction of the given type. +func storeByType(t *types.Type) obj.As { + width := t.Size() + + if t.IsFloat() { + switch width { + case 4: + return riscv.AMOVF + case 8: + return riscv.AMOVD + default: + gc.Fatalf("unknown float width for store %d in type %v", width, t) + return 0 + } + } + + switch width { + case 1: + return riscv.AMOVB + case 2: + return riscv.AMOVH + case 4: + return riscv.AMOVW + case 8: + return riscv.AMOV + default: + gc.Fatalf("unknown width for store %d in type %v", width, t) + return 0 + } +} + +// largestMove returns the largest move instruction possible and its size, +// given the alignment of the total size of the move. +// +// e.g., a 16-byte move may use MOV, but an 11-byte move must use MOVB. +// +// Note that the moves may not be on naturally aligned addresses depending on +// the source and destination. +// +// This matches the calculation in ssa.moveSize. +func largestMove(alignment int64) (obj.As, int64) { + switch { + case alignment%8 == 0: + return riscv.AMOV, 8 + case alignment%4 == 0: + return riscv.AMOVW, 4 + case alignment%2 == 0: + return riscv.AMOVH, 2 + default: + return riscv.AMOVB, 1 + } +} + +// markMoves marks any MOVXconst ops that need to avoid clobbering flags. +// RISC-V has no flags, so this is a no-op. 
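+// Comparisons are materialized into general purpose registers (SLT, SLTU,
+// SEQZ, SNEZ) rather than condition flags, so loading a constant can never
+// clobber an in-flight comparison.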
+func ssaMarkMoves(s *gc.SSAGenState, b *ssa.Block) {} + +func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) { + s.SetPos(v.Pos) + + switch v.Op { + case ssa.OpInitMem: + // memory arg needs no code + case ssa.OpArg: + // input args need no code + case ssa.OpPhi: + gc.CheckLoweredPhi(v) + case ssa.OpCopy, ssa.OpRISCV64MOVconvert: + if v.Type.IsMemory() { + return + } + rs := v.Args[0].Reg() + rd := v.Reg() + if rs == rd { + return + } + as := riscv.AMOV + if v.Type.IsFloat() { + as = riscv.AMOVD + } + p := s.Prog(as) + p.From.Type = obj.TYPE_REG + p.From.Reg = rs + p.To.Type = obj.TYPE_REG + p.To.Reg = rd + case ssa.OpLoadReg: + if v.Type.IsFlags() { + v.Fatalf("load flags not implemented: %v", v.LongString()) + return + } + p := s.Prog(loadByType(v.Type)) + gc.AddrAuto(&p.From, v.Args[0]) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpStoreReg: + if v.Type.IsFlags() { + v.Fatalf("store flags not implemented: %v", v.LongString()) + return + } + p := s.Prog(storeByType(v.Type)) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + gc.AddrAuto(&p.To, v) + case ssa.OpSP, ssa.OpSB, ssa.OpGetG: + // nothing to do + case ssa.OpRISCV64ADD, ssa.OpRISCV64SUB, ssa.OpRISCV64XOR, ssa.OpRISCV64OR, ssa.OpRISCV64AND, + ssa.OpRISCV64SLL, ssa.OpRISCV64SRA, ssa.OpRISCV64SRL, + ssa.OpRISCV64SLT, ssa.OpRISCV64SLTU, ssa.OpRISCV64MUL, ssa.OpRISCV64MULW, ssa.OpRISCV64MULH, + ssa.OpRISCV64MULHU, ssa.OpRISCV64DIV, ssa.OpRISCV64DIVU, ssa.OpRISCV64DIVW, + ssa.OpRISCV64DIVUW, ssa.OpRISCV64REM, ssa.OpRISCV64REMU, ssa.OpRISCV64REMW, + ssa.OpRISCV64REMUW, + ssa.OpRISCV64FADDS, ssa.OpRISCV64FSUBS, ssa.OpRISCV64FMULS, ssa.OpRISCV64FDIVS, + ssa.OpRISCV64FEQS, ssa.OpRISCV64FNES, ssa.OpRISCV64FLTS, ssa.OpRISCV64FLES, + ssa.OpRISCV64FADDD, ssa.OpRISCV64FSUBD, ssa.OpRISCV64FMULD, ssa.OpRISCV64FDIVD, + ssa.OpRISCV64FEQD, ssa.OpRISCV64FNED, ssa.OpRISCV64FLTD, ssa.OpRISCV64FLED: + r := v.Reg() + r1 := v.Args[0].Reg() + r2 := v.Args[1].Reg() + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = r2 + p.Reg = r1 + p.To.Type = obj.TYPE_REG + p.To.Reg = r + case ssa.OpRISCV64FSQRTS, ssa.OpRISCV64FNEGS, ssa.OpRISCV64FSQRTD, ssa.OpRISCV64FNEGD, + ssa.OpRISCV64FMVSX, ssa.OpRISCV64FMVDX, + ssa.OpRISCV64FCVTSW, ssa.OpRISCV64FCVTSL, ssa.OpRISCV64FCVTWS, ssa.OpRISCV64FCVTLS, + ssa.OpRISCV64FCVTDW, ssa.OpRISCV64FCVTDL, ssa.OpRISCV64FCVTWD, ssa.OpRISCV64FCVTLD, ssa.OpRISCV64FCVTDS, ssa.OpRISCV64FCVTSD: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpRISCV64ADDI, ssa.OpRISCV64XORI, ssa.OpRISCV64ORI, ssa.OpRISCV64ANDI, + ssa.OpRISCV64SLLI, ssa.OpRISCV64SRAI, ssa.OpRISCV64SRLI, ssa.OpRISCV64SLTI, + ssa.OpRISCV64SLTIU: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpRISCV64MOVBconst, ssa.OpRISCV64MOVHconst, ssa.OpRISCV64MOVWconst, ssa.OpRISCV64MOVDconst: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_CONST + p.From.Offset = v.AuxInt + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpRISCV64MOVaddr: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_ADDR + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + var wantreg string + // MOVW $sym+off(base), R + switch v.Aux.(type) { + default: + v.Fatalf("aux is of unknown type %T", v.Aux) + case *obj.LSym: + wantreg = "SB" + gc.AddAux(&p.From, v) + case *gc.Node: + wantreg = "SP" + gc.AddAux(&p.From, v) + case nil: + // No sym, just MOVW 
$off(SP), R + wantreg = "SP" + p.From.Reg = riscv.REG_SP + p.From.Offset = v.AuxInt + } + if reg := v.Args[0].RegName(); reg != wantreg { + v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg) + } + case ssa.OpRISCV64MOVBload, ssa.OpRISCV64MOVHload, ssa.OpRISCV64MOVWload, ssa.OpRISCV64MOVDload, + ssa.OpRISCV64MOVBUload, ssa.OpRISCV64MOVHUload, ssa.OpRISCV64MOVWUload, + ssa.OpRISCV64FMOVWload, ssa.OpRISCV64FMOVDload: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + gc.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpRISCV64MOVBstore, ssa.OpRISCV64MOVHstore, ssa.OpRISCV64MOVWstore, ssa.OpRISCV64MOVDstore, + ssa.OpRISCV64FMOVWstore, ssa.OpRISCV64FMOVDstore: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + gc.AddAux(&p.To, v) + case ssa.OpRISCV64SEQZ, ssa.OpRISCV64SNEZ: + p := s.Prog(v.Op.Asm()) + p.From.Type = obj.TYPE_REG + p.From.Reg = v.Args[0].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + case ssa.OpRISCV64CALLstatic, ssa.OpRISCV64CALLclosure, ssa.OpRISCV64CALLinter: + s.Call(v) + case ssa.OpRISCV64LoweredWB: + p := s.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = v.Aux.(*obj.LSym) + case ssa.OpRISCV64LoweredPanicBoundsA, ssa.OpRISCV64LoweredPanicBoundsB, ssa.OpRISCV64LoweredPanicBoundsC: + p := s.Prog(obj.ACALL) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = gc.BoundsCheckFunc[v.AuxInt] + s.UseArgs(16) // space used in callee args area by assembly stubs + case ssa.OpRISCV64LoweredZero: + mov, sz := largestMove(v.AuxInt) + + // mov ZERO, (Rarg0) + // ADD $sz, Rarg0 + // BGEU Rarg1, Rarg0, -2(PC) + + p := s.Prog(mov) + p.From.Type = obj.TYPE_REG + p.From.Reg = riscv.REG_ZERO + p.To.Type = obj.TYPE_MEM + p.To.Reg = v.Args[0].Reg() + + p2 := s.Prog(riscv.AADD) + p2.From.Type = obj.TYPE_CONST + p2.From.Offset = sz + p2.To.Type = obj.TYPE_REG + p2.To.Reg = v.Args[0].Reg() + + p3 := s.Prog(riscv.ABGEU) + p3.To.Type = obj.TYPE_BRANCH + p3.Reg = v.Args[0].Reg() + p3.From.Type = obj.TYPE_REG + p3.From.Reg = v.Args[1].Reg() + gc.Patch(p3, p) + + case ssa.OpRISCV64LoweredMove: + mov, sz := largestMove(v.AuxInt) + + // mov (Rarg1), T2 + // mov T2, (Rarg0) + // ADD $sz, Rarg0 + // ADD $sz, Rarg1 + // BGEU Rarg2, Rarg0, -4(PC) + + p := s.Prog(mov) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[1].Reg() + p.To.Type = obj.TYPE_REG + p.To.Reg = riscv.REG_T2 + + p2 := s.Prog(mov) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = riscv.REG_T2 + p2.To.Type = obj.TYPE_MEM + p2.To.Reg = v.Args[0].Reg() + + p3 := s.Prog(riscv.AADD) + p3.From.Type = obj.TYPE_CONST + p3.From.Offset = sz + p3.To.Type = obj.TYPE_REG + p3.To.Reg = v.Args[0].Reg() + + p4 := s.Prog(riscv.AADD) + p4.From.Type = obj.TYPE_CONST + p4.From.Offset = sz + p4.To.Type = obj.TYPE_REG + p4.To.Reg = v.Args[1].Reg() + + p5 := s.Prog(riscv.ABGEU) + p5.To.Type = obj.TYPE_BRANCH + p5.Reg = v.Args[1].Reg() + p5.From.Type = obj.TYPE_REG + p5.From.Reg = v.Args[2].Reg() + gc.Patch(p5, p) + + case ssa.OpRISCV64LoweredNilCheck: + // Issue a load which will fault if arg is nil. + // TODO: optimizations. See arm and amd64 LoweredNilCheck. 
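+		// MOVB (Rarg0), ZERO loads one byte through the pointer and discards
+		// the result; a nil pointer faults here and the runtime turns the
+		// fault into the nil pointer panic.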
+ p := s.Prog(riscv.AMOVB) + p.From.Type = obj.TYPE_MEM + p.From.Reg = v.Args[0].Reg() + gc.AddAux(&p.From, v) + p.To.Type = obj.TYPE_REG + p.To.Reg = riscv.REG_ZERO + if gc.Debug_checknil != 0 && v.Pos.Line() > 1 { // v.Pos == 1 in generated wrappers + gc.Warnl(v.Pos, "generated nil check") + } + + case ssa.OpRISCV64LoweredGetClosurePtr: + // Closure pointer is S4 (riscv.REG_CTXT). + gc.CheckLoweredGetClosurePtr(v) + + case ssa.OpRISCV64LoweredGetCallerSP: + // caller's SP is FixedFrameSize below the address of the first arg + p := s.Prog(riscv.AMOV) + p.From.Type = obj.TYPE_ADDR + p.From.Offset = -gc.Ctxt.FixedFrameSize() + p.From.Name = obj.NAME_PARAM + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + case ssa.OpRISCV64LoweredGetCallerPC: + p := s.Prog(obj.AGETCALLERPC) + p.To.Type = obj.TYPE_REG + p.To.Reg = v.Reg() + + default: + v.Fatalf("Unhandled op %v", v.Op) + } +} + +func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) { + s.SetPos(b.Pos) + + switch b.Kind { + case ssa.BlockDefer: + // defer returns in A0: + // 0 if we should continue executing + // 1 if we should jump to deferreturn call + p := s.Prog(riscv.ABNE) + p.To.Type = obj.TYPE_BRANCH + p.From.Type = obj.TYPE_REG + p.From.Reg = riscv.REG_ZERO + p.Reg = riscv.REG_A0 + s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()}) + if b.Succs[0].Block() != next { + p := s.Prog(obj.AJMP) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + } + case ssa.BlockPlain: + if b.Succs[0].Block() != next { + p := s.Prog(obj.AJMP) + p.To.Type = obj.TYPE_BRANCH + s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()}) + } + case ssa.BlockExit: + case ssa.BlockRet: + s.Prog(obj.ARET) + case ssa.BlockRetJmp: + p := s.Prog(obj.AJMP) + p.To.Type = obj.TYPE_MEM + p.To.Name = obj.NAME_EXTERN + p.To.Sym = b.Aux.(*obj.LSym) + case ssa.BlockRISCV64BNE: + var p *obj.Prog + switch next { + case b.Succs[0].Block(): + p = s.Br(riscv.ABNE, b.Succs[1].Block()) + p.As = riscv.InvertBranch(p.As) + case b.Succs[1].Block(): + p = s.Br(riscv.ABNE, b.Succs[0].Block()) + default: + if b.Likely != ssa.BranchUnlikely { + p = s.Br(riscv.ABNE, b.Succs[0].Block()) + s.Br(obj.AJMP, b.Succs[1].Block()) + } else { + p = s.Br(riscv.ABNE, b.Succs[1].Block()) + p.As = riscv.InvertBranch(p.As) + s.Br(obj.AJMP, b.Succs[0].Block()) + } + } + p.Reg = b.Controls[0].Reg() + p.From.Type = obj.TYPE_REG + p.From.Reg = riscv.REG_ZERO + + default: + b.Fatalf("Unhandled block: %s", b.LongString()) + } +} diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go index 3cbbfcfa4e..b51dfcb1f5 100644 --- a/src/cmd/compile/internal/ssa/config.go +++ b/src/cmd/compile/internal/ssa/config.go @@ -305,6 +305,16 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config c.LinkReg = linkRegMIPS c.hasGReg = true c.noDuffDevice = true + case "riscv64": + c.PtrSize = 8 + c.RegSize = 8 + c.lowerBlock = rewriteBlockRISCV64 + c.lowerValue = rewriteValueRISCV64 + c.registers = registersRISCV64[:] + c.gpRegMask = gpRegMaskRISCV64 + c.fpRegMask = fpRegMaskRISCV64 + c.FPReg = framepointerRegRISCV64 + c.hasGReg = true case "wasm": c.PtrSize = 8 c.RegSize = 8 diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64.rules b/src/cmd/compile/internal/ssa/gen/RISCV64.rules new file mode 100644 index 0000000000..5331c73259 --- /dev/null +++ b/src/cmd/compile/internal/ssa/gen/RISCV64.rules @@ -0,0 +1,478 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Optimizations TODO: +// * Somehow track when values are already zero/signed-extended, avoid re-extending. +// * Use SLTI and SLTIU for comparisons to constants, instead of SLT/SLTU with constants in registers +// * Find a more efficient way to do zero/sign extension than left+right shift. +// There are many other options (store then load-extend, LUI+ANDI for zero extend, special case 32->64, ...), +// but left+right shift is simple and uniform, and we don't have real hardware to do perf testing on anyway. +// * Use the zero register instead of moving 0 into a register. +// * Add rules to avoid generating a temp bool value for (If (SLT[U] ...) ...). +// * Optimize left and right shift by simplifying SLTIU, Neg, and ADD for constants. +// * Arrange for non-trivial Zero and Move lowerings to use aligned loads and stores. +// * Eliminate zero immediate shifts, adds, etc. +// * Use a Duff's device for some moves and zeros. +// * Avoid using Neq32 for writeBarrier.enabled checks. + +// Lowering arithmetic +(Add64 x y) -> (ADD x y) +(AddPtr x y) -> (ADD x y) +(Add32 x y) -> (ADD x y) +(Add16 x y) -> (ADD x y) +(Add8 x y) -> (ADD x y) +(Add32F x y) -> (FADDS x y) +(Add64F x y) -> (FADDD x y) + +(Sub64 x y) -> (SUB x y) +(SubPtr x y) -> (SUB x y) +(Sub32 x y) -> (SUB x y) +(Sub16 x y) -> (SUB x y) +(Sub8 x y) -> (SUB x y) +(Sub32F x y) -> (FSUBS x y) +(Sub64F x y) -> (FSUBD x y) + +(Mul64 x y) -> (MUL x y) +(Mul32 x y) -> (MULW x y) +(Mul16 x y) -> (MULW (SignExt16to32 x) (SignExt16to32 y)) +(Mul8 x y) -> (MULW (SignExt8to32 x) (SignExt8to32 y)) +(Mul32F x y) -> (FMULS x y) +(Mul64F x y) -> (FMULD x y) + +(Div32F x y) -> (FDIVS x y) +(Div64F x y) -> (FDIVD x y) + +(Div64 x y) -> (DIV x y) +(Div64u x y) -> (DIVU x y) +(Div32 x y) -> (DIVW x y) +(Div32u x y) -> (DIVUW x y) +(Div16 x y) -> (DIVW (SignExt16to32 x) (SignExt16to32 y)) +(Div16u x y) -> (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y)) +(Div8 x y) -> (DIVW (SignExt8to32 x) (SignExt8to32 y)) +(Div8u x y) -> (DIVUW (ZeroExt8to32 x) (ZeroExt8to32 y)) + +(Hmul64 x y) -> (MULH x y) +(Hmul64u x y) -> (MULHU x y) +(Hmul32 x y) -> (SRAI [32] (MUL (SignExt32to64 x) (SignExt32to64 y))) +(Hmul32u x y) -> (SRLI [32] (MUL (ZeroExt32to64 x) (ZeroExt32to64 y))) + +// (x + y) / 2 -> (x / 2) + (y / 2) + (x & y & 1) +(Avg64u x y) -> (ADD (ADD (SRLI [1] x) (SRLI [1] y)) (ANDI [1] (AND x y))) + +(Mod64 x y) -> (REM x y) +(Mod64u x y) -> (REMU x y) +(Mod32 x y) -> (REMW x y) +(Mod32u x y) -> (REMUW x y) +(Mod16 x y) -> (REMW (SignExt16to32 x) (SignExt16to32 y)) +(Mod16u x y) -> (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y)) +(Mod8 x y) -> (REMW (SignExt8to32 x) (SignExt8to32 y)) +(Mod8u x y) -> (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y)) + +(And64 x y) -> (AND x y) +(And32 x y) -> (AND x y) +(And16 x y) -> (AND x y) +(And8 x y) -> (AND x y) + +(Or64 x y) -> (OR x y) +(Or32 x y) -> (OR x y) +(Or16 x y) -> (OR x y) +(Or8 x y) -> (OR x y) + +(Xor64 x y) -> (XOR x y) +(Xor32 x y) -> (XOR x y) +(Xor16 x y) -> (XOR x y) +(Xor8 x y) -> (XOR x y) + +(Neg64 x) -> (SUB (MOVDconst) x) +(Neg32 x) -> (SUB (MOVWconst) x) +(Neg16 x) -> (SUB (MOVHconst) x) +(Neg8 x) -> (SUB (MOVBconst) x) +(Neg32F x) -> (FNEGS x) +(Neg64F x) -> (FNEGD x) + +(Com64 x) -> (XORI [int64(-1)] x) +(Com32 x) -> (XORI [int64(-1)] x) +(Com16 x) -> (XORI [int64(-1)] x) +(Com8 x) -> (XORI [int64(-1)] x) + +(Sqrt x) -> (FSQRTD x) + +// Zero and sign extension +// Shift left until the bits we want are at 
the top of the register. +// Then logical/arithmetic shift right for zero/sign extend. +// We always extend to 64 bits; there's no reason not to, +// and optimization rules can then collapse some extensions. + +(SignExt8to16 x) -> (SRAI [56] (SLLI [56] x)) +(SignExt8to32 x) -> (SRAI [56] (SLLI [56] x)) +(SignExt8to64 x) -> (SRAI [56] (SLLI [56] x)) +(SignExt16to32 x) -> (SRAI [48] (SLLI [48] x)) +(SignExt16to64 x) -> (SRAI [48] (SLLI [48] x)) +(SignExt32to64 x) -> (SRAI [32] (SLLI [32] x)) + +(ZeroExt8to16 x) -> (SRLI [56] (SLLI [56] x)) +(ZeroExt8to32 x) -> (SRLI [56] (SLLI [56] x)) +(ZeroExt8to64 x) -> (SRLI [56] (SLLI [56] x)) +(ZeroExt16to32 x) -> (SRLI [48] (SLLI [48] x)) +(ZeroExt16to64 x) -> (SRLI [48] (SLLI [48] x)) +(ZeroExt32to64 x) -> (SRLI [32] (SLLI [32] x)) + +(Cvt32to32F x) -> (FCVTSW x) +(Cvt32to64F x) -> (FCVTDW x) +(Cvt64to32F x) -> (FCVTSL x) +(Cvt64to64F x) -> (FCVTDL x) + +(Cvt32Fto32 x) -> (FCVTWS x) +(Cvt32Fto64 x) -> (FCVTLS x) +(Cvt64Fto32 x) -> (FCVTWD x) +(Cvt64Fto64 x) -> (FCVTLD x) + +(Cvt32Fto64F x) -> (FCVTDS x) +(Cvt64Fto32F x) -> (FCVTSD x) + +(Round32F x) -> x +(Round64F x) -> x + +// From genericOps.go: +// "0 if arg0 == 0, -1 if arg0 > 0, undef if arg0<0" +// +// Like other arches, we compute ~((x-1) >> 63), with arithmetic right shift. +// For positive x, bit 63 of x-1 is always 0, so the result is -1. +// For zero x, bit 63 of x-1 is 1, so the result is 0. +// +// TODO(prattmic): Use XORconst etc instead of XOR (MOVDconst). +(Slicemask x) -> (XOR (MOVDconst [-1]) (SRA (SUB x (MOVDconst [1])) (MOVDconst [63]))) + +// Truncations +// We ignore the unused high parts of registers, so truncates are just copies. +(Trunc16to8 x) -> x +(Trunc32to8 x) -> x +(Trunc32to16 x) -> x +(Trunc64to8 x) -> x +(Trunc64to16 x) -> x +(Trunc64to32 x) -> x + +// Shifts + +// SLL only considers the bottom 6 bits of y. If y > 64, the result should +// always be 0. +// +// Breaking down the operation: +// +// (SLL x y) generates x << (y & 63). +// +// If y < 64, this is the value we want. Otherwise, we want zero. +// +// So, we AND with -1 * uint64(y < 64), which is 0xfffff... if y < 64 and 0 otherwise. +(Lsh8x8 x y) -> (AND (SLL x y) (Neg8 (SLTIU [64] (ZeroExt8to64 y)))) +(Lsh8x16 x y) -> (AND (SLL x y) (Neg8 (SLTIU [64] (ZeroExt16to64 y)))) +(Lsh8x32 x y) -> (AND (SLL x y) (Neg8 (SLTIU [64] (ZeroExt32to64 y)))) +(Lsh8x64 x y) -> (AND (SLL x y) (Neg8 (SLTIU [64] y))) +(Lsh16x8 x y) -> (AND (SLL x y) (Neg16 (SLTIU [64] (ZeroExt8to64 y)))) +(Lsh16x16 x y) -> (AND (SLL x y) (Neg16 (SLTIU [64] (ZeroExt16to64 y)))) +(Lsh16x32 x y) -> (AND (SLL x y) (Neg16 (SLTIU [64] (ZeroExt32to64 y)))) +(Lsh16x64 x y) -> (AND (SLL x y) (Neg16 (SLTIU [64] y))) +(Lsh32x8 x y) -> (AND (SLL x y) (Neg32 (SLTIU [64] (ZeroExt8to64 y)))) +(Lsh32x16 x y) -> (AND (SLL x y) (Neg32 (SLTIU [64] (ZeroExt16to64 y)))) +(Lsh32x32 x y) -> (AND (SLL x y) (Neg32 (SLTIU [64] (ZeroExt32to64 y)))) +(Lsh32x64 x y) -> (AND (SLL x y) (Neg32 (SLTIU [64] y))) +(Lsh64x8 x y) -> (AND (SLL x y) (Neg64 (SLTIU [64] (ZeroExt8to64 y)))) +(Lsh64x16 x y) -> (AND (SLL x y) (Neg64 (SLTIU [64] (ZeroExt16to64 y)))) +(Lsh64x32 x y) -> (AND (SLL x y) (Neg64 (SLTIU [64] (ZeroExt32to64 y)))) +(Lsh64x64 x y) -> (AND (SLL x y) (Neg64 (SLTIU [64] y))) + +// SRL only considers the bottom 6 bits of y. If y > 64, the result should +// always be 0. See Lsh above for a detailed description. 
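+//
+// For example, (Rsh8Ux64 x y) becomes
+// (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] y))):
+// with y = 70, SLTIU [64] y is 0 (70 >= 64), Neg8 of that is 0, and the AND
+// forces the result to 0 no matter what SRL produced from the low 6 bits of y.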
+(Rsh8Ux8 x y) -> (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt8to64 y)))) +(Rsh8Ux16 x y) -> (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt16to64 y)))) +(Rsh8Ux32 x y) -> (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt32to64 y)))) +(Rsh8Ux64 x y) -> (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] y))) +(Rsh16Ux8 x y) -> (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt8to64 y)))) +(Rsh16Ux16 x y) -> (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt16to64 y)))) +(Rsh16Ux32 x y) -> (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt32to64 y)))) +(Rsh16Ux64 x y) -> (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] y))) +(Rsh32Ux8 x y) -> (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [64] (ZeroExt8to64 y)))) +(Rsh32Ux16 x y) -> (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [64] (ZeroExt16to64 y)))) +(Rsh32Ux32 x y) -> (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [64] (ZeroExt32to64 y)))) +(Rsh32Ux64 x y) -> (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [64] y))) +(Rsh64Ux8 x y) -> (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt8to64 y)))) +(Rsh64Ux16 x y) -> (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt16to64 y)))) +(Rsh64Ux32 x y) -> (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt32to64 y)))) +(Rsh64Ux64 x y) -> (AND (SRL x y) (Neg64 (SLTIU [64] y))) + +// SRA only considers the bottom 6 bits of y. If y > 64, the result should +// be either 0 or -1 based on the sign bit. +// +// We implement this by performing the max shift (-1) if y >= 64. +// +// We OR (uint64(y < 64) - 1) into y before passing it to SRA. This leaves +// us with -1 (0xffff...) if y >= 64. +// +// We don't need to sign-extend the OR result, as it will be at minimum 8 bits, +// more than the 6 bits SRA cares about. +(Rsh8x8 x y) -> (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) +(Rsh8x16 x y) -> (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) +(Rsh8x32 x y) -> (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) +(Rsh8x64 x y) -> (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) +(Rsh16x8 x y) -> (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) +(Rsh16x16 x y) -> (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) +(Rsh16x32 x y) -> (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) +(Rsh16x64 x y) -> (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) +(Rsh32x8 x y) -> (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) +(Rsh32x16 x y) -> (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) +(Rsh32x32 x y) -> (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) +(Rsh32x64 x y) -> (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) +(Rsh64x8 x y) -> (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) +(Rsh64x16 x y) -> (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) +(Rsh64x32 x y) -> (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) +(Rsh64x64 x y) -> (SRA x (OR y (ADDI [-1] (SLTIU [64] y)))) + +// rotates +(RotateLeft8 x (MOVBconst [c])) -> (Or8 (Lsh8x64 x (MOVBconst [c&7])) (Rsh8Ux64 x (MOVBconst [-c&7]))) +(RotateLeft16 x (MOVHconst [c])) -> (Or16 (Lsh16x64 x (MOVHconst [c&15])) (Rsh16Ux64 x (MOVHconst [-c&15]))) +(RotateLeft32 x (MOVWconst [c])) -> (Or32 (Lsh32x64 x (MOVWconst [c&31])) (Rsh32Ux64 x (MOVWconst [-c&31]))) +(RotateLeft64 x (MOVDconst [c])) -> (Or64 (Lsh64x64 x (MOVDconst [c&63])) (Rsh64Ux64 x (MOVDconst [-c&63]))) + +(Less64 x 
y) -> (SLT x y) +(Less32 x y) -> (SLT (SignExt32to64 x) (SignExt32to64 y)) +(Less16 x y) -> (SLT (SignExt16to64 x) (SignExt16to64 y)) +(Less8 x y) -> (SLT (SignExt8to64 x) (SignExt8to64 y)) +(Less64U x y) -> (SLTU x y) +(Less32U x y) -> (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y)) +(Less16U x y) -> (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y)) +(Less8U x y) -> (SLTU (ZeroExt8to64 x) (ZeroExt8to64 y)) +(Less64F x y) -> (FLTD x y) +(Less32F x y) -> (FLTS x y) + +// Convert x <= y to !(y > x). +(Leq64 x y) -> (Not (Less64 y x)) +(Leq32 x y) -> (Not (Less32 y x)) +(Leq16 x y) -> (Not (Less16 y x)) +(Leq8 x y) -> (Not (Less8 y x)) +(Leq64U x y) -> (Not (Less64U y x)) +(Leq32U x y) -> (Not (Less32U y x)) +(Leq16U x y) -> (Not (Less16U y x)) +(Leq8U x y) -> (Not (Less8U y x)) +(Leq64F x y) -> (FLED x y) +(Leq32F x y) -> (FLES x y) + +// Convert x > y to y < x. +(Greater64 x y) -> (Less64 y x) +(Greater32 x y) -> (Less32 y x) +(Greater16 x y) -> (Less16 y x) +(Greater8 x y) -> (Less8 y x) +(Greater64U x y) -> (Less64U y x) +(Greater32U x y) -> (Less32U y x) +(Greater16U x y) -> (Less16U y x) +(Greater8U x y) -> (Less8U y x) +(Greater64F x y) -> (FLTD y x) +(Greater32F x y) -> (FLTS y x) + +// Convert x >= y to !(x < y) +(Geq64 x y) -> (Not (Less64 x y)) +(Geq32 x y) -> (Not (Less32 x y)) +(Geq16 x y) -> (Not (Less16 x y)) +(Geq8 x y) -> (Not (Less8 x y)) +(Geq64U x y) -> (Not (Less64U x y)) +(Geq32U x y) -> (Not (Less32U x y)) +(Geq16U x y) -> (Not (Less16U x y)) +(Geq8U x y) -> (Not (Less8U x y)) +(Geq64F x y) -> (FLED y x) +(Geq32F x y) -> (FLES y x) + +(EqPtr x y) -> (SEQZ (SUB x y)) +(Eq64 x y) -> (SEQZ (SUB x y)) +(Eq32 x y) -> (SEQZ (ZeroExt32to64 (SUB x y))) +(Eq16 x y) -> (SEQZ (ZeroExt16to64 (SUB x y))) +(Eq8 x y) -> (SEQZ (ZeroExt8to64 (SUB x y))) +(Eq64F x y) -> (FEQD x y) +(Eq32F x y) -> (FEQS x y) + +(NeqPtr x y) -> (SNEZ (SUB x y)) +(Neq64 x y) -> (SNEZ (SUB x y)) +(Neq32 x y) -> (SNEZ (ZeroExt32to64 (SUB x y))) +(Neq16 x y) -> (SNEZ (ZeroExt16to64 (SUB x y))) +(Neq8 x y) -> (SNEZ (ZeroExt8to64 (SUB x y))) +(Neq64F x y) -> (FNED x y) +(Neq32F x y) -> (FNES x y) + +// Loads +(Load ptr mem) && t.IsBoolean() -> (MOVBUload ptr mem) +(Load ptr mem) && ( is8BitInt(t) && isSigned(t)) -> (MOVBload ptr mem) +(Load ptr mem) && ( is8BitInt(t) && !isSigned(t)) -> (MOVBUload ptr mem) +(Load ptr mem) && (is16BitInt(t) && isSigned(t)) -> (MOVHload ptr mem) +(Load ptr mem) && (is16BitInt(t) && !isSigned(t)) -> (MOVHUload ptr mem) +(Load ptr mem) && (is32BitInt(t) && isSigned(t)) -> (MOVWload ptr mem) +(Load ptr mem) && (is32BitInt(t) && !isSigned(t)) -> (MOVWUload ptr mem) +(Load ptr mem) && (is64BitInt(t) || isPtr(t)) -> (MOVDload ptr mem) +(Load ptr mem) && is32BitFloat(t) -> (FMOVWload ptr mem) +(Load ptr mem) && is64BitFloat(t) -> (FMOVDload ptr mem) + +// Stores +(Store {t} ptr val mem) && t.(*types.Type).Size() == 1 -> (MOVBstore ptr val mem) +(Store {t} ptr val mem) && t.(*types.Type).Size() == 2 -> (MOVHstore ptr val mem) +(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) -> (MOVWstore ptr val mem) +(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type) -> (MOVDstore ptr val mem) +(Store {t} ptr val mem) && t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) -> (FMOVWstore ptr val mem) +(Store {t} ptr val mem) && t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) -> (FMOVDstore ptr val mem) + +// We need to fold MOVaddr into the LD/MOVDstore ops so that the live variable analysis +// knows what variables are being read/written by the 
ops. +(MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) +(MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem) + +(MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) +(MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) +(MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) +(MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(off1+off2) && canMergeSym(sym1, sym2) -> + (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + +(MOVBUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) -> + (MOVBUload [off1+off2] {sym} base mem) +(MOVBload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) -> + (MOVBload [off1+off2] {sym} base mem) +(MOVHUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) -> + (MOVHUload [off1+off2] {sym} base mem) +(MOVHload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) -> + (MOVHload [off1+off2] {sym} base mem) +(MOVWUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) -> + (MOVWUload [off1+off2] {sym} base mem) +(MOVWload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) -> + (MOVWload [off1+off2] {sym} base mem) +(MOVDload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(off1+off2) -> + (MOVDload [off1+off2] {sym} base mem) + +(MOVBstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(off1+off2) -> + (MOVBstore [off1+off2] {sym} base val mem) +(MOVHstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(off1+off2) -> + (MOVHstore [off1+off2] {sym} base val mem) +(MOVWstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(off1+off2) -> + (MOVWstore [off1+off2] {sym} base val mem) +(MOVDstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(off1+off2) -> + (MOVDstore [off1+off2] {sym} base val mem) + +// Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis +// with OffPtr -> ADDI. +(ADDI [c] (MOVaddr [d] {s} x)) && is32Bit(c+d) -> (MOVaddr [c+d] {s} x) + +// Zeroing +// TODO: more optimized zeroing, including attempting to use aligned accesses. 
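+//
+// A MOV*const with no explicit auxint below is the zero constant, so the small
+// fixed sizes become a single store of zero; other sizes fall through to the
+// LoweredZero loop.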
+(Zero [0] _ mem) -> mem +(Zero [1] ptr mem) -> (MOVBstore ptr (MOVBconst) mem) +(Zero [2] ptr mem) -> (MOVHstore ptr (MOVHconst) mem) +(Zero [4] ptr mem) -> (MOVWstore ptr (MOVWconst) mem) +(Zero [8] ptr mem) -> (MOVDstore ptr (MOVDconst) mem) + +// Generic zeroing uses a loop +(Zero [s] {t} ptr mem) -> + (LoweredZero [t.(*types.Type).Alignment()] + ptr + (ADD ptr (MOVDconst [s-moveSize(t.(*types.Type).Alignment(), config)])) + mem) + +(Convert x mem) -> (MOVconvert x mem) + +// Checks +(IsNonNil p) -> (NeqPtr (MOVDconst) p) +(IsInBounds idx len) -> (Less64U idx len) +(IsSliceInBounds idx len) -> (Leq64U idx len) + +// Trivial lowering +(NilCheck ptr mem) -> (LoweredNilCheck ptr mem) +(GetClosurePtr) -> (LoweredGetClosurePtr) +(GetCallerSP) -> (LoweredGetCallerSP) +(GetCallerPC) -> (LoweredGetCallerPC) + +// Write barrier. +(WB {fn} destptr srcptr mem) -> (LoweredWB {fn} destptr srcptr mem) + +(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 -> (LoweredPanicBoundsA [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 -> (LoweredPanicBoundsB [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 -> (LoweredPanicBoundsC [kind] x y mem) + +// Moves +// TODO: more optimized moves, including attempting to use aligned accesses. +(Move [0] _ _ mem) -> mem +(Move [1] dst src mem) -> (MOVBstore dst (MOVBload src mem) mem) +(Move [2] dst src mem) -> (MOVHstore dst (MOVHload src mem) mem) +(Move [4] dst src mem) -> (MOVWstore dst (MOVWload src mem) mem) +(Move [8] dst src mem) -> (MOVDstore dst (MOVDload src mem) mem) + +// Generic move uses a loop +(Move [s] {t} dst src mem) -> + (LoweredMove [t.(*types.Type).Alignment()] + dst + src + (ADDI [s-moveSize(t.(*types.Type).Alignment(), config)] src) + mem) + +// Boolean ops; 0=false, 1=true +(AndB x y) -> (AND x y) +(OrB x y) -> (OR x y) +(EqB x y) -> (XORI [1] (XOR x y)) +(NeqB x y) -> (XOR x y) +(Not x) -> (XORI [1] x) + +// Lowering pointer arithmetic +// TODO: Special handling for SP offsets, like ARM +(OffPtr [off] ptr:(SP)) -> (MOVaddr [off] ptr) +(OffPtr [off] ptr) && is32Bit(off) -> (ADDI [off] ptr) +(OffPtr [off] ptr) -> (ADD (MOVDconst [off]) ptr) + +(Const8 [val]) -> (MOVBconst [val]) +(Const16 [val]) -> (MOVHconst [val]) +(Const32 [val]) -> (MOVWconst [val]) +(Const64 [val]) -> (MOVDconst [val]) +(Const32F [val]) -> (FMVSX (MOVWconst [int64(int32(math.Float32bits(float32(math.Float64frombits(uint64(val))))))])) +(Const64F [val]) -> (FMVDX (MOVDconst [val])) +(ConstNil) -> (MOVDconst [0]) +(ConstBool [b]) -> (MOVBconst [b]) + +// Convert 64 bit immediate to two 32 bit immediates, combine with add and shift. +// The lower 32 bit immediate will be treated as signed, +// so if it is negative, adjust for the borrow by incrementing the top half. +// We don't have to worry about overflow from the increment, +// because if the top half is all 1s, and int32(c) is negative, +// then the overall constant fits in an int32. +(MOVDconst [c]) && !is32Bit(c) && int32(c) < 0 -> (ADD (SLLI [32] (MOVDconst [c>>32+1])) (MOVDconst [int64(int32(c))])) +(MOVDconst [c]) && !is32Bit(c) && int32(c) >= 0 -> (ADD (SLLI [32] (MOVDconst [c>>32+0])) (MOVDconst [int64(int32(c))])) + +// Fold ADD+MOVDconst into ADDI where possible. +(ADD (MOVDconst [off]) ptr) && is32Bit(off) -> (ADDI [off] ptr) + +(Addr {sym} base) -> (MOVaddr {sym} base) +(LocalAddr {sym} base _) -> (MOVaddr {sym} base) + +// Conditional branches +// +// cond is 1 if true. BNE compares against 0. 
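+//
+// The lowered block compares cond against the ZERO register with BNE, so the
+// yes successor is taken exactly when cond is non-zero.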
+// +// TODO(prattmic): RISCV branch instructions take two operands to compare, +// so we could generate more efficient code by computing the condition in the +// branch itself. This should be revisited now that the compiler has support +// for two control values (https://golang.org/cl/196557). +(If cond yes no) -> (BNE cond yes no) + +// Calls +(StaticCall [argwid] {target} mem) -> (CALLstatic [argwid] {target} mem) +(ClosureCall [argwid] entry closure mem) -> (CALLclosure [argwid] entry closure mem) +(InterCall [argwid] entry mem) -> (CALLinter [argwid] entry mem) + +// remove redundant *const ops +(ADDI [0] x) -> x diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go b/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go new file mode 100644 index 0000000000..a251146c2a --- /dev/null +++ b/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go @@ -0,0 +1,312 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +package main + +import "cmd/internal/obj/riscv" + +// Suffixes encode the bit width of various instructions: +// +// D (double word) = 64 bit int +// W (word) = 32 bit int +// H (half word) = 16 bit int +// B (byte) = 8 bit int +// S (single) = 32 bit float +// D (double) = 64 bit float +// L = 64 bit int, used when the opcode starts with F + +func init() { + var regNamesRISCV64 []string + var gpMask, fpMask, gpspMask, gpspsbMask regMask + regNamed := make(map[string]regMask) + + // Build the list of register names, creating an appropriately indexed + // regMask for the gp and fp registers as we go. + // + // If name is specified, use it rather than the riscv reg number. + addreg := func(r int, name string) regMask { + mask := regMask(1) << uint(len(regNamesRISCV64)) + if name == "" { + name = riscv.RegName(r) + } + regNamesRISCV64 = append(regNamesRISCV64, name) + regNamed[name] = mask + return mask + } + + // General purpose registers. + for r := riscv.REG_X0; r <= riscv.REG_X31; r++ { + if r == riscv.REG_LR { + // LR is not used by regalloc, so we skip it to leave + // room for pseudo-register SB. + continue + } + + mask := addreg(r, "") + + // Add general purpose registers to gpMask. + switch r { + // ZERO, g, and TMP are not in any gp mask. + case riscv.REG_ZERO, riscv.REG_G, riscv.REG_TMP: + case riscv.REG_SP: + gpspMask |= mask + gpspsbMask |= mask + default: + gpMask |= mask + gpspMask |= mask + gpspsbMask |= mask + } + } + + // Floating pointer registers. + for r := riscv.REG_F0; r <= riscv.REG_F31; r++ { + mask := addreg(r, "") + fpMask |= mask + } + + // Pseudo-register: SB + mask := addreg(-1, "SB") + gpspsbMask |= mask + + if len(regNamesRISCV64) > 64 { + // regMask is only 64 bits. 
+ panic("Too many RISCV64 registers") + } + + regCtxt := regNamed["X20"] + callerSave := gpMask | fpMask | regNamed["g"] + + var ( + gpstore = regInfo{inputs: []regMask{gpspsbMask, gpspMask, 0}} // SB in first input so we can load from a global, but not in second to avoid using SB as a temporary register + gp01 = regInfo{outputs: []regMask{gpMask}} + gp11 = regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}} + gp21 = regInfo{inputs: []regMask{gpMask, gpMask}, outputs: []regMask{gpMask}} + gpload = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{gpMask}} + gp11sb = regInfo{inputs: []regMask{gpspsbMask}, outputs: []regMask{gpMask}} + + fp11 = regInfo{inputs: []regMask{fpMask}, outputs: []regMask{fpMask}} + fp21 = regInfo{inputs: []regMask{fpMask, fpMask}, outputs: []regMask{fpMask}} + gpfp = regInfo{inputs: []regMask{gpMask}, outputs: []regMask{fpMask}} + fpgp = regInfo{inputs: []regMask{fpMask}, outputs: []regMask{gpMask}} + fpstore = regInfo{inputs: []regMask{gpspsbMask, fpMask, 0}} + fpload = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{fpMask}} + fp2gp = regInfo{inputs: []regMask{fpMask, fpMask}, outputs: []regMask{gpMask}} + + call = regInfo{clobbers: callerSave} + callClosure = regInfo{inputs: []regMask{gpspMask, regCtxt, 0}, clobbers: callerSave} + callInter = regInfo{inputs: []regMask{gpMask}, clobbers: callerSave} + ) + + RISCV64ops := []opData{ + {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1 + {name: "ADDI", argLength: 1, reg: gp11sb, asm: "ADDI", aux: "Int64"}, // arg0 + auxint + {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0 - arg1 + + // M extension. H means high (i.e., it returns the top bits of + // the result). U means unsigned. W means word (i.e., 32-bit). 
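+		// For example, MULH x y yields the upper 64 bits of the signed 128-bit
+		// product, MULHU the upper 64 bits of the unsigned product, and DIVW
+		// divides the low 32 bits of its operands and sign-extends the result.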
+ {name: "MUL", argLength: 2, reg: gp21, asm: "MUL", commutative: true, typ: "Int64"}, // arg0 * arg1 + {name: "MULW", argLength: 2, reg: gp21, asm: "MULW", commutative: true, typ: "Int32"}, + {name: "MULH", argLength: 2, reg: gp21, asm: "MULH", commutative: true, typ: "Int64"}, + {name: "MULHU", argLength: 2, reg: gp21, asm: "MULHU", commutative: true, typ: "UInt64"}, + {name: "DIV", argLength: 2, reg: gp21, asm: "DIV", typ: "Int64"}, // arg0 / arg1 + {name: "DIVU", argLength: 2, reg: gp21, asm: "DIVU", typ: "UInt64"}, + {name: "DIVW", argLength: 2, reg: gp21, asm: "DIVW", typ: "Int32"}, + {name: "DIVUW", argLength: 2, reg: gp21, asm: "DIVUW", typ: "UInt32"}, + {name: "REM", argLength: 2, reg: gp21, asm: "REM", typ: "Int64"}, // arg0 % arg1 + {name: "REMU", argLength: 2, reg: gp21, asm: "REMU", typ: "UInt64"}, + {name: "REMW", argLength: 2, reg: gp21, asm: "REMW", typ: "Int32"}, + {name: "REMUW", argLength: 2, reg: gp21, asm: "REMUW", typ: "UInt32"}, + + {name: "MOVaddr", argLength: 1, reg: gp11sb, asm: "MOV", aux: "SymOff", rematerializeable: true, symEffect: "RdWr"}, // arg0 + auxint + offset encoded in aux + // auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address + + {name: "MOVBconst", reg: gp01, asm: "MOV", typ: "UInt8", aux: "Int8", rematerializeable: true}, // 8 low bits of auxint + {name: "MOVHconst", reg: gp01, asm: "MOV", typ: "UInt16", aux: "Int16", rematerializeable: true}, // 16 low bits of auxint + {name: "MOVWconst", reg: gp01, asm: "MOV", typ: "UInt32", aux: "Int32", rematerializeable: true}, // 32 low bits of auxint + {name: "MOVDconst", reg: gp01, asm: "MOV", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint + + // Loads: load bits from arg0+auxint+aux and extend to 64 bits; arg1=mem + {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVB", aux: "SymOff", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // 8 bits, sign extend + {name: "MOVHload", argLength: 2, reg: gpload, asm: "MOVH", aux: "SymOff", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // 16 bits, sign extend + {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVW", aux: "SymOff", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // 32 bits, sign extend + {name: "MOVDload", argLength: 2, reg: gpload, asm: "MOV", aux: "SymOff", typ: "Int64", faultOnNilArg0: true, symEffect: "Read"}, // 64 bits + {name: "MOVBUload", argLength: 2, reg: gpload, asm: "MOVBU", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // 8 bits, zero extend + {name: "MOVHUload", argLength: 2, reg: gpload, asm: "MOVHU", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // 16 bits, zero extend + {name: "MOVWUload", argLength: 2, reg: gpload, asm: "MOVWU", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // 32 bits, zero extend + + // Stores: store lowest bits in arg1 to arg0+auxint+aux; arg2=mem + {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 8 bits + {name: "MOVHstore", argLength: 3, reg: gpstore, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 16 bits + {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 32 bits + {name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOV", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 64 bits + + // Shift ops + {name: "SLL", 
argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << aux1 + {name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> aux1, signed + {name: "SRL", argLength: 2, reg: gp21, asm: "SRL"}, // arg0 >> aux1, unsigned + {name: "SLLI", argLength: 1, reg: gp11, asm: "SLLI", aux: "Int64"}, // arg0 << auxint + {name: "SRAI", argLength: 1, reg: gp11, asm: "SRAI", aux: "Int64"}, // arg0 >> auxint, signed + {name: "SRLI", argLength: 1, reg: gp11, asm: "SRLI", aux: "Int64"}, // arg0 >> auxint, unsigned + + // Bitwise ops + {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true}, // arg0 ^ arg1 + {name: "XORI", argLength: 1, reg: gp11, asm: "XORI", aux: "Int64"}, // arg0 ^ auxint + {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0 | arg1 + {name: "ORI", argLength: 1, reg: gp11, asm: "ORI", aux: "Int64"}, // arg0 | auxint + {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1 + {name: "ANDI", argLength: 1, reg: gp11, asm: "ANDI", aux: "Int64"}, // arg0 & auxint + + // Generate boolean values + {name: "SEQZ", argLength: 1, reg: gp11, asm: "SEQZ"}, // arg0 == 0, result is 0 or 1 + {name: "SNEZ", argLength: 1, reg: gp11, asm: "SNEZ"}, // arg0 != 0, result is 0 or 1 + {name: "SLT", argLength: 2, reg: gp21, asm: "SLT"}, // arg0 < arg1, result is 0 or 1 + {name: "SLTI", argLength: 1, reg: gp11, asm: "SLTI", aux: "Int64"}, // arg0 < auxint, result is 0 or 1 + {name: "SLTU", argLength: 2, reg: gp21, asm: "SLTU"}, // arg0 < arg1, unsigned, result is 0 or 1 + {name: "SLTIU", argLength: 1, reg: gp11, asm: "SLTIU", aux: "Int64"}, // arg0 < auxint, unsigned, result is 0 or 1 + + // MOVconvert converts between pointers and integers. + // We have a special op for this so as to not confuse GC + // (particularly stack maps). It takes a memory arg so it + // gets correctly ordered with respect to GC safepoints. + {name: "MOVconvert", argLength: 2, reg: gp11, asm: "MOV"}, // arg0, but converted to int/ptr as appropriate; arg1=mem + + // Calls + {name: "CALLstatic", argLength: 1, reg: call, aux: "SymOff", call: true, symEffect: "None"}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: 3, reg: callClosure, aux: "Int64", call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: 2, reg: callInter, aux: "Int64", call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem + + // Generic moves and zeros + + // general unaligned zeroing + // arg0 = address of memory to zero (in X5, changed as side effect) + // arg1 = address of the last element to zero (inclusive) + // arg2 = mem + // auxint = element size + // returns mem + // mov ZERO, (X5) + // ADD $sz, X5 + // BGEU Rarg1, X5, -2(PC) + { + name: "LoweredZero", + aux: "Int64", + argLength: 3, + reg: regInfo{ + inputs: []regMask{regNamed["X5"], gpMask}, + clobbers: regNamed["X5"], + }, + typ: "Mem", + faultOnNilArg0: true, + }, + + // general unaligned move + // arg0 = address of dst memory (in X5, changed as side effect) + // arg1 = address of src memory (in X6, changed as side effect) + // arg2 = address of the last element of src (can't be X7 as we clobber it before using arg2) + // arg3 = mem + // auxint = alignment + // clobbers X7 as a tmp register. 
+ // returns mem + // mov (X6), X7 + // mov X7, (X5) + // ADD $sz, X5 + // ADD $sz, X6 + // BGEU Rarg2, X5, -4(PC) + { + name: "LoweredMove", + aux: "Int64", + argLength: 4, + reg: regInfo{ + inputs: []regMask{regNamed["X5"], regNamed["X6"], gpMask &^ regNamed["X7"]}, + clobbers: regNamed["X5"] | regNamed["X6"] | regNamed["X7"], + }, + typ: "Mem", + faultOnNilArg0: true, + faultOnNilArg1: true, + }, + + // Lowering pass-throughs + {name: "LoweredNilCheck", argLength: 2, faultOnNilArg0: true, nilCheck: true, reg: regInfo{inputs: []regMask{gpspMask}}}, // arg0=ptr,arg1=mem, returns void. Faults if ptr is nil. + {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{regCtxt}}}, // scheduler ensures only at beginning of entry block + + // LoweredGetCallerSP returns the SP of the caller of the current function. + {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true}, + + // LoweredGetCallerPC evaluates to the PC to which its "caller" will return. + // I.e., if f calls g "calls" getcallerpc, + // the result should be the PC within f that g will return to. + // See runtime/stubs.go for a more detailed discussion. + {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true}, + + // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier + // It saves all GP registers if necessary, + // but clobbers RA (LR) because it's a call + // and T6 (REG_TMP). + {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{regNamed["X5"], regNamed["X6"]}, clobbers: (callerSave &^ (gpMask | regNamed["g"])) | regNamed["X1"]}, clobberFlags: true, aux: "Sym", symEffect: "None"}, + + // There are three of these functions so that they can have three different register inputs. + // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the + // default registers to match so we don't need to copy registers around unnecessarily. + {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X7"], regNamed["X28"]}}, typ: "Mem"}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). + {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X6"], regNamed["X7"]}}, typ: "Mem"}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). + {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X5"], regNamed["X6"]}}, typ: "Mem"}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). + + // F extension. 
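+		// The comparison ops (FEQS, FNES, FLTS, FLES) use the fp2gp register
+		// class: their 0/1 result lands in an integer register, not in the
+		// floating point register file.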
+ {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true, typ: "Float32"}, // arg0 + arg1 + {name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS", commutative: false, typ: "Float32"}, // arg0 - arg1 + {name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true, typ: "Float32"}, // arg0 * arg1 + {name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS", commutative: false, typ: "Float32"}, // arg0 / arg1 + {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS", typ: "Float32"}, // sqrt(arg0) + {name: "FNEGS", argLength: 1, reg: fp11, asm: "FNEGS", typ: "Float32"}, // -arg0 + {name: "FMVSX", argLength: 1, reg: gpfp, asm: "FMVSX", typ: "Float32"}, // reinterpret arg0 as float + {name: "FCVTSW", argLength: 1, reg: gpfp, asm: "FCVTSW", typ: "Float32"}, // float32(low 32 bits of arg0) + {name: "FCVTSL", argLength: 1, reg: gpfp, asm: "FCVTSL", typ: "Float32"}, // float32(arg0) + {name: "FCVTWS", argLength: 1, reg: fpgp, asm: "FCVTWS", typ: "Int32"}, // int32(arg0) + {name: "FCVTLS", argLength: 1, reg: fpgp, asm: "FCVTLS", typ: "Int64"}, // int64(arg0) + {name: "FMOVWload", argLength: 2, reg: fpload, asm: "MOVF", aux: "SymOff", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load float32 from arg0+auxint+aux + {name: "FMOVWstore", argLength: 3, reg: fpstore, asm: "MOVF", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store float32 to arg0+auxint+aux + {name: "FEQS", argLength: 2, reg: fp2gp, asm: "FEQS", commutative: true}, // arg0 == arg1 + {name: "FNES", argLength: 2, reg: fp2gp, asm: "FNES", commutative: true}, // arg0 != arg1 + {name: "FLTS", argLength: 2, reg: fp2gp, asm: "FLTS"}, // arg0 < arg1 + {name: "FLES", argLength: 2, reg: fp2gp, asm: "FLES"}, // arg0 <= arg1 + + // D extension. 
+ {name: "FADDD", argLength: 2, reg: fp21, asm: "FADDD", commutative: true, typ: "Float64"}, // arg0 + arg1 + {name: "FSUBD", argLength: 2, reg: fp21, asm: "FSUBD", commutative: false, typ: "Float64"}, // arg0 - arg1 + {name: "FMULD", argLength: 2, reg: fp21, asm: "FMULD", commutative: true, typ: "Float64"}, // arg0 * arg1 + {name: "FDIVD", argLength: 2, reg: fp21, asm: "FDIVD", commutative: false, typ: "Float64"}, // arg0 / arg1 + {name: "FSQRTD", argLength: 1, reg: fp11, asm: "FSQRTD", typ: "Float64"}, // sqrt(arg0) + {name: "FNEGD", argLength: 1, reg: fp11, asm: "FNEGD", typ: "Float64"}, // -arg0 + {name: "FMVDX", argLength: 1, reg: gpfp, asm: "FMVDX", typ: "Float64"}, // reinterpret arg0 as float + {name: "FCVTDW", argLength: 1, reg: gpfp, asm: "FCVTDW", typ: "Float64"}, // float64(low 32 bits of arg0) + {name: "FCVTDL", argLength: 1, reg: gpfp, asm: "FCVTDL", typ: "Float64"}, // float64(arg0) + {name: "FCVTWD", argLength: 1, reg: fpgp, asm: "FCVTWD", typ: "Int32"}, // int32(arg0) + {name: "FCVTLD", argLength: 1, reg: fpgp, asm: "FCVTLD", typ: "Int64"}, // int64(arg0) + {name: "FCVTDS", argLength: 1, reg: fp11, asm: "FCVTDS", typ: "Float64"}, // float64(arg0) + {name: "FCVTSD", argLength: 1, reg: fp11, asm: "FCVTSD", typ: "Float32"}, // float32(arg0) + {name: "FMOVDload", argLength: 2, reg: fpload, asm: "MOVD", aux: "SymOff", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load float64 from arg0+auxint+aux + {name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store float6 to arg0+auxint+aux + {name: "FEQD", argLength: 2, reg: fp2gp, asm: "FEQD", commutative: true}, // arg0 == arg1 + {name: "FNED", argLength: 2, reg: fp2gp, asm: "FNED", commutative: true}, // arg0 != arg1 + {name: "FLTD", argLength: 2, reg: fp2gp, asm: "FLTD"}, // arg0 < arg1 + {name: "FLED", argLength: 2, reg: fp2gp, asm: "FLED"}, // arg0 <= arg1 + } + + RISCV64blocks := []blockData{ + {name: "BNE", controls: 1}, // Control != 0 (take a register) + } + + archs = append(archs, arch{ + name: "RISCV64", + pkg: "cmd/internal/obj/riscv", + genfile: "../../riscv64/ssa.go", + ops: RISCV64ops, + blocks: RISCV64blocks, + regnames: regNamesRISCV64, + gpregmask: gpMask, + fpregmask: fpMask, + framepointerreg: -1, // not used + }) +} diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 99dc60640c..86428a3e84 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -8,6 +8,7 @@ import ( "cmd/internal/obj/arm64" "cmd/internal/obj/mips" "cmd/internal/obj/ppc64" + "cmd/internal/obj/riscv" "cmd/internal/obj/s390x" "cmd/internal/obj/wasm" "cmd/internal/obj/x86" @@ -111,6 +112,8 @@ const ( BlockPPC64FGT BlockPPC64FGE + BlockRISCV64BNE + BlockS390XBRC BlockS390XCRJ BlockS390XCGRJ @@ -228,6 +231,8 @@ var blockString = [...]string{ BlockPPC64FGT: "FGT", BlockPPC64FGE: "FGE", + BlockRISCV64BNE: "BNE", + BlockS390XBRC: "BRC", BlockS390XCRJ: "CRJ", BlockS390XCGRJ: "CGRJ", @@ -1877,6 +1882,106 @@ const ( OpPPC64FlagLT OpPPC64FlagGT + OpRISCV64ADD + OpRISCV64ADDI + OpRISCV64SUB + OpRISCV64MUL + OpRISCV64MULW + OpRISCV64MULH + OpRISCV64MULHU + OpRISCV64DIV + OpRISCV64DIVU + OpRISCV64DIVW + OpRISCV64DIVUW + OpRISCV64REM + OpRISCV64REMU + OpRISCV64REMW + OpRISCV64REMUW + OpRISCV64MOVaddr + OpRISCV64MOVBconst + OpRISCV64MOVHconst + OpRISCV64MOVWconst + OpRISCV64MOVDconst + OpRISCV64MOVBload + OpRISCV64MOVHload + OpRISCV64MOVWload + OpRISCV64MOVDload + OpRISCV64MOVBUload + 
OpRISCV64MOVHUload + OpRISCV64MOVWUload + OpRISCV64MOVBstore + OpRISCV64MOVHstore + OpRISCV64MOVWstore + OpRISCV64MOVDstore + OpRISCV64SLL + OpRISCV64SRA + OpRISCV64SRL + OpRISCV64SLLI + OpRISCV64SRAI + OpRISCV64SRLI + OpRISCV64XOR + OpRISCV64XORI + OpRISCV64OR + OpRISCV64ORI + OpRISCV64AND + OpRISCV64ANDI + OpRISCV64SEQZ + OpRISCV64SNEZ + OpRISCV64SLT + OpRISCV64SLTI + OpRISCV64SLTU + OpRISCV64SLTIU + OpRISCV64MOVconvert + OpRISCV64CALLstatic + OpRISCV64CALLclosure + OpRISCV64CALLinter + OpRISCV64LoweredZero + OpRISCV64LoweredMove + OpRISCV64LoweredNilCheck + OpRISCV64LoweredGetClosurePtr + OpRISCV64LoweredGetCallerSP + OpRISCV64LoweredGetCallerPC + OpRISCV64LoweredWB + OpRISCV64LoweredPanicBoundsA + OpRISCV64LoweredPanicBoundsB + OpRISCV64LoweredPanicBoundsC + OpRISCV64FADDS + OpRISCV64FSUBS + OpRISCV64FMULS + OpRISCV64FDIVS + OpRISCV64FSQRTS + OpRISCV64FNEGS + OpRISCV64FMVSX + OpRISCV64FCVTSW + OpRISCV64FCVTSL + OpRISCV64FCVTWS + OpRISCV64FCVTLS + OpRISCV64FMOVWload + OpRISCV64FMOVWstore + OpRISCV64FEQS + OpRISCV64FNES + OpRISCV64FLTS + OpRISCV64FLES + OpRISCV64FADDD + OpRISCV64FSUBD + OpRISCV64FMULD + OpRISCV64FDIVD + OpRISCV64FSQRTD + OpRISCV64FNEGD + OpRISCV64FMVDX + OpRISCV64FCVTDW + OpRISCV64FCVTDL + OpRISCV64FCVTWD + OpRISCV64FCVTLD + OpRISCV64FCVTDS + OpRISCV64FCVTSD + OpRISCV64FMOVDload + OpRISCV64FMOVDstore + OpRISCV64FEQD + OpRISCV64FNED + OpRISCV64FLTD + OpRISCV64FLED + OpS390XFADDS OpS390XFADD OpS390XFSUBS @@ -24892,6 +24997,1370 @@ var opcodeTable = [...]opInfo{ reg: regInfo{}, }, + { + name: "ADD", + argLen: 2, + commutative: true, + asm: riscv.AADD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "ADDI", + auxType: auxInt64, + argLen: 1, + asm: riscv.AADDI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "SUB", + argLen: 2, + asm: riscv.ASUB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "MUL", + argLen: 2, + commutative: true, + asm: riscv.AMUL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "MULW", + argLen: 2, + commutative: 
true, + asm: riscv.AMULW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "MULH", + argLen: 2, + commutative: true, + asm: riscv.AMULH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "MULHU", + argLen: 2, + commutative: true, + asm: riscv.AMULHU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "DIV", + argLen: 2, + asm: riscv.ADIV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "DIVU", + argLen: 2, + asm: riscv.ADIVU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "DIVW", + argLen: 2, + asm: riscv.ADIVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "DIVUW", + argLen: 2, + asm: riscv.ADIVUW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 
X28 X29 X30 + }, + }, + }, + { + name: "REM", + argLen: 2, + asm: riscv.AREM, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "REMU", + argLen: 2, + asm: riscv.AREMU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "REMW", + argLen: 2, + asm: riscv.AREMW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "REMUW", + argLen: 2, + asm: riscv.AREMUW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "MOVaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymRdWr, + asm: riscv.AMOV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "MOVBconst", + auxType: auxInt8, + argLen: 0, + rematerializeable: true, + asm: riscv.AMOV, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "MOVHconst", + auxType: auxInt16, + argLen: 0, + rematerializeable: true, + asm: riscv.AMOV, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "MOVWconst", + auxType: auxInt32, + argLen: 0, + rematerializeable: true, + asm: riscv.AMOV, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "MOVDconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: riscv.AMOV, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073741812}, 
// X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "MOVWUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: 
riscv.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741814}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741814}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741814}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + }, + }, + }, + { + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOV, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1073741814}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + }, + }, + }, + { + name: "SLL", + argLen: 2, + asm: riscv.ASLL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "SRA", + argLen: 2, + asm: riscv.ASRA, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "SRL", + argLen: 2, + asm: riscv.ASRL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "SLLI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASLLI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "SRAI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASRAI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "SRLI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASRLI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "XOR", + argLen: 2, + commutative: true, + asm: riscv.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "XORI", + auxType: auxInt64, + argLen: 1, + asm: riscv.AXORI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "OR", + argLen: 2, + commutative: true, + asm: riscv.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "ORI", + auxType: auxInt64, + argLen: 1, + asm: riscv.AORI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "AND", + argLen: 2, + commutative: true, + asm: riscv.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "ANDI", + auxType: auxInt64, + argLen: 1, + asm: riscv.AANDI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 
X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "SEQZ", + argLen: 1, + asm: riscv.ASEQZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "SNEZ", + argLen: 1, + asm: riscv.ASNEZ, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "SLT", + argLen: 2, + asm: riscv.ASLT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "SLTI", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASLTI, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "SLTU", + argLen: 2, + asm: riscv.ASLTU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "SLTIU", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASLTIU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "MOVconvert", + argLen: 2, + asm: riscv.AMOV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "CALLstatic", + auxType: auxSymOff, + argLen: 1, + call: true, + symEffect: SymNone, + reg: regInfo{ + clobbers: 9223372035781033980, // X3 g X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLclosure", + auxType: auxInt64, + argLen: 3, + call: true, + reg: 
regInfo{ + inputs: []inputInfo{ + {1, 524288}, // X20 + {0, 1073741814}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + clobbers: 9223372035781033980, // X3 g X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLinter", + auxType: auxInt64, + argLen: 2, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + clobbers: 9223372035781033980, // X3 g X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "LoweredZero", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16}, // X5 + {1, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + clobbers: 16, // X5 + }, + }, + { + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + faultOnNilArg0: true, + faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16}, // X5 + {1, 32}, // X6 + {2, 1073741748}, // X3 X5 X6 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + clobbers: 112, // X5 X6 X7 + }, + }, + { + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741814}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + reg: regInfo{ + outputs: []outputInfo{ + {0, 524288}, // X20 + }, + }, + }, + { + name: "LoweredGetCallerSP", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "LoweredWB", + auxType: auxSym, + argLen: 3, + clobberFlags: true, + symEffect: SymNone, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16}, // X5 + {1, 32}, // X6 + }, + clobbers: 9223372034707292160, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + reg: regInfo{ + inputs: []inputInfo{ + {0, 64}, // X7 + {1, 134217728}, // X28 + }, + }, + }, + { + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + reg: regInfo{ + inputs: []inputInfo{ + {0, 32}, // X6 + {1, 64}, // X7 + }, + }, + }, + { + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + reg: regInfo{ + inputs: []inputInfo{ + {0, 16}, // X5 + {1, 32}, // X6 + }, + }, + }, + { + name: "FADDS", + argLen: 2, + commutative: true, + asm: riscv.AFADDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 
F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FSUBS", + argLen: 2, + asm: riscv.AFSUBS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMULS", + argLen: 2, + commutative: true, + asm: riscv.AFMULS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FDIVS", + argLen: 2, + asm: riscv.AFDIVS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FSQRTS", + argLen: 1, + asm: riscv.AFSQRTS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNEGS", + argLen: 1, + asm: riscv.AFNEGS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMVSX", + argLen: 1, + asm: riscv.AFMVSX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTSW", + argLen: 1, + asm: riscv.AFCVTSW, + reg: regInfo{ + inputs: []inputInfo{ 
+ {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTSL", + argLen: 1, + asm: riscv.AFCVTSL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTWS", + argLen: 1, + asm: riscv.AFCVTWS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "FCVTLS", + argLen: 1, + asm: riscv.AFCVTLS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "FMOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FEQS", + argLen: 2, + commutative: true, + asm: riscv.AFEQS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "FNES", + argLen: 2, + commutative: true, + asm: riscv.AFNES, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 
1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "FLTS", + argLen: 2, + asm: riscv.AFLTS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "FLES", + argLen: 2, + asm: riscv.AFLES, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "FADDD", + argLen: 2, + commutative: true, + asm: riscv.AFADDD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FSUBD", + argLen: 2, + asm: riscv.AFSUBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMULD", + argLen: 2, + commutative: true, + asm: riscv.AFMULD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FDIVD", + argLen: 2, + asm: riscv.AFDIVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 
F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FSQRTD", + argLen: 1, + asm: riscv.AFSQRTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FNEGD", + argLen: 1, + asm: riscv.AFNEGD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMVDX", + argLen: 1, + asm: riscv.AFMVDX, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTDW", + argLen: 1, + asm: riscv.AFCVTDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTDL", + argLen: 1, + asm: riscv.AFCVTDL, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTWD", + argLen: 1, + asm: riscv.AFCVTWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "FCVTLD", + argLen: 1, + asm: riscv.AFCVTLD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "FCVTDS", + argLen: 1, + asm: riscv.AFCVTDS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FCVTSD", + argLen: 1, + asm: riscv.AFCVTSD, + reg: regInfo{ + inputs: 
[]inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: riscv.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FMOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: riscv.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372037928517622}, // SP X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 SB + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "FEQD", + argLen: 2, + commutative: true, + asm: riscv.AFEQD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "FNED", + argLen: 2, + commutative: true, + asm: riscv.AFNED, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "FLTD", + argLen: 2, + asm: riscv.AFLTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { + name: "FLED", + argLen: 2, + asm: riscv.AFLED, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 1073741812}, // X3 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 
X23 X24 X25 X26 X27 X28 X29 X30 + }, + }, + }, + { name: "FADDS", argLen: 2, @@ -32010,6 +33479,77 @@ var fpRegMaskPPC64 = regMask(576460743713488896) var specialRegMaskPPC64 = regMask(0) var framepointerRegPPC64 = int8(1) var linkRegPPC64 = int8(-1) +var registersRISCV64 = [...]Register{ + {0, riscv.REG_X0, -1, "X0"}, + {1, riscv.REGSP, -1, "SP"}, + {2, riscv.REG_X3, 0, "X3"}, + {3, riscv.REGG, -1, "g"}, + {4, riscv.REG_X5, 1, "X5"}, + {5, riscv.REG_X6, 2, "X6"}, + {6, riscv.REG_X7, 3, "X7"}, + {7, riscv.REG_X8, 4, "X8"}, + {8, riscv.REG_X9, 5, "X9"}, + {9, riscv.REG_X10, 6, "X10"}, + {10, riscv.REG_X11, 7, "X11"}, + {11, riscv.REG_X12, 8, "X12"}, + {12, riscv.REG_X13, 9, "X13"}, + {13, riscv.REG_X14, 10, "X14"}, + {14, riscv.REG_X15, 11, "X15"}, + {15, riscv.REG_X16, 12, "X16"}, + {16, riscv.REG_X17, 13, "X17"}, + {17, riscv.REG_X18, 14, "X18"}, + {18, riscv.REG_X19, 15, "X19"}, + {19, riscv.REG_X20, 16, "X20"}, + {20, riscv.REG_X21, 17, "X21"}, + {21, riscv.REG_X22, 18, "X22"}, + {22, riscv.REG_X23, 19, "X23"}, + {23, riscv.REG_X24, 20, "X24"}, + {24, riscv.REG_X25, 21, "X25"}, + {25, riscv.REG_X26, 22, "X26"}, + {26, riscv.REG_X27, 23, "X27"}, + {27, riscv.REG_X28, 24, "X28"}, + {28, riscv.REG_X29, 25, "X29"}, + {29, riscv.REG_X30, 26, "X30"}, + {30, riscv.REG_X31, -1, "X31"}, + {31, riscv.REG_F0, -1, "F0"}, + {32, riscv.REG_F1, -1, "F1"}, + {33, riscv.REG_F2, -1, "F2"}, + {34, riscv.REG_F3, -1, "F3"}, + {35, riscv.REG_F4, -1, "F4"}, + {36, riscv.REG_F5, -1, "F5"}, + {37, riscv.REG_F6, -1, "F6"}, + {38, riscv.REG_F7, -1, "F7"}, + {39, riscv.REG_F8, -1, "F8"}, + {40, riscv.REG_F9, -1, "F9"}, + {41, riscv.REG_F10, -1, "F10"}, + {42, riscv.REG_F11, -1, "F11"}, + {43, riscv.REG_F12, -1, "F12"}, + {44, riscv.REG_F13, -1, "F13"}, + {45, riscv.REG_F14, -1, "F14"}, + {46, riscv.REG_F15, -1, "F15"}, + {47, riscv.REG_F16, -1, "F16"}, + {48, riscv.REG_F17, -1, "F17"}, + {49, riscv.REG_F18, -1, "F18"}, + {50, riscv.REG_F19, -1, "F19"}, + {51, riscv.REG_F20, -1, "F20"}, + {52, riscv.REG_F21, -1, "F21"}, + {53, riscv.REG_F22, -1, "F22"}, + {54, riscv.REG_F23, -1, "F23"}, + {55, riscv.REG_F24, -1, "F24"}, + {56, riscv.REG_F25, -1, "F25"}, + {57, riscv.REG_F26, -1, "F26"}, + {58, riscv.REG_F27, -1, "F27"}, + {59, riscv.REG_F28, -1, "F28"}, + {60, riscv.REG_F29, -1, "F29"}, + {61, riscv.REG_F30, -1, "F30"}, + {62, riscv.REG_F31, -1, "F31"}, + {63, 0, -1, "SB"}, +} +var gpRegMaskRISCV64 = regMask(1073741812) +var fpRegMaskRISCV64 = regMask(9223372034707292160) +var specialRegMaskRISCV64 = regMask(0) +var framepointerRegRISCV64 = int8(-1) +var linkRegRISCV64 = int8(0) var registersS390X = [...]Register{ {0, s390x.REG_R0, 0, "R0"}, {1, s390x.REG_R1, 1, "R1"}, diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go new file mode 100644 index 0000000000..8ccfaa54f9 --- /dev/null +++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go @@ -0,0 +1,5561 @@ +// Code generated from gen/RISCV64.rules; DO NOT EDIT. 
+// generated with: cd gen; go run *.go + +package ssa + +import "math" +import "cmd/compile/internal/types" + +func rewriteValueRISCV64(v *Value) bool { + switch v.Op { + case OpAdd16: + return rewriteValueRISCV64_OpAdd16_0(v) + case OpAdd32: + return rewriteValueRISCV64_OpAdd32_0(v) + case OpAdd32F: + return rewriteValueRISCV64_OpAdd32F_0(v) + case OpAdd64: + return rewriteValueRISCV64_OpAdd64_0(v) + case OpAdd64F: + return rewriteValueRISCV64_OpAdd64F_0(v) + case OpAdd8: + return rewriteValueRISCV64_OpAdd8_0(v) + case OpAddPtr: + return rewriteValueRISCV64_OpAddPtr_0(v) + case OpAddr: + return rewriteValueRISCV64_OpAddr_0(v) + case OpAnd16: + return rewriteValueRISCV64_OpAnd16_0(v) + case OpAnd32: + return rewriteValueRISCV64_OpAnd32_0(v) + case OpAnd64: + return rewriteValueRISCV64_OpAnd64_0(v) + case OpAnd8: + return rewriteValueRISCV64_OpAnd8_0(v) + case OpAndB: + return rewriteValueRISCV64_OpAndB_0(v) + case OpAvg64u: + return rewriteValueRISCV64_OpAvg64u_0(v) + case OpClosureCall: + return rewriteValueRISCV64_OpClosureCall_0(v) + case OpCom16: + return rewriteValueRISCV64_OpCom16_0(v) + case OpCom32: + return rewriteValueRISCV64_OpCom32_0(v) + case OpCom64: + return rewriteValueRISCV64_OpCom64_0(v) + case OpCom8: + return rewriteValueRISCV64_OpCom8_0(v) + case OpConst16: + return rewriteValueRISCV64_OpConst16_0(v) + case OpConst32: + return rewriteValueRISCV64_OpConst32_0(v) + case OpConst32F: + return rewriteValueRISCV64_OpConst32F_0(v) + case OpConst64: + return rewriteValueRISCV64_OpConst64_0(v) + case OpConst64F: + return rewriteValueRISCV64_OpConst64F_0(v) + case OpConst8: + return rewriteValueRISCV64_OpConst8_0(v) + case OpConstBool: + return rewriteValueRISCV64_OpConstBool_0(v) + case OpConstNil: + return rewriteValueRISCV64_OpConstNil_0(v) + case OpConvert: + return rewriteValueRISCV64_OpConvert_0(v) + case OpCvt32Fto32: + return rewriteValueRISCV64_OpCvt32Fto32_0(v) + case OpCvt32Fto64: + return rewriteValueRISCV64_OpCvt32Fto64_0(v) + case OpCvt32Fto64F: + return rewriteValueRISCV64_OpCvt32Fto64F_0(v) + case OpCvt32to32F: + return rewriteValueRISCV64_OpCvt32to32F_0(v) + case OpCvt32to64F: + return rewriteValueRISCV64_OpCvt32to64F_0(v) + case OpCvt64Fto32: + return rewriteValueRISCV64_OpCvt64Fto32_0(v) + case OpCvt64Fto32F: + return rewriteValueRISCV64_OpCvt64Fto32F_0(v) + case OpCvt64Fto64: + return rewriteValueRISCV64_OpCvt64Fto64_0(v) + case OpCvt64to32F: + return rewriteValueRISCV64_OpCvt64to32F_0(v) + case OpCvt64to64F: + return rewriteValueRISCV64_OpCvt64to64F_0(v) + case OpDiv16: + return rewriteValueRISCV64_OpDiv16_0(v) + case OpDiv16u: + return rewriteValueRISCV64_OpDiv16u_0(v) + case OpDiv32: + return rewriteValueRISCV64_OpDiv32_0(v) + case OpDiv32F: + return rewriteValueRISCV64_OpDiv32F_0(v) + case OpDiv32u: + return rewriteValueRISCV64_OpDiv32u_0(v) + case OpDiv64: + return rewriteValueRISCV64_OpDiv64_0(v) + case OpDiv64F: + return rewriteValueRISCV64_OpDiv64F_0(v) + case OpDiv64u: + return rewriteValueRISCV64_OpDiv64u_0(v) + case OpDiv8: + return rewriteValueRISCV64_OpDiv8_0(v) + case OpDiv8u: + return rewriteValueRISCV64_OpDiv8u_0(v) + case OpEq16: + return rewriteValueRISCV64_OpEq16_0(v) + case OpEq32: + return rewriteValueRISCV64_OpEq32_0(v) + case OpEq32F: + return rewriteValueRISCV64_OpEq32F_0(v) + case OpEq64: + return rewriteValueRISCV64_OpEq64_0(v) + case OpEq64F: + return rewriteValueRISCV64_OpEq64F_0(v) + case OpEq8: + return rewriteValueRISCV64_OpEq8_0(v) + case OpEqB: + return rewriteValueRISCV64_OpEqB_0(v) + case OpEqPtr: + return 
rewriteValueRISCV64_OpEqPtr_0(v) + case OpGeq16: + return rewriteValueRISCV64_OpGeq16_0(v) + case OpGeq16U: + return rewriteValueRISCV64_OpGeq16U_0(v) + case OpGeq32: + return rewriteValueRISCV64_OpGeq32_0(v) + case OpGeq32F: + return rewriteValueRISCV64_OpGeq32F_0(v) + case OpGeq32U: + return rewriteValueRISCV64_OpGeq32U_0(v) + case OpGeq64: + return rewriteValueRISCV64_OpGeq64_0(v) + case OpGeq64F: + return rewriteValueRISCV64_OpGeq64F_0(v) + case OpGeq64U: + return rewriteValueRISCV64_OpGeq64U_0(v) + case OpGeq8: + return rewriteValueRISCV64_OpGeq8_0(v) + case OpGeq8U: + return rewriteValueRISCV64_OpGeq8U_0(v) + case OpGetCallerPC: + return rewriteValueRISCV64_OpGetCallerPC_0(v) + case OpGetCallerSP: + return rewriteValueRISCV64_OpGetCallerSP_0(v) + case OpGetClosurePtr: + return rewriteValueRISCV64_OpGetClosurePtr_0(v) + case OpGreater16: + return rewriteValueRISCV64_OpGreater16_0(v) + case OpGreater16U: + return rewriteValueRISCV64_OpGreater16U_0(v) + case OpGreater32: + return rewriteValueRISCV64_OpGreater32_0(v) + case OpGreater32F: + return rewriteValueRISCV64_OpGreater32F_0(v) + case OpGreater32U: + return rewriteValueRISCV64_OpGreater32U_0(v) + case OpGreater64: + return rewriteValueRISCV64_OpGreater64_0(v) + case OpGreater64F: + return rewriteValueRISCV64_OpGreater64F_0(v) + case OpGreater64U: + return rewriteValueRISCV64_OpGreater64U_0(v) + case OpGreater8: + return rewriteValueRISCV64_OpGreater8_0(v) + case OpGreater8U: + return rewriteValueRISCV64_OpGreater8U_0(v) + case OpHmul32: + return rewriteValueRISCV64_OpHmul32_0(v) + case OpHmul32u: + return rewriteValueRISCV64_OpHmul32u_0(v) + case OpHmul64: + return rewriteValueRISCV64_OpHmul64_0(v) + case OpHmul64u: + return rewriteValueRISCV64_OpHmul64u_0(v) + case OpInterCall: + return rewriteValueRISCV64_OpInterCall_0(v) + case OpIsInBounds: + return rewriteValueRISCV64_OpIsInBounds_0(v) + case OpIsNonNil: + return rewriteValueRISCV64_OpIsNonNil_0(v) + case OpIsSliceInBounds: + return rewriteValueRISCV64_OpIsSliceInBounds_0(v) + case OpLeq16: + return rewriteValueRISCV64_OpLeq16_0(v) + case OpLeq16U: + return rewriteValueRISCV64_OpLeq16U_0(v) + case OpLeq32: + return rewriteValueRISCV64_OpLeq32_0(v) + case OpLeq32F: + return rewriteValueRISCV64_OpLeq32F_0(v) + case OpLeq32U: + return rewriteValueRISCV64_OpLeq32U_0(v) + case OpLeq64: + return rewriteValueRISCV64_OpLeq64_0(v) + case OpLeq64F: + return rewriteValueRISCV64_OpLeq64F_0(v) + case OpLeq64U: + return rewriteValueRISCV64_OpLeq64U_0(v) + case OpLeq8: + return rewriteValueRISCV64_OpLeq8_0(v) + case OpLeq8U: + return rewriteValueRISCV64_OpLeq8U_0(v) + case OpLess16: + return rewriteValueRISCV64_OpLess16_0(v) + case OpLess16U: + return rewriteValueRISCV64_OpLess16U_0(v) + case OpLess32: + return rewriteValueRISCV64_OpLess32_0(v) + case OpLess32F: + return rewriteValueRISCV64_OpLess32F_0(v) + case OpLess32U: + return rewriteValueRISCV64_OpLess32U_0(v) + case OpLess64: + return rewriteValueRISCV64_OpLess64_0(v) + case OpLess64F: + return rewriteValueRISCV64_OpLess64F_0(v) + case OpLess64U: + return rewriteValueRISCV64_OpLess64U_0(v) + case OpLess8: + return rewriteValueRISCV64_OpLess8_0(v) + case OpLess8U: + return rewriteValueRISCV64_OpLess8U_0(v) + case OpLoad: + return rewriteValueRISCV64_OpLoad_0(v) + case OpLocalAddr: + return rewriteValueRISCV64_OpLocalAddr_0(v) + case OpLsh16x16: + return rewriteValueRISCV64_OpLsh16x16_0(v) + case OpLsh16x32: + return rewriteValueRISCV64_OpLsh16x32_0(v) + case OpLsh16x64: + return rewriteValueRISCV64_OpLsh16x64_0(v) + case OpLsh16x8: + 
return rewriteValueRISCV64_OpLsh16x8_0(v) + case OpLsh32x16: + return rewriteValueRISCV64_OpLsh32x16_0(v) + case OpLsh32x32: + return rewriteValueRISCV64_OpLsh32x32_0(v) + case OpLsh32x64: + return rewriteValueRISCV64_OpLsh32x64_0(v) + case OpLsh32x8: + return rewriteValueRISCV64_OpLsh32x8_0(v) + case OpLsh64x16: + return rewriteValueRISCV64_OpLsh64x16_0(v) + case OpLsh64x32: + return rewriteValueRISCV64_OpLsh64x32_0(v) + case OpLsh64x64: + return rewriteValueRISCV64_OpLsh64x64_0(v) + case OpLsh64x8: + return rewriteValueRISCV64_OpLsh64x8_0(v) + case OpLsh8x16: + return rewriteValueRISCV64_OpLsh8x16_0(v) + case OpLsh8x32: + return rewriteValueRISCV64_OpLsh8x32_0(v) + case OpLsh8x64: + return rewriteValueRISCV64_OpLsh8x64_0(v) + case OpLsh8x8: + return rewriteValueRISCV64_OpLsh8x8_0(v) + case OpMod16: + return rewriteValueRISCV64_OpMod16_0(v) + case OpMod16u: + return rewriteValueRISCV64_OpMod16u_0(v) + case OpMod32: + return rewriteValueRISCV64_OpMod32_0(v) + case OpMod32u: + return rewriteValueRISCV64_OpMod32u_0(v) + case OpMod64: + return rewriteValueRISCV64_OpMod64_0(v) + case OpMod64u: + return rewriteValueRISCV64_OpMod64u_0(v) + case OpMod8: + return rewriteValueRISCV64_OpMod8_0(v) + case OpMod8u: + return rewriteValueRISCV64_OpMod8u_0(v) + case OpMove: + return rewriteValueRISCV64_OpMove_0(v) + case OpMul16: + return rewriteValueRISCV64_OpMul16_0(v) + case OpMul32: + return rewriteValueRISCV64_OpMul32_0(v) + case OpMul32F: + return rewriteValueRISCV64_OpMul32F_0(v) + case OpMul64: + return rewriteValueRISCV64_OpMul64_0(v) + case OpMul64F: + return rewriteValueRISCV64_OpMul64F_0(v) + case OpMul8: + return rewriteValueRISCV64_OpMul8_0(v) + case OpNeg16: + return rewriteValueRISCV64_OpNeg16_0(v) + case OpNeg32: + return rewriteValueRISCV64_OpNeg32_0(v) + case OpNeg32F: + return rewriteValueRISCV64_OpNeg32F_0(v) + case OpNeg64: + return rewriteValueRISCV64_OpNeg64_0(v) + case OpNeg64F: + return rewriteValueRISCV64_OpNeg64F_0(v) + case OpNeg8: + return rewriteValueRISCV64_OpNeg8_0(v) + case OpNeq16: + return rewriteValueRISCV64_OpNeq16_0(v) + case OpNeq32: + return rewriteValueRISCV64_OpNeq32_0(v) + case OpNeq32F: + return rewriteValueRISCV64_OpNeq32F_0(v) + case OpNeq64: + return rewriteValueRISCV64_OpNeq64_0(v) + case OpNeq64F: + return rewriteValueRISCV64_OpNeq64F_0(v) + case OpNeq8: + return rewriteValueRISCV64_OpNeq8_0(v) + case OpNeqB: + return rewriteValueRISCV64_OpNeqB_0(v) + case OpNeqPtr: + return rewriteValueRISCV64_OpNeqPtr_0(v) + case OpNilCheck: + return rewriteValueRISCV64_OpNilCheck_0(v) + case OpNot: + return rewriteValueRISCV64_OpNot_0(v) + case OpOffPtr: + return rewriteValueRISCV64_OpOffPtr_0(v) + case OpOr16: + return rewriteValueRISCV64_OpOr16_0(v) + case OpOr32: + return rewriteValueRISCV64_OpOr32_0(v) + case OpOr64: + return rewriteValueRISCV64_OpOr64_0(v) + case OpOr8: + return rewriteValueRISCV64_OpOr8_0(v) + case OpOrB: + return rewriteValueRISCV64_OpOrB_0(v) + case OpPanicBounds: + return rewriteValueRISCV64_OpPanicBounds_0(v) + case OpRISCV64ADD: + return rewriteValueRISCV64_OpRISCV64ADD_0(v) + case OpRISCV64ADDI: + return rewriteValueRISCV64_OpRISCV64ADDI_0(v) + case OpRISCV64MOVBUload: + return rewriteValueRISCV64_OpRISCV64MOVBUload_0(v) + case OpRISCV64MOVBload: + return rewriteValueRISCV64_OpRISCV64MOVBload_0(v) + case OpRISCV64MOVBstore: + return rewriteValueRISCV64_OpRISCV64MOVBstore_0(v) + case OpRISCV64MOVDconst: + return rewriteValueRISCV64_OpRISCV64MOVDconst_0(v) + case OpRISCV64MOVDload: + return rewriteValueRISCV64_OpRISCV64MOVDload_0(v) + case 
OpRISCV64MOVDstore: + return rewriteValueRISCV64_OpRISCV64MOVDstore_0(v) + case OpRISCV64MOVHUload: + return rewriteValueRISCV64_OpRISCV64MOVHUload_0(v) + case OpRISCV64MOVHload: + return rewriteValueRISCV64_OpRISCV64MOVHload_0(v) + case OpRISCV64MOVHstore: + return rewriteValueRISCV64_OpRISCV64MOVHstore_0(v) + case OpRISCV64MOVWUload: + return rewriteValueRISCV64_OpRISCV64MOVWUload_0(v) + case OpRISCV64MOVWload: + return rewriteValueRISCV64_OpRISCV64MOVWload_0(v) + case OpRISCV64MOVWstore: + return rewriteValueRISCV64_OpRISCV64MOVWstore_0(v) + case OpRotateLeft16: + return rewriteValueRISCV64_OpRotateLeft16_0(v) + case OpRotateLeft32: + return rewriteValueRISCV64_OpRotateLeft32_0(v) + case OpRotateLeft64: + return rewriteValueRISCV64_OpRotateLeft64_0(v) + case OpRotateLeft8: + return rewriteValueRISCV64_OpRotateLeft8_0(v) + case OpRound32F: + return rewriteValueRISCV64_OpRound32F_0(v) + case OpRound64F: + return rewriteValueRISCV64_OpRound64F_0(v) + case OpRsh16Ux16: + return rewriteValueRISCV64_OpRsh16Ux16_0(v) + case OpRsh16Ux32: + return rewriteValueRISCV64_OpRsh16Ux32_0(v) + case OpRsh16Ux64: + return rewriteValueRISCV64_OpRsh16Ux64_0(v) + case OpRsh16Ux8: + return rewriteValueRISCV64_OpRsh16Ux8_0(v) + case OpRsh16x16: + return rewriteValueRISCV64_OpRsh16x16_0(v) + case OpRsh16x32: + return rewriteValueRISCV64_OpRsh16x32_0(v) + case OpRsh16x64: + return rewriteValueRISCV64_OpRsh16x64_0(v) + case OpRsh16x8: + return rewriteValueRISCV64_OpRsh16x8_0(v) + case OpRsh32Ux16: + return rewriteValueRISCV64_OpRsh32Ux16_0(v) + case OpRsh32Ux32: + return rewriteValueRISCV64_OpRsh32Ux32_0(v) + case OpRsh32Ux64: + return rewriteValueRISCV64_OpRsh32Ux64_0(v) + case OpRsh32Ux8: + return rewriteValueRISCV64_OpRsh32Ux8_0(v) + case OpRsh32x16: + return rewriteValueRISCV64_OpRsh32x16_0(v) + case OpRsh32x32: + return rewriteValueRISCV64_OpRsh32x32_0(v) + case OpRsh32x64: + return rewriteValueRISCV64_OpRsh32x64_0(v) + case OpRsh32x8: + return rewriteValueRISCV64_OpRsh32x8_0(v) + case OpRsh64Ux16: + return rewriteValueRISCV64_OpRsh64Ux16_0(v) + case OpRsh64Ux32: + return rewriteValueRISCV64_OpRsh64Ux32_0(v) + case OpRsh64Ux64: + return rewriteValueRISCV64_OpRsh64Ux64_0(v) + case OpRsh64Ux8: + return rewriteValueRISCV64_OpRsh64Ux8_0(v) + case OpRsh64x16: + return rewriteValueRISCV64_OpRsh64x16_0(v) + case OpRsh64x32: + return rewriteValueRISCV64_OpRsh64x32_0(v) + case OpRsh64x64: + return rewriteValueRISCV64_OpRsh64x64_0(v) + case OpRsh64x8: + return rewriteValueRISCV64_OpRsh64x8_0(v) + case OpRsh8Ux16: + return rewriteValueRISCV64_OpRsh8Ux16_0(v) + case OpRsh8Ux32: + return rewriteValueRISCV64_OpRsh8Ux32_0(v) + case OpRsh8Ux64: + return rewriteValueRISCV64_OpRsh8Ux64_0(v) + case OpRsh8Ux8: + return rewriteValueRISCV64_OpRsh8Ux8_0(v) + case OpRsh8x16: + return rewriteValueRISCV64_OpRsh8x16_0(v) + case OpRsh8x32: + return rewriteValueRISCV64_OpRsh8x32_0(v) + case OpRsh8x64: + return rewriteValueRISCV64_OpRsh8x64_0(v) + case OpRsh8x8: + return rewriteValueRISCV64_OpRsh8x8_0(v) + case OpSignExt16to32: + return rewriteValueRISCV64_OpSignExt16to32_0(v) + case OpSignExt16to64: + return rewriteValueRISCV64_OpSignExt16to64_0(v) + case OpSignExt32to64: + return rewriteValueRISCV64_OpSignExt32to64_0(v) + case OpSignExt8to16: + return rewriteValueRISCV64_OpSignExt8to16_0(v) + case OpSignExt8to32: + return rewriteValueRISCV64_OpSignExt8to32_0(v) + case OpSignExt8to64: + return rewriteValueRISCV64_OpSignExt8to64_0(v) + case OpSlicemask: + return rewriteValueRISCV64_OpSlicemask_0(v) + case OpSqrt: + return 
rewriteValueRISCV64_OpSqrt_0(v) + case OpStaticCall: + return rewriteValueRISCV64_OpStaticCall_0(v) + case OpStore: + return rewriteValueRISCV64_OpStore_0(v) + case OpSub16: + return rewriteValueRISCV64_OpSub16_0(v) + case OpSub32: + return rewriteValueRISCV64_OpSub32_0(v) + case OpSub32F: + return rewriteValueRISCV64_OpSub32F_0(v) + case OpSub64: + return rewriteValueRISCV64_OpSub64_0(v) + case OpSub64F: + return rewriteValueRISCV64_OpSub64F_0(v) + case OpSub8: + return rewriteValueRISCV64_OpSub8_0(v) + case OpSubPtr: + return rewriteValueRISCV64_OpSubPtr_0(v) + case OpTrunc16to8: + return rewriteValueRISCV64_OpTrunc16to8_0(v) + case OpTrunc32to16: + return rewriteValueRISCV64_OpTrunc32to16_0(v) + case OpTrunc32to8: + return rewriteValueRISCV64_OpTrunc32to8_0(v) + case OpTrunc64to16: + return rewriteValueRISCV64_OpTrunc64to16_0(v) + case OpTrunc64to32: + return rewriteValueRISCV64_OpTrunc64to32_0(v) + case OpTrunc64to8: + return rewriteValueRISCV64_OpTrunc64to8_0(v) + case OpWB: + return rewriteValueRISCV64_OpWB_0(v) + case OpXor16: + return rewriteValueRISCV64_OpXor16_0(v) + case OpXor32: + return rewriteValueRISCV64_OpXor32_0(v) + case OpXor64: + return rewriteValueRISCV64_OpXor64_0(v) + case OpXor8: + return rewriteValueRISCV64_OpXor8_0(v) + case OpZero: + return rewriteValueRISCV64_OpZero_0(v) + case OpZeroExt16to32: + return rewriteValueRISCV64_OpZeroExt16to32_0(v) + case OpZeroExt16to64: + return rewriteValueRISCV64_OpZeroExt16to64_0(v) + case OpZeroExt32to64: + return rewriteValueRISCV64_OpZeroExt32to64_0(v) + case OpZeroExt8to16: + return rewriteValueRISCV64_OpZeroExt8to16_0(v) + case OpZeroExt8to32: + return rewriteValueRISCV64_OpZeroExt8to32_0(v) + case OpZeroExt8to64: + return rewriteValueRISCV64_OpZeroExt8to64_0(v) + } + return false +} +func rewriteValueRISCV64_OpAdd16_0(v *Value) bool { + // match: (Add16 x y) + // result: (ADD x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64ADD) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpAdd32_0(v *Value) bool { + // match: (Add32 x y) + // result: (ADD x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64ADD) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpAdd32F_0(v *Value) bool { + // match: (Add32F x y) + // result: (FADDS x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64FADDS) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpAdd64_0(v *Value) bool { + // match: (Add64 x y) + // result: (ADD x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64ADD) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpAdd64F_0(v *Value) bool { + // match: (Add64F x y) + // result: (FADDD x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64FADDD) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpAdd8_0(v *Value) bool { + // match: (Add8 x y) + // result: (ADD x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64ADD) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpAddPtr_0(v *Value) bool { + // match: (AddPtr x y) + // result: (ADD x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64ADD) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpAddr_0(v *Value) bool { + // match: (Addr {sym} base) + // result: (MOVaddr {sym} base) + for { + sym := v.Aux + base := v.Args[0] + v.reset(OpRISCV64MOVaddr) + v.Aux = sym + v.AddArg(base) + return true + } +} +func 
rewriteValueRISCV64_OpAnd16_0(v *Value) bool { + // match: (And16 x y) + // result: (AND x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpAnd32_0(v *Value) bool { + // match: (And32 x y) + // result: (AND x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpAnd64_0(v *Value) bool { + // match: (And64 x y) + // result: (AND x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpAnd8_0(v *Value) bool { + // match: (And8 x y) + // result: (AND x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpAndB_0(v *Value) bool { + // match: (AndB x y) + // result: (AND x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpAvg64u_0(v *Value) bool { + b := v.Block + // match: (Avg64u x y) + // result: (ADD (ADD (SRLI [1] x) (SRLI [1] y)) (ANDI [1] (AND x y))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64ADD) + v0 := b.NewValue0(v.Pos, OpRISCV64ADD, t) + v1 := b.NewValue0(v.Pos, OpRISCV64SRLI, t) + v1.AuxInt = 1 + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpRISCV64SRLI, t) + v2.AuxInt = 1 + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + v3 := b.NewValue0(v.Pos, OpRISCV64ANDI, t) + v3.AuxInt = 1 + v4 := b.NewValue0(v.Pos, OpRISCV64AND, t) + v4.AddArg(x) + v4.AddArg(y) + v3.AddArg(v4) + v.AddArg(v3) + return true + } +} +func rewriteValueRISCV64_OpClosureCall_0(v *Value) bool { + // match: (ClosureCall [argwid] entry closure mem) + // result: (CALLclosure [argwid] entry closure mem) + for { + argwid := v.AuxInt + mem := v.Args[2] + entry := v.Args[0] + closure := v.Args[1] + v.reset(OpRISCV64CALLclosure) + v.AuxInt = argwid + v.AddArg(entry) + v.AddArg(closure) + v.AddArg(mem) + return true + } +} +func rewriteValueRISCV64_OpCom16_0(v *Value) bool { + // match: (Com16 x) + // result: (XORI [int64(-1)] x) + for { + x := v.Args[0] + v.reset(OpRISCV64XORI) + v.AuxInt = int64(-1) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpCom32_0(v *Value) bool { + // match: (Com32 x) + // result: (XORI [int64(-1)] x) + for { + x := v.Args[0] + v.reset(OpRISCV64XORI) + v.AuxInt = int64(-1) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpCom64_0(v *Value) bool { + // match: (Com64 x) + // result: (XORI [int64(-1)] x) + for { + x := v.Args[0] + v.reset(OpRISCV64XORI) + v.AuxInt = int64(-1) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpCom8_0(v *Value) bool { + // match: (Com8 x) + // result: (XORI [int64(-1)] x) + for { + x := v.Args[0] + v.reset(OpRISCV64XORI) + v.AuxInt = int64(-1) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpConst16_0(v *Value) bool { + // match: (Const16 [val]) + // result: (MOVHconst [val]) + for { + val := v.AuxInt + v.reset(OpRISCV64MOVHconst) + v.AuxInt = val + return true + } +} +func rewriteValueRISCV64_OpConst32_0(v *Value) bool { + // match: (Const32 [val]) + // result: (MOVWconst [val]) + for { + val := v.AuxInt + v.reset(OpRISCV64MOVWconst) + v.AuxInt = val + return true + } +} +func rewriteValueRISCV64_OpConst32F_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Const32F [val]) + // result: 
(FMVSX (MOVWconst [int64(int32(math.Float32bits(float32(math.Float64frombits(uint64(val))))))])) + for { + val := v.AuxInt + v.reset(OpRISCV64FMVSX) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32) + v0.AuxInt = int64(int32(math.Float32bits(float32(math.Float64frombits(uint64(val)))))) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpConst64_0(v *Value) bool { + // match: (Const64 [val]) + // result: (MOVDconst [val]) + for { + val := v.AuxInt + v.reset(OpRISCV64MOVDconst) + v.AuxInt = val + return true + } +} +func rewriteValueRISCV64_OpConst64F_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Const64F [val]) + // result: (FMVDX (MOVDconst [val])) + for { + val := v.AuxInt + v.reset(OpRISCV64FMVDX) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = val + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpConst8_0(v *Value) bool { + // match: (Const8 [val]) + // result: (MOVBconst [val]) + for { + val := v.AuxInt + v.reset(OpRISCV64MOVBconst) + v.AuxInt = val + return true + } +} +func rewriteValueRISCV64_OpConstBool_0(v *Value) bool { + // match: (ConstBool [b]) + // result: (MOVBconst [b]) + for { + b := v.AuxInt + v.reset(OpRISCV64MOVBconst) + v.AuxInt = b + return true + } +} +func rewriteValueRISCV64_OpConstNil_0(v *Value) bool { + // match: (ConstNil) + // result: (MOVDconst [0]) + for { + v.reset(OpRISCV64MOVDconst) + v.AuxInt = 0 + return true + } +} +func rewriteValueRISCV64_OpConvert_0(v *Value) bool { + // match: (Convert x mem) + // result: (MOVconvert x mem) + for { + mem := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64MOVconvert) + v.AddArg(x) + v.AddArg(mem) + return true + } +} +func rewriteValueRISCV64_OpCvt32Fto32_0(v *Value) bool { + // match: (Cvt32Fto32 x) + // result: (FCVTWS x) + for { + x := v.Args[0] + v.reset(OpRISCV64FCVTWS) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpCvt32Fto64_0(v *Value) bool { + // match: (Cvt32Fto64 x) + // result: (FCVTLS x) + for { + x := v.Args[0] + v.reset(OpRISCV64FCVTLS) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpCvt32Fto64F_0(v *Value) bool { + // match: (Cvt32Fto64F x) + // result: (FCVTDS x) + for { + x := v.Args[0] + v.reset(OpRISCV64FCVTDS) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpCvt32to32F_0(v *Value) bool { + // match: (Cvt32to32F x) + // result: (FCVTSW x) + for { + x := v.Args[0] + v.reset(OpRISCV64FCVTSW) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpCvt32to64F_0(v *Value) bool { + // match: (Cvt32to64F x) + // result: (FCVTDW x) + for { + x := v.Args[0] + v.reset(OpRISCV64FCVTDW) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpCvt64Fto32_0(v *Value) bool { + // match: (Cvt64Fto32 x) + // result: (FCVTWD x) + for { + x := v.Args[0] + v.reset(OpRISCV64FCVTWD) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpCvt64Fto32F_0(v *Value) bool { + // match: (Cvt64Fto32F x) + // result: (FCVTSD x) + for { + x := v.Args[0] + v.reset(OpRISCV64FCVTSD) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpCvt64Fto64_0(v *Value) bool { + // match: (Cvt64Fto64 x) + // result: (FCVTLD x) + for { + x := v.Args[0] + v.reset(OpRISCV64FCVTLD) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpCvt64to32F_0(v *Value) bool { + // match: (Cvt64to32F x) + // result: (FCVTSL x) + for { + x := v.Args[0] + v.reset(OpRISCV64FCVTSL) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpCvt64to64F_0(v *Value) bool { + // match: 
(Cvt64to64F x) + // result: (FCVTDL x) + for { + x := v.Args[0] + v.reset(OpRISCV64FCVTDL) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpDiv16_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16 x y) + // result: (DIVW (SignExt16to32 x) (SignExt16to32 y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64DIVW) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpDiv16u_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16u x y) + // result: (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64DIVUW) + v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpDiv32_0(v *Value) bool { + // match: (Div32 x y) + // result: (DIVW x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64DIVW) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpDiv32F_0(v *Value) bool { + // match: (Div32F x y) + // result: (FDIVS x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64FDIVS) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpDiv32u_0(v *Value) bool { + // match: (Div32u x y) + // result: (DIVUW x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64DIVUW) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpDiv64_0(v *Value) bool { + // match: (Div64 x y) + // result: (DIV x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64DIV) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpDiv64F_0(v *Value) bool { + // match: (Div64F x y) + // result: (FDIVD x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64FDIVD) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpDiv64u_0(v *Value) bool { + // match: (Div64u x y) + // result: (DIVU x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64DIVU) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpDiv8_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8 x y) + // result: (DIVW (SignExt8to32 x) (SignExt8to32 y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64DIVW) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpDiv8u_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8u x y) + // result: (DIVUW (ZeroExt8to32 x) (ZeroExt8to32 y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64DIVUW) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpEq16_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq16 x y) + // result: (SEQZ (ZeroExt16to64 (SUB x y))) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SEQZ) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) + v1.AddArg(x) + v1.AddArg(y) + 
v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpEq32_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq32 x y) + // result: (SEQZ (ZeroExt32to64 (SUB x y))) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SEQZ) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpEq32F_0(v *Value) bool { + // match: (Eq32F x y) + // result: (FEQS x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64FEQS) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpEq64_0(v *Value) bool { + b := v.Block + // match: (Eq64 x y) + // result: (SEQZ (SUB x y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SEQZ) + v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpEq64F_0(v *Value) bool { + // match: (Eq64F x y) + // result: (FEQD x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64FEQD) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpEq8_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq8 x y) + // result: (SEQZ (ZeroExt8to64 (SUB x y))) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SEQZ) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpEqB_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (EqB x y) + // result: (XORI [1] (XOR x y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64XORI) + v.AuxInt = 1 + v0 := b.NewValue0(v.Pos, OpRISCV64XOR, typ.Bool) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpEqPtr_0(v *Value) bool { + b := v.Block + // match: (EqPtr x y) + // result: (SEQZ (SUB x y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SEQZ) + v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpGeq16_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Geq16 x y) + // result: (Not (Less16 x y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess16, typ.Bool) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpGeq16U_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Geq16U x y) + // result: (Not (Less16U x y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess16U, typ.Bool) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpGeq32_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Geq32 x y) + // result: (Not (Less32 x y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpGeq32F_0(v *Value) bool { + // match: (Geq32F x y) + // result: (FLES y x) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64FLES) + v.AddArg(y) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpGeq32U_0(v *Value) bool { + b := v.Block + typ 
:= &b.Func.Config.Types + // match: (Geq32U x y) + // result: (Not (Less32U x y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpGeq64_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Geq64 x y) + // result: (Not (Less64 x y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess64, typ.Bool) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpGeq64F_0(v *Value) bool { + // match: (Geq64F x y) + // result: (FLED y x) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64FLED) + v.AddArg(y) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpGeq64U_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Geq64U x y) + // result: (Not (Less64U x y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess64U, typ.Bool) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpGeq8_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Geq8 x y) + // result: (Not (Less8 x y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess8, typ.Bool) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpGeq8U_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Geq8U x y) + // result: (Not (Less8U x y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess8U, typ.Bool) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpGetCallerPC_0(v *Value) bool { + // match: (GetCallerPC) + // result: (LoweredGetCallerPC) + for { + v.reset(OpRISCV64LoweredGetCallerPC) + return true + } +} +func rewriteValueRISCV64_OpGetCallerSP_0(v *Value) bool { + // match: (GetCallerSP) + // result: (LoweredGetCallerSP) + for { + v.reset(OpRISCV64LoweredGetCallerSP) + return true + } +} +func rewriteValueRISCV64_OpGetClosurePtr_0(v *Value) bool { + // match: (GetClosurePtr) + // result: (LoweredGetClosurePtr) + for { + v.reset(OpRISCV64LoweredGetClosurePtr) + return true + } +} +func rewriteValueRISCV64_OpGreater16_0(v *Value) bool { + // match: (Greater16 x y) + // result: (Less16 y x) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpLess16) + v.AddArg(y) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpGreater16U_0(v *Value) bool { + // match: (Greater16U x y) + // result: (Less16U y x) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpLess16U) + v.AddArg(y) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpGreater32_0(v *Value) bool { + // match: (Greater32 x y) + // result: (Less32 y x) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpLess32) + v.AddArg(y) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpGreater32F_0(v *Value) bool { + // match: (Greater32F x y) + // result: (FLTS y x) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64FLTS) + v.AddArg(y) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpGreater32U_0(v *Value) bool { + // match: (Greater32U x y) + // result: (Less32U y x) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpLess32U) + v.AddArg(y) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpGreater64_0(v *Value) bool { + // match: (Greater64 x y) 
+ // result: (Less64 y x) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpLess64) + v.AddArg(y) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpGreater64F_0(v *Value) bool { + // match: (Greater64F x y) + // result: (FLTD y x) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64FLTD) + v.AddArg(y) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpGreater64U_0(v *Value) bool { + // match: (Greater64U x y) + // result: (Less64U y x) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpLess64U) + v.AddArg(y) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpGreater8_0(v *Value) bool { + // match: (Greater8 x y) + // result: (Less8 y x) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpLess8) + v.AddArg(y) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpGreater8U_0(v *Value) bool { + // match: (Greater8U x y) + // result: (Less8U y x) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpLess8U) + v.AddArg(y) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpHmul32_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul32 x y) + // result: (SRAI [32] (MUL (SignExt32to64 x) (SignExt32to64 y))) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SRAI) + v.AuxInt = 32 + v0 := b.NewValue0(v.Pos, OpRISCV64MUL, typ.Int64) + v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpHmul32u_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul32u x y) + // result: (SRLI [32] (MUL (ZeroExt32to64 x) (ZeroExt32to64 y))) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SRLI) + v.AuxInt = 32 + v0 := b.NewValue0(v.Pos, OpRISCV64MUL, typ.Int64) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg(v2) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpHmul64_0(v *Value) bool { + // match: (Hmul64 x y) + // result: (MULH x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64MULH) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpHmul64u_0(v *Value) bool { + // match: (Hmul64u x y) + // result: (MULHU x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64MULHU) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpInterCall_0(v *Value) bool { + // match: (InterCall [argwid] entry mem) + // result: (CALLinter [argwid] entry mem) + for { + argwid := v.AuxInt + mem := v.Args[1] + entry := v.Args[0] + v.reset(OpRISCV64CALLinter) + v.AuxInt = argwid + v.AddArg(entry) + v.AddArg(mem) + return true + } +} +func rewriteValueRISCV64_OpIsInBounds_0(v *Value) bool { + // match: (IsInBounds idx len) + // result: (Less64U idx len) + for { + len := v.Args[1] + idx := v.Args[0] + v.reset(OpLess64U) + v.AddArg(idx) + v.AddArg(len) + return true + } +} +func rewriteValueRISCV64_OpIsNonNil_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (IsNonNil p) + // result: (NeqPtr (MOVDconst) p) + for { + p := v.Args[0] + v.reset(OpNeqPtr) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v.AddArg(v0) + v.AddArg(p) + return true + } +} +func rewriteValueRISCV64_OpIsSliceInBounds_0(v *Value) bool { + // match: (IsSliceInBounds idx len) + // result: (Leq64U idx len) + 
for { + len := v.Args[1] + idx := v.Args[0] + v.reset(OpLeq64U) + v.AddArg(idx) + v.AddArg(len) + return true + } +} +func rewriteValueRISCV64_OpLeq16_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq16 x y) + // result: (Not (Less16 y x)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess16, typ.Bool) + v0.AddArg(y) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpLeq16U_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq16U x y) + // result: (Not (Less16U y x)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess16U, typ.Bool) + v0.AddArg(y) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpLeq32_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq32 x y) + // result: (Not (Less32 y x)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool) + v0.AddArg(y) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpLeq32F_0(v *Value) bool { + // match: (Leq32F x y) + // result: (FLES x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64FLES) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpLeq32U_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq32U x y) + // result: (Not (Less32U y x)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool) + v0.AddArg(y) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpLeq64_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq64 x y) + // result: (Not (Less64 y x)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess64, typ.Bool) + v0.AddArg(y) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpLeq64F_0(v *Value) bool { + // match: (Leq64F x y) + // result: (FLED x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64FLED) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpLeq64U_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq64U x y) + // result: (Not (Less64U y x)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess64U, typ.Bool) + v0.AddArg(y) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpLeq8_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq8 x y) + // result: (Not (Less8 y x)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess8, typ.Bool) + v0.AddArg(y) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpLeq8U_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq8U x y) + // result: (Not (Less8U y x)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpNot) + v0 := b.NewValue0(v.Pos, OpLess8U, typ.Bool) + v0.AddArg(y) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpLess16_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Less16 x y) + // result: (SLT (SignExt16to64 x) (SignExt16to64 y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SLT) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpSignExt16to64, 
typ.Int64) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpLess16U_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Less16U x y) + // result: (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SLTU) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpLess32_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Less32 x y) + // result: (SLT (SignExt32to64 x) (SignExt32to64 y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SLT) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpLess32F_0(v *Value) bool { + // match: (Less32F x y) + // result: (FLTS x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64FLTS) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpLess32U_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Less32U x y) + // result: (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SLTU) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpLess64_0(v *Value) bool { + // match: (Less64 x y) + // result: (SLT x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SLT) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpLess64F_0(v *Value) bool { + // match: (Less64F x y) + // result: (FLTD x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64FLTD) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpLess64U_0(v *Value) bool { + // match: (Less64U x y) + // result: (SLTU x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SLTU) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpLess8_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Less8 x y) + // result: (SLT (SignExt8to64 x) (SignExt8to64 y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SLT) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpLess8U_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Less8U x y) + // result: (SLTU (ZeroExt8to64 x) (ZeroExt8to64 y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SLTU) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpLoad_0(v *Value) bool { + // match: (Load ptr mem) + // cond: t.IsBoolean() + // result: (MOVBUload ptr mem) + for { + t := v.Type + mem := v.Args[1] + ptr := v.Args[0] + if !(t.IsBoolean()) { + break + } + v.reset(OpRISCV64MOVBUload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (Load ptr mem) + // cond: ( is8BitInt(t) && isSigned(t)) + // result: (MOVBload ptr mem) + for { + t 
:= v.Type + mem := v.Args[1] + ptr := v.Args[0] + if !(is8BitInt(t) && isSigned(t)) { + break + } + v.reset(OpRISCV64MOVBload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (Load ptr mem) + // cond: ( is8BitInt(t) && !isSigned(t)) + // result: (MOVBUload ptr mem) + for { + t := v.Type + mem := v.Args[1] + ptr := v.Args[0] + if !(is8BitInt(t) && !isSigned(t)) { + break + } + v.reset(OpRISCV64MOVBUload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (Load ptr mem) + // cond: (is16BitInt(t) && isSigned(t)) + // result: (MOVHload ptr mem) + for { + t := v.Type + mem := v.Args[1] + ptr := v.Args[0] + if !(is16BitInt(t) && isSigned(t)) { + break + } + v.reset(OpRISCV64MOVHload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (Load ptr mem) + // cond: (is16BitInt(t) && !isSigned(t)) + // result: (MOVHUload ptr mem) + for { + t := v.Type + mem := v.Args[1] + ptr := v.Args[0] + if !(is16BitInt(t) && !isSigned(t)) { + break + } + v.reset(OpRISCV64MOVHUload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (Load ptr mem) + // cond: (is32BitInt(t) && isSigned(t)) + // result: (MOVWload ptr mem) + for { + t := v.Type + mem := v.Args[1] + ptr := v.Args[0] + if !(is32BitInt(t) && isSigned(t)) { + break + } + v.reset(OpRISCV64MOVWload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (Load ptr mem) + // cond: (is32BitInt(t) && !isSigned(t)) + // result: (MOVWUload ptr mem) + for { + t := v.Type + mem := v.Args[1] + ptr := v.Args[0] + if !(is32BitInt(t) && !isSigned(t)) { + break + } + v.reset(OpRISCV64MOVWUload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (Load ptr mem) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (MOVDload ptr mem) + for { + t := v.Type + mem := v.Args[1] + ptr := v.Args[0] + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpRISCV64MOVDload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitFloat(t) + // result: (FMOVWload ptr mem) + for { + t := v.Type + mem := v.Args[1] + ptr := v.Args[0] + if !(is32BitFloat(t)) { + break + } + v.reset(OpRISCV64FMOVWload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + // match: (Load ptr mem) + // cond: is64BitFloat(t) + // result: (FMOVDload ptr mem) + for { + t := v.Type + mem := v.Args[1] + ptr := v.Args[0] + if !(is64BitFloat(t)) { + break + } + v.reset(OpRISCV64FMOVDload) + v.AddArg(ptr) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueRISCV64_OpLocalAddr_0(v *Value) bool { + // match: (LocalAddr {sym} base _) + // result: (MOVaddr {sym} base) + for { + sym := v.Aux + _ = v.Args[1] + base := v.Args[0] + v.reset(OpRISCV64MOVaddr) + v.Aux = sym + v.AddArg(base) + return true + } +} +func rewriteValueRISCV64_OpLsh16x16_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x16 x y) + // result: (AND (SLL x y) (Neg16 (SLTIU [64] (ZeroExt16to64 y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpNeg16, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = 64 + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpLsh16x32_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x32 x y) + // result: (AND (SLL x y) (Neg16 (SLTIU [64] (ZeroExt32to64 y)))) + for { + 
t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpNeg16, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = 64 + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpLsh16x64_0(v *Value) bool { + b := v.Block + // match: (Lsh16x64 x y) + // result: (AND (SLL x y) (Neg16 (SLTIU [64] y))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpNeg16, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = 64 + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpLsh16x8_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x8 x y) + // result: (AND (SLL x y) (Neg16 (SLTIU [64] (ZeroExt8to64 y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpNeg16, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = 64 + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpLsh32x16_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x16 x y) + // result: (AND (SLL x y) (Neg32 (SLTIU [64] (ZeroExt16to64 y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpNeg32, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = 64 + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpLsh32x32_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x32 x y) + // result: (AND (SLL x y) (Neg32 (SLTIU [64] (ZeroExt32to64 y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpNeg32, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = 64 + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpLsh32x64_0(v *Value) bool { + b := v.Block + // match: (Lsh32x64 x y) + // result: (AND (SLL x y) (Neg32 (SLTIU [64] y))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpNeg32, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = 64 + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpLsh32x8_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x8 x y) + // result: (AND (SLL x y) (Neg32 (SLTIU [64] (ZeroExt8to64 y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + 
v1 := b.NewValue0(v.Pos, OpNeg32, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = 64 + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpLsh64x16_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x16 x y) + // result: (AND (SLL x y) (Neg64 (SLTIU [64] (ZeroExt16to64 y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpNeg64, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = 64 + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpLsh64x32_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x32 x y) + // result: (AND (SLL x y) (Neg64 (SLTIU [64] (ZeroExt32to64 y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpNeg64, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = 64 + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpLsh64x64_0(v *Value) bool { + b := v.Block + // match: (Lsh64x64 x y) + // result: (AND (SLL x y) (Neg64 (SLTIU [64] y))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpNeg64, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = 64 + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpLsh64x8_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x8 x y) + // result: (AND (SLL x y) (Neg64 (SLTIU [64] (ZeroExt8to64 y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpNeg64, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = 64 + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpLsh8x16_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x16 x y) + // result: (AND (SLL x y) (Neg8 (SLTIU [64] (ZeroExt16to64 y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpNeg8, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = 64 + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpLsh8x32_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x32 x y) + // result: (AND (SLL x y) (Neg8 (SLTIU [64] (ZeroExt32to64 y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpNeg8, t) + v2 := 
b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = 64 + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpLsh8x64_0(v *Value) bool { + b := v.Block + // match: (Lsh8x64 x y) + // result: (AND (SLL x y) (Neg8 (SLTIU [64] y))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpNeg8, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = 64 + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpLsh8x8_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x8 x y) + // result: (AND (SLL x y) (Neg8 (SLTIU [64] (ZeroExt8to64 y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpNeg8, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = 64 + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpMod16_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16 x y) + // result: (REMW (SignExt16to32 x) (SignExt16to32 y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64REMW) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpMod16u_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16u x y) + // result: (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64REMUW) + v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpMod32_0(v *Value) bool { + // match: (Mod32 x y) + // result: (REMW x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64REMW) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpMod32u_0(v *Value) bool { + // match: (Mod32u x y) + // result: (REMUW x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64REMUW) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpMod64_0(v *Value) bool { + // match: (Mod64 x y) + // result: (REM x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64REM) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpMod64u_0(v *Value) bool { + // match: (Mod64u x y) + // result: (REMU x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64REMU) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpMod8_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8 x y) + // result: (REMW (SignExt8to32 x) (SignExt8to32 y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64REMW) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpMod8u_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types 
+ // match: (Mod8u x y) + // result: (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64REMUW) + v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpMove_0(v *Value) bool { + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Move [0] _ _ mem) + // result: mem + for { + if v.AuxInt != 0 { + break + } + mem := v.Args[2] + v.reset(OpCopy) + v.Type = mem.Type + v.AddArg(mem) + return true + } + // match: (Move [1] dst src mem) + // result: (MOVBstore dst (MOVBload src mem) mem) + for { + if v.AuxInt != 1 { + break + } + mem := v.Args[2] + dst := v.Args[0] + src := v.Args[1] + v.reset(OpRISCV64MOVBstore) + v.AddArg(dst) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8) + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Move [2] dst src mem) + // result: (MOVHstore dst (MOVHload src mem) mem) + for { + if v.AuxInt != 2 { + break + } + mem := v.Args[2] + dst := v.Args[0] + src := v.Args[1] + v.reset(OpRISCV64MOVHstore) + v.AddArg(dst) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16) + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Move [4] dst src mem) + // result: (MOVWstore dst (MOVWload src mem) mem) + for { + if v.AuxInt != 4 { + break + } + mem := v.Args[2] + dst := v.Args[0] + src := v.Args[1] + v.reset(OpRISCV64MOVWstore) + v.AddArg(dst) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32) + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Move [8] dst src mem) + // result: (MOVDstore dst (MOVDload src mem) mem) + for { + if v.AuxInt != 8 { + break + } + mem := v.Args[2] + dst := v.Args[0] + src := v.Args[1] + v.reset(OpRISCV64MOVDstore) + v.AddArg(dst) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64) + v0.AddArg(src) + v0.AddArg(mem) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Move [s] {t} dst src mem) + // result: (LoweredMove [t.(*types.Type).Alignment()] dst src (ADDI [s-moveSize(t.(*types.Type).Alignment(), config)] src) mem) + for { + s := v.AuxInt + t := v.Aux + mem := v.Args[2] + dst := v.Args[0] + src := v.Args[1] + v.reset(OpRISCV64LoweredMove) + v.AuxInt = t.(*types.Type).Alignment() + v.AddArg(dst) + v.AddArg(src) + v0 := b.NewValue0(v.Pos, OpRISCV64ADDI, src.Type) + v0.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config) + v0.AddArg(src) + v.AddArg(v0) + v.AddArg(mem) + return true + } +} +func rewriteValueRISCV64_OpMul16_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Mul16 x y) + // result: (MULW (SignExt16to32 x) (SignExt16to32 y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64MULW) + v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpMul32_0(v *Value) bool { + // match: (Mul32 x y) + // result: (MULW x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64MULW) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpMul32F_0(v *Value) bool { + // match: (Mul32F x y) + // result: (FMULS x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64FMULS) + v.AddArg(x) + v.AddArg(y) + return true + } +} 
+func rewriteValueRISCV64_OpMul64_0(v *Value) bool { + // match: (Mul64 x y) + // result: (MUL x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64MUL) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpMul64F_0(v *Value) bool { + // match: (Mul64F x y) + // result: (FMULD x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64FMULD) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpMul8_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Mul8 x y) + // result: (MULW (SignExt8to32 x) (SignExt8to32 y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64MULW) + v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32) + v1.AddArg(y) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpNeg16_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Neg16 x) + // result: (SUB (MOVHconst) x) + for { + x := v.Args[0] + v.reset(OpRISCV64SUB) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16) + v.AddArg(v0) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpNeg32_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Neg32 x) + // result: (SUB (MOVWconst) x) + for { + x := v.Args[0] + v.reset(OpRISCV64SUB) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32) + v.AddArg(v0) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpNeg32F_0(v *Value) bool { + // match: (Neg32F x) + // result: (FNEGS x) + for { + x := v.Args[0] + v.reset(OpRISCV64FNEGS) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpNeg64_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Neg64 x) + // result: (SUB (MOVDconst) x) + for { + x := v.Args[0] + v.reset(OpRISCV64SUB) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v.AddArg(v0) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpNeg64F_0(v *Value) bool { + // match: (Neg64F x) + // result: (FNEGD x) + for { + x := v.Args[0] + v.reset(OpRISCV64FNEGD) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpNeg8_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Neg8 x) + // result: (SUB (MOVBconst) x) + for { + x := v.Args[0] + v.reset(OpRISCV64SUB) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8) + v.AddArg(v0) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpNeq16_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq16 x y) + // result: (SNEZ (ZeroExt16to64 (SUB x y))) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SNEZ) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpNeq32_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq32 x y) + // result: (SNEZ (ZeroExt32to64 (SUB x y))) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SNEZ) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpNeq32F_0(v *Value) bool { + // match: (Neq32F x y) + // result: (FNES x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64FNES) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func 
rewriteValueRISCV64_OpNeq64_0(v *Value) bool { + b := v.Block + // match: (Neq64 x y) + // result: (SNEZ (SUB x y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SNEZ) + v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpNeq64F_0(v *Value) bool { + // match: (Neq64F x y) + // result: (FNED x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64FNED) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpNeq8_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq8 x y) + // result: (SNEZ (ZeroExt8to64 (SUB x y))) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SNEZ) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) + v1.AddArg(x) + v1.AddArg(y) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpNeqB_0(v *Value) bool { + // match: (NeqB x y) + // result: (XOR x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64XOR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpNeqPtr_0(v *Value) bool { + b := v.Block + // match: (NeqPtr x y) + // result: (SNEZ (SUB x y)) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SNEZ) + v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpNilCheck_0(v *Value) bool { + // match: (NilCheck ptr mem) + // result: (LoweredNilCheck ptr mem) + for { + mem := v.Args[1] + ptr := v.Args[0] + v.reset(OpRISCV64LoweredNilCheck) + v.AddArg(ptr) + v.AddArg(mem) + return true + } +} +func rewriteValueRISCV64_OpNot_0(v *Value) bool { + // match: (Not x) + // result: (XORI [1] x) + for { + x := v.Args[0] + v.reset(OpRISCV64XORI) + v.AuxInt = 1 + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpOffPtr_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (OffPtr [off] ptr:(SP)) + // result: (MOVaddr [off] ptr) + for { + off := v.AuxInt + ptr := v.Args[0] + if ptr.Op != OpSP { + break + } + v.reset(OpRISCV64MOVaddr) + v.AuxInt = off + v.AddArg(ptr) + return true + } + // match: (OffPtr [off] ptr) + // cond: is32Bit(off) + // result: (ADDI [off] ptr) + for { + off := v.AuxInt + ptr := v.Args[0] + if !(is32Bit(off)) { + break + } + v.reset(OpRISCV64ADDI) + v.AuxInt = off + v.AddArg(ptr) + return true + } + // match: (OffPtr [off] ptr) + // result: (ADD (MOVDconst [off]) ptr) + for { + off := v.AuxInt + ptr := v.Args[0] + v.reset(OpRISCV64ADD) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = off + v.AddArg(v0) + v.AddArg(ptr) + return true + } +} +func rewriteValueRISCV64_OpOr16_0(v *Value) bool { + // match: (Or16 x y) + // result: (OR x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64OR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpOr32_0(v *Value) bool { + // match: (Or32 x y) + // result: (OR x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64OR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpOr64_0(v *Value) bool { + // match: (Or64 x y) + // result: (OR x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64OR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpOr8_0(v *Value) bool { + // match: (Or8 x y) + // result: (OR x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64OR) + 
v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpOrB_0(v *Value) bool { + // match: (OrB x y) + // result: (OR x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64OR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpPanicBounds_0(v *Value) bool { + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 0 + // result: (LoweredPanicBoundsA [kind] x y mem) + for { + kind := v.AuxInt + mem := v.Args[2] + x := v.Args[0] + y := v.Args[1] + if !(boundsABI(kind) == 0) { + break + } + v.reset(OpRISCV64LoweredPanicBoundsA) + v.AuxInt = kind + v.AddArg(x) + v.AddArg(y) + v.AddArg(mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 1 + // result: (LoweredPanicBoundsB [kind] x y mem) + for { + kind := v.AuxInt + mem := v.Args[2] + x := v.Args[0] + y := v.Args[1] + if !(boundsABI(kind) == 1) { + break + } + v.reset(OpRISCV64LoweredPanicBoundsB) + v.AuxInt = kind + v.AddArg(x) + v.AddArg(y) + v.AddArg(mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 2 + // result: (LoweredPanicBoundsC [kind] x y mem) + for { + kind := v.AuxInt + mem := v.Args[2] + x := v.Args[0] + y := v.Args[1] + if !(boundsABI(kind) == 2) { + break + } + v.reset(OpRISCV64LoweredPanicBoundsC) + v.AuxInt = kind + v.AddArg(x) + v.AddArg(y) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64ADD_0(v *Value) bool { + // match: (ADD (MOVDconst [off]) ptr) + // cond: is32Bit(off) + // result: (ADDI [off] ptr) + for { + ptr := v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpRISCV64MOVDconst { + break + } + off := v_0.AuxInt + if !(is32Bit(off)) { + break + } + v.reset(OpRISCV64ADDI) + v.AuxInt = off + v.AddArg(ptr) + return true + } + // match: (ADD ptr (MOVDconst [off])) + // cond: is32Bit(off) + // result: (ADDI [off] ptr) + for { + _ = v.Args[1] + ptr := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpRISCV64MOVDconst { + break + } + off := v_1.AuxInt + if !(is32Bit(off)) { + break + } + v.reset(OpRISCV64ADDI) + v.AuxInt = off + v.AddArg(ptr) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64ADDI_0(v *Value) bool { + // match: (ADDI [c] (MOVaddr [d] {s} x)) + // cond: is32Bit(c+d) + // result: (MOVaddr [c+d] {s} x) + for { + c := v.AuxInt + v_0 := v.Args[0] + if v_0.Op != OpRISCV64MOVaddr { + break + } + d := v_0.AuxInt + s := v_0.Aux + x := v_0.Args[0] + if !(is32Bit(c + d)) { + break + } + v.reset(OpRISCV64MOVaddr) + v.AuxInt = c + d + v.Aux = s + v.AddArg(x) + return true + } + // match: (ADDI [0] x) + // result: x + for { + if v.AuxInt != 0 { + break + } + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVBUload_0(v *Value) bool { + // match: (MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + mem := v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpRISCV64MOVBUload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVBUload [off1] {sym} (ADDI [off2] base) mem) + // cond: is32Bit(off1+off2) + // result: (MOVBUload [off1+off2] 
{sym} base mem) + for { + off1 := v.AuxInt + sym := v.Aux + mem := v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := v_0.AuxInt + base := v_0.Args[0] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpRISCV64MOVBUload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(base) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVBload_0(v *Value) bool { + // match: (MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + mem := v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpRISCV64MOVBload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVBload [off1] {sym} (ADDI [off2] base) mem) + // cond: is32Bit(off1+off2) + // result: (MOVBload [off1+off2] {sym} base mem) + for { + off1 := v.AuxInt + sym := v.Aux + mem := v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := v_0.AuxInt + base := v_0.Args[0] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpRISCV64MOVBload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(base) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVBstore_0(v *Value) bool { + // match: (MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + mem := v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpRISCV64MOVBstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVBstore [off1] {sym} (ADDI [off2] base) val mem) + // cond: is32Bit(off1+off2) + // result: (MOVBstore [off1+off2] {sym} base val mem) + for { + off1 := v.AuxInt + sym := v.Aux + mem := v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := v_0.AuxInt + base := v_0.Args[0] + val := v.Args[1] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpRISCV64MOVBstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVDconst_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (MOVDconst [c]) + // cond: !is32Bit(c) && int32(c) < 0 + // result: (ADD (SLLI [32] (MOVDconst [c>>32+1])) (MOVDconst [int64(int32(c))])) + for { + t := v.Type + c := v.AuxInt + if !(!is32Bit(c) && int32(c) < 0) { + break + } + v.reset(OpRISCV64ADD) + v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) + v0.AuxInt = 32 + v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v1.AuxInt = c>>32 + 1 + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v2.AuxInt = int64(int32(c)) + v.AddArg(v2) + return true + } + // match: (MOVDconst [c]) + // cond: !is32Bit(c) && int32(c) >= 0 + // result: (ADD (SLLI [32] (MOVDconst [c>>32+0])) (MOVDconst [int64(int32(c))])) + for { 
+ t := v.Type + c := v.AuxInt + if !(!is32Bit(c) && int32(c) >= 0) { + break + } + v.reset(OpRISCV64ADD) + v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) + v0.AuxInt = 32 + v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v1.AuxInt = c>>32 + 0 + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v2.AuxInt = int64(int32(c)) + v.AddArg(v2) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVDload_0(v *Value) bool { + // match: (MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + mem := v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpRISCV64MOVDload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVDload [off1] {sym} (ADDI [off2] base) mem) + // cond: is32Bit(off1+off2) + // result: (MOVDload [off1+off2] {sym} base mem) + for { + off1 := v.AuxInt + sym := v.Aux + mem := v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := v_0.AuxInt + base := v_0.Args[0] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpRISCV64MOVDload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(base) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVDstore_0(v *Value) bool { + // match: (MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + mem := v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpRISCV64MOVDstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVDstore [off1] {sym} (ADDI [off2] base) val mem) + // cond: is32Bit(off1+off2) + // result: (MOVDstore [off1+off2] {sym} base val mem) + for { + off1 := v.AuxInt + sym := v.Aux + mem := v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := v_0.AuxInt + base := v_0.Args[0] + val := v.Args[1] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpRISCV64MOVDstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVHUload_0(v *Value) bool { + // match: (MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + mem := v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpRISCV64MOVHUload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVHUload [off1] {sym} (ADDI [off2] base) mem) + // cond: is32Bit(off1+off2) + // result: (MOVHUload 
[off1+off2] {sym} base mem) + for { + off1 := v.AuxInt + sym := v.Aux + mem := v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := v_0.AuxInt + base := v_0.Args[0] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpRISCV64MOVHUload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(base) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVHload_0(v *Value) bool { + // match: (MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + mem := v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpRISCV64MOVHload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVHload [off1] {sym} (ADDI [off2] base) mem) + // cond: is32Bit(off1+off2) + // result: (MOVHload [off1+off2] {sym} base mem) + for { + off1 := v.AuxInt + sym := v.Aux + mem := v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := v_0.AuxInt + base := v_0.Args[0] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpRISCV64MOVHload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(base) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVHstore_0(v *Value) bool { + // match: (MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + mem := v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpRISCV64MOVHstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVHstore [off1] {sym} (ADDI [off2] base) val mem) + // cond: is32Bit(off1+off2) + // result: (MOVHstore [off1+off2] {sym} base val mem) + for { + off1 := v.AuxInt + sym := v.Aux + mem := v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := v_0.AuxInt + base := v_0.Args[0] + val := v.Args[1] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpRISCV64MOVHstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVWUload_0(v *Value) bool { + // match: (MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + mem := v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpRISCV64MOVWUload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVWUload [off1] {sym} (ADDI [off2] base) mem) + // cond: is32Bit(off1+off2) + // result: (MOVWUload [off1+off2] {sym} base mem) + for { + off1 := 
v.AuxInt + sym := v.Aux + mem := v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := v_0.AuxInt + base := v_0.Args[0] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpRISCV64MOVWUload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(base) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVWload_0(v *Value) bool { + // match: (MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + mem := v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpRISCV64MOVWload) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(mem) + return true + } + // match: (MOVWload [off1] {sym} (ADDI [off2] base) mem) + // cond: is32Bit(off1+off2) + // result: (MOVWload [off1+off2] {sym} base mem) + for { + off1 := v.AuxInt + sym := v.Aux + mem := v.Args[1] + v_0 := v.Args[0] + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := v_0.AuxInt + base := v_0.Args[0] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpRISCV64MOVWload) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(base) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRISCV64MOVWstore_0(v *Value) bool { + // match: (MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) + // cond: is32Bit(off1+off2) && canMergeSym(sym1, sym2) + // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) + for { + off1 := v.AuxInt + sym1 := v.Aux + mem := v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpRISCV64MOVaddr { + break + } + off2 := v_0.AuxInt + sym2 := v_0.Aux + base := v_0.Args[0] + val := v.Args[1] + if !(is32Bit(off1+off2) && canMergeSym(sym1, sym2)) { + break + } + v.reset(OpRISCV64MOVWstore) + v.AuxInt = off1 + off2 + v.Aux = mergeSym(sym1, sym2) + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (MOVWstore [off1] {sym} (ADDI [off2] base) val mem) + // cond: is32Bit(off1+off2) + // result: (MOVWstore [off1+off2] {sym} base val mem) + for { + off1 := v.AuxInt + sym := v.Aux + mem := v.Args[2] + v_0 := v.Args[0] + if v_0.Op != OpRISCV64ADDI { + break + } + off2 := v_0.AuxInt + base := v_0.Args[0] + val := v.Args[1] + if !(is32Bit(off1 + off2)) { + break + } + v.reset(OpRISCV64MOVWstore) + v.AuxInt = off1 + off2 + v.Aux = sym + v.AddArg(base) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueRISCV64_OpRotateLeft16_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft16 x (MOVHconst [c])) + // result: (Or16 (Lsh16x64 x (MOVHconst [c&15])) (Rsh16Ux64 x (MOVHconst [-c&15]))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpRISCV64MOVHconst { + break + } + c := v_1.AuxInt + v.reset(OpOr16) + v0 := b.NewValue0(v.Pos, OpLsh16x64, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16) + v1.AuxInt = c & 15 + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16) + v3.AuxInt = -c & 15 + v2.AddArg(v3) + v.AddArg(v2) + return true + } + return false +} +func rewriteValueRISCV64_OpRotateLeft32_0(v *Value) bool { + b := v.Block + typ := 
&b.Func.Config.Types + // match: (RotateLeft32 x (MOVWconst [c])) + // result: (Or32 (Lsh32x64 x (MOVWconst [c&31])) (Rsh32Ux64 x (MOVWconst [-c&31]))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpRISCV64MOVWconst { + break + } + c := v_1.AuxInt + v.reset(OpOr32) + v0 := b.NewValue0(v.Pos, OpLsh32x64, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32) + v1.AuxInt = c & 31 + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpRsh32Ux64, t) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32) + v3.AuxInt = -c & 31 + v2.AddArg(v3) + v.AddArg(v2) + return true + } + return false +} +func rewriteValueRISCV64_OpRotateLeft64_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft64 x (MOVDconst [c])) + // result: (Or64 (Lsh64x64 x (MOVDconst [c&63])) (Rsh64Ux64 x (MOVDconst [-c&63]))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpRISCV64MOVDconst { + break + } + c := v_1.AuxInt + v.reset(OpOr64) + v0 := b.NewValue0(v.Pos, OpLsh64x64, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v1.AuxInt = c & 63 + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpRsh64Ux64, t) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v3.AuxInt = -c & 63 + v2.AddArg(v3) + v.AddArg(v2) + return true + } + return false +} +func rewriteValueRISCV64_OpRotateLeft8_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft8 x (MOVBconst [c])) + // result: (Or8 (Lsh8x64 x (MOVBconst [c&7])) (Rsh8Ux64 x (MOVBconst [-c&7]))) + for { + t := v.Type + _ = v.Args[1] + x := v.Args[0] + v_1 := v.Args[1] + if v_1.Op != OpRISCV64MOVBconst { + break + } + c := v_1.AuxInt + v.reset(OpOr8) + v0 := b.NewValue0(v.Pos, OpLsh8x64, t) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8) + v1.AuxInt = c & 7 + v0.AddArg(v1) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8) + v3.AuxInt = -c & 7 + v2.AddArg(v3) + v.AddArg(v2) + return true + } + return false +} +func rewriteValueRISCV64_OpRound32F_0(v *Value) bool { + // match: (Round32F x) + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpRound64F_0(v *Value) bool { + // match: (Round64F x) + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpRsh16Ux16_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux16 x y) + // result: (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt16to64 y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(y) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpNeg16, t) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v.AddArg(v2) + return true + } +} +func rewriteValueRISCV64_OpRsh16Ux32_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux32 x y) + // result: (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt32to64 y)))) + for { + t := v.Type + y := v.Args[1] + x := 
v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(y) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpNeg16, t) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v.AddArg(v2) + return true + } +} +func rewriteValueRISCV64_OpRsh16Ux64_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux64 x y) + // result: (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] y))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(y) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpNeg16, t) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v3.AuxInt = 64 + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg(v2) + return true + } +} +func rewriteValueRISCV64_OpRsh16Ux8_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux8 x y) + // result: (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt8to64 y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(y) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpNeg16, t) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v.AddArg(v2) + return true + } +} +func rewriteValueRISCV64_OpRsh16x16_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x16 x y) + // result: (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v1.AddArg(y) + v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v2.AuxInt = -1 + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpRsh16x32_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x32 x y) + // result: (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v1.AddArg(y) + v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v2.AuxInt = -1 + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpRsh16x64_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x64 x y) + // result: (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, 
OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v1.AddArg(y) + v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v2.AuxInt = -1 + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v3.AuxInt = 64 + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpRsh16x8_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x8 x y) + // result: (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v1.AddArg(y) + v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v2.AuxInt = -1 + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpRsh32Ux16_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux16 x y) + // result: (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [64] (ZeroExt16to64 y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(y) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpNeg32, t) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v.AddArg(v2) + return true + } +} +func rewriteValueRISCV64_OpRsh32Ux32_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux32 x y) + // result: (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [64] (ZeroExt32to64 y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(y) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpNeg32, t) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v.AddArg(v2) + return true + } +} +func rewriteValueRISCV64_OpRsh32Ux64_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux64 x y) + // result: (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [64] y))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(y) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpNeg32, t) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v3.AuxInt = 64 + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg(v2) + return true + } +} +func rewriteValueRISCV64_OpRsh32Ux8_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux8 x y) + // result: (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [64] (ZeroExt8to64 y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(y) + v.AddArg(v0) + v2 := 
b.NewValue0(v.Pos, OpNeg32, t) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v.AddArg(v2) + return true + } +} +func rewriteValueRISCV64_OpRsh32x16_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x16 x y) + // result: (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v1.AddArg(y) + v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v2.AuxInt = -1 + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpRsh32x32_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x32 x y) + // result: (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v1.AddArg(y) + v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v2.AuxInt = -1 + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpRsh32x64_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x64 x y) + // result: (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v1.AddArg(y) + v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v2.AuxInt = -1 + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v3.AuxInt = 64 + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpRsh32x8_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x8 x y) + // result: (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v1.AddArg(y) + v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v2.AuxInt = -1 + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpRsh64Ux16_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux16 x y) + // result: (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt16to64 y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpNeg64, t) + v2 := 
b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = 64 + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpRsh64Ux32_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux32 x y) + // result: (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt32to64 y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpNeg64, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = 64 + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpRsh64Ux64_0(v *Value) bool { + b := v.Block + // match: (Rsh64Ux64 x y) + // result: (AND (SRL x y) (Neg64 (SLTIU [64] y))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpNeg64, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = 64 + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpRsh64Ux8_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux8 x y) + // result: (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt8to64 y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v0.AddArg(x) + v0.AddArg(y) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpNeg64, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = 64 + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpRsh64x16_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x16 x y) + // result: (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SRA) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v1.AuxInt = -1 + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v2.AuxInt = 64 + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpRsh64x32_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x32 x y) + // result: (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SRA) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v1.AuxInt = -1 + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v2.AuxInt = 64 + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpRsh64x64_0(v *Value) bool { + b := v.Block + // match: (Rsh64x64 x y) + // result: (SRA x (OR y (ADDI [-1] (SLTIU [64] y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SRA) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v0.AddArg(y) + v1 := 
b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v1.AuxInt = -1 + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v2.AuxInt = 64 + v2.AddArg(y) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpRsh64x8_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x8 x y) + // result: (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SRA) + v.Type = t + v.AddArg(x) + v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v1.AuxInt = -1 + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v2.AuxInt = 64 + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpRsh8Ux16_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux16 x y) + // result: (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt16to64 y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(y) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpNeg8, t) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v.AddArg(v2) + return true + } +} +func rewriteValueRISCV64_OpRsh8Ux32_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux32 x y) + // result: (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt32to64 y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(y) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpNeg8, t) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v.AddArg(v2) + return true + } +} +func rewriteValueRISCV64_OpRsh8Ux64_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux64 x y) + // result: (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] y))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(y) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpNeg8, t) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v3.AuxInt = 64 + v3.AddArg(y) + v2.AddArg(v3) + v.AddArg(v2) + return true + } +} +func rewriteValueRISCV64_OpRsh8Ux8_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux8 x y) + // result: (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt8to64 y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64AND) + v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v0.AddArg(v1) + v0.AddArg(y) + v.AddArg(v0) + v2 := b.NewValue0(v.Pos, OpNeg8, t) + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v.AddArg(v2) + return true + } +} +func 
rewriteValueRISCV64_OpRsh8x16_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x16 x y) + // result: (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v1.AddArg(y) + v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v2.AuxInt = -1 + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpRsh8x32_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x32 x y) + // result: (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v1.AddArg(y) + v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v2.AuxInt = -1 + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpRsh8x64_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x64 x y) + // result: (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v1.AddArg(y) + v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v2.AuxInt = -1 + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v3.AuxInt = 64 + v3.AddArg(y) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpRsh8x8_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x8 x y) + // result: (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) + for { + t := v.Type + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SRA) + v.Type = t + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v1.AddArg(y) + v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v2.AuxInt = -1 + v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v3.AuxInt = 64 + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v3.AddArg(v4) + v2.AddArg(v3) + v1.AddArg(v2) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpSignExt16to32_0(v *Value) bool { + b := v.Block + // match: (SignExt16to32 x) + // result: (SRAI [48] (SLLI [48] x)) + for { + t := v.Type + x := v.Args[0] + v.reset(OpRISCV64SRAI) + v.AuxInt = 48 + v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) + v0.AuxInt = 48 + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpSignExt16to64_0(v *Value) bool { + b := v.Block + // match: (SignExt16to64 x) + // result: (SRAI [48] (SLLI [48] x)) + for { + t := v.Type + x := v.Args[0] + v.reset(OpRISCV64SRAI) + v.AuxInt = 48 + v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) + v0.AuxInt = 48 + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func 
rewriteValueRISCV64_OpSignExt32to64_0(v *Value) bool { + b := v.Block + // match: (SignExt32to64 x) + // result: (SRAI [32] (SLLI [32] x)) + for { + t := v.Type + x := v.Args[0] + v.reset(OpRISCV64SRAI) + v.AuxInt = 32 + v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) + v0.AuxInt = 32 + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpSignExt8to16_0(v *Value) bool { + b := v.Block + // match: (SignExt8to16 x) + // result: (SRAI [56] (SLLI [56] x)) + for { + t := v.Type + x := v.Args[0] + v.reset(OpRISCV64SRAI) + v.AuxInt = 56 + v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) + v0.AuxInt = 56 + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpSignExt8to32_0(v *Value) bool { + b := v.Block + // match: (SignExt8to32 x) + // result: (SRAI [56] (SLLI [56] x)) + for { + t := v.Type + x := v.Args[0] + v.reset(OpRISCV64SRAI) + v.AuxInt = 56 + v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) + v0.AuxInt = 56 + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpSignExt8to64_0(v *Value) bool { + b := v.Block + // match: (SignExt8to64 x) + // result: (SRAI [56] (SLLI [56] x)) + for { + t := v.Type + x := v.Args[0] + v.reset(OpRISCV64SRAI) + v.AuxInt = 56 + v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) + v0.AuxInt = 56 + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueRISCV64_OpSlicemask_0(v *Value) bool { + b := v.Block + typ := &b.Func.Config.Types + // match: (Slicemask x) + // result: (XOR (MOVDconst [-1]) (SRA (SUB x (MOVDconst [1])) (MOVDconst [63]))) + for { + t := v.Type + x := v.Args[0] + v.reset(OpRISCV64XOR) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v0.AuxInt = -1 + v.AddArg(v0) + v1 := b.NewValue0(v.Pos, OpRISCV64SRA, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SUB, t) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v3.AuxInt = 1 + v2.AddArg(v3) + v1.AddArg(v2) + v4 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v4.AuxInt = 63 + v1.AddArg(v4) + v.AddArg(v1) + return true + } +} +func rewriteValueRISCV64_OpSqrt_0(v *Value) bool { + // match: (Sqrt x) + // result: (FSQRTD x) + for { + x := v.Args[0] + v.reset(OpRISCV64FSQRTD) + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpStaticCall_0(v *Value) bool { + // match: (StaticCall [argwid] {target} mem) + // result: (CALLstatic [argwid] {target} mem) + for { + argwid := v.AuxInt + target := v.Aux + mem := v.Args[0] + v.reset(OpRISCV64CALLstatic) + v.AuxInt = argwid + v.Aux = target + v.AddArg(mem) + return true + } +} +func rewriteValueRISCV64_OpStore_0(v *Value) bool { + // match: (Store {t} ptr val mem) + // cond: t.(*types.Type).Size() == 1 + // result: (MOVBstore ptr val mem) + for { + t := v.Aux + mem := v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + if !(t.(*types.Type).Size() == 1) { + break + } + v.reset(OpRISCV64MOVBstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.(*types.Type).Size() == 2 + // result: (MOVHstore ptr val mem) + for { + t := v.Aux + mem := v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + if !(t.(*types.Type).Size() == 2) { + break + } + v.reset(OpRISCV64MOVHstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.(*types.Type).Size() == 4 && !is32BitFloat(val.Type) + // result: (MOVWstore ptr val mem) + for { + t := v.Aux + mem := v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + if !(t.(*types.Type).Size() == 4 && 
!is32BitFloat(val.Type)) { + break + } + v.reset(OpRISCV64MOVWstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type) + // result: (MOVDstore ptr val mem) + for { + t := v.Aux + mem := v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + if !(t.(*types.Type).Size() == 8 && !is64BitFloat(val.Type)) { + break + } + v.reset(OpRISCV64MOVDstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.(*types.Type).Size() == 4 && is32BitFloat(val.Type) + // result: (FMOVWstore ptr val mem) + for { + t := v.Aux + mem := v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + if !(t.(*types.Type).Size() == 4 && is32BitFloat(val.Type)) { + break + } + v.reset(OpRISCV64FMOVWstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.(*types.Type).Size() == 8 && is64BitFloat(val.Type) + // result: (FMOVDstore ptr val mem) + for { + t := v.Aux + mem := v.Args[2] + ptr := v.Args[0] + val := v.Args[1] + if !(t.(*types.Type).Size() == 8 && is64BitFloat(val.Type)) { + break + } + v.reset(OpRISCV64FMOVDstore) + v.AddArg(ptr) + v.AddArg(val) + v.AddArg(mem) + return true + } + return false +} +func rewriteValueRISCV64_OpSub16_0(v *Value) bool { + // match: (Sub16 x y) + // result: (SUB x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SUB) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpSub32_0(v *Value) bool { + // match: (Sub32 x y) + // result: (SUB x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SUB) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpSub32F_0(v *Value) bool { + // match: (Sub32F x y) + // result: (FSUBS x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64FSUBS) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpSub64_0(v *Value) bool { + // match: (Sub64 x y) + // result: (SUB x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SUB) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpSub64F_0(v *Value) bool { + // match: (Sub64F x y) + // result: (FSUBD x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64FSUBD) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpSub8_0(v *Value) bool { + // match: (Sub8 x y) + // result: (SUB x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SUB) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpSubPtr_0(v *Value) bool { + // match: (SubPtr x y) + // result: (SUB x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64SUB) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpTrunc16to8_0(v *Value) bool { + // match: (Trunc16to8 x) + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpTrunc32to16_0(v *Value) bool { + // match: (Trunc32to16 x) + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpTrunc32to8_0(v *Value) bool { + // match: (Trunc32to8 x) + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpTrunc64to16_0(v *Value) bool { + // match: (Trunc64to16 x) + // result: x + for { + x := 
v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpTrunc64to32_0(v *Value) bool { + // match: (Trunc64to32 x) + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpTrunc64to8_0(v *Value) bool { + // match: (Trunc64to8 x) + // result: x + for { + x := v.Args[0] + v.reset(OpCopy) + v.Type = x.Type + v.AddArg(x) + return true + } +} +func rewriteValueRISCV64_OpWB_0(v *Value) bool { + // match: (WB {fn} destptr srcptr mem) + // result: (LoweredWB {fn} destptr srcptr mem) + for { + fn := v.Aux + mem := v.Args[2] + destptr := v.Args[0] + srcptr := v.Args[1] + v.reset(OpRISCV64LoweredWB) + v.Aux = fn + v.AddArg(destptr) + v.AddArg(srcptr) + v.AddArg(mem) + return true + } +} +func rewriteValueRISCV64_OpXor16_0(v *Value) bool { + // match: (Xor16 x y) + // result: (XOR x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64XOR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpXor32_0(v *Value) bool { + // match: (Xor32 x y) + // result: (XOR x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64XOR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpXor64_0(v *Value) bool { + // match: (Xor64 x y) + // result: (XOR x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64XOR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpXor8_0(v *Value) bool { + // match: (Xor8 x y) + // result: (XOR x y) + for { + y := v.Args[1] + x := v.Args[0] + v.reset(OpRISCV64XOR) + v.AddArg(x) + v.AddArg(y) + return true + } +} +func rewriteValueRISCV64_OpZero_0(v *Value) bool { + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Zero [0] _ mem) + // result: mem + for { + if v.AuxInt != 0 { + break + } + mem := v.Args[1] + v.reset(OpCopy) + v.Type = mem.Type + v.AddArg(mem) + return true + } + // match: (Zero [1] ptr mem) + // result: (MOVBstore ptr (MOVBconst) mem) + for { + if v.AuxInt != 1 { + break + } + mem := v.Args[1] + ptr := v.Args[0] + v.reset(OpRISCV64MOVBstore) + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVBconst, typ.UInt8) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Zero [2] ptr mem) + // result: (MOVHstore ptr (MOVHconst) mem) + for { + if v.AuxInt != 2 { + break + } + mem := v.Args[1] + ptr := v.Args[0] + v.reset(OpRISCV64MOVHstore) + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVHconst, typ.UInt16) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Zero [4] ptr mem) + // result: (MOVWstore ptr (MOVWconst) mem) + for { + if v.AuxInt != 4 { + break + } + mem := v.Args[1] + ptr := v.Args[0] + v.reset(OpRISCV64MOVWstore) + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVWconst, typ.UInt32) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Zero [8] ptr mem) + // result: (MOVDstore ptr (MOVDconst) mem) + for { + if v.AuxInt != 8 { + break + } + mem := v.Args[1] + ptr := v.Args[0] + v.reset(OpRISCV64MOVDstore) + v.AddArg(ptr) + v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) + v.AddArg(v0) + v.AddArg(mem) + return true + } + // match: (Zero [s] {t} ptr mem) + // result: (LoweredZero [t.(*types.Type).Alignment()] ptr (ADD ptr (MOVDconst [s-moveSize(t.(*types.Type).Alignment(), config)])) mem) + for { + s := v.AuxInt + t := v.Aux + mem := v.Args[1] + ptr := v.Args[0] + v.reset(OpRISCV64LoweredZero) + v.AuxInt = t.(*types.Type).Alignment() + v.AddArg(ptr) + v0 
:= b.NewValue0(v.Pos, OpRISCV64ADD, ptr.Type)
+		v0.AddArg(ptr)
+		v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+		v1.AuxInt = s - moveSize(t.(*types.Type).Alignment(), config)
+		v0.AddArg(v1)
+		v.AddArg(v0)
+		v.AddArg(mem)
+		return true
+	}
+}
+func rewriteValueRISCV64_OpZeroExt16to32_0(v *Value) bool {
+	b := v.Block
+	// match: (ZeroExt16to32 x)
+	// result: (SRLI [48] (SLLI [48] x))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		v.reset(OpRISCV64SRLI)
+		v.AuxInt = 48
+		v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+		v0.AuxInt = 48
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
+func rewriteValueRISCV64_OpZeroExt16to64_0(v *Value) bool {
+	b := v.Block
+	// match: (ZeroExt16to64 x)
+	// result: (SRLI [48] (SLLI [48] x))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		v.reset(OpRISCV64SRLI)
+		v.AuxInt = 48
+		v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+		v0.AuxInt = 48
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
+func rewriteValueRISCV64_OpZeroExt32to64_0(v *Value) bool {
+	b := v.Block
+	// match: (ZeroExt32to64 x)
+	// result: (SRLI [32] (SLLI [32] x))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		v.reset(OpRISCV64SRLI)
+		v.AuxInt = 32
+		v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+		v0.AuxInt = 32
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
+func rewriteValueRISCV64_OpZeroExt8to16_0(v *Value) bool {
+	b := v.Block
+	// match: (ZeroExt8to16 x)
+	// result: (SRLI [56] (SLLI [56] x))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		v.reset(OpRISCV64SRLI)
+		v.AuxInt = 56
+		v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+		v0.AuxInt = 56
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
+func rewriteValueRISCV64_OpZeroExt8to32_0(v *Value) bool {
+	b := v.Block
+	// match: (ZeroExt8to32 x)
+	// result: (SRLI [56] (SLLI [56] x))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		v.reset(OpRISCV64SRLI)
+		v.AuxInt = 56
+		v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+		v0.AuxInt = 56
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
+func rewriteValueRISCV64_OpZeroExt8to64_0(v *Value) bool {
+	b := v.Block
+	// match: (ZeroExt8to64 x)
+	// result: (SRLI [56] (SLLI [56] x))
+	for {
+		t := v.Type
+		x := v.Args[0]
+		v.reset(OpRISCV64SRLI)
+		v.AuxInt = 56
+		v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t)
+		v0.AuxInt = 56
+		v0.AddArg(x)
+		v.AddArg(v0)
+		return true
+	}
+}
+func rewriteBlockRISCV64(b *Block) bool {
+	switch b.Kind {
+	case BlockIf:
+		// match: (If cond yes no)
+		// result: (BNE cond yes no)
+		for {
+			cond := b.Controls[0]
+			b.Reset(BlockRISCV64BNE)
+			b.AddControl(cond)
+			return true
+		}
+	}
+	return false
+}
diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go
index ff0ef25e90..5a77910bde 100644
--- a/src/cmd/compile/internal/ssa/schedule.go
+++ b/src/cmd/compile/internal/ssa/schedule.go
@@ -66,7 +66,7 @@ func (op Op) isLoweredGetClosurePtr() bool {
 	switch op {
 	case OpAMD64LoweredGetClosurePtr, OpPPC64LoweredGetClosurePtr, OpARMLoweredGetClosurePtr, OpARM64LoweredGetClosurePtr,
 		Op386LoweredGetClosurePtr, OpMIPS64LoweredGetClosurePtr, OpS390XLoweredGetClosurePtr, OpMIPSLoweredGetClosurePtr,
-		OpWasmLoweredGetClosurePtr:
+		OpRISCV64LoweredGetClosurePtr, OpWasmLoweredGetClosurePtr:
 		return true
 	}
 	return false
@@ -115,7 +115,7 @@ func schedule(f *Func) {
 			v.Op == OpARMLoweredNilCheck || v.Op == OpARM64LoweredNilCheck ||
 			v.Op == Op386LoweredNilCheck || v.Op == OpMIPS64LoweredNilCheck ||
 			v.Op == OpS390XLoweredNilCheck || v.Op == OpMIPSLoweredNilCheck ||
-			v.Op == OpWasmLoweredNilCheck:
+			v.Op == OpRISCV64LoweredNilCheck || v.Op == OpWasmLoweredNilCheck:
 			// Nil checks must come before loads from the same address.
 			score[v.ID] = ScoreNilCheck
 		case v.Op == OpPhi:
diff --git a/src/cmd/compile/main.go b/src/cmd/compile/main.go
index e3ec3361f9..3aa64a5ce2 100644
--- a/src/cmd/compile/main.go
+++ b/src/cmd/compile/main.go
@@ -12,6 +12,7 @@ import (
 	"cmd/compile/internal/mips"
 	"cmd/compile/internal/mips64"
 	"cmd/compile/internal/ppc64"
+	"cmd/compile/internal/riscv64"
 	"cmd/compile/internal/s390x"
 	"cmd/compile/internal/wasm"
 	"cmd/compile/internal/x86"
@@ -32,6 +33,7 @@ var archInits = map[string]func(*gc.Arch){
 	"mips64le": mips64.Init,
 	"ppc64": ppc64.Init,
 	"ppc64le": ppc64.Init,
+	"riscv64": riscv64.Init,
 	"s390x": s390x.Init,
 	"wasm": wasm.Init,
 }
diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go
index d30529bc5d..a07e64b472 100644
--- a/src/cmd/dist/buildtool.go
+++ b/src/cmd/dist/buildtool.go
@@ -45,10 +45,11 @@ var bootstrapDirs = []string{
 	"cmd/compile/internal/mips",
 	"cmd/compile/internal/mips64",
 	"cmd/compile/internal/ppc64",
-	"cmd/compile/internal/types",
+	"cmd/compile/internal/riscv64",
 	"cmd/compile/internal/s390x",
 	"cmd/compile/internal/ssa",
 	"cmd/compile/internal/syntax",
+	"cmd/compile/internal/types",
 	"cmd/compile/internal/x86",
 	"cmd/compile/internal/wasm",
 	"cmd/internal/bio",
diff --git a/src/cmd/internal/obj/riscv/list.go b/src/cmd/internal/obj/riscv/list.go
index f5f7ef21e4..de90961e32 100644
--- a/src/cmd/internal/obj/riscv/list.go
+++ b/src/cmd/internal/obj/riscv/list.go
@@ -11,11 +11,11 @@ import (
 )
 func init() {
-	obj.RegisterRegister(obj.RBaseRISCV, REG_END, regName)
+	obj.RegisterRegister(obj.RBaseRISCV, REG_END, RegName)
 	obj.RegisterOpcode(obj.ABaseRISCV, Anames)
 }
-func regName(r int) string {
+func RegName(r int) string {
 	switch {
 	case r == 0:
 		return "NONE"
diff --git a/src/cmd/internal/obj/riscv/obj.go b/src/cmd/internal/obj/riscv/obj.go
index b024a6a546..721908fe9d 100644
--- a/src/cmd/internal/obj/riscv/obj.go
+++ b/src/cmd/internal/obj/riscv/obj.go
@@ -487,8 +487,8 @@ func rewriteMOV(ctxt *obj.Link, newprog obj.ProgAlloc, p *obj.Prog) {
 	}
 }
-// invertBranch inverts the condition of a conditional branch.
-func invertBranch(i obj.As) obj.As {
+// InvertBranch inverts the condition of a conditional branch.
+func InvertBranch(i obj.As) obj.As {
 	switch i {
 	case ABEQ:
 		return ABNE
@@ -503,7 +503,7 @@
 	case ABGEU:
 		return ABLTU
 	default:
-		panic("invertBranch: not a branch")
+		panic("InvertBranch: not a branch")
 	}
 }
@@ -800,7 +800,7 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
 			jmp.To = obj.Addr{Type: obj.TYPE_BRANCH}
 			jmp.Pcond = p.Pcond
-			p.As = invertBranch(p.As)
+			p.As = InvertBranch(p.As)
 			p.Pcond = jmp.Link
 			// We may have made previous branches too long,
@@ -1005,7 +1005,7 @@ func wantImmU(p *obj.Prog, pos string, a obj.Addr, nbits uint) {
 func wantReg(p *obj.Prog, pos string, descr string, r, min, max int16) {
 	if r < min || r > max {
-		p.Ctxt.Diag("%v\texpected %s register in %s position but got non-%s register %s", p, descr, pos, descr, regName(int(r)))
+		p.Ctxt.Diag("%v\texpected %s register in %s position but got non-%s register %s", p, descr, pos, descr, RegName(int(r)))
 	}
 }
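
A brief note on the lowering encoded above, with a minimal sketch. RISC-V shift instructions use only the low six bits of the shift amount, while Go defines shifts by 64 or more bits, so the unsigned Rsh rules mask the result to zero via (AND ... (Neg* (SLTIU [64] y))) and the signed Rsh rules clamp the shift amount to all ones via (OR y (ADDI [-1] (SLTIU [64] y))). The MOVDconst rules split a constant that does not fit in 32 bits into a shifted high half plus a sign-extended low half, adding one to the high half when the low half is negative. The following standalone Go program is not part of the change; the names rsh64U, rsh64S and splitConst are illustrative only and simply mirror that arithmetic:

// A minimal sketch of the semantics encoded by the shift and constant
// rules above. None of these names appear in the compiler.
package main

import "fmt"

// rsh64U mirrors (Rsh64Ux64 x y) => (AND (SRL x y) (Neg64 (SLTIU [64] y))):
// SRL only sees the low six bits of y, so the result is masked to zero
// whenever y >= 64, matching Go's shift semantics.
func rsh64U(x, y uint64) uint64 {
	srl := x >> (y & 63) // what the SRL instruction computes
	var mask uint64      // Neg64 (SLTIU [64] y): all ones if y < 64, else zero
	if y < 64 {
		mask = ^uint64(0)
	}
	return srl & mask
}

// rsh64S mirrors (Rsh64x64 x y) => (SRA x (OR y (ADDI [-1] (SLTIU [64] y)))):
// when y >= 64 the OR forces the shift amount to all ones (63 after masking),
// so the result is filled with the sign bit.
func rsh64S(x int64, y uint64) int64 {
	lt := uint64(0) // SLTIU [64] y
	if y < 64 {
		lt = 1
	}
	amt := y | (lt - 1) // y when y < 64, otherwise all ones
	return x >> (amt & 63)
}

// splitConst mirrors the MOVDconst rules: a constant that does not fit in 32
// bits is rebuilt as hi<<32 + int64(int32(c)), where hi is c>>32 + 1 when the
// sign-extended low half is negative and c>>32 + 0 otherwise.
func splitConst(c int64) int64 {
	lo := int64(int32(c))
	hi := c >> 32
	if lo < 0 {
		hi++
	}
	return hi<<32 + lo
}

func main() {
	fmt.Println(rsh64U(1<<63, 70)) // unsigned shift by >= 64 gives 0
	fmt.Println(rsh64S(-8, 70))    // signed shift by >= 64 keeps the sign: -1
	c := int64(0x123456789abcdef0)
	fmt.Println(splitConst(c) == c) // true
}

Running the sketch prints 0, -1 and true, which is the behaviour the rewrite rules must preserve when mapping Go shifts and 64-bit constants onto RISC-V instructions.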