From ec5bdefd0a8f4a4dc7ba8d6ab4e1cf393384d03c Mon Sep 17 00:00:00 2001 From: Xiaodong Liu Date: Wed, 24 Nov 2021 17:31:18 +0800 Subject: [PATCH] cmd/compile/internal/ssa{,/gen}: define rules and operation on loong64 The rules and operation definition is used to generate rewrite functions and OpKind type constant. Contributors to the loong64 port are: Weining Lu Lei Wang Lingqin Gong Xiaolin Zhao Meidan Li Xiaojuan Zhai Qiyuan Pu Guoqi Chen This port has been updated to Go 1.15.6: https://github.com/loongson/go Updates #46229 Change-Id: Ia362ed7ba5d84046697aadbc8d6d4cbe495f6076 Reviewed-on: https://go-review.googlesource.com/c/go/+/367039 Run-TryBot: Ian Lance Taylor TryBot-Result: Gopher Robot Reviewed-by: David Chase Reviewed-by: Ian Lance Taylor Auto-Submit: Ian Lance Taylor --- .../compile/internal/ssa/gen/LOONG64.rules | 675 ++ .../compile/internal/ssa/gen/LOONG64Ops.go | 480 + src/cmd/compile/internal/ssa/opGen.go | 1849 ++++ .../compile/internal/ssa/rewriteLOONG64.go | 7946 +++++++++++++++++ 4 files changed, 10950 insertions(+) create mode 100644 src/cmd/compile/internal/ssa/gen/LOONG64.rules create mode 100644 src/cmd/compile/internal/ssa/gen/LOONG64Ops.go create mode 100644 src/cmd/compile/internal/ssa/rewriteLOONG64.go diff --git a/src/cmd/compile/internal/ssa/gen/LOONG64.rules b/src/cmd/compile/internal/ssa/gen/LOONG64.rules new file mode 100644 index 0000000000..e3bbb29efe --- /dev/null +++ b/src/cmd/compile/internal/ssa/gen/LOONG64.rules @@ -0,0 +1,675 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +(Add(Ptr|64|32|16|8) ...) => (ADDV ...) +(Add(32|64)F ...) => (ADD(F|D) ...) + +(Sub(Ptr|64|32|16|8) ...) => (SUBV ...) +(Sub(32|64)F ...) => (SUB(F|D) ...) + +(Mul(64|32|16|8) x y) => (Select1 (MULVU x y)) +(Mul(32|64)F ...) => (MUL(F|D) ...) +(Mul64uhilo ...) => (MULVU ...) +(Select0 (Mul64uover x y)) => (Select1 (MULVU x y)) +(Select1 (Mul64uover x y)) => (SGTU (Select0 (MULVU x y)) (MOVVconst [0])) + +(Hmul64 x y) => (Select0 (MULV x y)) +(Hmul64u x y) => (Select0 (MULVU x y)) +(Hmul32 x y) => (SRAVconst (Select1 (MULV (SignExt32to64 x) (SignExt32to64 y))) [32]) +(Hmul32u x y) => (SRLVconst (Select1 (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32]) + +(Div64 x y) => (Select1 (DIVV x y)) +(Div64u x y) => (Select1 (DIVVU x y)) +(Div32 x y) => (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y))) +(Div32u x y) => (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))) +(Div16 x y) => (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y))) +(Div16u x y) => (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))) +(Div8 x y) => (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y))) +(Div8u x y) => (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))) +(Div(32|64)F ...) => (DIV(F|D) ...) + +(Mod64 x y) => (Select0 (DIVV x y)) +(Mod64u x y) => (Select0 (DIVVU x y)) +(Mod32 x y) => (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y))) +(Mod32u x y) => (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))) +(Mod16 x y) => (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y))) +(Mod16u x y) => (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))) +(Mod8 x y) => (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y))) +(Mod8u x y) => (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))) + +// (x + y) / 2 with x>=y => (x - y) / 2 + y +(Avg64u x y) => (ADDV (SRLVconst (SUBV x y) [1]) y) + +(And(64|32|16|8) ...) => (AND ...) +(Or(64|32|16|8) ...) => (OR ...) +(Xor(64|32|16|8) ...) => (XOR ...) 
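The rules above are the input half of this change: the gen tool (cmd/compile/internal/ssa/gen) reads LOONG64.rules and LOONG64Ops.go and emits the opGen.go additions and the rewriteLOONG64.go file listed in the diffstat. To make the mapping concrete, the Mul64 rule above, (Mul64 x y) => (Select1 (MULVU x y)), expands into a per-op rewrite function of roughly the shape below. This is a sketch modeled on the generated code for comparable ports (e.g. rewriteMIPS64.go), not an excerpt from this patch, and the exact generated body may differ.

```go
// A sketch (not an excerpt) of the kind of function the generator emits
// into rewriteLOONG64.go; the real file lives in package ssa.
package ssa

import "cmd/compile/internal/types"

func rewriteValueLOONG64_OpMul64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Mul64 x y)
	// result: (Select1 (MULVU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect1)
		// MULVU produces a (hi, lo) tuple; Select1 picks the low 64 bits.
		v0 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
```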
+ +// shifts +// hardware instruction uses only the low 6 bits of the shift +// we compare to 64 to ensure Go semantics for large shifts +(Lsh64x64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) +(Lsh64x32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) +(Lsh64x16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) +(Lsh64x8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) + +(Lsh32x64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) +(Lsh32x32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) +(Lsh32x16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) +(Lsh32x8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) + +(Lsh16x64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) +(Lsh16x32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) +(Lsh16x16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) +(Lsh16x8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) + +(Lsh8x64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) +(Lsh8x32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) +(Lsh8x16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) +(Lsh8x8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) + +(Rsh64Ux64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV x y)) +(Rsh64Ux32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV x (ZeroExt32to64 y))) +(Rsh64Ux16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV x (ZeroExt16to64 y))) +(Rsh64Ux8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV x (ZeroExt8to64 y))) + +(Rsh32Ux64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt32to64 x) y)) +(Rsh32Ux32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt32to64 y))) +(Rsh32Ux16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt16to64 y))) +(Rsh32Ux8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt8to64 y))) + +(Rsh16Ux64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt16to64 x) y)) +(Rsh16Ux32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt32to64 y))) +(Rsh16Ux16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt16to64 y))) +(Rsh16Ux8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt8to64 y))) + +(Rsh8Ux64 x y) => (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt8to64 x) y)) +(Rsh8Ux32 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt32to64 y))) +(Rsh8Ux16 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt16to64 y))) +(Rsh8Ux8 x y) => (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt8to64 y))) + +(Rsh64x64 x y) => (SRAV x (OR (NEGV (SGTU y (MOVVconst [63]))) y)) +(Rsh64x32 x y) => (SRAV x (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) +(Rsh64x16 x y) => (SRAV x (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) 
(ZeroExt16to64 y))) +(Rsh64x8 x y) => (SRAV x (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) + +(Rsh32x64 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) +(Rsh32x32 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) +(Rsh32x16 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) +(Rsh32x8 x y) => (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) + +(Rsh16x64 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) +(Rsh16x32 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) +(Rsh16x16 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) +(Rsh16x8 x y) => (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) + +(Rsh8x64 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) +(Rsh8x32 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) +(Rsh8x16 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) +(Rsh8x8 x y) => (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) + +// rotates +(RotateLeft8 x (MOVVconst [c])) => (Or8 (Lsh8x64 x (MOVVconst [c&7])) (Rsh8Ux64 x (MOVVconst [-c&7]))) +(RotateLeft16 x (MOVVconst [c])) => (Or16 (Lsh16x64 x (MOVVconst [c&15])) (Rsh16Ux64 x (MOVVconst [-c&15]))) +(RotateLeft32 x (MOVVconst [c])) => (Or32 (Lsh32x64 x (MOVVconst [c&31])) (Rsh32Ux64 x (MOVVconst [-c&31]))) +(RotateLeft64 x (MOVVconst [c])) => (Or64 (Lsh64x64 x (MOVVconst [c&63])) (Rsh64Ux64 x (MOVVconst [-c&63]))) + +// unary ops +(Neg(64|32|16|8) ...) => (NEGV ...) +(Neg(32|64)F ...) => (NEG(F|D) ...) + +(Com(64|32|16|8) x) => (NOR (MOVVconst [0]) x) + +(Sqrt ...) => (SQRTD ...) +(Sqrt32 ...) => (SQRTF ...) + +// boolean ops -- booleans are represented with 0=false, 1=true +(AndB ...) => (AND ...) +(OrB ...) => (OR ...) +(EqB x y) => (XOR (MOVVconst [1]) (XOR x y)) +(NeqB ...) => (XOR ...) +(Not x) => (XORconst [1] x) + +// constants +(Const(64|32|16|8) [val]) => (MOVVconst [int64(val)]) +(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)]) +(ConstNil) => (MOVVconst [0]) +(ConstBool [t]) => (MOVVconst [int64(b2i(t))]) + +(Slicemask x) => (SRAVconst (NEGV x) [63]) + +// truncations +// Because we ignore high parts of registers, truncates are just copies. +(Trunc16to8 ...) => (Copy ...) +(Trunc32to8 ...) => (Copy ...) +(Trunc32to16 ...) => (Copy ...) +(Trunc64to8 ...) => (Copy ...) +(Trunc64to16 ...) => (Copy ...) +(Trunc64to32 ...) => (Copy ...) + +// Zero-/Sign-extensions +(ZeroExt8to16 ...) => (MOVBUreg ...) +(ZeroExt8to32 ...) => (MOVBUreg ...) +(ZeroExt16to32 ...) => (MOVHUreg ...) +(ZeroExt8to64 ...) => (MOVBUreg ...) +(ZeroExt16to64 ...) => (MOVHUreg ...) +(ZeroExt32to64 ...) => (MOVWUreg ...) + +(SignExt8to16 ...) => (MOVBreg ...) +(SignExt8to32 ...) => (MOVBreg ...) +(SignExt16to32 ...) => (MOVHreg ...) +(SignExt8to64 ...) => (MOVBreg ...) +(SignExt16to64 ...) => (MOVHreg ...) +(SignExt32to64 ...) => (MOVWreg ...) + +// float <=> int conversion +(Cvt32to32F ...) => (MOVWF ...) +(Cvt32to64F ...) => (MOVWD ...) +(Cvt64to32F ...) => (MOVVF ...) +(Cvt64to64F ...) => (MOVVD ...) +(Cvt32Fto32 ...) => (TRUNCFW ...) +(Cvt64Fto32 ...) => (TRUNCDW ...) +(Cvt32Fto64 ...) 
=> (TRUNCFV ...) +(Cvt64Fto64 ...) => (TRUNCDV ...) +(Cvt32Fto64F ...) => (MOVFD ...) +(Cvt64Fto32F ...) => (MOVDF ...) + +(CvtBoolToUint8 ...) => (Copy ...) + +(Round(32|64)F ...) => (Copy ...) + +// comparisons +(Eq8 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y))) +(Eq16 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y))) +(Eq32 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y))) +(Eq64 x y) => (SGTU (MOVVconst [1]) (XOR x y)) +(EqPtr x y) => (SGTU (MOVVconst [1]) (XOR x y)) +(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y)) + +(Neq8 x y) => (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0])) +(Neq16 x y) => (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)) (MOVVconst [0])) +(Neq32 x y) => (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0])) +(Neq64 x y) => (SGTU (XOR x y) (MOVVconst [0])) +(NeqPtr x y) => (SGTU (XOR x y) (MOVVconst [0])) +(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y)) + +(Less8 x y) => (SGT (SignExt8to64 y) (SignExt8to64 x)) +(Less16 x y) => (SGT (SignExt16to64 y) (SignExt16to64 x)) +(Less32 x y) => (SGT (SignExt32to64 y) (SignExt32to64 x)) +(Less64 x y) => (SGT y x) +(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN + +(Less8U x y) => (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x)) +(Less16U x y) => (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x)) +(Less32U x y) => (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x)) +(Less64U x y) => (SGTU y x) + +(Leq8 x y) => (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y))) +(Leq16 x y) => (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y))) +(Leq32 x y) => (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y))) +(Leq64 x y) => (XOR (MOVVconst [1]) (SGT x y)) +(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN + +(Leq8U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y))) +(Leq16U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y))) +(Leq32U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y))) +(Leq64U x y) => (XOR (MOVVconst [1]) (SGTU x y)) + +(OffPtr [off] ptr:(SP)) => (MOVVaddr [int32(off)] ptr) +(OffPtr [off] ptr) => (ADDVconst [off] ptr) + +(Addr {sym} base) => (MOVVaddr {sym} base) +(LocalAddr {sym} base _) => (MOVVaddr {sym} base) + +// loads +(Load ptr mem) && t.IsBoolean() => (MOVBUload ptr mem) +(Load ptr mem) && (is8BitInt(t) && isSigned(t)) => (MOVBload ptr mem) +(Load ptr mem) && (is8BitInt(t) && !isSigned(t)) => (MOVBUload ptr mem) +(Load ptr mem) && (is16BitInt(t) && isSigned(t)) => (MOVHload ptr mem) +(Load ptr mem) && (is16BitInt(t) && !isSigned(t)) => (MOVHUload ptr mem) +(Load ptr mem) && (is32BitInt(t) && isSigned(t)) => (MOVWload ptr mem) +(Load ptr mem) && (is32BitInt(t) && !isSigned(t)) => (MOVWUload ptr mem) +(Load ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVVload ptr mem) +(Load ptr mem) && is32BitFloat(t) => (MOVFload ptr mem) +(Load ptr mem) && is64BitFloat(t) => (MOVDload ptr mem) + +// stores +(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 8 && !is64BitFloat(val.Type) => (MOVVstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (MOVFstore ptr val mem) +(Store {t} ptr val mem) && t.Size() == 8 && 
is64BitFloat(val.Type) => (MOVDstore ptr val mem) + +// zeroing +(Zero [0] _ mem) => mem +(Zero [1] ptr mem) => (MOVBstore ptr (MOVVconst [0]) mem) +(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 => + (MOVHstore ptr (MOVVconst [0]) mem) +(Zero [2] ptr mem) => + (MOVBstore [1] ptr (MOVVconst [0]) + (MOVBstore [0] ptr (MOVVconst [0]) mem)) +(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 => + (MOVWstore ptr (MOVVconst [0]) mem) +(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 => + (MOVHstore [2] ptr (MOVVconst [0]) + (MOVHstore [0] ptr (MOVVconst [0]) mem)) +(Zero [4] ptr mem) => + (MOVBstore [3] ptr (MOVVconst [0]) + (MOVBstore [2] ptr (MOVVconst [0]) + (MOVBstore [1] ptr (MOVVconst [0]) + (MOVBstore [0] ptr (MOVVconst [0]) mem)))) +(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 => + (MOVVstore ptr (MOVVconst [0]) mem) +(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 => + (MOVWstore [4] ptr (MOVVconst [0]) + (MOVWstore [0] ptr (MOVVconst [0]) mem)) +(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 => + (MOVHstore [6] ptr (MOVVconst [0]) + (MOVHstore [4] ptr (MOVVconst [0]) + (MOVHstore [2] ptr (MOVVconst [0]) + (MOVHstore [0] ptr (MOVVconst [0]) mem)))) + +(Zero [3] ptr mem) => + (MOVBstore [2] ptr (MOVVconst [0]) + (MOVBstore [1] ptr (MOVVconst [0]) + (MOVBstore [0] ptr (MOVVconst [0]) mem))) +(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 => + (MOVHstore [4] ptr (MOVVconst [0]) + (MOVHstore [2] ptr (MOVVconst [0]) + (MOVHstore [0] ptr (MOVVconst [0]) mem))) +(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 => + (MOVWstore [8] ptr (MOVVconst [0]) + (MOVWstore [4] ptr (MOVVconst [0]) + (MOVWstore [0] ptr (MOVVconst [0]) mem))) +(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 => + (MOVVstore [8] ptr (MOVVconst [0]) + (MOVVstore [0] ptr (MOVVconst [0]) mem)) +(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 => + (MOVVstore [16] ptr (MOVVconst [0]) + (MOVVstore [8] ptr (MOVVconst [0]) + (MOVVstore [0] ptr (MOVVconst [0]) mem))) + +// medium zeroing uses a duff device +// 8, and 128 are magic constants, see runtime/mkduff.go +(Zero [s] {t} ptr mem) + && s%8 == 0 && s > 24 && s <= 8*128 + && t.Alignment()%8 == 0 && !config.noDuffDevice => + (DUFFZERO [8 * (128 - s/8)] ptr mem) + +// large or unaligned zeroing uses a loop +(Zero [s] {t} ptr mem) + && (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0 => + (LoweredZero [t.Alignment()] + ptr + (ADDVconst ptr [s-moveSize(t.Alignment(), config)]) + mem) + +// moves +(Move [0] _ _ mem) => mem +(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem) +(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 => + (MOVHstore dst (MOVHload src mem) mem) +(Move [2] dst src mem) => + (MOVBstore [1] dst (MOVBload [1] src mem) + (MOVBstore dst (MOVBload src mem) mem)) +(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 => + (MOVWstore dst (MOVWload src mem) mem) +(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 => + (MOVHstore [2] dst (MOVHload [2] src mem) + (MOVHstore dst (MOVHload src mem) mem)) +(Move [4] dst src mem) => + (MOVBstore [3] dst (MOVBload [3] src mem) + (MOVBstore [2] dst (MOVBload [2] src mem) + (MOVBstore [1] dst (MOVBload [1] src mem) + (MOVBstore dst (MOVBload src mem) mem)))) +(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 => + (MOVVstore dst (MOVVload src mem) mem) +(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 => + (MOVWstore [4] dst (MOVWload [4] src mem) + (MOVWstore dst (MOVWload src mem) mem)) +(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 => + (MOVHstore [6] dst (MOVHload [6] src mem) + (MOVHstore [4] dst 
(MOVHload [4] src mem) + (MOVHstore [2] dst (MOVHload [2] src mem) + (MOVHstore dst (MOVHload src mem) mem)))) + +(Move [3] dst src mem) => + (MOVBstore [2] dst (MOVBload [2] src mem) + (MOVBstore [1] dst (MOVBload [1] src mem) + (MOVBstore dst (MOVBload src mem) mem))) +(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 => + (MOVHstore [4] dst (MOVHload [4] src mem) + (MOVHstore [2] dst (MOVHload [2] src mem) + (MOVHstore dst (MOVHload src mem) mem))) +(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 => + (MOVWstore [8] dst (MOVWload [8] src mem) + (MOVWstore [4] dst (MOVWload [4] src mem) + (MOVWstore dst (MOVWload src mem) mem))) +(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 => + (MOVVstore [8] dst (MOVVload [8] src mem) + (MOVVstore dst (MOVVload src mem) mem)) +(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 => + (MOVVstore [16] dst (MOVVload [16] src mem) + (MOVVstore [8] dst (MOVVload [8] src mem) + (MOVVstore dst (MOVVload src mem) mem))) + +// medium move uses a duff device +(Move [s] {t} dst src mem) + && s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 + && !config.noDuffDevice && logLargeCopy(v, s) => + (DUFFCOPY [16 * (128 - s/8)] dst src mem) +// 16 and 128 are magic constants. 16 is the number of bytes to encode: +// MOVV (R1), R23 +// ADDV $8, R1 +// MOVV R23, (R2) +// ADDV $8, R2 +// and 128 is the number of such blocks. See runtime/duff_mips64.s:duffcopy. + +// large or unaligned move uses a loop +(Move [s] {t} dst src mem) + && s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0 => + (LoweredMove [t.Alignment()] + dst + src + (ADDVconst src [s-moveSize(t.Alignment(), config)]) + mem) + +// calls +(StaticCall ...) => (CALLstatic ...) +(ClosureCall ...) => (CALLclosure ...) +(InterCall ...) => (CALLinter ...) +(TailCall ...) => (CALLtail ...) + +// atomic intrinsics +(AtomicLoad(8|32|64) ...) => (LoweredAtomicLoad(8|32|64) ...) +(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...) + +(AtomicStore(8|32|64) ...) => (LoweredAtomicStore(8|32|64) ...) +(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...) + +(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...) + +(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...) + +(AtomicCompareAndSwap(32|64) ...) => (LoweredAtomicCas(32|64) ...) + +// checks +(NilCheck ...) => (LoweredNilCheck ...) +(IsNonNil ptr) => (SGTU ptr (MOVVconst [0])) +(IsInBounds idx len) => (SGTU len idx) +(IsSliceInBounds idx len) => (XOR (MOVVconst [1]) (SGTU idx len)) + +// pseudo-ops +(GetClosurePtr ...) => (LoweredGetClosurePtr ...) +(GetCallerSP ...) => (LoweredGetCallerSP ...) +(GetCallerPC ...) => (LoweredGetCallerPC ...) + +(If cond yes no) => (NE cond yes no) + +// Write barrier. +(WB ...) => (LoweredWB ...) 
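The DUFFZERO and DUFFCOPY auxInts computed in the Zero and Move rules above are byte offsets into the runtime's Duff's-device routines: each of the 128 blocks handles 8 bytes of data; duffcopy spends 16 bytes of code per block (the four instructions listed in the comment), and duffzero presumably 8, which is where the factors in 8 * (128 - s/8) and 16 * (128 - s/8) come from. Jumping to that offset skips the leading blocks so that only the last s/8 blocks execute. A small standalone sketch of the arithmetic follows; the helper names are illustrative only and are not part of the patch.

```go
// Illustrative helpers (not part of the patch) showing how the Zero/Move
// rules compute the DUFFZERO/DUFFCOPY auxInt: the offset into the Duff
// routine at which execution starts, so that only the last s/8 of the
// 128 blocks run. 8 and 16 are the code bytes per block in duffzero and
// duffcopy respectively (see runtime/mkduff.go).
package main

import "fmt"

func duffzeroOffset(s int64) int64 { return 8 * (128 - s/8) }
func duffcopyOffset(s int64) int64 { return 16 * (128 - s/8) }

func main() {
	for _, s := range []int64{32, 512, 1024} {
		fmt.Printf("s=%4d bytes: DUFFZERO aux=%4d, DUFFCOPY aux=%5d\n",
			s, duffzeroOffset(s), duffcopyOffset(s))
	}
}
```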
+ +(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem) +(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem) + +// Optimizations + +// Absorb boolean tests into block +(NE (FPFlagTrue cmp) yes no) => (FPT cmp yes no) +(NE (FPFlagFalse cmp) yes no) => (FPF cmp yes no) +(EQ (FPFlagTrue cmp) yes no) => (FPF cmp yes no) +(EQ (FPFlagFalse cmp) yes no) => (FPT cmp yes no) +(NE (XORconst [1] cmp:(SGT _ _)) yes no) => (EQ cmp yes no) +(NE (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQ cmp yes no) +(NE (XORconst [1] cmp:(SGTconst _)) yes no) => (EQ cmp yes no) +(NE (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQ cmp yes no) +(EQ (XORconst [1] cmp:(SGT _ _)) yes no) => (NE cmp yes no) +(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NE cmp yes no) +(EQ (XORconst [1] cmp:(SGTconst _)) yes no) => (NE cmp yes no) +(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NE cmp yes no) +(NE (SGTUconst [1] x) yes no) => (EQ x yes no) +(EQ (SGTUconst [1] x) yes no) => (NE x yes no) +(NE (SGTU x (MOVVconst [0])) yes no) => (NE x yes no) +(EQ (SGTU x (MOVVconst [0])) yes no) => (EQ x yes no) +(NE (SGTconst [0] x) yes no) => (LTZ x yes no) +(EQ (SGTconst [0] x) yes no) => (GEZ x yes no) +(NE (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no) +(EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no) + +// fold offset into address +(ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) => (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr) + +// fold address into load/store +(MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBload [off1+int32(off2)] {sym} ptr mem) +(MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBUload [off1+int32(off2)] {sym} ptr mem) +(MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHload [off1+int32(off2)] {sym} ptr mem) +(MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHUload [off1+int32(off2)] {sym} ptr mem) +(MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWload [off1+int32(off2)] {sym} ptr mem) +(MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWUload [off1+int32(off2)] {sym} ptr mem) +(MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVload [off1+int32(off2)] {sym} ptr mem) +(MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVFload [off1+int32(off2)] {sym} ptr mem) +(MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVDload [off1+int32(off2)] {sym} ptr mem) + +(MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVBstore [off1+int32(off2)] {sym} ptr val mem) +(MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVHstore [off1+int32(off2)] {sym} ptr val mem) +(MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVWstore [off1+int32(off2)] {sym} ptr val mem) +(MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVVstore [off1+int32(off2)] {sym} ptr val mem) +(MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVFstore [off1+int32(off2)] {sym} ptr val mem) +(MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val 
mem) && is32Bit(int64(off1)+off2) => (MOVDstore [off1+int32(off2)] {sym} ptr val mem) +(MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem) +(MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem) +(MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem) +(MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVstorezero [off1+int32(off2)] {sym} ptr mem) + +(MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +(MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +(MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +(MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +(MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +(MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +(MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +(MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +(MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + +(MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) +(MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) +(MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) +(MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) +(MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) +(MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) +(MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} 
ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +(MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +(MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +(MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => + (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + +(LoweredAtomicStore(32|64) ptr (MOVVconst [0]) mem) => (LoweredAtomicStorezero(32|64) ptr mem) +(LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst32 [int32(c)] ptr mem) +(LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst64 [c] ptr mem) + +// don't extend after proper load +(MOVBreg x:(MOVBload _ _)) => (MOVVreg x) +(MOVBUreg x:(MOVBUload _ _)) => (MOVVreg x) +(MOVHreg x:(MOVBload _ _)) => (MOVVreg x) +(MOVHreg x:(MOVBUload _ _)) => (MOVVreg x) +(MOVHreg x:(MOVHload _ _)) => (MOVVreg x) +(MOVHUreg x:(MOVBUload _ _)) => (MOVVreg x) +(MOVHUreg x:(MOVHUload _ _)) => (MOVVreg x) +(MOVWreg x:(MOVBload _ _)) => (MOVVreg x) +(MOVWreg x:(MOVBUload _ _)) => (MOVVreg x) +(MOVWreg x:(MOVHload _ _)) => (MOVVreg x) +(MOVWreg x:(MOVHUload _ _)) => (MOVVreg x) +(MOVWreg x:(MOVWload _ _)) => (MOVVreg x) +(MOVWUreg x:(MOVBUload _ _)) => (MOVVreg x) +(MOVWUreg x:(MOVHUload _ _)) => (MOVVreg x) +(MOVWUreg x:(MOVWUload _ _)) => (MOVVreg x) + +// fold double extensions +(MOVBreg x:(MOVBreg _)) => (MOVVreg x) +(MOVBUreg x:(MOVBUreg _)) => (MOVVreg x) +(MOVHreg x:(MOVBreg _)) => (MOVVreg x) +(MOVHreg x:(MOVBUreg _)) => (MOVVreg x) +(MOVHreg x:(MOVHreg _)) => (MOVVreg x) +(MOVHUreg x:(MOVBUreg _)) => (MOVVreg x) +(MOVHUreg x:(MOVHUreg _)) => (MOVVreg x) +(MOVWreg x:(MOVBreg _)) => (MOVVreg x) +(MOVWreg x:(MOVBUreg _)) => (MOVVreg x) +(MOVWreg x:(MOVHreg _)) => (MOVVreg x) +(MOVWreg x:(MOVWreg _)) => (MOVVreg x) +(MOVWUreg x:(MOVBUreg _)) => (MOVVreg x) +(MOVWUreg x:(MOVHUreg _)) => (MOVVreg x) +(MOVWUreg x:(MOVWUreg _)) => (MOVVreg x) + +// don't extend before store +(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem) +(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem) +(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem) + +// if a register move has only 1 use, just use the same register without emitting instruction +// MOVVnop doesn't emit instruction, only for ensuring the type. 
+(MOVVreg x) && x.Uses == 1 => (MOVVnop x) + +// fold constant into arithmetic ops +(ADDV x (MOVVconst [c])) && is32Bit(c) => (ADDVconst [c] x) +(SUBV x (MOVVconst [c])) && is32Bit(c) => (SUBVconst [c] x) +(AND x (MOVVconst [c])) && is32Bit(c) => (ANDconst [c] x) +(OR x (MOVVconst [c])) && is32Bit(c) => (ORconst [c] x) +(XOR x (MOVVconst [c])) && is32Bit(c) => (XORconst [c] x) +(NOR x (MOVVconst [c])) && is32Bit(c) => (NORconst [c] x) + +(SLLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0]) +(SRLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0]) +(SRAV x (MOVVconst [c])) && uint64(c)>=64 => (SRAVconst x [63]) +(SLLV x (MOVVconst [c])) => (SLLVconst x [c]) +(SRLV x (MOVVconst [c])) => (SRLVconst x [c]) +(SRAV x (MOVVconst [c])) => (SRAVconst x [c]) + +(SGT (MOVVconst [c]) x) && is32Bit(c) => (SGTconst [c] x) +(SGTU (MOVVconst [c]) x) && is32Bit(c) => (SGTUconst [c] x) + +// mul by constant +(Select1 (MULVU x (MOVVconst [-1]))) => (NEGV x) +(Select1 (MULVU _ (MOVVconst [0]))) => (MOVVconst [0]) +(Select1 (MULVU x (MOVVconst [1]))) => x +(Select1 (MULVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (SLLVconst [log64(c)] x) + +// div by constant +(Select1 (DIVVU x (MOVVconst [1]))) => x +(Select1 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (SRLVconst [log64(c)] x) +(Select0 (DIVVU _ (MOVVconst [1]))) => (MOVVconst [0]) // mod +(Select0 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (ANDconst [c-1] x) // mod + +// generic simplifications +(ADDV x (NEGV y)) => (SUBV x y) +(SUBV x x) => (MOVVconst [0]) +(SUBV (MOVVconst [0]) x) => (NEGV x) +(AND x x) => x +(OR x x) => x +(XOR x x) => (MOVVconst [0]) + +// remove redundant *const ops +(ADDVconst [0] x) => x +(SUBVconst [0] x) => x +(ANDconst [0] _) => (MOVVconst [0]) +(ANDconst [-1] x) => x +(ORconst [0] x) => x +(ORconst [-1] _) => (MOVVconst [-1]) +(XORconst [0] x) => x +(XORconst [-1] x) => (NORconst [0] x) + +// generic constant folding +(ADDVconst [c] (MOVVconst [d])) => (MOVVconst [c+d]) +(ADDVconst [c] (ADDVconst [d] x)) && is32Bit(c+d) => (ADDVconst [c+d] x) +(ADDVconst [c] (SUBVconst [d] x)) && is32Bit(c-d) => (ADDVconst [c-d] x) +(SUBVconst [c] (MOVVconst [d])) => (MOVVconst [d-c]) +(SUBVconst [c] (SUBVconst [d] x)) && is32Bit(-c-d) => (ADDVconst [-c-d] x) +(SUBVconst [c] (ADDVconst [d] x)) && is32Bit(-c+d) => (ADDVconst [-c+d] x) +(SLLVconst [c] (MOVVconst [d])) => (MOVVconst [d<<uint64(c)]) +(SRLVconst [c] (MOVVconst [d])) => (MOVVconst [int64(uint64(d)>>uint64(c))]) +(SRAVconst [c] (MOVVconst [d])) => (MOVVconst [d>>uint64(c)]) +(Select1 (MULVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c*d]) +(Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c/d]) +(Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [int64(uint64(c)/uint64(d))]) +(Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c%d]) // mod +(Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [int64(uint64(c)%uint64(d))]) // mod +(ANDconst [c] (MOVVconst [d])) => (MOVVconst [c&d]) +(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x) +(ORconst [c] (MOVVconst [d])) => (MOVVconst [c|d]) +(ORconst [c] (ORconst [d] x)) && is32Bit(c|d) => (ORconst [c|d] x) +(XORconst [c] (MOVVconst [d])) => (MOVVconst [c^d]) +(XORconst [c] (XORconst [d] x)) && is32Bit(c^d) => (XORconst [c^d] x) +(NORconst [c] (MOVVconst [d])) => (MOVVconst [^(c|d)]) +(NEGV (MOVVconst [c])) => (MOVVconst [-c]) +(MOVBreg (MOVVconst [c])) => (MOVVconst [int64(int8(c))]) +(MOVBUreg (MOVVconst [c])) => (MOVVconst [int64(uint8(c))]) +(MOVHreg (MOVVconst [c])) => (MOVVconst [int64(int16(c))]) 
+(MOVHUreg (MOVVconst [c])) => (MOVVconst [int64(uint16(c))]) +(MOVWreg (MOVVconst [c])) => (MOVVconst [int64(int32(c))]) +(MOVWUreg (MOVVconst [c])) => (MOVVconst [int64(uint32(c))]) +(MOVVreg (MOVVconst [c])) => (MOVVconst [c]) + +// constant comparisons +(SGTconst [c] (MOVVconst [d])) && c>d => (MOVVconst [1]) +(SGTconst [c] (MOVVconst [d])) && c<=d => (MOVVconst [0]) +(SGTUconst [c] (MOVVconst [d])) && uint64(c)>uint64(d) => (MOVVconst [1]) +(SGTUconst [c] (MOVVconst [d])) && uint64(c)<=uint64(d) => (MOVVconst [0]) + +// other known comparisons +(SGTconst [c] (MOVBreg _)) && 0x7f < c => (MOVVconst [1]) +(SGTconst [c] (MOVBreg _)) && c <= -0x80 => (MOVVconst [0]) +(SGTconst [c] (MOVBUreg _)) && 0xff < c => (MOVVconst [1]) +(SGTconst [c] (MOVBUreg _)) && c < 0 => (MOVVconst [0]) +(SGTUconst [c] (MOVBUreg _)) && 0xff < uint64(c) => (MOVVconst [1]) +(SGTconst [c] (MOVHreg _)) && 0x7fff < c => (MOVVconst [1]) +(SGTconst [c] (MOVHreg _)) && c <= -0x8000 => (MOVVconst [0]) +(SGTconst [c] (MOVHUreg _)) && 0xffff < c => (MOVVconst [1]) +(SGTconst [c] (MOVHUreg _)) && c < 0 => (MOVVconst [0]) +(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint64(c) => (MOVVconst [1]) +(SGTconst [c] (MOVWUreg _)) && c < 0 => (MOVVconst [0]) +(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c => (MOVVconst [1]) +(SGTUconst [c] (ANDconst [m] _)) && uint64(m) < uint64(c) => (MOVVconst [1]) +(SGTconst [c] (SRLVconst _ [d])) && 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1]) +(SGTUconst [c] (SRLVconst _ [d])) && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1]) + +// absorb constants into branches +(EQ (MOVVconst [0]) yes no) => (First yes no) +(EQ (MOVVconst [c]) yes no) && c != 0 => (First no yes) +(NE (MOVVconst [0]) yes no) => (First no yes) +(NE (MOVVconst [c]) yes no) && c != 0 => (First yes no) +(LTZ (MOVVconst [c]) yes no) && c < 0 => (First yes no) +(LTZ (MOVVconst [c]) yes no) && c >= 0 => (First no yes) +(LEZ (MOVVconst [c]) yes no) && c <= 0 => (First yes no) +(LEZ (MOVVconst [c]) yes no) && c > 0 => (First no yes) +(GTZ (MOVVconst [c]) yes no) && c > 0 => (First yes no) +(GTZ (MOVVconst [c]) yes no) && c <= 0 => (First no yes) +(GEZ (MOVVconst [c]) yes no) && c >= 0 => (First yes no) +(GEZ (MOVVconst [c]) yes no) && c < 0 => (First no yes) diff --git a/src/cmd/compile/internal/ssa/gen/LOONG64Ops.go b/src/cmd/compile/internal/ssa/gen/LOONG64Ops.go new file mode 100644 index 0000000000..e06ad166bb --- /dev/null +++ b/src/cmd/compile/internal/ssa/gen/LOONG64Ops.go @@ -0,0 +1,480 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore +// +build ignore + +package main + +import "strings" + +// Notes: +// - Integer types live in the low portion of registers. Upper portions are junk. +// - Boolean types use the low-order byte of a register. 0=false, 1=true. +// Upper bytes are junk. +// - *const instructions may use a constant larger than the instruction can encode. +// In this case the assembler expands to multiple instructions and uses tmp +// register (R23). + +// Suffixes encode the bit width of various instructions. 
+// V (vlong) = 64 bit +// WU (word) = 32 bit unsigned +// W (word) = 32 bit +// H (half word) = 16 bit +// HU = 16 bit unsigned +// B (byte) = 8 bit +// BU = 8 bit unsigned +// F (float) = 32 bit float +// D (double) = 64 bit float + +// Note: registers not used in regalloc are not included in this list, +// so that regmask stays within int64 +// Be careful when hand coding regmasks. +var regNamesLOONG64 = []string{ + "R0", // constant 0 + "R1", + "SP", // aka R3 + "R4", + "R5", + "R6", + "R7", + "R8", + "R9", + "R10", + "R11", + "R12", + "R13", + "R14", + "R15", + "R16", + "R17", + "R18", + "R19", + "R20", + "R21", + "g", // aka R22 + "R23", + "R24", + "R25", + "R26", + "R27", + "R28", + "R29", + // R30 is REGTMP not used in regalloc + "R31", + + "F0", + "F1", + "F2", + "F3", + "F4", + "F5", + "F6", + "F7", + "F8", + "F9", + "F10", + "F11", + "F12", + "F13", + "F14", + "F15", + "F16", + "F17", + "F18", + "F19", + "F20", + "F21", + "F22", + "F23", + "F24", + "F25", + "F26", + "F27", + "F28", + "F29", + "F30", + "F31", + + // If you add registers, update asyncPreempt in runtime. + + // pseudo-registers + "SB", +} + +func init() { + // Make map from reg names to reg integers. + if len(regNamesLOONG64) > 64 { + panic("too many registers") + } + num := map[string]int{} + for i, name := range regNamesLOONG64 { + num[name] = i + } + buildReg := func(s string) regMask { + m := regMask(0) + for _, r := range strings.Split(s, " ") { + if n, ok := num[r]; ok { + m |= regMask(1) << uint(n) + continue + } + panic("register " + r + " not found") + } + return m + } + + // Common individual register masks + var ( + gp = buildReg("R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31") // R1 is LR, R2 is thread pointer, R3 is stack pointer, R21-unused, R22 is g, R30 is REGTMP + gps = buildReg("R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31") | buildReg("g") + gpg = gp | buildReg("g") + gpsp = gp | buildReg("SP") + gpspg = gpg | buildReg("SP") + gpspsbg = gpspg | buildReg("SB") + fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31") + callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g + r1 = buildReg("R19") + r2 = buildReg("R18") + r3 = buildReg("R17") + r4 = buildReg("R4") + ) + // Common regInfo + var ( + gp01 = regInfo{inputs: nil, outputs: []regMask{gp}} + gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}} + gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}} + gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}} + gpmuldiv = regInfo{inputs: []regMask{gps, gps}, outputs: []regMask{buildReg("R17"), buildReg("R18")}} + gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}} + gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}} + gpstore0 = regInfo{inputs: []regMask{gpspsbg}} + gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}} + gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}} + fp01 = regInfo{inputs: nil, outputs: []regMask{fp}} + fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}} + fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}} + fp2flags = regInfo{inputs: []regMask{fp, fp}} + fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}} + fpstore = regInfo{inputs: []regMask{gpspsbg, fp}} + readflags = regInfo{inputs: nil, outputs: []regMask{gp}} + ) + ops := 
[]opData{ + // binary ops + {name: "ADDV", argLength: 2, reg: gp21, asm: "ADDVU", commutative: true}, // arg0 + arg1 + {name: "ADDVconst", argLength: 1, reg: gp11sp, asm: "ADDVU", aux: "Int64"}, // arg0 + auxInt. auxInt is 32-bit, also in other *const ops. + {name: "SUBV", argLength: 2, reg: gp21, asm: "SUBVU"}, // arg0 - arg1 + {name: "SUBVconst", argLength: 1, reg: gp11, asm: "SUBVU", aux: "Int64"}, // arg0 - auxInt + + {name: "MULV", argLength: 2, reg: gpmuldiv, commutative: true, typ: "(Int64,Int64)"}, // arg0 * arg1, signed + {name: "MULVU", argLength: 2, reg: gpmuldiv, commutative: true, typ: "(UInt64,UInt64)"}, // arg0 * arg1, unsigned + {name: "DIVV", argLength: 2, reg: gpmuldiv, typ: "(Int64,Int64)"}, // arg0 / arg1, signed + {name: "DIVVU", argLength: 2, reg: gpmuldiv, typ: "(UInt64,UInt64)"}, // arg0 / arg1, unsigned + + {name: "ADDF", argLength: 2, reg: fp21, asm: "ADDF", commutative: true}, // arg0 + arg1 + {name: "ADDD", argLength: 2, reg: fp21, asm: "ADDD", commutative: true}, // arg0 + arg1 + {name: "SUBF", argLength: 2, reg: fp21, asm: "SUBF"}, // arg0 - arg1 + {name: "SUBD", argLength: 2, reg: fp21, asm: "SUBD"}, // arg0 - arg1 + {name: "MULF", argLength: 2, reg: fp21, asm: "MULF", commutative: true}, // arg0 * arg1 + {name: "MULD", argLength: 2, reg: fp21, asm: "MULD", commutative: true}, // arg0 * arg1 + {name: "DIVF", argLength: 2, reg: fp21, asm: "DIVF"}, // arg0 / arg1 + {name: "DIVD", argLength: 2, reg: fp21, asm: "DIVD"}, // arg0 / arg1 + + {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1 + {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int64"}, // arg0 & auxInt + {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0 | arg1 + {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0 | auxInt + {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true, typ: "UInt64"}, // arg0 ^ arg1 + {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64", typ: "UInt64"}, // arg0 ^ auxInt + {name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0 | arg1) + {name: "NORconst", argLength: 1, reg: gp11, asm: "NOR", aux: "Int64"}, // ^(arg0 | auxInt) + + {name: "NEGV", argLength: 1, reg: gp11}, // -arg0 + {name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32 + {name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64 + {name: "SQRTD", argLength: 1, reg: fp11, asm: "SQRTD"}, // sqrt(arg0), float64 + {name: "SQRTF", argLength: 1, reg: fp11, asm: "SQRTF"}, // sqrt(arg0), float32 + + // shifts + {name: "SLLV", argLength: 2, reg: gp21, asm: "SLLV"}, // arg0 << arg1, shift amount is mod 64 + {name: "SLLVconst", argLength: 1, reg: gp11, asm: "SLLV", aux: "Int64"}, // arg0 << auxInt + {name: "SRLV", argLength: 2, reg: gp21, asm: "SRLV"}, // arg0 >> arg1, unsigned, shift amount is mod 64 + {name: "SRLVconst", argLength: 1, reg: gp11, asm: "SRLV", aux: "Int64"}, // arg0 >> auxInt, unsigned + {name: "SRAV", argLength: 2, reg: gp21, asm: "SRAV"}, // arg0 >> arg1, signed, shift amount is mod 64 + {name: "SRAVconst", argLength: 1, reg: gp11, asm: "SRAV", aux: "Int64"}, // arg0 >> auxInt, signed + + // comparisons + {name: "SGT", argLength: 2, reg: gp21, asm: "SGT", typ: "Bool"}, // 1 if arg0 > arg1 (signed), 0 otherwise + {name: "SGTconst", argLength: 1, reg: gp11, asm: "SGT", aux: "Int64", typ: "Bool"}, // 1 if auxInt > arg0 (signed), 0 otherwise + {name: "SGTU", argLength: 2, reg: gp21, asm: "SGTU", typ: "Bool"}, // 1 if arg0 > 
arg1 (unsigned), 0 otherwise + {name: "SGTUconst", argLength: 1, reg: gp11, asm: "SGTU", aux: "Int64", typ: "Bool"}, // 1 if auxInt > arg0 (unsigned), 0 otherwise + + {name: "CMPEQF", argLength: 2, reg: fp2flags, asm: "CMPEQF", typ: "Flags"}, // flags=true if arg0 = arg1, float32 + {name: "CMPEQD", argLength: 2, reg: fp2flags, asm: "CMPEQD", typ: "Flags"}, // flags=true if arg0 = arg1, float64 + {name: "CMPGEF", argLength: 2, reg: fp2flags, asm: "CMPGEF", typ: "Flags"}, // flags=true if arg0 >= arg1, float32 + {name: "CMPGED", argLength: 2, reg: fp2flags, asm: "CMPGED", typ: "Flags"}, // flags=true if arg0 >= arg1, float64 + {name: "CMPGTF", argLength: 2, reg: fp2flags, asm: "CMPGTF", typ: "Flags"}, // flags=true if arg0 > arg1, float32 + {name: "CMPGTD", argLength: 2, reg: fp2flags, asm: "CMPGTD", typ: "Flags"}, // flags=true if arg0 > arg1, float64 + + // moves + {name: "MOVVconst", argLength: 0, reg: gp01, aux: "Int64", asm: "MOVV", typ: "UInt64", rematerializeable: true}, // auxint + {name: "MOVFconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVF", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float + {name: "MOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float + + {name: "MOVVaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVV", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB + + {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVWUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVWU", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVVload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVV", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVFload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVF", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + {name: "MOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVD", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem. + + {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem. + {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. 
+ {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + {name: "MOVVstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + {name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + {name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem. + + {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem. + {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem. + {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of zero to arg0 + auxInt + aux. arg1=mem. + {name: "MOVVstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of zero to arg0 + auxInt + aux. ar12=mem. + + // conversions + {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte + {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte + {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half + {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half + {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0, sign-extended from word + {name: "MOVWUreg", argLength: 1, reg: gp11, asm: "MOVWU"}, // move from arg0, unsign-extended from word + {name: "MOVVreg", argLength: 1, reg: gp11, asm: "MOVV"}, // move from arg0 + + {name: "MOVVnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register + + {name: "MOVWF", argLength: 1, reg: fp11, asm: "MOVWF"}, // int32 -> float32 + {name: "MOVWD", argLength: 1, reg: fp11, asm: "MOVWD"}, // int32 -> float64 + {name: "MOVVF", argLength: 1, reg: fp11, asm: "MOVVF"}, // int64 -> float32 + {name: "MOVVD", argLength: 1, reg: fp11, asm: "MOVVD"}, // int64 -> float64 + {name: "TRUNCFW", argLength: 1, reg: fp11, asm: "TRUNCFW"}, // float32 -> int32 + {name: "TRUNCDW", argLength: 1, reg: fp11, asm: "TRUNCDW"}, // float64 -> int32 + {name: "TRUNCFV", argLength: 1, reg: fp11, asm: "TRUNCFV"}, // float32 -> int64 + {name: "TRUNCDV", argLength: 1, reg: fp11, asm: "TRUNCDV"}, // float64 -> int64 + {name: "MOVFD", argLength: 1, reg: fp11, asm: "MOVFD"}, // float32 -> float64 + {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32 + + // function calls + {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). 
arg0=mem, auxint=argsize, returns mem + {name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R29"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem + + // duffzero + // arg0 = address of memory to zero + // arg1 = mem + // auxint = offset into duffzero code to start executing + // returns mem + // R19 aka loong64.REGRT1 changed as side effect + { + name: "DUFFZERO", + aux: "Int64", + argLength: 2, + reg: regInfo{ + inputs: []regMask{gp}, + clobbers: buildReg("R19 R1"), + }, + faultOnNilArg0: true, + }, + + // duffcopy + // arg0 = address of dst memory (in R20, changed as side effect) REGRT2 + // arg1 = address of src memory (in R19, changed as side effect) REGRT1 + // arg2 = mem + // auxint = offset into duffcopy code to start executing + // returns mem + { + name: "DUFFCOPY", + aux: "Int64", + argLength: 3, + reg: regInfo{ + inputs: []regMask{buildReg("R20"), buildReg("R19")}, + clobbers: buildReg("R19 R20 R1"), + }, + faultOnNilArg0: true, + faultOnNilArg1: true, + }, + + // large or unaligned zeroing + // arg0 = address of memory to zero (in R19, changed as side effect) + // arg1 = address of the last element to zero + // arg2 = mem + // auxint = alignment + // returns mem + // SUBV $8, R19 + // MOVV R0, 8(R19) + // ADDV $8, R19 + // BNE Rarg1, R19, -2(PC) + { + name: "LoweredZero", + aux: "Int64", + argLength: 3, + reg: regInfo{ + inputs: []regMask{buildReg("R19"), gp}, + clobbers: buildReg("R19"), + }, + clobberFlags: true, + faultOnNilArg0: true, + }, + + // large or unaligned move + // arg0 = address of dst memory (in R4, changed as side effect) + // arg1 = address of src memory (in R19, changed as side effect) + // arg2 = address of the last element of src + // arg3 = mem + // auxint = alignment + // returns mem + // SUBV $8, R19 + // MOVV 8(R19), Rtmp + // MOVV Rtmp, (R4) + // ADDV $8, R19 + // ADDV $8, R4 + // BNE Rarg2, R19, -4(PC) + { + name: "LoweredMove", + aux: "Int64", + argLength: 4, + reg: regInfo{ + inputs: []regMask{buildReg("R4"), buildReg("R19"), gp}, + clobbers: buildReg("R19 R4"), + }, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + }, + + // atomic loads. + // load from arg0. arg1=mem. + // returns so they can be properly ordered with other loads. + {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, faultOnNilArg0: true}, + {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, faultOnNilArg0: true}, + {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, faultOnNilArg0: true}, + + // atomic stores. + // store arg1 to arg0. arg2=mem. returns memory. + {name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true}, + // store zero to arg0. arg1=mem. returns memory. 
+ {name: "LoweredAtomicStorezero32", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true}, + {name: "LoweredAtomicStorezero64", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true}, + + // atomic exchange. + // store arg1 to arg0. arg2=mem. returns . + // DBAR + // LL (Rarg0), Rout + // MOVV Rarg1, Rtmp + // SC Rtmp, (Rarg0) + // BEQ Rtmp, -3(PC) + // DBAR + {name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + + // atomic add. + // *arg0 += arg1. arg2=mem. returns . + // DBAR + // LL (Rarg0), Rout + // ADDV Rarg1, Rout, Rtmp + // SC Rtmp, (Rarg0) + // BEQ Rtmp, -3(PC) + // DBAR + // ADDV Rarg1, Rout + {name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + // *arg0 += auxint. arg1=mem. returns . auxint is 32-bit. + {name: "LoweredAtomicAddconst32", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int32", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicAddconst64", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int64", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + + // atomic compare and swap. + // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. + // if *arg0 == arg1 { + // *arg0 = arg2 + // return (true, memory) + // } else { + // return (false, memory) + // } + // DBAR + // MOVV $0, Rout + // LL (Rarg0), Rtmp + // BNE Rtmp, Rarg1, 4(PC) + // MOVV Rarg2, Rout + // SC Rout, (Rarg0) + // BEQ Rout, -4(PC) + // DBAR + {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true}, + + // pseudo-ops + {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem. + + {name: "FPFlagTrue", argLength: 1, reg: readflags}, // bool, true if FP flag is true + {name: "FPFlagFalse", argLength: 1, reg: readflags}, // bool, true if FP flag is false + + // Scheduler ensures LoweredGetClosurePtr occurs only in entry block, + // and sorts it to the very beginning of the block to prevent other + // use of R22 (loong64.REGCTXT, the closure pointer) + {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R29")}}, zeroWidth: true}, + + // LoweredGetCallerSP returns the SP of the caller of the current function. + {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true}, + + // LoweredGetCallerPC evaluates to the PC to which its "caller" will return. + // I.e., if f calls g "calls" getcallerpc, + // the result should be the PC within f that g will return to. + // See runtime/stubs.go for a more detailed discussion. + {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true}, + + // LoweredWB invokes runtime.gcWriteBarrier. 
arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier + // It saves all GP registers if necessary, + // but clobbers R1 (LR) because it's a call + // and R30 (REGTMP). + {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("R27"), buildReg("R28")}, clobbers: (callerSave &^ gpg) | buildReg("R1")}, clobberFlags: true, aux: "Sym", symEffect: "None"}, + + // There are three of these functions so that they can have three different register inputs. + // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the + // default registers to match so we don't need to copy registers around unnecessarily. + {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). + {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). + {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). + } + + blocks := []blockData{ + {name: "EQ", controls: 1}, + {name: "NE", controls: 1}, + {name: "LTZ", controls: 1}, // < 0 + {name: "LEZ", controls: 1}, // <= 0 + {name: "GTZ", controls: 1}, // > 0 + {name: "GEZ", controls: 1}, // >= 0 + {name: "FPT", controls: 1}, // FP flag is true + {name: "FPF", controls: 1}, // FP flag is false + } + + archs = append(archs, arch{ + name: "LOONG64", + pkg: "cmd/internal/obj/loong64", + genfile: "../../loong64/ssa.go", + ops: ops, + blocks: blocks, + regnames: regNamesLOONG64, + // TODO: support register ABI on loong64 + ParamIntRegNames: "R4 R5 R6 R7 R8 R9 R10 R11", + ParamFloatRegNames: "F0 F1 F2 F3 F4 F5 F6 F7", + gpregmask: gp, + fpregmask: fp, + framepointerreg: -1, // not used + linkreg: int8(num["R1"]), + }) +} diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index e43de07a6d..ac879faa61 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -6,6 +6,7 @@ import ( "cmd/internal/obj" "cmd/internal/obj/arm" "cmd/internal/obj/arm64" + "cmd/internal/obj/loong64" "cmd/internal/obj/mips" "cmd/internal/obj/ppc64" "cmd/internal/obj/riscv" @@ -92,6 +93,15 @@ const ( BlockARM64GTnoov BlockARM64GEnoov + BlockLOONG64EQ + BlockLOONG64NE + BlockLOONG64LTZ + BlockLOONG64LEZ + BlockLOONG64GTZ + BlockLOONG64GEZ + BlockLOONG64FPT + BlockLOONG64FPF + BlockMIPSEQ BlockMIPSNE BlockMIPSLTZ @@ -232,6 +242,15 @@ var blockString = [...]string{ BlockARM64GTnoov: "GTnoov", BlockARM64GEnoov: "GEnoov", + BlockLOONG64EQ: "EQ", + BlockLOONG64NE: "NE", + BlockLOONG64LTZ: "LTZ", + BlockLOONG64LEZ: "LEZ", + BlockLOONG64GTZ: "GTZ", + BlockLOONG64GEZ: "GEZ", + BlockLOONG64FPT: "FPT", + BlockLOONG64FPF: "FPF", + BlockMIPSEQ: "EQ", BlockMIPSNE: "NE", BlockMIPSLTZ: "LTZ", @@ -1687,6 +1706,127 @@ const ( OpARM64PRFM OpARM64DMB + OpLOONG64ADDV + OpLOONG64ADDVconst + OpLOONG64SUBV + OpLOONG64SUBVconst + OpLOONG64MULV + OpLOONG64MULVU + OpLOONG64DIVV + OpLOONG64DIVVU + OpLOONG64ADDF + OpLOONG64ADDD + OpLOONG64SUBF + OpLOONG64SUBD + OpLOONG64MULF + OpLOONG64MULD + OpLOONG64DIVF + OpLOONG64DIVD + OpLOONG64AND + OpLOONG64ANDconst + OpLOONG64OR + 
OpLOONG64ORconst + OpLOONG64XOR + OpLOONG64XORconst + OpLOONG64NOR + OpLOONG64NORconst + OpLOONG64NEGV + OpLOONG64NEGF + OpLOONG64NEGD + OpLOONG64SQRTD + OpLOONG64SQRTF + OpLOONG64SLLV + OpLOONG64SLLVconst + OpLOONG64SRLV + OpLOONG64SRLVconst + OpLOONG64SRAV + OpLOONG64SRAVconst + OpLOONG64SGT + OpLOONG64SGTconst + OpLOONG64SGTU + OpLOONG64SGTUconst + OpLOONG64CMPEQF + OpLOONG64CMPEQD + OpLOONG64CMPGEF + OpLOONG64CMPGED + OpLOONG64CMPGTF + OpLOONG64CMPGTD + OpLOONG64MOVVconst + OpLOONG64MOVFconst + OpLOONG64MOVDconst + OpLOONG64MOVVaddr + OpLOONG64MOVBload + OpLOONG64MOVBUload + OpLOONG64MOVHload + OpLOONG64MOVHUload + OpLOONG64MOVWload + OpLOONG64MOVWUload + OpLOONG64MOVVload + OpLOONG64MOVFload + OpLOONG64MOVDload + OpLOONG64MOVBstore + OpLOONG64MOVHstore + OpLOONG64MOVWstore + OpLOONG64MOVVstore + OpLOONG64MOVFstore + OpLOONG64MOVDstore + OpLOONG64MOVBstorezero + OpLOONG64MOVHstorezero + OpLOONG64MOVWstorezero + OpLOONG64MOVVstorezero + OpLOONG64MOVBreg + OpLOONG64MOVBUreg + OpLOONG64MOVHreg + OpLOONG64MOVHUreg + OpLOONG64MOVWreg + OpLOONG64MOVWUreg + OpLOONG64MOVVreg + OpLOONG64MOVVnop + OpLOONG64MOVWF + OpLOONG64MOVWD + OpLOONG64MOVVF + OpLOONG64MOVVD + OpLOONG64TRUNCFW + OpLOONG64TRUNCDW + OpLOONG64TRUNCFV + OpLOONG64TRUNCDV + OpLOONG64MOVFD + OpLOONG64MOVDF + OpLOONG64CALLstatic + OpLOONG64CALLtail + OpLOONG64CALLclosure + OpLOONG64CALLinter + OpLOONG64DUFFZERO + OpLOONG64DUFFCOPY + OpLOONG64LoweredZero + OpLOONG64LoweredMove + OpLOONG64LoweredAtomicLoad8 + OpLOONG64LoweredAtomicLoad32 + OpLOONG64LoweredAtomicLoad64 + OpLOONG64LoweredAtomicStore8 + OpLOONG64LoweredAtomicStore32 + OpLOONG64LoweredAtomicStore64 + OpLOONG64LoweredAtomicStorezero32 + OpLOONG64LoweredAtomicStorezero64 + OpLOONG64LoweredAtomicExchange32 + OpLOONG64LoweredAtomicExchange64 + OpLOONG64LoweredAtomicAdd32 + OpLOONG64LoweredAtomicAdd64 + OpLOONG64LoweredAtomicAddconst32 + OpLOONG64LoweredAtomicAddconst64 + OpLOONG64LoweredAtomicCas32 + OpLOONG64LoweredAtomicCas64 + OpLOONG64LoweredNilCheck + OpLOONG64FPFlagTrue + OpLOONG64FPFlagFalse + OpLOONG64LoweredGetClosurePtr + OpLOONG64LoweredGetCallerSP + OpLOONG64LoweredGetCallerPC + OpLOONG64LoweredWB + OpLOONG64LoweredPanicBoundsA + OpLOONG64LoweredPanicBoundsB + OpLOONG64LoweredPanicBoundsC + OpMIPSADD OpMIPSADDconst OpMIPSSUB @@ -22656,6 +22796,1643 @@ var opcodeTable = [...]opInfo{ reg: regInfo{}, }, + { + name: "ADDV", + argLen: 2, + commutative: true, + asm: loong64.AADDVU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "ADDVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AADDVU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693244}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SUBV", + argLen: 2, + asm: loong64.ASUBVU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 
R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SUBVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASUBVU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MULV", + argLen: 2, + clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072496632}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1072496632}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + clobbers: 196608, // R17 R18 + outputs: []outputInfo{ + {0, 65536}, // R17 + {1, 131072}, // R18 + }, + }, + }, + { + name: "MULVU", + argLen: 2, + clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072496632}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1072496632}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + clobbers: 196608, // R17 R18 + outputs: []outputInfo{ + {0, 65536}, // R17 + {1, 131072}, // R18 + }, + }, + }, + { + name: "DIVV", + argLen: 2, + clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072496632}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1072496632}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + clobbers: 196608, // R17 R18 + outputs: []outputInfo{ + {0, 65536}, // R17 + {1, 131072}, // R18 + }, + }, + }, + { + name: "DIVVU", + argLen: 2, + clobberFlags: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072496632}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1072496632}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + clobbers: 196608, // R17 R18 + outputs: []outputInfo{ + {0, 65536}, // R17 + {1, 131072}, // R18 + }, + }, + }, + { + name: "ADDF", + argLen: 2, + commutative: true, + asm: loong64.AADDF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "ADDD", + argLen: 2, + commutative: true, + asm: loong64.AADDD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 
F31 + }, + }, + }, + { + name: "SUBF", + argLen: 2, + asm: loong64.ASUBF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "SUBD", + argLen: 2, + asm: loong64.ASUBD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MULF", + argLen: 2, + commutative: true, + asm: loong64.AMULF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MULD", + argLen: 2, + commutative: true, + asm: loong64.AMULD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "DIVF", + argLen: 2, + asm: loong64.ADIVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "DIVD", + argLen: 2, + asm: loong64.ADIVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 
F30 F31 + }, + }, + }, + { + name: "AND", + argLen: 2, + commutative: true, + asm: loong64.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "ANDconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AAND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "OR", + argLen: 2, + commutative: true, + asm: loong64.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "ORconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "XOR", + argLen: 2, + commutative: true, + asm: loong64.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "XORconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.AXOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "NOR", + argLen: 2, + commutative: true, + asm: loong64.ANOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "NORconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ANOR, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 
R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "NEGV", + argLen: 1, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "NEGF", + argLen: 1, + asm: loong64.ANEGF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "NEGD", + argLen: 1, + asm: loong64.ANEGD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "SQRTD", + argLen: 1, + asm: loong64.ASQRTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "SQRTF", + argLen: 1, + asm: loong64.ASQRTF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "SLLV", + argLen: 2, + asm: loong64.ASLLV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SLLVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASLLV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SRLV", + argLen: 2, + asm: loong64.ASRLV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 
R29 R31 + }, + }, + }, + { + name: "SRLVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASRLV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SRAV", + argLen: 2, + asm: loong64.ASRAV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SRAVconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASRAV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SGT", + argLen: 2, + asm: loong64.ASGT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SGTconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASGT, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SGTU", + argLen: 2, + asm: loong64.ASGTU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "SGTUconst", + auxType: auxInt64, + argLen: 1, + asm: loong64.ASGTU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "CMPEQF", + argLen: 2, + asm: loong64.ACMPEQF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPEQD", + argLen: 2, + asm: 
loong64.ACMPEQD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPGEF", + argLen: 2, + asm: loong64.ACMPGEF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPGED", + argLen: 2, + asm: loong64.ACMPGED, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPGTF", + argLen: 2, + asm: loong64.ACMPGTF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CMPGTD", + argLen: 2, + asm: loong64.ACMPGTD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVVconst", + auxType: auxInt64, + argLen: 0, + rematerializeable: true, + asm: loong64.AMOVV, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVFconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: loong64.AMOVF, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVDconst", + auxType: auxFloat64, + argLen: 0, + rematerializeable: true, + asm: loong64.AMOVD, + reg: regInfo{ + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVVaddr", + auxType: auxSymOff, + argLen: 1, + rematerializeable: true, + symEffect: SymAddr, + asm: loong64.AMOVV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686018427387908}, // SP SB + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVBload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g 
R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVBUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVHload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVHUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVWload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVWUload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVVload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVFload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymRead, + asm: loong64.AMOVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVDload", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: 
SymRead, + asm: loong64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVBstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "MOVHstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "MOVWstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "MOVVstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVV, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "MOVFstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVDstore", + auxType: auxSymOff, + argLen: 3, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVBstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "MOVHstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ 
+ {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "MOVWstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "MOVVstorezero", + auxType: auxSymOff, + argLen: 2, + faultOnNilArg0: true, + symEffect: SymWrite, + asm: loong64.AMOVV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "MOVBreg", + argLen: 1, + asm: loong64.AMOVB, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVBUreg", + argLen: 1, + asm: loong64.AMOVBU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVHreg", + argLen: 1, + asm: loong64.AMOVH, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVHUreg", + argLen: 1, + asm: loong64.AMOVHU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVWreg", + argLen: 1, + asm: loong64.AMOVW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVWUreg", + argLen: 1, + asm: loong64.AMOVWU, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVVreg", + argLen: 1, + asm: loong64.AMOVV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVVnop", + argLen: 1, + resultInArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1070596088}, // R4 R5 R6 
R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "MOVWF", + argLen: 1, + asm: loong64.AMOVWF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVWD", + argLen: 1, + asm: loong64.AMOVWD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVVF", + argLen: 1, + asm: loong64.AMOVVF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVVD", + argLen: 1, + asm: loong64.AMOVVD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "TRUNCFW", + argLen: 1, + asm: loong64.ATRUNCFW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "TRUNCDW", + argLen: 1, + asm: loong64.ATRUNCDW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "TRUNCFV", + argLen: 1, + asm: loong64.ATRUNCFV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "TRUNCDV", + argLen: 1, + asm: loong64.ATRUNCDV, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 
F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVFD", + argLen: 1, + asm: loong64.AMOVFD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "MOVDF", + argLen: 1, + asm: loong64.AMOVDF, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "CALLstatic", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + reg: regInfo{ + clobbers: 4611686018426339320, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLtail", + auxType: auxCallOff, + argLen: 1, + clobberFlags: true, + call: true, + reg: regInfo{ + clobbers: 4611686018426339320, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLclosure", + auxType: auxCallOff, + argLen: 3, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 268435456}, // R29 + {0, 1070596092}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + clobbers: 4611686018426339320, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "CALLinter", + auxType: auxCallOff, + argLen: 2, + clobberFlags: true, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + clobbers: 4611686018426339320, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "DUFFZERO", + auxType: auxInt64, + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + clobbers: 262146, // R1 R19 + }, + }, + { + name: "DUFFCOPY", + auxType: auxInt64, + argLen: 3, + faultOnNilArg0: true, + faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 524288}, // R20 + {1, 262144}, // R19 + }, + clobbers: 786434, // R1 R19 R20 + }, + }, + { + name: "LoweredZero", + auxType: auxInt64, + argLen: 3, + clobberFlags: true, + faultOnNilArg0: true, + reg: regInfo{ + 
inputs: []inputInfo{ + {0, 262144}, // R19 + {1, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + clobbers: 262144, // R19 + }, + }, + { + name: "LoweredMove", + auxType: auxInt64, + argLen: 4, + clobberFlags: true, + faultOnNilArg0: true, + faultOnNilArg1: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 8}, // R4 + {1, 262144}, // R19 + {2, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + clobbers: 262152, // R4 R19 + }, + }, + { + name: "LoweredAtomicLoad8", + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicLoad32", + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicLoad64", + argLen: 2, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicStore8", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "LoweredAtomicStore32", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "LoweredAtomicStore64", + argLen: 3, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "LoweredAtomicStorezero32", + argLen: 2, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + }, + }, + { + name: "LoweredAtomicStorezero64", + argLen: 2, + faultOnNilArg0: true, + hasSideEffects: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, 
+ }, + }, + { + name: "LoweredAtomicExchange32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicExchange64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicAdd32", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicAdd64", + argLen: 3, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicAddconst32", + auxType: auxInt32, + argLen: 2, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicAddconst64", + auxType: auxInt64, + argLen: 2, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicCas32", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: 
[]inputInfo{ + {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredAtomicCas64", + argLen: 4, + resultNotInArgs: true, + faultOnNilArg0: true, + hasSideEffects: true, + unsafePoint: true, + reg: regInfo{ + inputs: []inputInfo{ + {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + }, + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredNilCheck", + argLen: 2, + nilCheck: true, + faultOnNilArg0: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "FPFlagTrue", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "FPFlagFalse", + argLen: 1, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredGetClosurePtr", + argLen: 0, + zeroWidth: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 268435456}, // R29 + }, + }, + }, + { + name: "LoweredGetCallerSP", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredGetCallerPC", + argLen: 0, + rematerializeable: true, + reg: regInfo{ + outputs: []outputInfo{ + {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + }, + }, + }, + { + name: "LoweredWB", + auxType: auxSym, + argLen: 3, + clobberFlags: true, + symEffect: SymNone, + reg: regInfo{ + inputs: []inputInfo{ + {0, 67108864}, // R27 + {1, 134217728}, // R28 + }, + clobbers: 4611686017353646082, // R1 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + { + name: "LoweredPanicBoundsA", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 65536}, // R17 + {1, 8}, // R4 + }, + }, + }, + { + name: "LoweredPanicBoundsB", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 131072}, // R18 + {1, 65536}, // R17 + }, + }, + }, + { + name: "LoweredPanicBoundsC", + auxType: auxInt64, + argLen: 3, + call: true, + reg: regInfo{ + inputs: []inputInfo{ + {0, 262144}, // R19 + {1, 131072}, // R18 + }, + }, + }, + { name: "ADD", argLen: 2, @@ -37969,6 +39746,78 @@ var fpRegMaskARM64 = 
regMask(9223372034707292160) var specialRegMaskARM64 = regMask(0) var framepointerRegARM64 = int8(-1) var linkRegARM64 = int8(29) +var registersLOONG64 = [...]Register{ + {0, loong64.REG_R0, -1, "R0"}, + {1, loong64.REG_R1, -1, "R1"}, + {2, loong64.REGSP, -1, "SP"}, + {3, loong64.REG_R4, 0, "R4"}, + {4, loong64.REG_R5, 1, "R5"}, + {5, loong64.REG_R6, 2, "R6"}, + {6, loong64.REG_R7, 3, "R7"}, + {7, loong64.REG_R8, 4, "R8"}, + {8, loong64.REG_R9, 5, "R9"}, + {9, loong64.REG_R10, 6, "R10"}, + {10, loong64.REG_R11, 7, "R11"}, + {11, loong64.REG_R12, 8, "R12"}, + {12, loong64.REG_R13, 9, "R13"}, + {13, loong64.REG_R14, 10, "R14"}, + {14, loong64.REG_R15, 11, "R15"}, + {15, loong64.REG_R16, 12, "R16"}, + {16, loong64.REG_R17, 13, "R17"}, + {17, loong64.REG_R18, 14, "R18"}, + {18, loong64.REG_R19, 15, "R19"}, + {19, loong64.REG_R20, 16, "R20"}, + {20, loong64.REG_R21, -1, "R21"}, + {21, loong64.REGG, -1, "g"}, + {22, loong64.REG_R23, 17, "R23"}, + {23, loong64.REG_R24, 18, "R24"}, + {24, loong64.REG_R25, 19, "R25"}, + {25, loong64.REG_R26, 20, "R26"}, + {26, loong64.REG_R27, 21, "R27"}, + {27, loong64.REG_R28, 22, "R28"}, + {28, loong64.REG_R29, 23, "R29"}, + {29, loong64.REG_R31, 24, "R31"}, + {30, loong64.REG_F0, -1, "F0"}, + {31, loong64.REG_F1, -1, "F1"}, + {32, loong64.REG_F2, -1, "F2"}, + {33, loong64.REG_F3, -1, "F3"}, + {34, loong64.REG_F4, -1, "F4"}, + {35, loong64.REG_F5, -1, "F5"}, + {36, loong64.REG_F6, -1, "F6"}, + {37, loong64.REG_F7, -1, "F7"}, + {38, loong64.REG_F8, -1, "F8"}, + {39, loong64.REG_F9, -1, "F9"}, + {40, loong64.REG_F10, -1, "F10"}, + {41, loong64.REG_F11, -1, "F11"}, + {42, loong64.REG_F12, -1, "F12"}, + {43, loong64.REG_F13, -1, "F13"}, + {44, loong64.REG_F14, -1, "F14"}, + {45, loong64.REG_F15, -1, "F15"}, + {46, loong64.REG_F16, -1, "F16"}, + {47, loong64.REG_F17, -1, "F17"}, + {48, loong64.REG_F18, -1, "F18"}, + {49, loong64.REG_F19, -1, "F19"}, + {50, loong64.REG_F20, -1, "F20"}, + {51, loong64.REG_F21, -1, "F21"}, + {52, loong64.REG_F22, -1, "F22"}, + {53, loong64.REG_F23, -1, "F23"}, + {54, loong64.REG_F24, -1, "F24"}, + {55, loong64.REG_F25, -1, "F25"}, + {56, loong64.REG_F26, -1, "F26"}, + {57, loong64.REG_F27, -1, "F27"}, + {58, loong64.REG_F28, -1, "F28"}, + {59, loong64.REG_F29, -1, "F29"}, + {60, loong64.REG_F30, -1, "F30"}, + {61, loong64.REG_F31, -1, "F31"}, + {62, 0, -1, "SB"}, +} +var paramIntRegLOONG64 = []int8{3, 4, 5, 6, 7, 8, 9, 10} +var paramFloatRegLOONG64 = []int8{30, 31, 32, 33, 34, 35, 36, 37} +var gpRegMaskLOONG64 = regMask(1070596088) +var fpRegMaskLOONG64 = regMask(4611686017353646080) +var specialRegMaskLOONG64 = regMask(0) +var framepointerRegLOONG64 = int8(-1) +var linkRegLOONG64 = int8(1) var registersMIPS = [...]Register{ {0, mips.REG_R0, -1, "R0"}, {1, mips.REG_R1, 0, "R1"}, diff --git a/src/cmd/compile/internal/ssa/rewriteLOONG64.go b/src/cmd/compile/internal/ssa/rewriteLOONG64.go new file mode 100644 index 0000000000..de49397fff --- /dev/null +++ b/src/cmd/compile/internal/ssa/rewriteLOONG64.go @@ -0,0 +1,7946 @@ +// Code generated from gen/LOONG64.rules; DO NOT EDIT. 
+// generated with: cd gen; go run *.go + +package ssa + +import "cmd/compile/internal/types" + +func rewriteValueLOONG64(v *Value) bool { + switch v.Op { + case OpAdd16: + v.Op = OpLOONG64ADDV + return true + case OpAdd32: + v.Op = OpLOONG64ADDV + return true + case OpAdd32F: + v.Op = OpLOONG64ADDF + return true + case OpAdd64: + v.Op = OpLOONG64ADDV + return true + case OpAdd64F: + v.Op = OpLOONG64ADDD + return true + case OpAdd8: + v.Op = OpLOONG64ADDV + return true + case OpAddPtr: + v.Op = OpLOONG64ADDV + return true + case OpAddr: + return rewriteValueLOONG64_OpAddr(v) + case OpAnd16: + v.Op = OpLOONG64AND + return true + case OpAnd32: + v.Op = OpLOONG64AND + return true + case OpAnd64: + v.Op = OpLOONG64AND + return true + case OpAnd8: + v.Op = OpLOONG64AND + return true + case OpAndB: + v.Op = OpLOONG64AND + return true + case OpAtomicAdd32: + v.Op = OpLOONG64LoweredAtomicAdd32 + return true + case OpAtomicAdd64: + v.Op = OpLOONG64LoweredAtomicAdd64 + return true + case OpAtomicCompareAndSwap32: + v.Op = OpLOONG64LoweredAtomicCas32 + return true + case OpAtomicCompareAndSwap64: + v.Op = OpLOONG64LoweredAtomicCas64 + return true + case OpAtomicExchange32: + v.Op = OpLOONG64LoweredAtomicExchange32 + return true + case OpAtomicExchange64: + v.Op = OpLOONG64LoweredAtomicExchange64 + return true + case OpAtomicLoad32: + v.Op = OpLOONG64LoweredAtomicLoad32 + return true + case OpAtomicLoad64: + v.Op = OpLOONG64LoweredAtomicLoad64 + return true + case OpAtomicLoad8: + v.Op = OpLOONG64LoweredAtomicLoad8 + return true + case OpAtomicLoadPtr: + v.Op = OpLOONG64LoweredAtomicLoad64 + return true + case OpAtomicStore32: + v.Op = OpLOONG64LoweredAtomicStore32 + return true + case OpAtomicStore64: + v.Op = OpLOONG64LoweredAtomicStore64 + return true + case OpAtomicStore8: + v.Op = OpLOONG64LoweredAtomicStore8 + return true + case OpAtomicStorePtrNoWB: + v.Op = OpLOONG64LoweredAtomicStore64 + return true + case OpAvg64u: + return rewriteValueLOONG64_OpAvg64u(v) + case OpClosureCall: + v.Op = OpLOONG64CALLclosure + return true + case OpCom16: + return rewriteValueLOONG64_OpCom16(v) + case OpCom32: + return rewriteValueLOONG64_OpCom32(v) + case OpCom64: + return rewriteValueLOONG64_OpCom64(v) + case OpCom8: + return rewriteValueLOONG64_OpCom8(v) + case OpConst16: + return rewriteValueLOONG64_OpConst16(v) + case OpConst32: + return rewriteValueLOONG64_OpConst32(v) + case OpConst32F: + return rewriteValueLOONG64_OpConst32F(v) + case OpConst64: + return rewriteValueLOONG64_OpConst64(v) + case OpConst64F: + return rewriteValueLOONG64_OpConst64F(v) + case OpConst8: + return rewriteValueLOONG64_OpConst8(v) + case OpConstBool: + return rewriteValueLOONG64_OpConstBool(v) + case OpConstNil: + return rewriteValueLOONG64_OpConstNil(v) + case OpCvt32Fto32: + v.Op = OpLOONG64TRUNCFW + return true + case OpCvt32Fto64: + v.Op = OpLOONG64TRUNCFV + return true + case OpCvt32Fto64F: + v.Op = OpLOONG64MOVFD + return true + case OpCvt32to32F: + v.Op = OpLOONG64MOVWF + return true + case OpCvt32to64F: + v.Op = OpLOONG64MOVWD + return true + case OpCvt64Fto32: + v.Op = OpLOONG64TRUNCDW + return true + case OpCvt64Fto32F: + v.Op = OpLOONG64MOVDF + return true + case OpCvt64Fto64: + v.Op = OpLOONG64TRUNCDV + return true + case OpCvt64to32F: + v.Op = OpLOONG64MOVVF + return true + case OpCvt64to64F: + v.Op = OpLOONG64MOVVD + return true + case OpCvtBoolToUint8: + v.Op = OpCopy + return true + case OpDiv16: + return rewriteValueLOONG64_OpDiv16(v) + case OpDiv16u: + return rewriteValueLOONG64_OpDiv16u(v) + case OpDiv32: + 
return rewriteValueLOONG64_OpDiv32(v) + case OpDiv32F: + v.Op = OpLOONG64DIVF + return true + case OpDiv32u: + return rewriteValueLOONG64_OpDiv32u(v) + case OpDiv64: + return rewriteValueLOONG64_OpDiv64(v) + case OpDiv64F: + v.Op = OpLOONG64DIVD + return true + case OpDiv64u: + return rewriteValueLOONG64_OpDiv64u(v) + case OpDiv8: + return rewriteValueLOONG64_OpDiv8(v) + case OpDiv8u: + return rewriteValueLOONG64_OpDiv8u(v) + case OpEq16: + return rewriteValueLOONG64_OpEq16(v) + case OpEq32: + return rewriteValueLOONG64_OpEq32(v) + case OpEq32F: + return rewriteValueLOONG64_OpEq32F(v) + case OpEq64: + return rewriteValueLOONG64_OpEq64(v) + case OpEq64F: + return rewriteValueLOONG64_OpEq64F(v) + case OpEq8: + return rewriteValueLOONG64_OpEq8(v) + case OpEqB: + return rewriteValueLOONG64_OpEqB(v) + case OpEqPtr: + return rewriteValueLOONG64_OpEqPtr(v) + case OpGetCallerPC: + v.Op = OpLOONG64LoweredGetCallerPC + return true + case OpGetCallerSP: + v.Op = OpLOONG64LoweredGetCallerSP + return true + case OpGetClosurePtr: + v.Op = OpLOONG64LoweredGetClosurePtr + return true + case OpHmul32: + return rewriteValueLOONG64_OpHmul32(v) + case OpHmul32u: + return rewriteValueLOONG64_OpHmul32u(v) + case OpHmul64: + return rewriteValueLOONG64_OpHmul64(v) + case OpHmul64u: + return rewriteValueLOONG64_OpHmul64u(v) + case OpInterCall: + v.Op = OpLOONG64CALLinter + return true + case OpIsInBounds: + return rewriteValueLOONG64_OpIsInBounds(v) + case OpIsNonNil: + return rewriteValueLOONG64_OpIsNonNil(v) + case OpIsSliceInBounds: + return rewriteValueLOONG64_OpIsSliceInBounds(v) + case OpLOONG64ADDV: + return rewriteValueLOONG64_OpLOONG64ADDV(v) + case OpLOONG64ADDVconst: + return rewriteValueLOONG64_OpLOONG64ADDVconst(v) + case OpLOONG64AND: + return rewriteValueLOONG64_OpLOONG64AND(v) + case OpLOONG64ANDconst: + return rewriteValueLOONG64_OpLOONG64ANDconst(v) + case OpLOONG64LoweredAtomicAdd32: + return rewriteValueLOONG64_OpLOONG64LoweredAtomicAdd32(v) + case OpLOONG64LoweredAtomicAdd64: + return rewriteValueLOONG64_OpLOONG64LoweredAtomicAdd64(v) + case OpLOONG64LoweredAtomicStore32: + return rewriteValueLOONG64_OpLOONG64LoweredAtomicStore32(v) + case OpLOONG64LoweredAtomicStore64: + return rewriteValueLOONG64_OpLOONG64LoweredAtomicStore64(v) + case OpLOONG64MOVBUload: + return rewriteValueLOONG64_OpLOONG64MOVBUload(v) + case OpLOONG64MOVBUreg: + return rewriteValueLOONG64_OpLOONG64MOVBUreg(v) + case OpLOONG64MOVBload: + return rewriteValueLOONG64_OpLOONG64MOVBload(v) + case OpLOONG64MOVBreg: + return rewriteValueLOONG64_OpLOONG64MOVBreg(v) + case OpLOONG64MOVBstore: + return rewriteValueLOONG64_OpLOONG64MOVBstore(v) + case OpLOONG64MOVBstorezero: + return rewriteValueLOONG64_OpLOONG64MOVBstorezero(v) + case OpLOONG64MOVDload: + return rewriteValueLOONG64_OpLOONG64MOVDload(v) + case OpLOONG64MOVDstore: + return rewriteValueLOONG64_OpLOONG64MOVDstore(v) + case OpLOONG64MOVFload: + return rewriteValueLOONG64_OpLOONG64MOVFload(v) + case OpLOONG64MOVFstore: + return rewriteValueLOONG64_OpLOONG64MOVFstore(v) + case OpLOONG64MOVHUload: + return rewriteValueLOONG64_OpLOONG64MOVHUload(v) + case OpLOONG64MOVHUreg: + return rewriteValueLOONG64_OpLOONG64MOVHUreg(v) + case OpLOONG64MOVHload: + return rewriteValueLOONG64_OpLOONG64MOVHload(v) + case OpLOONG64MOVHreg: + return rewriteValueLOONG64_OpLOONG64MOVHreg(v) + case OpLOONG64MOVHstore: + return rewriteValueLOONG64_OpLOONG64MOVHstore(v) + case OpLOONG64MOVHstorezero: + return rewriteValueLOONG64_OpLOONG64MOVHstorezero(v) + case OpLOONG64MOVVload: + return 
rewriteValueLOONG64_OpLOONG64MOVVload(v) + case OpLOONG64MOVVreg: + return rewriteValueLOONG64_OpLOONG64MOVVreg(v) + case OpLOONG64MOVVstore: + return rewriteValueLOONG64_OpLOONG64MOVVstore(v) + case OpLOONG64MOVVstorezero: + return rewriteValueLOONG64_OpLOONG64MOVVstorezero(v) + case OpLOONG64MOVWUload: + return rewriteValueLOONG64_OpLOONG64MOVWUload(v) + case OpLOONG64MOVWUreg: + return rewriteValueLOONG64_OpLOONG64MOVWUreg(v) + case OpLOONG64MOVWload: + return rewriteValueLOONG64_OpLOONG64MOVWload(v) + case OpLOONG64MOVWreg: + return rewriteValueLOONG64_OpLOONG64MOVWreg(v) + case OpLOONG64MOVWstore: + return rewriteValueLOONG64_OpLOONG64MOVWstore(v) + case OpLOONG64MOVWstorezero: + return rewriteValueLOONG64_OpLOONG64MOVWstorezero(v) + case OpLOONG64NEGV: + return rewriteValueLOONG64_OpLOONG64NEGV(v) + case OpLOONG64NOR: + return rewriteValueLOONG64_OpLOONG64NOR(v) + case OpLOONG64NORconst: + return rewriteValueLOONG64_OpLOONG64NORconst(v) + case OpLOONG64OR: + return rewriteValueLOONG64_OpLOONG64OR(v) + case OpLOONG64ORconst: + return rewriteValueLOONG64_OpLOONG64ORconst(v) + case OpLOONG64SGT: + return rewriteValueLOONG64_OpLOONG64SGT(v) + case OpLOONG64SGTU: + return rewriteValueLOONG64_OpLOONG64SGTU(v) + case OpLOONG64SGTUconst: + return rewriteValueLOONG64_OpLOONG64SGTUconst(v) + case OpLOONG64SGTconst: + return rewriteValueLOONG64_OpLOONG64SGTconst(v) + case OpLOONG64SLLV: + return rewriteValueLOONG64_OpLOONG64SLLV(v) + case OpLOONG64SLLVconst: + return rewriteValueLOONG64_OpLOONG64SLLVconst(v) + case OpLOONG64SRAV: + return rewriteValueLOONG64_OpLOONG64SRAV(v) + case OpLOONG64SRAVconst: + return rewriteValueLOONG64_OpLOONG64SRAVconst(v) + case OpLOONG64SRLV: + return rewriteValueLOONG64_OpLOONG64SRLV(v) + case OpLOONG64SRLVconst: + return rewriteValueLOONG64_OpLOONG64SRLVconst(v) + case OpLOONG64SUBV: + return rewriteValueLOONG64_OpLOONG64SUBV(v) + case OpLOONG64SUBVconst: + return rewriteValueLOONG64_OpLOONG64SUBVconst(v) + case OpLOONG64XOR: + return rewriteValueLOONG64_OpLOONG64XOR(v) + case OpLOONG64XORconst: + return rewriteValueLOONG64_OpLOONG64XORconst(v) + case OpLeq16: + return rewriteValueLOONG64_OpLeq16(v) + case OpLeq16U: + return rewriteValueLOONG64_OpLeq16U(v) + case OpLeq32: + return rewriteValueLOONG64_OpLeq32(v) + case OpLeq32F: + return rewriteValueLOONG64_OpLeq32F(v) + case OpLeq32U: + return rewriteValueLOONG64_OpLeq32U(v) + case OpLeq64: + return rewriteValueLOONG64_OpLeq64(v) + case OpLeq64F: + return rewriteValueLOONG64_OpLeq64F(v) + case OpLeq64U: + return rewriteValueLOONG64_OpLeq64U(v) + case OpLeq8: + return rewriteValueLOONG64_OpLeq8(v) + case OpLeq8U: + return rewriteValueLOONG64_OpLeq8U(v) + case OpLess16: + return rewriteValueLOONG64_OpLess16(v) + case OpLess16U: + return rewriteValueLOONG64_OpLess16U(v) + case OpLess32: + return rewriteValueLOONG64_OpLess32(v) + case OpLess32F: + return rewriteValueLOONG64_OpLess32F(v) + case OpLess32U: + return rewriteValueLOONG64_OpLess32U(v) + case OpLess64: + return rewriteValueLOONG64_OpLess64(v) + case OpLess64F: + return rewriteValueLOONG64_OpLess64F(v) + case OpLess64U: + return rewriteValueLOONG64_OpLess64U(v) + case OpLess8: + return rewriteValueLOONG64_OpLess8(v) + case OpLess8U: + return rewriteValueLOONG64_OpLess8U(v) + case OpLoad: + return rewriteValueLOONG64_OpLoad(v) + case OpLocalAddr: + return rewriteValueLOONG64_OpLocalAddr(v) + case OpLsh16x16: + return rewriteValueLOONG64_OpLsh16x16(v) + case OpLsh16x32: + return rewriteValueLOONG64_OpLsh16x32(v) + case OpLsh16x64: + return 
rewriteValueLOONG64_OpLsh16x64(v) + case OpLsh16x8: + return rewriteValueLOONG64_OpLsh16x8(v) + case OpLsh32x16: + return rewriteValueLOONG64_OpLsh32x16(v) + case OpLsh32x32: + return rewriteValueLOONG64_OpLsh32x32(v) + case OpLsh32x64: + return rewriteValueLOONG64_OpLsh32x64(v) + case OpLsh32x8: + return rewriteValueLOONG64_OpLsh32x8(v) + case OpLsh64x16: + return rewriteValueLOONG64_OpLsh64x16(v) + case OpLsh64x32: + return rewriteValueLOONG64_OpLsh64x32(v) + case OpLsh64x64: + return rewriteValueLOONG64_OpLsh64x64(v) + case OpLsh64x8: + return rewriteValueLOONG64_OpLsh64x8(v) + case OpLsh8x16: + return rewriteValueLOONG64_OpLsh8x16(v) + case OpLsh8x32: + return rewriteValueLOONG64_OpLsh8x32(v) + case OpLsh8x64: + return rewriteValueLOONG64_OpLsh8x64(v) + case OpLsh8x8: + return rewriteValueLOONG64_OpLsh8x8(v) + case OpMod16: + return rewriteValueLOONG64_OpMod16(v) + case OpMod16u: + return rewriteValueLOONG64_OpMod16u(v) + case OpMod32: + return rewriteValueLOONG64_OpMod32(v) + case OpMod32u: + return rewriteValueLOONG64_OpMod32u(v) + case OpMod64: + return rewriteValueLOONG64_OpMod64(v) + case OpMod64u: + return rewriteValueLOONG64_OpMod64u(v) + case OpMod8: + return rewriteValueLOONG64_OpMod8(v) + case OpMod8u: + return rewriteValueLOONG64_OpMod8u(v) + case OpMove: + return rewriteValueLOONG64_OpMove(v) + case OpMul16: + return rewriteValueLOONG64_OpMul16(v) + case OpMul32: + return rewriteValueLOONG64_OpMul32(v) + case OpMul32F: + v.Op = OpLOONG64MULF + return true + case OpMul64: + return rewriteValueLOONG64_OpMul64(v) + case OpMul64F: + v.Op = OpLOONG64MULD + return true + case OpMul64uhilo: + v.Op = OpLOONG64MULVU + return true + case OpMul8: + return rewriteValueLOONG64_OpMul8(v) + case OpNeg16: + v.Op = OpLOONG64NEGV + return true + case OpNeg32: + v.Op = OpLOONG64NEGV + return true + case OpNeg32F: + v.Op = OpLOONG64NEGF + return true + case OpNeg64: + v.Op = OpLOONG64NEGV + return true + case OpNeg64F: + v.Op = OpLOONG64NEGD + return true + case OpNeg8: + v.Op = OpLOONG64NEGV + return true + case OpNeq16: + return rewriteValueLOONG64_OpNeq16(v) + case OpNeq32: + return rewriteValueLOONG64_OpNeq32(v) + case OpNeq32F: + return rewriteValueLOONG64_OpNeq32F(v) + case OpNeq64: + return rewriteValueLOONG64_OpNeq64(v) + case OpNeq64F: + return rewriteValueLOONG64_OpNeq64F(v) + case OpNeq8: + return rewriteValueLOONG64_OpNeq8(v) + case OpNeqB: + v.Op = OpLOONG64XOR + return true + case OpNeqPtr: + return rewriteValueLOONG64_OpNeqPtr(v) + case OpNilCheck: + v.Op = OpLOONG64LoweredNilCheck + return true + case OpNot: + return rewriteValueLOONG64_OpNot(v) + case OpOffPtr: + return rewriteValueLOONG64_OpOffPtr(v) + case OpOr16: + v.Op = OpLOONG64OR + return true + case OpOr32: + v.Op = OpLOONG64OR + return true + case OpOr64: + v.Op = OpLOONG64OR + return true + case OpOr8: + v.Op = OpLOONG64OR + return true + case OpOrB: + v.Op = OpLOONG64OR + return true + case OpPanicBounds: + return rewriteValueLOONG64_OpPanicBounds(v) + case OpRotateLeft16: + return rewriteValueLOONG64_OpRotateLeft16(v) + case OpRotateLeft32: + return rewriteValueLOONG64_OpRotateLeft32(v) + case OpRotateLeft64: + return rewriteValueLOONG64_OpRotateLeft64(v) + case OpRotateLeft8: + return rewriteValueLOONG64_OpRotateLeft8(v) + case OpRound32F: + v.Op = OpCopy + return true + case OpRound64F: + v.Op = OpCopy + return true + case OpRsh16Ux16: + return rewriteValueLOONG64_OpRsh16Ux16(v) + case OpRsh16Ux32: + return rewriteValueLOONG64_OpRsh16Ux32(v) + case OpRsh16Ux64: + return rewriteValueLOONG64_OpRsh16Ux64(v) + case 
OpRsh16Ux8: + return rewriteValueLOONG64_OpRsh16Ux8(v) + case OpRsh16x16: + return rewriteValueLOONG64_OpRsh16x16(v) + case OpRsh16x32: + return rewriteValueLOONG64_OpRsh16x32(v) + case OpRsh16x64: + return rewriteValueLOONG64_OpRsh16x64(v) + case OpRsh16x8: + return rewriteValueLOONG64_OpRsh16x8(v) + case OpRsh32Ux16: + return rewriteValueLOONG64_OpRsh32Ux16(v) + case OpRsh32Ux32: + return rewriteValueLOONG64_OpRsh32Ux32(v) + case OpRsh32Ux64: + return rewriteValueLOONG64_OpRsh32Ux64(v) + case OpRsh32Ux8: + return rewriteValueLOONG64_OpRsh32Ux8(v) + case OpRsh32x16: + return rewriteValueLOONG64_OpRsh32x16(v) + case OpRsh32x32: + return rewriteValueLOONG64_OpRsh32x32(v) + case OpRsh32x64: + return rewriteValueLOONG64_OpRsh32x64(v) + case OpRsh32x8: + return rewriteValueLOONG64_OpRsh32x8(v) + case OpRsh64Ux16: + return rewriteValueLOONG64_OpRsh64Ux16(v) + case OpRsh64Ux32: + return rewriteValueLOONG64_OpRsh64Ux32(v) + case OpRsh64Ux64: + return rewriteValueLOONG64_OpRsh64Ux64(v) + case OpRsh64Ux8: + return rewriteValueLOONG64_OpRsh64Ux8(v) + case OpRsh64x16: + return rewriteValueLOONG64_OpRsh64x16(v) + case OpRsh64x32: + return rewriteValueLOONG64_OpRsh64x32(v) + case OpRsh64x64: + return rewriteValueLOONG64_OpRsh64x64(v) + case OpRsh64x8: + return rewriteValueLOONG64_OpRsh64x8(v) + case OpRsh8Ux16: + return rewriteValueLOONG64_OpRsh8Ux16(v) + case OpRsh8Ux32: + return rewriteValueLOONG64_OpRsh8Ux32(v) + case OpRsh8Ux64: + return rewriteValueLOONG64_OpRsh8Ux64(v) + case OpRsh8Ux8: + return rewriteValueLOONG64_OpRsh8Ux8(v) + case OpRsh8x16: + return rewriteValueLOONG64_OpRsh8x16(v) + case OpRsh8x32: + return rewriteValueLOONG64_OpRsh8x32(v) + case OpRsh8x64: + return rewriteValueLOONG64_OpRsh8x64(v) + case OpRsh8x8: + return rewriteValueLOONG64_OpRsh8x8(v) + case OpSelect0: + return rewriteValueLOONG64_OpSelect0(v) + case OpSelect1: + return rewriteValueLOONG64_OpSelect1(v) + case OpSignExt16to32: + v.Op = OpLOONG64MOVHreg + return true + case OpSignExt16to64: + v.Op = OpLOONG64MOVHreg + return true + case OpSignExt32to64: + v.Op = OpLOONG64MOVWreg + return true + case OpSignExt8to16: + v.Op = OpLOONG64MOVBreg + return true + case OpSignExt8to32: + v.Op = OpLOONG64MOVBreg + return true + case OpSignExt8to64: + v.Op = OpLOONG64MOVBreg + return true + case OpSlicemask: + return rewriteValueLOONG64_OpSlicemask(v) + case OpSqrt: + v.Op = OpLOONG64SQRTD + return true + case OpSqrt32: + v.Op = OpLOONG64SQRTF + return true + case OpStaticCall: + v.Op = OpLOONG64CALLstatic + return true + case OpStore: + return rewriteValueLOONG64_OpStore(v) + case OpSub16: + v.Op = OpLOONG64SUBV + return true + case OpSub32: + v.Op = OpLOONG64SUBV + return true + case OpSub32F: + v.Op = OpLOONG64SUBF + return true + case OpSub64: + v.Op = OpLOONG64SUBV + return true + case OpSub64F: + v.Op = OpLOONG64SUBD + return true + case OpSub8: + v.Op = OpLOONG64SUBV + return true + case OpSubPtr: + v.Op = OpLOONG64SUBV + return true + case OpTailCall: + v.Op = OpLOONG64CALLtail + return true + case OpTrunc16to8: + v.Op = OpCopy + return true + case OpTrunc32to16: + v.Op = OpCopy + return true + case OpTrunc32to8: + v.Op = OpCopy + return true + case OpTrunc64to16: + v.Op = OpCopy + return true + case OpTrunc64to32: + v.Op = OpCopy + return true + case OpTrunc64to8: + v.Op = OpCopy + return true + case OpWB: + v.Op = OpLOONG64LoweredWB + return true + case OpXor16: + v.Op = OpLOONG64XOR + return true + case OpXor32: + v.Op = OpLOONG64XOR + return true + case OpXor64: + v.Op = OpLOONG64XOR + return true + case OpXor8: + v.Op = 
OpLOONG64XOR + return true + case OpZero: + return rewriteValueLOONG64_OpZero(v) + case OpZeroExt16to32: + v.Op = OpLOONG64MOVHUreg + return true + case OpZeroExt16to64: + v.Op = OpLOONG64MOVHUreg + return true + case OpZeroExt32to64: + v.Op = OpLOONG64MOVWUreg + return true + case OpZeroExt8to16: + v.Op = OpLOONG64MOVBUreg + return true + case OpZeroExt8to32: + v.Op = OpLOONG64MOVBUreg + return true + case OpZeroExt8to64: + v.Op = OpLOONG64MOVBUreg + return true + } + return false +} +func rewriteValueLOONG64_OpAddr(v *Value) bool { + v_0 := v.Args[0] + // match: (Addr {sym} base) + // result: (MOVVaddr {sym} base) + for { + sym := auxToSym(v.Aux) + base := v_0 + v.reset(OpLOONG64MOVVaddr) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } +} +func rewriteValueLOONG64_OpAvg64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Avg64u x y) + // result: (ADDV (SRLVconst (SUBV x y) [1]) y) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64ADDV) + v0 := b.NewValue0(v.Pos, OpLOONG64SRLVconst, t) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64SUBV, t) + v1.AddArg2(x, y) + v0.AddArg(v1) + v.AddArg2(v0, y) + return true + } +} +func rewriteValueLOONG64_OpCom16(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Com16 x) + // result: (NOR (MOVVconst [0]) x) + for { + x := v_0 + v.reset(OpLOONG64NOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueLOONG64_OpCom32(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Com32 x) + // result: (NOR (MOVVconst [0]) x) + for { + x := v_0 + v.reset(OpLOONG64NOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueLOONG64_OpCom64(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Com64 x) + // result: (NOR (MOVVconst [0]) x) + for { + x := v_0 + v.reset(OpLOONG64NOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueLOONG64_OpCom8(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Com8 x) + // result: (NOR (MOVVconst [0]) x) + for { + x := v_0 + v.reset(OpLOONG64NOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, x) + return true + } +} +func rewriteValueLOONG64_OpConst16(v *Value) bool { + // match: (Const16 [val]) + // result: (MOVVconst [int64(val)]) + for { + val := auxIntToInt16(v.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueLOONG64_OpConst32(v *Value) bool { + // match: (Const32 [val]) + // result: (MOVVconst [int64(val)]) + for { + val := auxIntToInt32(v.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueLOONG64_OpConst32F(v *Value) bool { + // match: (Const32F [val]) + // result: (MOVFconst [float64(val)]) + for { + val := auxIntToFloat32(v.AuxInt) + v.reset(OpLOONG64MOVFconst) + v.AuxInt = float64ToAuxInt(float64(val)) + return true + } +} +func rewriteValueLOONG64_OpConst64(v *Value) bool { + // match: (Const64 [val]) + // result: (MOVVconst [int64(val)]) + for { + val := auxIntToInt64(v.AuxInt) + v.reset(OpLOONG64MOVVconst) + 
v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueLOONG64_OpConst64F(v *Value) bool { + // match: (Const64F [val]) + // result: (MOVDconst [float64(val)]) + for { + val := auxIntToFloat64(v.AuxInt) + v.reset(OpLOONG64MOVDconst) + v.AuxInt = float64ToAuxInt(float64(val)) + return true + } +} +func rewriteValueLOONG64_OpConst8(v *Value) bool { + // match: (Const8 [val]) + // result: (MOVVconst [int64(val)]) + for { + val := auxIntToInt8(v.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(val)) + return true + } +} +func rewriteValueLOONG64_OpConstBool(v *Value) bool { + // match: (ConstBool [t]) + // result: (MOVVconst [int64(b2i(t))]) + for { + t := auxIntToBool(v.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(b2i(t))) + return true + } +} +func rewriteValueLOONG64_OpConstNil(v *Value) bool { + // match: (ConstNil) + // result: (MOVVconst [0]) + for { + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } +} +func rewriteValueLOONG64_OpDiv16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16 x y) + // result: (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpLOONG64DIVV, types.NewTuple(typ.Int64, typ.Int64)) + v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpDiv16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div16u x y) + // result: (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpLOONG64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpDiv32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div32 x y) + // result: (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpLOONG64DIVV, types.NewTuple(typ.Int64, typ.Int64)) + v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpDiv32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div32u x y) + // result: (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpLOONG64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpDiv64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div64 x y) + // result: (Select1 (DIVV x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpLOONG64DIVV, types.NewTuple(typ.Int64, 
typ.Int64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpDiv64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div64u x y) + // result: (Select1 (DIVVU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpLOONG64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpDiv8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8 x y) + // result: (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpLOONG64DIVV, types.NewTuple(typ.Int64, typ.Int64)) + v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpDiv8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Div8u x y) + // result: (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpLOONG64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpEq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq16 x y) + // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpEq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq32 x y) + // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpEq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq32F x y) + // result: (FPFlagTrue (CMPEQF x y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64FPFlagTrue) + v0 := b.NewValue0(v.Pos, OpLOONG64CMPEQF, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpEq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq64 x y) + // result: (SGTU (MOVVconst [1]) (XOR x y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64XOR, 
typ.UInt64) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpEq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Eq64F x y) + // result: (FPFlagTrue (CMPEQD x y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64FPFlagTrue) + v0 := b.NewValue0(v.Pos, OpLOONG64CMPEQD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpEq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Eq8 x y) + // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) + v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpEqB(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqB x y) + // result: (XOR (MOVVconst [1]) (XOR x y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64XOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.Bool) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpEqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (EqPtr x y) + // result: (SGTU (MOVVconst [1]) (XOR x y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpHmul32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul32 x y) + // result: (SRAVconst (Select1 (MULV (SignExt32to64 x) (SignExt32to64 y))) [32]) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAVconst) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpSelect1, typ.Int64) + v1 := b.NewValue0(v.Pos, OpLOONG64MULV, types.NewTuple(typ.Int64, typ.Int64)) + v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpHmul32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul32u x y) + // result: (SRLVconst (Select1 (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32]) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SRLVconst) + v.AuxInt = int64ToAuxInt(32) + v0 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpHmul64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul64 x y) + // result: (Select0 (MULV x y)) + for { + x := v_0 + y := v_1 + 
v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpLOONG64MULV, types.NewTuple(typ.Int64, typ.Int64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpHmul64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Hmul64u x y) + // result: (Select0 (MULVU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpIsInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (IsInBounds idx len) + // result: (SGTU len idx) + for { + idx := v_0 + len := v_1 + v.reset(OpLOONG64SGTU) + v.AddArg2(len, idx) + return true + } +} +func rewriteValueLOONG64_OpIsNonNil(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (IsNonNil ptr) + // result: (SGTU ptr (MOVVconst [0])) + for { + ptr := v_0 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(ptr, v0) + return true + } +} +func rewriteValueLOONG64_OpIsSliceInBounds(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (IsSliceInBounds idx len) + // result: (XOR (MOVVconst [1]) (SGTU idx len)) + for { + idx := v_0 + len := v_1 + v.reset(OpLOONG64XOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v1.AddArg2(idx, len) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLOONG64ADDV(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (ADDV x (MOVVconst [c])) + // cond: is32Bit(c) + // result: (ADDVconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + continue + } + v.reset(OpLOONG64ADDVconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (ADDV x (NEGV y)) + // result: (SUBV x y) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpLOONG64NEGV { + continue + } + y := v_1.Args[0] + v.reset(OpLOONG64SUBV) + v.AddArg2(x, y) + return true + } + break + } + return false +} +func rewriteValueLOONG64_OpLOONG64ADDVconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) + // cond: is32Bit(off1+int64(off2)) + // result: (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr) + for { + off1 := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + if !(is32Bit(off1 + int64(off2))) { + break + } + v.reset(OpLOONG64MOVVaddr) + v.AuxInt = int32ToAuxInt(int32(off1) + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg(ptr) + return true + } + // match: (ADDVconst [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ADDVconst [c] (MOVVconst [d])) + // result: (MOVVconst [c+d]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(c + d) + return true + } + // match: (ADDVconst [c] (ADDVconst [d] x)) + // cond: is32Bit(c+d) + // result: (ADDVconst [c+d] 
x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64ADDVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(c + d)) { + break + } + v.reset(OpLOONG64ADDVconst) + v.AuxInt = int64ToAuxInt(c + d) + v.AddArg(x) + return true + } + // match: (ADDVconst [c] (SUBVconst [d] x)) + // cond: is32Bit(c-d) + // result: (ADDVconst [c-d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64SUBVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(c - d)) { + break + } + v.reset(OpLOONG64ADDVconst) + v.AuxInt = int64ToAuxInt(c - d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64AND(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (AND x (MOVVconst [c])) + // cond: is32Bit(c) + // result: (ANDconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + continue + } + v.reset(OpLOONG64ANDconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (AND x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64ANDconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ANDconst [0] _) + // result: (MOVVconst [0]) + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (ANDconst [-1] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != -1 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ANDconst [c] (MOVVconst [d])) + // result: (MOVVconst [c&d]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(c & d) + return true + } + // match: (ANDconst [c] (ANDconst [d] x)) + // result: (ANDconst [c&d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64ANDconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + v.reset(OpLOONG64ANDconst) + v.AuxInt = int64ToAuxInt(c & d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64LoweredAtomicAdd32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) + // cond: is32Bit(c) + // result: (LoweredAtomicAddconst32 [int32(c)] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpLOONG64LoweredAtomicAddconst32) + v.AuxInt = int32ToAuxInt(int32(c)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64LoweredAtomicAdd64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) + // cond: is32Bit(c) + // result: (LoweredAtomicAddconst64 [c] ptr mem) + for { + ptr := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + mem := v_2 + if !(is32Bit(c)) { + break + } + v.reset(OpLOONG64LoweredAtomicAddconst64) + v.AuxInt = int64ToAuxInt(c) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64LoweredAtomicStore32(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: 
(LoweredAtomicStore32 ptr (MOVVconst [0]) mem) + // result: (LoweredAtomicStorezero32 ptr mem) + for { + ptr := v_0 + if v_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpLOONG64LoweredAtomicStorezero32) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64LoweredAtomicStore64(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (LoweredAtomicStore64 ptr (MOVVconst [0]) mem) + // result: (LoweredAtomicStorezero64 ptr mem) + for { + ptr := v_0 + if v_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + mem := v_2 + v.reset(OpLOONG64LoweredAtomicStorezero64) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVBUload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpLOONG64MOVBUload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpLOONG64MOVBUload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVBUreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVBUreg x:(MOVBUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBUload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVBUreg x:(MOVBUreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBUreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVBUreg (MOVVconst [c])) + // result: (MOVVconst [int64(uint8(c))]) + for { + if v_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(uint8(c))) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVBload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpLOONG64MOVBload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: 
canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpLOONG64MOVBload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVBreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVBreg x:(MOVBload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVBreg x:(MOVBreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVBreg (MOVVconst [c])) + // result: (MOVVconst [int64(int8(c))]) + for { + if v_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(int8(c))) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVBstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVBstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpLOONG64MOVBreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpLOONG64MOVBUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem) + // result: 
(MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpLOONG64MOVHreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpLOONG64MOVHUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpLOONG64MOVWreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem) + // result: (MOVBstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpLOONG64MOVWUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVBstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpLOONG64MOVBstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpLOONG64MOVBstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVDload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVDload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpLOONG64MOVDload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) 
mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpLOONG64MOVDload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVDstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVDstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpLOONG64MOVDstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpLOONG64MOVDstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVFload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVFload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpLOONG64MOVFload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpLOONG64MOVFload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVFstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+off2) + // 
result: (MOVFstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpLOONG64MOVFstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpLOONG64MOVFstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVHUload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVHUload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpLOONG64MOVHUload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpLOONG64MOVHUload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVHUreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVHUreg x:(MOVBUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBUload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVHUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVHUload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVBUreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBUreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg x:(MOVHUreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVHUreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHUreg (MOVVconst [c])) + // result: (MOVVconst [int64(uint16(c))]) + for { + if v_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = 
int64ToAuxInt(int64(uint16(c))) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVHload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpLOONG64MOVHload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpLOONG64MOVHload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVHreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVHreg x:(MOVBload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBUload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVHload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVBUreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBUreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHreg x:(MOVHreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVHreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVHreg (MOVVconst [c])) + // result: (MOVVconst [int64(int16(c))]) + for { + if v_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(int16(c))) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVHstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVHstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpLOONG64MOVHstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: 
(MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpLOONG64MOVHstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpLOONG64MOVHreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpLOONG64MOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpLOONG64MOVHUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpLOONG64MOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpLOONG64MOVWreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpLOONG64MOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem) + // result: (MOVHstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpLOONG64MOVWUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpLOONG64MOVHstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVHstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpLOONG64MOVHstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpLOONG64MOVHstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + 
v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVVload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVVload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpLOONG64MOVVload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpLOONG64MOVVload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVVreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVVreg x) + // cond: x.Uses == 1 + // result: (MOVVnop x) + for { + x := v_0 + if !(x.Uses == 1) { + break + } + v.reset(OpLOONG64MOVVnop) + v.AddArg(x) + return true + } + // match: (MOVVreg (MOVVconst [c])) + // result: (MOVVconst [c]) + for { + if v_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(c) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVVstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVVstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpLOONG64MOVVstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpLOONG64MOVVstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVVstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVVstorezero [off1+int32(off2)] {sym} ptr mem) + for 
{ + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpLOONG64MOVVstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpLOONG64MOVVstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVWUload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVWUload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpLOONG64MOVWUload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpLOONG64MOVWUload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVWUreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVWUreg x:(MOVBUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBUload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVHUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVHUload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVWUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVWUload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVBUreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBUreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVHUreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVHUreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg x:(MOVWUreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVWUreg { + break + } + 
v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWUreg (MOVVconst [c])) + // result: (MOVVconst [int64(uint32(c))]) + for { + if v_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(uint32(c))) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVWload [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpLOONG64MOVWload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpLOONG64MOVWload) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVWreg(v *Value) bool { + v_0 := v.Args[0] + // match: (MOVWreg x:(MOVBload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVBUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBUload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVHload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHUload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVHUload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVWload _ _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVWload { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVBreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVBUreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVBUreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVHreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVHreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg x:(MOVWreg _)) + // result: (MOVVreg x) + for { + x := v_0 + if x.Op != OpLOONG64MOVWreg { + break + } + v.reset(OpLOONG64MOVVreg) + v.AddArg(x) + return true + } + // match: (MOVWreg (MOVVconst [c])) + // result: (MOVVconst [int64(int32(c))]) + for { + if v_0.Op != OpLOONG64MOVVconst { + break + } + 
c := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(int32(c))) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVWstore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVWstore [off1+int32(off2)] {sym} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpLOONG64MOVWstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + val := v_1 + mem := v_2 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpLOONG64MOVWstore) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg3(ptr, val, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem) + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpLOONG64MOVWreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpLOONG64MOVWstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem) + // result: (MOVWstore [off] {sym} ptr x mem) + for { + off := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + ptr := v_0 + if v_1.Op != OpLOONG64MOVWUreg { + break + } + x := v_1.Args[0] + mem := v_2 + v.reset(OpLOONG64MOVWstore) + v.AuxInt = int32ToAuxInt(off) + v.Aux = symToAux(sym) + v.AddArg3(ptr, x, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64MOVWstorezero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) + // cond: is32Bit(int64(off1)+off2) + // result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym := auxToSym(v.Aux) + if v_0.Op != OpLOONG64ADDVconst { + break + } + off2 := auxIntToInt64(v_0.AuxInt) + ptr := v_0.Args[0] + mem := v_1 + if !(is32Bit(int64(off1) + off2)) { + break + } + v.reset(OpLOONG64MOVWstorezero) + v.AuxInt = int32ToAuxInt(off1 + int32(off2)) + v.Aux = symToAux(sym) + v.AddArg2(ptr, mem) + return true + } + // match: (MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // result: (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + for { + off1 := auxIntToInt32(v.AuxInt) + sym1 := auxToSym(v.Aux) + if v_0.Op != OpLOONG64MOVVaddr { + break + } + off2 := auxIntToInt32(v_0.AuxInt) + sym2 := auxToSym(v_0.Aux) + ptr := v_0.Args[0] + mem := v_1 + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + break + } + v.reset(OpLOONG64MOVWstorezero) + v.AuxInt = int32ToAuxInt(off1 + 
int32(off2)) + v.Aux = symToAux(mergeSym(sym1, sym2)) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64NEGV(v *Value) bool { + v_0 := v.Args[0] + // match: (NEGV (MOVVconst [c])) + // result: (MOVVconst [-c]) + for { + if v_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(-c) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64NOR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (NOR x (MOVVconst [c])) + // cond: is32Bit(c) + // result: (NORconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + continue + } + v.reset(OpLOONG64NORconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + return false +} +func rewriteValueLOONG64_OpLOONG64NORconst(v *Value) bool { + v_0 := v.Args[0] + // match: (NORconst [c] (MOVVconst [d])) + // result: (MOVVconst [^(c|d)]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(^(c | d)) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64OR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (OR x (MOVVconst [c])) + // cond: is32Bit(c) + // result: (ORconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + continue + } + v.reset(OpLOONG64ORconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (OR x x) + // result: x + for { + x := v_0 + if x != v_1 { + break + } + v.copyOf(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64ORconst(v *Value) bool { + v_0 := v.Args[0] + // match: (ORconst [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (ORconst [-1] _) + // result: (MOVVconst [-1]) + for { + if auxIntToInt64(v.AuxInt) != -1 { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(-1) + return true + } + // match: (ORconst [c] (MOVVconst [d])) + // result: (MOVVconst [c|d]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(c | d) + return true + } + // match: (ORconst [c] (ORconst [d] x)) + // cond: is32Bit(c|d) + // result: (ORconst [c|d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64ORconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(c | d)) { + break + } + v.reset(OpLOONG64ORconst) + v.AuxInt = int64ToAuxInt(c | d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64SGT(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SGT (MOVVconst [c]) x) + // cond: is32Bit(c) + // result: (SGTconst [c] x) + for { + if v_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + if !(is32Bit(c)) { + break + } + v.reset(OpLOONG64SGTconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64SGTU(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SGTU 
(MOVVconst [c]) x) + // cond: is32Bit(c) + // result: (SGTUconst [c] x) + for { + if v_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + x := v_1 + if !(is32Bit(c)) { + break + } + v.reset(OpLOONG64SGTUconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64SGTUconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SGTUconst [c] (MOVVconst [d])) + // cond: uint64(c)>uint64(d) + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + if !(uint64(c) > uint64(d)) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTUconst [c] (MOVVconst [d])) + // cond: uint64(c)<=uint64(d) + // result: (MOVVconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + if !(uint64(c) <= uint64(d)) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SGTUconst [c] (MOVBUreg _)) + // cond: 0xff < uint64(c) + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVBUreg || !(0xff < uint64(c)) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTUconst [c] (MOVHUreg _)) + // cond: 0xffff < uint64(c) + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVHUreg || !(0xffff < uint64(c)) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTUconst [c] (ANDconst [m] _)) + // cond: uint64(m) < uint64(c) + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64ANDconst { + break + } + m := auxIntToInt64(v_0.AuxInt) + if !(uint64(m) < uint64(c)) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTUconst [c] (SRLVconst _ [d])) + // cond: 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64SRLVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + if !(0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64SGTconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SGTconst [c] (MOVVconst [d])) + // cond: c>d + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + if !(c > d) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTconst [c] (MOVVconst [d])) + // cond: c<=d + // result: (MOVVconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + if !(c <= d) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SGTconst [c] (MOVBreg _)) + // cond: 0x7f < c + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVBreg || !(0x7f < c) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTconst [c] (MOVBreg _)) + // cond: c <= -0x80 + // result: (MOVVconst [0]) + for { + c := 
auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVBreg || !(c <= -0x80) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SGTconst [c] (MOVBUreg _)) + // cond: 0xff < c + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVBUreg || !(0xff < c) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTconst [c] (MOVBUreg _)) + // cond: c < 0 + // result: (MOVVconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVBUreg || !(c < 0) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SGTconst [c] (MOVHreg _)) + // cond: 0x7fff < c + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVHreg || !(0x7fff < c) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTconst [c] (MOVHreg _)) + // cond: c <= -0x8000 + // result: (MOVVconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVHreg || !(c <= -0x8000) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SGTconst [c] (MOVHUreg _)) + // cond: 0xffff < c + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVHUreg || !(0xffff < c) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTconst [c] (MOVHUreg _)) + // cond: c < 0 + // result: (MOVVconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVHUreg || !(c < 0) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SGTconst [c] (MOVWUreg _)) + // cond: c < 0 + // result: (MOVVconst [0]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVWUreg || !(c < 0) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SGTconst [c] (ANDconst [m] _)) + // cond: 0 <= m && m < c + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64ANDconst { + break + } + m := auxIntToInt64(v_0.AuxInt) + if !(0 <= m && m < c) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + // match: (SGTconst [c] (SRLVconst _ [d])) + // cond: 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) + // result: (MOVVconst [1]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64SRLVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + if !(0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(1) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64SLLV(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SLLV _ (MOVVconst [c])) + // cond: uint64(c)>=64 + // result: (MOVVconst [0]) + for { + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 64) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SLLV x (MOVVconst [c])) + // result: (SLLVconst x [c]) + for { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpLOONG64SLLVconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64SLLVconst(v 
*Value) bool { + v_0 := v.Args[0] + // match: (SLLVconst [c] (MOVVconst [d])) + // result: (MOVVconst [d<<uint64(c)]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(d << uint64(c)) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64SRAV(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SRAV x (MOVVconst [c])) + // cond: uint64(c)>=64 + // result: (SRAVconst x [63]) + for { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 64) { + break + } + v.reset(OpLOONG64SRAVconst) + v.AuxInt = int64ToAuxInt(63) + v.AddArg(x) + return true + } + // match: (SRAV x (MOVVconst [c])) + // result: (SRAVconst x [c]) + for { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpLOONG64SRAVconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64SRAVconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SRAVconst [c] (MOVVconst [d])) + // result: (MOVVconst [d>>uint64(c)]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(d >> uint64(c)) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64SRLV(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SRLV _ (MOVVconst [c])) + // cond: uint64(c)>=64 + // result: (MOVVconst [0]) + for { + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(uint64(c) >= 64) { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SRLV x (MOVVconst [c])) + // result: (SRLVconst x [c]) + for { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpLOONG64SRLVconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64SRLVconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SRLVconst [c] (MOVVconst [d])) + // result: (MOVVconst [int64(uint64(d)>>uint64(c))]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(uint64(d) >> uint64(c))) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64SUBV(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SUBV x (MOVVconst [c])) + // cond: is32Bit(c) + // result: (SUBVconst [c] x) + for { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + break + } + v.reset(OpLOONG64SUBVconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + // match: (SUBV x x) + // result: (MOVVconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SUBV (MOVVconst [0]) x) + // result: (NEGV x) + for { + if v_0.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_1 + v.reset(OpLOONG64NEGV) + v.AddArg(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64SUBVconst(v *Value) bool { + v_0 := v.Args[0] + // match: (SUBVconst [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (SUBVconst [c] (MOVVconst [d])) + // result: (MOVVconst [d-c]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(d - c) + return true + } + // match: (SUBVconst [c] (SUBVconst [d] x)) + // cond: is32Bit(-c-d) + // result: 
(ADDVconst [-c-d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64SUBVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(-c - d)) { + break + } + v.reset(OpLOONG64ADDVconst) + v.AuxInt = int64ToAuxInt(-c - d) + v.AddArg(x) + return true + } + // match: (SUBVconst [c] (ADDVconst [d] x)) + // cond: is32Bit(-c+d) + // result: (ADDVconst [-c+d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64ADDVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(-c + d)) { + break + } + v.reset(OpLOONG64ADDVconst) + v.AuxInt = int64ToAuxInt(-c + d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64XOR(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (XOR x (MOVVconst [c])) + // cond: is32Bit(c) + // result: (XORconst [c] x) + for { + for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + continue + } + c := auxIntToInt64(v_1.AuxInt) + if !(is32Bit(c)) { + continue + } + v.reset(OpLOONG64XORconst) + v.AuxInt = int64ToAuxInt(c) + v.AddArg(x) + return true + } + break + } + // match: (XOR x x) + // result: (MOVVconst [0]) + for { + x := v_0 + if x != v_1 { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + return false +} +func rewriteValueLOONG64_OpLOONG64XORconst(v *Value) bool { + v_0 := v.Args[0] + // match: (XORconst [0] x) + // result: x + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + x := v_0 + v.copyOf(x) + return true + } + // match: (XORconst [-1] x) + // result: (NORconst [0] x) + for { + if auxIntToInt64(v.AuxInt) != -1 { + break + } + x := v_0 + v.reset(OpLOONG64NORconst) + v.AuxInt = int64ToAuxInt(0) + v.AddArg(x) + return true + } + // match: (XORconst [c] (MOVVconst [d])) + // result: (MOVVconst [c^d]) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(c ^ d) + return true + } + // match: (XORconst [c] (XORconst [d] x)) + // cond: is32Bit(c^d) + // result: (XORconst [c^d] x) + for { + c := auxIntToInt64(v.AuxInt) + if v_0.Op != OpLOONG64XORconst { + break + } + d := auxIntToInt64(v_0.AuxInt) + x := v_0.Args[0] + if !(is32Bit(c ^ d)) { + break + } + v.reset(OpLOONG64XORconst) + v.AuxInt = int64ToAuxInt(c ^ d) + v.AddArg(x) + return true + } + return false +} +func rewriteValueLOONG64_OpLeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq16 x y) + // result: (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64XOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64SGT, typ.Bool) + v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLeq16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq16U x y) + // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64XOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := 
b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq32 x y) + // result: (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64XOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64SGT, typ.Bool) + v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLeq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq32F x y) + // result: (FPFlagTrue (CMPGEF y x)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64FPFlagTrue) + v0 := b.NewValue0(v.Pos, OpLOONG64CMPGEF, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpLeq32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq32U x y) + // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64XOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLeq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq64 x y) + // result: (XOR (MOVVconst [1]) (SGT x y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64XOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64SGT, typ.Bool) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Leq64F x y) + // result: (FPFlagTrue (CMPGED y x)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64FPFlagTrue) + v0 := b.NewValue0(v.Pos, OpLOONG64CMPGED, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpLeq64U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq64U x y) + // result: (XOR (MOVVconst [1]) (SGTU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64XOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v1.AddArg2(x, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq8 x y) + // result: (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64XOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64SGT, typ.Bool) + v2 := b.NewValue0(v.Pos, OpSignExt8to64, 
typ.Int64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLeq8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Leq8U x y) + // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64XOR) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v2.AddArg(x) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLess16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less16 x y) + // result: (SGT (SignExt16to64 y) (SignExt16to64 x)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGT) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLess16U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less16U x y) + // result: (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLess32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less32 x y) + // result: (SGT (SignExt32to64 y) (SignExt32to64 x)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGT) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLess32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less32F x y) + // result: (FPFlagTrue (CMPGTF y x)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64FPFlagTrue) + v0 := b.NewValue0(v.Pos, OpLOONG64CMPGTF, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpLess32U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less32U x y) + // result: (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLess64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Less64 x y) + // result: (SGT y x) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGT) + v.AddArg2(y, x) + return true + } +} +func rewriteValueLOONG64_OpLess64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Less64F x y) + // result: (FPFlagTrue (CMPGTD y x)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64FPFlagTrue) + v0 := b.NewValue0(v.Pos, OpLOONG64CMPGTD, types.TypeFlags) + v0.AddArg2(y, x) + v.AddArg(v0) + return true + } +} +func 
rewriteValueLOONG64_OpLess64U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Less64U x y) + // result: (SGTU y x) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v.AddArg2(y, x) + return true + } +} +func rewriteValueLOONG64_OpLess8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less8 x y) + // result: (SGT (SignExt8to64 y) (SignExt8to64 x)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGT) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLess8U(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Less8U x y) + // result: (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v0.AddArg(y) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpLoad(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Load ptr mem) + // cond: t.IsBoolean() + // result: (MOVBUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(t.IsBoolean()) { + break + } + v.reset(OpLOONG64MOVBUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is8BitInt(t) && isSigned(t)) + // result: (MOVBload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is8BitInt(t) && isSigned(t)) { + break + } + v.reset(OpLOONG64MOVBload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is8BitInt(t) && !isSigned(t)) + // result: (MOVBUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is8BitInt(t) && !isSigned(t)) { + break + } + v.reset(OpLOONG64MOVBUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is16BitInt(t) && isSigned(t)) + // result: (MOVHload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t) && isSigned(t)) { + break + } + v.reset(OpLOONG64MOVHload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is16BitInt(t) && !isSigned(t)) + // result: (MOVHUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is16BitInt(t) && !isSigned(t)) { + break + } + v.reset(OpLOONG64MOVHUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is32BitInt(t) && isSigned(t)) + // result: (MOVWload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t) && isSigned(t)) { + break + } + v.reset(OpLOONG64MOVWload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is32BitInt(t) && !isSigned(t)) + // result: (MOVWUload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitInt(t) && !isSigned(t)) { + break + } + v.reset(OpLOONG64MOVWUload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: (is64BitInt(t) || isPtr(t)) + // result: (MOVVload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitInt(t) || isPtr(t)) { + break + } + v.reset(OpLOONG64MOVVload) + v.AddArg2(ptr, mem) + return true + } + // match: (Load ptr mem) + // cond: is32BitFloat(t) + // result: (MOVFload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is32BitFloat(t)) { + break + } + v.reset(OpLOONG64MOVFload) + v.AddArg2(ptr, 
mem) + return true + } + // match: (Load ptr mem) + // cond: is64BitFloat(t) + // result: (MOVDload ptr mem) + for { + t := v.Type + ptr := v_0 + mem := v_1 + if !(is64BitFloat(t)) { + break + } + v.reset(OpLOONG64MOVDload) + v.AddArg2(ptr, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpLocalAddr(v *Value) bool { + v_0 := v.Args[0] + // match: (LocalAddr {sym} base _) + // result: (MOVVaddr {sym} base) + for { + sym := auxToSym(v.Aux) + base := v_0 + v.reset(OpLOONG64MOVVaddr) + v.Aux = symToAux(sym) + v.AddArg(base) + return true + } +} +func rewriteValueLOONG64_OpLsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x16 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueLOONG64_OpLsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x32 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueLOONG64_OpLsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x64 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v1.AddArg2(v2, y) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v3.AddArg2(x, y) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueLOONG64_OpLsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh16x8 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueLOONG64_OpLsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x16 x y) + // result: (AND 
(NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueLOONG64_OpLsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x32 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueLOONG64_OpLsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x64 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v1.AddArg2(v2, y) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v3.AddArg2(x, y) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueLOONG64_OpLsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh32x8 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueLOONG64_OpLsh64x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x16 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueLOONG64_OpLsh64x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x32 x y) + // result: (AND (NEGV (SGTU 
(MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueLOONG64_OpLsh64x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x64 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SLLV x y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v1.AddArg2(v2, y) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v3.AddArg2(x, y) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueLOONG64_OpLsh64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh64x8 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueLOONG64_OpLsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x16 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SLLV x (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueLOONG64_OpLsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x32 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SLLV x (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueLOONG64_OpLsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x64 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) 
(SLLV x y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v1.AddArg2(v2, y) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v3.AddArg2(x, y) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueLOONG64_OpLsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Lsh8x8 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SLLV x (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpLOONG64SLLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueLOONG64_OpMod16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16 x y) + // result: (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpLOONG64DIVV, types.NewTuple(typ.Int64, typ.Int64)) + v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpMod16u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod16u x y) + // result: (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpLOONG64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpMod32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod32 x y) + // result: (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpLOONG64DIVV, types.NewTuple(typ.Int64, typ.Int64)) + v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpMod32u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod32u x y) + // result: (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpLOONG64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpMod64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod64 x y) + // result: (Select0 (DIVV x y)) + for { 
+ x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpLOONG64DIVV, types.NewTuple(typ.Int64, typ.Int64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpMod64u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod64u x y) + // result: (Select0 (DIVVU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpLOONG64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpMod8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8 x y) + // result: (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpLOONG64DIVV, types.NewTuple(typ.Int64, typ.Int64)) + v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpMod8u(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mod8u x y) + // result: (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))) + for { + x := v_0 + y := v_1 + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpLOONG64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpMove(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Move [0] _ _ mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_2 + v.copyOf(mem) + return true + } + // match: (Move [1] dst src mem) + // result: (MOVBstore dst (MOVBload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpLOONG64MOVBstore) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [2] {t} dst src mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore dst (MOVHload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpLOONG64MOVHstore) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [2] dst src mem) + // result: (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) + v0.AuxInt = int32ToAuxInt(1) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [4] {t} dst src mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore dst (MOVWload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + t 
:= auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpLOONG64MOVWstore) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [4] {t} dst src mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpLOONG64MOVHstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [4] dst src mem) + // result: (MOVBstore [3] dst (MOVBload [3] src mem) (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(3) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) + v0.AuxInt = int32ToAuxInt(3) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(2) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) + v2.AuxInt = int32ToAuxInt(2) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) + v3.AuxInt = int32ToAuxInt(1) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) + v4.AuxInt = int32ToAuxInt(1) + v4.AddArg2(src, mem) + v5 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) + v6 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [8] {t} dst src mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVVstore dst (MOVVload src mem) mem) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpLOONG64MOVVstore) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64) + v0.AddArg2(src, mem) + v.AddArg3(dst, v0, mem) + return true + } + // match: (Move [8] {t} dst src mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpLOONG64MOVWstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [8] {t} dst src mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))) + for { + if 
auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpLOONG64MOVHstore) + v.AuxInt = int32ToAuxInt(6) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) + v0.AuxInt = int32ToAuxInt(6) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(4) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) + v2.AuxInt = int32ToAuxInt(4) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) + v3.AuxInt = int32ToAuxInt(2) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) + v4.AuxInt = int32ToAuxInt(2) + v4.AddArg2(src, mem) + v5 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) + v6 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) + v6.AddArg2(src, mem) + v5.AddArg3(dst, v6, mem) + v3.AddArg3(dst, v4, v5) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [3] dst src mem) + // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + dst := v_0 + src := v_1 + mem := v_2 + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) + v0.AuxInt = int32ToAuxInt(2) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) + v2.AuxInt = int32ToAuxInt(1) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVBload, typ.Int8) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [6] {t} dst src mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))) + for { + if auxIntToInt64(v.AuxInt) != 6 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpLOONG64MOVHstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) + v0.AuxInt = int32ToAuxInt(4) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(2) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) + v2.AuxInt = int32ToAuxInt(2) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVHload, typ.Int16) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [12] {t} dst src mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))) + for { + if auxIntToInt64(v.AuxInt) != 12 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpLOONG64MOVWstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(4) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32) + 
v2.AuxInt = int32ToAuxInt(4) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVWload, typ.Int32) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [16] {t} dst src mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem)) + for { + if auxIntToInt64(v.AuxInt) != 16 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpLOONG64MOVVstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(8) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64) + v2.AddArg2(src, mem) + v1.AddArg3(dst, v2, mem) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [24] {t} dst src mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVVstore [16] dst (MOVVload [16] src mem) (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem))) + for { + if auxIntToInt64(v.AuxInt) != 24 { + break + } + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpLOONG64MOVVstore) + v.AuxInt = int32ToAuxInt(16) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64) + v0.AuxInt = int32ToAuxInt(16) + v0.AddArg2(src, mem) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(8) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64) + v2.AuxInt = int32ToAuxInt(8) + v2.AddArg2(src, mem) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVVload, typ.UInt64) + v4.AddArg2(src, mem) + v3.AddArg3(dst, v4, mem) + v1.AddArg3(dst, v2, v3) + v.AddArg3(dst, v0, v1) + return true + } + // match: (Move [s] {t} dst src mem) + // cond: s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s) + // result: (DUFFCOPY [16 * (128 - s/8)] dst src mem) + for { + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) { + break + } + v.reset(OpLOONG64DUFFCOPY) + v.AuxInt = int64ToAuxInt(16 * (128 - s/8)) + v.AddArg3(dst, src, mem) + return true + } + // match: (Move [s] {t} dst src mem) + // cond: s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0 + // result: (LoweredMove [t.Alignment()] dst src (ADDVconst src [s-moveSize(t.Alignment(), config)]) mem) + for { + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + dst := v_0 + src := v_1 + mem := v_2 + if !(s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0) { + break + } + v.reset(OpLOONG64LoweredMove) + v.AuxInt = int64ToAuxInt(t.Alignment()) + v0 := b.NewValue0(v.Pos, OpLOONG64ADDVconst, src.Type) + v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config)) + v0.AddArg(src) + v.AddArg4(dst, src, v0, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpMul16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mul16 x y) + // result: (Select1 (MULVU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) + 
v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpMul32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mul32 x y) + // result: (Select1 (MULVU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpMul64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mul64 x y) + // result: (Select1 (MULVU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpMul8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Mul8 x y) + // result: (Select1 (MULVU x y)) + for { + x := v_0 + y := v_1 + v.reset(OpSelect1) + v0 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpNeq16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq16 x y) + // result: (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)) (MOVVconst [0])) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueLOONG64_OpNeq32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq32 x y) + // result: (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0])) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueLOONG64_OpNeq32F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq32F x y) + // result: (FPFlagFalse (CMPEQF x y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64FPFlagFalse) + v0 := b.NewValue0(v.Pos, OpLOONG64CMPEQF, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpNeq64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq64 x y) + // result: (SGTU (XOR x y) (MOVVconst [0])) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpNeq64F(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + // match: (Neq64F x y) + // result: (FPFlagFalse (CMPEQD x y)) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64FPFlagFalse) + v0 := b.NewValue0(v.Pos, 
OpLOONG64CMPEQD, types.TypeFlags) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpNeq8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Neq8 x y) + // result: (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0])) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v1.AddArg(x) + v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v2.AddArg(y) + v0.AddArg2(v1, v2) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueLOONG64_OpNeqPtr(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (NeqPtr x y) + // result: (SGTU (XOR x y) (MOVVconst [0])) + for { + x := v_0 + y := v_1 + v.reset(OpLOONG64SGTU) + v0 := b.NewValue0(v.Pos, OpLOONG64XOR, typ.UInt64) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpNot(v *Value) bool { + v_0 := v.Args[0] + // match: (Not x) + // result: (XORconst [1] x) + for { + x := v_0 + v.reset(OpLOONG64XORconst) + v.AuxInt = int64ToAuxInt(1) + v.AddArg(x) + return true + } +} +func rewriteValueLOONG64_OpOffPtr(v *Value) bool { + v_0 := v.Args[0] + // match: (OffPtr [off] ptr:(SP)) + // result: (MOVVaddr [int32(off)] ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + if ptr.Op != OpSP { + break + } + v.reset(OpLOONG64MOVVaddr) + v.AuxInt = int32ToAuxInt(int32(off)) + v.AddArg(ptr) + return true + } + // match: (OffPtr [off] ptr) + // result: (ADDVconst [off] ptr) + for { + off := auxIntToInt64(v.AuxInt) + ptr := v_0 + v.reset(OpLOONG64ADDVconst) + v.AuxInt = int64ToAuxInt(off) + v.AddArg(ptr) + return true + } +} +func rewriteValueLOONG64_OpPanicBounds(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 0 + // result: (LoweredPanicBoundsA [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 0) { + break + } + v.reset(OpLOONG64LoweredPanicBoundsA) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 1 + // result: (LoweredPanicBoundsB [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 1) { + break + } + v.reset(OpLOONG64LoweredPanicBoundsB) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + // match: (PanicBounds [kind] x y mem) + // cond: boundsABI(kind) == 2 + // result: (LoweredPanicBoundsC [kind] x y mem) + for { + kind := auxIntToInt64(v.AuxInt) + x := v_0 + y := v_1 + mem := v_2 + if !(boundsABI(kind) == 2) { + break + } + v.reset(OpLOONG64LoweredPanicBoundsC) + v.AuxInt = int64ToAuxInt(kind) + v.AddArg3(x, y, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpRotateLeft16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft16 x (MOVVconst [c])) + // result: (Or16 (Lsh16x64 x (MOVVconst [c&15])) (Rsh16Ux64 x (MOVVconst [-c&15]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := 
auxIntToInt64(v_1.AuxInt) + v.reset(OpOr16) + v0 := b.NewValue0(v.Pos, OpLsh16x64, t) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(c & 15) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(-c & 15) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func rewriteValueLOONG64_OpRotateLeft32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft32 x (MOVVconst [c])) + // result: (Or32 (Lsh32x64 x (MOVVconst [c&31])) (Rsh32Ux64 x (MOVVconst [-c&31]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpOr32) + v0 := b.NewValue0(v.Pos, OpLsh32x64, t) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(c & 31) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh32Ux64, t) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(-c & 31) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func rewriteValueLOONG64_OpRotateLeft64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft64 x (MOVVconst [c])) + // result: (Or64 (Lsh64x64 x (MOVVconst [c&63])) (Rsh64Ux64 x (MOVVconst [-c&63]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpOr64) + v0 := b.NewValue0(v.Pos, OpLsh64x64, t) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(c & 63) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh64Ux64, t) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(-c & 63) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func rewriteValueLOONG64_OpRotateLeft8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (RotateLeft8 x (MOVVconst [c])) + // result: (Or8 (Lsh8x64 x (MOVVconst [c&7])) (Rsh8Ux64 x (MOVVconst [-c&7]))) + for { + t := v.Type + x := v_0 + if v_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_1.AuxInt) + v.reset(OpOr8) + v0 := b.NewValue0(v.Pos, OpLsh8x64, t) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v1.AuxInt = int64ToAuxInt(c & 7) + v0.AddArg2(x, v1) + v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(-c & 7) + v2.AddArg2(x, v3) + v.AddArg2(v0, v2) + return true + } + return false +} +func rewriteValueLOONG64_OpRsh16Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux16 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v5.AddArg(x) + v4.AddArg2(v5, v3) + v.AddArg2(v0, v4) + return true + } +} 
+func rewriteValueLOONG64_OpRsh16Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux32 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v5.AddArg(x) + v4.AddArg2(v5, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueLOONG64_OpRsh16Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux64 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt16to64 x) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v1.AddArg2(v2, y) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(x) + v3.AddArg2(v4, y) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueLOONG64_OpRsh16Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16Ux8 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt16to64 x) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v5.AddArg(x) + v4.AddArg2(v5, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueLOONG64_OpRsh16x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x16 x y) + // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpRsh16x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x32 x y) + // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, 
typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpRsh16x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x64 x y) + // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(63) + v3.AddArg2(y, v4) + v2.AddArg(v3) + v1.AddArg2(v2, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpRsh16x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh16x8 x y) + // result: (SRAV (SignExt16to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpRsh32Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux16 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v5.AddArg(x) + v4.AddArg2(v5, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueLOONG64_OpRsh32Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux32 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v5.AddArg(x) + v4.AddArg2(v5, v3) + 
v.AddArg2(v0, v4) + return true + } +} +func rewriteValueLOONG64_OpRsh32Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux64 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt32to64 x) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v1.AddArg2(v2, y) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(x) + v3.AddArg2(v4, y) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueLOONG64_OpRsh32Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32Ux8 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt32to64 x) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v5.AddArg(x) + v4.AddArg2(v5, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueLOONG64_OpRsh32x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x16 x y) + // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpRsh32x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x32 x y) + // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpRsh32x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x64 x y) + // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, 
typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(63) + v3.AddArg2(y, v4) + v2.AddArg(v3) + v1.AddArg2(v2, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpRsh32x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh32x8 x y) + // result: (SRAV (SignExt32to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpRsh64Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux16 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV x (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueLOONG64_OpRsh64Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux32 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV x (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueLOONG64_OpRsh64Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux64 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV x y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v1.AddArg2(v2, y) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v3.AddArg2(x, y) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueLOONG64_OpRsh64Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64Ux8 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV x (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + 
v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v4.AddArg2(x, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueLOONG64_OpRsh64x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x16 x y) + // result: (SRAV x (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(63) + v2.AddArg2(v3, v4) + v1.AddArg(v2) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueLOONG64_OpRsh64x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x32 x y) + // result: (SRAV x (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(63) + v2.AddArg2(v3, v4) + v1.AddArg(v2) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueLOONG64_OpRsh64x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x64 x y) + // result: (SRAV x (OR (NEGV (SGTU y (MOVVconst [63]))) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v3.AuxInt = int64ToAuxInt(63) + v2.AddArg2(y, v3) + v1.AddArg(v2) + v0.AddArg2(v1, y) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueLOONG64_OpRsh64x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh64x8 x y) + // result: (SRAV x (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v1 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v2 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(63) + v2.AddArg2(v3, v4) + v1.AddArg(v2) + v0.AddArg2(v1, v3) + v.AddArg2(x, v0) + return true + } +} +func rewriteValueLOONG64_OpRsh8Ux16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux16 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt16to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + 
v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v5.AddArg(x) + v4.AddArg2(v5, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueLOONG64_OpRsh8Ux32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux32 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt32to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v5.AddArg(x) + v4.AddArg2(v5, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueLOONG64_OpRsh8Ux64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux64 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) y)) (SRLV (ZeroExt8to64 x) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v1.AddArg2(v2, y) + v0.AddArg(v1) + v3 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(x) + v3.AddArg2(v4, y) + v.AddArg2(v0, v3) + return true + } +} +func rewriteValueLOONG64_OpRsh8Ux8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8Ux8 x y) + // result: (AND (NEGV (SGTU (MOVVconst [64]) (ZeroExt8to64 y))) (SRLV (ZeroExt8to64 x) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64AND) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v1 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(64) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) + v1.AddArg2(v2, v3) + v0.AddArg(v1) + v4 := b.NewValue0(v.Pos, OpLOONG64SRLV, t) + v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v5.AddArg(x) + v4.AddArg2(v5, v3) + v.AddArg2(v0, v4) + return true + } +} +func rewriteValueLOONG64_OpRsh8x16(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x16 x y) + // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt16to64 y) (MOVVconst [63]))) (ZeroExt16to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + 
v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpRsh8x32(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x32 x y) + // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt32to64 y) (MOVVconst [63]))) (ZeroExt32to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpRsh8x64(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x64 x y) + // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU y (MOVVconst [63]))) y)) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v4.AuxInt = int64ToAuxInt(63) + v3.AddArg2(y, v4) + v2.AddArg(v3) + v1.AddArg2(v2, y) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpRsh8x8(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Rsh8x8 x y) + // result: (SRAV (SignExt8to64 x) (OR (NEGV (SGTU (ZeroExt8to64 y) (MOVVconst [63]))) (ZeroExt8to64 y))) + for { + t := v.Type + x := v_0 + y := v_1 + v.reset(OpLOONG64SRAV) + v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64) + v0.AddArg(x) + v1 := b.NewValue0(v.Pos, OpLOONG64OR, t) + v2 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v3 := b.NewValue0(v.Pos, OpLOONG64SGTU, typ.Bool) + v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v4.AddArg(y) + v5 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v5.AuxInt = int64ToAuxInt(63) + v3.AddArg2(v4, v5) + v2.AddArg(v3) + v1.AddArg2(v2, v4) + v.AddArg2(v0, v1) + return true + } +} +func rewriteValueLOONG64_OpSelect0(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Select0 (Mul64uover x y)) + // result: (Select1 (MULVU x y)) + for { + if v_0.Op != OpMul64uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpSelect1) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v0.AddArg2(x, y) + v.AddArg(v0) + return true + } + // match: (Select0 (DIVVU _ (MOVVconst [1]))) + // result: (MOVVconst [0]) + for { + if v_0.Op != OpLOONG64DIVVU { + break + } + _ = v_0.Args[1] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (Select0 (DIVVU x (MOVVconst [c]))) + // cond: isPowerOfTwo64(c) + // result: (ANDconst [c-1] x) + for { + if v_0.Op != OpLOONG64DIVVU { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0_1.AuxInt) + if !(isPowerOfTwo64(c)) { + break + } 
+ v.reset(OpLOONG64ANDconst) + v.AuxInt = int64ToAuxInt(c - 1) + v.AddArg(x) + return true + } + // match: (Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) + // result: (MOVVconst [c%d]) + for { + if v_0.Op != OpLOONG64DIVV { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0_0.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0_1.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(c % d) + return true + } + // match: (Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) + // result: (MOVVconst [int64(uint64(c)%uint64(d))]) + for { + if v_0.Op != OpLOONG64DIVVU { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0_0.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0_1.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(uint64(c) % uint64(d))) + return true + } + return false +} +func rewriteValueLOONG64_OpSelect1(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types + // match: (Select1 (Mul64uover x y)) + // result: (SGTU (Select0 (MULVU x y)) (MOVVconst [0])) + for { + if v_0.Op != OpMul64uover { + break + } + y := v_0.Args[1] + x := v_0.Args[0] + v.reset(OpLOONG64SGTU) + v.Type = typ.Bool + v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpLOONG64MULVU, types.NewTuple(typ.UInt64, typ.UInt64)) + v1.AddArg2(x, y) + v0.AddArg(v1) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v2.AuxInt = int64ToAuxInt(0) + v.AddArg2(v0, v2) + return true + } + // match: (Select1 (MULVU x (MOVVconst [-1]))) + // result: (NEGV x) + for { + if v_0.Op != OpLOONG64MULVU { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != -1 { + break + } + v.reset(OpLOONG64NEGV) + v.AddArg(x) + return true + } + // match: (Select1 (MULVU _ (MOVVconst [0]))) + // result: (MOVVconst [0]) + for { + if v_0.Op != OpLOONG64MULVU { + break + } + _ = v_0.Args[1] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { + break + } + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (Select1 (MULVU x (MOVVconst [1]))) + // result: x + for { + if v_0.Op != OpLOONG64MULVU { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 { + break + } + v.copyOf(x) + return true + } + // match: (Select1 (MULVU x (MOVVconst [c]))) + // cond: isPowerOfTwo64(c) + // result: (SLLVconst [log64(c)] x) + for { + if v_0.Op != OpLOONG64MULVU { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0_1.AuxInt) + if !(isPowerOfTwo64(c)) { + break + } + v.reset(OpLOONG64SLLVconst) + v.AuxInt = int64ToAuxInt(log64(c)) + v.AddArg(x) + return true + } + // match: (Select1 (DIVVU x (MOVVconst [1]))) + // result: x + for { + if v_0.Op != OpLOONG64DIVVU { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 { + break + } + v.copyOf(x) + return true + } + // match: (Select1 (DIVVU x (MOVVconst [c]))) + // cond: isPowerOfTwo64(c) + // result: (SRLVconst [log64(c)] x) + for { + if v_0.Op 
!= OpLOONG64DIVVU { + break + } + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0_1.AuxInt) + if !(isPowerOfTwo64(c)) { + break + } + v.reset(OpLOONG64SRLVconst) + v.AuxInt = int64ToAuxInt(log64(c)) + v.AddArg(x) + return true + } + // match: (Select1 (MULVU (MOVVconst [c]) (MOVVconst [d]))) + // result: (MOVVconst [c*d]) + for { + if v_0.Op != OpLOONG64MULVU { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0_0.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0_1.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(c * d) + return true + } + // match: (Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) + // result: (MOVVconst [c/d]) + for { + if v_0.Op != OpLOONG64DIVV { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0_0.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0_1.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(c / d) + return true + } + // match: (Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) + // result: (MOVVconst [int64(uint64(c)/uint64(d))]) + for { + if v_0.Op != OpLOONG64DIVVU { + break + } + _ = v_0.Args[1] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpLOONG64MOVVconst { + break + } + c := auxIntToInt64(v_0_0.AuxInt) + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpLOONG64MOVVconst { + break + } + d := auxIntToInt64(v_0_1.AuxInt) + v.reset(OpLOONG64MOVVconst) + v.AuxInt = int64ToAuxInt(int64(uint64(c) / uint64(d))) + return true + } + return false +} +func rewriteValueLOONG64_OpSlicemask(v *Value) bool { + v_0 := v.Args[0] + b := v.Block + // match: (Slicemask x) + // result: (SRAVconst (NEGV x) [63]) + for { + t := v.Type + x := v_0 + v.reset(OpLOONG64SRAVconst) + v.AuxInt = int64ToAuxInt(63) + v0 := b.NewValue0(v.Pos, OpLOONG64NEGV, t) + v0.AddArg(x) + v.AddArg(v0) + return true + } +} +func rewriteValueLOONG64_OpStore(v *Value) bool { + v_2 := v.Args[2] + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (Store {t} ptr val mem) + // cond: t.Size() == 1 + // result: (MOVBstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 1) { + break + } + v.reset(OpLOONG64MOVBstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 2 + // result: (MOVHstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 2) { + break + } + v.reset(OpLOONG64MOVHstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && !is32BitFloat(val.Type) + // result: (MOVWstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && !is32BitFloat(val.Type)) { + break + } + v.reset(OpLOONG64MOVWstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && !is64BitFloat(val.Type) + // result: (MOVVstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8 && !is64BitFloat(val.Type)) { + break + } + v.reset(OpLOONG64MOVVstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 4 && is32BitFloat(val.Type) + // result: (MOVFstore 
ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 4 && is32BitFloat(val.Type)) { + break + } + v.reset(OpLOONG64MOVFstore) + v.AddArg3(ptr, val, mem) + return true + } + // match: (Store {t} ptr val mem) + // cond: t.Size() == 8 && is64BitFloat(val.Type) + // result: (MOVDstore ptr val mem) + for { + t := auxToType(v.Aux) + ptr := v_0 + val := v_1 + mem := v_2 + if !(t.Size() == 8 && is64BitFloat(val.Type)) { + break + } + v.reset(OpLOONG64MOVDstore) + v.AddArg3(ptr, val, mem) + return true + } + return false +} +func rewriteValueLOONG64_OpZero(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + b := v.Block + config := b.Func.Config + typ := &b.Func.Config.Types + // match: (Zero [0] _ mem) + // result: mem + for { + if auxIntToInt64(v.AuxInt) != 0 { + break + } + mem := v_1 + v.copyOf(mem) + return true + } + // match: (Zero [1] ptr mem) + // result: (MOVBstore ptr (MOVVconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 1 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpLOONG64MOVBstore) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [2] {t} ptr mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore ptr (MOVVconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpLOONG64MOVHstore) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [2] ptr mem) + // result: (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 2 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(0) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [4] {t} ptr mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore ptr (MOVVconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpLOONG64MOVWstore) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [4] {t} ptr mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpLOONG64MOVHstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(0) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [4] ptr mem) + // result: (MOVBstore [3] ptr (MOVVconst [0]) (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 4 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(3) + v0 := 
b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(2) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(1) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) + v3.AuxInt = int32ToAuxInt(0) + v3.AddArg3(ptr, v0, mem) + v2.AddArg3(ptr, v0, v3) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [8] {t} ptr mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVVstore ptr (MOVVconst [0]) mem) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpLOONG64MOVVstore) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg3(ptr, v0, mem) + return true + } + // match: (Zero [8] {t} ptr mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpLOONG64MOVWstore) + v.AuxInt = int32ToAuxInt(4) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(0) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [8] {t} ptr mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [6] ptr (MOVVconst [0]) (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem)))) + for { + if auxIntToInt64(v.AuxInt) != 8 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpLOONG64MOVHstore) + v.AuxInt = int32ToAuxInt(6) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(4) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(2) + v3 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) + v3.AuxInt = int32ToAuxInt(0) + v3.AddArg3(ptr, v0, mem) + v2.AddArg3(ptr, v0, v3) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [3] ptr mem) + // result: (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem))) + for { + if auxIntToInt64(v.AuxInt) != 3 { + break + } + ptr := v_0 + mem := v_1 + v.reset(OpLOONG64MOVBstore) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(1) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVBstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(0) + v2.AddArg3(ptr, v0, mem) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [6] {t} ptr mem) + // cond: t.Alignment()%2 == 0 + // result: (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem))) + for { + if auxIntToInt64(v.AuxInt) != 6 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%2 == 0) { + break + } + v.reset(OpLOONG64MOVHstore) + v.AuxInt = int32ToAuxInt(4) + v0 := 
b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(2) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVHstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(0) + v2.AddArg3(ptr, v0, mem) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [12] {t} ptr mem) + // cond: t.Alignment()%4 == 0 + // result: (MOVWstore [8] ptr (MOVVconst [0]) (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem))) + for { + if auxIntToInt64(v.AuxInt) != 12 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%4 == 0) { + break + } + v.reset(OpLOONG64MOVWstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(4) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVWstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(0) + v2.AddArg3(ptr, v0, mem) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [16] {t} ptr mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem)) + for { + if auxIntToInt64(v.AuxInt) != 16 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpLOONG64MOVVstore) + v.AuxInt = int32ToAuxInt(8) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(0) + v1.AddArg3(ptr, v0, mem) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [24] {t} ptr mem) + // cond: t.Alignment()%8 == 0 + // result: (MOVVstore [16] ptr (MOVVconst [0]) (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem))) + for { + if auxIntToInt64(v.AuxInt) != 24 { + break + } + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(t.Alignment()%8 == 0) { + break + } + v.reset(OpLOONG64MOVVstore) + v.AuxInt = int32ToAuxInt(16) + v0 := b.NewValue0(v.Pos, OpLOONG64MOVVconst, typ.UInt64) + v0.AuxInt = int64ToAuxInt(0) + v1 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem) + v1.AuxInt = int32ToAuxInt(8) + v2 := b.NewValue0(v.Pos, OpLOONG64MOVVstore, types.TypeMem) + v2.AuxInt = int32ToAuxInt(0) + v2.AddArg3(ptr, v0, mem) + v1.AddArg3(ptr, v0, v2) + v.AddArg3(ptr, v0, v1) + return true + } + // match: (Zero [s] {t} ptr mem) + // cond: s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice + // result: (DUFFZERO [8 * (128 - s/8)] ptr mem) + for { + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !(s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice) { + break + } + v.reset(OpLOONG64DUFFZERO) + v.AuxInt = int64ToAuxInt(8 * (128 - s/8)) + v.AddArg2(ptr, mem) + return true + } + // match: (Zero [s] {t} ptr mem) + // cond: (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0 + // result: (LoweredZero [t.Alignment()] ptr (ADDVconst ptr [s-moveSize(t.Alignment(), config)]) mem) + for { + s := auxIntToInt64(v.AuxInt) + t := auxToType(v.Aux) + ptr := v_0 + mem := v_1 + if !((s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0) { + break + } + v.reset(OpLOONG64LoweredZero) + v.AuxInt = int64ToAuxInt(t.Alignment()) + v0 := b.NewValue0(v.Pos, OpLOONG64ADDVconst, ptr.Type) + v0.AuxInt = 
int64ToAuxInt(s - moveSize(t.Alignment(), config)) + v0.AddArg(ptr) + v.AddArg3(ptr, v0, mem) + return true + } + return false +} +func rewriteBlockLOONG64(b *Block) bool { + switch b.Kind { + case BlockLOONG64EQ: + // match: (EQ (FPFlagTrue cmp) yes no) + // result: (FPF cmp yes no) + for b.Controls[0].Op == OpLOONG64FPFlagTrue { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockLOONG64FPF, cmp) + return true + } + // match: (EQ (FPFlagFalse cmp) yes no) + // result: (FPT cmp yes no) + for b.Controls[0].Op == OpLOONG64FPFlagFalse { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockLOONG64FPT, cmp) + return true + } + // match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == OpLOONG64XORconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpLOONG64SGT { + break + } + b.resetWithControl(BlockLOONG64NE, cmp) + return true + } + // match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == OpLOONG64XORconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpLOONG64SGTU { + break + } + b.resetWithControl(BlockLOONG64NE, cmp) + return true + } + // match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == OpLOONG64XORconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpLOONG64SGTconst { + break + } + b.resetWithControl(BlockLOONG64NE, cmp) + return true + } + // match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no) + // result: (NE cmp yes no) + for b.Controls[0].Op == OpLOONG64XORconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpLOONG64SGTUconst { + break + } + b.resetWithControl(BlockLOONG64NE, cmp) + return true + } + // match: (EQ (SGTUconst [1] x) yes no) + // result: (NE x yes no) + for b.Controls[0].Op == OpLOONG64SGTUconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + x := v_0.Args[0] + b.resetWithControl(BlockLOONG64NE, x) + return true + } + // match: (EQ (SGTU x (MOVVconst [0])) yes no) + // result: (EQ x yes no) + for b.Controls[0].Op == OpLOONG64SGTU { + v_0 := b.Controls[0] + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { + break + } + b.resetWithControl(BlockLOONG64EQ, x) + return true + } + // match: (EQ (SGTconst [0] x) yes no) + // result: (GEZ x yes no) + for b.Controls[0].Op == OpLOONG64SGTconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + b.resetWithControl(BlockLOONG64GEZ, x) + return true + } + // match: (EQ (SGT x (MOVVconst [0])) yes no) + // result: (LEZ x yes no) + for b.Controls[0].Op == OpLOONG64SGT { + v_0 := b.Controls[0] + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { + break + } + b.resetWithControl(BlockLOONG64LEZ, x) + return true + } + // match: (EQ (MOVVconst [0]) yes no) + // result: (First yes no) + for b.Controls[0].Op == OpLOONG64MOVVconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + b.Reset(BlockFirst) + return true + } + // match: (EQ (MOVVconst [c]) yes no) + // cond: c != 0 + // result: (First no yes) + for b.Controls[0].Op == 
OpLOONG64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c != 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockLOONG64GEZ: + // match: (GEZ (MOVVconst [c]) yes no) + // cond: c >= 0 + // result: (First yes no) + for b.Controls[0].Op == OpLOONG64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c >= 0) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (GEZ (MOVVconst [c]) yes no) + // cond: c < 0 + // result: (First no yes) + for b.Controls[0].Op == OpLOONG64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c < 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockLOONG64GTZ: + // match: (GTZ (MOVVconst [c]) yes no) + // cond: c > 0 + // result: (First yes no) + for b.Controls[0].Op == OpLOONG64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c > 0) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (GTZ (MOVVconst [c]) yes no) + // cond: c <= 0 + // result: (First no yes) + for b.Controls[0].Op == OpLOONG64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c <= 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockIf: + // match: (If cond yes no) + // result: (NE cond yes no) + for { + cond := b.Controls[0] + b.resetWithControl(BlockLOONG64NE, cond) + return true + } + case BlockLOONG64LEZ: + // match: (LEZ (MOVVconst [c]) yes no) + // cond: c <= 0 + // result: (First yes no) + for b.Controls[0].Op == OpLOONG64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c <= 0) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (LEZ (MOVVconst [c]) yes no) + // cond: c > 0 + // result: (First no yes) + for b.Controls[0].Op == OpLOONG64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c > 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockLOONG64LTZ: + // match: (LTZ (MOVVconst [c]) yes no) + // cond: c < 0 + // result: (First yes no) + for b.Controls[0].Op == OpLOONG64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c < 0) { + break + } + b.Reset(BlockFirst) + return true + } + // match: (LTZ (MOVVconst [c]) yes no) + // cond: c >= 0 + // result: (First no yes) + for b.Controls[0].Op == OpLOONG64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c >= 0) { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + case BlockLOONG64NE: + // match: (NE (FPFlagTrue cmp) yes no) + // result: (FPT cmp yes no) + for b.Controls[0].Op == OpLOONG64FPFlagTrue { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockLOONG64FPT, cmp) + return true + } + // match: (NE (FPFlagFalse cmp) yes no) + // result: (FPF cmp yes no) + for b.Controls[0].Op == OpLOONG64FPFlagFalse { + v_0 := b.Controls[0] + cmp := v_0.Args[0] + b.resetWithControl(BlockLOONG64FPF, cmp) + return true + } + // match: (NE (XORconst [1] cmp:(SGT _ _)) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == OpLOONG64XORconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpLOONG64SGT { + break + } + b.resetWithControl(BlockLOONG64EQ, cmp) + return true + } + // match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == OpLOONG64XORconst { + v_0 := b.Controls[0] + if 
auxIntToInt64(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpLOONG64SGTU { + break + } + b.resetWithControl(BlockLOONG64EQ, cmp) + return true + } + // match: (NE (XORconst [1] cmp:(SGTconst _)) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == OpLOONG64XORconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpLOONG64SGTconst { + break + } + b.resetWithControl(BlockLOONG64EQ, cmp) + return true + } + // match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no) + // result: (EQ cmp yes no) + for b.Controls[0].Op == OpLOONG64XORconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + cmp := v_0.Args[0] + if cmp.Op != OpLOONG64SGTUconst { + break + } + b.resetWithControl(BlockLOONG64EQ, cmp) + return true + } + // match: (NE (SGTUconst [1] x) yes no) + // result: (EQ x yes no) + for b.Controls[0].Op == OpLOONG64SGTUconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 1 { + break + } + x := v_0.Args[0] + b.resetWithControl(BlockLOONG64EQ, x) + return true + } + // match: (NE (SGTU x (MOVVconst [0])) yes no) + // result: (NE x yes no) + for b.Controls[0].Op == OpLOONG64SGTU { + v_0 := b.Controls[0] + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { + break + } + b.resetWithControl(BlockLOONG64NE, x) + return true + } + // match: (NE (SGTconst [0] x) yes no) + // result: (LTZ x yes no) + for b.Controls[0].Op == OpLOONG64SGTconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + x := v_0.Args[0] + b.resetWithControl(BlockLOONG64LTZ, x) + return true + } + // match: (NE (SGT x (MOVVconst [0])) yes no) + // result: (GTZ x yes no) + for b.Controls[0].Op == OpLOONG64SGT { + v_0 := b.Controls[0] + _ = v_0.Args[1] + x := v_0.Args[0] + v_0_1 := v_0.Args[1] + if v_0_1.Op != OpLOONG64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 { + break + } + b.resetWithControl(BlockLOONG64GTZ, x) + return true + } + // match: (NE (MOVVconst [0]) yes no) + // result: (First no yes) + for b.Controls[0].Op == OpLOONG64MOVVconst { + v_0 := b.Controls[0] + if auxIntToInt64(v_0.AuxInt) != 0 { + break + } + b.Reset(BlockFirst) + b.swapSuccessors() + return true + } + // match: (NE (MOVVconst [c]) yes no) + // cond: c != 0 + // result: (First yes no) + for b.Controls[0].Op == OpLOONG64MOVVconst { + v_0 := b.Controls[0] + c := auxIntToInt64(v_0.AuxInt) + if !(c != 0) { + break + } + b.Reset(BlockFirst) + return true + } + } + return false +}
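
The generated functions above all follow the same match-and-rewrite shape: check the operator, auxiliary integer, and arguments of a Value, then reset it to the target operator and rebuild its arguments. Below is a minimal, self-contained sketch of that shape (not part of the patch), using hypothetical toy types (toyValue, toyOp, rewriteSlicemask) instead of the real ssa.Value API, for the (Slicemask x) => (SRAVconst (NEGV x) [63]) rule handled in rewriteValueLOONG64_OpSlicemask:

// A minimal sketch of the matcher pattern used by the generated rewrite
// functions. toyValue and toyOp are simplified stand-ins for ssa.Value
// and ssa.Op, not the real compiler types.
package main

import "fmt"

type toyOp string

const (
	opSlicemask toyOp = "Slicemask"
	opNEGV      toyOp = "LOONG64NEGV"
	opSRAVconst toyOp = "LOONG64SRAVconst"
)

// toyValue mirrors the shape of an ssa.Value: an operator, an integer
// auxiliary field, and a slice of argument values.
type toyValue struct {
	op     toyOp
	auxInt int64
	args   []*toyValue
}

// reset clears the value and gives it a new operator, like Value.reset.
func (v *toyValue) reset(op toyOp) {
	v.op = op
	v.auxInt = 0
	v.args = v.args[:0]
}

// rewriteSlicemask applies the analogue of
//   (Slicemask x) => (SRAVconst (NEGV x) [63])
// in the same style as rewriteValueLOONG64_OpSlicemask: match the input
// shape, rebuild the value in place, and report whether a rewrite fired.
func rewriteSlicemask(v *toyValue) bool {
	if v.op != opSlicemask || len(v.args) != 1 {
		return false
	}
	x := v.args[0]
	neg := &toyValue{op: opNEGV, args: []*toyValue{x}}
	v.reset(opSRAVconst)
	v.auxInt = 63
	v.args = append(v.args, neg)
	return true
}

func main() {
	x := &toyValue{op: "Arg"}
	v := &toyValue{op: opSlicemask, args: []*toyValue{x}}
	if rewriteSlicemask(v) {
		// Prints: LOONG64SRAVconst [63] (LOONG64NEGV x)
		fmt.Printf("%s [%d] (%s x)\n", v.op, v.auxInt, v.args[0].op)
	}
}

In the real generated code the intermediate NEGV value is allocated in the containing block with b.NewValue0 and wired in with AddArg; the toy version inlines that wiring for brevity.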