diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go index d3a0ba1d80..c4097947bf 100644 --- a/src/cmd/compile/internal/ppc64/ssa.go +++ b/src/cmd/compile/internal/ppc64/ssa.go @@ -702,7 +702,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.From.Type = obj.TYPE_REG p.From.Reg = v.Args[0].Reg() - case ssa.OpPPC64ADDconst, ssa.OpPPC64ANDconst, ssa.OpPPC64ORconst, ssa.OpPPC64XORconst, + case ssa.OpPPC64ADDconst, ssa.OpPPC64ORconst, ssa.OpPPC64XORconst, ssa.OpPPC64SRADconst, ssa.OpPPC64SRAWconst, ssa.OpPPC64SRDconst, ssa.OpPPC64SRWconst, ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst, ssa.OpPPC64EXTSWSLconst, ssa.OpPPC64MULLWconst, ssa.OpPPC64MULLDconst: p := s.Prog(v.Op.Asm()) @@ -761,7 +761,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt p.To.Type = obj.TYPE_REG - p.To.Reg = ppc64.REGTMP // discard result + // p.To.Reg = ppc64.REGTMP // discard result + p.To.Reg = v.Reg0() case ssa.OpPPC64MOVDaddr: switch v.Aux.(type) { diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules index 834bf4ab8a..859bf0aa0c 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64.rules +++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules @@ -142,20 +142,20 @@ // Rotate generation with non-const shift // these match patterns from math/bits/RotateLeft[32|64], but there could be others -(ADD (SLD x (ANDconst [63] y)) (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y)))) => (ROTL x y) -(ADD (SLD x (ANDconst [63] y)) (SRD x (SUBFCconst [64] (ANDconst [63] y)))) => (ROTL x y) -( OR (SLD x (ANDconst [63] y)) (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y)))) => (ROTL x y) -( OR (SLD x (ANDconst [63] y)) (SRD x (SUBFCconst [64] (ANDconst [63] y)))) => (ROTL x y) -(XOR (SLD x (ANDconst [63] y)) (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y)))) => (ROTL x y) -(XOR (SLD x (ANDconst [63] y)) (SRD x (SUBFCconst [64] (ANDconst [63] y)))) => (ROTL x y) +(ADD (SLD x 
(Select0 (ANDCCconst [63] y))) (SRD x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y))))) => (ROTL x y) +(ADD (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUBFCconst [64] (Select0 (ANDCCconst [63] y))))) => (ROTL x y) +( OR (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y)))))=> (ROTL x y) +( OR (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUBFCconst [64] (Select0 (ANDCCconst [63] y))))) => (ROTL x y) +(XOR (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y))))) => (ROTL x y) +(XOR (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUBFCconst [64] (Select0 (ANDCCconst [63] y))))) => (ROTL x y) -(ADD (SLW x (ANDconst [31] y)) (SRW x (SUBFCconst [32] (ANDconst [31] y)))) => (ROTLW x y) -(ADD (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) => (ROTLW x y) -( OR (SLW x (ANDconst [31] y)) (SRW x (SUBFCconst [32] (ANDconst [31] y)))) => (ROTLW x y) -( OR (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) => (ROTLW x y) -(XOR (SLW x (ANDconst [31] y)) (SRW x (SUBFCconst [32] (ANDconst [31] y)))) => (ROTLW x y) -(XOR (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) => (ROTLW x y) +(ADD (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUBFCconst [32] (Select0 (ANDCCconst [31] y))))) => (ROTLW x y) +(ADD (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y))))) => (ROTLW x y) +( OR (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUBFCconst [32] (Select0 (ANDCCconst [31] y))))) => (ROTLW x y) +( OR (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y))))) => (ROTLW x y) +(XOR (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUBFCconst [32] (Select0 (ANDCCconst [31] y))))) => (ROTLW x y) +(XOR (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y))))) => (ROTLW x y) // Lowering rotates @@ -167,22 
+167,22 @@ (ROTL x (MOVDconst [c])) => (ROTLconst x [c&63]) // Combine rotate and mask operations -(ANDconst [m] (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x) +(Select0 (ANDCCconst [m] (ROTLWconst [r] x))) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x) (AND (MOVDconst [m]) (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x) -(ANDconst [m] (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r) +(Select0 (ANDCCconst [m] (ROTLW x r))) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r) (AND (MOVDconst [m]) (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r) // Note, any rotated word bitmask is still a valid word bitmask. (ROTLWconst [r] (AND (MOVDconst [m]) x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x) -(ROTLWconst [r] (ANDconst [m] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x) +(ROTLWconst [r] (Select0 (ANDCCconst [m] x))) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x) -(ANDconst [m] (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0]) -(ANDconst [m] (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x) +(Select0 (ANDCCconst [m] (SRWconst x [s]))) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0]) +(Select0 (ANDCCconst [m] (SRWconst x [s]))) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x) (AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0]) (AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x) -(SRWconst (ANDconst [m] x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0]) -(SRWconst (ANDconst [m] x) [s]) && 
mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x) +(SRWconst (Select0 (ANDCCconst [m] x)) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0]) +(SRWconst (Select0 (ANDCCconst [m] x)) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x) (SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0]) (SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x) @@ -253,41 +253,41 @@ // These are subexpressions found in statements that can become rotates // In these cases the shift count is known to be < 64 so the more complicated expressions // with Mask & Carry is not needed -(Lsh64x64 x (AND y (MOVDconst [63]))) => (SLD x (ANDconst [63] y)) -(Lsh64x64 x (ANDconst [63] y)) => (SLD x (ANDconst [63] y)) -(Rsh64Ux64 x (AND y (MOVDconst [63]))) => (SRD x (ANDconst [63] y)) -(Rsh64Ux64 x (ANDconst [63] y)) => (SRD x (ANDconst [63] y)) -(Rsh64Ux64 x (SUB (MOVDconst [64]) (ANDconst [63] y))) => (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y))) -(Rsh64Ux64 x (SUBFCconst [64] (ANDconst [63] y))) => (SRD x (SUBFCconst [64] (ANDconst [63] y))) -(Rsh64Ux64 x (SUB (MOVDconst [64]) (AND y (MOVDconst [63])))) => (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y))) -(Rsh64Ux64 x (SUBFCconst [64] (AND y (MOVDconst [63])))) => (SRD x (SUBFCconst [64] (ANDconst [63] y))) -(Rsh64x64 x (AND y (MOVDconst [63]))) => (SRAD x (ANDconst [63] y)) -(Rsh64x64 x (ANDconst [63] y)) => (SRAD x (ANDconst [63] y)) -(Rsh64x64 x (SUB (MOVDconst [64]) (ANDconst [63] y))) => (SRAD x (SUB (MOVDconst [64]) (ANDconst [63] y))) -(Rsh64x64 x (SUBFCconst [64] (ANDconst [63] y))) => (SRAD x (SUBFCconst [64] (ANDconst [63] y))) -(Rsh64x64 x (SUB (MOVDconst [64]) (AND y (MOVDconst [63])))) => (SRAD x (SUB (MOVDconst [64]) (ANDconst [63] y))) -(Rsh64x64 x (SUBFCconst [64] (AND y (MOVDconst [63])))) => (SRAD x (SUBFCconst 
[64] (ANDconst [63] y))) +(Lsh64x64 x (AND y (MOVDconst [63]))) => (SLD x (Select0 (ANDCCconst [63] y))) +(Lsh64x64 x (Select0 (ANDCCconst [63] y))) => (SLD x (Select0 (ANDCCconst [63] y))) +(Rsh64Ux64 x (AND y (MOVDconst [63]))) => (SRD x (Select0 (ANDCCconst [63] y))) +(Rsh64Ux64 x (Select0 (ANDCCconst [63] y))) => (SRD x (Select0 (ANDCCconst [63] y))) +(Rsh64Ux64 x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y)))) => (SRD x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y)))) +(Rsh64Ux64 x (SUBFCconst [64] (Select0 (ANDCCconst [63] y)))) => (SRD x (SUBFCconst [64] (Select0 (ANDCCconst [63] y)))) +(Rsh64Ux64 x (SUB (MOVDconst [64]) (AND y (MOVDconst [63])))) => (SRD x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y)))) +(Rsh64Ux64 x (SUBFCconst [64] (AND y (MOVDconst [63])))) => (SRD x (SUBFCconst [64] (Select0 (ANDCCconst [63] y)))) +(Rsh64x64 x (AND y (MOVDconst [63]))) => (SRAD x (Select0 (ANDCCconst [63] y))) +(Rsh64x64 x (Select0 (ANDCCconst [63] y))) => (SRAD x (Select0 (ANDCCconst [63] y))) +(Rsh64x64 x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y)))) => (SRAD x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y)))) +(Rsh64x64 x (SUBFCconst [64] (Select0 (ANDCCconst [63] y)))) => (SRAD x (SUBFCconst [64] (Select0 (ANDCCconst [63] y)))) +(Rsh64x64 x (SUB (MOVDconst [64]) (AND y (MOVDconst [63])))) => (SRAD x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y)))) +(Rsh64x64 x (SUBFCconst [64] (AND y (MOVDconst [63])))) => (SRAD x (SUBFCconst [64] (Select0 (ANDCCconst [63] y)))) (Lsh64x64 x y) => (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64])))) (Rsh64x64 x y) => (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64])))) (Rsh64Ux64 x y) => (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64])))) -(Lsh32x64 x (AND y (MOVDconst [31]))) => (SLW x (ANDconst [31] y)) -(Lsh32x64 x (ANDconst [31] y)) => (SLW x (ANDconst [31] y)) +(Lsh32x64 x (AND y (MOVDconst [31]))) => (SLW x (Select0 (ANDCCconst [31] y))) +(Lsh32x64 x (Select0 
(ANDCCconst [31] y))) => (SLW x (Select0 (ANDCCconst [31] y))) -(Rsh32Ux64 x (AND y (MOVDconst [31]))) => (SRW x (ANDconst [31] y)) -(Rsh32Ux64 x (ANDconst [31] y)) => (SRW x (ANDconst [31] y)) -(Rsh32Ux64 x (SUB (MOVDconst [32]) (ANDconst [31] y))) => (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y))) -(Rsh32Ux64 x (SUBFCconst [32] (ANDconst [31] y))) => (SRW x (SUBFCconst [32] (ANDconst [31] y))) -(Rsh32Ux64 x (SUB (MOVDconst [32]) (AND y (MOVDconst [31])))) => (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y))) -(Rsh32Ux64 x (SUBFCconst [32] (AND y (MOVDconst [31])))) => (SRW x (SUBFCconst [32] (ANDconst [31] y))) +(Rsh32Ux64 x (AND y (MOVDconst [31]))) => (SRW x (Select0 (ANDCCconst [31] y))) +(Rsh32Ux64 x (Select0 (ANDCCconst [31] y))) => (SRW x (Select0 (ANDCCconst [31] y))) +(Rsh32Ux64 x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y)))) => (SRW x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y)))) +(Rsh32Ux64 x (SUBFCconst [32] (Select0 (ANDCCconst [31] y)))) => (SRW x (SUBFCconst [32] (Select0 (ANDCCconst [31] y)))) +(Rsh32Ux64 x (SUB (MOVDconst [32]) (AND y (MOVDconst [31])))) => (SRW x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y)))) +(Rsh32Ux64 x (SUBFCconst [32] (AND y (MOVDconst [31])))) => (SRW x (SUBFCconst [32] (Select0 (ANDCCconst [31] y)))) -(Rsh32x64 x (AND y (MOVDconst [31]))) => (SRAW x (ANDconst [31] y)) -(Rsh32x64 x (ANDconst [31] y)) => (SRAW x (ANDconst [31] y)) -(Rsh32x64 x (SUB (MOVDconst [32]) (ANDconst [31] y))) => (SRAW x (SUB (MOVDconst [32]) (ANDconst [31] y))) -(Rsh32x64 x (SUBFCconst [32] (ANDconst [31] y))) => (SRAW x (SUBFCconst [32] (ANDconst [31] y))) -(Rsh32x64 x (SUB (MOVDconst [32]) (AND y (MOVDconst [31])))) => (SRAW x (SUB (MOVDconst [32]) (ANDconst [31] y))) -(Rsh32x64 x (SUBFCconst [32] (AND y (MOVDconst [31])))) => (SRAW x (SUBFCconst [32] (ANDconst [31] y))) +(Rsh32x64 x (AND y (MOVDconst [31]))) => (SRAW x (Select0 (ANDCCconst [31] y))) +(Rsh32x64 x (Select0 (ANDCCconst [31] y))) => (SRAW x (Select0 (ANDCCconst 
[31] y))) +(Rsh32x64 x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y)))) => (SRAW x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y)))) +(Rsh32x64 x (SUBFCconst [32] (Select0 (ANDCCconst [31] y)))) => (SRAW x (SUBFCconst [32] (Select0 (ANDCCconst [31] y)))) +(Rsh32x64 x (SUB (MOVDconst [32]) (AND y (MOVDconst [31])))) => (SRAW x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y)))) +(Rsh32x64 x (SUBFCconst [32] (AND y (MOVDconst [31])))) => (SRAW x (SUBFCconst [32] (Select0 (ANDCCconst [31] y)))) (Rsh32x64 x y) => (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32])))) (Rsh32Ux64 x y) => (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32])))) @@ -351,8 +351,8 @@ (Lsh8x8 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8])))) // Cleaning up shift ops -(ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPU (ANDconst [d] y) (MOVDconst [c]))) && c >= d => (ANDconst [d] y) -(ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPUconst [c] (ANDconst [d] y))) && c >= d => (ANDconst [d] y) +(ISEL [0] (Select0 (ANDCCconst [d] y)) (MOVDconst [-1]) (CMPU (Select0 (ANDCCconst [d] y)) (MOVDconst [c]))) && c >= d => (Select0 (ANDCCconst [d] y)) +(ISEL [0] (Select0 (ANDCCconst [d] y)) (MOVDconst [-1]) (CMPUconst [c] (Select0 (ANDCCconst [d] y)))) && c >= d => (Select0 (ANDCCconst [d] y)) (ORN x (MOVDconst [-1])) => x (S(RAD|RD|LD) x (MOVDconst [c])) => (S(RAD|RD|LD)const [c&63 | (c>>6&1*63)] x) @@ -401,7 +401,7 @@ (OR x (NOR y y)) => (ORN x y) // Lowering comparisons -(EqB x y) => (ANDconst [1] (EQV x y)) +(EqB x y) => (Select0 (ANDCCconst [1] (EQV x y))) // Sign extension dependence on operand sign sets up for sign/zero-extension elision later (Eq8 x y) && isSigned(x.Type) && isSigned(y.Type) => (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y))) (Eq16 x y) && isSigned(x.Type) && isSigned(y.Type) => (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y))) @@ -461,25 +461,23 @@ (If (FGreaterThan cc) yes no) => (FGT cc yes no) (If (FGreaterEqual cc) 
yes no) => (FGE cc yes no) -(If cond yes no) => (NE (CMPWconst [0] (ANDconst [1] cond)) yes no) +(If cond yes no) => (NE (CMPWconst [0] (Select0 (ANDCCconst [1] cond))) yes no) // Absorb boolean tests into block -(NE (CMPWconst [0] (ANDconst [1] (Equal cc))) yes no) => (EQ cc yes no) -(NE (CMPWconst [0] (ANDconst [1] (NotEqual cc))) yes no) => (NE cc yes no) -(NE (CMPWconst [0] (ANDconst [1] (LessThan cc))) yes no) => (LT cc yes no) -(NE (CMPWconst [0] (ANDconst [1] (LessEqual cc))) yes no) => (LE cc yes no) -(NE (CMPWconst [0] (ANDconst [1] (GreaterThan cc))) yes no) => (GT cc yes no) -(NE (CMPWconst [0] (ANDconst [1] (GreaterEqual cc))) yes no) => (GE cc yes no) -(NE (CMPWconst [0] (ANDconst [1] (FLessThan cc))) yes no) => (FLT cc yes no) -(NE (CMPWconst [0] (ANDconst [1] (FLessEqual cc))) yes no) => (FLE cc yes no) -(NE (CMPWconst [0] (ANDconst [1] (FGreaterThan cc))) yes no) => (FGT cc yes no) -(NE (CMPWconst [0] (ANDconst [1] (FGreaterEqual cc))) yes no) => (FGE cc yes no) +(NE (CMPWconst [0] (Select0 (ANDCCconst [1] (Equal cc)))) yes no) => (EQ cc yes no) +(NE (CMPWconst [0] (Select0 (ANDCCconst [1] (NotEqual cc)))) yes no) => (NE cc yes no) +(NE (CMPWconst [0] (Select0 (ANDCCconst [1] (LessThan cc)))) yes no) => (LT cc yes no) +(NE (CMPWconst [0] (Select0 (ANDCCconst [1] (LessEqual cc)))) yes no) => (LE cc yes no) +(NE (CMPWconst [0] (Select0 (ANDCCconst [1] (GreaterThan cc)))) yes no) => (GT cc yes no) +(NE (CMPWconst [0] (Select0 (ANDCCconst [1] (GreaterEqual cc)))) yes no) => (GE cc yes no) +(NE (CMPWconst [0] (Select0 (ANDCCconst [1] (FLessThan cc)))) yes no) => (FLT cc yes no) +(NE (CMPWconst [0] (Select0 (ANDCCconst [1] (FLessEqual cc)))) yes no) => (FLE cc yes no) +(NE (CMPWconst [0] (Select0 (ANDCCconst [1] (FGreaterThan cc)))) yes no) => (FGT cc yes no) +(NE (CMPWconst [0] (Select0 (ANDCCconst [1] (FGreaterEqual cc)))) yes no) => (FGE cc yes no) -// Elide compares of bit tests // TODO need to make both CC and result of ANDCC available. 
-(EQ (CMPconst [0] (ANDconst [c] x)) yes no) => (EQ (ANDCCconst [c] x) yes no) -(NE (CMPconst [0] (ANDconst [c] x)) yes no) => (NE (ANDCCconst [c] x) yes no) -(EQ (CMPWconst [0] (ANDconst [c] x)) yes no) => (EQ (ANDCCconst [c] x) yes no) -(NE (CMPWconst [0] (ANDconst [c] x)) yes no) => (NE (ANDCCconst [c] x) yes no) +// Elide compares of bit tests +((EQ|NE) (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no) => ((EQ|NE) (Select1 (ANDCCconst [c] x)) yes no) +((EQ|NE) (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no) => ((EQ|NE) (Select1 (ANDCCconst [c] x)) yes no) // absorb flag constants into branches (EQ (FlagEQ) yes no) => (First yes no) @@ -570,9 +568,9 @@ (LessEqual (InvertFlags x)) => (GreaterEqual x) (GreaterEqual (InvertFlags x)) => (LessEqual x) -// Elide compares of bit tests // TODO need to make both CC and result of ANDCC available. -((EQ|NE|LT|LE|GT|GE) (CMPconst [0] (ANDconst [c] x)) yes no) => ((EQ|NE|LT|LE|GT|GE) (ANDCCconst [c] x) yes no) -((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] (ANDconst [c] x)) yes no) => ((EQ|NE|LT|LE|GT|GE) (ANDCCconst [c] x) yes no) +// Elide compares of bit tests +((EQ|NE|LT|LE|GT|GE) (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 (ANDCCconst [c] x)) yes no) +((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no) => ((EQ|NE|LT|LE|GT|GE) (Select1 (ANDCCconst [c] x)) yes no) ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (ANDCC x y) yes no) ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(OR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (ORCC x y) yes no) ((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (XORCC x y) yes no) @@ -722,44 +720,44 @@ (NOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [^(c|d)]) // Discover consts -(AND x (MOVDconst [c])) && isU16Bit(c) => (ANDconst [c] x) +(AND x (MOVDconst [c])) && isU16Bit(c) => (Select0 (ANDCCconst [c] x)) (XOR x (MOVDconst [c])) && 
isU32Bit(c) => (XORconst [c] x) (OR x (MOVDconst [c])) && isU32Bit(c) => (ORconst [c] x) // Simplify consts -(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x) +(Select0 (ANDCCconst [c] (Select0 (ANDCCconst [d] x)))) => (Select0 (ANDCCconst [c&d] x)) (ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x) (XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x) -(ANDconst [-1] x) => x -(ANDconst [0] _) => (MOVDconst [0]) +(Select0 (ANDCCconst [-1] x)) => x +(Select0 (ANDCCconst [0] _)) => (MOVDconst [0]) (XORconst [0] x) => x (ORconst [-1] _) => (MOVDconst [-1]) (ORconst [0] x) => x // zero-extend of small and => small and -(MOVBZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFF => y -(MOVHZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF => y -(MOVWZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFFFFFF => y +(MOVBZreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFF => y +(MOVHZreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFFFF => y +(MOVWZreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFFFFFFFF => y (MOVWZreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0xFFFFFFFF => y // sign extend of small-positive and => small-positive-and -(MOVBreg y:(ANDconst [c] _)) && uint64(c) <= 0x7F => y -(MOVHreg y:(ANDconst [c] _)) && uint64(c) <= 0x7FFF => y -(MOVWreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF => y // 0xFFFF is largest immediate constant, when regarded as 32-bit is > 0 +(MOVBreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0x7F => y +(MOVHreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0x7FFF => y +(MOVWreg y:(Select0 (ANDCCconst [c] _))) && uint64(c) <= 0xFFFF => y // 0xFFFF is largest immediate constant, when regarded as 32-bit is > 0 (MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF => y // small and of zero-extend => either zero-extend or small and -(ANDconst [c] y:(MOVBZreg _)) && c&0xFF == 0xFF => y -(ANDconst [0xFF] y:(MOVBreg _)) => y -(ANDconst [c] y:(MOVHZreg _)) && c&0xFFFF == 0xFFFF => y -(ANDconst [0xFFFF] y:(MOVHreg _)) => y 
+(Select0 (ANDCCconst [c] y:(MOVBZreg _))) && c&0xFF == 0xFF => y +(Select0 (ANDCCconst [0xFF] y:(MOVBreg _))) => y +(Select0 (ANDCCconst [c] y:(MOVHZreg _))) && c&0xFFFF == 0xFFFF => y +(Select0 (ANDCCconst [0xFFFF] y:(MOVHreg _))) => y (AND (MOVDconst [c]) y:(MOVWZreg _)) && c&0xFFFFFFFF == 0xFFFFFFFF => y (AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) => (MOVWZreg x) // normal case -(ANDconst [c] (MOV(B|BZ)reg x)) => (ANDconst [c&0xFF] x) -(ANDconst [c] (MOV(H|HZ)reg x)) => (ANDconst [c&0xFFFF] x) -(ANDconst [c] (MOV(W|WZ)reg x)) => (ANDconst [c&0xFFFFFFFF] x) +(Select0 (ANDCCconst [c] (MOV(B|BZ)reg x))) => (Select0 (ANDCCconst [c&0xFF] x)) +(Select0 (ANDCCconst [c] (MOV(H|HZ)reg x))) => (Select0 (ANDCCconst [c&0xFFFF] x)) +(Select0 (ANDCCconst [c] (MOV(W|WZ)reg x))) => (Select0 (ANDCCconst [c&0xFFFFFFFF] x)) // Eliminate unnecessary sign/zero extend following right shift (MOV(B|H|W)Zreg (SRWconst [c] (MOVBZreg x))) => (SRWconst [c] (MOVBZreg x)) @@ -841,11 +839,11 @@ (MOVBZreg ((OR|XOR|AND) x (MOVHZreg y))) => (MOVBZreg ((OR|XOR|AND) x y)) (MOVBZreg ((OR|XOR|AND) x (MOVBZreg y))) => (MOVBZreg ((OR|XOR|AND) x y)) -(MOV(B|H|W)Zreg z:(ANDconst [c] (MOVBZload ptr x))) => z +(MOV(B|H|W)Zreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x)))) => z (MOVBZreg z:(AND y (MOVBZload ptr x))) => z -(MOV(H|W)Zreg z:(ANDconst [c] (MOVHZload ptr x))) => z +(MOV(H|W)Zreg z:(Select0 (ANDCCconst [c] (MOVHZload ptr x)))) => z (MOVHZreg z:(AND y (MOVHZload ptr x))) => z -(MOVWZreg z:(ANDconst [c] (MOVWZload ptr x))) => z +(MOVWZreg z:(Select0 (ANDCCconst [c] (MOVWZload ptr x)))) => z (MOVWZreg z:(AND y (MOVWZload ptr x))) => z // Arithmetic constant ops @@ -1067,11 +1065,11 @@ (SLDconst [c] z:(MOVHZreg x)) && c < 16 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,48,63,64)] x) (SLDconst [c] z:(MOVWZreg x)) && c < 32 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,32,63,64)] x) -(SLDconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= 
(64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x) +(SLDconst [c] z:(Select0 (ANDCCconst [d] x))) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x) (SLDconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x) (SLWconst [c] z:(MOVBZreg x)) && z.Uses == 1 && c < 8 => (CLRLSLWI [newPPC64ShiftAuxInt(c,24,31,32)] x) (SLWconst [c] z:(MOVHZreg x)) && z.Uses == 1 && c < 16 => (CLRLSLWI [newPPC64ShiftAuxInt(c,16,31,32)] x) -(SLWconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) +(SLWconst [c] z:(Select0 (ANDCCconst [d] x))) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) (SLWconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) // special case for power9 (SL(W|D)const [c] z:(MOVWreg x)) && c < 32 && buildcfg.GOPPC64 >= 9 => (EXTSWSLconst [c] x) @@ -1155,6 +1153,16 @@ (ISEL [4] x _ (Flag(EQ|GT))) => x (ISEL [4] _ y (FlagLT)) => y +(ISEL [2] x y (CMPconst [0] (Select0 (ANDCCconst [1] z)))) => (ISEL [2] x y (Select1 (ANDCCconst [1] z ))) +(ISEL [6] x y (CMPconst [0] (Select0 (ANDCCconst [1] z)))) => (ISEL [6] x y (Select1 (ANDCCconst [1] z ))) +(ISELB [2] x (CMPconst [0] (Select0 (ANDCCconst [1] z)))) => (XORconst [1] (Select0 (ANDCCconst [1] z ))) +(ISELB [6] x (CMPconst [0] (Select0 (ANDCCconst [1] z)))) => (Select0 (ANDCCconst [1] z )) + +(ISEL [2] x y (CMPWconst [0] (Select0 (ANDCCconst [1] 
z)))) => (ISEL [2] x y (Select1 (ANDCCconst [1] z ))) +(ISEL [6] x y (CMPWconst [0] (Select0 (ANDCCconst [1] z)))) => (ISEL [6] x y (Select1 (ANDCCconst [1] z ))) +(ISELB [2] x (CMPWconst [0] (Select0 (ANDCCconst [1] z)))) => (XORconst [1] (Select0 (ANDCCconst [1] z ))) +(ISELB [6] x (CMPWconst [0] (Select0 (ANDCCconst [1] z)))) => (Select0 (ANDCCconst [1] z )) + (ISELB [n] (MOVDconst [1]) (InvertFlags bool)) && n%4 == 0 => (ISELB [n+1] (MOVDconst [1]) bool) (ISELB [n] (MOVDconst [1]) (InvertFlags bool)) && n%4 == 1 => (ISELB [n-1] (MOVDconst [1]) bool) (ISELB [n] (MOVDconst [1]) (InvertFlags bool)) && n%4 == 2 => (ISELB [n] (MOVDconst [1]) bool) @@ -1166,7 +1174,7 @@ (XORconst [1] (ISELB [4] (MOVDconst [1]) cmp)) => (ISELB [0] (MOVDconst [1]) cmp) // A particular pattern seen in cgo code: -(AND (MOVDconst [c]) x:(MOVBZload _ _)) => (ANDconst [c&0xFF] x) +(AND (MOVDconst [c]) x:(MOVBZload _ _)) => (Select0 (ANDCCconst [c&0xFF] x)) // floating point negative abs (FNEG (FABS x)) => (FNABS x) diff --git a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go index a4d906ddd3..5e0ed6f3b5 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go @@ -311,10 +311,9 @@ func init() { {name: "FNABS", argLength: 1, reg: fp11, asm: "FNABS"}, // -abs(arg0), float64 {name: "FCPSGN", argLength: 2, reg: fp21, asm: "FCPSGN"}, // copysign arg0 -> arg1, float64 - {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0|aux - {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64"}, // arg0^aux - {name: "ANDconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, asm: "ANDCC", aux: "Int64", clobberFlags: true}, // arg0&aux // and-immediate sets CC on PPC, always. 
- {name: "ANDCCconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}}, asm: "ANDCC", aux: "Int64", typ: "Flags"}, // arg0&aux == 0 // and-immediate sets CC on PPC, always. + {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0|aux + {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64"}, // arg0^aux + {name: "ANDCCconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, asm: "ANDCC", aux: "Int64", clobberFlags: true, typ: "(Int,Flags)"}, // arg0&aux == 0 // and-immediate sets CC on PPC, always. {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB", typ: "Int64"}, // sign extend int8 to int64 {name: "MOVBZreg", argLength: 1, reg: gp11, asm: "MOVBZ", typ: "Int64"}, // zero extend uint8 to uint64 diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 12fed422ad..efda41f69c 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2165,7 +2165,6 @@ const ( OpPPC64FCPSGN OpPPC64ORconst OpPPC64XORconst - OpPPC64ANDconst OpPPC64ANDCCconst OpPPC64MOVBreg OpPPC64MOVBZreg @@ -29029,7 +29028,7 @@ var opcodeTable = [...]opInfo{ }, }, { - name: "ANDconst", + name: "ANDCCconst", auxType: auxInt64, argLen: 1, clobberFlags: true, @@ -29043,17 +29042,6 @@ var opcodeTable = [...]opInfo{ }, }, }, - { - name: "ANDCCconst", - auxType: auxInt64, - argLen: 1, - asm: ppc64.AANDCC, - reg: regInfo{ - inputs: []inputInfo{ - {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 - }, - }, - }, { name: "MOVBreg", argLen: 1, diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index 8d6f976d74..ae1985854f 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -441,8 +441,6 @@ func rewriteValuePPC64(v *Value) bool { return 
rewriteValuePPC64_OpPPC64AND(v) case OpPPC64ANDN: return rewriteValuePPC64_OpPPC64ANDN(v) - case OpPPC64ANDconst: - return rewriteValuePPC64_OpPPC64ANDconst(v) case OpPPC64CLRLSLDI: return rewriteValuePPC64_OpPPC64CLRLSLDI(v) case OpPPC64CMP: @@ -1765,14 +1763,17 @@ func rewriteValuePPC64_OpEqB(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (EqB x y) - // result: (ANDconst [1] (EQV x y)) + // result: (Select0 (ANDCCconst [1] (EQV x y))) for { x := v_0 y := v_1 - v.reset(OpPPC64ANDconst) - v.AuxInt = int64ToAuxInt(1) - v0 := b.NewValue0(v.Pos, OpPPC64EQV, typ.Int64) - v0.AddArg2(x, y) + v.reset(OpSelect0) + v.Type = typ.Int + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0.AuxInt = int64ToAuxInt(1) + v1 := b.NewValue0(v.Pos, OpPPC64EQV, typ.Int64) + v1.AddArg2(x, y) + v0.AddArg(v1) v.AddArg(v0) return true } @@ -2707,7 +2708,7 @@ func rewriteValuePPC64_OpLsh32x64(v *Value) bool { return true } // match: (Lsh32x64 x (AND y (MOVDconst [31]))) - // result: (SLW x (ANDconst [31] y)) + // result: (SLW x (Select0 (ANDCCconst [31] y))) for { x := v_0 if v_1.Op != OpPPC64AND { @@ -2722,26 +2723,34 @@ func rewriteValuePPC64_OpLsh32x64(v *Value) bool { continue } v.reset(OpPPC64SLW) - v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) - v0.AuxInt = int64ToAuxInt(31) - v0.AddArg(y) + v0 := b.NewValue0(v.Pos, OpSelect0, typ.Int32) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(31) + v1.AddArg(y) + v0.AddArg(v1) v.AddArg2(x, v0) return true } break } - // match: (Lsh32x64 x (ANDconst [31] y)) - // result: (SLW x (ANDconst [31] y)) + // match: (Lsh32x64 x (Select0 (ANDCCconst [31] y))) + // result: (SLW x (Select0 (ANDCCconst [31] y))) for { x := v_0 - if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.Int32 || auxIntToInt64(v_1.AuxInt) != 31 { + if v_1.Op != OpSelect0 || v_1.Type != typ.Int32 { break } - y := v_1.Args[0] + v_1_0 := v_1.Args[0] + if 
v_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_0.AuxInt) != 31 { + break + } + y := v_1_0.Args[0] v.reset(OpPPC64SLW) - v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) - v0.AuxInt = int64ToAuxInt(31) - v0.AddArg(y) + v0 := b.NewValue0(v.Pos, OpSelect0, typ.Int32) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(31) + v1.AddArg(y) + v0.AddArg(v1) v.AddArg2(x, v0) return true } @@ -2947,7 +2956,7 @@ func rewriteValuePPC64_OpLsh64x64(v *Value) bool { return true } // match: (Lsh64x64 x (AND y (MOVDconst [63]))) - // result: (SLD x (ANDconst [63] y)) + // result: (SLD x (Select0 (ANDCCconst [63] y))) for { x := v_0 if v_1.Op != OpPPC64AND { @@ -2962,26 +2971,34 @@ func rewriteValuePPC64_OpLsh64x64(v *Value) bool { continue } v.reset(OpPPC64SLD) - v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) - v0.AuxInt = int64ToAuxInt(63) - v0.AddArg(y) + v0 := b.NewValue0(v.Pos, OpSelect0, typ.Int64) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(63) + v1.AddArg(y) + v0.AddArg(v1) v.AddArg2(x, v0) return true } break } - // match: (Lsh64x64 x (ANDconst [63] y)) - // result: (SLD x (ANDconst [63] y)) + // match: (Lsh64x64 x (Select0 (ANDCCconst [63] y))) + // result: (SLD x (Select0 (ANDCCconst [63] y))) for { x := v_0 - if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.Int64 || auxIntToInt64(v_1.AuxInt) != 63 { + if v_1.Op != OpSelect0 { break } - y := v_1.Args[0] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64ANDCCconst || v_1_0.Type != typ.Int64 || auxIntToInt64(v_1_0.AuxInt) != 63 { + break + } + y := v_1_0.Args[0] v.reset(OpPPC64SLD) - v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) - v0.AuxInt = int64ToAuxInt(63) - v0.AddArg(y) + v0 := b.NewValue0(v.Pos, OpSelect0, typ.Int64) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(63) + v1.AddArg(y) + v0.AddArg(v1) 
v.AddArg2(x, v0) return true } @@ -3952,7 +3969,7 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { } break } - // match: (ADD (SLD x (ANDconst [63] y)) (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y)))) + // match: (ADD (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y))))) // result: (ROTL x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -3962,10 +3979,14 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpPPC64ANDconst || auxIntToInt64(v_0_1.AuxInt) != 63 { + if v_0_1.Op != OpSelect0 { continue } - y := v_0_1.Args[0] + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 63 { + continue + } + y := v_0_1_0.Args[0] if v_1.Op != OpPPC64SRD { continue } @@ -3983,7 +4004,11 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { continue } v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || auxIntToInt64(v_1_1_1.AuxInt) != 63 || y != v_1_1_1.Args[0] { + if v_1_1_1.Op != OpSelect0 || v_1_1_1.Type != typ.UInt { + continue + } + v_1_1_1_0 := v_1_1_1.Args[0] + if v_1_1_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_1_0.AuxInt) != 63 || y != v_1_1_1_0.Args[0] { continue } v.reset(OpPPC64ROTL) @@ -3992,7 +4017,7 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { } break } - // match: (ADD (SLD x (ANDconst [63] y)) (SRD x (SUBFCconst [64] (ANDconst [63] y)))) + // match: (ADD (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUBFCconst [64] (Select0 (ANDCCconst [63] y))))) // result: (ROTL x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -4002,10 +4027,14 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpPPC64ANDconst || auxIntToInt64(v_0_1.AuxInt) != 63 { + if v_0_1.Op != OpSelect0 { continue } - y := v_0_1.Args[0] + v_0_1_0 := v_0_1.Args[0] + if 
v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 63 { + continue + } + y := v_0_1_0.Args[0] if v_1.Op != OpPPC64SRD { continue } @@ -4018,7 +4047,11 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { continue } v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 63 || y != v_1_1_0.Args[0] { + if v_1_1_0.Op != OpSelect0 || v_1_1_0.Type != typ.UInt { + continue + } + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 63 || y != v_1_1_0_0.Args[0] { continue } v.reset(OpPPC64ROTL) @@ -4027,7 +4060,7 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { } break } - // match: (ADD (SLW x (ANDconst [31] y)) (SRW x (SUBFCconst [32] (ANDconst [31] y)))) + // match: (ADD (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUBFCconst [32] (Select0 (ANDCCconst [31] y))))) // result: (ROTLW x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -4037,10 +4070,14 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpPPC64ANDconst || auxIntToInt64(v_0_1.AuxInt) != 31 { + if v_0_1.Op != OpSelect0 { continue } - y := v_0_1.Args[0] + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 31 { + continue + } + y := v_0_1_0.Args[0] if v_1.Op != OpPPC64SRW { continue } @@ -4053,7 +4090,11 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { continue } v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 31 || y != v_1_1_0.Args[0] { + if v_1_1_0.Op != OpSelect0 || v_1_1_0.Type != typ.UInt { + continue + } + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 31 || y != v_1_1_0_0.Args[0] { continue } v.reset(OpPPC64ROTLW) @@ -4062,7 +4103,7 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { 
} break } - // match: (ADD (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) + // match: (ADD (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y))))) // result: (ROTLW x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -4072,10 +4113,14 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpPPC64ANDconst || auxIntToInt64(v_0_1.AuxInt) != 31 { + if v_0_1.Op != OpSelect0 { continue } - y := v_0_1.Args[0] + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 31 { + continue + } + y := v_0_1_0.Args[0] if v_1.Op != OpPPC64SRW { continue } @@ -4093,7 +4138,11 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool { continue } v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || auxIntToInt64(v_1_1_1.AuxInt) != 31 || y != v_1_1_1.Args[0] { + if v_1_1_1.Op != OpSelect0 || v_1_1_1.Type != typ.UInt { + continue + } + v_1_1_1_0 := v_1_1_1.Args[0] + if v_1_1_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_1_0.AuxInt) != 31 || y != v_1_1_1_0.Args[0] { continue } v.reset(OpPPC64ROTLW) @@ -4239,6 +4288,8 @@ func rewriteValuePPC64_OpPPC64ADDconst(v *Value) bool { func rewriteValuePPC64_OpPPC64AND(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block + typ := &b.Func.Config.Types // match: (AND (MOVDconst [m]) (ROTLWconst [r] x)) // cond: isPPC64WordRotateMask(m) // result: (RLWINM [encodePPC64RotateMask(r,m,32)] x) @@ -4371,7 +4422,7 @@ func rewriteValuePPC64_OpPPC64AND(v *Value) bool { } // match: (AND x (MOVDconst [c])) // cond: isU16Bit(c) - // result: (ANDconst [c] x) + // result: (Select0 (ANDCCconst [c] x)) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { x := v_0 @@ -4382,9 +4433,11 @@ func rewriteValuePPC64_OpPPC64AND(v *Value) bool { if !(isU16Bit(c)) { continue } - v.reset(OpPPC64ANDconst) - v.AuxInt = 
int64ToAuxInt(c) - v.AddArg(x) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0.AuxInt = int64ToAuxInt(c) + v0.AddArg(x) + v.AddArg(v0) return true } break @@ -4426,7 +4479,7 @@ func rewriteValuePPC64_OpPPC64AND(v *Value) bool { break } // match: (AND (MOVDconst [c]) x:(MOVBZload _ _)) - // result: (ANDconst [c&0xFF] x) + // result: (Select0 (ANDCCconst [c&0xFF] x)) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { if v_0.Op != OpPPC64MOVDconst { @@ -4437,9 +4490,11 @@ func rewriteValuePPC64_OpPPC64AND(v *Value) bool { if x.Op != OpPPC64MOVBZload { continue } - v.reset(OpPPC64ANDconst) - v.AuxInt = int64ToAuxInt(c & 0xFF) - v.AddArg(x) + v.reset(OpSelect0) + v0 := b.NewValue0(x.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0.AuxInt = int64ToAuxInt(c & 0xFF) + v0.AddArg(x) + v.AddArg(v0) return true } break @@ -4466,242 +4521,6 @@ func rewriteValuePPC64_OpPPC64ANDN(v *Value) bool { } return false } -func rewriteValuePPC64_OpPPC64ANDconst(v *Value) bool { - v_0 := v.Args[0] - // match: (ANDconst [m] (ROTLWconst [r] x)) - // cond: isPPC64WordRotateMask(m) - // result: (RLWINM [encodePPC64RotateMask(r,m,32)] x) - for { - m := auxIntToInt64(v.AuxInt) - if v_0.Op != OpPPC64ROTLWconst { - break - } - r := auxIntToInt64(v_0.AuxInt) - x := v_0.Args[0] - if !(isPPC64WordRotateMask(m)) { - break - } - v.reset(OpPPC64RLWINM) - v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, m, 32)) - v.AddArg(x) - return true - } - // match: (ANDconst [m] (ROTLW x r)) - // cond: isPPC64WordRotateMask(m) - // result: (RLWNM [encodePPC64RotateMask(0,m,32)] x r) - for { - m := auxIntToInt64(v.AuxInt) - if v_0.Op != OpPPC64ROTLW { - break - } - r := v_0.Args[1] - x := v_0.Args[0] - if !(isPPC64WordRotateMask(m)) { - break - } - v.reset(OpPPC64RLWNM) - v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 32)) - v.AddArg2(x, r) - return true - } - // match: (ANDconst [m] (SRWconst x [s])) - // 
cond: mergePPC64RShiftMask(m,s,32) == 0 - // result: (MOVDconst [0]) - for { - m := auxIntToInt64(v.AuxInt) - if v_0.Op != OpPPC64SRWconst { - break - } - s := auxIntToInt64(v_0.AuxInt) - if !(mergePPC64RShiftMask(m, s, 32) == 0) { - break - } - v.reset(OpPPC64MOVDconst) - v.AuxInt = int64ToAuxInt(0) - return true - } - // match: (ANDconst [m] (SRWconst x [s])) - // cond: mergePPC64AndSrwi(m,s) != 0 - // result: (RLWINM [mergePPC64AndSrwi(m,s)] x) - for { - m := auxIntToInt64(v.AuxInt) - if v_0.Op != OpPPC64SRWconst { - break - } - s := auxIntToInt64(v_0.AuxInt) - x := v_0.Args[0] - if !(mergePPC64AndSrwi(m, s) != 0) { - break - } - v.reset(OpPPC64RLWINM) - v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m, s)) - v.AddArg(x) - return true - } - // match: (ANDconst [c] (ANDconst [d] x)) - // result: (ANDconst [c&d] x) - for { - c := auxIntToInt64(v.AuxInt) - if v_0.Op != OpPPC64ANDconst { - break - } - d := auxIntToInt64(v_0.AuxInt) - x := v_0.Args[0] - v.reset(OpPPC64ANDconst) - v.AuxInt = int64ToAuxInt(c & d) - v.AddArg(x) - return true - } - // match: (ANDconst [-1] x) - // result: x - for { - if auxIntToInt64(v.AuxInt) != -1 { - break - } - x := v_0 - v.copyOf(x) - return true - } - // match: (ANDconst [0] _) - // result: (MOVDconst [0]) - for { - if auxIntToInt64(v.AuxInt) != 0 { - break - } - v.reset(OpPPC64MOVDconst) - v.AuxInt = int64ToAuxInt(0) - return true - } - // match: (ANDconst [c] y:(MOVBZreg _)) - // cond: c&0xFF == 0xFF - // result: y - for { - c := auxIntToInt64(v.AuxInt) - y := v_0 - if y.Op != OpPPC64MOVBZreg || !(c&0xFF == 0xFF) { - break - } - v.copyOf(y) - return true - } - // match: (ANDconst [0xFF] y:(MOVBreg _)) - // result: y - for { - if auxIntToInt64(v.AuxInt) != 0xFF { - break - } - y := v_0 - if y.Op != OpPPC64MOVBreg { - break - } - v.copyOf(y) - return true - } - // match: (ANDconst [c] y:(MOVHZreg _)) - // cond: c&0xFFFF == 0xFFFF - // result: y - for { - c := auxIntToInt64(v.AuxInt) - y := v_0 - if y.Op != OpPPC64MOVHZreg || !(c&0xFFFF 
== 0xFFFF) { - break - } - v.copyOf(y) - return true - } - // match: (ANDconst [0xFFFF] y:(MOVHreg _)) - // result: y - for { - if auxIntToInt64(v.AuxInt) != 0xFFFF { - break - } - y := v_0 - if y.Op != OpPPC64MOVHreg { - break - } - v.copyOf(y) - return true - } - // match: (ANDconst [c] (MOVBreg x)) - // result: (ANDconst [c&0xFF] x) - for { - c := auxIntToInt64(v.AuxInt) - if v_0.Op != OpPPC64MOVBreg { - break - } - x := v_0.Args[0] - v.reset(OpPPC64ANDconst) - v.AuxInt = int64ToAuxInt(c & 0xFF) - v.AddArg(x) - return true - } - // match: (ANDconst [c] (MOVBZreg x)) - // result: (ANDconst [c&0xFF] x) - for { - c := auxIntToInt64(v.AuxInt) - if v_0.Op != OpPPC64MOVBZreg { - break - } - x := v_0.Args[0] - v.reset(OpPPC64ANDconst) - v.AuxInt = int64ToAuxInt(c & 0xFF) - v.AddArg(x) - return true - } - // match: (ANDconst [c] (MOVHreg x)) - // result: (ANDconst [c&0xFFFF] x) - for { - c := auxIntToInt64(v.AuxInt) - if v_0.Op != OpPPC64MOVHreg { - break - } - x := v_0.Args[0] - v.reset(OpPPC64ANDconst) - v.AuxInt = int64ToAuxInt(c & 0xFFFF) - v.AddArg(x) - return true - } - // match: (ANDconst [c] (MOVHZreg x)) - // result: (ANDconst [c&0xFFFF] x) - for { - c := auxIntToInt64(v.AuxInt) - if v_0.Op != OpPPC64MOVHZreg { - break - } - x := v_0.Args[0] - v.reset(OpPPC64ANDconst) - v.AuxInt = int64ToAuxInt(c & 0xFFFF) - v.AddArg(x) - return true - } - // match: (ANDconst [c] (MOVWreg x)) - // result: (ANDconst [c&0xFFFFFFFF] x) - for { - c := auxIntToInt64(v.AuxInt) - if v_0.Op != OpPPC64MOVWreg { - break - } - x := v_0.Args[0] - v.reset(OpPPC64ANDconst) - v.AuxInt = int64ToAuxInt(c & 0xFFFFFFFF) - v.AddArg(x) - return true - } - // match: (ANDconst [c] (MOVWZreg x)) - // result: (ANDconst [c&0xFFFFFFFF] x) - for { - c := auxIntToInt64(v.AuxInt) - if v_0.Op != OpPPC64MOVWZreg { - break - } - x := v_0.Args[0] - v.reset(OpPPC64ANDconst) - v.AuxInt = int64ToAuxInt(c & 0xFFFFFFFF) - v.AddArg(x) - return true - } - return false -} func rewriteValuePPC64_OpPPC64CLRLSLDI(v 
*Value) bool { v_0 := v.Args[0] // match: (CLRLSLDI [c] (SRWconst [s] x)) @@ -5879,21 +5698,31 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] - // match: (ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPU (ANDconst [d] y) (MOVDconst [c]))) + b := v.Block + typ := &b.Func.Config.Types + // match: (ISEL [0] (Select0 (ANDCCconst [d] y)) (MOVDconst [-1]) (CMPU (Select0 (ANDCCconst [d] y)) (MOVDconst [c]))) // cond: c >= d - // result: (ANDconst [d] y) + // result: (Select0 (ANDCCconst [d] y)) for { - if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpPPC64ANDconst { + if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpSelect0 { break } - d := auxIntToInt64(v_0.AuxInt) - y := v_0.Args[0] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64ANDCCconst { + break + } + d := auxIntToInt64(v_0_0.AuxInt) + y := v_0_0.Args[0] if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 || v_2.Op != OpPPC64CMPU { break } _ = v_2.Args[1] v_2_0 := v_2.Args[0] - if v_2_0.Op != OpPPC64ANDconst || auxIntToInt64(v_2_0.AuxInt) != d || y != v_2_0.Args[0] { + if v_2_0.Op != OpSelect0 { + break + } + v_2_0_0 := v_2_0.Args[0] + if v_2_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_2_0_0.AuxInt) != d || y != v_2_0_0.Args[0] { break } v_2_1 := v_2.Args[1] @@ -5904,31 +5733,43 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { if !(c >= d) { break } - v.reset(OpPPC64ANDconst) - v.AuxInt = int64ToAuxInt(d) - v.AddArg(y) + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0.AuxInt = int64ToAuxInt(d) + v0.AddArg(y) + v.AddArg(v0) return true } - // match: (ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPUconst [c] (ANDconst [d] y))) + // match: (ISEL [0] (Select0 (ANDCCconst [d] y)) (MOVDconst [-1]) (CMPUconst [c] (Select0 (ANDCCconst [d] y)))) // cond: c >= d - // result: (ANDconst [d] y) + // result: (Select0 (ANDCCconst [d] y)) for { - if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != 
OpPPC64ANDconst { + if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpSelect0 { break } - d := auxIntToInt64(v_0.AuxInt) - y := v_0.Args[0] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64ANDCCconst { + break + } + d := auxIntToInt64(v_0_0.AuxInt) + y := v_0_0.Args[0] if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 || v_2.Op != OpPPC64CMPUconst { break } c := auxIntToInt64(v_2.AuxInt) v_2_0 := v_2.Args[0] - if v_2_0.Op != OpPPC64ANDconst || auxIntToInt64(v_2_0.AuxInt) != d || y != v_2_0.Args[0] || !(c >= d) { + if v_2_0.Op != OpSelect0 { break } - v.reset(OpPPC64ANDconst) - v.AuxInt = int64ToAuxInt(d) - v.AddArg(y) + v_2_0_0 := v_2_0.Args[0] + if v_2_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_2_0_0.AuxInt) != d || y != v_2_0_0.Args[0] || !(c >= d) { + break + } + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0.AuxInt = int64ToAuxInt(d) + v0.AddArg(y) + v.AddArg(v0) return true } // match: (ISEL [6] x y (CMPWconst [0] (ISELB [c] one cmp))) @@ -6187,6 +6028,126 @@ func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool { v.copyOf(y) return true } + // match: (ISEL [2] x y (CMPconst [0] (Select0 (ANDCCconst [1] z)))) + // result: (ISEL [2] x y (Select1 (ANDCCconst [1] z ))) + for { + if auxIntToInt32(v.AuxInt) != 2 { + break + } + x := v_0 + y := v_1 + if v_2.Op != OpPPC64CMPconst || auxIntToInt64(v_2.AuxInt) != 0 { + break + } + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpSelect0 { + break + } + v_2_0_0 := v_2_0.Args[0] + if v_2_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_2_0_0.AuxInt) != 1 { + break + } + z := v_2_0_0.Args[0] + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(1) + v1.AddArg(z) + v0.AddArg(v1) + v.AddArg3(x, y, v0) + return true + } + // match: (ISEL [6] x y (CMPconst [0] (Select0 (ANDCCconst [1] z)))) + // 
result: (ISEL [6] x y (Select1 (ANDCCconst [1] z ))) + for { + if auxIntToInt32(v.AuxInt) != 6 { + break + } + x := v_0 + y := v_1 + if v_2.Op != OpPPC64CMPconst || auxIntToInt64(v_2.AuxInt) != 0 { + break + } + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpSelect0 { + break + } + v_2_0_0 := v_2_0.Args[0] + if v_2_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_2_0_0.AuxInt) != 1 { + break + } + z := v_2_0_0.Args[0] + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(6) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(1) + v1.AddArg(z) + v0.AddArg(v1) + v.AddArg3(x, y, v0) + return true + } + // match: (ISEL [2] x y (CMPWconst [0] (Select0 (ANDCCconst [1] z)))) + // result: (ISEL [2] x y (Select1 (ANDCCconst [1] z ))) + for { + if auxIntToInt32(v.AuxInt) != 2 { + break + } + x := v_0 + y := v_1 + if v_2.Op != OpPPC64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 { + break + } + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpSelect0 { + break + } + v_2_0_0 := v_2_0.Args[0] + if v_2_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_2_0_0.AuxInt) != 1 { + break + } + z := v_2_0_0.Args[0] + v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(2) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(1) + v1.AddArg(z) + v0.AddArg(v1) + v.AddArg3(x, y, v0) + return true + } + // match: (ISEL [6] x y (CMPWconst [0] (Select0 (ANDCCconst [1] z)))) + // result: (ISEL [6] x y (Select1 (ANDCCconst [1] z ))) + for { + if auxIntToInt32(v.AuxInt) != 6 { + break + } + x := v_0 + y := v_1 + if v_2.Op != OpPPC64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 { + break + } + v_2_0 := v_2.Args[0] + if v_2_0.Op != OpSelect0 { + break + } + v_2_0_0 := v_2_0.Args[0] + if v_2_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_2_0_0.AuxInt) != 1 { + break + } + z := v_2_0_0.Args[0] + 
v.reset(OpPPC64ISEL) + v.AuxInt = int32ToAuxInt(6) + v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(1) + v1.AddArg(z) + v0.AddArg(v1) + v.AddArg3(x, y, v0) + return true + } // match: (ISEL [n] x y (InvertFlags bool)) // cond: n%4 == 0 // result: (ISEL [n+1] x y bool) @@ -6431,6 +6392,114 @@ func rewriteValuePPC64_OpPPC64ISELB(v *Value) bool { v.AuxInt = int64ToAuxInt(1) return true } + // match: (ISELB [2] x (CMPconst [0] (Select0 (ANDCCconst [1] z)))) + // result: (XORconst [1] (Select0 (ANDCCconst [1] z ))) + for { + if auxIntToInt32(v.AuxInt) != 2 { + break + } + if v_1.Op != OpPPC64CMPconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpSelect0 { + break + } + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 { + break + } + z := v_1_0_0.Args[0] + v.reset(OpPPC64XORconst) + v.AuxInt = int64ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(1) + v1.AddArg(z) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (ISELB [6] x (CMPconst [0] (Select0 (ANDCCconst [1] z)))) + // result: (Select0 (ANDCCconst [1] z )) + for { + if auxIntToInt32(v.AuxInt) != 6 { + break + } + if v_1.Op != OpPPC64CMPconst || auxIntToInt64(v_1.AuxInt) != 0 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpSelect0 { + break + } + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 { + break + } + z := v_1_0_0.Args[0] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0.AuxInt = int64ToAuxInt(1) + v0.AddArg(z) + v.AddArg(v0) + return true + } + // match: (ISELB [2] x (CMPWconst [0] (Select0 (ANDCCconst [1] z)))) + 
// result: (XORconst [1] (Select0 (ANDCCconst [1] z ))) + for { + if auxIntToInt32(v.AuxInt) != 2 { + break + } + if v_1.Op != OpPPC64CMPWconst || auxIntToInt32(v_1.AuxInt) != 0 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpSelect0 { + break + } + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 { + break + } + z := v_1_0_0.Args[0] + v.reset(OpPPC64XORconst) + v.AuxInt = int64ToAuxInt(1) + v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(1) + v1.AddArg(z) + v0.AddArg(v1) + v.AddArg(v0) + return true + } + // match: (ISELB [6] x (CMPWconst [0] (Select0 (ANDCCconst [1] z)))) + // result: (Select0 (ANDCCconst [1] z )) + for { + if auxIntToInt32(v.AuxInt) != 6 { + break + } + if v_1.Op != OpPPC64CMPWconst || auxIntToInt32(v_1.AuxInt) != 0 { + break + } + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpSelect0 { + break + } + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 { + break + } + z := v_1_0_0.Args[0] + v.reset(OpSelect0) + v.Type = typ.UInt64 + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0.AuxInt = int64ToAuxInt(1) + v0.AddArg(z) + v.AddArg(v0) + return true + } // match: (ISELB [n] (MOVDconst [1]) (InvertFlags bool)) // cond: n%4 == 0 // result: (ISELB [n+1] (MOVDconst [1]) bool) @@ -6761,15 +6830,19 @@ func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MOVBZreg y:(ANDconst [c] _)) + // match: (MOVBZreg y:(Select0 (ANDCCconst [c] _))) // cond: uint64(c) <= 0xFF // result: y for { y := v_0 - if y.Op != OpPPC64ANDconst { + if y.Op != OpSelect0 { break } - c := auxIntToInt64(y.AuxInt) + y_0 := y.Args[0] + if y_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(y_0.AuxInt) if !(uint64(c) <= 0xFF) { break } @@ -7083,15 
+7156,19 @@ func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool { } break } - // match: (MOVBZreg z:(ANDconst [c] (MOVBZload ptr x))) + // match: (MOVBZreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x)))) // result: z for { z := v_0 - if z.Op != OpPPC64ANDconst { + if z.Op != OpSelect0 { break } z_0 := z.Args[0] - if z_0.Op != OpPPC64MOVBZload { + if z_0.Op != OpPPC64ANDCCconst { + break + } + z_0_0 := z_0.Args[0] + if z_0_0.Op != OpPPC64MOVBZload { break } v.copyOf(z) @@ -7182,15 +7259,19 @@ func rewriteValuePPC64_OpPPC64MOVBreg(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MOVBreg y:(ANDconst [c] _)) + // match: (MOVBreg y:(Select0 (ANDCCconst [c] _))) // cond: uint64(c) <= 0x7F // result: y for { y := v_0 - if y.Op != OpPPC64ANDconst { + if y.Op != OpSelect0 { break } - c := auxIntToInt64(y.AuxInt) + y_0 := y.Args[0] + if y_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(y_0.AuxInt) if !(uint64(c) <= 0x7F) { break } @@ -8926,15 +9007,19 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MOVHZreg y:(ANDconst [c] _)) + // match: (MOVHZreg y:(Select0 (ANDCCconst [c] _))) // cond: uint64(c) <= 0xFFFF // result: y for { y := v_0 - if y.Op != OpPPC64ANDconst { + if y.Op != OpSelect0 { break } - c := auxIntToInt64(y.AuxInt) + y_0 := y.Args[0] + if y_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(y_0.AuxInt) if !(uint64(c) <= 0xFFFF) { break } @@ -9216,29 +9301,37 @@ func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool { } break } - // match: (MOVHZreg z:(ANDconst [c] (MOVBZload ptr x))) + // match: (MOVHZreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x)))) // result: z for { z := v_0 - if z.Op != OpPPC64ANDconst { + if z.Op != OpSelect0 { break } z_0 := z.Args[0] - if z_0.Op != OpPPC64MOVBZload { + if z_0.Op != OpPPC64ANDCCconst { + break + } + z_0_0 := z_0.Args[0] + if z_0_0.Op != OpPPC64MOVBZload { break } v.copyOf(z) 
return true } - // match: (MOVHZreg z:(ANDconst [c] (MOVHZload ptr x))) + // match: (MOVHZreg z:(Select0 (ANDCCconst [c] (MOVHZload ptr x)))) // result: z for { z := v_0 - if z.Op != OpPPC64ANDconst { + if z.Op != OpSelect0 { break } z_0 := z.Args[0] - if z_0.Op != OpPPC64MOVHZload { + if z_0.Op != OpPPC64ANDCCconst { + break + } + z_0_0 := z_0.Args[0] + if z_0_0.Op != OpPPC64MOVHZload { break } v.copyOf(z) @@ -9448,15 +9541,19 @@ func rewriteValuePPC64_OpPPC64MOVHreg(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MOVHreg y:(ANDconst [c] _)) + // match: (MOVHreg y:(Select0 (ANDCCconst [c] _))) // cond: uint64(c) <= 0x7FFF // result: y for { y := v_0 - if y.Op != OpPPC64ANDconst { + if y.Op != OpSelect0 { break } - c := auxIntToInt64(y.AuxInt) + y_0 := y.Args[0] + if y_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(y_0.AuxInt) if !(uint64(c) <= 0x7FFF) { break } @@ -10222,15 +10319,19 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MOVWZreg y:(ANDconst [c] _)) + // match: (MOVWZreg y:(Select0 (ANDCCconst [c] _))) // cond: uint64(c) <= 0xFFFFFFFF // result: y for { y := v_0 - if y.Op != OpPPC64ANDconst { + if y.Op != OpSelect0 { break } - c := auxIntToInt64(y.AuxInt) + y_0 := y.Args[0] + if y_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(y_0.AuxInt) if !(uint64(c) <= 0xFFFFFFFF) { break } @@ -10485,43 +10586,55 @@ func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool { } break } - // match: (MOVWZreg z:(ANDconst [c] (MOVBZload ptr x))) + // match: (MOVWZreg z:(Select0 (ANDCCconst [c] (MOVBZload ptr x)))) // result: z for { z := v_0 - if z.Op != OpPPC64ANDconst { + if z.Op != OpSelect0 { break } z_0 := z.Args[0] - if z_0.Op != OpPPC64MOVBZload { + if z_0.Op != OpPPC64ANDCCconst { + break + } + z_0_0 := z_0.Args[0] + if z_0_0.Op != OpPPC64MOVBZload { break } v.copyOf(z) return true } - // match: (MOVWZreg z:(ANDconst 
[c] (MOVHZload ptr x))) + // match: (MOVWZreg z:(Select0 (ANDCCconst [c] (MOVHZload ptr x)))) // result: z for { z := v_0 - if z.Op != OpPPC64ANDconst { + if z.Op != OpSelect0 { break } z_0 := z.Args[0] - if z_0.Op != OpPPC64MOVHZload { + if z_0.Op != OpPPC64ANDCCconst { + break + } + z_0_0 := z_0.Args[0] + if z_0_0.Op != OpPPC64MOVHZload { break } v.copyOf(z) return true } - // match: (MOVWZreg z:(ANDconst [c] (MOVWZload ptr x))) + // match: (MOVWZreg z:(Select0 (ANDCCconst [c] (MOVWZload ptr x)))) // result: z for { z := v_0 - if z.Op != OpPPC64ANDconst { + if z.Op != OpSelect0 { break } z_0 := z.Args[0] - if z_0.Op != OpPPC64MOVWZload { + if z_0.Op != OpPPC64ANDCCconst { + break + } + z_0_0 := z_0.Args[0] + if z_0_0.Op != OpPPC64MOVWZload { break } v.copyOf(z) @@ -10765,15 +10878,19 @@ func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (MOVWreg y:(ANDconst [c] _)) + // match: (MOVWreg y:(Select0 (ANDCCconst [c] _))) // cond: uint64(c) <= 0xFFFF // result: y for { y := v_0 - if y.Op != OpPPC64ANDconst { + if y.Op != OpSelect0 { break } - c := auxIntToInt64(y.AuxInt) + y_0 := y.Args[0] + if y_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(y_0.AuxInt) if !(uint64(c) <= 0xFFFF) { break } @@ -11573,7 +11690,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } break } - // match: ( OR (SLD x (ANDconst [63] y)) (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y)))) + // match: ( OR (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y))))) // result: (ROTL x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -11583,10 +11700,14 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpPPC64ANDconst || auxIntToInt64(v_0_1.AuxInt) != 63 { + if v_0_1.Op != OpSelect0 { continue } - y := v_0_1.Args[0] + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != 
OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 63 { + continue + } + y := v_0_1_0.Args[0] if v_1.Op != OpPPC64SRD { continue } @@ -11604,7 +11725,11 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { continue } v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || auxIntToInt64(v_1_1_1.AuxInt) != 63 || y != v_1_1_1.Args[0] { + if v_1_1_1.Op != OpSelect0 || v_1_1_1.Type != typ.UInt { + continue + } + v_1_1_1_0 := v_1_1_1.Args[0] + if v_1_1_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_1_0.AuxInt) != 63 || y != v_1_1_1_0.Args[0] { continue } v.reset(OpPPC64ROTL) @@ -11613,7 +11738,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } break } - // match: ( OR (SLD x (ANDconst [63] y)) (SRD x (SUBFCconst [64] (ANDconst [63] y)))) + // match: ( OR (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUBFCconst [64] (Select0 (ANDCCconst [63] y))))) // result: (ROTL x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -11623,10 +11748,14 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpPPC64ANDconst || auxIntToInt64(v_0_1.AuxInt) != 63 { + if v_0_1.Op != OpSelect0 { continue } - y := v_0_1.Args[0] + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 63 { + continue + } + y := v_0_1_0.Args[0] if v_1.Op != OpPPC64SRD { continue } @@ -11639,7 +11768,11 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { continue } v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 63 || y != v_1_1_0.Args[0] { + if v_1_1_0.Op != OpSelect0 || v_1_1_0.Type != typ.UInt { + continue + } + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 63 || y != v_1_1_0_0.Args[0] { continue } v.reset(OpPPC64ROTL) @@ -11648,7 +11781,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } break } 
- // match: ( OR (SLW x (ANDconst [31] y)) (SRW x (SUBFCconst [32] (ANDconst [31] y)))) + // match: ( OR (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUBFCconst [32] (Select0 (ANDCCconst [31] y))))) // result: (ROTLW x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -11658,10 +11791,14 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpPPC64ANDconst || auxIntToInt64(v_0_1.AuxInt) != 31 { + if v_0_1.Op != OpSelect0 { continue } - y := v_0_1.Args[0] + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 31 { + continue + } + y := v_0_1_0.Args[0] if v_1.Op != OpPPC64SRW { continue } @@ -11674,7 +11811,11 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { continue } v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 31 || y != v_1_1_0.Args[0] { + if v_1_1_0.Op != OpSelect0 || v_1_1_0.Type != typ.UInt { + continue + } + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 31 || y != v_1_1_0_0.Args[0] { continue } v.reset(OpPPC64ROTLW) @@ -11683,7 +11824,7 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { } break } - // match: ( OR (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) + // match: ( OR (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y))))) // result: (ROTLW x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -11693,10 +11834,14 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpPPC64ANDconst || auxIntToInt64(v_0_1.AuxInt) != 31 { + if v_0_1.Op != OpSelect0 { continue } - y := v_0_1.Args[0] + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 31 { + continue + } + y := 
v_0_1_0.Args[0] if v_1.Op != OpPPC64SRW { continue } @@ -11714,7 +11859,11 @@ func rewriteValuePPC64_OpPPC64OR(v *Value) bool { continue } v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || auxIntToInt64(v_1_1_1.AuxInt) != 31 || y != v_1_1_1.Args[0] { + if v_1_1_1.Op != OpSelect0 || v_1_1_1.Type != typ.UInt { + continue + } + v_1_1_1_0 := v_1_1_1.Args[0] + if v_1_1_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_1_0.AuxInt) != 31 || y != v_1_1_1_0.Args[0] { continue } v.reset(OpPPC64ROTLW) @@ -13201,16 +13350,20 @@ func rewriteValuePPC64_OpPPC64ROTLWconst(v *Value) bool { } break } - // match: (ROTLWconst [r] (ANDconst [m] x)) + // match: (ROTLWconst [r] (Select0 (ANDCCconst [m] x))) // cond: isPPC64WordRotateMask(m) // result: (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x) for { r := auxIntToInt64(v.AuxInt) - if v_0.Op != OpPPC64ANDconst { + if v_0.Op != OpSelect0 { break } - m := auxIntToInt64(v_0.AuxInt) - x := v_0.Args[0] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64ANDCCconst { + break + } + m := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] if !(isPPC64WordRotateMask(m)) { break } @@ -13313,17 +13466,21 @@ func rewriteValuePPC64_OpPPC64SLDconst(v *Value) bool { v.AddArg(x) return true } - // match: (SLDconst [c] z:(ANDconst [d] x)) + // match: (SLDconst [c] z:(Select0 (ANDCCconst [d] x))) // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d)) // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x) for { c := auxIntToInt64(v.AuxInt) z := v_0 - if z.Op != OpPPC64ANDconst { + if z.Op != OpSelect0 { break } - d := auxIntToInt64(z.AuxInt) - x := z.Args[0] + z_0 := z.Args[0] + if z_0.Op != OpPPC64ANDCCconst { + break + } + d := auxIntToInt64(z_0.AuxInt) + x := z_0.Args[0] if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d))) { break } @@ -13436,17 +13593,21 @@ func rewriteValuePPC64_OpPPC64SLWconst(v *Value) bool 
{ v.AddArg(x) return true } - // match: (SLWconst [c] z:(ANDconst [d] x)) + // match: (SLWconst [c] z:(Select0 (ANDCCconst [d] x))) // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x) for { c := auxIntToInt64(v.AuxInt) z := v_0 - if z.Op != OpPPC64ANDconst { + if z.Op != OpSelect0 { break } - d := auxIntToInt64(z.AuxInt) - x := z.Args[0] + z_0 := z.Args[0] + if z_0.Op != OpPPC64ANDCCconst { + break + } + d := auxIntToInt64(z_0.AuxInt) + x := z_0.Args[0] if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (32-getPPC64ShiftMaskLength(d))) { break } @@ -13577,15 +13738,19 @@ func rewriteValuePPC64_OpPPC64SRW(v *Value) bool { } func rewriteValuePPC64_OpPPC64SRWconst(v *Value) bool { v_0 := v.Args[0] - // match: (SRWconst (ANDconst [m] x) [s]) + // match: (SRWconst (Select0 (ANDCCconst [m] x)) [s]) // cond: mergePPC64RShiftMask(m>>uint(s),s,32) == 0 // result: (MOVDconst [0]) for { s := auxIntToInt64(v.AuxInt) - if v_0.Op != OpPPC64ANDconst { + if v_0.Op != OpSelect0 { break } - m := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64ANDCCconst { + break + } + m := auxIntToInt64(v_0_0.AuxInt) if !(mergePPC64RShiftMask(m>>uint(s), s, 32) == 0) { break } @@ -13593,16 +13758,20 @@ func rewriteValuePPC64_OpPPC64SRWconst(v *Value) bool { v.AuxInt = int64ToAuxInt(0) return true } - // match: (SRWconst (ANDconst [m] x) [s]) + // match: (SRWconst (Select0 (ANDCCconst [m] x)) [s]) // cond: mergePPC64AndSrwi(m>>uint(s),s) != 0 // result: (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x) for { s := auxIntToInt64(v.AuxInt) - if v_0.Op != OpPPC64ANDconst { + if v_0.Op != OpSelect0 { break } - m := auxIntToInt64(v_0.AuxInt) - x := v_0.Args[0] + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64ANDCCconst { + break + } + m := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] if !(mergePPC64AndSrwi(m>>uint(s), s) != 0) { break } @@ -13831,7 +14000,7 @@ 
func rewriteValuePPC64_OpPPC64XOR(v *Value) bool { } break } - // match: (XOR (SLD x (ANDconst [63] y)) (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y)))) + // match: (XOR (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y))))) // result: (ROTL x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -13841,10 +14010,14 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool { _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpPPC64ANDconst || auxIntToInt64(v_0_1.AuxInt) != 63 { + if v_0_1.Op != OpSelect0 { continue } - y := v_0_1.Args[0] + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 63 { + continue + } + y := v_0_1_0.Args[0] if v_1.Op != OpPPC64SRD { continue } @@ -13862,7 +14035,11 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool { continue } v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || auxIntToInt64(v_1_1_1.AuxInt) != 63 || y != v_1_1_1.Args[0] { + if v_1_1_1.Op != OpSelect0 || v_1_1_1.Type != typ.UInt { + continue + } + v_1_1_1_0 := v_1_1_1.Args[0] + if v_1_1_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_1_0.AuxInt) != 63 || y != v_1_1_1_0.Args[0] { continue } v.reset(OpPPC64ROTL) @@ -13871,7 +14048,7 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool { } break } - // match: (XOR (SLD x (ANDconst [63] y)) (SRD x (SUBFCconst [64] (ANDconst [63] y)))) + // match: (XOR (SLD x (Select0 (ANDCCconst [63] y))) (SRD x (SUBFCconst [64] (Select0 (ANDCCconst [63] y))))) // result: (ROTL x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -13881,10 +14058,14 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool { _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpPPC64ANDconst || auxIntToInt64(v_0_1.AuxInt) != 63 { + if v_0_1.Op != OpSelect0 { continue } - y := v_0_1.Args[0] + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpPPC64ANDCCconst || 
auxIntToInt64(v_0_1_0.AuxInt) != 63 { + continue + } + y := v_0_1_0.Args[0] if v_1.Op != OpPPC64SRD { continue } @@ -13897,7 +14078,11 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool { continue } v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 63 || y != v_1_1_0.Args[0] { + if v_1_1_0.Op != OpSelect0 || v_1_1_0.Type != typ.UInt { + continue + } + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 63 || y != v_1_1_0_0.Args[0] { continue } v.reset(OpPPC64ROTL) @@ -13906,7 +14091,7 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool { } break } - // match: (XOR (SLW x (ANDconst [31] y)) (SRW x (SUBFCconst [32] (ANDconst [31] y)))) + // match: (XOR (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUBFCconst [32] (Select0 (ANDCCconst [31] y))))) // result: (ROTLW x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -13916,10 +14101,14 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool { _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpPPC64ANDconst || auxIntToInt64(v_0_1.AuxInt) != 31 { + if v_0_1.Op != OpSelect0 { continue } - y := v_0_1.Args[0] + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 31 { + continue + } + y := v_0_1_0.Args[0] if v_1.Op != OpPPC64SRW { continue } @@ -13932,7 +14121,11 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool { continue } v_1_1_0 := v_1_1.Args[0] - if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 31 || y != v_1_1_0.Args[0] { + if v_1_1_0.Op != OpSelect0 || v_1_1_0.Type != typ.UInt { + continue + } + v_1_1_0_0 := v_1_1_0.Args[0] + if v_1_1_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 31 || y != v_1_1_0_0.Args[0] { continue } v.reset(OpPPC64ROTLW) @@ -13941,7 +14134,7 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool { } break } - // match: 
(XOR (SLW x (ANDconst [31] y)) (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y)))) + // match: (XOR (SLW x (Select0 (ANDCCconst [31] y))) (SRW x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y))))) // result: (ROTLW x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -13951,10 +14144,14 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool { _ = v_0.Args[1] x := v_0.Args[0] v_0_1 := v_0.Args[1] - if v_0_1.Op != OpPPC64ANDconst || auxIntToInt64(v_0_1.AuxInt) != 31 { + if v_0_1.Op != OpSelect0 { continue } - y := v_0_1.Args[0] + v_0_1_0 := v_0_1.Args[0] + if v_0_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_1_0.AuxInt) != 31 { + continue + } + y := v_0_1_0.Args[0] if v_1.Op != OpPPC64SRW { continue } @@ -13972,7 +14169,11 @@ func rewriteValuePPC64_OpPPC64XOR(v *Value) bool { continue } v_1_1_1 := v_1_1.Args[1] - if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || auxIntToInt64(v_1_1_1.AuxInt) != 31 || y != v_1_1_1.Args[0] { + if v_1_1_1.Op != OpSelect0 || v_1_1_1.Type != typ.UInt { + continue + } + v_1_1_1_0 := v_1_1_1.Args[0] + if v_1_1_1_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_1_1_1_0.AuxInt) != 31 || y != v_1_1_1_0.Args[0] { continue } v.reset(OpPPC64ROTLW) @@ -14930,7 +15131,7 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { return true } // match: (Rsh32Ux64 x (AND y (MOVDconst [31]))) - // result: (SRW x (ANDconst [31] y)) + // result: (SRW x (Select0 (ANDCCconst [31] y))) for { x := v_0 if v_1.Op != OpPPC64AND { @@ -14945,31 +15146,39 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { continue } v.reset(OpPPC64SRW) - v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) - v0.AuxInt = int64ToAuxInt(31) - v0.AddArg(y) + v0 := b.NewValue0(v.Pos, OpSelect0, typ.Int32) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(31) + v1.AddArg(y) + v0.AddArg(v1) v.AddArg2(x, v0) return true } break } - // match: (Rsh32Ux64 x (ANDconst [31] y)) - // result: (SRW x 
(ANDconst [31] y)) + // match: (Rsh32Ux64 x (Select0 (ANDCCconst [31] y))) + // result: (SRW x (Select0 (ANDCCconst [31] y))) for { x := v_0 - if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 31 { + if v_1.Op != OpSelect0 { break } - y := v_1.Args[0] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64ANDCCconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 31 { + break + } + y := v_1_0.Args[0] v.reset(OpPPC64SRW) - v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v0.AuxInt = int64ToAuxInt(31) - v0.AddArg(y) + v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(31) + v1.AddArg(y) + v0.AddArg(v1) v.AddArg2(x, v0) return true } - // match: (Rsh32Ux64 x (SUB (MOVDconst [32]) (ANDconst [31] y))) - // result: (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y))) + // match: (Rsh32Ux64 x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y)))) + // result: (SRW x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y)))) for { x := v_0 if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { @@ -14981,45 +15190,57 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { break } v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 31 { + if v_1_1.Op != OpSelect0 { break } - y := v_1_1.Args[0] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpPPC64ANDCCconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 31 { + break + } + y := v_1_1_0.Args[0] v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = int64ToAuxInt(32) - v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v2.AuxInt = int64ToAuxInt(31) - v2.AddArg(y) + v2 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) + v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3.AuxInt = int64ToAuxInt(31) + 
v3.AddArg(y) + v2.AddArg(v3) v0.AddArg2(v1, v2) v.AddArg2(x, v0) return true } - // match: (Rsh32Ux64 x (SUBFCconst [32] (ANDconst [31] y))) - // result: (SRW x (SUBFCconst [32] (ANDconst [31] y))) + // match: (Rsh32Ux64 x (SUBFCconst [32] (Select0 (ANDCCconst [31] y)))) + // result: (SRW x (SUBFCconst [32] (Select0 (ANDCCconst [31] y)))) for { x := v_0 if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 32 { break } v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64ANDconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 31 { + if v_1_0.Op != OpSelect0 { break } - y := v_1_0.Args[0] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpPPC64ANDCCconst || v_1_0_0.Type != typ.UInt || auxIntToInt64(v_1_0_0.AuxInt) != 31 { + break + } + y := v_1_0_0.Args[0] v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt) v0.AuxInt = int64ToAuxInt(32) - v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v1.AuxInt = int64ToAuxInt(31) - v1.AddArg(y) + v1 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) + v2 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v2.AuxInt = int64ToAuxInt(31) + v2.AddArg(y) + v1.AddArg(v2) v0.AddArg(v1) v.AddArg2(x, v0) return true } // match: (Rsh32Ux64 x (SUB (MOVDconst [32]) (AND y (MOVDconst [31])))) - // result: (SRW x (SUB (MOVDconst [32]) (ANDconst [31] y))) + // result: (SRW x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y)))) for { x := v_0 if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { @@ -15046,9 +15267,11 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = int64ToAuxInt(32) - v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v2.AuxInt = int64ToAuxInt(31) - v2.AddArg(y) + v2 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) + v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3.AuxInt = int64ToAuxInt(31) 
+ v3.AddArg(y) + v2.AddArg(v3) v0.AddArg2(v1, v2) v.AddArg2(x, v0) return true @@ -15056,7 +15279,7 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { break } // match: (Rsh32Ux64 x (SUBFCconst [32] (AND y (MOVDconst [31])))) - // result: (SRW x (SUBFCconst [32] (ANDconst [31] y))) + // result: (SRW x (SUBFCconst [32] (Select0 (ANDCCconst [31] y)))) for { x := v_0 if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 32 { @@ -15077,9 +15300,11 @@ func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool { v.reset(OpPPC64SRW) v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt) v0.AuxInt = int64ToAuxInt(32) - v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v1.AuxInt = int64ToAuxInt(31) - v1.AddArg(y) + v1 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) + v2 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v2.AuxInt = int64ToAuxInt(31) + v2.AddArg(y) + v1.AddArg(v2) v0.AddArg(v1) v.AddArg2(x, v0) return true @@ -15290,7 +15515,7 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool { return true } // match: (Rsh32x64 x (AND y (MOVDconst [31]))) - // result: (SRAW x (ANDconst [31] y)) + // result: (SRAW x (Select0 (ANDCCconst [31] y))) for { x := v_0 if v_1.Op != OpPPC64AND { @@ -15305,31 +15530,39 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool { continue } v.reset(OpPPC64SRAW) - v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32) - v0.AuxInt = int64ToAuxInt(31) - v0.AddArg(y) + v0 := b.NewValue0(v.Pos, OpSelect0, typ.Int32) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(31) + v1.AddArg(y) + v0.AddArg(v1) v.AddArg2(x, v0) return true } break } - // match: (Rsh32x64 x (ANDconst [31] y)) - // result: (SRAW x (ANDconst [31] y)) + // match: (Rsh32x64 x (Select0 (ANDCCconst [31] y))) + // result: (SRAW x (Select0 (ANDCCconst [31] y))) for { x := v_0 - if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 
31 { + if v_1.Op != OpSelect0 { break } - y := v_1.Args[0] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64ANDCCconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 31 { + break + } + y := v_1_0.Args[0] v.reset(OpPPC64SRAW) - v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v0.AuxInt = int64ToAuxInt(31) - v0.AddArg(y) + v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(31) + v1.AddArg(y) + v0.AddArg(v1) v.AddArg2(x, v0) return true } - // match: (Rsh32x64 x (SUB (MOVDconst [32]) (ANDconst [31] y))) - // result: (SRAW x (SUB (MOVDconst [32]) (ANDconst [31] y))) + // match: (Rsh32x64 x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y)))) + // result: (SRAW x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y)))) for { x := v_0 if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { @@ -15341,45 +15574,57 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool { break } v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 31 { + if v_1_1.Op != OpSelect0 { break } - y := v_1_1.Args[0] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpPPC64ANDCCconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 31 { + break + } + y := v_1_1_0.Args[0] v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = int64ToAuxInt(32) - v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v2.AuxInt = int64ToAuxInt(31) - v2.AddArg(y) + v2 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) + v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3.AuxInt = int64ToAuxInt(31) + v3.AddArg(y) + v2.AddArg(v3) v0.AddArg2(v1, v2) v.AddArg2(x, v0) return true } - // match: (Rsh32x64 x (SUBFCconst [32] (ANDconst [31] y))) - // result: (SRAW x (SUBFCconst [32] (ANDconst [31] y))) + // match: (Rsh32x64 x 
(SUBFCconst [32] (Select0 (ANDCCconst [31] y)))) + // result: (SRAW x (SUBFCconst [32] (Select0 (ANDCCconst [31] y)))) for { x := v_0 if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 32 { break } v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64ANDconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 31 { + if v_1_0.Op != OpSelect0 { break } - y := v_1_0.Args[0] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpPPC64ANDCCconst || v_1_0_0.Type != typ.UInt || auxIntToInt64(v_1_0_0.AuxInt) != 31 { + break + } + y := v_1_0_0.Args[0] v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt) v0.AuxInt = int64ToAuxInt(32) - v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v1.AuxInt = int64ToAuxInt(31) - v1.AddArg(y) + v1 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) + v2 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v2.AuxInt = int64ToAuxInt(31) + v2.AddArg(y) + v1.AddArg(v2) v0.AddArg(v1) v.AddArg2(x, v0) return true } // match: (Rsh32x64 x (SUB (MOVDconst [32]) (AND y (MOVDconst [31])))) - // result: (SRAW x (SUB (MOVDconst [32]) (ANDconst [31] y))) + // result: (SRAW x (SUB (MOVDconst [32]) (Select0 (ANDCCconst [31] y)))) for { x := v_0 if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { @@ -15406,9 +15651,11 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = int64ToAuxInt(32) - v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v2.AuxInt = int64ToAuxInt(31) - v2.AddArg(y) + v2 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) + v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3.AuxInt = int64ToAuxInt(31) + v3.AddArg(y) + v2.AddArg(v3) v0.AddArg2(v1, v2) v.AddArg2(x, v0) return true @@ -15416,7 +15663,7 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool { break } // match: (Rsh32x64 x (SUBFCconst [32] (AND y (MOVDconst 
[31])))) - // result: (SRAW x (SUBFCconst [32] (ANDconst [31] y))) + // result: (SRAW x (SUBFCconst [32] (Select0 (ANDCCconst [31] y)))) for { x := v_0 if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 32 { @@ -15437,9 +15684,11 @@ func rewriteValuePPC64_OpRsh32x64(v *Value) bool { v.reset(OpPPC64SRAW) v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt) v0.AuxInt = int64ToAuxInt(32) - v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v1.AuxInt = int64ToAuxInt(31) - v1.AddArg(y) + v1 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) + v2 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v2.AuxInt = int64ToAuxInt(31) + v2.AddArg(y) + v1.AddArg(v2) v0.AddArg(v1) v.AddArg2(x, v0) return true @@ -15648,7 +15897,7 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { return true } // match: (Rsh64Ux64 x (AND y (MOVDconst [63]))) - // result: (SRD x (ANDconst [63] y)) + // result: (SRD x (Select0 (ANDCCconst [63] y))) for { x := v_0 if v_1.Op != OpPPC64AND { @@ -15663,31 +15912,39 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { continue } v.reset(OpPPC64SRD) - v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) - v0.AuxInt = int64ToAuxInt(63) - v0.AddArg(y) + v0 := b.NewValue0(v.Pos, OpSelect0, typ.Int64) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(63) + v1.AddArg(y) + v0.AddArg(v1) v.AddArg2(x, v0) return true } break } - // match: (Rsh64Ux64 x (ANDconst [63] y)) - // result: (SRD x (ANDconst [63] y)) + // match: (Rsh64Ux64 x (Select0 (ANDCCconst [63] y))) + // result: (SRD x (Select0 (ANDCCconst [63] y))) for { x := v_0 - if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 63 { + if v_1.Op != OpSelect0 { break } - y := v_1.Args[0] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64ANDCCconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 63 { + break + } + y := v_1_0.Args[0] 
v.reset(OpPPC64SRD) - v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v0.AuxInt = int64ToAuxInt(63) - v0.AddArg(y) + v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(63) + v1.AddArg(y) + v0.AddArg(v1) v.AddArg2(x, v0) return true } - // match: (Rsh64Ux64 x (SUB (MOVDconst [64]) (ANDconst [63] y))) - // result: (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y))) + // match: (Rsh64Ux64 x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y)))) + // result: (SRD x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y)))) for { x := v_0 if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { @@ -15699,45 +15956,57 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { break } v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 63 { + if v_1_1.Op != OpSelect0 { break } - y := v_1_1.Args[0] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpPPC64ANDCCconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 63 { + break + } + y := v_1_1_0.Args[0] v.reset(OpPPC64SRD) v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = int64ToAuxInt(64) - v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v2.AuxInt = int64ToAuxInt(63) - v2.AddArg(y) + v2 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) + v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3.AuxInt = int64ToAuxInt(63) + v3.AddArg(y) + v2.AddArg(v3) v0.AddArg2(v1, v2) v.AddArg2(x, v0) return true } - // match: (Rsh64Ux64 x (SUBFCconst [64] (ANDconst [63] y))) - // result: (SRD x (SUBFCconst [64] (ANDconst [63] y))) + // match: (Rsh64Ux64 x (SUBFCconst [64] (Select0 (ANDCCconst [63] y)))) + // result: (SRD x (SUBFCconst [64] (Select0 (ANDCCconst [63] y)))) for { x := v_0 if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 64 { 
break } v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64ANDconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 63 { + if v_1_0.Op != OpSelect0 { break } - y := v_1_0.Args[0] + v_1_0_0 := v_1_0.Args[0] + if v_1_0_0.Op != OpPPC64ANDCCconst || v_1_0_0.Type != typ.UInt || auxIntToInt64(v_1_0_0.AuxInt) != 63 { + break + } + y := v_1_0_0.Args[0] v.reset(OpPPC64SRD) v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt) v0.AuxInt = int64ToAuxInt(64) - v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v1.AuxInt = int64ToAuxInt(63) - v1.AddArg(y) + v1 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) + v2 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v2.AuxInt = int64ToAuxInt(63) + v2.AddArg(y) + v1.AddArg(v2) v0.AddArg(v1) v.AddArg2(x, v0) return true } // match: (Rsh64Ux64 x (SUB (MOVDconst [64]) (AND y (MOVDconst [63])))) - // result: (SRD x (SUB (MOVDconst [64]) (ANDconst [63] y))) + // result: (SRD x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y)))) for { x := v_0 if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { @@ -15764,9 +16033,11 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = int64ToAuxInt(64) - v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v2.AuxInt = int64ToAuxInt(63) - v2.AddArg(y) + v2 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) + v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3.AuxInt = int64ToAuxInt(63) + v3.AddArg(y) + v2.AddArg(v3) v0.AddArg2(v1, v2) v.AddArg2(x, v0) return true @@ -15774,7 +16045,7 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { break } // match: (Rsh64Ux64 x (SUBFCconst [64] (AND y (MOVDconst [63])))) - // result: (SRD x (SUBFCconst [64] (ANDconst [63] y))) + // result: (SRD x (SUBFCconst [64] (Select0 (ANDCCconst [63] y)))) for { x := v_0 if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || 
auxIntToInt64(v_1.AuxInt) != 64 { @@ -15795,9 +16066,11 @@ func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool { v.reset(OpPPC64SRD) v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt) v0.AuxInt = int64ToAuxInt(64) - v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v1.AuxInt = int64ToAuxInt(63) - v1.AddArg(y) + v1 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) + v2 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v2.AuxInt = int64ToAuxInt(63) + v2.AddArg(y) + v1.AddArg(v2) v0.AddArg(v1) v.AddArg2(x, v0) return true @@ -16008,7 +16281,7 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool { return true } // match: (Rsh64x64 x (AND y (MOVDconst [63]))) - // result: (SRAD x (ANDconst [63] y)) + // result: (SRAD x (Select0 (ANDCCconst [63] y))) for { x := v_0 if v_1.Op != OpPPC64AND { @@ -16023,31 +16296,39 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool { continue } v.reset(OpPPC64SRAD) - v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64) - v0.AuxInt = int64ToAuxInt(63) - v0.AddArg(y) + v0 := b.NewValue0(v.Pos, OpSelect0, typ.Int64) + v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(63) + v1.AddArg(y) + v0.AddArg(v1) v.AddArg2(x, v0) return true } break } - // match: (Rsh64x64 x (ANDconst [63] y)) - // result: (SRAD x (ANDconst [63] y)) + // match: (Rsh64x64 x (Select0 (ANDCCconst [63] y))) + // result: (SRAD x (Select0 (ANDCCconst [63] y))) for { x := v_0 - if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 63 { + if v_1.Op != OpSelect0 { break } - y := v_1.Args[0] + v_1_0 := v_1.Args[0] + if v_1_0.Op != OpPPC64ANDCCconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 63 { + break + } + y := v_1_0.Args[0] v.reset(OpPPC64SRAD) - v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v0.AuxInt = int64ToAuxInt(63) - v0.AddArg(y) + v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) + v1 := b.NewValue0(v.Pos, 
OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(63) + v1.AddArg(y) + v0.AddArg(v1) v.AddArg2(x, v0) return true } - // match: (Rsh64x64 x (SUB (MOVDconst [64]) (ANDconst [63] y))) - // result: (SRAD x (SUB (MOVDconst [64]) (ANDconst [63] y))) + // match: (Rsh64x64 x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y)))) + // result: (SRAD x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y)))) for { x := v_0 if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { @@ -16059,45 +16340,57 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool { break } v_1_1 := v_1.Args[1] - if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 63 { + if v_1_1.Op != OpSelect0 { break } - y := v_1_1.Args[0] + v_1_1_0 := v_1_1.Args[0] + if v_1_1_0.Op != OpPPC64ANDCCconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 63 { + break + } + y := v_1_1_0.Args[0] v.reset(OpPPC64SRAD) v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = int64ToAuxInt(64) - v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v2.AuxInt = int64ToAuxInt(63) - v2.AddArg(y) + v2 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) + v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3.AuxInt = int64ToAuxInt(63) + v3.AddArg(y) + v2.AddArg(v3) v0.AddArg2(v1, v2) v.AddArg2(x, v0) return true } - // match: (Rsh64x64 x (SUBFCconst [64] (ANDconst [63] y))) - // result: (SRAD x (SUBFCconst [64] (ANDconst [63] y))) + // match: (Rsh64x64 x (SUBFCconst [64] (Select0 (ANDCCconst [63] y)))) + // result: (SRAD x (SUBFCconst [64] (Select0 (ANDCCconst [63] y)))) for { x := v_0 if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 64 { break } v_1_0 := v_1.Args[0] - if v_1_0.Op != OpPPC64ANDconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 63 { + if v_1_0.Op != OpSelect0 { break } - y := v_1_0.Args[0] + v_1_0_0 
:= v_1_0.Args[0] + if v_1_0_0.Op != OpPPC64ANDCCconst || v_1_0_0.Type != typ.UInt || auxIntToInt64(v_1_0_0.AuxInt) != 63 { + break + } + y := v_1_0_0.Args[0] v.reset(OpPPC64SRAD) v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt) v0.AuxInt = int64ToAuxInt(64) - v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v1.AuxInt = int64ToAuxInt(63) - v1.AddArg(y) + v1 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) + v2 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v2.AuxInt = int64ToAuxInt(63) + v2.AddArg(y) + v1.AddArg(v2) v0.AddArg(v1) v.AddArg2(x, v0) return true } // match: (Rsh64x64 x (SUB (MOVDconst [64]) (AND y (MOVDconst [63])))) - // result: (SRAD x (SUB (MOVDconst [64]) (ANDconst [63] y))) + // result: (SRAD x (SUB (MOVDconst [64]) (Select0 (ANDCCconst [63] y)))) for { x := v_0 if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt { @@ -16124,9 +16417,11 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool { v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt) v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64) v1.AuxInt = int64ToAuxInt(64) - v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v2.AuxInt = int64ToAuxInt(63) - v2.AddArg(y) + v2 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) + v3 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v3.AuxInt = int64ToAuxInt(63) + v3.AddArg(y) + v2.AddArg(v3) v0.AddArg2(v1, v2) v.AddArg2(x, v0) return true @@ -16134,7 +16429,7 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool { break } // match: (Rsh64x64 x (SUBFCconst [64] (AND y (MOVDconst [63])))) - // result: (SRAD x (SUBFCconst [64] (ANDconst [63] y))) + // result: (SRAD x (SUBFCconst [64] (Select0 (ANDCCconst [63] y)))) for { x := v_0 if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 64 { @@ -16155,9 +16450,11 @@ func rewriteValuePPC64_OpRsh64x64(v *Value) bool { v.reset(OpPPC64SRAD) v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt) v0.AuxInt = 
int64ToAuxInt(64) - v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt) - v1.AuxInt = int64ToAuxInt(63) - v1.AddArg(y) + v1 := b.NewValue0(v.Pos, OpSelect0, typ.UInt) + v2 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v2.AuxInt = int64ToAuxInt(63) + v2.AddArg(y) + v1.AddArg(v2) v0.AddArg(v1) v.AddArg2(x, v0) return true @@ -16714,6 +17011,306 @@ func rewriteValuePPC64_OpSelect0(v *Value) bool { v.AddArg(v0) return true } + // match: (Select0 (ANDCCconst [m] (ROTLWconst [r] x))) + // cond: isPPC64WordRotateMask(m) + // result: (RLWINM [encodePPC64RotateMask(r,m,32)] x) + for { + if v_0.Op != OpPPC64ANDCCconst { + break + } + m := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64ROTLWconst { + break + } + r := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + if !(isPPC64WordRotateMask(m)) { + break + } + v.reset(OpPPC64RLWINM) + v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, m, 32)) + v.AddArg(x) + return true + } + // match: (Select0 (ANDCCconst [m] (ROTLW x r))) + // cond: isPPC64WordRotateMask(m) + // result: (RLWNM [encodePPC64RotateMask(0,m,32)] x r) + for { + if v_0.Op != OpPPC64ANDCCconst { + break + } + m := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64ROTLW { + break + } + r := v_0_0.Args[1] + x := v_0_0.Args[0] + if !(isPPC64WordRotateMask(m)) { + break + } + v.reset(OpPPC64RLWNM) + v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 32)) + v.AddArg2(x, r) + return true + } + // match: (Select0 (ANDCCconst [m] (SRWconst x [s]))) + // cond: mergePPC64RShiftMask(m,s,32) == 0 + // result: (MOVDconst [0]) + for { + if v_0.Op != OpPPC64ANDCCconst { + break + } + m := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64SRWconst { + break + } + s := auxIntToInt64(v_0_0.AuxInt) + if !(mergePPC64RShiftMask(m, s, 32) == 0) { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (Select0 (ANDCCconst [m] 
(SRWconst x [s]))) + // cond: mergePPC64AndSrwi(m,s) != 0 + // result: (RLWINM [mergePPC64AndSrwi(m,s)] x) + for { + if v_0.Op != OpPPC64ANDCCconst { + break + } + m := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64SRWconst { + break + } + s := auxIntToInt64(v_0_0.AuxInt) + x := v_0_0.Args[0] + if !(mergePPC64AndSrwi(m, s) != 0) { + break + } + v.reset(OpPPC64RLWINM) + v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m, s)) + v.AddArg(x) + return true + } + // match: (Select0 (ANDCCconst [c] (Select0 (ANDCCconst [d] x)))) + // result: (Select0 (ANDCCconst [c&d] x)) + for { + if v_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpSelect0 { + break + } + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst { + break + } + d := auxIntToInt64(v_0_0_0.AuxInt) + x := v_0_0_0.Args[0] + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0.AuxInt = int64ToAuxInt(c & d) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Select0 (ANDCCconst [-1] x)) + // result: x + for { + if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != -1 { + break + } + x := v_0.Args[0] + v.copyOf(x) + return true + } + // match: (Select0 (ANDCCconst [0] _)) + // result: (MOVDconst [0]) + for { + if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0 { + break + } + v.reset(OpPPC64MOVDconst) + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (Select0 (ANDCCconst [c] y:(MOVBZreg _))) + // cond: c&0xFF == 0xFF + // result: y + for { + if v_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + y := v_0.Args[0] + if y.Op != OpPPC64MOVBZreg || !(c&0xFF == 0xFF) { + break + } + v.copyOf(y) + return true + } + // match: (Select0 (ANDCCconst [0xFF] y:(MOVBreg _))) + // result: y + for { + if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0xFF { + break + } + y := v_0.Args[0] + if y.Op 
!= OpPPC64MOVBreg { + break + } + v.copyOf(y) + return true + } + // match: (Select0 (ANDCCconst [c] y:(MOVHZreg _))) + // cond: c&0xFFFF == 0xFFFF + // result: y + for { + if v_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + y := v_0.Args[0] + if y.Op != OpPPC64MOVHZreg || !(c&0xFFFF == 0xFFFF) { + break + } + v.copyOf(y) + return true + } + // match: (Select0 (ANDCCconst [0xFFFF] y:(MOVHreg _))) + // result: y + for { + if v_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0.AuxInt) != 0xFFFF { + break + } + y := v_0.Args[0] + if y.Op != OpPPC64MOVHreg { + break + } + v.copyOf(y) + return true + } + // match: (Select0 (ANDCCconst [c] (MOVBreg x))) + // result: (Select0 (ANDCCconst [c&0xFF] x)) + for { + if v_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVBreg { + break + } + x := v_0_0.Args[0] + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0.AuxInt = int64ToAuxInt(c & 0xFF) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Select0 (ANDCCconst [c] (MOVBZreg x))) + // result: (Select0 (ANDCCconst [c&0xFF] x)) + for { + if v_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVBZreg { + break + } + x := v_0_0.Args[0] + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0.AuxInt = int64ToAuxInt(c & 0xFF) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Select0 (ANDCCconst [c] (MOVHreg x))) + // result: (Select0 (ANDCCconst [c&0xFFFF] x)) + for { + if v_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVHreg { + break + } + x := v_0_0.Args[0] + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0.AuxInt = int64ToAuxInt(c & 
0xFFFF) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Select0 (ANDCCconst [c] (MOVHZreg x))) + // result: (Select0 (ANDCCconst [c&0xFFFF] x)) + for { + if v_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVHZreg { + break + } + x := v_0_0.Args[0] + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0.AuxInt = int64ToAuxInt(c & 0xFFFF) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Select0 (ANDCCconst [c] (MOVWreg x))) + // result: (Select0 (ANDCCconst [c&0xFFFFFFFF] x)) + for { + if v_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVWreg { + break + } + x := v_0_0.Args[0] + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0.AuxInt = int64ToAuxInt(c & 0xFFFFFFFF) + v0.AddArg(x) + v.AddArg(v0) + return true + } + // match: (Select0 (ANDCCconst [c] (MOVWZreg x))) + // result: (Select0 (ANDCCconst [c&0xFFFFFFFF] x)) + for { + if v_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0.AuxInt) + v_0_0 := v_0.Args[0] + if v_0_0.Op != OpPPC64MOVWZreg { + break + } + x := v_0_0.Args[0] + v.reset(OpSelect0) + v0 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v0.AuxInt = int64ToAuxInt(c & 0xFFFFFFFF) + v0.AddArg(x) + v.AddArg(v0) + return true + } return false } func rewriteValuePPC64_OpSelect1(v *Value) bool { @@ -17419,41 +18016,53 @@ func rewriteBlockPPC64(b *Block) bool { typ := &b.Func.Config.Types switch b.Kind { case BlockPPC64EQ: - // match: (EQ (CMPconst [0] (ANDconst [c] x)) yes no) - // result: (EQ (ANDCCconst [c] x) yes no) + // match: (EQ (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no) + // result: (EQ (Select1 (ANDCCconst [c] x)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if 
auxIntToInt64(v_0.AuxInt) != 0 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst { + if v_0_0.Op != OpSelect0 { break } - c := auxIntToInt64(v_0_0.AuxInt) - x := v_0_0.Args[0] - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) - v0.AuxInt = int64ToAuxInt(c) - v0.AddArg(x) + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0_0_0.AuxInt) + x := v_0_0_0.Args[0] + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(c) + v1.AddArg(x) + v0.AddArg(v1) b.resetWithControl(BlockPPC64EQ, v0) return true } - // match: (EQ (CMPWconst [0] (ANDconst [c] x)) yes no) - // result: (EQ (ANDCCconst [c] x) yes no) + // match: (EQ (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no) + // result: (EQ (Select1 (ANDCCconst [c] x)) yes no) for b.Controls[0].Op == OpPPC64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst { + if v_0_0.Op != OpSelect0 { break } - c := auxIntToInt64(v_0_0.AuxInt) - x := v_0_0.Args[0] - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) - v0.AuxInt = int64ToAuxInt(c) - v0.AddArg(x) + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0_0_0.AuxInt) + x := v_0_0_0.Args[0] + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(c) + v1.AddArg(x) + v0.AddArg(v1) b.resetWithControl(BlockPPC64EQ, v0) return true } @@ -17485,41 +18094,53 @@ func rewriteBlockPPC64(b *Block) bool { b.resetWithControl(BlockPPC64EQ, cmp) return true } - // match: (EQ (CMPconst [0] (ANDconst [c] x)) yes no) - // result: (EQ (ANDCCconst [c] x) yes no) + // match: (EQ (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no) + // result: (EQ 
(Select1 (ANDCCconst [c] x)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst { + if v_0_0.Op != OpSelect0 { break } - c := auxIntToInt64(v_0_0.AuxInt) - x := v_0_0.Args[0] - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) - v0.AuxInt = int64ToAuxInt(c) - v0.AddArg(x) + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0_0_0.AuxInt) + x := v_0_0_0.Args[0] + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(c) + v1.AddArg(x) + v0.AddArg(v1) b.resetWithControl(BlockPPC64EQ, v0) return true } - // match: (EQ (CMPWconst [0] (ANDconst [c] x)) yes no) - // result: (EQ (ANDCCconst [c] x) yes no) + // match: (EQ (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no) + // result: (EQ (Select1 (ANDCCconst [c] x)) yes no) for b.Controls[0].Op == OpPPC64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst { + if v_0_0.Op != OpSelect0 { break } - c := auxIntToInt64(v_0_0.AuxInt) - x := v_0_0.Args[0] - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) - v0.AuxInt = int64ToAuxInt(c) - v0.AddArg(x) + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0_0_0.AuxInt) + x := v_0_0_0.Args[0] + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(c) + v1.AddArg(x) + v0.AddArg(v1) b.resetWithControl(BlockPPC64EQ, v0) return true } @@ -17635,41 +18256,53 @@ func rewriteBlockPPC64(b *Block) bool { b.resetWithControl(BlockPPC64LE, cmp) return true } - // match: (GE (CMPconst [0] (ANDconst [c] x)) yes no) - // result: (GE (ANDCCconst [c] 
x) yes no) + // match: (GE (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no) + // result: (GE (Select1 (ANDCCconst [c] x)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst { + if v_0_0.Op != OpSelect0 { break } - c := auxIntToInt64(v_0_0.AuxInt) - x := v_0_0.Args[0] - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) - v0.AuxInt = int64ToAuxInt(c) - v0.AddArg(x) + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0_0_0.AuxInt) + x := v_0_0_0.Args[0] + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(c) + v1.AddArg(x) + v0.AddArg(v1) b.resetWithControl(BlockPPC64GE, v0) return true } - // match: (GE (CMPWconst [0] (ANDconst [c] x)) yes no) - // result: (GE (ANDCCconst [c] x) yes no) + // match: (GE (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no) + // result: (GE (Select1 (ANDCCconst [c] x)) yes no) for b.Controls[0].Op == OpPPC64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst { + if v_0_0.Op != OpSelect0 { break } - c := auxIntToInt64(v_0_0.AuxInt) - x := v_0_0.Args[0] - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) - v0.AuxInt = int64ToAuxInt(c) - v0.AddArg(x) + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0_0_0.AuxInt) + x := v_0_0_0.Args[0] + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(c) + v1.AddArg(x) + v0.AddArg(v1) b.resetWithControl(BlockPPC64GE, v0) return true } @@ -17786,41 +18419,53 @@ func rewriteBlockPPC64(b *Block) bool { b.resetWithControl(BlockPPC64LT, cmp) return 
true } - // match: (GT (CMPconst [0] (ANDconst [c] x)) yes no) - // result: (GT (ANDCCconst [c] x) yes no) + // match: (GT (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no) + // result: (GT (Select1 (ANDCCconst [c] x)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst { + if v_0_0.Op != OpSelect0 { break } - c := auxIntToInt64(v_0_0.AuxInt) - x := v_0_0.Args[0] - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) - v0.AuxInt = int64ToAuxInt(c) - v0.AddArg(x) + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0_0_0.AuxInt) + x := v_0_0_0.Args[0] + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(c) + v1.AddArg(x) + v0.AddArg(v1) b.resetWithControl(BlockPPC64GT, v0) return true } - // match: (GT (CMPWconst [0] (ANDconst [c] x)) yes no) - // result: (GT (ANDCCconst [c] x) yes no) + // match: (GT (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no) + // result: (GT (Select1 (ANDCCconst [c] x)) yes no) for b.Controls[0].Op == OpPPC64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst { + if v_0_0.Op != OpSelect0 { break } - c := auxIntToInt64(v_0_0.AuxInt) - x := v_0_0.Args[0] - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) - v0.AuxInt = int64ToAuxInt(c) - v0.AddArg(x) + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0_0_0.AuxInt) + x := v_0_0_0.Args[0] + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(c) + v1.AddArg(x) + v0.AddArg(v1) b.resetWithControl(BlockPPC64GT, v0) return true } @@ -17990,14 
+18635,16 @@ func rewriteBlockPPC64(b *Block) bool { return true } // match: (If cond yes no) - // result: (NE (CMPWconst [0] (ANDconst [1] cond)) yes no) + // result: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] cond))) yes no) for { cond := b.Controls[0] v0 := b.NewValue0(cond.Pos, OpPPC64CMPWconst, types.TypeFlags) v0.AuxInt = int32ToAuxInt(0) - v1 := b.NewValue0(cond.Pos, OpPPC64ANDconst, typ.UInt32) - v1.AuxInt = int64ToAuxInt(1) - v1.AddArg(cond) + v1 := b.NewValue0(cond.Pos, OpSelect0, typ.UInt32) + v2 := b.NewValue0(cond.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v2.AuxInt = int64ToAuxInt(1) + v2.AddArg(cond) + v1.AddArg(v2) v0.AddArg(v1) b.resetWithControl(BlockPPC64NE, v0) return true @@ -18030,41 +18677,53 @@ func rewriteBlockPPC64(b *Block) bool { b.resetWithControl(BlockPPC64GE, cmp) return true } - // match: (LE (CMPconst [0] (ANDconst [c] x)) yes no) - // result: (LE (ANDCCconst [c] x) yes no) + // match: (LE (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no) + // result: (LE (Select1 (ANDCCconst [c] x)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst { + if v_0_0.Op != OpSelect0 { break } - c := auxIntToInt64(v_0_0.AuxInt) - x := v_0_0.Args[0] - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) - v0.AuxInt = int64ToAuxInt(c) - v0.AddArg(x) + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0_0_0.AuxInt) + x := v_0_0_0.Args[0] + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(c) + v1.AddArg(x) + v0.AddArg(v1) b.resetWithControl(BlockPPC64LE, v0) return true } - // match: (LE (CMPWconst [0] (ANDconst [c] x)) yes no) - // result: (LE (ANDCCconst [c] x) yes no) + // match: (LE (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes 
no) + // result: (LE (Select1 (ANDCCconst [c] x)) yes no) for b.Controls[0].Op == OpPPC64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst { + if v_0_0.Op != OpSelect0 { break } - c := auxIntToInt64(v_0_0.AuxInt) - x := v_0_0.Args[0] - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) - v0.AuxInt = int64ToAuxInt(c) - v0.AddArg(x) + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0_0_0.AuxInt) + x := v_0_0_0.Args[0] + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(c) + v1.AddArg(x) + v0.AddArg(v1) b.resetWithControl(BlockPPC64LE, v0) return true } @@ -18181,41 +18840,53 @@ func rewriteBlockPPC64(b *Block) bool { b.resetWithControl(BlockPPC64GT, cmp) return true } - // match: (LT (CMPconst [0] (ANDconst [c] x)) yes no) - // result: (LT (ANDCCconst [c] x) yes no) + // match: (LT (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no) + // result: (LT (Select1 (ANDCCconst [c] x)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst { + if v_0_0.Op != OpSelect0 { break } - c := auxIntToInt64(v_0_0.AuxInt) - x := v_0_0.Args[0] - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) - v0.AuxInt = int64ToAuxInt(c) - v0.AddArg(x) + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0_0_0.AuxInt) + x := v_0_0_0.Args[0] + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(c) + v1.AddArg(x) + v0.AddArg(v1) b.resetWithControl(BlockPPC64LT, v0) return true } - // match: (LT (CMPWconst [0] (ANDconst [c] x)) yes no) - // result: 
(LT (ANDCCconst [c] x) yes no) + // match: (LT (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no) + // result: (LT (Select1 (ANDCCconst [c] x)) yes no) for b.Controls[0].Op == OpPPC64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst { + if v_0_0.Op != OpSelect0 { break } - c := auxIntToInt64(v_0_0.AuxInt) - x := v_0_0.Args[0] - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) - v0.AuxInt = int64ToAuxInt(c) - v0.AddArg(x) + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0_0_0.AuxInt) + x := v_0_0_0.Args[0] + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(c) + v1.AddArg(x) + v0.AddArg(v1) b.resetWithControl(BlockPPC64LT, v0) return true } @@ -18304,7 +18975,7 @@ func rewriteBlockPPC64(b *Block) bool { break } case BlockPPC64NE: - // match: (NE (CMPWconst [0] (ANDconst [1] (Equal cc))) yes no) + // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (Equal cc)))) yes no) // result: (EQ cc yes no) for b.Controls[0].Op == OpPPC64CMPWconst { v_0 := b.Controls[0] @@ -18312,18 +18983,22 @@ func rewriteBlockPPC64(b *Block) bool { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 { + if v_0_0.Op != OpSelect0 { break } v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpPPC64Equal { + if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { break } - cc := v_0_0_0.Args[0] + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpPPC64Equal { + break + } + cc := v_0_0_0_0.Args[0] b.resetWithControl(BlockPPC64EQ, cc) return true } - // match: (NE (CMPWconst [0] (ANDconst [1] (NotEqual cc))) yes no) + // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (NotEqual cc)))) yes no) // result: (NE cc yes no) for b.Controls[0].Op == OpPPC64CMPWconst { v_0 := 
b.Controls[0] @@ -18331,18 +19006,22 @@ func rewriteBlockPPC64(b *Block) bool { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 { + if v_0_0.Op != OpSelect0 { break } v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpPPC64NotEqual { + if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { break } - cc := v_0_0_0.Args[0] + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpPPC64NotEqual { + break + } + cc := v_0_0_0_0.Args[0] b.resetWithControl(BlockPPC64NE, cc) return true } - // match: (NE (CMPWconst [0] (ANDconst [1] (LessThan cc))) yes no) + // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (LessThan cc)))) yes no) // result: (LT cc yes no) for b.Controls[0].Op == OpPPC64CMPWconst { v_0 := b.Controls[0] @@ -18350,18 +19029,22 @@ func rewriteBlockPPC64(b *Block) bool { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 { + if v_0_0.Op != OpSelect0 { break } v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpPPC64LessThan { + if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { break } - cc := v_0_0_0.Args[0] + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpPPC64LessThan { + break + } + cc := v_0_0_0_0.Args[0] b.resetWithControl(BlockPPC64LT, cc) return true } - // match: (NE (CMPWconst [0] (ANDconst [1] (LessEqual cc))) yes no) + // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (LessEqual cc)))) yes no) // result: (LE cc yes no) for b.Controls[0].Op == OpPPC64CMPWconst { v_0 := b.Controls[0] @@ -18369,18 +19052,22 @@ func rewriteBlockPPC64(b *Block) bool { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 { + if v_0_0.Op != OpSelect0 { break } v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpPPC64LessEqual { + if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { break } - cc := v_0_0_0.Args[0] + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != 
OpPPC64LessEqual { + break + } + cc := v_0_0_0_0.Args[0] b.resetWithControl(BlockPPC64LE, cc) return true } - // match: (NE (CMPWconst [0] (ANDconst [1] (GreaterThan cc))) yes no) + // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (GreaterThan cc)))) yes no) // result: (GT cc yes no) for b.Controls[0].Op == OpPPC64CMPWconst { v_0 := b.Controls[0] @@ -18388,18 +19075,22 @@ func rewriteBlockPPC64(b *Block) bool { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 { + if v_0_0.Op != OpSelect0 { break } v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpPPC64GreaterThan { + if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { break } - cc := v_0_0_0.Args[0] + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpPPC64GreaterThan { + break + } + cc := v_0_0_0_0.Args[0] b.resetWithControl(BlockPPC64GT, cc) return true } - // match: (NE (CMPWconst [0] (ANDconst [1] (GreaterEqual cc))) yes no) + // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (GreaterEqual cc)))) yes no) // result: (GE cc yes no) for b.Controls[0].Op == OpPPC64CMPWconst { v_0 := b.Controls[0] @@ -18407,18 +19098,22 @@ func rewriteBlockPPC64(b *Block) bool { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 { + if v_0_0.Op != OpSelect0 { break } v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpPPC64GreaterEqual { + if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { break } - cc := v_0_0_0.Args[0] + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpPPC64GreaterEqual { + break + } + cc := v_0_0_0_0.Args[0] b.resetWithControl(BlockPPC64GE, cc) return true } - // match: (NE (CMPWconst [0] (ANDconst [1] (FLessThan cc))) yes no) + // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (FLessThan cc)))) yes no) // result: (FLT cc yes no) for b.Controls[0].Op == OpPPC64CMPWconst { v_0 := b.Controls[0] @@ -18426,18 +19121,22 @@ func rewriteBlockPPC64(b *Block) bool { break } 
v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 { + if v_0_0.Op != OpSelect0 { break } v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpPPC64FLessThan { + if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { break } - cc := v_0_0_0.Args[0] + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpPPC64FLessThan { + break + } + cc := v_0_0_0_0.Args[0] b.resetWithControl(BlockPPC64FLT, cc) return true } - // match: (NE (CMPWconst [0] (ANDconst [1] (FLessEqual cc))) yes no) + // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (FLessEqual cc)))) yes no) // result: (FLE cc yes no) for b.Controls[0].Op == OpPPC64CMPWconst { v_0 := b.Controls[0] @@ -18445,18 +19144,22 @@ func rewriteBlockPPC64(b *Block) bool { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 { + if v_0_0.Op != OpSelect0 { break } v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpPPC64FLessEqual { + if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { break } - cc := v_0_0_0.Args[0] + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpPPC64FLessEqual { + break + } + cc := v_0_0_0_0.Args[0] b.resetWithControl(BlockPPC64FLE, cc) return true } - // match: (NE (CMPWconst [0] (ANDconst [1] (FGreaterThan cc))) yes no) + // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (FGreaterThan cc)))) yes no) // result: (FGT cc yes no) for b.Controls[0].Op == OpPPC64CMPWconst { v_0 := b.Controls[0] @@ -18464,18 +19167,22 @@ func rewriteBlockPPC64(b *Block) bool { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 { + if v_0_0.Op != OpSelect0 { break } v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpPPC64FGreaterThan { + if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { break } - cc := v_0_0_0.Args[0] + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpPPC64FGreaterThan { + break + } + cc := v_0_0_0_0.Args[0] 
b.resetWithControl(BlockPPC64FGT, cc) return true } - // match: (NE (CMPWconst [0] (ANDconst [1] (FGreaterEqual cc))) yes no) + // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [1] (FGreaterEqual cc)))) yes no) // result: (FGE cc yes no) for b.Controls[0].Op == OpPPC64CMPWconst { v_0 := b.Controls[0] @@ -18483,52 +19190,68 @@ func rewriteBlockPPC64(b *Block) bool { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 { + if v_0_0.Op != OpSelect0 { break } v_0_0_0 := v_0_0.Args[0] - if v_0_0_0.Op != OpPPC64FGreaterEqual { + if v_0_0_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 { break } - cc := v_0_0_0.Args[0] + v_0_0_0_0 := v_0_0_0.Args[0] + if v_0_0_0_0.Op != OpPPC64FGreaterEqual { + break + } + cc := v_0_0_0_0.Args[0] b.resetWithControl(BlockPPC64FGE, cc) return true } - // match: (NE (CMPconst [0] (ANDconst [c] x)) yes no) - // result: (NE (ANDCCconst [c] x) yes no) + // match: (NE (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no) + // result: (NE (Select1 (ANDCCconst [c] x)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst { + if v_0_0.Op != OpSelect0 { break } - c := auxIntToInt64(v_0_0.AuxInt) - x := v_0_0.Args[0] - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) - v0.AuxInt = int64ToAuxInt(c) - v0.AddArg(x) + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0_0_0.AuxInt) + x := v_0_0_0.Args[0] + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(c) + v1.AddArg(x) + v0.AddArg(v1) b.resetWithControl(BlockPPC64NE, v0) return true } - // match: (NE (CMPWconst [0] (ANDconst [c] x)) yes no) - // result: (NE (ANDCCconst [c] x) yes no) + // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes 
no) + // result: (NE (Select1 (ANDCCconst [c] x)) yes no) for b.Controls[0].Op == OpPPC64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst { + if v_0_0.Op != OpSelect0 { break } - c := auxIntToInt64(v_0_0.AuxInt) - x := v_0_0.Args[0] - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) - v0.AuxInt = int64ToAuxInt(c) - v0.AddArg(x) + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0_0_0.AuxInt) + x := v_0_0_0.Args[0] + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(c) + v1.AddArg(x) + v0.AddArg(v1) b.resetWithControl(BlockPPC64NE, v0) return true } @@ -18559,41 +19282,53 @@ func rewriteBlockPPC64(b *Block) bool { b.resetWithControl(BlockPPC64NE, cmp) return true } - // match: (NE (CMPconst [0] (ANDconst [c] x)) yes no) - // result: (NE (ANDCCconst [c] x) yes no) + // match: (NE (CMPconst [0] (Select0 (ANDCCconst [c] x))) yes no) + // result: (NE (Select1 (ANDCCconst [c] x)) yes no) for b.Controls[0].Op == OpPPC64CMPconst { v_0 := b.Controls[0] if auxIntToInt64(v_0.AuxInt) != 0 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst { + if v_0_0.Op != OpSelect0 { break } - c := auxIntToInt64(v_0_0.AuxInt) - x := v_0_0.Args[0] - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) - v0.AuxInt = int64ToAuxInt(c) - v0.AddArg(x) + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0_0_0.AuxInt) + x := v_0_0_0.Args[0] + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(c) + v1.AddArg(x) + v0.AddArg(v1) b.resetWithControl(BlockPPC64NE, v0) return true } - // match: (NE (CMPWconst [0] (ANDconst [c] x)) yes no) - // result: 
(NE (ANDCCconst [c] x) yes no) + // match: (NE (CMPWconst [0] (Select0 (ANDCCconst [c] x))) yes no) + // result: (NE (Select1 (ANDCCconst [c] x)) yes no) for b.Controls[0].Op == OpPPC64CMPWconst { v_0 := b.Controls[0] if auxIntToInt32(v_0.AuxInt) != 0 { break } v_0_0 := v_0.Args[0] - if v_0_0.Op != OpPPC64ANDconst { + if v_0_0.Op != OpSelect0 { break } - c := auxIntToInt64(v_0_0.AuxInt) - x := v_0_0.Args[0] - v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags) - v0.AuxInt = int64ToAuxInt(c) - v0.AddArg(x) + v_0_0_0 := v_0_0.Args[0] + if v_0_0_0.Op != OpPPC64ANDCCconst { + break + } + c := auxIntToInt64(v_0_0_0.AuxInt) + x := v_0_0_0.Args[0] + v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags) + v1 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags)) + v1.AuxInt = int64ToAuxInt(c) + v1.AddArg(x) + v0.AddArg(v1) b.resetWithControl(BlockPPC64NE, v0) return true } diff --git a/test/codegen/bool.go b/test/codegen/bool.go index a32423308e..d921b55c06 100644 --- a/test/codegen/bool.go +++ b/test/codegen/bool.go @@ -10,24 +10,64 @@ package codegen func convertNeq0B(x uint8, c bool) bool { // amd64:"ANDL\t[$]1",-"SETNE" + // ppc64:"ANDCC",-"CMPW",-"ISEL" + // ppc64le:"ANDCC",-"CMPW",-"ISEL" + // ppc64le/power9:"ANDCC",-"CMPW",-"ISEL" b := x&1 != 0 return c && b } func convertNeq0W(x uint16, c bool) bool { // amd64:"ANDL\t[$]1",-"SETNE" + // ppc64:"ANDCC",-"CMPW",-"ISEL" + // ppc64le:"ANDCC",-"CMPW",-"ISEL" + // ppc64le/power9:"ANDCC",-"CMPW",-"ISEL" b := x&1 != 0 return c && b } func convertNeq0L(x uint32, c bool) bool { // amd64:"ANDL\t[$]1",-"SETB" + // ppc64:"ANDCC",-"CMPW",-"ISEL" + // ppc64le:"ANDCC",-"CMPW",-"ISEL" + // ppc64le/power9:"ANDCC",-"CMPW",-"ISEL" b := x&1 != 0 return c && b } func convertNeq0Q(x uint64, c bool) bool { // amd64:"ANDL\t[$]1",-"SETB" + // ppc64:"ANDCC",-"CMP",-"ISEL" + // ppc64le:"ANDCC",-"CMP",-"ISEL" + // ppc64le/power9:"ANDCC",-"CMP",-"ISEL" b := x&1 != 0 return c && b } + +func 
convertNeqBool32(x uint32) bool { + // ppc64:"ANDCC",-"CMPW",-"ISEL" + // ppc64le:"ANDCC",-"CMPW",-"ISEL" + // ppc64le/power9:"ANDCC",-"CMPW",-"ISEL" + return x&1 != 0 +} + +func convertEqBool32(x uint32) bool { + // ppc64:"ANDCC",-"CMPW","XOR",-"ISEL" + // ppc64le:"ANDCC",-"CMPW","XOR",-"ISEL" + // ppc64le/power9:"ANDCC","XOR",-"CMPW",-"ISEL" + return x&1 == 0 +} + +func convertNeqBool64(x uint64) bool { + // ppc64:"ANDCC",-"CMP",-"ISEL" + // ppc64le:"ANDCC",-"CMP",-"ISEL" + // ppc64le/power9:"ANDCC",-"CMP",-"ISEL" + return x&1 != 0 +} + +func convertEqBool64(x uint64) bool { + // ppc64:"ANDCC","XOR",-"CMP",-"ISEL" + // ppc64le:"ANDCC","XOR",-"CMP",-"ISEL" + // ppc64le/power9:"ANDCC","XOR",-"CMP",-"ISEL" + return x&1 == 0 +}