// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Lowering arithmetic
(Add(Ptr|64|32|16|8) ...) => (ADD ...)
(Add(64|32)F ...) => (FADD(D|S) ...)

(Sub(Ptr|64|32|16|8) ...) => (SUB ...)
(Sub(64|32)F ...) => (FSUB(D|S) ...)

(Mul64 ...) => (MUL ...)
(Mul64uhilo ...) => (LoweredMuluhilo ...)
(Mul64uover ...) => (LoweredMuluover ...)
(Mul32 ...) => (MULW ...)
(Mul16 x y) => (MULW (SignExt16to32 x) (SignExt16to32 y))
(Mul8 x y) => (MULW (SignExt8to32 x) (SignExt8to32 y))
(Mul(64|32)F ...) => (FMUL(D|S) ...)

(Div(64|32)F ...) => (FDIV(D|S) ...)

(Div64 x y [false]) => (DIV x y)
(Div64u ...) => (DIVU ...)
(Div32 x y [false]) => (DIVW x y)
(Div32u ...) => (DIVUW ...)
(Div16 x y [false]) => (DIVW (SignExt16to32 x) (SignExt16to32 y))
(Div16u x y) => (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y))
(Div8 x y) => (DIVW (SignExt8to32 x) (SignExt8to32 y))
(Div8u x y) => (DIVUW (ZeroExt8to32 x) (ZeroExt8to32 y))

(Hmul64 ...) => (MULH ...)
(Hmul64u ...) => (MULHU ...)
(Hmul32 x y) => (SRAI [32] (MUL (SignExt32to64 x) (SignExt32to64 y)))
(Hmul32u x y) => (SRLI [32] (MUL (ZeroExt32to64 x) (ZeroExt32to64 y)))

(Select0 (Add64carry x y c)) => (ADD (ADD <typ.UInt64> x y) c)
(Select1 (Add64carry x y c)) =>
	(OR (SLTU <typ.UInt64> s:(ADD <typ.UInt64> x y) x) (SLTU <typ.UInt64> (ADD <typ.UInt64> s c) s))

(Select0 (Sub64borrow x y c)) => (SUB (SUB <typ.UInt64> x y) c)
(Select1 (Sub64borrow x y c)) =>
	(OR (SLTU <typ.UInt64> x s:(SUB <typ.UInt64> x y)) (SLTU <typ.UInt64> s (SUB <typ.UInt64> s c)))
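
// Illustrative only (not a rewrite rule): the carry and borrow selections
// above, written as plain Go on uint64 values, where b2u is a hypothetical
// bool-to-0/1 helper.
//
//	s := x + y
//	carry := b2u(s < x) | b2u(s+c < s)     // Select1 of Add64carry
//	d := x - y
//	borrow := b2u(x < d) | b2u(d < d-c)    // Select1 of Sub64borrow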

// (x + y) / 2 => (x / 2) + (y / 2) + (x & y & 1)
(Avg64u <t> x y) => (ADD (ADD <t> (SRLI <t> [1] x) (SRLI <t> [1] y)) (ANDI <t> [1] (AND <t> x y)))
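
// For example (illustrative only): with x = 7 and y = 4 the lowering computes
// (7>>1) + (4>>1) + (7&4&1) = 3 + 2 + 0 = 5 = (7+4)/2, without risking
// overflow in an intermediate x + y.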

(Mod64 x y [false]) => (REM x y)
(Mod64u ...) => (REMU ...)
(Mod32 x y [false]) => (REMW x y)
(Mod32u ...) => (REMUW ...)
(Mod16 x y [false]) => (REMW (SignExt16to32 x) (SignExt16to32 y))
(Mod16u x y) => (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y))
(Mod8 x y) => (REMW (SignExt8to32 x) (SignExt8to32 y))
(Mod8u x y) => (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y))

(And(64|32|16|8) ...) => (AND ...)
(Or(64|32|16|8) ...) => (OR ...)
(Xor(64|32|16|8) ...) => (XOR ...)

(Neg(64|32|16|8) ...) => (NEG ...)
(Neg(64|32)F ...) => (FNEG(D|S) ...)

(Com(64|32|16|8) ...) => (NOT ...)

(Sqrt ...) => (FSQRTD ...)
(Sqrt32 ...) => (FSQRTS ...)

(Copysign ...) => (FSGNJD ...)

(Abs ...) => (FABSD ...)

(FMA ...) => (FMADDD ...)

// Sign and zero extension.

(SignExt8to16 ...) => (MOVBreg ...)
(SignExt8to32 ...) => (MOVBreg ...)
(SignExt8to64 ...) => (MOVBreg ...)
(SignExt16to32 ...) => (MOVHreg ...)
(SignExt16to64 ...) => (MOVHreg ...)
(SignExt32to64 ...) => (MOVWreg ...)

(ZeroExt8to16 ...) => (MOVBUreg ...)
(ZeroExt8to32 ...) => (MOVBUreg ...)
(ZeroExt8to64 ...) => (MOVBUreg ...)
(ZeroExt16to32 ...) => (MOVHUreg ...)
(ZeroExt16to64 ...) => (MOVHUreg ...)
(ZeroExt32to64 ...) => (MOVWUreg ...)

(Cvt32to32F ...) => (FCVTSW ...)
(Cvt32to64F ...) => (FCVTDW ...)
(Cvt64to32F ...) => (FCVTSL ...)
(Cvt64to64F ...) => (FCVTDL ...)

(Cvt32Fto32 ...) => (FCVTWS ...)
(Cvt32Fto64 ...) => (FCVTLS ...)
(Cvt64Fto32 ...) => (FCVTWD ...)
(Cvt64Fto64 ...) => (FCVTLD ...)

(Cvt32Fto64F ...) => (FCVTDS ...)
(Cvt64Fto32F ...) => (FCVTSD ...)

(CvtBoolToUint8 ...) => (Copy ...)

(Round(32|64)F ...) => (LoweredRound(32|64)F ...)

(Slicemask <t> x) => (SRAI [63] (NEG <t> x))

// Truncations
// We ignore the unused high parts of registers, so truncates are just copies.
(Trunc16to8 ...) => (Copy ...)
(Trunc32to8 ...) => (Copy ...)
(Trunc32to16 ...) => (Copy ...)
(Trunc64to8 ...) => (Copy ...)
(Trunc64to16 ...) => (Copy ...)
(Trunc64to32 ...) => (Copy ...)

// Shifts

// SLL only considers the bottom 6 bits of y. If y >= 64, the result should
// always be 0.
//
// Breaking down the operation:
//
// (SLL x y) generates x << (y & 63).
//
// If y < 64, this is the value we want. Otherwise, we want zero.
//
// So, we AND with -1 * uint64(y < 64), which is 0xfffff... if y < 64 and 0 otherwise.
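//
// As a plain-Go sketch (illustrative only; b2u is a hypothetical bool-to-0/1
// helper):
//
//	shifted := x << (y & 63)        // what SLL computes
//	mask := -b2u(uint64(y) < 64)    // all ones if y < 64, 0 otherwise
//	result := shifted & mask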
(Lsh8x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Lsh8x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Lsh8x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Lsh8x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] y)))
(Lsh16x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Lsh16x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Lsh16x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Lsh16x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] y)))
(Lsh32x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Lsh32x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Lsh32x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Lsh32x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] y)))
(Lsh64x8 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Lsh64x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Lsh64x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Lsh64x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))

(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)

// SRL only considers the bottom 6 bits of y, similarly SRLW only considers the
// bottom 5 bits of y. Ensure that the result is always zero if the shift exceeds
// the maximum value. See Lsh above for a detailed description.
(Rsh8Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Rsh8Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Rsh8Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Rsh8Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] y)))
(Rsh16Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Rsh16Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Rsh16Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Rsh16Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
(Rsh32Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt8to64 y))))
(Rsh32Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt16to64 y))))
(Rsh32Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt32to64 y))))
(Rsh32Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t> x y) (Neg32 <t> (SLTIU <t> [32] y)))
(Rsh64Ux8 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
(Rsh64Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
(Rsh64Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
(Rsh64Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))

(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt8to64 x) y)
(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt16to64 x) y)
(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRLW x y)
(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL x y)

// SRA only considers the bottom 6 bits of y, similarly SRAW only considers the
// bottom 5 bits. If y is greater than the maximum value (either 63 or 31
// depending on the instruction), the result of the shift should be either 0
// or -1 based on the sign bit of x.
//
// We implement this by forcing the shift amount to all ones when y exceeds the
// maximum value; the shift instruction then masks that down to the maximum shift.
//
// We OR (uint64(y < 64) - 1) into y before passing it to SRA. This leaves
// us with -1 (0xffff...) if y >= 64. Similarly, we OR (uint64(y < 32) - 1) into y
// before passing it to SRAW.
//
// We don't need to sign-extend the OR result, as it will be at minimum 8 bits,
// more than the 5 or 6 bits SRAW and SRA care about.
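//
// As a plain-Go sketch for the 64-bit case (illustrative only; b2u is a
// hypothetical bool-to-0/1 helper):
//
//	amount := y | (b2u(uint64(y) < 64) - 1)    // y if y < 64, otherwise all ones
//	result := x >> (amount & 63)               // arithmetic shift of signed x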
(Rsh8x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
(Rsh8x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
(Rsh8x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
(Rsh8x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
(Rsh16x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
(Rsh16x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
(Rsh16x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
(Rsh16x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
(Rsh32x8 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt8to64 y)))))
(Rsh32x16 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt16to64 y)))))
(Rsh32x32 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt32to64 y)))))
(Rsh32x64 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] y))))
(Rsh64x8 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
(Rsh64x16 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
(Rsh64x32 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
(Rsh64x64 <t> x y) && !shiftIsBounded(v) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))

(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt8to64 x) y)
(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt16to64 x) y)
(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW x y)
(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA x y)

// Rotates.
(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
(RotateLeft32 <t> x (MOVDconst [c])) => (Or32 (Lsh32x64 <t> x (MOVDconst [c&31])) (Rsh32Ux64 <t> x (MOVDconst [-c&31])))
(RotateLeft64 <t> x (MOVDconst [c])) => (Or64 (Lsh64x64 <t> x (MOVDconst [c&63])) (Rsh64Ux64 <t> x (MOVDconst [-c&63])))
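
// Illustrative only: for a constant c, the rotate is built from the two shifts
// above; e.g. rotating an 8-bit value left by 3 becomes (x << 3) | (x >> 5),
// since -3 & 7 == 5.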

(Less64 ...) => (SLT ...)
(Less32 x y) => (SLT (SignExt32to64 x) (SignExt32to64 y))
(Less16 x y) => (SLT (SignExt16to64 x) (SignExt16to64 y))
(Less8 x y) => (SLT (SignExt8to64 x) (SignExt8to64 y))
(Less64U ...) => (SLTU ...)
(Less32U x y) => (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y))
(Less16U x y) => (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y))
(Less8U x y) => (SLTU (ZeroExt8to64 x) (ZeroExt8to64 y))
(Less(64|32)F ...) => (FLT(D|S) ...)

// Convert x <= y to !(y < x).
(Leq(64|32|16|8) x y) => (Not (Less(64|32|16|8) y x))
(Leq(64|32|16|8)U x y) => (Not (Less(64|32|16|8)U y x))
(Leq(64|32)F ...) => (FLE(D|S) ...)

(EqPtr x y) => (SEQZ (SUB <typ.Uintptr> x y))
(Eq64 x y) => (SEQZ (SUB <x.Type> x y))
(Eq32 x y) && x.Type.IsSigned() => (SEQZ (SUB <x.Type> (SignExt32to64 x) (SignExt32to64 y)))
(Eq32 x y) && !x.Type.IsSigned() => (SEQZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Eq16 x y) => (SEQZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Eq8 x y) => (SEQZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Eq(64|32)F ...) => (FEQ(D|S) ...)

(NeqPtr x y) => (Not (EqPtr x y))
(Neq64 x y) => (Not (Eq64 x y))
(Neq32 x y) => (Not (Eq32 x y))
(Neq16 x y) => (Not (Eq16 x y))
(Neq8 x y) => (Not (Eq8 x y))
(Neq(64|32)F ...) => (FNE(D|S) ...)

// Loads
(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
(Load <t> ptr mem) && ( is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem)
(Load <t> ptr mem) && ( is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && t.IsSigned()) => (MOVWload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem)
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) => (FMOVWload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)

// Stores
(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVDstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (FMOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (FMOVDstore ptr val mem)

// We need to fold MOVaddr into the LD/MOVDstore ops so that the live variable analysis
// knows what variables are being read/written by the ops.
(MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
(MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)

(MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
	(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
(MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
	(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
	(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
	(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
	(MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

(MOVBUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
	(MOVBUload [off1+int32(off2)] {sym} base mem)
(MOVBload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
	(MOVBload [off1+int32(off2)] {sym} base mem)
(MOVHUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
	(MOVHUload [off1+int32(off2)] {sym} base mem)
(MOVHload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
	(MOVHload [off1+int32(off2)] {sym} base mem)
(MOVWUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
	(MOVWUload [off1+int32(off2)] {sym} base mem)
(MOVWload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
	(MOVWload [off1+int32(off2)] {sym} base mem)
(MOVDload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
	(MOVDload [off1+int32(off2)] {sym} base mem)

(MOVBstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
	(MOVBstore [off1+int32(off2)] {sym} base val mem)
(MOVHstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
	(MOVHstore [off1+int32(off2)] {sym} base val mem)
(MOVWstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
	(MOVWstore [off1+int32(off2)] {sym} base val mem)
(MOVDstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
	(MOVDstore [off1+int32(off2)] {sym} base val mem)
(MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVDstorezero [off1+int32(off2)] {sym} ptr mem)

// Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis
// with OffPtr -> ADDI.
(ADDI [c] (MOVaddr [d] {s} x)) && is32Bit(c+int64(d)) => (MOVaddr [int32(c)+d] {s} x)

// Small zeroing
(Zero [0] _ mem) => mem
(Zero [1] ptr mem) => (MOVBstore ptr (MOVDconst [0]) mem)
(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore ptr (MOVDconst [0]) mem)
(Zero [2] ptr mem) =>
	(MOVBstore [1] ptr (MOVDconst [0])
		(MOVBstore ptr (MOVDconst [0]) mem))
(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
	(MOVWstore ptr (MOVDconst [0]) mem)
(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [2] ptr (MOVDconst [0])
		(MOVHstore ptr (MOVDconst [0]) mem))
(Zero [4] ptr mem) =>
	(MOVBstore [3] ptr (MOVDconst [0])
		(MOVBstore [2] ptr (MOVDconst [0])
			(MOVBstore [1] ptr (MOVDconst [0])
				(MOVBstore ptr (MOVDconst [0]) mem))))
(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 =>
	(MOVDstore ptr (MOVDconst [0]) mem)
(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [4] ptr (MOVDconst [0])
		(MOVWstore ptr (MOVDconst [0]) mem))
(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [6] ptr (MOVDconst [0])
		(MOVHstore [4] ptr (MOVDconst [0])
			(MOVHstore [2] ptr (MOVDconst [0])
				(MOVHstore ptr (MOVDconst [0]) mem))))

(Zero [3] ptr mem) =>
	(MOVBstore [2] ptr (MOVDconst [0])
		(MOVBstore [1] ptr (MOVDconst [0])
			(MOVBstore ptr (MOVDconst [0]) mem)))
(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [4] ptr (MOVDconst [0])
		(MOVHstore [2] ptr (MOVDconst [0])
			(MOVHstore ptr (MOVDconst [0]) mem)))
(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [8] ptr (MOVDconst [0])
		(MOVWstore [4] ptr (MOVDconst [0])
			(MOVWstore ptr (MOVDconst [0]) mem)))
(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 =>
	(MOVDstore [8] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 =>
	(MOVDstore [16] ptr (MOVDconst [0])
		(MOVDstore [8] ptr (MOVDconst [0])
			(MOVDstore ptr (MOVDconst [0]) mem)))
(Zero [32] {t} ptr mem) && t.Alignment()%8 == 0 =>
	(MOVDstore [24] ptr (MOVDconst [0])
		(MOVDstore [16] ptr (MOVDconst [0])
			(MOVDstore [8] ptr (MOVDconst [0])
				(MOVDstore ptr (MOVDconst [0]) mem))))

// Medium 8-aligned zeroing uses a Duff's device
// 8 and 128 are magic constants, see runtime/mkduff.go
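// For example (illustrative only): zeroing s = 64 suitably aligned bytes uses
// offset 8 * (128 - 64/8) = 960, skipping the first 120 entries of the Duff's
// device so that only the final 8 eight-byte stores run (assuming, per the 8
// multiplier, that each generated entry is 8 bytes of code).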
(Zero [s] {t} ptr mem)
	&& s%8 == 0 && s <= 8*128
	&& t.Alignment()%8 == 0 && !config.noDuffDevice =>
	(DUFFZERO [8 * (128 - s/8)] ptr mem)

// Generic zeroing uses a loop
(Zero [s] {t} ptr mem) =>
	(LoweredZero [t.Alignment()]
		ptr
		(ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.Alignment(), config)]))
		mem)

// Checks
(IsNonNil ...) => (SNEZ ...)
(IsInBounds ...) => (Less64U ...)
(IsSliceInBounds ...) => (Leq64U ...)

// Trivial lowering
(NilCheck ...) => (LoweredNilCheck ...)
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)

// Write barrier.
(WB ...) => (LoweredWB ...)

// Publication barrier as intrinsic
(PubBarrier ...) => (LoweredPubBarrier ...)

(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)

// Small moves
(Move [0] _ _ mem) => mem
(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore dst (MOVHload src mem) mem)
(Move [2] dst src mem) =>
	(MOVBstore [1] dst (MOVBload [1] src mem)
		(MOVBstore dst (MOVBload src mem) mem))
(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
	(MOVWstore dst (MOVWload src mem) mem)
(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [2] dst (MOVHload [2] src mem)
		(MOVHstore dst (MOVHload src mem) mem))
(Move [4] dst src mem) =>
	(MOVBstore [3] dst (MOVBload [3] src mem)
		(MOVBstore [2] dst (MOVBload [2] src mem)
			(MOVBstore [1] dst (MOVBload [1] src mem)
				(MOVBstore dst (MOVBload src mem) mem))))
(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 =>
	(MOVDstore dst (MOVDload src mem) mem)
(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [4] dst (MOVWload [4] src mem)
		(MOVWstore dst (MOVWload src mem) mem))
(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [6] dst (MOVHload [6] src mem)
		(MOVHstore [4] dst (MOVHload [4] src mem)
			(MOVHstore [2] dst (MOVHload [2] src mem)
				(MOVHstore dst (MOVHload src mem) mem))))

(Move [3] dst src mem) =>
	(MOVBstore [2] dst (MOVBload [2] src mem)
		(MOVBstore [1] dst (MOVBload [1] src mem)
			(MOVBstore dst (MOVBload src mem) mem)))
(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [4] dst (MOVHload [4] src mem)
		(MOVHstore [2] dst (MOVHload [2] src mem)
			(MOVHstore dst (MOVHload src mem) mem)))
(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [8] dst (MOVWload [8] src mem)
		(MOVWstore [4] dst (MOVWload [4] src mem)
			(MOVWstore dst (MOVWload src mem) mem)))
(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 =>
	(MOVDstore [8] dst (MOVDload [8] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 =>
	(MOVDstore [16] dst (MOVDload [16] src mem)
		(MOVDstore [8] dst (MOVDload [8] src mem)
			(MOVDstore dst (MOVDload src mem) mem)))
(Move [32] {t} dst src mem) && t.Alignment()%8 == 0 =>
	(MOVDstore [24] dst (MOVDload [24] src mem)
		(MOVDstore [16] dst (MOVDload [16] src mem)
			(MOVDstore [8] dst (MOVDload [8] src mem)
				(MOVDstore dst (MOVDload src mem) mem))))

// Medium 8-aligned move uses a Duff's device
// 16 and 128 are magic constants, see runtime/mkduff.go
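// For example (illustrative only): copying s = 64 suitably aligned bytes uses
// offset 16 * (128 - 64/8) = 1920, so only the final 8 copy entries run; the
// 16 multiplier suggests each generated copy entry is 16 bytes of code.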
(Move [s] {t} dst src mem)
	&& s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0
	&& !config.noDuffDevice && logLargeCopy(v, s) =>
	(DUFFCOPY [16 * (128 - s/8)] dst src mem)

// Generic move uses a loop
(Move [s] {t} dst src mem) && (s <= 16 || logLargeCopy(v, s)) =>
	(LoweredMove [t.Alignment()]
		dst
		src
		(ADDI <src.Type> [s-moveSize(t.Alignment(), config)] src)
		mem)

// Boolean ops; 0=false, 1=true
(AndB ...) => (AND ...)
(OrB ...) => (OR ...)
(EqB x y) => (SEQZ (SUB <typ.Bool> x y))
(NeqB x y) => (SNEZ (SUB <typ.Bool> x y))
(Not ...) => (SEQZ ...)

// Lowering pointer arithmetic
// TODO: Special handling for SP offsets, like ARM
(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVaddr [int32(off)] ptr)
(OffPtr [off] ptr) && is32Bit(off) => (ADDI [off] ptr)
(OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr)

(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
(Const32F [val]) => (FMVSX (MOVDconst [int64(math.Float32bits(val))]))
(Const64F [val]) => (FMVDX (MOVDconst [int64(math.Float64bits(val))]))
(ConstNil) => (MOVDconst [0])
(ConstBool [val]) => (MOVDconst [int64(b2i(val))])

(Addr {sym} base) => (MOVaddr {sym} [0] base)
(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVaddr {sym} (SPanchored base mem))
(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (MOVaddr {sym} base)

// Calls
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
(TailCall ...) => (CALLtail ...)

// Atomic Intrinsics
(AtomicLoad(Ptr|64|32|8) ...) => (LoweredAtomicLoad(64|64|32|8) ...)
(AtomicStore(PtrNoWB|64|32|8) ...) => (LoweredAtomicStore(64|64|32|8) ...)
(AtomicAdd(64|32) ...) => (LoweredAtomicAdd(64|32) ...)

// AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3, ^((uint8(val) ^ 0xff) << ((ptr & 3) * 8)))
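// For example (illustrative only): if ptr % 4 == 1 and val == 0xf0, the shift
// is (1&3)*8 = 8 and the mask is ^((0xf0^0xff) << 8) = ^0x0f00 = 0xfffff0ff,
// so the AND clears only the requested bits of byte 1 of the containing
// 32-bit word (RISC-V is little-endian) and leaves the other bytes intact.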
(AtomicAnd8 ptr val mem) =>
	(LoweredAtomicAnd32 (ANDI <typ.Uintptr> [^3] ptr)
		(NOT <typ.UInt32> (SLL <typ.UInt32> (XORI <typ.UInt32> [0xff] (ZeroExt8to32 val))
			(SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr)))) mem)

(AtomicAnd32 ...) => (LoweredAtomicAnd32 ...)

(AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)

(AtomicExchange(64|32) ...) => (LoweredAtomicExchange(64|32) ...)

// AtomicOr8(ptr,val) => LoweredAtomicOr32(ptr&^3, uint32(val)<<((ptr&3)*8))
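// For example (illustrative only): if ptr % 4 == 2 and val == 0x80, the OR
// operand is 0x80 << 16 = 0x00800000, which sets the requested bits in byte 2
// of the containing 32-bit word while the zero bytes leave the rest unchanged.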
(AtomicOr8 ptr val mem) =>
	(LoweredAtomicOr32 (ANDI <typ.Uintptr> [^3] ptr)
		(SLL <typ.UInt32> (ZeroExt8to32 val)
			(SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr))) mem)

(AtomicOr32 ...) => (LoweredAtomicOr32 ...)

// Conditional branches
(If cond yes no) => (BNEZ (MOVBUreg <typ.UInt64> cond) yes no)

// Optimizations

// Absorb SEQZ/SNEZ into branch.
(BEQZ (SEQZ x) yes no) => (BNEZ x yes no)
(BEQZ (SNEZ x) yes no) => (BEQZ x yes no)
(BNEZ (SEQZ x) yes no) => (BEQZ x yes no)
(BNEZ (SNEZ x) yes no) => (BNEZ x yes no)

// Remove redundant NEG from BEQZ/BNEZ.
(BEQZ (NEG x) yes no) => (BEQZ x yes no)
(BNEZ (NEG x) yes no) => (BNEZ x yes no)

// Negate comparison with FNES/FNED.
(BEQZ (FNES <t> x y) yes no) => (BNEZ (FEQS <t> x y) yes no)
(BNEZ (FNES <t> x y) yes no) => (BEQZ (FEQS <t> x y) yes no)
(BEQZ (FNED <t> x y) yes no) => (BNEZ (FEQD <t> x y) yes no)
(BNEZ (FNED <t> x y) yes no) => (BEQZ (FEQD <t> x y) yes no)

// Convert BEQZ/BNEZ into more optimal branch conditions.
(BEQZ (SUB x y) yes no) => (BEQ x y yes no)
(BNEZ (SUB x y) yes no) => (BNE x y yes no)
(BEQZ (SLT x y) yes no) => (BGE x y yes no)
(BNEZ (SLT x y) yes no) => (BLT x y yes no)
(BEQZ (SLTU x y) yes no) => (BGEU x y yes no)
(BNEZ (SLTU x y) yes no) => (BLTU x y yes no)
(BEQZ (SLTI [x] y) yes no) => (BGE y (MOVDconst [x]) yes no)
(BNEZ (SLTI [x] y) yes no) => (BLT y (MOVDconst [x]) yes no)
(BEQZ (SLTIU [x] y) yes no) => (BGEU y (MOVDconst [x]) yes no)
(BNEZ (SLTIU [x] y) yes no) => (BLTU y (MOVDconst [x]) yes no)

// Convert branch with zero to more optimal branch zero.
(BEQ (MOVDconst [0]) cond yes no) => (BEQZ cond yes no)
(BEQ cond (MOVDconst [0]) yes no) => (BEQZ cond yes no)
(BNE (MOVDconst [0]) cond yes no) => (BNEZ cond yes no)
(BNE cond (MOVDconst [0]) yes no) => (BNEZ cond yes no)
(BLT (MOVDconst [0]) cond yes no) => (BGTZ cond yes no)
(BLT cond (MOVDconst [0]) yes no) => (BLTZ cond yes no)
(BGE (MOVDconst [0]) cond yes no) => (BLEZ cond yes no)
(BGE cond (MOVDconst [0]) yes no) => (BGEZ cond yes no)

// Remove redundant NEG from SEQZ/SNEZ.
(SEQZ (NEG x)) => (SEQZ x)
(SNEZ (NEG x)) => (SNEZ x)

// Remove redundant SEQZ/SNEZ.
(SEQZ (SEQZ x)) => (SNEZ x)
(SEQZ (SNEZ x)) => (SEQZ x)
(SNEZ (SEQZ x)) => (SEQZ x)
(SNEZ (SNEZ x)) => (SNEZ x)

// Store zero.
(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem)

// Boolean ops are already extended.
(MOVBUreg x:((FLES|FLTS|FEQS|FNES) _ _)) => x
(MOVBUreg x:((FLED|FLTD|FEQD|FNED) _ _)) => x
(MOVBUreg x:((SEQZ|SNEZ) _)) => x
(MOVBUreg x:((SLT|SLTU) _ _)) => x

// Avoid extending when already sufficiently masked.
(MOVBreg x:(ANDI [c] y)) && c >= 0 && int64(int8(c)) == c => x
(MOVHreg x:(ANDI [c] y)) && c >= 0 && int64(int16(c)) == c => x
(MOVWreg x:(ANDI [c] y)) && c >= 0 && int64(int32(c)) == c => x
(MOVBUreg x:(ANDI [c] y)) && c >= 0 && int64(uint8(c)) == c => x
(MOVHUreg x:(ANDI [c] y)) && c >= 0 && int64(uint16(c)) == c => x
(MOVWUreg x:(ANDI [c] y)) && c >= 0 && int64(uint32(c)) == c => x

// Combine masking and zero extension.
(MOVBUreg (ANDI [c] x)) && c < 0 => (ANDI [int64(uint8(c))] x)
(MOVHUreg (ANDI [c] x)) && c < 0 => (ANDI [int64(uint16(c))] x)
(MOVWUreg (ANDI [c] x)) && c < 0 => (AND (MOVDconst [int64(uint32(c))]) x)

// Avoid sign/zero extension for consts.
(MOVBreg (MOVDconst [c])) => (MOVDconst [int64(int8(c))])
(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
(MOVBUreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))])
(MOVHUreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
(MOVWUreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])

// Avoid sign/zero extension after properly typed load.
(MOVBreg x:(MOVBload _ _)) => (MOVDreg x)
(MOVHreg x:(MOVBload _ _)) => (MOVDreg x)
(MOVHreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVHreg x:(MOVHload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVBload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVHload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVHUload _ _)) => (MOVDreg x)
(MOVWreg x:(MOVWload _ _)) => (MOVDreg x)
(MOVBUreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVHUreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVHUreg x:(MOVHUload _ _)) => (MOVDreg x)
(MOVWUreg x:(MOVBUload _ _)) => (MOVDreg x)
(MOVWUreg x:(MOVHUload _ _)) => (MOVDreg x)
(MOVWUreg x:(MOVWUload _ _)) => (MOVDreg x)

// Avoid zero extension after properly typed atomic operation.
(MOVBUreg x:(Select0 (LoweredAtomicLoad8 _ _))) => (MOVDreg x)
(MOVBUreg x:(Select0 (LoweredAtomicCas32 _ _ _ _))) => (MOVDreg x)
(MOVBUreg x:(Select0 (LoweredAtomicCas64 _ _ _ _))) => (MOVDreg x)

// Avoid sign extension after word arithmetic.
(MOVWreg x:(ADDIW _)) => (MOVDreg x)
(MOVWreg x:(SUBW _ _)) => (MOVDreg x)
(MOVWreg x:(NEGW _)) => (MOVDreg x)
(MOVWreg x:(MULW _ _)) => (MOVDreg x)
(MOVWreg x:(DIVW _ _)) => (MOVDreg x)
(MOVWreg x:(DIVUW _ _)) => (MOVDreg x)
(MOVWreg x:(REMW _ _)) => (MOVDreg x)
(MOVWreg x:(REMUW _ _)) => (MOVDreg x)

// Fold double extensions.
(MOVBreg x:(MOVBreg _)) => (MOVDreg x)
(MOVHreg x:(MOVBreg _)) => (MOVDreg x)
(MOVHreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVHreg x:(MOVHreg _)) => (MOVDreg x)
(MOVWreg x:(MOVBreg _)) => (MOVDreg x)
(MOVWreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVWreg x:(MOVHreg _)) => (MOVDreg x)
(MOVWreg x:(MOVWreg _)) => (MOVDreg x)
(MOVBUreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVHUreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVHUreg x:(MOVHUreg _)) => (MOVDreg x)
(MOVWUreg x:(MOVBUreg _)) => (MOVDreg x)
(MOVWUreg x:(MOVHUreg _)) => (MOVDreg x)
(MOVWUreg x:(MOVWUreg _)) => (MOVDreg x)

// Do not extend before store.
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)

// Replace extend after load with alternate load where possible.
(MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <t> [off] {sym} ptr mem)
(MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHload <t> [off] {sym} ptr mem)
(MOVWreg <t> x:(MOVWUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <t> [off] {sym} ptr mem)
(MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBUload <t> [off] {sym} ptr mem)
(MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHUload <t> [off] {sym} ptr mem)
(MOVWUreg <t> x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWUload <t> [off] {sym} ptr mem)

// If a register move has only 1 use, just use the same register without emitting an instruction.
// MOVDnop does not emit an instruction; it exists only to ensure the type.
(MOVDreg x) && x.Uses == 1 => (MOVDnop x)

// TODO: we should be able to get rid of MOVDnop altogether.
// But for now, this is enough to get rid of lots of them.
(MOVDnop (MOVDconst [c])) => (MOVDconst [c])

// Avoid unnecessary zero and sign extension when right shifting.
(SRAI <t> [x] (MOVWreg y)) && x >= 0 && x <= 31 => (SRAIW <t> [int64(x)] y)
(SRLI <t> [x] (MOVWUreg y)) && x >= 0 && x <= 31 => (SRLIW <t> [int64(x)] y)

// Replace right shifts that exceed size of signed type.
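//
// For example (illustrative only): for an 8-bit signed value, any arithmetic
// right shift by 8 or more leaves only copies of the sign bit, so the first
// rule below turns (SRAI [x] (MOVBreg y)) with x >= 8 into
// (SRAI [63] (SLLI [56] y)), which moves the low byte to the top of the
// register and then smears its sign bit across all 64 bits.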
(SRAI <t> [x] (MOVBreg y)) && x >= 8 => (SRAI [63] (SLLI <t> [56] y))
(SRAI <t> [x] (MOVHreg y)) && x >= 16 => (SRAI [63] (SLLI <t> [48] y))
(SRAI <t> [x] (MOVWreg y)) && x >= 32 => (SRAIW [31] y)

// Eliminate right shifts that exceed size of unsigned type.
(SRLI <t> [x] (MOVBUreg y)) && x >= 8 => (MOVDconst <t> [0])
(SRLI <t> [x] (MOVHUreg y)) && x >= 16 => (MOVDconst <t> [0])
(SRLI <t> [x] (MOVWUreg y)) && x >= 32 => (MOVDconst <t> [0])

// Fold constant into immediate instructions where possible.
(ADD (MOVDconst <t> [val]) x) && is32Bit(val) && !t.IsPtr() => (ADDI [val] x)
(AND (MOVDconst [val]) x) && is32Bit(val) => (ANDI [val] x)
(OR (MOVDconst [val]) x) && is32Bit(val) => (ORI [val] x)
(XOR (MOVDconst [val]) x) && is32Bit(val) => (XORI [val] x)
(SLL x (MOVDconst [val])) => (SLLI [int64(val&63)] x)
(SRL x (MOVDconst [val])) => (SRLI [int64(val&63)] x)
(SRLW x (MOVDconst [val])) => (SRLIW [int64(val&31)] x)
(SRA x (MOVDconst [val])) => (SRAI [int64(val&63)] x)
(SRAW x (MOVDconst [val])) => (SRAIW [int64(val&31)] x)
(SLT x (MOVDconst [val])) && val >= -2048 && val <= 2047 => (SLTI [val] x)
(SLTU x (MOVDconst [val])) && val >= -2048 && val <= 2047 => (SLTIU [val] x)

// Convert const subtraction into ADDI with negative immediate, where possible.
(SUB x (MOVDconst [val])) && is32Bit(-val) => (ADDI [-val] x)
(SUB <t> (MOVDconst [val]) y) && is32Bit(-val) => (NEG (ADDI <t> [-val] y))

// Subtraction of zero.
(SUB x (MOVDconst [0])) => x
(SUBW x (MOVDconst [0])) => (ADDIW [0] x)

// Subtraction from zero.
(SUB (MOVDconst [0]) x) => (NEG x)
(SUBW (MOVDconst [0]) x) => (NEGW x)

// Fold negation into subtraction.
(NEG (SUB x y)) => (SUB y x)
(NEG <t> s:(ADDI [val] (SUB x y))) && s.Uses == 1 && is32Bit(-val) => (ADDI [-val] (SUB <t> y x))

// Double negation.
(NEG (NEG x)) => x

// Addition of zero or two constants.
(ADDI [0] x) => x
(ADDI [x] (MOVDconst [y])) && is32Bit(x + y) => (MOVDconst [x + y])

// ANDI with all zeros, all ones or two constants.
(ANDI [0] x) => (MOVDconst [0])
(ANDI [-1] x) => x
(ANDI [x] (MOVDconst [y])) => (MOVDconst [x & y])

// ORI with all zeroes, all ones or two constants.
(ORI [0] x) => x
(ORI [-1] x) => (MOVDconst [-1])
(ORI [x] (MOVDconst [y])) => (MOVDconst [x | y])

// Combine operations with immediate.
(ADDI [x] (ADDI [y] z)) && is32Bit(x + y) => (ADDI [x + y] z)
(ANDI [x] (ANDI [y] z)) => (ANDI [x & y] z)
(ORI [x] (ORI [y] z)) => (ORI [x | y] z)

// Negation of a constant.
(NEG (MOVDconst [x])) => (MOVDconst [-x])
(NEGW (MOVDconst [x])) => (MOVDconst [int64(int32(-x))])

// Shift of a constant.
(SLLI [x] (MOVDconst [y])) && is32Bit(y << uint32(x)) => (MOVDconst [y << uint32(x)])
(SRLI [x] (MOVDconst [y])) => (MOVDconst [int64(uint64(y) >> uint32(x))])
(SRAI [x] (MOVDconst [y])) => (MOVDconst [int64(y) >> uint32(x)])

// SLTI/SLTIU with constants.
(SLTI [x] (MOVDconst [y])) => (MOVDconst [b2i(int64(y) < int64(x))])
(SLTIU [x] (MOVDconst [y])) => (MOVDconst [b2i(uint64(y) < uint64(x))])

// SLTI/SLTIU with known outcomes.
(SLTI [x] (ANDI [y] _)) && y >= 0 && int64(y) < int64(x) => (MOVDconst [1])
(SLTIU [x] (ANDI [y] _)) && y >= 0 && uint64(y) < uint64(x) => (MOVDconst [1])
(SLTI [x] (ORI [y] _)) && y >= 0 && int64(y) >= int64(x) => (MOVDconst [0])
(SLTIU [x] (ORI [y] _)) && y >= 0 && uint64(y) >= uint64(x) => (MOVDconst [0])

// SLT/SLTU with known outcomes.
(SLT x x) => (MOVDconst [0])
(SLTU x x) => (MOVDconst [0])

// Deadcode for LoweredMuluhilo
(Select0 m:(LoweredMuluhilo x y)) && m.Uses == 1 => (MULHU x y)
(Select1 m:(LoweredMuluhilo x y)) && m.Uses == 1 => (MUL x y)

(FADD(S|D) a (FMUL(S|D) x y)) && a.Block.Func.useFMA(v) => (FMADD(S|D) x y a)
(FSUB(S|D) a (FMUL(S|D) x y)) && a.Block.Func.useFMA(v) => (FNMSUB(S|D) x y a)
(FSUB(S|D) (FMUL(S|D) x y) a) && a.Block.Func.useFMA(v) => (FMSUB(S|D) x y a)

// Merge negation into fused multiply-add and multiply-subtract.
//
// Key:
//
//   [+ -](x * y [+ -] z).
//    _ N          A S
//                 D U
//                 D B
//
// Note: multiplication commutativity handled by rule generator.
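//
// For example (illustrative only): (FMADDS (FNEGS x) y z) computes
// (-x)*y + z == -(x*y) + z, which is FNMSUBS x y z, i.e. the MADD -> NMSUB
// mapping in the first rule below.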
(F(MADD|NMADD|MSUB|NMSUB)S neg:(FNEGS x) y z) && neg.Uses == 1 => (F(NMSUB|MSUB|NMADD|MADD)S x y z)
(F(MADD|NMADD|MSUB|NMSUB)S x y neg:(FNEGS z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)S x y z)
(F(MADD|NMADD|MSUB|NMSUB)D neg:(FNEGD x) y z) && neg.Uses == 1 => (F(NMSUB|MSUB|NMADD|MADD)D x y z)
(F(MADD|NMADD|MSUB|NMSUB)D x y neg:(FNEGD z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)D x y z)