//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86ISelLowering.h"
#include "MCTargetDesc/X86ShuffleDecode.h"
#include "X86.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86IntrinsicsInfo.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <bitset>
#include <cctype>
#include <numeric>
using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<int> ExperimentalPrefLoopAlignment(
    "x86-experimental-pref-loop-alignment", cl::init(4),
    cl::desc(
        "Sets the preferable loop alignment for experiments (as log2 bytes)"
        "(the last x86-experimental-pref-loop-alignment bits"
        " of the loop header PC will be 0)."),
    cl::Hidden);

static cl::opt<bool> MulConstantOptimization(
    "mul-constant-optimization", cl::init(true),
    cl::desc("Replace 'mul x, Const' with more effective instructions like "
             "SHIFT, LEA, etc."),
    cl::Hidden);

static cl::opt<bool> ExperimentalUnorderedISEL(
    "x86-experimental-unordered-atomic-isel", cl::init(false),
    cl::desc("Use LoadSDNode and StoreSDNode instead of "
             "AtomicSDNode for unordered atomic loads and "
             "stores respectively."),
    cl::Hidden);

/// Call this when the user attempts to do something unsupported, like
/// returning a double without SSE2 enabled on x86_64. This is not fatal, unlike
/// report_fatal_error, so calling code should attempt to recover without
/// crashing.
static void errorUnsupported(SelectionDAG &DAG, const SDLoc &dl,
                             const char *Msg) {
  MachineFunction &MF = DAG.getMachineFunction();
  DAG.getContext()->diagnose(
      DiagnosticInfoUnsupported(MF.getFunction(), Msg, dl.getDebugLoc()));
}

X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                                     const X86Subtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {
  bool UseX87 = !Subtarget.useSoftFloat() && Subtarget.hasX87();
  X86ScalarSSEf64 = Subtarget.hasSSE2();
  X86ScalarSSEf32 = Subtarget.hasSSE1();
  MVT PtrVT = MVT::getIntegerVT(TM.getPointerSizeInBits(0));

  // Set up the TargetLowering object.

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget.isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget.is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Bypass expensive divides and use cheaper ones.
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget.hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget.hasSlowDivide64() && Subtarget.is64Bit())
      addBypassSlowDiv(64, 32);
  }
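  // addBypassSlowDiv(N, M) arranges a run-time check: when both operands of
  // an N-bit divide happen to fit in M bits, the much cheaper M-bit divide
  // instruction is used instead.
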
  if (Subtarget.isTargetWindowsMSVC() ||
      Subtarget.isTargetWindowsItanium()) {
    // Setup Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);
  }

  if (Subtarget.getTargetTriple().isOSMSVCRT()) {
    // MSVCRT doesn't have powi; fall back to pow
    setLibcallName(RTLIB::POWI_F32, nullptr);
    setLibcallName(RTLIB::POWI_F64, nullptr);
  }

  // If we don't have cmpxchg8b (meaning this is a 386/486), limit atomic size
  // to 32 bits so the AtomicExpandPass will expand it so we don't need
  // cmpxchg8b.
  // FIXME: Should we be limiting the atomic size on other configs? Default is
  // 1024.
  if (!Subtarget.hasCmpxchg8b())
    setMaxAtomicSizeInBitsSupported(32);

  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget.is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
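  // (There is no i1 load instruction; promoting the extending load turns it
  // into an ordinary i8 load whose result feeds the extension.)
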
  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8,  Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8,  Expand);
  setTruncStoreAction(MVT::i16, MVT::i8,  Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  for (auto VT : {MVT::f32, MVT::f64, MVT::f80}) {
    setCondCodeAction(ISD::SETOEQ, VT, Expand);
    setCondCodeAction(ISD::SETUNE, VT, Expand);
  }
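  // (ucomiss/fucomi set ZF both for "equal" and for "unordered", so OEQ must
  // check ZF=1 and PF=0, and UNE must check ZF=0 or PF=1 -- two flag tests,
  // which is why these condition codes are expanded.)
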
  if (Subtarget.hasCMov()) {
    setOperationAction(ISD::ABS, MVT::i16, Custom);
    setOperationAction(ISD::ABS, MVT::i32, Custom);
    if (Subtarget.is64Bit())
      setOperationAction(ISD::ABS, MVT::i64, Custom);
  }

  for (auto ShiftOp : {ISD::FSHL, ISD::FSHR}) {
    // For slow shld targets we only lower for code size.
    LegalizeAction ShiftDoubleAction = Subtarget.isSHLDSlow() ? Custom : Legal;

    setOperationAction(ShiftOp, MVT::i8, Custom);
    setOperationAction(ShiftOp, MVT::i16, Custom);
    setOperationAction(ShiftOp, MVT::i32, ShiftDoubleAction);
    if (Subtarget.is64Bit())
      setOperationAction(ShiftOp, MVT::i64, ShiftDoubleAction);
  }

  if (!Subtarget.useSoftFloat()) {
    // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
    // operation.
    setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i8, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i16, Promote);
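    // (For example, a u16 -> f32 convert is widened to an s32 -> f32 convert,
    // which is exact since every u16 value is a non-negative s32 value.)
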
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD or VCVTUSI2SS/SD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i32, Custom);
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::i64, Custom);

    // Promote i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
    // this operation.
    setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i8, Promote);
    // SSE has no i16 to fp conversion, only i32. We promote in the handler
    // to allow f80 to use i16 and f64 to use i16 with sse1 only
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i16, Custom);
    // f32 and f64 cases are Legal with SSE1/SSE2, f80 case is not
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i32, Custom);
    // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
    // are Legal, f80 is custom lowered.
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::i64, Custom);

    // Promote i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
    // this operation.
    setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
    // FIXME: This doesn't generate invalid exception when it should. PR44019.
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i8, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
    // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
    // are Legal, f80 is custom lowered.
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i64, Custom);

    // Handle FP_TO_UINT by promoting the destination to a larger signed
    // conversion.
    setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
    // FIXME: This doesn't generate invalid exception when it should. PR44019.
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i8, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
    // FIXME: This doesn't generate invalid exception when it should. PR44019.
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i64, Custom);

    setOperationAction(ISD::LRINT, MVT::f32, Custom);
    setOperationAction(ISD::LRINT, MVT::f64, Custom);
    setOperationAction(ISD::LLRINT, MVT::f32, Custom);
    setOperationAction(ISD::LLRINT, MVT::f64, Custom);

    if (!Subtarget.is64Bit()) {
      setOperationAction(ISD::LRINT, MVT::i64, Custom);
      setOperationAction(ISD::LLRINT, MVT::i64, Custom);
    }
  }

  // Handle address space casts between mixed sized pointers.
  setOperationAction(ISD::ADDRSPACECAST, MVT::i32, Custom);
  setOperationAction(ISD::ADDRSPACECAST, MVT::i64, Custom);

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::BITCAST, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  } else if (!Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST, MVT::i64, Custom);

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
  }
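  // (x86's div/idiv compute both results at once -- for 32 bits, the quotient
  // lands in EAX and the remainder in EDX -- so after expansion to the
  // *DIVREM nodes a nearby x/y and x%y pair CSEs into a single divide.)
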
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128,
                   MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::BR_CC, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
  }
  if (Subtarget.is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FREM, MVT::f128, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationPromotedToType(ISD::CTTZ, MVT::i8, MVT::i32);
  setOperationPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  if (!Subtarget.hasBMI()) {
    setOperationAction(ISD::CTTZ, MVT::i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::i32, Custom);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Legal);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Legal);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::CTTZ, MVT::i64, Custom);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Legal);
    }
  }
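  // (Without BMI only BSF is available, and BSF leaves its destination
  // undefined for a zero input, so plain CTTZ needs a Custom zero check;
  // BMI's TZCNT is fully defined and needs no such guard.)
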
  if (Subtarget.hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);
    setOperationPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  } else {
    for (auto VT : {MVT::i8, MVT::i16, MVT::i32, MVT::i64}) {
      if (VT == MVT::i64 && !Subtarget.is64Bit())
        continue;
      setOperationAction(ISD::CTLZ, VT, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Custom);
    }
  }
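  // (Without LZCNT, CTLZ is built from BSR, which numbers bits from the
  // opposite end and is likewise undefined for a zero input, hence Custom.)
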
  for (auto Op : {ISD::FP16_TO_FP, ISD::STRICT_FP16_TO_FP, ISD::FP_TO_FP16,
                  ISD::STRICT_FP_TO_FP16}) {
    // Special handling for half-precision floating point conversions.
    // If we don't have F16C support, then lower half float conversions
    // into library calls.
    setOperationAction(
        Op, MVT::f32,
        (!Subtarget.useSoftFloat() && Subtarget.hasF16C()) ? Custom : Expand);
    // There's never any support for operations beyond MVT::f32.
    setOperationAction(Op, MVT::f64, Expand);
    setOperationAction(Op, MVT::f80, Expand);
    setOperationAction(Op, MVT::f128, Expand);
  }

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f80, MVT::f16, Expand);
  setTruncStoreAction(MVT::f128, MVT::f16, Expand);

  if (Subtarget.hasPOPCNT()) {
    setOperationPromotedToType(ISD::CTPOP, MVT::i8, MVT::i32);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i8, Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    if (Subtarget.is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
    else
      setOperationAction(ISD::CTPOP, MVT::i64, Custom);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  if (!Subtarget.hasMOVBE())
    setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // X86 wants to expand cmov itself.
  for (auto VT : { MVT::f32, MVT::f64, MVT::f80, MVT::f128 }) {
    setOperationAction(ISD::SELECT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
    setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
    setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
  }
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::SELECT, VT, Custom);
    setOperationAction(ISD::SETCC, VT, Custom);
  }

  // Custom action for SELECT MMX and expand action for SELECT_CC MMX
  setOperationAction(ISD::SELECT, MVT::x86mmx, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::x86mmx, Expand);

  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP are not recommended, since
  // LLVM/Clang supports zero-cost DWARF and SEH exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
  setOperationAction(ISD::EH_SJLJ_SETUP_DISPATCH, MVT::Other, Custom);
  if (TM.Options.ExceptionModel == ExceptionHandling::SjLj)
    setLibcallName(RTLIB::UNWIND_RESUME, "_Unwind_SjLj_Resume");

  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::ConstantPool, VT, Custom);
    setOperationAction(ISD::JumpTable, VT, Custom);
    setOperationAction(ISD::GlobalAddress, VT, Custom);
    setOperationAction(ISD::GlobalTLSAddress, VT, Custom);
    setOperationAction(ISD::ExternalSymbol, VT, Custom);
    setOperationAction(ISD::BlockAddress, VT, Custom);
  }

  // 64-bit shl, sra, srl (iff 32-bit x86)
  for (auto VT : { MVT::i32, MVT::i64 }) {
    if (VT == MVT::i64 && !Subtarget.is64Bit())
      continue;
    setOperationAction(ISD::SHL_PARTS, VT, Custom);
    setOperationAction(ISD::SRA_PARTS, VT, Custom);
    setOperationAction(ISD::SRL_PARTS, VT, Custom);
  }
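  // (On 32-bit targets an i64 shift is split into two i32 halves; the
  // *_PARTS nodes move bits across the halves and are lowered to SHLD/SHRD
  // sequences.)
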
  if (Subtarget.hasSSEPrefetch() || Subtarget.has3DNow())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Expand certain atomics
  for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_ADD, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_OR, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_XOR, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_AND, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }
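  // (Custom lowering lets an atomicrmw whose result is unused become a plain
  // lock-prefixed instruction -- e.g. "lock add" -- instead of a cmpxchg
  // loop, and rewrites ATOMIC_LOAD_SUB as an add of the negated operand.)
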
  if (!Subtarget.is64Bit())
    setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);

  if (Subtarget.hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  }

  // FIXME - use subtarget debug flags
  if (!Subtarget.isTargetDarwin() && !Subtarget.isTargetELF() &&
      !Subtarget.isTargetCygMing() && !Subtarget.isTargetWin64() &&
      TM.Options.ExceptionModel != ExceptionHandling::SjLj) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  bool Is64Bit = Subtarget.is64Bit();
  setOperationAction(ISD::VAARG, MVT::Other, Is64Bit ? Custom : Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Is64Bit ? Custom : Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, PtrVT, Custom);

  // GC_TRANSITION_START and GC_TRANSITION_END need custom lowering.
  setOperationAction(ISD::GC_TRANSITION_START, MVT::Other, Custom);
  setOperationAction(ISD::GC_TRANSITION_END, MVT::Other, Custom);

  if (!Subtarget.useSoftFloat() && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, Subtarget.hasAVX512() ? &X86::FR32XRegClass
                                                     : &X86::FR32RegClass);
    addRegisterClass(MVT::f64, Subtarget.hasAVX512() ? &X86::FR64XRegClass
                                                     : &X86::FR64RegClass);

    // Disable f32->f64 extload as we can only generate this in one instruction
    // under optsize. So it's easier to pattern match (fpext (load)) for that
    // case instead of needing to emit 2 instructions for extload in the
    // non-optsize case.
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);

    for (auto VT : { MVT::f32, MVT::f64 }) {
      // Use ANDPD to simulate FABS.
      setOperationAction(ISD::FABS, VT, Custom);

      // Use XORP to simulate FNEG.
      setOperationAction(ISD::FNEG, VT, Custom);

      // Use ANDPD and ORPD to simulate FCOPYSIGN.
      setOperationAction(ISD::FCOPYSIGN, VT, Custom);

      // These might be better off as horizontal vector ops.
      setOperationAction(ISD::FADD, VT, Custom);
      setOperationAction(ISD::FSUB, VT, Custom);

      // We don't support sin/cos/fmod
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
    }

    // Lower this to MOVMSK plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

  } else if (!Subtarget.useSoftFloat() && X86ScalarSSEf32 &&
             (UseX87 || Is64Bit)) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    if (UseX87)
      addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    if (UseX87)
      setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    if (UseX87)
      setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    if (UseX87) {
      // Always expand sin/cos functions even though x87 has an instruction.
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (UseX87) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    for (auto VT : { MVT::f32, MVT::f64 }) {
      setOperationAction(ISD::UNDEF, VT, Expand);
      setOperationAction(ISD::FCOPYSIGN, VT, Expand);

      // Always expand sin/cos functions even though x87 has an instruction.
      setOperationAction(ISD::FSIN, VT, Expand);
      setOperationAction(ISD::FCOS, VT, Expand);
      setOperationAction(ISD::FSINCOS, VT, Expand);
    }
  }

  // Expand FP32 immediates into loads from the stack, save special cases.
  if (isTypeLegal(MVT::f32)) {
    if (UseX87 && (getRegClassFor(MVT::f32) == &X86::RFP32RegClass)) {
      addLegalFPImmediate(APFloat(+0.0f)); // FLD0
      addLegalFPImmediate(APFloat(+1.0f)); // FLD1
      addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
      addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
    } else // SSE immediates.
      addLegalFPImmediate(APFloat(+0.0f)); // xorps
  }
  // Expand FP64 immediates into loads from the stack, save special cases.
  if (isTypeLegal(MVT::f64)) {
    if (UseX87 && getRegClassFor(MVT::f64) == &X86::RFP64RegClass) {
      addLegalFPImmediate(APFloat(+0.0)); // FLD0
      addLegalFPImmediate(APFloat(+1.0)); // FLD1
      addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
      addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
    } else // SSE immediates.
      addLegalFPImmediate(APFloat(+0.0)); // xorpd
  }
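  // (addLegalFPImmediate records constants the backend can materialize
  // without a constant-pool load: FLD0/FLD1 optionally followed by FCHS on
  // x87, or the xorps/xorpd zeroing idiom on SSE.)
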
  // Handle constrained floating-point operations of scalars.
  setOperationAction(ISD::STRICT_FADD, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FADD, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FSUB, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FMUL, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FDIV, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Legal);
  setOperationAction(ISD::STRICT_FSQRT, MVT::f32, Legal);
  setOperationAction(ISD::STRICT_FSQRT, MVT::f64, Legal);

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // f80 always uses X87.
  if (UseX87) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended());
      addLegalFPImmediate(TmpFlt); // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt); // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended(), APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2); // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
    }

    // Always expand sin/cos functions even though x87 has an instruction.
    setOperationAction(ISD::FSIN, MVT::f80, Expand);
    setOperationAction(ISD::FCOS, MVT::f80, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f80, Expand);

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL, MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT, MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
    setOperationAction(ISD::LROUND, MVT::f80, Expand);
    setOperationAction(ISD::LLROUND, MVT::f80, Expand);
    setOperationAction(ISD::LRINT, MVT::f80, Custom);
    setOperationAction(ISD::LLRINT, MVT::f80, Custom);

    // Handle constrained floating-point operations of scalars.
    setOperationAction(ISD::STRICT_FADD, MVT::f80, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::f80, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::f80, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::f80, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::f80, Legal);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f80, Legal);
    // FIXME: When the target is 64-bit, STRICT_FP_ROUND will be overwritten
    // as Custom anyway.
    setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Legal);
  }

  // f128 uses xmm registers, but most operations require libcalls.
  if (!Subtarget.useSoftFloat() && Subtarget.is64Bit() && Subtarget.hasSSE1()) {
    addRegisterClass(MVT::f128, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                   : &X86::VR128RegClass);

    addLegalFPImmediate(APFloat::getZero(APFloat::IEEEquad())); // xorps

    setOperationAction(ISD::FADD, MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FADD, MVT::f128, LibCall);
    setOperationAction(ISD::FSUB, MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FSUB, MVT::f128, LibCall);
    setOperationAction(ISD::FDIV, MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FDIV, MVT::f128, LibCall);
    setOperationAction(ISD::FMUL, MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FMUL, MVT::f128, LibCall);
    setOperationAction(ISD::FMA, MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FMA, MVT::f128, LibCall);

    setOperationAction(ISD::FABS, MVT::f128, Custom);
    setOperationAction(ISD::FNEG, MVT::f128, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f128, Custom);

    setOperationAction(ISD::FSIN, MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FSIN, MVT::f128, LibCall);
    setOperationAction(ISD::FCOS, MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FCOS, MVT::f128, LibCall);
    setOperationAction(ISD::FSINCOS, MVT::f128, LibCall);

    setOperationAction(ISD::FSQRT, MVT::f128, LibCall);
    setOperationAction(ISD::STRICT_FSQRT, MVT::f128, LibCall);

    setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::f128, Custom);
    // We need to custom handle any FP_ROUND with an f128 input, but
    // LegalizeDAG uses the result type to know when to run a custom handler.
    // So we have to list all legal floating point result types here.
    if (isTypeLegal(MVT::f32)) {
      setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f32, Custom);
    }
    if (isTypeLegal(MVT::f64)) {
      setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f64, Custom);
    }
    if (isTypeLegal(MVT::f80)) {
      setOperationAction(ISD::FP_ROUND, MVT::f80, Custom);
      setOperationAction(ISD::STRICT_FP_ROUND, MVT::f80, Custom);
    }

    setOperationAction(ISD::SETCC, MVT::f128, Custom);

    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f128, MVT::f80, Expand);
    setTruncStoreAction(MVT::f128, MVT::f32, Expand);
    setTruncStoreAction(MVT::f128, MVT::f64, Expand);
    setTruncStoreAction(MVT::f128, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);
  setOperationAction(ISD::FPOW, MVT::f128, Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);

  // Some FP actions are always expanded for vector types.
  for (auto VT : { MVT::v4f32, MVT::v8f32, MVT::v16f32,
                   MVT::v2f64, MVT::v4f64, MVT::v8f64 }) {
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
  }

  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (MVT VT : MVT::fixedlen_vector_valuetypes()) {
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    for (MVT InnerVT : MVT::fixedlen_vector_valuetypes()) {
      setTruncStoreAction(InnerVT, VT, Expand);

      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);

      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
      // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);

      // EXTLOAD for MVT::f16 vectors is not legal because f16 vectors are
      // split/scalarized right now.
      if (VT.getVectorElementType() == MVT::f16)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }

  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!Subtarget.useSoftFloat() && Subtarget.hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) {
    addRegisterClass(MVT::v4f32, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::FABS, MVT::v4f32, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::v4f32, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);

    setOperationAction(ISD::LOAD, MVT::v2f32, Custom);
    setOperationAction(ISD::STORE, MVT::v2f32, Custom);

    setOperationAction(ISD::STRICT_FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::v4f32, Legal);
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {
    addRegisterClass(MVT::v2f64, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, Subtarget.hasVLX() ? &X86::VR128XRegClass
                                                    : &X86::VR128RegClass);

    for (auto VT : { MVT::v2i8, MVT::v4i8, MVT::v8i8,
                     MVT::v2i16, MVT::v4i16, MVT::v2i32 }) {
      setOperationAction(ISD::SDIV, VT, Custom);
      setOperationAction(ISD::SREM, VT, Custom);
      setOperationAction(ISD::UDIV, VT, Custom);
      setOperationAction(ISD::UREM, VT, Custom);
    }

    setOperationAction(ISD::MUL, MVT::v2i8, Custom);
    setOperationAction(ISD::MUL, MVT::v4i8, Custom);
    setOperationAction(ISD::MUL, MVT::v8i8, Custom);

    setOperationAction(ISD::MUL, MVT::v16i8, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::MULHU, MVT::v4i32, Custom);
    setOperationAction(ISD::MULHS, MVT::v4i32, Custom);
    setOperationAction(ISD::MULHU, MVT::v16i8, Custom);
    setOperationAction(ISD::MULHS, MVT::v16i8, Custom);
    setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
    setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
    setOperationAction(ISD::FABS, MVT::v2f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::v2f64, Custom);

    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SMAX, VT, VT == MVT::v8i16 ? Legal : Custom);
      setOperationAction(ISD::SMIN, VT, VT == MVT::v8i16 ? Legal : Custom);
      setOperationAction(ISD::UMAX, VT, VT == MVT::v16i8 ? Legal : Custom);
      setOperationAction(ISD::UMIN, VT, VT == MVT::v16i8 ? Legal : Custom);
    }

    setOperationAction(ISD::UADDSAT, MVT::v16i8, Legal);
    setOperationAction(ISD::SADDSAT, MVT::v16i8, Legal);
    setOperationAction(ISD::USUBSAT, MVT::v16i8, Legal);
    setOperationAction(ISD::SSUBSAT, MVT::v16i8, Legal);
    setOperationAction(ISD::UADDSAT, MVT::v8i16, Legal);
    setOperationAction(ISD::SADDSAT, MVT::v8i16, Legal);
    setOperationAction(ISD::USUBSAT, MVT::v8i16, Legal);
    setOperationAction(ISD::SSUBSAT, MVT::v8i16, Legal);
    setOperationAction(ISD::UADDSAT, MVT::v4i32, Custom);
    setOperationAction(ISD::USUBSAT, MVT::v4i32, Custom);
    setOperationAction(ISD::UADDSAT, MVT::v2i64, Custom);
    setOperationAction(ISD::USUBSAT, MVT::v2i64, Custom);

    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
      setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
      setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::ABS, VT, Custom);

      // The condition codes aren't legal in SSE/AVX and under AVX512 we use
      // setcc all the way to isel and prefer SETGT in some isel patterns.
      setCondCodeAction(ISD::SETLT, VT, Custom);
      setCondCodeAction(ISD::SETLE, VT, Custom);
    }
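    // (SSE/AVX integer compares only provide PCMPEQ/PCMPGT, so e.g. a SETLT
    // is emitted as a SETGT with the operands swapped.)
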
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32 }) {
      setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::VSELECT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    for (auto VT : { MVT::v2f64, MVT::v2i64 }) {
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::VSELECT, VT, Custom);

      if (VT == MVT::v2i64 && !Subtarget.is64Bit())
        continue;

      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i32, Custom);
    setOperationAction(ISD::SELECT, MVT::v8i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v16i8, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::v2i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i32, Custom);

    // Custom legalize these to avoid over promotion or custom promotion.
    for (auto VT : {MVT::v2i8, MVT::v4i8, MVT::v8i8, MVT::v2i16, MVT::v4i16}) {
      setOperationAction(ISD::FP_TO_SINT, VT, Custom);
      setOperationAction(ISD::FP_TO_UINT, VT, Custom);
      setOperationAction(ISD::STRICT_FP_TO_SINT, VT, Custom);
      setOperationAction(ISD::STRICT_FP_TO_UINT, VT, Custom);
    }

    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2i32, Custom);

    setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2i32, Custom);

    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32, Custom);

    // Fast v2f32 UINT_TO_FP( v2i32 ) custom conversion.
    setOperationAction(ISD::SINT_TO_FP, MVT::v2f32, Custom);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v2f32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);
    setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);
    setOperationAction(ISD::STRICT_FP_ROUND, MVT::v2f32, Custom);

    // We want to legalize this to an f64 load rather than an i64 load on
    // 64-bit targets and two 32-bit loads on a 32-bit target. Similar for
    // v2i32/v4i16/v8i8 stores.
    setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4i16, Custom);
    setOperationAction(ISD::LOAD, MVT::v8i8, Custom);
    setOperationAction(ISD::STORE, MVT::v2i32, Custom);
    setOperationAction(ISD::STORE, MVT::v4i16, Custom);
    setOperationAction(ISD::STORE, MVT::v8i8, Custom);

    setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
    if (!Subtarget.hasAVX512())
      setOperationAction(ISD::BITCAST, MVT::v16i1, Custom);

    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v2i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v4i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, MVT::v8i16, Custom);

    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);

    setOperationAction(ISD::TRUNCATE, MVT::v2i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v2i32, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i8, Custom);

    // In the customized shift lowering, the legal v4i32/v2i64 cases
    // in AVX2 will be recognized.
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SRL, VT, Custom);
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
    }

    setOperationAction(ISD::ROTL, MVT::v4i32, Custom);
    setOperationAction(ISD::ROTL, MVT::v8i16, Custom);

    // With 512-bit registers or AVX512VL+BW, expanding (and promoting the
    // shifts) is better.
    if (!Subtarget.useAVX512Regs() &&
        !(Subtarget.hasBWI() && Subtarget.hasVLX()))
      setOperationAction(ISD::ROTL, MVT::v16i8, Custom);

    setOperationAction(ISD::STRICT_FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::v2f64, Legal);
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasSSSE3()) {
    setOperationAction(ISD::ABS, MVT::v16i8, Legal);
    setOperationAction(ISD::ABS, MVT::v8i16, Legal);
    setOperationAction(ISD::ABS, MVT::v4i32, Legal);
    setOperationAction(ISD::BITREVERSE, MVT::v16i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::v16i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::v8i16, Custom);
    setOperationAction(ISD::CTLZ, MVT::v4i32, Custom);
    setOperationAction(ISD::CTLZ, MVT::v2i64, Custom);

    // These might be better off as horizontal vector ops.
    setOperationAction(ISD::ADD, MVT::i16, Custom);
    setOperationAction(ISD::ADD, MVT::i32, Custom);
    setOperationAction(ISD::SUB, MVT::i16, Custom);
    setOperationAction(ISD::SUB, MVT::i32, Custom);
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasSSE41()) {
    for (MVT RoundedTy : {MVT::f32, MVT::f64, MVT::v4f32, MVT::v2f64}) {
      setOperationAction(ISD::FFLOOR, RoundedTy, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, RoundedTy, Legal);
      setOperationAction(ISD::FCEIL, RoundedTy, Legal);
      setOperationAction(ISD::STRICT_FCEIL, RoundedTy, Legal);
      setOperationAction(ISD::FTRUNC, RoundedTy, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, RoundedTy, Legal);
      setOperationAction(ISD::FRINT, RoundedTy, Legal);
      setOperationAction(ISD::STRICT_FRINT, RoundedTy, Legal);
      setOperationAction(ISD::FNEARBYINT, RoundedTy, Legal);
      setOperationAction(ISD::STRICT_FNEARBYINT, RoundedTy, Legal);

      setOperationAction(ISD::FROUND, RoundedTy, Custom);
    }

    setOperationAction(ISD::SMAX, MVT::v16i8, Legal);
    setOperationAction(ISD::SMAX, MVT::v4i32, Legal);
    setOperationAction(ISD::UMAX, MVT::v8i16, Legal);
    setOperationAction(ISD::UMAX, MVT::v4i32, Legal);
    setOperationAction(ISD::SMIN, MVT::v16i8, Legal);
    setOperationAction(ISD::SMIN, MVT::v4i32, Legal);
    setOperationAction(ISD::UMIN, MVT::v8i16, Legal);
    setOperationAction(ISD::UMIN, MVT::v4i32, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    // We directly match byte blends in the backend as they match the VSELECT
    // condition form.
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);

    // SSE41 brings specific instructions for doing vector sign extend even in
    // cases where we don't have SRA.
    for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
      setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Legal);
      setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Legal);
    }

    // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
    for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
      setLoadExtAction(LoadExtOp, MVT::v8i16, MVT::v8i8, Legal);
      setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i8, Legal);
      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i8, Legal);
      setLoadExtAction(LoadExtOp, MVT::v4i32, MVT::v4i16, Legal);
      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i16, Legal);
      setLoadExtAction(LoadExtOp, MVT::v2i64, MVT::v2i32, Legal);
    }

    // i8 vectors are custom because the source register and source
    // memory operand types are not the same width.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);

    if (Subtarget.is64Bit() && !Subtarget.hasAVX512()) {
      // We need to scalarize v4i64->v4f32 uint_to_fp using cvtsi2ss, but we
      // can do the pre and post work in the vector domain.
      setOperationAction(ISD::UINT_TO_FP, MVT::v4i64, Custom);
      setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i64, Custom);
      // We need to mark SINT_TO_FP as Custom even though we want to expand it
      // so that DAG combine doesn't try to turn it into uint_to_fp.
      setOperationAction(ISD::SINT_TO_FP, MVT::v4i64, Custom);
      setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v4i64, Custom);
    }
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasXOP()) {
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
                     MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
      setOperationAction(ISD::ROTL, VT, Custom);

    // XOP can efficiently perform BITREVERSE with VPPERM.
    for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 })
      setOperationAction(ISD::BITREVERSE, VT, Custom);
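    // (Presumably the scalar is moved into an XMM register, bit-reversed
    // with VPPERM, and moved back; the vector forms below use VPPERM
    // directly.)
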
    for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
                     MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 })
      setOperationAction(ISD::BITREVERSE, VT, Custom);
  }

  if (!Subtarget.useSoftFloat() && Subtarget.hasAVX()) {
    bool HasInt256 = Subtarget.hasInt256();

    addRegisterClass(MVT::v32i8, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                    : &X86::VR256RegClass);
    addRegisterClass(MVT::v16i16, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                     : &X86::VR256RegClass);
    addRegisterClass(MVT::v8i32, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                    : &X86::VR256RegClass);
    addRegisterClass(MVT::v8f32, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                    : &X86::VR256RegClass);
    addRegisterClass(MVT::v4i64, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                    : &X86::VR256RegClass);
    addRegisterClass(MVT::v4f64, Subtarget.hasVLX() ? &X86::VR256XRegClass
                                                    : &X86::VR256RegClass);

    for (auto VT : { MVT::v8f32, MVT::v4f64 }) {
      setOperationAction(ISD::FFLOOR, VT, Legal);
      setOperationAction(ISD::STRICT_FFLOOR, VT, Legal);
      setOperationAction(ISD::FCEIL, VT, Legal);
      setOperationAction(ISD::STRICT_FCEIL, VT, Legal);
      setOperationAction(ISD::FTRUNC, VT, Legal);
      setOperationAction(ISD::STRICT_FTRUNC, VT, Legal);
      setOperationAction(ISD::FRINT, VT, Legal);
      setOperationAction(ISD::STRICT_FRINT, VT, Legal);
      setOperationAction(ISD::FNEARBYINT, VT, Legal);
      setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);

      setOperationAction(ISD::FROUND, VT, Custom);

      setOperationAction(ISD::FNEG, VT, Custom);
      setOperationAction(ISD::FABS, VT, Custom);
      setOperationAction(ISD::FCOPYSIGN, VT, Custom);
    }

    // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
    // even though v8i16 is a legal type.
    setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i16, MVT::v8i32);
    setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i16, MVT::v8i32);
    setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v8i16, MVT::v8i32);
    setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i16, MVT::v8i32);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v8i32, Legal);

    setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v8i32, Legal);

    setOperationAction(ISD::STRICT_FP_ROUND, MVT::v4f32, Legal);
    setOperationAction(ISD::STRICT_FADD, MVT::v8f32, Legal);
    setOperationAction(ISD::STRICT_FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::v8f32, Legal);
    setOperationAction(ISD::STRICT_FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::v8f32, Legal);
    setOperationAction(ISD::STRICT_FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::v8f32, Legal);
    setOperationAction(ISD::STRICT_FDIV, MVT::v4f64, Legal);
    setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v4f64, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::STRICT_FSQRT, MVT::v4f64, Legal);

    if (!Subtarget.hasAVX512())
      setOperationAction(ISD::BITCAST, MVT::v32i1, Custom);

    // In the customized shift lowering, the legal v8i32/v4i64 cases
    // in AVX2 will be recognized.
    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
      setOperationAction(ISD::SRL, VT, Custom);
      setOperationAction(ISD::SHL, VT, Custom);
      setOperationAction(ISD::SRA, VT, Custom);
    }

    // These types need custom splitting if their input is a 128-bit vector.
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);

    setOperationAction(ISD::ROTL, MVT::v8i32, Custom);
    setOperationAction(ISD::ROTL, MVT::v16i16, Custom);

    // With BWI, expanding (and promoting the shifts) is better.
    if (!Subtarget.useBWIRegs())
      setOperationAction(ISD::ROTL, MVT::v32i8, Custom);

    setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8i32, Custom);
    setOperationAction(ISD::SELECT, MVT::v16i16, Custom);
    setOperationAction(ISD::SELECT, MVT::v32i8, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f32, Custom);

    for (auto VT : { MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
      setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
      setOperationAction(ISD::ANY_EXTEND, VT, Custom);
    }

    setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
    setOperationAction(ISD::BITREVERSE, MVT::v32i8, Custom);

    for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
      setOperationAction(ISD::SETCC, VT, Custom);
      setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
      setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
      setOperationAction(ISD::CTPOP, VT, Custom);
      setOperationAction(ISD::CTLZ, VT, Custom);

      // The condition codes aren't legal in SSE/AVX and under AVX512 we use
      // setcc all the way to isel and prefer SETGT in some isel patterns.
      setCondCodeAction(ISD::SETLT, VT, Custom);
      setCondCodeAction(ISD::SETLE, VT, Custom);
    }

    if (Subtarget.hasAnyFMA()) {
      for (auto VT : { MVT::f32, MVT::f64, MVT::v4f32, MVT::v8f32,
                       MVT::v2f64, MVT::v4f64 }) {
        setOperationAction(ISD::FMA, VT, Legal);
        setOperationAction(ISD::STRICT_FMA, VT, Legal);
      }
    }

1274 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1275 setOperationAction(ISD::ADD, VT, HasInt256 ? Legal : Custom);
1276 setOperationAction(ISD::SUB, VT, HasInt256 ? Legal : Custom);
1279 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1280 setOperationAction(ISD::MUL, MVT::v8i32, HasInt256 ? Legal : Custom);
1281 setOperationAction(ISD::MUL, MVT::v16i16, HasInt256 ? Legal : Custom);
1282 setOperationAction(ISD::MUL, MVT::v32i8, Custom);
1284 setOperationAction(ISD::MULHU, MVT::v8i32, Custom);
1285 setOperationAction(ISD::MULHS, MVT::v8i32, Custom);
1286 setOperationAction(ISD::MULHU, MVT::v16i16, HasInt256 ? Legal : Custom);
1287 setOperationAction(ISD::MULHS, MVT::v16i16, HasInt256 ? Legal : Custom);
1288 setOperationAction(ISD::MULHU, MVT::v32i8, Custom);
1289 setOperationAction(ISD::MULHS, MVT::v32i8, Custom);
1291 setOperationAction(ISD::ABS, MVT::v4i64, Custom);
1292 setOperationAction(ISD::SMAX, MVT::v4i64, Custom);
1293 setOperationAction(ISD::UMAX, MVT::v4i64, Custom);
1294 setOperationAction(ISD::SMIN, MVT::v4i64, Custom);
1295 setOperationAction(ISD::UMIN, MVT::v4i64, Custom);
1297 setOperationAction(ISD::UADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1298 setOperationAction(ISD::SADDSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1299 setOperationAction(ISD::USUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1300 setOperationAction(ISD::SSUBSAT, MVT::v32i8, HasInt256 ? Legal : Custom);
1301 setOperationAction(ISD::UADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1302 setOperationAction(ISD::SADDSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1303 setOperationAction(ISD::USUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1304 setOperationAction(ISD::SSUBSAT, MVT::v16i16, HasInt256 ? Legal : Custom);
1306 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32 }) {
1307 setOperationAction(ISD::ABS, VT, HasInt256 ? Legal : Custom);
1308 setOperationAction(ISD::SMAX, VT, HasInt256 ? Legal : Custom);
1309 setOperationAction(ISD::UMAX, VT, HasInt256 ? Legal : Custom);
1310 setOperationAction(ISD::SMIN, VT, HasInt256 ? Legal : Custom);
1311 setOperationAction(ISD::UMIN, VT, HasInt256 ? Legal : Custom);
1312 }
1314 for (auto VT : {MVT::v16i16, MVT::v8i32, MVT::v4i64}) {
1315 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1316 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1317 }
1320 // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
1321 // when we have a 256bit-wide blend with immediate.
1322 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1323 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i32, Custom);
1325 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1326 for (auto LoadExtOp : { ISD::SEXTLOAD, ISD::ZEXTLOAD }) {
1327 setLoadExtAction(LoadExtOp, MVT::v16i16, MVT::v16i8, Legal);
1328 setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i8, Legal);
1329 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i8, Legal);
1330 setLoadExtAction(LoadExtOp, MVT::v8i32, MVT::v8i16, Legal);
1331 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i16, Legal);
1332 setLoadExtAction(LoadExtOp, MVT::v4i64, MVT::v4i32, Legal);
1333 }
1336 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1337 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 }) {
1338 setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
1339 setOperationAction(ISD::MSTORE, VT, Legal);
1340 }
1342 // Extract subvector is special because the value type
1343 // (result) is 128-bit but the source is 256-bit wide.
1344 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64,
1345 MVT::v4f32, MVT::v2f64 }) {
1346 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1347 }
1349 // Custom lower several nodes for 256-bit types.
1350 for (MVT VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1351 MVT::v8f32, MVT::v4f64 }) {
1352 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1353 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1354 setOperationAction(ISD::VSELECT, VT, Custom);
1355 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1356 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1357 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1358 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
1359 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1360 setOperationAction(ISD::STORE, VT, Custom);
1361 }
1363 if (HasInt256) {
1364 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1366 // Custom legalize 2x32 to get a little better code.
1367 setOperationAction(ISD::MGATHER, MVT::v2f32, Custom);
1368 setOperationAction(ISD::MGATHER, MVT::v2i32, Custom);
1370 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1371 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1372 setOperationAction(ISD::MGATHER, VT, Custom);
1373 }
1374 }
1376 // This block controls legalization of the mask vector sizes that are
1377 // available with AVX512. 512-bit vectors are in a separate block controlled
1378 // by useAVX512Regs.
1379 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1380 addRegisterClass(MVT::v1i1, &X86::VK1RegClass);
1381 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1382 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1383 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1384 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1386 setOperationAction(ISD::SELECT, MVT::v1i1, Custom);
1387 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v1i1, Custom);
1388 setOperationAction(ISD::BUILD_VECTOR, MVT::v1i1, Custom);
1390 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v8i1, MVT::v8i32);
1391 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v8i1, MVT::v8i32);
1392 setOperationPromotedToType(ISD::FP_TO_SINT, MVT::v4i1, MVT::v4i32);
1393 setOperationPromotedToType(ISD::FP_TO_UINT, MVT::v4i1, MVT::v4i32);
1394 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v8i1, MVT::v8i32);
1395 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v8i1, MVT::v8i32);
1396 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, MVT::v4i1, MVT::v4i32);
1397 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, MVT::v4i1, MVT::v4i32);
1398 setOperationAction(ISD::FP_TO_SINT, MVT::v2i1, Custom);
1399 setOperationAction(ISD::FP_TO_UINT, MVT::v2i1, Custom);
1400 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2i1, Custom);
1401 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i1, Custom);
1403 // There is no byte sized k-register load or store without AVX512DQ.
1404 if (!Subtarget.hasDQI()) {
1405 setOperationAction(ISD::LOAD, MVT::v1i1, Custom);
1406 setOperationAction(ISD::LOAD, MVT::v2i1, Custom);
1407 setOperationAction(ISD::LOAD, MVT::v4i1, Custom);
1408 setOperationAction(ISD::LOAD, MVT::v8i1, Custom);
1410 setOperationAction(ISD::STORE, MVT::v1i1, Custom);
1411 setOperationAction(ISD::STORE, MVT::v2i1, Custom);
1412 setOperationAction(ISD::STORE, MVT::v4i1, Custom);
1413 setOperationAction(ISD::STORE, MVT::v8i1, Custom);
1414 }
1416 // Extends of v16i1/v8i1/v4i1/v2i1 to 128-bit vectors.
1417 for (auto VT : { MVT::v16i8, MVT::v8i16, MVT::v4i32, MVT::v2i64 }) {
1418 setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
1419 setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
1420 setOperationAction(ISD::ANY_EXTEND, VT, Custom);
1421 }
1423 for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) {
1424 setOperationAction(ISD::ADD, VT, Custom);
1425 setOperationAction(ISD::SUB, VT, Custom);
1426 setOperationAction(ISD::MUL, VT, Custom);
1427 setOperationAction(ISD::UADDSAT, VT, Custom);
1428 setOperationAction(ISD::SADDSAT, VT, Custom);
1429 setOperationAction(ISD::USUBSAT, VT, Custom);
1430 setOperationAction(ISD::SSUBSAT, VT, Custom);
1431 setOperationAction(ISD::VSELECT, VT, Expand);
1432 }
1434 for (auto VT : { MVT::v2i1, MVT::v4i1, MVT::v8i1, MVT::v16i1 }) {
1435 setOperationAction(ISD::SETCC, VT, Custom);
1436 setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
1437 setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
1438 setOperationAction(ISD::SELECT, VT, Custom);
1439 setOperationAction(ISD::TRUNCATE, VT, Custom);
1441 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1442 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1443 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1444 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1445 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1446 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1447 }
1449 for (auto VT : { MVT::v1i1, MVT::v2i1, MVT::v4i1, MVT::v8i1 })
1450 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1451 }
1453 // This block controls legalization for 512-bit operations with 32/64 bit
1454 // elements. 512-bits can be disabled based on prefer-vector-width and
1455 // required-vector-width function attributes.
1456 if (!Subtarget.useSoftFloat() && Subtarget.useAVX512Regs()) {
1457 bool HasBWI = Subtarget.hasBWI();
1459 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1460 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1461 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1462 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1463 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1464 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1466 for (auto ExtType : {ISD::ZEXTLOAD, ISD::SEXTLOAD}) {
1467 setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i8, Legal);
1468 setLoadExtAction(ExtType, MVT::v16i32, MVT::v16i16, Legal);
1469 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i8, Legal);
1470 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i16, Legal);
1471 setLoadExtAction(ExtType, MVT::v8i64, MVT::v8i32, Legal);
1473 setLoadExtAction(ExtType, MVT::v32i16, MVT::v32i8, Legal);
1474 }
1476 for (MVT VT : { MVT::v16f32, MVT::v8f64 }) {
1477 setOperationAction(ISD::FNEG, VT, Custom);
1478 setOperationAction(ISD::FABS, VT, Custom);
1479 setOperationAction(ISD::FMA, VT, Legal);
1480 setOperationAction(ISD::STRICT_FMA, VT, Legal);
1481 setOperationAction(ISD::FCOPYSIGN, VT, Custom);
1482 }
1484 for (MVT VT : { MVT::v16i1, MVT::v16i8, MVT::v16i16 }) {
1485 setOperationPromotedToType(ISD::FP_TO_SINT , VT, MVT::v16i32);
1486 setOperationPromotedToType(ISD::FP_TO_UINT , VT, MVT::v16i32);
1487 setOperationPromotedToType(ISD::STRICT_FP_TO_SINT, VT, MVT::v16i32);
1488 setOperationPromotedToType(ISD::STRICT_FP_TO_UINT, VT, MVT::v16i32);
1489 }
1490 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1491 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1492 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v16i32, Legal);
1493 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v16i32, Legal);
1494 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1495 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1496 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v16i32, Legal);
1497 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v16i32, Legal);
1499 setOperationAction(ISD::STRICT_FADD, MVT::v16f32, Legal);
1500 setOperationAction(ISD::STRICT_FADD, MVT::v8f64, Legal);
1501 setOperationAction(ISD::STRICT_FSUB, MVT::v16f32, Legal);
1502 setOperationAction(ISD::STRICT_FSUB, MVT::v8f64, Legal);
1503 setOperationAction(ISD::STRICT_FMUL, MVT::v16f32, Legal);
1504 setOperationAction(ISD::STRICT_FMUL, MVT::v8f64, Legal);
1505 setOperationAction(ISD::STRICT_FDIV, MVT::v16f32, Legal);
1506 setOperationAction(ISD::STRICT_FDIV, MVT::v8f64, Legal);
1507 setOperationAction(ISD::STRICT_FSQRT, MVT::v16f32, Legal);
1508 setOperationAction(ISD::STRICT_FSQRT, MVT::v8f64, Legal);
1509 setOperationAction(ISD::STRICT_FP_EXTEND, MVT::v8f64, Legal);
1510 setOperationAction(ISD::STRICT_FP_ROUND, MVT::v8f32, Legal);
1512 setTruncStoreAction(MVT::v8i64, MVT::v8i8, Legal);
1513 setTruncStoreAction(MVT::v8i64, MVT::v8i16, Legal);
1514 setTruncStoreAction(MVT::v8i64, MVT::v8i32, Legal);
1515 setTruncStoreAction(MVT::v16i32, MVT::v16i8, Legal);
1516 setTruncStoreAction(MVT::v16i32, MVT::v16i16, Legal);
1518 setTruncStoreAction(MVT::v32i16, MVT::v32i8, Legal);
1520 // With 512-bit vectors and no VLX, we prefer to widen MLOAD/MSTORE
1521 // to 512-bit rather than use the AVX2 instructions so that we can use
1522 // k-registers.
1523 if (!Subtarget.hasVLX()) {
1524 for (auto VT : {MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1525 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64}) {
1526 setOperationAction(ISD::MLOAD, VT, Custom);
1527 setOperationAction(ISD::MSTORE, VT, Custom);
1528 }
1529 }
1531 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Legal);
1532 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Legal);
1533 setOperationAction(ISD::TRUNCATE, MVT::v32i8, HasBWI ? Legal : Custom);
1534 setOperationAction(ISD::TRUNCATE, MVT::v16i64, Custom);
1535 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom);
1536 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1537 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1538 setOperationAction(ISD::ANY_EXTEND, MVT::v32i16, Custom);
1539 setOperationAction(ISD::ANY_EXTEND, MVT::v16i32, Custom);
1540 setOperationAction(ISD::ANY_EXTEND, MVT::v8i64, Custom);
1541 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom);
1542 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1543 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1546 // Extends from v64i1 masks to 512-bit vectors.
1547 setOperationAction(ISD::SIGN_EXTEND, MVT::v64i8, Custom);
1548 setOperationAction(ISD::ZERO_EXTEND, MVT::v64i8, Custom);
1549 setOperationAction(ISD::ANY_EXTEND, MVT::v64i8, Custom);
1552 for (auto VT : { MVT::v16f32, MVT::v8f64 }) {
1553 setOperationAction(ISD::FFLOOR, VT, Legal);
1554 setOperationAction(ISD::STRICT_FFLOOR, VT, Legal);
1555 setOperationAction(ISD::FCEIL, VT, Legal);
1556 setOperationAction(ISD::STRICT_FCEIL, VT, Legal);
1557 setOperationAction(ISD::FTRUNC, VT, Legal);
1558 setOperationAction(ISD::STRICT_FTRUNC, VT, Legal);
1559 setOperationAction(ISD::FRINT, VT, Legal);
1560 setOperationAction(ISD::STRICT_FRINT, VT, Legal);
1561 setOperationAction(ISD::FNEARBYINT, VT, Legal);
1562 setOperationAction(ISD::STRICT_FNEARBYINT, VT, Legal);
1564 setOperationAction(ISD::FROUND, VT, Custom);
1565 }
1567 for (auto VT : {MVT::v32i16, MVT::v16i32, MVT::v8i64}) {
1568 setOperationAction(ISD::SIGN_EXTEND_VECTOR_INREG, VT, Custom);
1569 setOperationAction(ISD::ZERO_EXTEND_VECTOR_INREG, VT, Custom);
1570 }
1572 setOperationAction(ISD::ADD, MVT::v32i16, HasBWI ? Legal : Custom);
1573 setOperationAction(ISD::SUB, MVT::v32i16, HasBWI ? Legal : Custom);
1574 setOperationAction(ISD::ADD, MVT::v64i8, HasBWI ? Legal : Custom);
1575 setOperationAction(ISD::SUB, MVT::v64i8, HasBWI ? Legal : Custom);
1577 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1578 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1579 setOperationAction(ISD::MUL, MVT::v32i16, HasBWI ? Legal : Custom);
1580 setOperationAction(ISD::MUL, MVT::v64i8, Custom);
1582 setOperationAction(ISD::MULHU, MVT::v16i32, Custom);
1583 setOperationAction(ISD::MULHS, MVT::v16i32, Custom);
1584 setOperationAction(ISD::MULHS, MVT::v32i16, HasBWI ? Legal : Custom);
1585 setOperationAction(ISD::MULHU, MVT::v32i16, HasBWI ? Legal : Custom);
1586 setOperationAction(ISD::MULHS, MVT::v64i8, Custom);
1587 setOperationAction(ISD::MULHU, MVT::v64i8, Custom);
1589 setOperationAction(ISD::BITREVERSE, MVT::v64i8, Custom);
1591 for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64 }) {
1592 setOperationAction(ISD::SRL, VT, Custom);
1593 setOperationAction(ISD::SHL, VT, Custom);
1594 setOperationAction(ISD::SRA, VT, Custom);
1595 setOperationAction(ISD::SETCC, VT, Custom);
1597 // The condition codes aren't legal in SSE/AVX and under AVX512 we use
1598 // setcc all the way to isel and prefer SETGT in some isel patterns.
1599 setCondCodeAction(ISD::SETLT, VT, Custom);
1600 setCondCodeAction(ISD::SETLE, VT, Custom);
1601 }
1602 for (auto VT : { MVT::v16i32, MVT::v8i64 }) {
1603 setOperationAction(ISD::SMAX, VT, Legal);
1604 setOperationAction(ISD::UMAX, VT, Legal);
1605 setOperationAction(ISD::SMIN, VT, Legal);
1606 setOperationAction(ISD::UMIN, VT, Legal);
1607 setOperationAction(ISD::ABS, VT, Legal);
1608 setOperationAction(ISD::CTPOP, VT, Custom);
1609 setOperationAction(ISD::ROTL, VT, Custom);
1610 setOperationAction(ISD::ROTR, VT, Custom);
1611 setOperationAction(ISD::STRICT_FSETCC, VT, Custom);
1612 setOperationAction(ISD::STRICT_FSETCCS, VT, Custom);
1613 }
1615 for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1616 setOperationAction(ISD::ABS, VT, HasBWI ? Legal : Custom);
1617 setOperationAction(ISD::CTPOP, VT, Subtarget.hasBITALG() ? Legal : Custom);
1618 setOperationAction(ISD::CTLZ, VT, Custom);
1619 setOperationAction(ISD::SMAX, VT, HasBWI ? Legal : Custom);
1620 setOperationAction(ISD::UMAX, VT, HasBWI ? Legal : Custom);
1621 setOperationAction(ISD::SMIN, VT, HasBWI ? Legal : Custom);
1622 setOperationAction(ISD::UMIN, VT, HasBWI ? Legal : Custom);
1623 setOperationAction(ISD::UADDSAT, VT, HasBWI ? Legal : Custom);
1624 setOperationAction(ISD::SADDSAT, VT, HasBWI ? Legal : Custom);
1625 setOperationAction(ISD::USUBSAT, VT, HasBWI ? Legal : Custom);
1626 setOperationAction(ISD::SSUBSAT, VT, HasBWI ? Legal : Custom);
1627 }
1629 if (Subtarget.hasDQI()) {
1630 setOperationAction(ISD::SINT_TO_FP, MVT::v8i64, Legal);
1631 setOperationAction(ISD::UINT_TO_FP, MVT::v8i64, Legal);
1632 setOperationAction(ISD::STRICT_SINT_TO_FP, MVT::v8i64, Legal);
1633 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i64, Legal);
1634 setOperationAction(ISD::FP_TO_SINT, MVT::v8i64, Legal);
1635 setOperationAction(ISD::FP_TO_UINT, MVT::v8i64, Legal);
1636 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v8i64, Legal);
1637 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i64, Legal);
1639 setOperationAction(ISD::MUL, MVT::v8i64, Legal);
1640 }
1642 if (Subtarget.hasCDI()) {
1643 // NonVLX sub-targets extend 128/256 vectors to use the 512 version.
1644 for (auto VT : { MVT::v16i32, MVT::v8i64} ) {
1645 setOperationAction(ISD::CTLZ, VT, Legal);
1646 }
1647 } // Subtarget.hasCDI()
1649 if (Subtarget.hasVPOPCNTDQ()) {
1650 for (auto VT : { MVT::v16i32, MVT::v8i64 })
1651 setOperationAction(ISD::CTPOP, VT, Legal);
1652 }
1654 // Extract subvector is special because the value type
1655 // (result) is 256-bit but the source is 512-bit wide.
1656 // 128-bit was made Legal under AVX1.
1657 for (auto VT : { MVT::v32i8, MVT::v16i16, MVT::v8i32, MVT::v4i64,
1658 MVT::v8f32, MVT::v4f64 })
1659 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1661 for (auto VT : { MVT::v64i8, MVT::v32i16, MVT::v16i32, MVT::v8i64,
1662 MVT::v16f32, MVT::v8f64 }) {
1663 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1664 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Legal);
1665 setOperationAction(ISD::SELECT, VT, Custom);
1666 setOperationAction(ISD::VSELECT, VT, Custom);
1667 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1668 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1669 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1670 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1671 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1672 }
1674 for (auto VT : { MVT::v16i32, MVT::v8i64, MVT::v16f32, MVT::v8f64 }) {
1675 setOperationAction(ISD::MLOAD, VT, Legal);
1676 setOperationAction(ISD::MSTORE, VT, Legal);
1677 setOperationAction(ISD::MGATHER, VT, Custom);
1678 setOperationAction(ISD::MSCATTER, VT, Custom);
1679 }
1681 for (auto VT : { MVT::v64i8, MVT::v32i16 }) {
1682 setOperationAction(ISD::MLOAD, VT, Legal);
1683 setOperationAction(ISD::MSTORE, VT, Legal);
1684 }
1686 setOperationAction(ISD::STORE, MVT::v32i16, Custom);
1687 setOperationAction(ISD::STORE, MVT::v64i8, Custom);
1690 if (Subtarget.hasVBMI2()) {
1691 for (auto VT : { MVT::v32i16, MVT::v16i32, MVT::v8i64 }) {
1692 setOperationAction(ISD::FSHL, VT, Custom);
1693 setOperationAction(ISD::FSHR, VT, Custom);
1694 }
1695 }
1696 }
1698 // This block controls legalization for operations that don't have
1699 // pre-AVX512 equivalents. Without VLX we use 512-bit operations for
1700 // smaller types.
1701 if (!Subtarget.useSoftFloat() && Subtarget.hasAVX512()) {
1702 // These operations are handled on non-VLX by artificially widening in
1703 // isel patterns.
1705 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32,
1706 Subtarget.hasVLX() ? Legal : Custom);
1707 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32,
1708 Subtarget.hasVLX() ? Legal : Custom);
1709 setOperationAction(ISD::FP_TO_UINT, MVT::v2i32, Custom);
1710 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v8i32,
1711 Subtarget.hasVLX() ? Legal : Custom);
1712 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v4i32,
1713 Subtarget.hasVLX() ? Legal : Custom);
1714 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2i32, Custom);
1715 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32,
1716 Subtarget.hasVLX() ? Legal : Custom);
1717 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32,
1718 Subtarget.hasVLX() ? Legal : Custom);
1719 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v8i32,
1720 Subtarget.hasVLX() ? Legal : Custom);
1721 setOperationAction(ISD::STRICT_UINT_TO_FP, MVT::v4i32,
1722 Subtarget.hasVLX() ? Legal : Custom);
1724 if (Subtarget.hasDQI()) {
1725 // Fast v2f32 SINT_TO_FP( v2i64 ) custom conversion.
1726 // v2f32 UINT_TO_FP is already custom under SSE2.
1727 assert(isOperationCustom(ISD::UINT_TO_FP, MVT::v2f32) &&
1728 isOperationCustom(ISD::STRICT_UINT_TO_FP, MVT::v2f32) &&
1729 "Unexpected operation action!");
1730 // v2i64 FP_TO_S/UINT(v2f32) custom conversion.
1731 setOperationAction(ISD::FP_TO_SINT, MVT::v2f32, Custom);
1732 setOperationAction(ISD::FP_TO_UINT, MVT::v2f32, Custom);
1733 setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::v2f32, Custom);
1734 setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::v2f32, Custom);
1735 }
1737 for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
1738 setOperationAction(ISD::SMAX, VT, Legal);
1739 setOperationAction(ISD::UMAX, VT, Legal);
1740 setOperationAction(ISD::SMIN, VT, Legal);
1741 setOperationAction(ISD::UMIN, VT, Legal);
1742 setOperationAction(ISD::ABS, VT, Legal);
1743 }
1745 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
1746 setOperationAction(ISD::ROTL, VT, Custom);
1747 setOperationAction(ISD::ROTR, VT, Custom);
1748 }
1750 // Custom legalize 2x32 to get a little better code.
1751 setOperationAction(ISD::MSCATTER, MVT::v2f32, Custom);
1752 setOperationAction(ISD::MSCATTER, MVT::v2i32, Custom);
1754 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64,
1755 MVT::v4f32, MVT::v8f32, MVT::v2f64, MVT::v4f64 })
1756 setOperationAction(ISD::MSCATTER, VT, Custom);
1758 if (Subtarget.hasDQI()) {
1759 for (auto VT : { MVT::v2i64, MVT::v4i64 }) {
1760 setOperationAction(ISD::SINT_TO_FP, VT,
1761 Subtarget.hasVLX() ? Legal : Custom);
1762 setOperationAction(ISD::UINT_TO_FP, VT,
1763 Subtarget.hasVLX() ? Legal : Custom);
1764 setOperationAction(ISD::STRICT_SINT_TO_FP, VT,
1765 Subtarget.hasVLX() ? Legal : Custom);
1766 setOperationAction(ISD::STRICT_UINT_TO_FP, VT,
1767 Subtarget.hasVLX() ? Legal : Custom);
1768 setOperationAction(ISD::FP_TO_SINT, VT,
1769 Subtarget.hasVLX() ? Legal : Custom);
1770 setOperationAction(ISD::FP_TO_UINT, VT,
1771 Subtarget.hasVLX() ? Legal : Custom);
1772 setOperationAction(ISD::STRICT_FP_TO_SINT, VT,
1773 Subtarget.hasVLX() ? Legal : Custom);
1774 setOperationAction(ISD::STRICT_FP_TO_UINT, VT,
1775 Subtarget.hasVLX() ? Legal : Custom);
1776 setOperationAction(ISD::MUL, VT, Legal);
1777 }
1778 }
1780 if (Subtarget.hasCDI()) {
1781 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 }) {
1782 setOperationAction(ISD::CTLZ, VT, Legal);
1783 }
1784 } // Subtarget.hasCDI()
1786 if (Subtarget.hasVPOPCNTDQ()) {
1787 for (auto VT : { MVT::v4i32, MVT::v8i32, MVT::v2i64, MVT::v4i64 })
1788 setOperationAction(ISD::CTPOP, VT, Legal);
1789 }
1790 }
1792 // This block controls legalization of v32i1/v64i1, which are available with
1793 // AVX512BW. 512-bit v32i16 and v64i8 vector legalization is controlled with
1794 // useBWIRegs.
1795 if (!Subtarget.useSoftFloat() && Subtarget.hasBWI()) {
1796 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1797 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1799 for (auto VT : { MVT::v32i1, MVT::v64i1 }) {
1800 setOperationAction(ISD::ADD, VT, Custom);
1801 setOperationAction(ISD::SUB, VT, Custom);
1802 setOperationAction(ISD::MUL, VT, Custom);
1803 setOperationAction(ISD::VSELECT, VT, Expand);
1804 setOperationAction(ISD::UADDSAT, VT, Custom);
1805 setOperationAction(ISD::SADDSAT, VT, Custom);
1806 setOperationAction(ISD::USUBSAT, VT, Custom);
1807 setOperationAction(ISD::SSUBSAT, VT, Custom);
1809 setOperationAction(ISD::TRUNCATE, VT, Custom);
1810 setOperationAction(ISD::SETCC, VT, Custom);
1811 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1812 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1813 setOperationAction(ISD::SELECT, VT, Custom);
1814 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1815 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1816 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1817 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1818 }
1820 for (auto VT : { MVT::v16i1, MVT::v32i1 })
1821 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1823 // Extends from v32i1 masks to 256-bit vectors.
1824 setOperationAction(ISD::SIGN_EXTEND, MVT::v32i8, Custom);
1825 setOperationAction(ISD::ZERO_EXTEND, MVT::v32i8, Custom);
1826 setOperationAction(ISD::ANY_EXTEND, MVT::v32i8, Custom);
1828 for (auto VT : { MVT::v32i8, MVT::v16i8, MVT::v16i16, MVT::v8i16 }) {
1829 setOperationAction(ISD::MLOAD, VT, Subtarget.hasVLX() ? Legal : Custom);
1830 setOperationAction(ISD::MSTORE, VT, Subtarget.hasVLX() ? Legal : Custom);
1831 }
1833 // These operations are handled on non-VLX by artificially widening in
1834 // isel patterns.
1835 // TODO: Custom widen in lowering on non-VLX and drop the isel patterns?
1837 if (Subtarget.hasBITALG()) {
1838 for (auto VT : { MVT::v16i8, MVT::v32i8, MVT::v8i16, MVT::v16i16 })
1839 setOperationAction(ISD::CTPOP, VT, Legal);
1840 }
1841 }
1843 if (!Subtarget.useSoftFloat() && Subtarget.hasVLX()) {
1844 setTruncStoreAction(MVT::v4i64, MVT::v4i8, Legal);
1845 setTruncStoreAction(MVT::v4i64, MVT::v4i16, Legal);
1846 setTruncStoreAction(MVT::v4i64, MVT::v4i32, Legal);
1847 setTruncStoreAction(MVT::v8i32, MVT::v8i8, Legal);
1848 setTruncStoreAction(MVT::v8i32, MVT::v8i16, Legal);
1850 setTruncStoreAction(MVT::v2i64, MVT::v2i8, Legal);
1851 setTruncStoreAction(MVT::v2i64, MVT::v2i16, Legal);
1852 setTruncStoreAction(MVT::v2i64, MVT::v2i32, Legal);
1853 setTruncStoreAction(MVT::v4i32, MVT::v4i8, Legal);
1854 setTruncStoreAction(MVT::v4i32, MVT::v4i16, Legal);
1856 if (Subtarget.hasBWI()) {
1857 setTruncStoreAction(MVT::v16i16, MVT::v16i8, Legal);
1858 setTruncStoreAction(MVT::v8i16, MVT::v8i8, Legal);
1859 }
1861 if (Subtarget.hasVBMI2()) {
1862 // TODO: Make these legal even without VLX?
1863 for (auto VT : { MVT::v8i16, MVT::v4i32, MVT::v2i64,
1864 MVT::v16i16, MVT::v8i32, MVT::v4i64 }) {
1865 setOperationAction(ISD::FSHL, VT, Custom);
1866 setOperationAction(ISD::FSHR, VT, Custom);
1867 }
1868 }
1870 setOperationAction(ISD::TRUNCATE, MVT::v16i32, Custom);
1871 setOperationAction(ISD::TRUNCATE, MVT::v8i64, Custom);
1872 setOperationAction(ISD::TRUNCATE, MVT::v16i64, Custom);
1873 }
1875 // We want to custom lower some of our intrinsics.
1876 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1877 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1878 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1879 if (!Subtarget.is64Bit()) {
1880 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1881 }
1883 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1884 // handle type legalization for these operations here.
1886 // FIXME: We really should do custom legalization for addition and
1887 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1888 // than generic legalization for 64-bit multiplication-with-overflow, though.
1889 for (auto VT : { MVT::i8, MVT::i16, MVT::i32, MVT::i64 }) {
1890 if (VT == MVT::i64 && !Subtarget.is64Bit())
1891 continue;
1892 // Add/Sub/Mul with overflow operations are custom lowered.
1893 setOperationAction(ISD::SADDO, VT, Custom);
1894 setOperationAction(ISD::UADDO, VT, Custom);
1895 setOperationAction(ISD::SSUBO, VT, Custom);
1896 setOperationAction(ISD::USUBO, VT, Custom);
1897 setOperationAction(ISD::SMULO, VT, Custom);
1898 setOperationAction(ISD::UMULO, VT, Custom);
1900 // Support carry in as value rather than glue.
1901 setOperationAction(ISD::ADDCARRY, VT, Custom);
1902 setOperationAction(ISD::SUBCARRY, VT, Custom);
1903 setOperationAction(ISD::SETCCCARRY, VT, Custom);
1904 }
1906 if (!Subtarget.is64Bit()) {
1907 // These libcalls are not available in 32-bit.
1908 setLibcallName(RTLIB::SHL_I128, nullptr);
1909 setLibcallName(RTLIB::SRL_I128, nullptr);
1910 setLibcallName(RTLIB::SRA_I128, nullptr);
1911 setLibcallName(RTLIB::MUL_I128, nullptr);
1912 }
1914 // Combine sin / cos into _sincos_stret if it is available.
1915 if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
1916 getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
1917 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1918 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1919 }
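// Illustrative sketch (assuming the target provides the libcall, as Darwin
// does): with this action, separate sin(x) and cos(x) calls on the same
// operand can be merged into a single __sincos_stret call that returns
// both results at once.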
1921 if (Subtarget.isTargetWin64()) {
1922 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1923 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1924 setOperationAction(ISD::SREM, MVT::i128, Custom);
1925 setOperationAction(ISD::UREM, MVT::i128, Custom);
1926 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1927 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
1928 }
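// Illustrative (the exact lowering lives in the custom handler): these i128
// operations end up as calls to runtime routines such as __divti3 or
// __umodti3, since there is no native 128-bit divide on Win64.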
1930 // On 32 bit MSVC, `fmodf(f32)` is not defined - only `fmod(f64)`
1931 // is. We should promote the value to 64-bits to solve this.
1932 // This is what the CRT headers do - `fmodf` is an inline header
1933 // function casting to f64 and calling `fmod`.
1934 if (Subtarget.is32Bit() &&
1935 (Subtarget.isTargetWindowsMSVC() || Subtarget.isTargetWindowsItanium()))
1936 for (ISD::NodeType Op :
1937 {ISD::FCEIL, ISD::STRICT_FCEIL,
1938 ISD::FCOS, ISD::STRICT_FCOS,
1939 ISD::FEXP, ISD::STRICT_FEXP,
1940 ISD::FFLOOR, ISD::STRICT_FFLOOR,
1941 ISD::FREM, ISD::STRICT_FREM,
1942 ISD::FLOG, ISD::STRICT_FLOG,
1943 ISD::FLOG10, ISD::STRICT_FLOG10,
1944 ISD::FPOW, ISD::STRICT_FPOW,
1945 ISD::FSIN, ISD::STRICT_FSIN})
1946 if (isOperationExpand(Op, MVT::f32))
1947 setOperationAction(Op, MVT::f32, Promote);
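// For example, a call such as fmodf(x, y) is promoted to the f64 form,
// effectively (float)fmod((double)x, (double)y), mirroring the CRT headers.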
1949 // We have target-specific dag combine patterns for the following nodes:
1950 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1951 setTargetDAGCombine(ISD::SCALAR_TO_VECTOR);
1952 setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);
1953 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1954 setTargetDAGCombine(ISD::CONCAT_VECTORS);
1955 setTargetDAGCombine(ISD::INSERT_SUBVECTOR);
1956 setTargetDAGCombine(ISD::EXTRACT_SUBVECTOR);
1957 setTargetDAGCombine(ISD::BITCAST);
1958 setTargetDAGCombine(ISD::VSELECT);
1959 setTargetDAGCombine(ISD::SELECT);
1960 setTargetDAGCombine(ISD::SHL);
1961 setTargetDAGCombine(ISD::SRA);
1962 setTargetDAGCombine(ISD::SRL);
1963 setTargetDAGCombine(ISD::OR);
1964 setTargetDAGCombine(ISD::AND);
1965 setTargetDAGCombine(ISD::ADD);
1966 setTargetDAGCombine(ISD::FADD);
1967 setTargetDAGCombine(ISD::FSUB);
1968 setTargetDAGCombine(ISD::FNEG);
1969 setTargetDAGCombine(ISD::FMA);
1970 setTargetDAGCombine(ISD::STRICT_FMA);
1971 setTargetDAGCombine(ISD::FMINNUM);
1972 setTargetDAGCombine(ISD::FMAXNUM);
1973 setTargetDAGCombine(ISD::SUB);
1974 setTargetDAGCombine(ISD::LOAD);
1975 setTargetDAGCombine(ISD::MLOAD);
1976 setTargetDAGCombine(ISD::STORE);
1977 setTargetDAGCombine(ISD::MSTORE);
1978 setTargetDAGCombine(ISD::TRUNCATE);
1979 setTargetDAGCombine(ISD::ZERO_EXTEND);
1980 setTargetDAGCombine(ISD::ANY_EXTEND);
1981 setTargetDAGCombine(ISD::SIGN_EXTEND);
1982 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1983 setTargetDAGCombine(ISD::ANY_EXTEND_VECTOR_INREG);
1984 setTargetDAGCombine(ISD::SIGN_EXTEND_VECTOR_INREG);
1985 setTargetDAGCombine(ISD::ZERO_EXTEND_VECTOR_INREG);
1986 setTargetDAGCombine(ISD::SINT_TO_FP);
1987 setTargetDAGCombine(ISD::UINT_TO_FP);
1988 setTargetDAGCombine(ISD::STRICT_SINT_TO_FP);
1989 setTargetDAGCombine(ISD::STRICT_UINT_TO_FP);
1990 setTargetDAGCombine(ISD::SETCC);
1991 setTargetDAGCombine(ISD::MUL);
1992 setTargetDAGCombine(ISD::XOR);
1993 setTargetDAGCombine(ISD::MSCATTER);
1994 setTargetDAGCombine(ISD::MGATHER);
1995 setTargetDAGCombine(ISD::FP16_TO_FP);
1996 setTargetDAGCombine(ISD::FP_EXTEND);
1997 setTargetDAGCombine(ISD::STRICT_FP_EXTEND);
1998 setTargetDAGCombine(ISD::FP_ROUND);
2000 computeRegisterProperties(Subtarget.getRegisterInfo());
2002 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
2003 MaxStoresPerMemsetOptSize = 8;
2004 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
2005 MaxStoresPerMemcpyOptSize = 4;
2006 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
2007 MaxStoresPerMemmoveOptSize = 4;
2009 // TODO: These control memcmp expansion in CGP and could be raised higher, but
2010 // that needs to be benchmarked and balanced with the potential use of vector
2011 // load/store types (PR33329, PR33914).
2012 MaxLoadsPerMemcmp = 2;
2013 MaxLoadsPerMemcmpOptSize = 2;
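// Illustrative: with a limit of 2 loads, a memcmp(a, b, 16) on x86-64 can
// still expand inline to two 8-byte load/compare pairs per buffer rather
// than a libcall; larger sizes fall back to calling memcmp.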
2015 // Set loop alignment to 2^ExperimentalPrefLoopAlignment bytes (default: 2^4).
2016 setPrefLoopAlignment(Align(1ULL << ExperimentalPrefLoopAlignment));
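// For instance, with the default value of 4 this is Align(1ULL << 4), i.e.
// loop headers get aligned to a 16-byte boundary.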
2018 // An out-of-order CPU can speculatively execute past a predictable branch,
2019 // but a conditional move could be stalled by an expensive earlier operation.
2020 PredictableSelectIsExpensive = Subtarget.getSchedModel().isOutOfOrder();
2021 EnableExtLdPromotion = true;
2022 setPrefFunctionAlignment(Align(16));
2024 verifyIntrinsicTables();
2026 // Default to having -disable-strictnode-mutation on
2027 IsStrictFPEnabled = true;
2028 }
2030 // This has so far only been implemented for 64-bit MachO.
2031 bool X86TargetLowering::useLoadStackGuardNode() const {
2032 return Subtarget.isTargetMachO() && Subtarget.is64Bit();
2033 }
2035 bool X86TargetLowering::useStackGuardXorFP() const {
2036 // Currently only MSVC CRTs XOR the frame pointer into the stack guard value.
2037 return Subtarget.getTargetTriple().isOSMSVCRT() && !Subtarget.isTargetMachO();
2038 }
2040 SDValue X86TargetLowering::emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
2041 const SDLoc &DL) const {
2042 EVT PtrTy = getPointerTy(DAG.getDataLayout());
2043 unsigned XorOp = Subtarget.is64Bit() ? X86::XOR64_FP : X86::XOR32_FP;
2044 MachineSDNode *Node = DAG.getMachineNode(XorOp, DL, PtrTy, Val);
2045 return SDValue(Node, 0);
2046 }
2048 TargetLoweringBase::LegalizeTypeAction
2049 X86TargetLowering::getPreferredVectorAction(MVT VT) const {
2050 if ((VT == MVT::v32i1 || VT == MVT::v64i1) && Subtarget.hasAVX512() &&
2051 !Subtarget.hasBWI())
2052 return TypeSplitVector;
2054 if (VT.getVectorNumElements() != 1 &&
2055 VT.getVectorElementType() != MVT::i1)
2056 return TypeWidenVector;
2058 return TargetLoweringBase::getPreferredVectorAction(VT);
2059 }
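// Illustrative examples of the policy above:
//   v32i1 with AVX512F but no BWI -> TypeSplitVector (split into v16i1s)
//   v3i32 (odd element count)     -> TypeWidenVector (widened to v4i32)
//   other vXi1 types              -> whatever the generic hook decides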
2061 static std::pair<MVT, unsigned>
2062 handleMaskRegisterForCallingConv(unsigned NumElts, CallingConv::ID CC,
2063 const X86Subtarget &Subtarget) {
2064 // v2i1/v4i1/v8i1/v16i1 all pass in xmm registers unless the calling
2065 // convention is one that uses k registers.
2066 if (NumElts == 2)
2067 return {MVT::v2i64, 1};
2068 if (NumElts == 4)
2069 return {MVT::v4i32, 1};
2070 if (NumElts == 8 && CC != CallingConv::X86_RegCall &&
2071 CC != CallingConv::Intel_OCL_BI)
2072 return {MVT::v8i16, 1};
2073 if (NumElts == 16 && CC != CallingConv::X86_RegCall &&
2074 CC != CallingConv::Intel_OCL_BI)
2075 return {MVT::v16i8, 1};
2076 // v32i1 passes in ymm unless we have BWI and the calling convention is
2077 // regcall.
2078 if (NumElts == 32 && (!Subtarget.hasBWI() || CC != CallingConv::X86_RegCall))
2079 return {MVT::v32i8, 1};
2080 // Split v64i1 vectors if we don't have v64i8 available.
2081 if (NumElts == 64 && Subtarget.hasBWI() && CC != CallingConv::X86_RegCall) {
2082 if (Subtarget.useAVX512Regs())
2083 return {MVT::v64i8, 1};
2084 return {MVT::v32i8, 2};
2085 }
2087 // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
2088 if (!isPowerOf2_32(NumElts) || (NumElts == 64 && !Subtarget.hasBWI()) ||
2089 NumElts > 64)
2090 return {MVT::i8, NumElts};
2092 return {MVT::INVALID_SIMPLE_VALUE_TYPE, 0};
2093 }
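// Illustrative mapping (assuming the C calling convention, i.e. not
// X86_RegCall / Intel_OCL_BI):
//   v8i1                              -> one v8i16 in an xmm register
//   v32i1                             -> one v32i8 in a ymm register
//   v64i1, BWI, 512-bit regs usable   -> one v64i8 in a zmm register
//   v64i1, BWI, 512-bit regs disabled -> two v32i8 ymm registers
//   v3i1 (non-power-of-2)             -> three i8 scalars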
2095 MVT X86TargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
2096 CallingConv::ID CC,
2097 EVT VT) const {
2098 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
2099 Subtarget.hasAVX512()) {
2100 unsigned NumElts = VT.getVectorNumElements();
2102 MVT RegisterVT;
2103 unsigned NumRegisters;
2104 std::tie(RegisterVT, NumRegisters) =
2105 handleMaskRegisterForCallingConv(NumElts, CC, Subtarget);
2106 if (RegisterVT != MVT::INVALID_SIMPLE_VALUE_TYPE)
2107 return RegisterVT;
2108 }
2110 return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
2111 }
2113 unsigned X86TargetLowering::getNumRegistersForCallingConv(LLVMContext &Context,
2114 CallingConv::ID CC,
2115 EVT VT) const {
2116 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
2117 Subtarget.hasAVX512()) {
2118 unsigned NumElts = VT.getVectorNumElements();
2120 MVT RegisterVT;
2121 unsigned NumRegisters;
2122 std::tie(RegisterVT, NumRegisters) =
2123 handleMaskRegisterForCallingConv(NumElts, CC, Subtarget);
2124 if (RegisterVT != MVT::INVALID_SIMPLE_VALUE_TYPE)
2125 return NumRegisters;
2126 }
2128 return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
2129 }
2131 unsigned X86TargetLowering::getVectorTypeBreakdownForCallingConv(
2132 LLVMContext &Context, CallingConv::ID CC, EVT VT, EVT &IntermediateVT,
2133 unsigned &NumIntermediates, MVT &RegisterVT) const {
2134 // Break wide or odd vXi1 vectors into scalars to match avx2 behavior.
2135 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
2136 Subtarget.hasAVX512() &&
2137 (!isPowerOf2_32(VT.getVectorNumElements()) ||
2138 (VT.getVectorNumElements() == 64 && !Subtarget.hasBWI()) ||
2139 VT.getVectorNumElements() > 64)) {
2140 RegisterVT = MVT::i8;
2141 IntermediateVT = MVT::i1;
2142 NumIntermediates = VT.getVectorNumElements();
2143 return NumIntermediates;
2144 }
2146 // Split v64i1 vectors if we don't have v64i8 available.
2147 if (VT == MVT::v64i1 && Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
2148 CC != CallingConv::X86_RegCall) {
2149 RegisterVT = MVT::v32i8;
2150 IntermediateVT = MVT::v32i1;
2151 NumIntermediates = 2;
2152 return 2;
2153 }
2155 return TargetLowering::getVectorTypeBreakdownForCallingConv(Context, CC, VT, IntermediateVT,
2156 NumIntermediates, RegisterVT);
2157 }
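// Illustrative: v64i1 on a BWI subtarget restricted to 256-bit registers
// (e.g. via prefer-vector-width=256) is broken into two v32i1
// intermediates, each carried in a v32i8 register.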
2159 EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
2160 LLVMContext& Context,
2161 EVT VT) const {
2162 if (!VT.isVector())
2163 return MVT::i8;
2165 if (Subtarget.hasAVX512()) {
2166 const unsigned NumElts = VT.getVectorNumElements();
2168 // Figure out what this type will be legalized to.
2169 EVT LegalVT = VT;
2170 while (getTypeAction(Context, LegalVT) != TypeLegal)
2171 LegalVT = getTypeToTransformTo(Context, LegalVT);
2173 // If we got a 512-bit vector then we'll definitely have a vXi1 compare.
2174 if (LegalVT.getSimpleVT().is512BitVector())
2175 return EVT::getVectorVT(Context, MVT::i1, NumElts);
2177 if (LegalVT.getSimpleVT().isVector() && Subtarget.hasVLX()) {
2178 // If we legalized to less than a 512-bit vector, then we will use a vXi1
2179 // compare for vXi32/vXi64 for sure. If we have BWI we will also support
2180 // vXi16/vXi8.
2181 MVT EltVT = LegalVT.getSimpleVT().getVectorElementType();
2182 if (Subtarget.hasBWI() || EltVT.getSizeInBits() >= 32)
2183 return EVT::getVectorVT(Context, MVT::i1, NumElts);
2184 }
2185 }
2187 return VT.changeVectorElementTypeToInteger();
2188 }
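// Illustrative: a v8i32 compare yields v8i1 on an AVX512VL target, but
// v8i32 (an element-sized bit-mask per lane) when only AVX2 is available.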
2190 /// Helper for getByValTypeAlignment to determine
2191 /// the desired ByVal argument alignment.
2192 static void getMaxByValAlign(Type *Ty, Align &MaxAlign) {
2193 if (MaxAlign == 16)
2194 return;
2195 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
2196 if (VTy->getPrimitiveSizeInBits().getFixedSize() == 128)
2197 MaxAlign = Align(16);
2198 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
2199 Align EltAlign;
2200 getMaxByValAlign(ATy->getElementType(), EltAlign);
2201 if (EltAlign > MaxAlign)
2202 MaxAlign = EltAlign;
2203 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
2204 for (auto *EltTy : STy->elements()) {
2205 Align EltAlign;
2206 getMaxByValAlign(EltTy, EltAlign);
2207 if (EltAlign > MaxAlign)
2208 MaxAlign = EltAlign;
2209 if (MaxAlign == 16)
2210 break;
2211 }
2212 }
2213 }
2215 /// Return the desired alignment for ByVal aggregate
2216 /// function arguments in the caller parameter area. For X86, aggregates
2217 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
2218 /// are at 4-byte boundaries.
2219 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty,
2220 const DataLayout &DL) const {
2221 if (Subtarget.is64Bit()) {
2222 // Max of 8 and alignment of type.
2223 Align TyAlign = DL.getABITypeAlign(Ty);
2224 if (TyAlign > 8)
2225 return TyAlign.value();
2226 return 8;
2227 }
2229 Align Alignment(4);
2230 if (Subtarget.hasSSE1())
2231 getMaxByValAlign(Ty, Alignment);
2232 return Alignment.value();
2233 }
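// Illustrative: on 32-bit x86 with SSE1, a byval struct containing an
// __m128 field is placed at a 16-byte boundary, while a struct of plain
// ints keeps the default 4-byte alignment; on x86-64 the floor is 8.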
2235 /// It returns EVT::Other if the type should be determined using generic
2236 /// target-independent logic.
2237 /// For vector ops we check that the overall size isn't larger than our
2238 /// preferred vector width.
2239 EVT X86TargetLowering::getOptimalMemOpType(
2240 const MemOp &Op, const AttributeList &FuncAttributes) const {
2241 if (!FuncAttributes.hasFnAttribute(Attribute::NoImplicitFloat)) {
2242 if (Op.size() >= 16 &&
2243 (!Subtarget.isUnalignedMem16Slow() || Op.isAligned(Align(16)))) {
2244 // FIXME: Check if unaligned 64-byte accesses are slow.
2245 if (Op.size() >= 64 && Subtarget.hasAVX512() &&
2246 (Subtarget.getPreferVectorWidth() >= 512)) {
2247 return Subtarget.hasBWI() ? MVT::v64i8 : MVT::v16i32;
2248 }
2249 // FIXME: Check if unaligned 32-byte accesses are slow.
2250 if (Op.size() >= 32 && Subtarget.hasAVX() &&
2251 (Subtarget.getPreferVectorWidth() >= 256)) {
2252 // Although this isn't a well-supported type for AVX1, we'll let
2253 // legalization and shuffle lowering produce the optimal codegen. If we
2254 // choose an optimal type with a vector element larger than a byte,
2255 // getMemsetStores() may create an intermediate splat (using an integer
2256 // multiply) before we splat as a vector.
2257 return MVT::v32i8;
2258 }
2259 if (Subtarget.hasSSE2() && (Subtarget.getPreferVectorWidth() >= 128))
2260 return MVT::v16i8;
2261 // TODO: Can SSE1 handle a byte vector?
2262 // If we have SSE1 registers we should be able to use them.
2263 if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87()) &&
2264 (Subtarget.getPreferVectorWidth() >= 128))
2265 return MVT::v4f32;
2266 } else if (((Op.isMemcpy() && !Op.isMemcpyStrSrc()) || Op.isZeroMemset()) &&
2267 Op.size() >= 8 && !Subtarget.is64Bit() && Subtarget.hasSSE2()) {
2268 // Do not use f64 to lower memcpy if source is string constant. It's
2269 // better to use i32 to avoid the loads.
2270 // Also, do not use f64 to lower memset unless this is a memset of zeros.
2271 // The gymnastics of splatting a byte value into an XMM register and then
2272 // only using 8-byte stores (because this is a CPU with slow unaligned
2273 // 16-byte accesses) makes that a loser.
2274 return MVT::f64;
2275 }
2276 }
2277 // This is a compromise. If we reach here, unaligned accesses may be slow on
2278 // this target. However, creating smaller, aligned accesses could be even
2279 // slower and would certainly be a lot more code.
2280 if (Subtarget.is64Bit() && Op.size() >= 8)
2281 return MVT::i64;
2282 return MVT::i32;
2283 }
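// Illustrative outcomes (the exact choice also depends on the subtarget's
// unaligned-access costs): a 64-byte memset picks v64i8 on an AVX512BW
// machine allowed to use 512-bit vectors, v16i8 on plain SSE2, and scalar
// i64/i32 stores when NoImplicitFloat is set.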
2285 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
2286 if (VT == MVT::f32)
2287 return X86ScalarSSEf32;
2288 else if (VT == MVT::f64)
2289 return X86ScalarSSEf64;
2290 return true;
2291 }
2293 bool X86TargetLowering::allowsMisalignedMemoryAccesses(
2294 EVT VT, unsigned, unsigned Align, MachineMemOperand::Flags Flags,
2295 bool *Fast) const {
2296 if (Fast) {
2297 switch (VT.getSizeInBits()) {
2298 default:
2299 // 8-byte and under are always assumed to be fast.
2300 *Fast = true;
2301 break;
2302 case 128:
2303 *Fast = !Subtarget.isUnalignedMem16Slow();
2304 break;
2305 case 256:
2306 *Fast = !Subtarget.isUnalignedMem32Slow();
2307 break;
2308 // TODO: What about AVX-512 (512-bit) accesses?
2309 }
2310 }
2311 // NonTemporal vector memory ops must be aligned.
2312 if (!!(Flags & MachineMemOperand::MONonTemporal) && VT.isVector()) {
2313 // NT loads can only be vector aligned, so if its less aligned than the
2314 // minimum vector size (which we can split the vector down to), we might as
2315 // well use a regular unaligned vector load.
2316 // We don't have any NT loads pre-SSE41.
2317 if (!!(Flags & MachineMemOperand::MOLoad))
2318 return (Align < 16 || !Subtarget.hasSSE41());
2319 return false;
2320 }
2321 // Misaligned accesses of any size are always allowed.
2322 return true;
2323 }
2325 /// Return the entry encoding for a jump table in the
2326 /// current function. The returned value is a member of the
2327 /// MachineJumpTableInfo::JTEntryKind enum.
2328 unsigned X86TargetLowering::getJumpTableEncoding() const {
2329 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
2330 // symbol.
2331 if (isPositionIndependent() && Subtarget.isPICStyleGOT())
2332 return MachineJumpTableInfo::EK_Custom32;
2334 // Otherwise, use the normal jump table encoding heuristics.
2335 return TargetLowering::getJumpTableEncoding();
2336 }
2338 bool X86TargetLowering::useSoftFloat() const {
2339 return Subtarget.useSoftFloat();
2340 }
2342 void X86TargetLowering::markLibCallAttributes(MachineFunction *MF, unsigned CC,
2343 ArgListTy &Args) const {
2345 // Only relabel X86-32 for C / Stdcall CCs.
2346 if (Subtarget.is64Bit())
2347 return;
2348 if (CC != CallingConv::C && CC != CallingConv::X86_StdCall)
2349 return;
2350 unsigned ParamRegs = 0;
2351 if (auto *M = MF->getFunction().getParent())
2352 ParamRegs = M->getNumberRegisterParameters();
2354 // Mark the first N int arguments as being passed in registers.
2355 for (unsigned Idx = 0; Idx < Args.size(); Idx++) {
2356 Type *T = Args[Idx].Ty;
2357 if (T->isIntOrPtrTy())
2358 if (MF->getDataLayout().getTypeAllocSize(T) <= 8) {
2359 unsigned numRegs = 1;
2360 if (MF->getDataLayout().getTypeAllocSize(T) > 4)
2361 numRegs = 2;
2362 if (ParamRegs < numRegs)
2363 return;
2364 ParamRegs -= numRegs;
2365 Args[Idx].IsInReg = true;
2366 }
2367 }
2368 }
2370 const MCExpr *
2371 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
2372 const MachineBasicBlock *MBB,
2373 unsigned uid, MCContext &Ctx) const {
2374 assert(isPositionIndependent() && Subtarget.isPICStyleGOT());
2375 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
2376 // entries.
2377 return MCSymbolRefExpr::create(MBB->getSymbol(),
2378 MCSymbolRefExpr::VK_GOTOFF, Ctx);
2379 }
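// Illustrative: for a jump-table target .LBB0_7, the expression built above
// is emitted as something like
//   .long .LBB0_7@GOTOFF
// i.e. an offset from the GOT base rather than an absolute address.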
2381 /// Returns relocation base for the given PIC jumptable.
2382 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
2383 SelectionDAG &DAG) const {
2384 if (!Subtarget.is64Bit())
2385 // This doesn't have SDLoc associated with it, but is not really the
2386 // same as a Register.
2387 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
2388 getPointerTy(DAG.getDataLayout()));
2389 return Table;
2390 }
2392 /// This returns the relocation base for the given PIC jumptable,
2393 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
2394 const MCExpr *X86TargetLowering::
2395 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
2396 MCContext &Ctx) const {
2397 // X86-64 uses RIP relative addressing based on the jump table label.
2398 if (Subtarget.isPICStyleRIPRel())
2399 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
2401 // Otherwise, the reference is relative to the PIC base.
2402 return MCSymbolRefExpr::create(MF->getPICBaseSymbol(), Ctx);
2403 }
2405 std::pair<const TargetRegisterClass *, uint8_t>
2406 X86TargetLowering::findRepresentativeClass(const TargetRegisterInfo *TRI,
2407 MVT VT) const {
2408 const TargetRegisterClass *RRC = nullptr;
2409 uint8_t Cost = 1;
2410 switch (VT.SimpleTy) {
2411 default:
2412 return TargetLowering::findRepresentativeClass(TRI, VT);
2413 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
2414 RRC = Subtarget.is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
2415 break;
2416 case MVT::x86mmx:
2417 RRC = &X86::VR64RegClass;
2418 break;
2419 case MVT::f32: case MVT::f64:
2420 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
2421 case MVT::v4f32: case MVT::v2f64:
2422 case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
2423 case MVT::v8f32: case MVT::v4f64:
2424 case MVT::v64i8: case MVT::v32i16: case MVT::v16i32: case MVT::v8i64:
2425 case MVT::v16f32: case MVT::v8f64:
2426 RRC = &X86::VR128XRegClass;
2427 break;
2428 }
2429 return std::make_pair(RRC, Cost);
2430 }
2432 unsigned X86TargetLowering::getAddressSpace() const {
2433 if (Subtarget.is64Bit())
2434 return (getTargetMachine().getCodeModel() == CodeModel::Kernel) ? 256 : 257;
2435 return 256;
2436 }
2438 static bool hasStackGuardSlotTLS(const Triple &TargetTriple) {
2439 return TargetTriple.isOSGlibc() || TargetTriple.isOSFuchsia() ||
2440 (TargetTriple.isAndroid() && !TargetTriple.isAndroidVersionLT(17));
2441 }
2443 static Constant* SegmentOffset(IRBuilder<> &IRB,
2444 unsigned Offset, unsigned AddressSpace) {
2445 return ConstantExpr::getIntToPtr(
2446 ConstantInt::get(Type::getInt32Ty(IRB.getContext()), Offset),
2447 Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace));
2448 }
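// Illustrative: SegmentOffset(IRB, 0x28, 257) yields the constant
//   inttoptr (i32 40 to i8 addrspace(257)*)
// where address space 257 denotes %fs (and 256 %gs) on x86, so a load
// through it reads %fs:0x28.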
2450 Value *X86TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const {
2451 // glibc, bionic, and Fuchsia have a special slot for the stack guard in
2452 // tcbhead_t; use it instead of the usual global variable (see
2453 // sysdeps/{i386,x86_64}/nptl/tls.h)
2454 if (hasStackGuardSlotTLS(Subtarget.getTargetTriple())) {
2455 if (Subtarget.isTargetFuchsia()) {
2456 // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value.
2457 return SegmentOffset(IRB, 0x10, getAddressSpace());
2458 }
2459 // %fs:0x28, unless we're using a Kernel code model, in which case
2460 // it's %gs:0x28. gs:0x14 on i386.
2461 unsigned Offset = (Subtarget.is64Bit()) ? 0x28 : 0x14;
2462 return SegmentOffset(IRB, Offset, getAddressSpace());
2463 }
2466 return TargetLowering::getIRStackGuard(IRB);
2467 }
2469 void X86TargetLowering::insertSSPDeclarations(Module &M) const {
2470 // MSVC CRT provides functionalities for stack protection.
2471 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2472 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2473 // MSVC CRT has a global variable holding security cookie.
2474 M.getOrInsertGlobal("__security_cookie",
2475 Type::getInt8PtrTy(M.getContext()));
2477 // MSVC CRT has a function to validate security cookie.
2478 FunctionCallee SecurityCheckCookie = M.getOrInsertFunction(
2479 "__security_check_cookie", Type::getVoidTy(M.getContext()),
2480 Type::getInt8PtrTy(M.getContext()));
2481 if (Function *F = dyn_cast<Function>(SecurityCheckCookie.getCallee())) {
2482 F->setCallingConv(CallingConv::X86_FastCall);
2483 F->addAttribute(1, Attribute::AttrKind::InReg);
2484 }
2485 return;
2486 }
2487 // glibc, bionic, and Fuchsia have a special slot for the stack guard.
2488 if (hasStackGuardSlotTLS(Subtarget.getTargetTriple()))
2489 return;
2490 TargetLowering::insertSSPDeclarations(M);
2491 }
2493 Value *X86TargetLowering::getSDagStackGuard(const Module &M) const {
2494 // MSVC CRT has a global variable holding security cookie.
2495 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2496 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2497 return M.getGlobalVariable("__security_cookie");
2498 }
2499 return TargetLowering::getSDagStackGuard(M);
2500 }
2502 Function *X86TargetLowering::getSSPStackGuardCheck(const Module &M) const {
2503 // MSVC CRT has a function to validate security cookie.
2504 if (Subtarget.getTargetTriple().isWindowsMSVCEnvironment() ||
2505 Subtarget.getTargetTriple().isWindowsItaniumEnvironment()) {
2506 return M.getFunction("__security_check_cookie");
2507 }
2508 return TargetLowering::getSSPStackGuardCheck(M);
2509 }
2511 Value *X86TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const {
2512 if (Subtarget.getTargetTriple().isOSContiki())
2513 return getDefaultSafeStackPointerLocation(IRB, false);
2515 // Android provides a fixed TLS slot for the SafeStack pointer. See the
2516 // definition of TLS_SLOT_SAFESTACK in
2517 // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
2518 if (Subtarget.isTargetAndroid()) {
2519 // %fs:0x48, unless we're using a Kernel code model, in which case it's %gs:
2520 // 0x48. %gs:0x24 on i386.
2521 unsigned Offset = (Subtarget.is64Bit()) ? 0x48 : 0x24;
2522 return SegmentOffset(IRB, Offset, getAddressSpace());
2523 }
2525 // Fuchsia is similar.
2526 if (Subtarget.isTargetFuchsia()) {
2527 // <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value.
2528 return SegmentOffset(IRB, 0x18, getAddressSpace());
2529 }
2531 return TargetLowering::getSafeStackPointerLocation(IRB);
2532 }
2534 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
2535 unsigned DestAS) const {
2536 assert(SrcAS != DestAS && "Expected different address spaces!");
2538 const TargetMachine &TM = getTargetMachine();
2539 if (TM.getPointerSize(SrcAS) != TM.getPointerSize(DestAS))
2540 return false;
2542 return SrcAS < 256 && DestAS < 256;
2543 }
2545 //===----------------------------------------------------------------------===//
2546 // Return Value Calling Convention Implementation
2547 //===----------------------------------------------------------------------===//
2549 bool X86TargetLowering::CanLowerReturn(
2550 CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
2551 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
2552 SmallVector<CCValAssign, 16> RVLocs;
2553 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2554 return CCInfo.CheckReturn(Outs, RetCC_X86);
2555 }
2557 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2558 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2559 return ScratchRegs;
2560 }
2562 /// Lowers masks values (v*i1) to the local register values
2563 /// \returns DAG node after lowering to register type
2564 static SDValue lowerMasksToReg(const SDValue &ValArg, const EVT &ValLoc,
2565 const SDLoc &Dl, SelectionDAG &DAG) {
2566 EVT ValVT = ValArg.getValueType();
2568 if (ValVT == MVT::v1i1)
2569 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, Dl, ValLoc, ValArg,
2570 DAG.getIntPtrConstant(0, Dl));
2572 if ((ValVT == MVT::v8i1 && (ValLoc == MVT::i8 || ValLoc == MVT::i32)) ||
2573 (ValVT == MVT::v16i1 && (ValLoc == MVT::i16 || ValLoc == MVT::i32))) {
2574 // Two stage lowering might be required
2575 // bitcast: v8i1 -> i8 / v16i1 -> i16
2576 // anyextend: i8 -> i32 / i16 -> i32
2577 EVT TempValLoc = ValVT == MVT::v8i1 ? MVT::i8 : MVT::i16;
2578 SDValue ValToCopy = DAG.getBitcast(TempValLoc, ValArg);
2579 if (ValLoc == MVT::i32)
2580 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValToCopy);
2581 return ValToCopy;
2582 }
2584 if ((ValVT == MVT::v32i1 && ValLoc == MVT::i32) ||
2585 (ValVT == MVT::v64i1 && ValLoc == MVT::i64)) {
2586 // One stage lowering is required
2587 // bitcast: v32i1 -> i32 / v64i1 -> i64
2588 return DAG.getBitcast(ValLoc, ValArg);
2589 }
2591 return DAG.getNode(ISD::ANY_EXTEND, Dl, ValLoc, ValArg);
2592 }
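// Illustrative uses of lowerMasksToReg:
//   v1i1  -> i8 : extract element 0
//   v8i1  -> i32: bitcast to i8, then any-extend to i32 (two stages)
//   v32i1 -> i32: a single bitcast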
2594 /// Breaks v64i1 value into two registers and adds the new node to the DAG
2595 static void Passv64i1ArgInRegs(
2596 const SDLoc &Dl, SelectionDAG &DAG, SDValue &Arg,
2597 SmallVectorImpl<std::pair<Register, SDValue>> &RegsToPass, CCValAssign &VA,
2598 CCValAssign &NextVA, const X86Subtarget &Subtarget) {
2599 assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
2600 assert(Subtarget.is32Bit() && "Expecting 32 bit target");
2601 assert(Arg.getValueType() == MVT::i64 && "Expecting 64 bit value");
2602 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
2603 "The value should reside in two registers");
2605 // Before splitting the value we cast it to i64
2606 Arg = DAG.getBitcast(MVT::i64, Arg);
2608 // Splitting the value into two i32 types
2609 SDValue Lo, Hi;
2610 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
2611 DAG.getConstant(0, Dl, MVT::i32));
2612 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, Dl, MVT::i32, Arg,
2613 DAG.getConstant(1, Dl, MVT::i32));
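// Illustrative: if the v64i1 mask bitcasts to the i64 value
// 0x00000001FFFFFFFF, the halves attached below are Lo = 0xFFFFFFFF and
// Hi = 0x00000001.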
2615 // Attach the two i32 types into corresponding registers
2616 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Lo));
2617 RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Hi));
2618 }
2620 SDValue
2621 X86TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
2622 bool isVarArg,
2623 const SmallVectorImpl<ISD::OutputArg> &Outs,
2624 const SmallVectorImpl<SDValue> &OutVals,
2625 const SDLoc &dl, SelectionDAG &DAG) const {
2626 MachineFunction &MF = DAG.getMachineFunction();
2627 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2629 // In some cases we need to disable registers from the default CSR list.
2630 // For example, when they are used for argument passing.
2631 bool ShouldDisableCalleeSavedRegister =
2632 CallConv == CallingConv::X86_RegCall ||
2633 MF.getFunction().hasFnAttribute("no_caller_saved_registers");
2635 if (CallConv == CallingConv::X86_INTR && !Outs.empty())
2636 report_fatal_error("X86 interrupts may not return any value");
2638 SmallVector<CCValAssign, 16> RVLocs;
2639 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2640 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2642 SmallVector<std::pair<Register, SDValue>, 4> RetVals;
2643 for (unsigned I = 0, OutsIndex = 0, E = RVLocs.size(); I != E;
2644 ++I, ++OutsIndex) {
2645 CCValAssign &VA = RVLocs[I];
2646 assert(VA.isRegLoc() && "Can only return in registers!");
2648 // Add the register to the CalleeSaveDisableRegs list.
2649 if (ShouldDisableCalleeSavedRegister)
2650 MF.getRegInfo().disableCalleeSavedRegister(VA.getLocReg());
2652 SDValue ValToCopy = OutVals[OutsIndex];
2653 EVT ValVT = ValToCopy.getValueType();
2655 // Promote values to the appropriate types.
2656 if (VA.getLocInfo() == CCValAssign::SExt)
2657 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2658 else if (VA.getLocInfo() == CCValAssign::ZExt)
2659 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2660 else if (VA.getLocInfo() == CCValAssign::AExt) {
2661 if (ValVT.isVector() && ValVT.getVectorElementType() == MVT::i1)
2662 ValToCopy = lowerMasksToReg(ValToCopy, VA.getLocVT(), dl, DAG);
2663 else
2664 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2665 }
2666 else if (VA.getLocInfo() == CCValAssign::BCvt)
2667 ValToCopy = DAG.getBitcast(VA.getLocVT(), ValToCopy);
2669 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2670 "Unexpected FP-extend for return value.");
2672 // Report an error if we have attempted to return a value via an XMM
2673 // register and SSE was disabled.
2674 if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
2675 errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
2676 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2677 } else if (!Subtarget.hasSSE2() &&
2678 X86::FR64XRegClass.contains(VA.getLocReg()) &&
2679 ValVT == MVT::f64) {
2680 // When returning a double via an XMM register, report an error if SSE2 is
2681 // not enabled.
2682 errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
2683 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
2684 }
2686 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2687 // the RET instruction and handled by the FP Stackifier.
2688 if (VA.getLocReg() == X86::FP0 ||
2689 VA.getLocReg() == X86::FP1) {
2690 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2691 // change the value to the FP stack register class.
2692 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2693 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2694 RetVals.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
2695 // Don't emit a copytoreg.
2696 continue;
2697 }
2699 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2700 // which is returned in RAX / RDX.
2701 if (Subtarget.is64Bit()) {
2702 if (ValVT == MVT::x86mmx) {
2703 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2704 ValToCopy = DAG.getBitcast(MVT::i64, ValToCopy);
2705 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2706 ValToCopy);
2707 // If we don't have SSE2 available, convert to v4f32 so the generated
2708 // register is legal.
2709 if (!Subtarget.hasSSE2())
2710 ValToCopy = DAG.getBitcast(MVT::v4f32, ValToCopy);
2711 }
2712 }
2713 }
2715 if (VA.needsCustom()) {
2716 assert(VA.getValVT() == MVT::v64i1 &&
2717 "Currently the only custom case is when we split v64i1 to 2 regs");
2719 Passv64i1ArgInRegs(dl, DAG, ValToCopy, RetVals, VA, RVLocs[++I],
2720 Subtarget);
2722 // Add the second register to the CalleeSaveDisableRegs list.
2723 if (ShouldDisableCalleeSavedRegister)
2724 MF.getRegInfo().disableCalleeSavedRegister(RVLocs[I].getLocReg());
2725 } else {
2726 RetVals.push_back(std::make_pair(VA.getLocReg(), ValToCopy));
2727 }
2728 }
2730 SDValue Flag;
2731 SmallVector<SDValue, 6> RetOps;
2732 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2733 // Operand #1 = Bytes To Pop
2734 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(), dl,
2735 MVT::i32));
2737 // Copy the result values into the output registers.
2738 for (auto &RetVal : RetVals) {
2739 if (RetVal.first == X86::FP0 || RetVal.first == X86::FP1) {
2740 RetOps.push_back(RetVal.second);
2741 continue; // Don't emit a copytoreg.
2742 }
2744 Chain = DAG.getCopyToReg(Chain, dl, RetVal.first, RetVal.second, Flag);
2745 Flag = Chain.getValue(1);
2746 RetOps.push_back(
2747 DAG.getRegister(RetVal.first, RetVal.second.getValueType()));
2748 }
2750 // Swift calling convention does not require we copy the sret argument
2751 // into %rax/%eax for the return, and SRetReturnReg is not set for Swift.
2753 // All x86 ABIs require that for returning structs by value we copy
2754 // the sret argument into %rax/%eax (depending on ABI) for the return.
2755 // We saved the argument into a virtual register in the entry block,
2756 // so now we copy the value out and into %rax/%eax.
2758 // Checking Function.hasStructRetAttr() here is insufficient because the IR
2759 // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
2760 // false, then an sret argument may be implicitly inserted in the SelDAG. In
2761 // either case FuncInfo->setSRetReturnReg() will have been called.
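// Illustrative IR (not from this file): a struct returned by value, e.g.
//   define void @make(%struct.S* sret %out)
// takes this path; the sret pointer saved on entry is copied back into
// %rax/%eax below so the caller can rely on it after the call.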
2762 if (Register SRetReg = FuncInfo->getSRetReturnReg()) {
2763 // When we have both sret and another return value, we should use the
2764 // original Chain stored in RetOps[0], instead of the current Chain updated
2765 // in the above loop. If we only have sret, RetOps[0] equals Chain.
2767 // For the case of sret and another return value, we have
2768 // Chain_0 at the function entry
2769 // Chain_1 = getCopyToReg(Chain_0) in the above loop
2770 // If we use Chain_1 in getCopyFromReg, we will have
2771 // Val = getCopyFromReg(Chain_1)
2772 // Chain_2 = getCopyToReg(Chain_1, Val) from below
2774 // getCopyToReg(Chain_0) will be glued together with
2775 // getCopyToReg(Chain_1, Val) into Unit A, getCopyFromReg(Chain_1) will be
2776 // in Unit B, and we will have cyclic dependency between Unit A and Unit B:
2777 // Data dependency from Unit B to Unit A due to usage of Val in
2778 // getCopyToReg(Chain_1, Val)
2779 // Chain dependency from Unit A to Unit B
2781 // So here, we use RetOps[0] (i.e. Chain_0) for getCopyFromReg.
2782 SDValue Val = DAG.getCopyFromReg(RetOps[0], dl, SRetReg,
2783 getPointerTy(MF.getDataLayout()));
2785 Register RetValReg
2786 = (Subtarget.is64Bit() && !Subtarget.isTarget64BitILP32()) ?
2787 X86::RAX : X86::EAX;
2788 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2789 Flag = Chain.getValue(1);
2791 // RAX/EAX now acts like a return value.
2792 RetOps.push_back(
2793 DAG.getRegister(RetValReg, getPointerTy(DAG.getDataLayout())));
2795 // Add the returned register to the CalleeSaveDisableRegs list.
2796 if (ShouldDisableCalleeSavedRegister)
2797 MF.getRegInfo().disableCalleeSavedRegister(RetValReg);
2798 }
2800 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
2801 const MCPhysReg *I =
2802 TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
2803 if (I) {
2804 for (; *I; ++I) {
2805 if (X86::GR64RegClass.contains(*I))
2806 RetOps.push_back(DAG.getRegister(*I, MVT::i64));
2807 else
2808 llvm_unreachable("Unexpected register class in CSRsViaCopy!");
2809 }
2810 }
2812 RetOps[0] = Chain; // Update chain.
2814 // Add the flag if we have it.
2815 if (Flag.getNode())
2816 RetOps.push_back(Flag);
2818 X86ISD::NodeType opcode = X86ISD::RET_FLAG;
2819 if (CallConv == CallingConv::X86_INTR)
2820 opcode = X86ISD::IRET;
2821 return DAG.getNode(opcode, dl, MVT::Other, RetOps);
2822 }
2824 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2825 if (N->getNumValues() != 1 || !N->hasNUsesOfValue(1, 0))
2826 return false;
2828 SDValue TCChain = Chain;
2829 SDNode *Copy = *N->use_begin();
2830 if (Copy->getOpcode() == ISD::CopyToReg) {
2831 // If the copy has a glue operand, we conservatively assume it isn't safe to
2832 // perform a tail call.
2833 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2834 return false;
2835 TCChain = Copy->getOperand(0);
2836 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2837 return false;
2839 bool HasRet = false;
2840 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2841 UI != UE; ++UI) {
2842 if (UI->getOpcode() != X86ISD::RET_FLAG)
2843 return false;
2844 // If we are returning more than one value, we can definitely
2845 // not make a tail call; see PR19530.
2846 if (UI->getNumOperands() > 4)
2847 return false;
2848 if (UI->getNumOperands() == 4 &&
2849 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2850 return false;
2851 HasRet = true;
2852 }
2854 if (!HasRet)
2855 return false;
2857 Chain = TCChain;
2858 return true;
2859 }
2861 EVT X86TargetLowering::getTypeForExtReturn(LLVMContext &Context, EVT VT,
2862 ISD::NodeType ExtendKind) const {
2863 MVT ReturnMVT = MVT::i32;
2865 bool Darwin = Subtarget.getTargetTriple().isOSDarwin();
2866 if (VT == MVT::i1 || (!Darwin && (VT == MVT::i8 || VT == MVT::i16))) {
2867 // The ABI does not require i1, i8 or i16 to be extended.
2869 // On Darwin, there is code in the wild relying on Clang's old behaviour of
2870 // always extending i8/i16 return values, so keep doing that for now.
2872 ReturnMVT = MVT::i8;
2873 }
2875 EVT MinVT = getRegisterType(Context, ReturnMVT);
2876 return VT.bitsLT(MinVT) ? MinVT : VT;
2877 }
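// Worked example (illustrative): for VT = i1, or VT = i8/i16 off Darwin,
// ReturnMVT is i8, so MinVT = getRegisterType(i8) = i8 and the return value
// is only widened to i8. On Darwin an i16 return keeps ReturnMVT = i32 and
// is therefore extended to i32, preserving Clang's old behaviour.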
2879 /// Reads two 32 bit registers and creates a 64 bit mask value.
2880 /// \param VA The current 32 bit value that needs to be assigned.
2881 /// \param NextVA The next 32 bit value that needs to be assigned.
2882 /// \param Root The parent DAG node.
2883 /// \param [in,out] InFlag Represents the SDValue in the parent DAG node for
2884 /// glue purposes. If the DAG is already using a
2885 /// physical register instead of a virtual one, we should glue
2886 /// our new SDValue to the InFlag SDValue.
2887 /// \return a new SDValue of size 64 bits.
2888 static SDValue getv64i1Argument(CCValAssign &VA, CCValAssign &NextVA,
2889 SDValue &Root, SelectionDAG &DAG,
2890 const SDLoc &Dl, const X86Subtarget &Subtarget,
2891 SDValue *InFlag = nullptr) {
2892 assert((Subtarget.hasBWI()) && "Expected AVX512BW target!");
2893 assert(Subtarget.is32Bit() && "Expecting 32 bit target");
2894 assert(VA.getValVT() == MVT::v64i1 &&
2895 "Expecting first location of 64 bit width type");
2896 assert(NextVA.getValVT() == VA.getValVT() &&
2897 "The locations should have the same type");
2898 assert(VA.isRegLoc() && NextVA.isRegLoc() &&
2899 "The values should reside in two registers");
2902 SDValue ArgValueLo, ArgValueHi;
2904 MachineFunction &MF = DAG.getMachineFunction();
2905 const TargetRegisterClass *RC = &X86::GR32RegClass;
2907 // Read a 32 bit value from the registers.
2908 if (nullptr == InFlag) {
2909 // When no physical register is present,
2910 // create an intermediate virtual register.
2911 Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
2912 ArgValueLo = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
2913 Reg = MF.addLiveIn(NextVA.getLocReg(), RC);
2914 ArgValueHi = DAG.getCopyFromReg(Root, Dl, Reg, MVT::i32);
2915 } else {
2916 // When a physical register is available read the value from it and glue
2917 // the reads together.
2918 ArgValueLo =
2919 DAG.getCopyFromReg(Root, Dl, VA.getLocReg(), MVT::i32, *InFlag);
2920 *InFlag = ArgValueLo.getValue(2);
2921 ArgValueHi =
2922 DAG.getCopyFromReg(Root, Dl, NextVA.getLocReg(), MVT::i32, *InFlag);
2923 *InFlag = ArgValueHi.getValue(2);
2924 }
2926 // Convert the i32 type into v32i1 type.
2927 Lo = DAG.getBitcast(MVT::v32i1, ArgValueLo);
2929 // Convert the i32 type into v32i1 type.
2930 Hi = DAG.getBitcast(MVT::v32i1, ArgValueHi);
2932 // Concatenate the two values together.
2933 return DAG.getNode(ISD::CONCAT_VECTORS, Dl, MVT::v64i1, Lo, Hi);
2934 }
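// Usage sketch (illustrative): on a 32-bit AVX512BW regcall target, a v64i1
// value arriving in two GPR32 locations is rebuilt as
//   Lo = bitcast i32 to v32i1, Hi = bitcast i32 to v32i1,
//   concat_vectors(Lo, Hi) : v64i1.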
2936 /// The function will lower a register of various sizes (8/16/32/64)
2937 /// to a mask value of the expected size (v8i1/v16i1/v32i1/v64i1)
2938 /// \returns a DAG node containing the operand after lowering it to a mask type.
2939 static SDValue lowerRegToMasks(const SDValue &ValArg, const EVT &ValVT,
2940 const EVT &ValLoc, const SDLoc &Dl,
2941 SelectionDAG &DAG) {
2942 SDValue ValReturned = ValArg;
2944 if (ValVT == MVT::v1i1)
2945 return DAG.getNode(ISD::SCALAR_TO_VECTOR, Dl, MVT::v1i1, ValReturned);
2947 if (ValVT == MVT::v64i1) {
2948 // On a 32-bit target, this case is handled by getv64i1Argument.
2949 assert(ValLoc == MVT::i64 && "Expecting only i64 locations");
2950 // On a 64-bit target, there is no need to truncate; the value is simply bitcast.
2951 } else {
2952 MVT maskLen;
2953 switch (ValVT.getSimpleVT().SimpleTy) {
2954 case MVT::v8i1:
2955 maskLen = MVT::i8;
2956 break;
2957 case MVT::v16i1:
2958 maskLen = MVT::i16;
2959 break;
2960 case MVT::v32i1:
2961 maskLen = MVT::i32;
2962 break;
2963 default:
2964 llvm_unreachable("Expecting a vector of i1 types");
2965 }
2967 ValReturned = DAG.getNode(ISD::TRUNCATE, Dl, maskLen, ValReturned);
2968 }
2969 return DAG.getBitcast(ValVT, ValReturned);
2970 }
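// Worked example (illustrative): a v16i1 mask returned in an i32 location
// takes the else-branch above: truncate i32 -> i16, then bitcast to v16i1.
// A v64i1 in an i64 location skips the truncate and is bitcast directly.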
2972 /// Lower the result values of a call into the
2973 /// appropriate copies out of appropriate physical registers.
2975 SDValue X86TargetLowering::LowerCallResult(
2976 SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
2977 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
2978 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals,
2979 uint32_t *RegMask) const {
2981 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
2982 // Assign locations to each value returned by this call.
2983 SmallVector<CCValAssign, 16> RVLocs;
2984 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2985 *DAG.getContext());
2986 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2988 // Copy all of the result registers out of their specified physreg.
2989 for (unsigned I = 0, InsIndex = 0, E = RVLocs.size(); I != E;
2990 ++I, ++InsIndex) {
2991 CCValAssign &VA = RVLocs[I];
2992 EVT CopyVT = VA.getLocVT();
2994 // In some calling conventions we need to remove the used registers
2995 // from the register mask.
2996 if (RegMask) {
2997 for (MCSubRegIterator SubRegs(VA.getLocReg(), TRI, /*IncludeSelf=*/true);
2998 SubRegs.isValid(); ++SubRegs)
2999 RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
3000 }
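// The register mask packs one bit per physical register into 32-bit words.
// Illustrative arithmetic: register number 37 lands in word 37 / 32 == 1,
// bit 37 % 32 == 5, so the line above clears bit 5 of RegMask[1] for the
// returned register and each of its sub-registers.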
3002 // Report an error if there was an attempt to return FP values via XMM
3003 // registers.
3004 if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) {
3005 errorUnsupported(DAG, dl, "SSE register return with SSE disabled");
3006 if (VA.getLocReg() == X86::XMM1)
3007 VA.convertToReg(X86::FP1); // Set reg to FP1, avoid hitting asserts.
3008 else
3009 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
3010 } else if (!Subtarget.hasSSE2() &&
3011 X86::FR64XRegClass.contains(VA.getLocReg()) &&
3012 CopyVT == MVT::f64) {
3013 errorUnsupported(DAG, dl, "SSE2 register return with SSE2 disabled");
3014 if (VA.getLocReg() == X86::XMM1)
3015 VA.convertToReg(X86::FP1); // Set reg to FP1, avoid hitting asserts.
3016 else
3017 VA.convertToReg(X86::FP0); // Set reg to FP0, avoid hitting asserts.
3018 }
3020 // If we prefer to use the value in xmm registers, copy it out as f80 and
3021 // use a truncate to move it from fp stack reg to xmm reg.
3022 bool RoundAfterCopy = false;
3023 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
3024 isScalarFPTypeInSSEReg(VA.getValVT())) {
3025 if (!Subtarget.hasX87())
3026 report_fatal_error("X87 register return with X87 disabled");
3027 CopyVT = MVT::f80;
3028 RoundAfterCopy = (CopyVT != VA.getLocVT());
3029 }
3031 SDValue Val;
3032 if (VA.needsCustom()) {
3033 assert(VA.getValVT() == MVT::v64i1 &&
3034 "Currently the only custom case is when we split v64i1 to 2 regs");
3035 Val =
3036 getv64i1Argument(VA, RVLocs[++I], Chain, DAG, dl, Subtarget, &InFlag);
3037 } else {
3038 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(), CopyVT, InFlag)
3039 .getValue(1);
3040 Val = Chain.getValue(0);
3041 InFlag = Chain.getValue(2);
3042 }
3044 if (RoundAfterCopy)
3045 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
3046 // This truncation won't change the value.
3047 DAG.getIntPtrConstant(1, dl));
3049 if (VA.isExtInLoc() && (VA.getValVT().getScalarType() == MVT::i1)) {
3050 if (VA.getValVT().isVector() &&
3051 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
3052 (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
3053 // promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
3054 Val = lowerRegToMasks(Val, VA.getValVT(), VA.getLocVT(), dl, DAG);
3055 } else
3056 Val = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val);
3057 }
3059 if (VA.getLocInfo() == CCValAssign::BCvt)
3060 Val = DAG.getBitcast(VA.getValVT(), Val);
3062 InVals.push_back(Val);
3063 }
3065 return Chain;
3066 }
3068 //===----------------------------------------------------------------------===//
3069 // C & StdCall & Fast Calling Convention implementation
3070 //===----------------------------------------------------------------------===//
3071 // The StdCall calling convention is used by many Windows API routines. It
3072 // differs from the C calling convention only slightly: the callee cleans up
3073 // the stack instead of the caller, and symbols are decorated differently.
3074 // It doesn't support any vector arguments.
3075 // For info on fast calling convention see Fast Calling Convention (tail call)
3076 // implementation LowerX86_32FastCCCallTo.
3078 /// Determines whether a call uses struct return semantics.
3080 enum StructReturnType {
3081 NotStructReturn,
3082 RegStructReturn,
3083 StackStructReturn
3084 };
3085 static StructReturnType
3086 callIsStructReturn(ArrayRef<ISD::OutputArg> Outs, bool IsMCU) {
3087 if (Outs.empty())
3088 return NotStructReturn;
3090 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
3091 if (!Flags.isSRet())
3092 return NotStructReturn;
3093 if (Flags.isInReg() || IsMCU)
3094 return RegStructReturn;
3095 return StackStructReturn;
3096 }
3098 /// Determines whether a function uses struct return semantics.
3099 static StructReturnType
3100 argsAreStructReturn(ArrayRef<ISD::InputArg> Ins, bool IsMCU) {
3101 if (Ins.empty())
3102 return NotStructReturn;
3104 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
3105 if (!Flags.isSRet())
3106 return NotStructReturn;
3107 if (Flags.isInReg() || IsMCU)
3108 return RegStructReturn;
3109 return StackStructReturn;
3110 }
3112 /// Make a copy of an aggregate at address specified by "Src" to address
3113 /// "Dst" with size and alignment information specified by the specific
3114 /// parameter attribute. The copy will be passed as a byval function parameter.
3115 static SDValue CreateCopyOfByValArgument(SDValue Src, SDValue Dst,
3116 SDValue Chain, ISD::ArgFlagsTy Flags,
3117 SelectionDAG &DAG, const SDLoc &dl) {
3118 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), dl, MVT::i32);
3120 return DAG.getMemcpy(
3121 Chain, dl, Dst, Src, SizeNode, Flags.getNonZeroByValAlign(),
3122 /*isVolatile*/ false, /*AlwaysInline=*/true,
3123 /*isTailCall*/ false, MachinePointerInfo(), MachinePointerInfo());
3124 }
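// Design note: AlwaysInline is set, so a small byval aggregate (say, a
// 16-byte struct) expands to a handful of loads and stores instead of a
// memcpy libcall, keeping call-site argument setup cheap.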
3126 /// Return true if the calling convention is one that we can guarantee TCO for.
3127 static bool canGuaranteeTCO(CallingConv::ID CC) {
3128 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
3129 CC == CallingConv::X86_RegCall || CC == CallingConv::HiPE ||
3130 CC == CallingConv::HHVM || CC == CallingConv::Tail);
3131 }
3133 /// Return true if we might ever do TCO for calls with this calling convention.
3134 static bool mayTailCallThisCC(CallingConv::ID CC) {
3135 switch (CC) {
3136 // C calling conventions:
3137 case CallingConv::C:
3138 case CallingConv::Win64:
3139 case CallingConv::X86_64_SysV:
3140 // Callee pop conventions:
3141 case CallingConv::X86_ThisCall:
3142 case CallingConv::X86_StdCall:
3143 case CallingConv::X86_VectorCall:
3144 case CallingConv::X86_FastCall:
3145 // Swift:
3146 case CallingConv::Swift:
3147 return true;
3148 default:
3149 return canGuaranteeTCO(CC);
3150 }
3151 }
3153 /// Return true if the function is being made into a tailcall target by
3154 /// changing its ABI.
3155 static bool shouldGuaranteeTCO(CallingConv::ID CC, bool GuaranteedTailCallOpt) {
3156 return (GuaranteedTailCallOpt && canGuaranteeTCO(CC)) || CC == CallingConv::Tail;
3157 }
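// Illustrative usage: under -tailcallopt (GuaranteedTailCallOpt) a fastcc
// call in tail position must be lowered as a guaranteed tail call, and
// CallingConv::Tail (tailcc) requests the same even without the flag.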
3159 bool X86TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
3160 if (!CI->isTailCall())
3161 return false;
3163 CallingConv::ID CalleeCC = CI->getCallingConv();
3164 if (!mayTailCallThisCC(CalleeCC))
3165 return false;
3167 return true;
3168 }
3170 SDValue
3171 X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv,
3172 const SmallVectorImpl<ISD::InputArg> &Ins,
3173 const SDLoc &dl, SelectionDAG &DAG,
3174 const CCValAssign &VA,
3175 MachineFrameInfo &MFI, unsigned i) const {
3176 // Create the nodes corresponding to a load from this parameter slot.
3177 ISD::ArgFlagsTy Flags = Ins[i].Flags;
3178 bool AlwaysUseMutable = shouldGuaranteeTCO(
3179 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
3180 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
3182 MVT PtrVT = getPointerTy(DAG.getDataLayout());
3184 // If value is passed by pointer we have address passed instead of the value
3185 // itself. No need to extend if the mask value and location share the same
3186 // size.
3187 bool ExtendedInMem =
3188 VA.isExtInLoc() && VA.getValVT().getScalarType() == MVT::i1 &&
3189 VA.getValVT().getSizeInBits() != VA.getLocVT().getSizeInBits();
3190 EVT ValVT;
3191 if (VA.getLocInfo() == CCValAssign::Indirect || ExtendedInMem)
3192 ValVT = VA.getLocVT();
3193 else
3194 ValVT = VA.getValVT();
3196 // FIXME: For now, all byval parameter objects are marked mutable. This can be
3197 // changed with more analysis.
3198 // In case of tail call optimization, mark all arguments mutable, since they
3199 // could be overwritten by lowering of arguments in case of a tail call.
3200 if (Flags.isByVal()) {
3201 unsigned Bytes = Flags.getByValSize();
3202 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
3204 // FIXME: For now, all byval parameter objects are marked as aliasing. This
3205 // can be improved with deeper analysis.
3206 int FI = MFI.CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable,
3207 /*isAliased=*/true);
3208 return DAG.getFrameIndex(FI, PtrVT);
3211 EVT ArgVT = Ins[i].ArgVT;
3213 // If this is a vector that has been split into multiple parts, and the
3214 // scalar size of the parts doesn't match the vector element size, then we can't
3215 // elide the copy. The parts will have padding between them instead of being
3216 // packed like a vector.
3217 bool ScalarizedAndExtendedVector =
3218 ArgVT.isVector() && !VA.getLocVT().isVector() &&
3219 VA.getLocVT().getSizeInBits() != ArgVT.getScalarSizeInBits();
3221 // This is an argument in memory. We might be able to perform copy elision.
3222 // If the argument is passed directly in memory without any extension, then we
3223 // can perform copy elision. Large vector types, for example, may be passed
3224 // indirectly by pointer.
3225 if (Flags.isCopyElisionCandidate() &&
3226 VA.getLocInfo() != CCValAssign::Indirect && !ExtendedInMem &&
3227 !ScalarizedAndExtendedVector) {
3228 SDValue PartAddr;
3229 if (Ins[i].PartOffset == 0) {
3230 // If this is a one-part value or the first part of a multi-part value,
3231 // create a stack object for the entire argument value type and return a
3232 // load from our portion of it. This assumes that if the first part of an
3233 // argument is in memory, the rest will also be in memory.
3234 int FI = MFI.CreateFixedObject(ArgVT.getStoreSize(), VA.getLocMemOffset(),
3235 /*IsImmutable=*/false);
3236 PartAddr = DAG.getFrameIndex(FI, PtrVT);
3237 return DAG.getLoad(
3238 ValVT, dl, Chain, PartAddr,
3239 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3240 } else {
3241 // This is not the first piece of an argument in memory. See if there is
3242 // already a fixed stack object including this offset. If so, assume it
3243 // was created by the PartOffset == 0 branch above and create a load from
3244 // the appropriate offset into it.
3245 int64_t PartBegin = VA.getLocMemOffset();
3246 int64_t PartEnd = PartBegin + ValVT.getSizeInBits() / 8;
3247 int FI = MFI.getObjectIndexBegin();
3248 for (; MFI.isFixedObjectIndex(FI); ++FI) {
3249 int64_t ObjBegin = MFI.getObjectOffset(FI);
3250 int64_t ObjEnd = ObjBegin + MFI.getObjectSize(FI);
3251 if (ObjBegin <= PartBegin && PartEnd <= ObjEnd)
3252 break;
3253 }
3254 if (MFI.isFixedObjectIndex(FI)) {
3255 SDValue Addr =
3256 DAG.getNode(ISD::ADD, dl, PtrVT, DAG.getFrameIndex(FI, PtrVT),
3257 DAG.getIntPtrConstant(Ins[i].PartOffset, dl));
3259 ValVT, dl, Chain, Addr,
3260 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI,
3261 Ins[i].PartOffset));
3262 }
3263 }
3264 }
3266 int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
3267 VA.getLocMemOffset(), isImmutable);
3269 // Set SExt or ZExt flag.
3270 if (VA.getLocInfo() == CCValAssign::ZExt) {
3271 MFI.setObjectZExt(FI, true);
3272 } else if (VA.getLocInfo() == CCValAssign::SExt) {
3273 MFI.setObjectSExt(FI, true);
3274 }
3276 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
3277 SDValue Val = DAG.getLoad(
3278 ValVT, dl, Chain, FIN,
3279 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
3280 return ExtendedInMem
3281 ? (VA.getValVT().isVector()
3282 ? DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VA.getValVT(), Val)
3283 : DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), Val))
3284 : Val;
3285 }
3287 // FIXME: Get this from tablegen.
3288 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
3289 const X86Subtarget &Subtarget) {
3290 assert(Subtarget.is64Bit());
3292 if (Subtarget.isCallingConvWin64(CallConv)) {
3293 static const MCPhysReg GPR64ArgRegsWin64[] = {
3294 X86::RCX, X86::RDX, X86::R8, X86::R9
3295 };
3296 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
3297 }
3299 static const MCPhysReg GPR64ArgRegs64Bit[] = {
3300 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
3301 };
3302 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
3303 }
3305 // FIXME: Get this from tablegen.
3306 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
3307 CallingConv::ID CallConv,
3308 const X86Subtarget &Subtarget) {
3309 assert(Subtarget.is64Bit());
3310 if (Subtarget.isCallingConvWin64(CallConv)) {
3311 // The XMM registers which might contain var arg parameters are shadowed
3312 // in their paired GPR. So we only need to save the GPR to their home
3313 // slots.
3314 // TODO: __vectorcall will change this.
3315 return None;
3316 }
3318 const Function &F = MF.getFunction();
3319 bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
3320 bool isSoftFloat = Subtarget.useSoftFloat();
3321 assert(!(isSoftFloat && NoImplicitFloatOps) &&
3322 "SSE register cannot be used when SSE is disabled!");
3323 if (isSoftFloat || NoImplicitFloatOps || !Subtarget.hasSSE1())
3324 // Kernel mode asks for SSE to be disabled, so there are no XMM argument
3325 // registers.
3326 return None;
3328 static const MCPhysReg XMMArgRegs64Bit[] = {
3329 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3330 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3332 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
3333 }
3335 #ifndef NDEBUG
3336 static bool isSortedByValueNo(ArrayRef<CCValAssign> ArgLocs) {
3337 return llvm::is_sorted(
3338 ArgLocs, [](const CCValAssign &A, const CCValAssign &B) -> bool {
3339 return A.getValNo() < B.getValNo();
3340 });
3341 }
3342 #endif
3344 namespace {
3345 /// This is a helper class for lowering variable arguments parameters.
3346 class VarArgsLoweringHelper {
3347 public:
3348 VarArgsLoweringHelper(X86MachineFunctionInfo *FuncInfo, const SDLoc &Loc,
3349 SelectionDAG &DAG, const X86Subtarget &Subtarget,
3350 CallingConv::ID CallConv, CCState &CCInfo)
3351 : FuncInfo(FuncInfo), DL(Loc), DAG(DAG), Subtarget(Subtarget),
3352 TheMachineFunction(DAG.getMachineFunction()),
3353 TheFunction(TheMachineFunction.getFunction()),
3354 FrameInfo(TheMachineFunction.getFrameInfo()),
3355 FrameLowering(*Subtarget.getFrameLowering()),
3356 TargLowering(DAG.getTargetLoweringInfo()), CallConv(CallConv),
3357 CCInfo(CCInfo) {}
3359 // Lower variable arguments parameters.
3360 void lowerVarArgsParameters(SDValue &Chain, unsigned StackSize);
3362 private:
3363 void createVarArgAreaAndStoreRegisters(SDValue &Chain, unsigned StackSize);
3365 void forwardMustTailParameters(SDValue &Chain);
3367 bool is64Bit() { return Subtarget.is64Bit(); }
3368 bool isWin64() { return Subtarget.isCallingConvWin64(CallConv); }
3370 X86MachineFunctionInfo *FuncInfo;
3371 const SDLoc &DL;
3372 SelectionDAG &DAG;
3373 const X86Subtarget &Subtarget;
3374 MachineFunction &TheMachineFunction;
3375 const Function &TheFunction;
3376 MachineFrameInfo &FrameInfo;
3377 const TargetFrameLowering &FrameLowering;
3378 const TargetLowering &TargLowering;
3379 CallingConv::ID CallConv;
3380 CCState &CCInfo;
3381 };
3382 } // namespace
3384 void VarArgsLoweringHelper::createVarArgAreaAndStoreRegisters(
3385 SDValue &Chain, unsigned StackSize) {
3386 // If the function takes variable number of arguments, make a frame index for
3387 // the start of the first vararg value... for expansion of llvm.va_start. We
3388 // can skip this if there are no va_start calls.
3389 if (is64Bit() || (CallConv != CallingConv::X86_FastCall &&
3390 CallConv != CallingConv::X86_ThisCall)) {
3391 FuncInfo->setVarArgsFrameIndex(
3392 FrameInfo.CreateFixedObject(1, StackSize, true));
3393 }
3395 // Figure out if XMM registers are in use.
3396 assert(!(Subtarget.useSoftFloat() &&
3397 TheFunction.hasFnAttribute(Attribute::NoImplicitFloat)) &&
3398 "SSE register cannot be used when SSE is disabled!");
3400 // 64-bit calling conventions support varargs and register parameters, so we
3401 // have to do extra work to spill them in the prologue.
3402 if (is64Bit()) {
3403 // Find the first unallocated argument registers.
3404 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
3405 ArrayRef<MCPhysReg> ArgXMMs =
3406 get64BitArgumentXMMs(TheMachineFunction, CallConv, Subtarget);
3407 unsigned NumIntRegs = CCInfo.getFirstUnallocated(ArgGPRs);
3408 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(ArgXMMs);
3410 assert(!(NumXMMRegs && !Subtarget.hasSSE1()) &&
3411 "SSE register cannot be used when SSE is disabled!");
3414 // Get to the caller-allocated home save location. Add 8 to account
3415 // for the return address.
3416 int HomeOffset = FrameLowering.getOffsetOfLocalArea() + 8;
3417 FuncInfo->setRegSaveFrameIndex(
3418 FrameInfo.CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
3419 // Fixup to set vararg frame on shadow area (4 x i64).
3420 if (NumIntRegs < 4)
3421 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
3422 } else {
3423 // For X86-64, if there are vararg parameters that are passed via
3424 // registers, then we must store them to their spots on the stack so
3425 // they may be loaded by dereferencing the result of va_next.
3426 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
3427 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
3428 FuncInfo->setRegSaveFrameIndex(FrameInfo.CreateStackObject(
3429 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, Align(16), false));
3430 }
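// Worked size (illustrative, SysV x86-64 with SSE available): 6 GPRs * 8
// bytes + 8 XMM regs * 16 bytes = 176 bytes, the register save area of the
// psABI; VarArgsGPOffset/VarArgsFPOffset set above index into this object.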
3432 SmallVector<SDValue, 6>
3433 LiveGPRs; // list of SDValue for GPR registers keeping live input value
3434 SmallVector<SDValue, 8> LiveXMMRegs; // list of SDValue for XMM registers
3435 // keeping live input value
3436 SDValue ALVal; // if applicable keeps SDValue for %al register
3438 // Gather all the live in physical registers.
3439 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
3440 Register GPR = TheMachineFunction.addLiveIn(Reg, &X86::GR64RegClass);
3441 LiveGPRs.push_back(DAG.getCopyFromReg(Chain, DL, GPR, MVT::i64));
3443 const auto &AvailableXmms = ArgXMMs.slice(NumXMMRegs);
3444 if (!AvailableXmms.empty()) {
3445 Register AL = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
3446 ALVal = DAG.getCopyFromReg(Chain, DL, AL, MVT::i8);
3447 for (MCPhysReg Reg : AvailableXmms) {
3448 Register XMMReg = TheMachineFunction.addLiveIn(Reg, &X86::VR128RegClass);
3449 LiveXMMRegs.push_back(
3450 DAG.getCopyFromReg(Chain, DL, XMMReg, MVT::v4f32));
3451 }
3452 }
3454 // Store the integer parameter registers.
3455 SmallVector<SDValue, 8> MemOps;
3456 SDValue RSFIN =
3457 DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
3458 TargLowering.getPointerTy(DAG.getDataLayout()));
3459 unsigned Offset = FuncInfo->getVarArgsGPOffset();
3460 for (SDValue Val : LiveGPRs) {
3461 SDValue FIN = DAG.getNode(ISD::ADD, DL,
3462 TargLowering.getPointerTy(DAG.getDataLayout()),
3463 RSFIN, DAG.getIntPtrConstant(Offset, DL));
3464 SDValue Store =
3465 DAG.getStore(Val.getValue(1), DL, Val, FIN,
3466 MachinePointerInfo::getFixedStack(
3467 DAG.getMachineFunction(),
3468 FuncInfo->getRegSaveFrameIndex(), Offset));
3469 MemOps.push_back(Store);
3470 Offset += 8;
3471 }
3473 // Now store the XMM (fp + vector) parameter registers.
3474 if (!LiveXMMRegs.empty()) {
3475 SmallVector<SDValue, 12> SaveXMMOps;
3476 SaveXMMOps.push_back(Chain);
3477 SaveXMMOps.push_back(ALVal);
3478 SaveXMMOps.push_back(
3479 DAG.getIntPtrConstant(FuncInfo->getRegSaveFrameIndex(), DL));
3480 SaveXMMOps.push_back(
3481 DAG.getIntPtrConstant(FuncInfo->getVarArgsFPOffset(), DL));
3482 SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
3483 LiveXMMRegs.end());
3484 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, DL,
3485 MVT::Other, SaveXMMOps));
3486 }
3488 if (!MemOps.empty())
3489 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
3490 }
3491 }
3493 void VarArgsLoweringHelper::forwardMustTailParameters(SDValue &Chain) {
3494 // Find the largest legal vector type.
3495 MVT VecVT = MVT::Other;
3496 // FIXME: Only some x86_32 calling conventions support AVX512.
3497 if (Subtarget.useAVX512Regs() &&
3498 (is64Bit() || (CallConv == CallingConv::X86_VectorCall ||
3499 CallConv == CallingConv::Intel_OCL_BI)))
3500 VecVT = MVT::v16f32;
3501 else if (Subtarget.hasAVX())
3502 VecVT = MVT::v8f32;
3503 else if (Subtarget.hasSSE2())
3504 VecVT = MVT::v4f32;
3506 // We forward some GPRs and some vector types.
3507 SmallVector<MVT, 2> RegParmTypes;
3508 MVT IntVT = is64Bit() ? MVT::i64 : MVT::i32;
3509 RegParmTypes.push_back(IntVT);
3510 if (VecVT != MVT::Other)
3511 RegParmTypes.push_back(VecVT);
3513 // Compute the set of forwarded registers. The rest are scratch.
3514 SmallVectorImpl<ForwardedRegister> &Forwards =
3515 FuncInfo->getForwardedMustTailRegParms();
3516 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
3518 // Forward AL for SysV x86_64 targets, since it is used for varargs.
3519 if (is64Bit() && !isWin64() && !CCInfo.isAllocated(X86::AL)) {
3520 Register ALVReg = TheMachineFunction.addLiveIn(X86::AL, &X86::GR8RegClass);
3521 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
3522 }
3524 // Copy all forwards from physical to virtual registers.
3525 for (ForwardedRegister &FR : Forwards) {
3526 // FIXME: Can we use a less constrained schedule?
3527 SDValue RegVal = DAG.getCopyFromReg(Chain, DL, FR.VReg, FR.VT);
3528 FR.VReg = TheMachineFunction.getRegInfo().createVirtualRegister(
3529 TargLowering.getRegClassFor(FR.VT));
3530 Chain = DAG.getCopyToReg(Chain, DL, FR.VReg, RegVal);
3531 }
3532 }
3534 void VarArgsLoweringHelper::lowerVarArgsParameters(SDValue &Chain,
3535 unsigned StackSize) {
3536 // Set FrameIndex to the 0xAAAAAAA value to mark unset state.
3537 // If necessary, it would be set into the correct value later.
3538 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
3539 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
3541 if (FrameInfo.hasVAStart())
3542 createVarArgAreaAndStoreRegisters(Chain, StackSize);
3544 if (FrameInfo.hasMustTailInVarArgFunc())
3545 forwardMustTailParameters(Chain);
3546 }
3548 SDValue X86TargetLowering::LowerFormalArguments(
3549 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
3550 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
3551 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
3552 MachineFunction &MF = DAG.getMachineFunction();
3553 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3555 const Function &F = MF.getFunction();
3556 if (F.hasExternalLinkage() && Subtarget.isTargetCygMing() &&
3557 F.getName() == "main")
3558 FuncInfo->setForceFramePointer(true);
3560 MachineFrameInfo &MFI = MF.getFrameInfo();
3561 bool Is64Bit = Subtarget.is64Bit();
3562 bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
3564 assert(
3565 !(IsVarArg && canGuaranteeTCO(CallConv)) &&
3566 "Var args not supported with calling conv' regcall, fastcc, ghc or hipe");
3568 // Assign locations to all of the incoming arguments.
3569 SmallVector<CCValAssign, 16> ArgLocs;
3570 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
3572 // Allocate shadow area for Win64.
3573 if (IsWin64)
3574 CCInfo.AllocateStack(32, Align(8));
3576 CCInfo.AnalyzeArguments(Ins, CC_X86);
3578 // In vectorcall calling convention a second pass is required for the HVA
3579 // registers.
3580 if (CallingConv::X86_VectorCall == CallConv) {
3581 CCInfo.AnalyzeArgumentsSecondPass(Ins, CC_X86);
3582 }
3584 // The next loop assumes that the locations are in the same order of the
3585 // Ins array.
3586 assert(isSortedByValueNo(ArgLocs) &&
3587 "Argument Location list must be sorted before lowering");
3590 for (unsigned I = 0, InsIndex = 0, E = ArgLocs.size(); I != E;
3591 ++I, ++InsIndex) {
3592 assert(InsIndex < Ins.size() && "Invalid Ins index");
3593 CCValAssign &VA = ArgLocs[I];
3594 SDValue ArgValue;
3595 if (VA.isRegLoc()) {
3596 EVT RegVT = VA.getLocVT();
3597 if (VA.needsCustom()) {
3598 assert(
3599 VA.getValVT() == MVT::v64i1 &&
3600 "Currently the only custom case is when we split v64i1 to 2 regs");
3602 // In the regcall calling convention, v64i1 values compiled for a 32-bit
3603 // target are split up into two registers.
3604 ArgValue =
3605 getv64i1Argument(VA, ArgLocs[++I], Chain, DAG, dl, Subtarget);
3606 } else {
3607 const TargetRegisterClass *RC;
3608 if (RegVT == MVT::i8)
3609 RC = &X86::GR8RegClass;
3610 else if (RegVT == MVT::i16)
3611 RC = &X86::GR16RegClass;
3612 else if (RegVT == MVT::i32)
3613 RC = &X86::GR32RegClass;
3614 else if (Is64Bit && RegVT == MVT::i64)
3615 RC = &X86::GR64RegClass;
3616 else if (RegVT == MVT::f32)
3617 RC = Subtarget.hasAVX512() ? &X86::FR32XRegClass : &X86::FR32RegClass;
3618 else if (RegVT == MVT::f64)
3619 RC = Subtarget.hasAVX512() ? &X86::FR64XRegClass : &X86::FR64RegClass;
3620 else if (RegVT == MVT::f80)
3621 RC = &X86::RFP80RegClass;
3622 else if (RegVT == MVT::f128)
3623 RC = &X86::VR128RegClass;
3624 else if (RegVT.is512BitVector())
3625 RC = &X86::VR512RegClass;
3626 else if (RegVT.is256BitVector())
3627 RC = Subtarget.hasVLX() ? &X86::VR256XRegClass : &X86::VR256RegClass;
3628 else if (RegVT.is128BitVector())
3629 RC = Subtarget.hasVLX() ? &X86::VR128XRegClass : &X86::VR128RegClass;
3630 else if (RegVT == MVT::x86mmx)
3631 RC = &X86::VR64RegClass;
3632 else if (RegVT == MVT::v1i1)
3633 RC = &X86::VK1RegClass;
3634 else if (RegVT == MVT::v8i1)
3635 RC = &X86::VK8RegClass;
3636 else if (RegVT == MVT::v16i1)
3637 RC = &X86::VK16RegClass;
3638 else if (RegVT == MVT::v32i1)
3639 RC = &X86::VK32RegClass;
3640 else if (RegVT == MVT::v64i1)
3641 RC = &X86::VK64RegClass;
3643 llvm_unreachable("Unknown argument type!");
3645 Register Reg = MF.addLiveIn(VA.getLocReg(), RC);
3646 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
3647 }
3649 // If this is an 8 or 16-bit value, it is really passed promoted to 32
3650 // bits. Insert an assert[sz]ext to capture this, then truncate to the
3651 // right size.
3652 if (VA.getLocInfo() == CCValAssign::SExt)
3653 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
3654 DAG.getValueType(VA.getValVT()));
3655 else if (VA.getLocInfo() == CCValAssign::ZExt)
3656 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
3657 DAG.getValueType(VA.getValVT()));
3658 else if (VA.getLocInfo() == CCValAssign::BCvt)
3659 ArgValue = DAG.getBitcast(VA.getValVT(), ArgValue);
3661 if (VA.isExtInLoc()) {
3662 // Handle MMX values passed in XMM regs.
3663 if (RegVT.isVector() && VA.getValVT().getScalarType() != MVT::i1)
3664 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
3665 else if (VA.getValVT().isVector() &&
3666 VA.getValVT().getScalarType() == MVT::i1 &&
3667 ((VA.getLocVT() == MVT::i64) || (VA.getLocVT() == MVT::i32) ||
3668 (VA.getLocVT() == MVT::i16) || (VA.getLocVT() == MVT::i8))) {
3669 // Promoting a mask type (v*i1) into a register of type i64/i32/i16/i8
3670 ArgValue = lowerRegToMasks(ArgValue, VA.getValVT(), RegVT, dl, DAG);
3671 } else
3672 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
3673 }
3674 } else {
3675 assert(VA.isMemLoc());
3676 ArgValue =
3677 LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, InsIndex);
3678 }
3680 // If value is passed via pointer - do a load.
3681 if (VA.getLocInfo() == CCValAssign::Indirect && !Ins[I].Flags.isByVal())
3682 ArgValue =
3683 DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue, MachinePointerInfo());
3685 InVals.push_back(ArgValue);
3686 }
3688 for (unsigned I = 0, E = Ins.size(); I != E; ++I) {
3689 // Swift calling convention does not require we copy the sret argument
3690 // into %rax/%eax for the return. We don't set SRetReturnReg for Swift.
3691 if (CallConv == CallingConv::Swift)
3692 continue;
3694 // All x86 ABIs require that for returning structs by value we copy the
3695 // sret argument into %rax/%eax (depending on ABI) for the return. Save
3696 // the argument into a virtual register so that we can access it from the
3697 // return points.
3698 if (Ins[I].Flags.isSRet()) {
3699 Register Reg = FuncInfo->getSRetReturnReg();
3700 if (!Reg) {
3701 MVT PtrTy = getPointerTy(DAG.getDataLayout());
3702 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
3703 FuncInfo->setSRetReturnReg(Reg);
3704 }
3705 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[I]);
3706 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
3707 break;
3708 }
3709 }
3711 unsigned StackSize = CCInfo.getNextStackOffset();
3712 // Align stack specially for tail calls.
3713 if (shouldGuaranteeTCO(CallConv,
3714 MF.getTarget().Options.GuaranteedTailCallOpt))
3715 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
3717 if (IsVarArg)
3718 VarArgsLoweringHelper(FuncInfo, dl, DAG, Subtarget, CallConv, CCInfo)
3719 .lowerVarArgsParameters(Chain, StackSize);
3721 // Some CCs need callee pop.
3722 if (X86::isCalleePop(CallConv, Is64Bit, IsVarArg,
3723 MF.getTarget().Options.GuaranteedTailCallOpt)) {
3724 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
3725 } else if (CallConv == CallingConv::X86_INTR && Ins.size() == 2) {
3726 // X86 interrupts must pop the error code (and the alignment padding) if
3727 // present.
3728 FuncInfo->setBytesToPopOnReturn(Is64Bit ? 16 : 4);
3729 } else {
3730 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
3731 // If this is an sret function, the return should pop the hidden pointer.
3732 if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
3733 !Subtarget.getTargetTriple().isOSMSVCRT() &&
3734 argsAreStructReturn(Ins, Subtarget.isTargetMCU()) == StackStructReturn)
3735 FuncInfo->setBytesToPopOnReturn(4);
3736 }
3738 if (!Is64Bit) {
3739 // RegSaveFrameIndex is X86-64 only.
3740 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
3741 }
3743 FuncInfo->setArgumentStackSize(StackSize);
3745 if (WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo()) {
3746 EHPersonality Personality = classifyEHPersonality(F.getPersonalityFn());
3747 if (Personality == EHPersonality::CoreCLR) {
3749 // TODO: Add a mechanism to frame lowering that will allow us to indicate
3750 // that we'd prefer this slot be allocated towards the bottom of the frame
3751 // (i.e. near the stack pointer after allocating the frame). Every
3752 // funclet needs a copy of this slot in its (mostly empty) frame, and the
3753 // offset from the bottom of this and each funclet's frame must be the
3754 // same, so the size of funclets' (mostly empty) frames is dictated by
3755 // how far this slot is from the bottom (since they allocate just enough
3756 // space to accommodate holding this slot at the correct offset).
3757 int PSPSymFI = MFI.CreateStackObject(8, Align(8), /*isSS=*/false);
3758 EHInfo->PSPSymFrameIdx = PSPSymFI;
3759 }
3760 }
3762 if (CallConv == CallingConv::X86_RegCall ||
3763 F.hasFnAttribute("no_caller_saved_registers")) {
3764 MachineRegisterInfo &MRI = MF.getRegInfo();
3765 for (std::pair<Register, Register> Pair : MRI.liveins())
3766 MRI.disableCalleeSavedRegister(Pair.first);
3767 }
3769 return Chain;
3770 }
3772 SDValue X86TargetLowering::LowerMemOpCallTo(SDValue Chain, SDValue StackPtr,
3773 SDValue Arg, const SDLoc &dl,
3774 SelectionDAG &DAG,
3775 const CCValAssign &VA,
3776 ISD::ArgFlagsTy Flags,
3777 bool isByVal) const {
3778 unsigned LocMemOffset = VA.getLocMemOffset();
3779 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset, dl);
3780 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
3781 StackPtr, PtrOff);
3782 if (isByVal)
3783 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
3785 return DAG.getStore(
3786 Chain, dl, Arg, PtrOff,
3787 MachinePointerInfo::getStack(DAG.getMachineFunction(), LocMemOffset));
3788 }
3790 /// Emit a load of return address if tail call
3791 /// optimization is performed and it is required.
3792 SDValue X86TargetLowering::EmitTailCallLoadRetAddr(
3793 SelectionDAG &DAG, SDValue &OutRetAddr, SDValue Chain, bool IsTailCall,
3794 bool Is64Bit, int FPDiff, const SDLoc &dl) const {
3795 // Adjust the Return address stack slot.
3796 EVT VT = getPointerTy(DAG.getDataLayout());
3797 OutRetAddr = getReturnAddressFrameIndex(DAG);
3799 // Load the "old" Return address.
3800 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo());
3801 return SDValue(OutRetAddr.getNode(), 1);
3802 }
3804 /// Emit a store of the return address if tail call
3805 /// optimization is performed and it is required (FPDiff!=0).
3806 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
3807 SDValue Chain, SDValue RetAddrFrIdx,
3808 EVT PtrVT, unsigned SlotSize,
3809 int FPDiff, const SDLoc &dl) {
3810 // Store the return address to the appropriate stack slot.
3811 if (!FPDiff) return Chain;
3812 // Calculate the new stack slot for the return address.
3813 int NewReturnAddrFI =
3814 MF.getFrameInfo().CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
3815 false);
3816 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
3817 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
3818 MachinePointerInfo::getFixedStack(
3819 DAG.getMachineFunction(), NewReturnAddrFI));
3820 return Chain;
3821 }
3823 /// Returns a vector_shuffle mask for a movs{s|d} or movd
3824 /// operation of the specified width.
3825 static SDValue getMOVL(SelectionDAG &DAG, const SDLoc &dl, MVT VT, SDValue V1,
3826 SDValue V2) {
3827 unsigned NumElems = VT.getVectorNumElements();
3828 SmallVector<int, 8> Mask;
3829 Mask.push_back(NumElems);
3830 for (unsigned i = 1; i != NumElems; ++i)
3831 Mask.push_back(i);
3832 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
3833 }
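// Illustrative mask: for VT = v4i32 this produces <4, 1, 2, 3>; index 4
// (>= NumElems) selects element 0 of V2, and elements 1-3 come from V1,
// which is exactly the movss/movsd "move lowest element" pattern.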
3835 SDValue
3836 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
3837 SmallVectorImpl<SDValue> &InVals) const {
3838 SelectionDAG &DAG = CLI.DAG;
3839 SDLoc &dl = CLI.DL;
3840 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
3841 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
3842 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
3843 SDValue Chain = CLI.Chain;
3844 SDValue Callee = CLI.Callee;
3845 CallingConv::ID CallConv = CLI.CallConv;
3846 bool &isTailCall = CLI.IsTailCall;
3847 bool isVarArg = CLI.IsVarArg;
3849 MachineFunction &MF = DAG.getMachineFunction();
3850 bool Is64Bit = Subtarget.is64Bit();
3851 bool IsWin64 = Subtarget.isCallingConvWin64(CallConv);
3852 StructReturnType SR = callIsStructReturn(Outs, Subtarget.isTargetMCU());
3853 bool IsSibcall = false;
3854 bool IsGuaranteeTCO = MF.getTarget().Options.GuaranteedTailCallOpt ||
3855 CallConv == CallingConv::Tail;
3856 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
3857 const auto *CI = dyn_cast_or_null<CallInst>(CLI.CB);
3858 const Function *Fn = CI ? CI->getCalledFunction() : nullptr;
3859 bool HasNCSR = (CI && CI->hasFnAttr("no_caller_saved_registers")) ||
3860 (Fn && Fn->hasFnAttribute("no_caller_saved_registers"));
3861 const auto *II = dyn_cast_or_null<InvokeInst>(CLI.CB);
3862 bool HasNoCfCheck =
3863 (CI && CI->doesNoCfCheck()) || (II && II->doesNoCfCheck());
3864 const Module *M = MF.getMMI().getModule();
3865 Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
3867 MachineFunction::CallSiteInfo CSInfo;
3868 if (CallConv == CallingConv::X86_INTR)
3869 report_fatal_error("X86 interrupts may not be called directly");
3871 if (Subtarget.isPICStyleGOT() && !IsGuaranteeTCO) {
3872 // If we are using a GOT, disable tail calls to external symbols with
3873 // default visibility. Tail calling such a symbol requires using a GOT
3874 // relocation, which forces early binding of the symbol. This breaks code
3875 // that require lazy function symbol resolution. Using musttail or
3876 // GuaranteedTailCallOpt will override this.
3877 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
3878 if (!G || (!G->getGlobal()->hasLocalLinkage() &&
3879 G->getGlobal()->hasDefaultVisibility()))
3880 isTailCall = false;
3881 }
3883 bool IsMustTail = CLI.CB && CLI.CB->isMustTailCall();
3884 if (IsMustTail) {
3885 // Force this to be a tail call. The verifier rules are enough to ensure
3886 // that we can lower this successfully without moving the return address
3887 // around.
3888 isTailCall = true;
3889 } else if (isTailCall) {
3890 // Check if it's really possible to do a tail call.
3891 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
3892 isVarArg, SR != NotStructReturn,
3893 MF.getFunction().hasStructRetAttr(), CLI.RetTy,
3894 Outs, OutVals, Ins, DAG);
3896 // Sibcalls are automatically detected tailcalls which do not require
3897 // ABI changes.
3898 if (!IsGuaranteeTCO && isTailCall)
3899 IsSibcall = true;
3901 if (isTailCall)
3902 ++NumTailCalls;
3903 }
3905 assert(!(isVarArg && canGuaranteeTCO(CallConv)) &&
3906 "Var args not supported with calling convention fastcc, ghc or hipe");
3908 // Analyze operands of the call, assigning locations to each operand.
3909 SmallVector<CCValAssign, 16> ArgLocs;
3910 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
3912 // Allocate shadow area for Win64.
3913 if (IsWin64)
3914 CCInfo.AllocateStack(32, Align(8));
3916 CCInfo.AnalyzeArguments(Outs, CC_X86);
3918 // In vectorcall calling convention a second pass is required for the HVA
3919 // registers.
3920 if (CallingConv::X86_VectorCall == CallConv) {
3921 CCInfo.AnalyzeArgumentsSecondPass(Outs, CC_X86);
3922 }
3924 // Get a count of how many bytes are to be pushed on the stack.
3925 unsigned NumBytes = CCInfo.getAlignedCallFrameSize();
3926 if (IsSibcall)
3927 // This is a sibcall. The memory operands are available in the caller's
3928 // own caller's stack.
3929 NumBytes = 0;
3930 else if (IsGuaranteeTCO && canGuaranteeTCO(CallConv))
3931 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
3933 int FPDiff = 0;
3934 if (isTailCall && !IsSibcall && !IsMustTail) {
3935 // Lower arguments at fp - stackoffset + fpdiff.
3936 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
3938 FPDiff = NumBytesCallerPushed - NumBytes;
3940 // Set the delta of movement of the returnaddr stackslot.
3941 // But only set if delta is greater than previous delta.
3942 if (FPDiff < X86Info->getTCReturnAddrDelta())
3943 X86Info->setTCReturnAddrDelta(FPDiff);
3944 }
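// Worked example (illustrative): if the caller pops 12 bytes of incoming
// arguments but this callee needs 20 bytes, FPDiff = 12 - 20 = -8 and the
// return address slot moves down 8 bytes; the most negative delta seen is
// recorded so the prologue reserves enough space once.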
3946 unsigned NumBytesToPush = NumBytes;
3947 unsigned NumBytesToPop = NumBytes;
3949 // If we have an inalloca argument, all stack space has already been allocated
3950 // for us and be right at the top of the stack. We don't support multiple
3951 // arguments passed in memory when using inalloca.
3952 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
3953 NumBytesToPush = 0;
3954 if (!ArgLocs.back().isMemLoc())
3955 report_fatal_error("cannot use inalloca attribute on a register "
3956 "parameter");
3957 if (ArgLocs.back().getLocMemOffset() != 0)
3958 report_fatal_error("any parameter with the inalloca attribute must be "
3959 "the only memory argument");
3960 } else if (CLI.IsPreallocated) {
3961 assert(ArgLocs.back().isMemLoc() &&
3962 "cannot use preallocated attribute on a register "
3964 SmallVector<size_t, 4> PreallocatedOffsets;
3965 for (size_t i = 0; i < CLI.OutVals.size(); ++i) {
3966 if (CLI.CB->paramHasAttr(i, Attribute::Preallocated)) {
3967 PreallocatedOffsets.push_back(ArgLocs[i].getLocMemOffset());
3968 }
3969 }
3970 auto *MFI = DAG.getMachineFunction().getInfo<X86MachineFunctionInfo>();
3971 size_t PreallocatedId = MFI->getPreallocatedIdForCallSite(CLI.CB);
3972 MFI->setPreallocatedStackSize(PreallocatedId, NumBytes);
3973 MFI->setPreallocatedArgOffsets(PreallocatedId, PreallocatedOffsets);
3974 }
3977 if (!IsSibcall && !IsMustTail)
3978 Chain = DAG.getCALLSEQ_START(Chain, NumBytesToPush,
3979 NumBytes - NumBytesToPush, dl);
3981 SDValue RetAddrFrIdx;
3982 // Load return address for tail calls.
3983 if (isTailCall && FPDiff)
3984 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
3985 Is64Bit, FPDiff, dl);
3987 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
3988 SmallVector<SDValue, 8> MemOpChains;
3989 SDValue StackPtr;
3991 // The next loop assumes that the locations are in the same order of the
3992 // Outs array.
3993 assert(isSortedByValueNo(ArgLocs) &&
3994 "Argument Location list must be sorted before lowering");
3996 // Walk the register/memloc assignments, inserting copies/loads. In the case
3997 // of tail call optimization, arguments are handled later.
3998 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
3999 for (unsigned I = 0, OutIndex = 0, E = ArgLocs.size(); I != E;
4000 ++I, ++OutIndex) {
4001 assert(OutIndex < Outs.size() && "Invalid Out index");
4002 // Skip inalloca/preallocated arguments, they have already been written.
4003 ISD::ArgFlagsTy Flags = Outs[OutIndex].Flags;
4004 if (Flags.isInAlloca() || Flags.isPreallocated())
4005 continue;
4007 CCValAssign &VA = ArgLocs[I];
4008 EVT RegVT = VA.getLocVT();
4009 SDValue Arg = OutVals[OutIndex];
4010 bool isByVal = Flags.isByVal();
4012 // Promote the value if needed.
4013 switch (VA.getLocInfo()) {
4014 default: llvm_unreachable("Unknown loc info!");
4015 case CCValAssign::Full: break;
4016 case CCValAssign::SExt:
4017 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
4018 break;
4019 case CCValAssign::ZExt:
4020 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
4021 break;
4022 case CCValAssign::AExt:
4023 if (Arg.getValueType().isVector() &&
4024 Arg.getValueType().getVectorElementType() == MVT::i1)
4025 Arg = lowerMasksToReg(Arg, RegVT, dl, DAG);
4026 else if (RegVT.is128BitVector()) {
4027 // Special case: passing MMX values in XMM registers.
4028 Arg = DAG.getBitcast(MVT::i64, Arg);
4029 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
4030 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
4032 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
4033 break;
4034 case CCValAssign::BCvt:
4035 Arg = DAG.getBitcast(RegVT, Arg);
4036 break;
4037 case CCValAssign::Indirect: {
4038 if (isByVal) {
4039 // Memcpy the argument to a temporary stack slot to prevent
4040 // the caller from seeing any modifications the callee may make
4041 // as guaranteed by the `byval` attribute.
4042 int FrameIdx = MF.getFrameInfo().CreateStackObject(
4043 Flags.getByValSize(),
4044 std::max(Align(16), Flags.getNonZeroByValAlign()), false);
4045 SDValue StackSlot =
4046 DAG.getFrameIndex(FrameIdx, getPointerTy(DAG.getDataLayout()));
4047 Chain =
4048 CreateCopyOfByValArgument(Arg, StackSlot, Chain, Flags, DAG, dl);
4049 // From now on treat this as a regular pointer
4050 Arg = StackSlot;
4051 isByVal = false;
4052 } else {
4053 // Store the argument.
4054 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
4055 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
4056 Chain = DAG.getStore(
4057 Chain, dl, Arg, SpillSlot,
4058 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI));
4059 Arg = SpillSlot;
4060 }
4061 break;
4062 }
4063 }
4065 if (VA.needsCustom()) {
4066 assert(VA.getValVT() == MVT::v64i1 &&
4067 "Currently the only custom case is when we split v64i1 to 2 regs");
4068 // Split v64i1 value into two registers
4069 Passv64i1ArgInRegs(dl, DAG, Arg, RegsToPass, VA, ArgLocs[++I], Subtarget);
4070 } else if (VA.isRegLoc()) {
4071 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
4072 const TargetOptions &Options = DAG.getTarget().Options;
4073 if (Options.EmitCallSiteInfo)
4074 CSInfo.emplace_back(VA.getLocReg(), I);
4075 if (isVarArg && IsWin64) {
4076 // Win64 ABI requires argument XMM reg to be copied to the corresponding
4077 // shadow reg if callee is a varargs function.
4078 Register ShadowReg;
4079 switch (VA.getLocReg()) {
4080 case X86::XMM0: ShadowReg = X86::RCX; break;
4081 case X86::XMM1: ShadowReg = X86::RDX; break;
4082 case X86::XMM2: ShadowReg = X86::R8; break;
4083 case X86::XMM3: ShadowReg = X86::R9; break;
4084 }
4085 if (ShadowReg)
4086 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
4087 }
4088 } else if (!IsSibcall && (!isTailCall || isByVal)) {
4089 assert(VA.isMemLoc());
4090 if (!StackPtr.getNode())
4091 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
4092 getPointerTy(DAG.getDataLayout()));
4093 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
4094 dl, DAG, VA, Flags, isByVal));
4095 }
4096 }
4098 if (!MemOpChains.empty())
4099 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
4101 if (Subtarget.isPICStyleGOT()) {
4102 // ELF / PIC requires GOT in the EBX register before function calls via PLT
4103 // GOT pointer.
4104 if (!isTailCall) {
4105 RegsToPass.push_back(std::make_pair(
4106 Register(X86::EBX), DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(),
4107 getPointerTy(DAG.getDataLayout()))));
4108 } else {
4109 // If we are tail calling and generating PIC/GOT style code load the
4110 // address of the callee into ECX. The value in ecx is used as target of
4111 // the tail jump. This is done to circumvent the ebx/callee-saved problem
4112 // for tail calls on PIC/GOT architectures. Normally we would just put the
4113 // address of GOT into ebx and then call target@PLT. But for tail calls
4114 // ebx would be restored (since ebx is callee saved) before jumping to the
4115 // target@PLT.
4117 // Note: The actual moving to ECX is done further down.
4118 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
4119 if (G && !G->getGlobal()->hasLocalLinkage() &&
4120 G->getGlobal()->hasDefaultVisibility())
4121 Callee = LowerGlobalAddress(Callee, DAG);
4122 else if (isa<ExternalSymbolSDNode>(Callee))
4123 Callee = LowerExternalSymbol(Callee, DAG);
4124 }
4125 }
4127 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
4128 // From AMD64 ABI document:
4129 // For calls that may call functions that use varargs or stdargs
4130 // (prototype-less calls or calls to functions containing ellipsis (...) in
4131 // the declaration) %al is used as hidden argument to specify the number
4132 // of SSE registers used. The contents of %al do not need to match exactly
4133 // the number of registers, but must be an upper bound on the number of SSE
4134 // registers used and is in the range 0 - 8 inclusive.
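// Illustrative lowering: a call like printf("%f %f", a, b) classifies two
// arguments into XMM registers, so NumXMMRegs below is 2 and the emitted
// sequence contains "mov al, 2" before the call, an upper bound that
// satisfies the ABI.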
4136 // Count the number of XMM registers allocated.
4137 static const MCPhysReg XMMArgRegs[] = {
4138 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
4139 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
4141 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs);
4142 assert((Subtarget.hasSSE1() || !NumXMMRegs)
4143 && "SSE registers cannot be used when SSE is disabled");
4144 RegsToPass.push_back(std::make_pair(Register(X86::AL),
4145 DAG.getConstant(NumXMMRegs, dl,
4146 MVT::i8)));
4147 }
4149 if (isVarArg && IsMustTail) {
4150 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
4151 for (const auto &F : Forwards) {
4152 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
4153 RegsToPass.push_back(std::make_pair(F.PReg, Val));
4154 }
4155 }
4157 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
4158 // don't need this because the eligibility check rejects calls that require
4159 // shuffling arguments passed in memory.
4160 if (!IsSibcall && isTailCall) {
4161 // Force all the incoming stack arguments to be loaded from the stack
4162 // before any new outgoing arguments are stored to the stack, because the
4163 // outgoing stack slots may alias the incoming argument stack slots, and
4164 // the alias isn't otherwise explicit. This is slightly more conservative
4165 // than necessary, because it means that each store effectively depends
4166 // on every argument instead of just those arguments it would clobber.
4167 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
4169 SmallVector<SDValue, 8> MemOpChains2;
4170 SDValue FIN;
4171 int FI = 0;
4172 for (unsigned I = 0, OutsIndex = 0, E = ArgLocs.size(); I != E;
4173 ++I, ++OutsIndex) {
4174 CCValAssign &VA = ArgLocs[I];
4176 if (VA.isRegLoc()) {
4177 if (VA.needsCustom()) {
4178 assert((CallConv == CallingConv::X86_RegCall) &&
4179 "Expecting custom case only in regcall calling convention");
4180 // This means that we are in a special case where one argument was
4181 // passed through two register locations, so skip the next location.
4182 ++I;
4183 }
4185 continue;
4186 }
4188 assert(VA.isMemLoc());
4189 SDValue Arg = OutVals[OutsIndex];
4190 ISD::ArgFlagsTy Flags = Outs[OutsIndex].Flags;
4191 // Skip inalloca/preallocated arguments. They don't require any work.
4192 if (Flags.isInAlloca() || Flags.isPreallocated())
4194 // Create frame index.
4195 int32_t Offset = VA.getLocMemOffset()+FPDiff;
4196 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
4197 FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);
4198 FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
4200 if (Flags.isByVal()) {
4201 // Copy relative to framepointer.
4202 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset(), dl);
4203 if (!StackPtr.getNode())
4204 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
4205 getPointerTy(DAG.getDataLayout()));
4206 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(DAG.getDataLayout()),
4207 StackPtr, Source);
4209 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
4210 ArgChain,
4211 Flags, DAG, dl));
4212 } else {
4213 // Store relative to framepointer.
4214 MemOpChains2.push_back(DAG.getStore(
4215 ArgChain, dl, Arg, FIN,
4216 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI)));
4217 }
4218 }
4220 if (!MemOpChains2.empty())
4221 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
4223 // Store the return address to the appropriate stack slot.
4224 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
4225 getPointerTy(DAG.getDataLayout()),
4226 RegInfo->getSlotSize(), FPDiff, dl);
4229 // Build a sequence of copy-to-reg nodes chained together with token chain
4230 // and flag operands which copy the outgoing args into registers.
4232 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
4233 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
4234 RegsToPass[i].second, InFlag);
4235 InFlag = Chain.getValue(1);
4238 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
4239 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
4240 // In the 64-bit large code model, we have to make all calls
4241 // through a register, since the call instruction's 32-bit
4242 // pc-relative offset may not be large enough to hold the whole address.
4244 } else if (Callee->getOpcode() == ISD::GlobalAddress ||
4245 Callee->getOpcode() == ISD::ExternalSymbol) {
4246 // Lower direct calls to global addresses and external symbols. Setting
4247 // ForCall to true here has the effect of removing WrapperRIP when possible
4248 // to allow direct calls to be selected without first materializing the
4249 // address into a register.
4250 Callee = LowerGlobalOrExternal(Callee, DAG, /*ForCall=*/true);
4251 } else if (Subtarget.isTarget64BitILP32() &&
4252 Callee->getValueType(0) == MVT::i32) {
4253 // Zero-extend the 32-bit Callee address into a 64-bit value, per the x32 ABI.
4254 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
4257 // Returns a chain & a flag for retval copy to use.
4258 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
4259 SmallVector<SDValue, 8> Ops;
4261 if (!IsSibcall && isTailCall && !IsMustTail) {
4262 Chain = DAG.getCALLSEQ_END(Chain,
4263 DAG.getIntPtrConstant(NumBytesToPop, dl, true),
4264 DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
4265 InFlag = Chain.getValue(1);
4268 Ops.push_back(Chain);
4269 Ops.push_back(Callee);
4272 Ops.push_back(DAG.getConstant(FPDiff, dl, MVT::i32));
4274 // Add argument registers to the end of the list so that they are known live
4276 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
4277 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
4278 RegsToPass[i].second.getValueType()));
4280 // Add a register mask operand representing the call-preserved registers.
4281 // If HasNCSR is set (i.e. the NoCallerSavedRegisters attribute is present),
4282 // we use the X86_INTR calling convention because it has the same CSR mask
4283 // (same preserved registers).
4284 const uint32_t *Mask = RegInfo->getCallPreservedMask(
4285 MF, HasNCSR ? (CallingConv::ID)CallingConv::X86_INTR : CallConv);
4286 assert(Mask && "Missing call preserved mask for calling convention");
4288 // If this is an invoke in a 32-bit function using a funclet-based
4289 // personality, assume the function clobbers all registers. If an exception
4290 // is thrown, the runtime will not restore CSRs.
4291 // FIXME: Model this more precisely so that we can register allocate across
4292 // the normal edge and spill and fill across the exceptional edge.
4293 if (!Is64Bit && CLI.CB && isa<InvokeInst>(CLI.CB)) {
4294 const Function &CallerFn = MF.getFunction();
4295 EHPersonality Pers =
4296 CallerFn.hasPersonalityFn()
4297 ? classifyEHPersonality(CallerFn.getPersonalityFn())
4298 : EHPersonality::Unknown;
4299 if (isFuncletEHPersonality(Pers))
4300 Mask = RegInfo->getNoPreservedMask();
4303 // Define a new register mask from the existing mask.
4304 uint32_t *RegMask = nullptr;
4306 // In some calling conventions we need to remove the used physical registers
4307 // from the reg mask.
4308 if (CallConv == CallingConv::X86_RegCall || HasNCSR) {
4309 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
4311 // Allocate a new Reg Mask and copy Mask.
4312 RegMask = MF.allocateRegMask();
4313 unsigned RegMaskSize = MachineOperand::getRegMaskSize(TRI->getNumRegs());
4314 memcpy(RegMask, Mask, sizeof(RegMask[0]) * RegMaskSize);
4316 // Make sure all sub registers of the argument registers are reset in the RegMask.
4318 for (auto const &RegPair : RegsToPass)
4319 for (MCSubRegIterator SubRegs(RegPair.first, TRI, /*IncludeSelf=*/true);
4320 SubRegs.isValid(); ++SubRegs)
4321 RegMask[*SubRegs / 32] &= ~(1u << (*SubRegs % 32));
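// (A register mask is a bit vector over physical register numbers in which a
// set bit means 'preserved across the call'; bit R lives at RegMask[R / 32],
// bit position R % 32. Clearing, e.g., EAX here also clears AX/AH/AL via the
// sub-register iterator, marking the whole register as clobbered.)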
4323 // Create the RegMask Operand according to our updated mask.
4324 Ops.push_back(DAG.getRegisterMask(RegMask));
4326 // Create the RegMask Operand according to the static mask.
4327 Ops.push_back(DAG.getRegisterMask(Mask));
4330 if (InFlag.getNode())
4331 Ops.push_back(InFlag);
4335 //// If this is the first return lowered for this function, add the regs
4336 //// to the liveout set for the function.
4337 // This isn't right, although it's probably harmless on x86; liveouts
4338 // should be computed from returns not tail calls. Consider a void
4339 // function making a tail call to a function returning int.
4340 MF.getFrameInfo().setHasTailCall();
4341 SDValue Ret = DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
4342 DAG.addCallSiteInfo(Ret.getNode(), std::move(CSInfo));
4346 if (HasNoCfCheck && IsCFProtectionSupported) {
4347 Chain = DAG.getNode(X86ISD::NT_CALL, dl, NodeTys, Ops);
4349 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
4351 InFlag = Chain.getValue(1);
4352 DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
4353 DAG.addCallSiteInfo(Chain.getNode(), std::move(CSInfo));
4355 // Save heapallocsite metadata.
4357 if (MDNode *HeapAlloc = CLI.CB->getMetadata("heapallocsite"))
4358 DAG.addHeapAllocSite(Chain.getNode(), HeapAlloc);
4360 // Create the CALLSEQ_END node.
4361 unsigned NumBytesForCalleeToPop;
4362 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
4363 DAG.getTarget().Options.GuaranteedTailCallOpt))
4364 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
4365 else if (!Is64Bit && !canGuaranteeTCO(CallConv) &&
4366 !Subtarget.getTargetTriple().isOSMSVCRT() &&
4367 SR == StackStructReturn)
4368 // If this is a call to a struct-return function, the callee
4369 // pops the hidden struct pointer, so we have to push it back.
4370 // This is common for Darwin/X86, Linux & Mingw32 targets.
4371 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
4372 NumBytesForCalleeToPop = 4;
4374 NumBytesForCalleeToPop = 0; // Callee pops nothing.
4376 // Returns a flag for retval copy to use.
4378 Chain = DAG.getCALLSEQ_END(Chain,
4379 DAG.getIntPtrConstant(NumBytesToPop, dl, true),
4380 DAG.getIntPtrConstant(NumBytesForCalleeToPop, dl,
4383 InFlag = Chain.getValue(1);
4386 // Handle result values, copying them out of physregs into vregs that we return.
4388 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
4392 //===----------------------------------------------------------------------===//
4393 // Fast Calling Convention (tail call) implementation
4394 //===----------------------------------------------------------------------===//
4396 // Like std call (i.e. the callee cleans up its arguments), except that ECX is
4397 // reserved for storing the tail called function address. Only 2 registers are
4398 // free for argument passing (inreg). Tail call optimization is performed provided:
4400 // * tailcallopt is enabled
4401 // * caller/callee are fastcc
4402 // On X86_64 architecture with GOT-style position independent code only local
4403 // (within module) calls are supported at the moment.
4404 // To keep the stack aligned according to the platform ABI, the function
4405 // GetAlignedArgumentStackSize ensures that the argument delta is always a
4406 // multiple of the stack alignment. (Dynamic linkers need this - Darwin's dyld for example)
4407 // If the tail-called function (the callee) has more arguments than the caller,
4408 // the caller needs to make sure that there is room to move the RETADDR to. This
4409 // is achieved by reserving an area the size of the argument delta right after
4410 // the original RETADDR, but before the saved frame pointer or the spilled
4411 // registers, e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4).
4423 /// Align the stack size, e.g. make it 16n + 12 for a 16-byte alignment
/// requirement (the slot for the return address makes up the difference).
4426 X86TargetLowering::GetAlignedArgumentStackSize(const unsigned StackSize,
4427 SelectionDAG &DAG) const {
4428 const Align StackAlignment = Subtarget.getFrameLowering()->getStackAlign();
4429 const uint64_t SlotSize = Subtarget.getRegisterInfo()->getSlotSize();
4430 assert(StackSize % SlotSize == 0 &&
4431 "StackSize must be a multiple of SlotSize");
4432 return alignTo(StackSize + SlotSize, StackAlignment) - SlotSize;
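// Worked example (assuming a 64-bit target: SlotSize = 8, StackAlignment = 16):
//   StackSize = 48  ->  alignTo(48 + 8, 16) - 8 = 64 - 8 = 56
// The result is 8 (mod 16), so pushing the 8-byte return address restores
// 16-byte stack alignment at the callee's entry.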
4435 /// Return true if the given stack call argument is already available in the
4436 /// same relative position in the caller's incoming argument stack area.
4438 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
4439 MachineFrameInfo &MFI, const MachineRegisterInfo *MRI,
4440 const X86InstrInfo *TII, const CCValAssign &VA) {
4441 unsigned Bytes = Arg.getValueSizeInBits() / 8;
4444 // Look through nodes that don't alter the bits of the incoming value.
4445 unsigned Op = Arg.getOpcode();
4446 if (Op == ISD::ZERO_EXTEND || Op == ISD::ANY_EXTEND || Op == ISD::BITCAST) {
4447 Arg = Arg.getOperand(0);
4450 if (Op == ISD::TRUNCATE) {
4451 const SDValue &TruncInput = Arg.getOperand(0);
4452 if (TruncInput.getOpcode() == ISD::AssertZext &&
4453 cast<VTSDNode>(TruncInput.getOperand(1))->getVT() ==
4454 Arg.getValueType()) {
4455 Arg = TruncInput.getOperand(0);
4463 if (Arg.getOpcode() == ISD::CopyFromReg) {
4464 Register VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
4465 if (!Register::isVirtualRegister(VR))
4467 MachineInstr *Def = MRI->getVRegDef(VR);
4470 if (!Flags.isByVal()) {
4471 if (!TII->isLoadFromStackSlot(*Def, FI))
4474 unsigned Opcode = Def->getOpcode();
4475 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
4476 Opcode == X86::LEA64_32r) &&
4477 Def->getOperand(1).isFI()) {
4478 FI = Def->getOperand(1).getIndex();
4479 Bytes = Flags.getByValSize();
4483 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
4484 if (Flags.isByVal())
4485 // ByVal argument is passed in as a pointer but it's now being
4486 // dereferenced. e.g.
4487 // define @foo(%struct.X* %A) {
4488 // tail call @bar(%struct.X* byval %A)
4491 SDValue Ptr = Ld->getBasePtr();
4492 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
4495 FI = FINode->getIndex();
4496 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
4497 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
4498 FI = FINode->getIndex();
4499 Bytes = Flags.getByValSize();
4503 assert(FI != INT_MAX);
4504 if (!MFI.isFixedObjectIndex(FI))
4507 if (Offset != MFI.getObjectOffset(FI))
4510 // If this is not byval, check that the argument stack object is immutable.
4511 // inalloca and argument copy elision can create mutable argument stack
4512 // objects. Byval objects can be mutated, but a byval call intends to pass the mutated memory.
4514 if (!Flags.isByVal() && !MFI.isImmutableObjectIndex(FI))
4517 if (VA.getLocVT().getSizeInBits() > Arg.getValueSizeInBits()) {
4518 // If the argument location is wider than the argument type, check that any
4519 // extension flags match.
4520 if (Flags.isZExt() != MFI.isObjectZExt(FI) ||
4521 Flags.isSExt() != MFI.isObjectSExt(FI)) {
4526 return Bytes == MFI.getObjectSize(FI);
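// Illustrative match: if the caller received an i32 at fixed stack offset 8
// and the tail call forwards that same value to the callee at offset 8 with
// matching size and extension flags, no store is needed, so this argument
// does not block the sibcall.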
4529 /// Check whether the call is eligible for tail call optimization. Targets
4530 /// that want to do tail call optimization should implement this function.
4531 bool X86TargetLowering::IsEligibleForTailCallOptimization(
4532 SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
4533 bool isCalleeStructRet, bool isCallerStructRet, Type *RetTy,
4534 const SmallVectorImpl<ISD::OutputArg> &Outs,
4535 const SmallVectorImpl<SDValue> &OutVals,
4536 const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
4537 if (!mayTailCallThisCC(CalleeCC))
4540 // If -tailcallopt is specified, make fastcc functions tail-callable.
4541 MachineFunction &MF = DAG.getMachineFunction();
4542 const Function &CallerF = MF.getFunction();
4544 // If the function return type is x86_fp80 and the callee return type is not,
4545 // then the FP_EXTEND of the call result is not a nop. It's not safe to
4546 // perform a tailcall optimization here.
4547 if (CallerF.getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
4550 CallingConv::ID CallerCC = CallerF.getCallingConv();
4551 bool CCMatch = CallerCC == CalleeCC;
4552 bool IsCalleeWin64 = Subtarget.isCallingConvWin64(CalleeCC);
4553 bool IsCallerWin64 = Subtarget.isCallingConvWin64(CallerCC);
4554 bool IsGuaranteeTCO = DAG.getTarget().Options.GuaranteedTailCallOpt ||
4555 CalleeCC == CallingConv::Tail;
4557 // Win64 functions have extra shadow space for argument homing. Don't do the
4558 // sibcall if the caller and callee have mismatched expectations for this space.
4560 if (IsCalleeWin64 != IsCallerWin64)
4563 if (IsGuaranteeTCO) {
4564 if (canGuaranteeTCO(CalleeCC) && CCMatch)
4569 // Look for obvious safe cases to perform tail call optimization that do not
4570 // require ABI changes. This is what gcc calls sibcall.
4572 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
4573 // emit a special epilogue.
4574 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4575 if (RegInfo->needsStackRealignment(MF))
4578 // Also avoid sibcall optimization if either caller or callee uses struct
4579 // return semantics.
4580 if (isCalleeStructRet || isCallerStructRet)
4583 // Do not sibcall optimize vararg calls unless all arguments are passed via registers.
4585 LLVMContext &C = *DAG.getContext();
4586 if (isVarArg && !Outs.empty()) {
4587 // Optimizing for varargs on Win64 is unlikely to be safe without
4588 // additional testing.
4589 if (IsCalleeWin64 || IsCallerWin64)
4592 SmallVector<CCValAssign, 16> ArgLocs;
4593 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
4595 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
4596 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
4597 if (!ArgLocs[i].isRegLoc())
4601 // If the call result is in ST0 / ST1, it needs to be popped off the x87
4602 // stack. Therefore, if it's not used by the call it is not safe to optimize
4603 // this into a sibcall.
4604 bool Unused = false;
4605 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
4612 SmallVector<CCValAssign, 16> RVLocs;
4613 CCState CCInfo(CalleeCC, false, MF, RVLocs, C);
4614 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
4615 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
4616 CCValAssign &VA = RVLocs[i];
4617 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
4622 // Check that the call results are passed in the same way.
4623 if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
4624 RetCC_X86, RetCC_X86))
4626 // The callee has to preserve all registers the caller needs to preserve.
4627 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
4628 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
4630 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
4631 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
4635 unsigned StackArgsSize = 0;
4637 // If the callee takes no arguments then go on to check the results of the call.
4639 if (!Outs.empty()) {
4640 // Check if stack adjustment is needed. For now, do not do this if any
4641 // argument is passed on the stack.
4642 SmallVector<CCValAssign, 16> ArgLocs;
4643 CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);
4645 // Allocate shadow area for Win64
4647 CCInfo.AllocateStack(32, Align(8));
4649 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
4650 StackArgsSize = CCInfo.getNextStackOffset();
4652 if (CCInfo.getNextStackOffset()) {
4653 // Check if the arguments are already laid out in the right way as
4654 // the caller's fixed stack objects.
4655 MachineFrameInfo &MFI = MF.getFrameInfo();
4656 const MachineRegisterInfo *MRI = &MF.getRegInfo();
4657 const X86InstrInfo *TII = Subtarget.getInstrInfo();
4658 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4659 CCValAssign &VA = ArgLocs[i];
4660 SDValue Arg = OutVals[i];
4661 ISD::ArgFlagsTy Flags = Outs[i].Flags;
4662 if (VA.getLocInfo() == CCValAssign::Indirect)
4664 if (!VA.isRegLoc()) {
4665 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
4672 bool PositionIndependent = isPositionIndependent();
4673 // If the tailcall address may be in a register, then make sure it's
4674 // possible to register allocate for it. In 32-bit, the call address can
4675 // only target EAX, EDX, or ECX since the tail call must be scheduled after
4676 // callee-saved registers are restored. These happen to be the same
4677 // registers used to pass 'inreg' arguments so watch out for those.
4678 if (!Subtarget.is64Bit() && ((!isa<GlobalAddressSDNode>(Callee) &&
4679 !isa<ExternalSymbolSDNode>(Callee)) ||
4680 PositionIndependent)) {
4681 unsigned NumInRegs = 0;
4682 // In PIC we need an extra register to formulate the address computation for the callee.
4684 unsigned MaxInRegs = PositionIndependent ? 2 : 3;
4686 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
4687 CCValAssign &VA = ArgLocs[i];
4690 Register Reg = VA.getLocReg();
4693 case X86::EAX: case X86::EDX: case X86::ECX:
4694 if (++NumInRegs == MaxInRegs)
4701 const MachineRegisterInfo &MRI = MF.getRegInfo();
4702 if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
4706 bool CalleeWillPop =
4707 X86::isCalleePop(CalleeCC, Subtarget.is64Bit(), isVarArg,
4708 MF.getTarget().Options.GuaranteedTailCallOpt);
4710 if (unsigned BytesToPop =
4711 MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn()) {
4712 // If we have bytes to pop, the callee must pop them.
4713 bool CalleePopMatches = CalleeWillPop && BytesToPop == StackArgsSize;
4714 if (!CalleePopMatches)
4716 } else if (CalleeWillPop && StackArgsSize > 0) {
4717 // If we don't have bytes to pop, make sure the callee doesn't pop any.
4725 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
4726 const TargetLibraryInfo *libInfo) const {
4727 return X86::createFastISel(funcInfo, libInfo);
4730 //===----------------------------------------------------------------------===//
4731 // Other Lowering Hooks
4732 //===----------------------------------------------------------------------===//
4734 static bool MayFoldLoad(SDValue Op) {
4735 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
4738 static bool MayFoldIntoStore(SDValue Op) {
4739 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
4742 static bool MayFoldIntoZeroExtend(SDValue Op) {
4743 if (Op.hasOneUse()) {
4744 unsigned Opcode = Op.getNode()->use_begin()->getOpcode();
4745 return (ISD::ZERO_EXTEND == Opcode);
4750 static bool isTargetShuffle(unsigned Opcode) {
4752 default: return false;
4753 case X86ISD::BLENDI:
4754 case X86ISD::PSHUFB:
4755 case X86ISD::PSHUFD:
4756 case X86ISD::PSHUFHW:
4757 case X86ISD::PSHUFLW:
4759 case X86ISD::INSERTPS:
4760 case X86ISD::EXTRQI:
4761 case X86ISD::INSERTQI:
4762 case X86ISD::VALIGN:
4763 case X86ISD::PALIGNR:
4764 case X86ISD::VSHLDQ:
4765 case X86ISD::VSRLDQ:
4766 case X86ISD::MOVLHPS:
4767 case X86ISD::MOVHLPS:
4768 case X86ISD::MOVSHDUP:
4769 case X86ISD::MOVSLDUP:
4770 case X86ISD::MOVDDUP:
4773 case X86ISD::UNPCKL:
4774 case X86ISD::UNPCKH:
4775 case X86ISD::VBROADCAST:
4776 case X86ISD::VPERMILPI:
4777 case X86ISD::VPERMILPV:
4778 case X86ISD::VPERM2X128:
4779 case X86ISD::SHUF128:
4780 case X86ISD::VPERMIL2:
4781 case X86ISD::VPERMI:
4782 case X86ISD::VPPERM:
4783 case X86ISD::VPERMV:
4784 case X86ISD::VPERMV3:
4785 case X86ISD::VZEXT_MOVL:
4790 static bool isTargetShuffleVariableMask(unsigned Opcode) {
4792 default: return false;
4794 case X86ISD::PSHUFB:
4795 case X86ISD::VPERMILPV:
4796 case X86ISD::VPERMIL2:
4797 case X86ISD::VPPERM:
4798 case X86ISD::VPERMV:
4799 case X86ISD::VPERMV3:
4801 // 'Faux' Target Shuffles.
4809 static bool isTargetShuffleSplat(SDValue Op) {
4810 unsigned Opcode = Op.getOpcode();
4811 if (Opcode == ISD::EXTRACT_SUBVECTOR)
4812 return isTargetShuffleSplat(Op.getOperand(0));
4813 return Opcode == X86ISD::VBROADCAST || Opcode == X86ISD::VBROADCAST_LOAD;
4816 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
4817 MachineFunction &MF = DAG.getMachineFunction();
4818 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
4819 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
4820 int ReturnAddrIndex = FuncInfo->getRAIndex();
4822 if (ReturnAddrIndex == 0) {
4823 // Set up a frame object for the return address.
4824 unsigned SlotSize = RegInfo->getSlotSize();
4825 ReturnAddrIndex = MF.getFrameInfo().CreateFixedObject(SlotSize,
4828 FuncInfo->setRAIndex(ReturnAddrIndex);
4831 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy(DAG.getDataLayout()));
4834 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
4835 bool hasSymbolicDisplacement) {
4836 // Offset should fit into 32 bit immediate field.
4837 if (!isInt<32>(Offset))
4840 // If we don't have a symbolic displacement, we don't have any extra restrictions.
4842 if (!hasSymbolicDisplacement)
4845 // FIXME: Some tweaks might be needed for medium code model.
4846 if (M != CodeModel::Small && M != CodeModel::Kernel)
4849 // For the small code model we assume that the latest object is 16MB below the
4850 // end of the 31-bit address boundary. We may also accept pretty large negative
4851 // constants, knowing that all objects are in the positive half of the address space.
4852 if (M == CodeModel::Small && Offset < 16*1024*1024)
4855 // For the kernel code model we know that all objects reside in the negative
4856 // half of the 32-bit address space. We must not accept negative offsets, since
4857 // they may be just out of range, but we may accept pretty large positive ones.
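// Illustrative: under the kernel model 'sym + 0x10000' is accepted, while
// 'sym - 8' is rejected because a negative offset could step below the
// negative 2GB region that holds all kernel objects.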
4858 if (M == CodeModel::Kernel && Offset >= 0)
4864 /// Determines whether the callee is required to pop its own arguments.
4865 /// Callee pop is necessary to support tail calls.
4866 bool X86::isCalleePop(CallingConv::ID CallingConv,
4867 bool is64Bit, bool IsVarArg, bool GuaranteeTCO) {
4868 // If GuaranteeTCO is true, we force some calls to be callee pop so that we
4869 // can guarantee TCO.
4870 if (!IsVarArg && shouldGuaranteeTCO(CallingConv, GuaranteeTCO))
4873 switch (CallingConv) {
4876 case CallingConv::X86_StdCall:
4877 case CallingConv::X86_FastCall:
4878 case CallingConv::X86_ThisCall:
4879 case CallingConv::X86_VectorCall:
4884 /// Return true if the condition is a signed comparison operation.
4885 static bool isX86CCSigned(unsigned X86CC) {
4888 llvm_unreachable("Invalid integer condition!");
4904 static X86::CondCode TranslateIntegerX86CC(ISD::CondCode SetCCOpcode) {
4905 switch (SetCCOpcode) {
4906 default: llvm_unreachable("Invalid integer condition!");
4907 case ISD::SETEQ: return X86::COND_E;
4908 case ISD::SETGT: return X86::COND_G;
4909 case ISD::SETGE: return X86::COND_GE;
4910 case ISD::SETLT: return X86::COND_L;
4911 case ISD::SETLE: return X86::COND_LE;
4912 case ISD::SETNE: return X86::COND_NE;
4913 case ISD::SETULT: return X86::COND_B;
4914 case ISD::SETUGT: return X86::COND_A;
4915 case ISD::SETULE: return X86::COND_BE;
4916 case ISD::SETUGE: return X86::COND_AE;
4920 /// Do a one-to-one translation of a ISD::CondCode to the X86-specific
4921 /// condition code, returning the condition code and the LHS/RHS of the
4922 /// comparison to make.
4923 static X86::CondCode TranslateX86CC(ISD::CondCode SetCCOpcode, const SDLoc &DL,
4924 bool isFP, SDValue &LHS, SDValue &RHS,
4925 SelectionDAG &DAG) {
4927 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
4928 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
4929 // X > -1 -> X == 0, jump !sign.
4930 RHS = DAG.getConstant(0, DL, RHS.getValueType());
4931 return X86::COND_NS;
4933 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
4934 // X < 0 -> X == 0, jump on sign.
4937 if (SetCCOpcode == ISD::SETGE && RHSC->isNullValue()) {
4938 // X >= 0 -> X == 0, jump on !sign.
4939 return X86::COND_NS;
4941 if (SetCCOpcode == ISD::SETLT && RHSC->isOne()) {
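// X < 1 -> X <= 0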
4943 RHS = DAG.getConstant(0, DL, RHS.getValueType());
4944 return X86::COND_LE;
4948 return TranslateIntegerX86CC(SetCCOpcode);
4951 // First determine if it is required or is profitable to flip the operands.
4953 // If LHS is a foldable load, but RHS is not, flip the condition.
4954 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
4955 !ISD::isNON_EXTLoad(RHS.getNode())) {
4956 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
4957 std::swap(LHS, RHS);
4960 switch (SetCCOpcode) {
4966 std::swap(LHS, RHS);
4970 // On a floating point condition, the flags are set as follows:
4971 //  ZF  PF  CF   op
4972 //   0 | 0 | 0 | X > Y
4973 //   0 | 0 | 1 | X < Y
4974 //   1 | 0 | 0 | X == Y
4975 //   1 | 1 | 1 | unordered
4976 switch (SetCCOpcode) {
4977 default: llvm_unreachable("Condcode should be pre-legalized away");
4979 case ISD::SETEQ: return X86::COND_E;
4980 case ISD::SETOLT: // flipped
4982 case ISD::SETGT: return X86::COND_A;
4983 case ISD::SETOLE: // flipped
4985 case ISD::SETGE: return X86::COND_AE;
4986 case ISD::SETUGT: // flipped
4988 case ISD::SETLT: return X86::COND_B;
4989 case ISD::SETUGE: // flipped
4991 case ISD::SETLE: return X86::COND_BE;
4993 case ISD::SETNE: return X86::COND_NE;
4994 case ISD::SETUO: return X86::COND_P;
4995 case ISD::SETO: return X86::COND_NP;
4997 case ISD::SETUNE: return X86::COND_INVALID;
5001 /// Is there a floating point cmov for the specific X86 condition code?
5002 /// Current x86 isa includes the following FP cmov instructions:
5003 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
5004 static bool hasFPCMov(unsigned X86CC) {
5021 bool X86TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
5023 MachineFunction &MF,
5024 unsigned Intrinsic) const {
5026 const IntrinsicData* IntrData = getIntrinsicWithChain(Intrinsic);
5030 Info.flags = MachineMemOperand::MONone;
5033 switch (IntrData->Type) {
5034 case TRUNCATE_TO_MEM_VI8:
5035 case TRUNCATE_TO_MEM_VI16:
5036 case TRUNCATE_TO_MEM_VI32: {
5037 Info.opc = ISD::INTRINSIC_VOID;
5038 Info.ptrVal = I.getArgOperand(0);
5039 MVT VT = MVT::getVT(I.getArgOperand(1)->getType());
5040 MVT ScalarVT = MVT::INVALID_SIMPLE_VALUE_TYPE;
5041 if (IntrData->Type == TRUNCATE_TO_MEM_VI8)
5043 else if (IntrData->Type == TRUNCATE_TO_MEM_VI16)
5044 ScalarVT = MVT::i16;
5045 else if (IntrData->Type == TRUNCATE_TO_MEM_VI32)
5046 ScalarVT = MVT::i32;
5048 Info.memVT = MVT::getVectorVT(ScalarVT, VT.getVectorNumElements());
5049 Info.align = Align(1);
5050 Info.flags |= MachineMemOperand::MOStore;
5055 Info.opc = ISD::INTRINSIC_W_CHAIN;
5056 Info.ptrVal = nullptr;
5057 MVT DataVT = MVT::getVT(I.getType());
5058 MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
5059 unsigned NumElts = std::min(DataVT.getVectorNumElements(),
5060 IndexVT.getVectorNumElements());
5061 Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
5062 Info.align = Align(1);
5063 Info.flags |= MachineMemOperand::MOLoad;
5067 Info.opc = ISD::INTRINSIC_VOID;
5068 Info.ptrVal = nullptr;
5069 MVT DataVT = MVT::getVT(I.getArgOperand(3)->getType());
5070 MVT IndexVT = MVT::getVT(I.getArgOperand(2)->getType());
5071 unsigned NumElts = std::min(DataVT.getVectorNumElements(),
5072 IndexVT.getVectorNumElements());
5073 Info.memVT = MVT::getVectorVT(DataVT.getVectorElementType(), NumElts);
5074 Info.align = Align(1);
5075 Info.flags |= MachineMemOperand::MOStore;
5085 /// Returns true if the target can instruction select the
5086 /// specified FP immediate natively. If false, the legalizer will
5087 /// materialize the FP immediate as a load from a constant pool.
5088 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
5089 bool ForCodeSize) const {
5090 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
5091 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
5097 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
5098 ISD::LoadExtType ExtTy,
5100 assert(cast<LoadSDNode>(Load)->isSimple() && "illegal to narrow");
5102 // "ELF Handling for Thread-Local Storage" specifies that the R_X86_64_GOTTPOFF
5103 // relocation must target a movq or addq instruction: don't let the load shrink.
5104 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
5105 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
5106 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
5107 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
5109 // If this is (1) an AVX vector load with (2) multiple uses and (3) all of
5110 // those uses are extracted directly into a store, then the extract + store
5111 // can be store-folded. Therefore, it's probably not worth splitting the load.
5112 EVT VT = Load->getValueType(0);
5113 if ((VT.is256BitVector() || VT.is512BitVector()) && !Load->hasOneUse()) {
5114 for (auto UI = Load->use_begin(), UE = Load->use_end(); UI != UE; ++UI) {
5115 // Skip uses of the chain value. Result 0 of the node is the load value.
5116 if (UI.getUse().getResNo() != 0)
5119 // If this use is not an extract + store, it's probably worth splitting.
5120 if (UI->getOpcode() != ISD::EXTRACT_SUBVECTOR || !UI->hasOneUse() ||
5121 UI->use_begin()->getOpcode() != ISD::STORE)
5124 // All non-chain uses are extract + store.
5131 /// Returns true if it is beneficial to convert a load of a constant
5132 /// to just the constant itself.
5133 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
5135 assert(Ty->isIntegerTy());
5137 unsigned BitSize = Ty->getPrimitiveSizeInBits();
5138 if (BitSize == 0 || BitSize > 64)
5143 bool X86TargetLowering::reduceSelectOfFPConstantLoads(EVT CmpOpVT) const {
5144 // If we are using XMM registers in the ABI and the condition of the select is
5145 // a floating-point compare and we have blendv or conditional move, then it is
5146 // cheaper to select instead of doing a cross-register move and creating a
5147 // load that depends on the compare result.
5148 bool IsFPSetCC = CmpOpVT.isFloatingPoint() && CmpOpVT != MVT::f128;
5149 return !IsFPSetCC || !Subtarget.isTarget64BitLP64() || !Subtarget.hasAVX();
5152 bool X86TargetLowering::convertSelectOfConstantsToMath(EVT VT) const {
5153 // TODO: It might be a win to ease or lift this restriction, but the generic
5154 // folds in DAGCombiner conflict with vector folds for an AVX512 target.
5155 if (VT.isVector() && Subtarget.hasAVX512())
5161 bool X86TargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
5163 // TODO: We handle scalars using custom code, but generic combining could make
5164 // that unnecessary.
5166 if (!ISD::isConstantSplatVector(C.getNode(), MulC))
5169 // Find the type this will be legalized to. Otherwise we might prematurely
5170 // convert this to shl+add/sub and then still have to type legalize those ops.
5171 // Another choice would be to defer the decision for illegal types until
5172 // after type legalization. But constant splat vectors of i64 can't make it
5173 // through type legalization on 32-bit targets, so we would need to special-case vXi64.
5175 while (getTypeAction(Context, VT) != TypeLegal)
5176 VT = getTypeToTransformTo(Context, VT);
5178 // If vector multiply is legal, assume that's faster than shl + add/sub.
5179 // TODO: Multiply is a complex op with higher latency and lower throughput in
5180 // most implementations, so this check could be loosened based on type
5181 // and/or a CPU attribute.
5182 if (isOperationLegal(ISD::MUL, VT))
5185 // shl+add, shl+sub, shl+add+neg
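// Illustrative decompositions this enables (hypothetical inputs):
//   mul x, 9  -> (x << 3) + x       ; (MulC - 1) is a power of 2
//   mul x, 7  -> (x << 3) - x       ; (MulC + 1) is a power of 2
//   mul x, -7 -> x - (x << 3)       ; (1 - MulC) is a power of 2
//   mul x, -9 -> -((x << 3) + x)    ; -(MulC + 1) is a power of 2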
5186 return (MulC + 1).isPowerOf2() || (MulC - 1).isPowerOf2() ||
5187 (1 - MulC).isPowerOf2() || (-(MulC + 1)).isPowerOf2();
5190 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT,
5191 unsigned Index) const {
5192 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
5195 // Mask vectors support all subregister combinations and operations that
5196 // extract half of vector.
5197 if (ResVT.getVectorElementType() == MVT::i1)
5198 return Index == 0 || ((ResVT.getSizeInBits() == SrcVT.getSizeInBits()*2) &&
5199 (Index == ResVT.getVectorNumElements()));
5201 return (Index % ResVT.getVectorNumElements()) == 0;
5204 bool X86TargetLowering::shouldScalarizeBinop(SDValue VecOp) const {
5205 unsigned Opc = VecOp.getOpcode();
5207 // Assume target opcodes can't be scalarized.
5208 // TODO - do we have any exceptions?
5209 if (Opc >= ISD::BUILTIN_OP_END)
5212 // If the vector op is not supported, try to convert to scalar.
5213 EVT VecVT = VecOp.getValueType();
5214 if (!isOperationLegalOrCustomOrPromote(Opc, VecVT))
5217 // If the vector op is supported, but the scalar op is not, the transform may
5218 // not be worthwhile.
5219 EVT ScalarVT = VecVT.getScalarType();
5220 return isOperationLegalOrCustomOrPromote(Opc, ScalarVT);
5223 bool X86TargetLowering::shouldFormOverflowOp(unsigned Opcode, EVT VT,
5225 // TODO: Allow vectors?
5228 return VT.isSimple() || !isOperationExpand(Opcode, VT);
5231 bool X86TargetLowering::isCheapToSpeculateCttz() const {
5232 // Speculate cttz only if we can directly use TZCNT.
5233 return Subtarget.hasBMI();
5236 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
5237 // Speculate ctlz only if we can directly use LZCNT.
5238 return Subtarget.hasLZCNT();
5241 bool X86TargetLowering::isLoadBitCastBeneficial(EVT LoadVT, EVT BitcastVT,
5242 const SelectionDAG &DAG,
5243 const MachineMemOperand &MMO) const {
5244 if (!Subtarget.hasAVX512() && !LoadVT.isVector() && BitcastVT.isVector() &&
5245 BitcastVT.getVectorElementType() == MVT::i1)
5248 if (!Subtarget.hasDQI() && BitcastVT == MVT::v8i1 && LoadVT == MVT::i8)
5251 // If both types are legal vectors, it's always ok to convert them.
5252 if (LoadVT.isVector() && BitcastVT.isVector() &&
5253 isTypeLegal(LoadVT) && isTypeLegal(BitcastVT))
5256 return TargetLowering::isLoadBitCastBeneficial(LoadVT, BitcastVT, DAG, MMO);
5259 bool X86TargetLowering::canMergeStoresTo(unsigned AddressSpace, EVT MemVT,
5260 const SelectionDAG &DAG) const {
5261 // Do not merge to float value size (128 bits) if no implicit
5262 // float attribute is set.
5263 bool NoFloat = DAG.getMachineFunction().getFunction().hasFnAttribute(
5264 Attribute::NoImplicitFloat);
5267 unsigned MaxIntSize = Subtarget.is64Bit() ? 64 : 32;
5268 return (MemVT.getSizeInBits() <= MaxIntSize);
5270 // Make sure we don't merge stores wider than our preferred vector width.
5272 if (MemVT.getSizeInBits() > Subtarget.getPreferVectorWidth())
5277 bool X86TargetLowering::isCtlzFast() const {
5278 return Subtarget.hasFastLZCNT();
5281 bool X86TargetLowering::isMaskAndCmp0FoldingBeneficial(
5282 const Instruction &AndI) const {
5286 bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
5287 EVT VT = Y.getValueType();
5292 if (!Subtarget.hasBMI())
5295 // There are only 32-bit and 64-bit forms for 'andn'.
5296 if (VT != MVT::i32 && VT != MVT::i64)
5299 return !isa<ConstantSDNode>(Y);
5302 bool X86TargetLowering::hasAndNot(SDValue Y) const {
5303 EVT VT = Y.getValueType();
5306 return hasAndNotCompare(Y);
5310 if (!Subtarget.hasSSE1() || VT.getSizeInBits() < 128)
5313 if (VT == MVT::v4i32)
5316 return Subtarget.hasSSE2();
5319 bool X86TargetLowering::hasBitTest(SDValue X, SDValue Y) const {
5320 return X.getValueType().isScalarInteger(); // 'bt'
5323 bool X86TargetLowering::
5324 shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
5325 SDValue X, ConstantSDNode *XC, ConstantSDNode *CC, SDValue Y,
5326 unsigned OldShiftOpcode, unsigned NewShiftOpcode,
5327 SelectionDAG &DAG) const {
5328 // Does baseline recommend not to perform the fold by default?
5329 if (!TargetLowering::shouldProduceAndByConstByHoistingConstFromShiftsLHSOfAnd(
5330 X, XC, CC, Y, OldShiftOpcode, NewShiftOpcode, DAG))
5332 // For scalars this transform is always beneficial.
5333 if (X.getValueType().isScalarInteger())
5335 // If all the shift amounts are identical, then transform is beneficial even
5336 // with rudimentary SSE2 shifts.
5337 if (DAG.isSplatValue(Y, /*AllowUndefs=*/true))
5339 // If we have AVX2 with its powerful shift operations, then it's also good.
5340 if (Subtarget.hasAVX2())
5342 // Pre-AVX2 vector codegen for this pattern is best for variant with 'shl'.
5343 return NewShiftOpcode == ISD::SHL;
5346 bool X86TargetLowering::shouldFoldConstantShiftPairToMask(
5347 const SDNode *N, CombineLevel Level) const {
5348 assert(((N->getOpcode() == ISD::SHL &&
5349 N->getOperand(0).getOpcode() == ISD::SRL) ||
5350 (N->getOpcode() == ISD::SRL &&
5351 N->getOperand(0).getOpcode() == ISD::SHL)) &&
5352 "Expected shift-shift mask");
5353 EVT VT = N->getValueType(0);
5354 if ((Subtarget.hasFastVectorShiftMasks() && VT.isVector()) ||
5355 (Subtarget.hasFastScalarShiftMasks() && !VT.isVector())) {
5356 // Only fold if the shift values are equal - so it folds to AND.
5357 // TODO - we should fold if either is a non-uniform vector but we don't do
5358 // the fold for non-splats yet.
5359 return N->getOperand(1) == N->getOperand(0).getOperand(1);
5361 return TargetLoweringBase::shouldFoldConstantShiftPairToMask(N, Level);
5364 bool X86TargetLowering::shouldFoldMaskToVariableShiftPair(SDValue Y) const {
5365 EVT VT = Y.getValueType();
5367 // For vectors, we don't have a preference, but we probably want a mask.
5371 // 64-bit shifts on 32-bit targets produce really bad bloated code.
5372 if (VT == MVT::i64 && !Subtarget.is64Bit())
5378 bool X86TargetLowering::shouldExpandShift(SelectionDAG &DAG,
5380 if (DAG.getMachineFunction().getFunction().hasMinSize() &&
5381 !Subtarget.isOSWindows())
5386 bool X86TargetLowering::shouldSplatInsEltVarIndex(EVT VT) const {
5387 // Any legal vector type can be splatted more efficiently than
5388 // loading/spilling from memory.
5389 return isTypeLegal(VT);
5392 MVT X86TargetLowering::hasFastEqualityCompare(unsigned NumBits) const {
5393 MVT VT = MVT::getIntegerVT(NumBits);
5394 if (isTypeLegal(VT))
5397 // PMOVMSKB can handle this.
5398 if (NumBits == 128 && isTypeLegal(MVT::v16i8))
5401 // VPMOVMSKB can handle this.
5402 if (NumBits == 256 && isTypeLegal(MVT::v32i8))
5405 // TODO: Allow 64-bit type for 32-bit target.
5406 // TODO: 512-bit types should be allowed, but make sure that those
5407 // cases are handled in combineVectorSizedSetCCEquality().
5409 return MVT::INVALID_SIMPLE_VALUE_TYPE;
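// E.g. (illustrative) a 16-byte equality test can become PCMPEQB on v16i8
// followed by PMOVMSKB and a compare of the mask against 0xFFFF, instead of
// a chain of scalar compares; see combineVectorSizedSetCCEquality().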
5412 /// Val is the undef sentinel value or equal to the specified value.
5413 static bool isUndefOrEqual(int Val, int CmpVal) {
5414 return ((Val == SM_SentinelUndef) || (Val == CmpVal));
5417 /// Val is either the undef or zero sentinel value.
5418 static bool isUndefOrZero(int Val) {
5419 return ((Val == SM_SentinelUndef) || (Val == SM_SentinelZero));
5422 /// Return true if every element in Mask, beginning from position Pos and ending
5423 /// in Pos+Size is the undef sentinel value.
5424 static bool isUndefInRange(ArrayRef<int> Mask, unsigned Pos, unsigned Size) {
5425 return llvm::all_of(Mask.slice(Pos, Size),
5426 [](int M) { return M == SM_SentinelUndef; });
5429 /// Return true if the mask creates a vector whose lower half is undefined.
5430 static bool isUndefLowerHalf(ArrayRef<int> Mask) {
5431 unsigned NumElts = Mask.size();
5432 return isUndefInRange(Mask, 0, NumElts / 2);
5435 /// Return true if the mask creates a vector whose upper half is undefined.
5436 static bool isUndefUpperHalf(ArrayRef<int> Mask) {
5437 unsigned NumElts = Mask.size();
5438 return isUndefInRange(Mask, NumElts / 2, NumElts / 2);
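// E.g. for a 4-element mask, <0, 5, -1, -1> has an undef upper half and
// <-1, -1, 0, 5> has an undef lower half.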
5441 /// Return true if Val falls within the specified range [Low, Hi).
5442 static bool isInRange(int Val, int Low, int Hi) {
5443 return (Val >= Low && Val < Hi);
5446 /// Return true if the value of any element in Mask falls within the specified range [Low, Hi).
5448 static bool isAnyInRange(ArrayRef<int> Mask, int Low, int Hi) {
5449 return llvm::any_of(Mask, [Low, Hi](int M) { return isInRange(M, Low, Hi); });
5452 /// Return true if the value of any element in Mask is the zero sentinel value.
5453 static bool isAnyZero(ArrayRef<int> Mask) {
5454 return llvm::any_of(Mask, [](int M) { return M == SM_SentinelZero; });
5457 /// Return true if the value of any element in Mask is the zero or undef
5458 /// sentinel values.
5459 static bool isAnyZeroOrUndef(ArrayRef<int> Mask) {
5460 return llvm::any_of(Mask, [](int M) {
5461 return M == SM_SentinelZero || M == SM_SentinelUndef;
5465 /// Return true if Val is undef or if its value falls within the
5466 /// specified range [Low, Hi).
5467 static bool isUndefOrInRange(int Val, int Low, int Hi) {
5468 return (Val == SM_SentinelUndef) || isInRange(Val, Low, Hi);
5471 /// Return true if every element in Mask is undef or if its value
5472 /// falls within the specified range [Low, Hi).
5473 static bool isUndefOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
5474 return llvm::all_of(
5475 Mask, [Low, Hi](int M) { return isUndefOrInRange(M, Low, Hi); });
5478 /// Return true if Val is undef, zero or if its value falls within the
5479 /// specified range [Low, Hi).
5480 static bool isUndefOrZeroOrInRange(int Val, int Low, int Hi) {
5481 return isUndefOrZero(Val) || isInRange(Val, Low, Hi);
5484 /// Return true if every element in Mask is undef, zero or if its value
5485 /// falls within the specified range [Low, Hi).
5486 static bool isUndefOrZeroOrInRange(ArrayRef<int> Mask, int Low, int Hi) {
5487 return llvm::all_of(
5488 Mask, [Low, Hi](int M) { return isUndefOrZeroOrInRange(M, Low, Hi); });
5491 /// Return true if every element in Mask, beginning
5492 /// from position Pos and ending in Pos + Size, falls within the specified
5493 /// sequence (Low, Low + Step, ..., Low + (Size - 1) * Step) or is undef.
5494 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask, unsigned Pos,
5495 unsigned Size, int Low, int Step = 1) {
5496 for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
5497 if (!isUndefOrEqual(Mask[i], Low))
5502 /// Return true if every element in Mask, beginning
5503 /// from position Pos and ending in Pos+Size, falls within the specified
5504 /// sequential range [Low, Low+Size), or is undef or is zero.
5505 static bool isSequentialOrUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
5506 unsigned Size, int Low,
5508 for (unsigned i = Pos, e = Pos + Size; i != e; ++i, Low += Step)
5509 if (!isUndefOrZero(Mask[i]) && Mask[i] != Low)
5514 /// Return true if every element in Mask, beginning
5515 /// from position Pos and ending in Pos+Size is undef or is zero.
5516 static bool isUndefOrZeroInRange(ArrayRef<int> Mask, unsigned Pos,
5518 return llvm::all_of(Mask.slice(Pos, Size),
5519 [](int M) { return isUndefOrZero(M); });
5522 /// Helper function to test whether a shuffle mask could be
5523 /// simplified by widening the elements being shuffled.
5525 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
5526 /// leaves it in an unspecified state.
5528 /// NOTE: This must handle normal vector shuffle masks and *target* vector
5529 /// shuffle masks. The latter have the special property of a '-2' representing
5530 /// a zero-ed lane of a vector.
5531 static bool canWidenShuffleElements(ArrayRef<int> Mask,
5532 SmallVectorImpl<int> &WidenedMask) {
5533 WidenedMask.assign(Mask.size() / 2, 0);
5534 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
5535 int M0 = Mask[i];
5536 int M1 = Mask[i + 1];
5538 // If both elements are undef, its trivial.
5539 if (M0 == SM_SentinelUndef && M1 == SM_SentinelUndef) {
5540 WidenedMask[i / 2] = SM_SentinelUndef;
5544 // Check for an undef mask and a mask value properly aligned to fit with
5545 // a pair of values. If we find such a case, use the non-undef mask's value.
5546 if (M0 == SM_SentinelUndef && M1 >= 0 && (M1 % 2) == 1) {
5547 WidenedMask[i / 2] = M1 / 2;
5550 if (M1 == SM_SentinelUndef && M0 >= 0 && (M0 % 2) == 0) {
5551 WidenedMask[i / 2] = M0 / 2;
5555 // When zeroing, we need to spread the zeroing across both lanes to widen.
5556 if (M0 == SM_SentinelZero || M1 == SM_SentinelZero) {
5557 if ((M0 == SM_SentinelZero || M0 == SM_SentinelUndef) &&
5558 (M1 == SM_SentinelZero || M1 == SM_SentinelUndef)) {
5559 WidenedMask[i / 2] = SM_SentinelZero;
5565 // Finally check if the two mask values are adjacent and aligned with a pair.
5567 if (M0 != SM_SentinelUndef && (M0 % 2) == 0 && (M0 + 1) == M1) {
5568 WidenedMask[i / 2] = M0 / 2;
5572 // Otherwise we can't safely widen the elements used in this shuffle.
5575 assert(WidenedMask.size() == Mask.size() / 2 &&
5576 "Incorrect size of mask after widening the elements!");
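// Worked examples (-1 = undef, -2 = zero):
//   <0, 1, 6, 7>    -> <0, 3>    (both pairs adjacent and aligned)
//   <-1, 3, 4, 5>   -> <1, 2>    (undef lane adopts its partner's pair)
//   <-2, -1, 0, 1>  -> <-2, 0>   (zero spreads across the whole pair)
//   <1, 2, 4, 5>    -> fails     (<1, 2> is not an aligned pair)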
5581 static bool canWidenShuffleElements(ArrayRef<int> Mask,
5582 const APInt &Zeroable,
5584 SmallVectorImpl<int> &WidenedMask) {
5585 // Create an alternative mask with info about zeroable elements.
5586 // Here we do not set undef elements as zeroable.
5587 SmallVector<int, 64> ZeroableMask(Mask.begin(), Mask.end());
5589 assert(!Zeroable.isNullValue() && "V2's non-undef elements are used?!");
5590 for (int i = 0, Size = Mask.size(); i != Size; ++i)
5591 if (Mask[i] != SM_SentinelUndef && Zeroable[i])
5592 ZeroableMask[i] = SM_SentinelZero;
5594 return canWidenShuffleElements(ZeroableMask, WidenedMask);
5597 static bool canWidenShuffleElements(ArrayRef<int> Mask) {
5598 SmallVector<int, 32> WidenedMask;
5599 return canWidenShuffleElements(Mask, WidenedMask);
5602 // Attempt to narrow/widen shuffle mask until it matches the target number of elements.
5604 static bool scaleShuffleElements(ArrayRef<int> Mask, unsigned NumDstElts,
5605 SmallVectorImpl<int> &ScaledMask) {
5606 unsigned NumSrcElts = Mask.size();
5607 assert(((NumSrcElts % NumDstElts) == 0 || (NumDstElts % NumSrcElts) == 0) &&
5608 "Illegal shuffle scale factor");
5610 // Narrowing is guaranteed to work.
5611 if (NumDstElts >= NumSrcElts) {
5612 int Scale = NumDstElts / NumSrcElts;
5613 llvm::narrowShuffleMaskElts(Scale, Mask, ScaledMask);
5617 // We have to repeat the widening until we reach the target size, but we can
5618 // split out the first widening as it sets up ScaledMask for us.
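// E.g. widening an 8-element mask to 2 elements (illustrative):
//   <0, 1, 2, 3, 8, 9, 10, 11> -> <0, 1, 4, 5> -> <0, 2>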
5619 if (canWidenShuffleElements(Mask, ScaledMask)) {
5620 while (ScaledMask.size() > NumDstElts) {
5621 SmallVector<int, 16> WidenedMask;
5622 if (!canWidenShuffleElements(ScaledMask, WidenedMask))
5624 ScaledMask = std::move(WidenedMask);
5632 /// Returns true if Elt is a constant zero or a floating point constant +0.0.
5633 bool X86::isZeroNode(SDValue Elt) {
5634 return isNullConstant(Elt) || isNullFPConstant(Elt);
5637 // Build a vector of constants.
5638 // Use an UNDEF node if MaskElt == -1.
5639 // Split 64-bit constants in 32-bit mode.
5640 static SDValue getConstVector(ArrayRef<int> Values, MVT VT, SelectionDAG &DAG,
5641 const SDLoc &dl, bool IsMask = false) {
5643 SmallVector<SDValue, 32> Ops;
5646 MVT ConstVecVT = VT;
5647 unsigned NumElts = VT.getVectorNumElements();
5648 bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
5649 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
5650 ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
5654 MVT EltVT = ConstVecVT.getVectorElementType();
5655 for (unsigned i = 0; i < NumElts; ++i) {
5656 bool IsUndef = Values[i] < 0 && IsMask;
5657 SDValue OpNode = IsUndef ? DAG.getUNDEF(EltVT) :
5658 DAG.getConstant(Values[i], dl, EltVT);
5659 Ops.push_back(OpNode);
5661 Ops.push_back(IsUndef ? DAG.getUNDEF(EltVT) :
5662 DAG.getConstant(0, dl, EltVT));
5664 SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
5666 ConstsNode = DAG.getBitcast(VT, ConstsNode);
5670 static SDValue getConstVector(ArrayRef<APInt> Bits, APInt &Undefs,
5671 MVT VT, SelectionDAG &DAG, const SDLoc &dl) {
5672 assert(Bits.size() == Undefs.getBitWidth() &&
5673 "Unequal constant and undef arrays");
5674 SmallVector<SDValue, 32> Ops;
5677 MVT ConstVecVT = VT;
5678 unsigned NumElts = VT.getVectorNumElements();
5679 bool In64BitMode = DAG.getTargetLoweringInfo().isTypeLegal(MVT::i64);
5680 if (!In64BitMode && VT.getVectorElementType() == MVT::i64) {
5681 ConstVecVT = MVT::getVectorVT(MVT::i32, NumElts * 2);
5685 MVT EltVT = ConstVecVT.getVectorElementType();
5686 for (unsigned i = 0, e = Bits.size(); i != e; ++i) {
5688 Ops.append(Split ? 2 : 1, DAG.getUNDEF(EltVT));
5691 const APInt &V = Bits[i];
5692 assert(V.getBitWidth() == VT.getScalarSizeInBits() && "Unexpected sizes");
5694 Ops.push_back(DAG.getConstant(V.trunc(32), dl, EltVT));
5695 Ops.push_back(DAG.getConstant(V.lshr(32).trunc(32), dl, EltVT));
5696 } else if (EltVT == MVT::f32) {
5697 APFloat FV(APFloat::IEEEsingle(), V);
5698 Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
5699 } else if (EltVT == MVT::f64) {
5700 APFloat FV(APFloat::IEEEdouble(), V);
5701 Ops.push_back(DAG.getConstantFP(FV, dl, EltVT));
5703 Ops.push_back(DAG.getConstant(V, dl, EltVT));
5707 SDValue ConstsNode = DAG.getBuildVector(ConstVecVT, dl, Ops);
5708 return DAG.getBitcast(VT, ConstsNode);
5711 /// Returns a vector of specified type with all zero elements.
5712 static SDValue getZeroVector(MVT VT, const X86Subtarget &Subtarget,
5713 SelectionDAG &DAG, const SDLoc &dl) {
5714 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector() ||
5715 VT.getVectorElementType() == MVT::i1) &&
5716 "Unexpected vector type");
5718 // Try to build SSE/AVX zero vectors as <N x i32> bitcasted to their dest
5719 // type. This ensures they get CSE'd. But if the integer type is not
5720 // available, use a floating-point +0.0 instead.
5722 if (!Subtarget.hasSSE2() && VT.is128BitVector()) {
5723 Vec = DAG.getConstantFP(+0.0, dl, MVT::v4f32);
5724 } else if (VT.isFloatingPoint()) {
5725 Vec = DAG.getConstantFP(+0.0, dl, VT);
5726 } else if (VT.getVectorElementType() == MVT::i1) {
5727 assert((Subtarget.hasBWI() || VT.getVectorNumElements() <= 16) &&
5728 "Unexpected vector type");
5729 Vec = DAG.getConstant(0, dl, VT);
5731 unsigned Num32BitElts = VT.getSizeInBits() / 32;
5732 Vec = DAG.getConstant(0, dl, MVT::getVectorVT(MVT::i32, Num32BitElts));
5734 return DAG.getBitcast(VT, Vec);
5737 static SDValue extractSubVector(SDValue Vec, unsigned IdxVal, SelectionDAG &DAG,
5738 const SDLoc &dl, unsigned vectorWidth) {
5739 EVT VT = Vec.getValueType();
5740 EVT ElVT = VT.getVectorElementType();
5741 unsigned Factor = VT.getSizeInBits()/vectorWidth;
5742 EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
5743 VT.getVectorNumElements()/Factor);
5745 // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR
5746 unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();
5747 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
5749 // This is the index of the first element of the vectorWidth-bit chunk
5750 // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
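// E.g. extracting a 128-bit chunk of v8f32 gives ElemsPerChunk = 4, so
// IdxVal = 5 is rounded down to 4 (the start of the second chunk).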
5751 IdxVal &= ~(ElemsPerChunk - 1);
5753 // If the input is a buildvector just emit a smaller one.
5754 if (Vec.getOpcode() == ISD::BUILD_VECTOR)
5755 return DAG.getBuildVector(ResultVT, dl,
5756 Vec->ops().slice(IdxVal, ElemsPerChunk));
5758 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
5759 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
5762 /// Generate a DAG to grab 128-bits from a vector > 128 bits. This
5763 /// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
5764 /// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
5765 /// instructions or a simple subregister reference. Idx is an index in the
5766 /// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
5767 /// lowering EXTRACT_VECTOR_ELT operations easier.
5768 static SDValue extract128BitVector(SDValue Vec, unsigned IdxVal,
5769 SelectionDAG &DAG, const SDLoc &dl) {
5770 assert((Vec.getValueType().is256BitVector() ||
5771 Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
5772 return extractSubVector(Vec, IdxVal, DAG, dl, 128);
5775 /// Generate a DAG to grab 256-bits from a 512-bit vector.
5776 static SDValue extract256BitVector(SDValue Vec, unsigned IdxVal,
5777 SelectionDAG &DAG, const SDLoc &dl) {
5778 assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
5779 return extractSubVector(Vec, IdxVal, DAG, dl, 256);
5782 static SDValue insertSubVector(SDValue Result, SDValue Vec, unsigned IdxVal,
5783 SelectionDAG &DAG, const SDLoc &dl,
5784 unsigned vectorWidth) {
5785 assert((vectorWidth == 128 || vectorWidth == 256) &&
5786 "Unsupported vector width");
5787 // Inserting UNDEF is a no-op: just return Result.
5790 EVT VT = Vec.getValueType();
5791 EVT ElVT = VT.getVectorElementType();
5792 EVT ResultVT = Result.getValueType();
5794 // Insert the relevant vectorWidth bits.
5795 unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();
5796 assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");
5798 // This is the index of the first element of the vectorWidth-bit chunk
5799 // we want. Since ElemsPerChunk is a power of 2 just need to clear bits.
5800 IdxVal &= ~(ElemsPerChunk - 1);
5802 SDValue VecIdx = DAG.getIntPtrConstant(IdxVal, dl);
5803 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
5806 /// Generate a DAG to put 128-bits into a vector > 128 bits. This
5807 /// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
5808 /// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
5809 /// simple superregister reference. Idx is an index in the 128 bits
5810 /// we want. It need not be aligned to a 128-bit boundary. That makes
5811 /// lowering INSERT_VECTOR_ELT operations easier.
5812 static SDValue insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
5813 SelectionDAG &DAG, const SDLoc &dl) {
5814 assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
5815 return insertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
5818 /// Widen a vector to a larger size with the same scalar type, with the new
5819 /// elements either zero or undef.
5820 static SDValue widenSubVector(MVT VT, SDValue Vec, bool ZeroNewElements,
5821 const X86Subtarget &Subtarget, SelectionDAG &DAG,
5823 assert(Vec.getValueSizeInBits() < VT.getSizeInBits() &&
5824 Vec.getValueType().getScalarType() == VT.getScalarType() &&
5825 "Unsupported vector widening type");
5826 SDValue Res = ZeroNewElements ? getZeroVector(VT, Subtarget, DAG, dl)
5828 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT, Res, Vec,
5829 DAG.getIntPtrConstant(0, dl));
5832 /// Widen a vector to a larger size with the same scalar type, with the new
5833 /// elements either zero or undef.
5834 static SDValue widenSubVector(SDValue Vec, bool ZeroNewElements,
5835 const X86Subtarget &Subtarget, SelectionDAG &DAG,
5836 const SDLoc &dl, unsigned WideSizeInBits) {
5837 assert(Vec.getValueSizeInBits() < WideSizeInBits &&
5838 (WideSizeInBits % Vec.getScalarValueSizeInBits()) == 0 &&
5839 "Unsupported vector widening type");
5840 unsigned WideNumElts = WideSizeInBits / Vec.getScalarValueSizeInBits();
5841 MVT SVT = Vec.getSimpleValueType().getScalarType();
5842 MVT VT = MVT::getVectorVT(SVT, WideNumElts);
5843 return widenSubVector(VT, Vec, ZeroNewElements, Subtarget, DAG, dl);
5846 // Helper function to collect subvector ops that are concatenated together,
5847 // either by ISD::CONCAT_VECTORS or a ISD::INSERT_SUBVECTOR series.
5848 // The subvectors in Ops are guaranteed to be the same type.
5849 static bool collectConcatOps(SDNode *N, SmallVectorImpl<SDValue> &Ops) {
5850 assert(Ops.empty() && "Expected an empty ops vector");
5852 if (N->getOpcode() == ISD::CONCAT_VECTORS) {
5853 Ops.append(N->op_begin(), N->op_end());
5857 if (N->getOpcode() == ISD::INSERT_SUBVECTOR) {
5858 SDValue Src = N->getOperand(0);
5859 SDValue Sub = N->getOperand(1);
5860 const APInt &Idx = N->getConstantOperandAPInt(2);
5861 EVT VT = Src.getValueType();
5862 EVT SubVT = Sub.getValueType();
5864 // TODO - Handle more general insert_subvector chains.
5865 if (VT.getSizeInBits() == (SubVT.getSizeInBits() * 2) &&
5866 Idx == (VT.getVectorNumElements() / 2)) {
5867 // insert_subvector(insert_subvector(undef, x, lo), y, hi)
5868 if (Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
5869 Src.getOperand(1).getValueType() == SubVT &&
5870 isNullConstant(Src.getOperand(2))) {
5871 Ops.push_back(Src.getOperand(1));
5875 // insert_subvector(x, extract_subvector(x, lo), hi)
5876 if (Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
5877 Sub.getOperand(0) == Src && isNullConstant(Sub.getOperand(1))) {
5887 static std::pair<SDValue, SDValue> splitVector(SDValue Op, SelectionDAG &DAG,
5889 EVT VT = Op.getValueType();
5890 unsigned NumElems = VT.getVectorNumElements();
5891 unsigned SizeInBits = VT.getSizeInBits();
5892 assert((NumElems % 2) == 0 && (SizeInBits % 2) == 0 &&
5893 "Can't split odd sized vector");
5895 SDValue Lo = extractSubVector(Op, 0, DAG, dl, SizeInBits / 2);
5896 SDValue Hi = extractSubVector(Op, NumElems / 2, DAG, dl, SizeInBits / 2);
5897 return std::make_pair(Lo, Hi);
5900 // Split a unary integer op into 2 half-sized ops.
5901 static SDValue splitVectorIntUnary(SDValue Op, SelectionDAG &DAG) {
5902 EVT VT = Op.getValueType();
5904 // Make sure we only try to split 256/512-bit types to avoid creating narrow vectors.
5906 assert((Op.getOperand(0).getValueType().is256BitVector() ||
5907 Op.getOperand(0).getValueType().is512BitVector()) &&
5908 (VT.is256BitVector() || VT.is512BitVector()) && "Unsupported VT!");
5909 assert(Op.getOperand(0).getValueType().getVectorNumElements() ==
5910 VT.getVectorNumElements() &&
5911 "Unexpected VTs!");
5913 SDLoc dl(Op);
5915 // Extract the Lo/Hi vectors
5916 SDValue Lo, Hi;
5917 std::tie(Lo, Hi) = splitVector(Op.getOperand(0), DAG, dl);
5919 EVT LoVT, HiVT;
5920 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
5921 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
5922 DAG.getNode(Op.getOpcode(), dl, LoVT, Lo),
5923 DAG.getNode(Op.getOpcode(), dl, HiVT, Hi));
5924 }
5926 /// Break a binary integer operation into 2 half sized ops and then
5927 /// concatenate the result back.
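/// For example (an illustrative sketch), a v64i8 ISD::ADD on an AVX2 target
/// without AVX512BW becomes:
///   SDValue Lo = DAG.getNode(ISD::ADD, dl, MVT::v32i8, LHS1, RHS1);
///   SDValue Hi = DAG.getNode(ISD::ADD, dl, MVT::v32i8, LHS2, RHS2);
///   DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i8, Lo, Hi);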
5928 static SDValue splitVectorIntBinary(SDValue Op, SelectionDAG &DAG) {
5929 EVT VT = Op.getValueType();
5931 // Sanity check that all the types match.
5932 assert(Op.getOperand(0).getValueType() == VT &&
5933 Op.getOperand(1).getValueType() == VT && "Unexpected VTs!");
5934 assert((VT.is256BitVector() || VT.is512BitVector()) && "Unsupported VT!");
5936 SDLoc dl(Op);
5938 // Extract the LHS Lo/Hi vectors
5939 SDValue LHS1, LHS2;
5940 std::tie(LHS1, LHS2) = splitVector(Op.getOperand(0), DAG, dl);
5942 // Extract the RHS Lo/Hi vectors
5943 SDValue RHS1, RHS2;
5944 std::tie(RHS1, RHS2) = splitVector(Op.getOperand(1), DAG, dl);
5946 EVT LoVT, HiVT;
5947 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
5948 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
5949 DAG.getNode(Op.getOpcode(), dl, LoVT, LHS1, RHS1),
5950 DAG.getNode(Op.getOpcode(), dl, HiVT, LHS2, RHS2));
5951 }
5953 // Helper for splitting operands of an operation to legal target size and
5954 // apply a function on each part.
5955 // Useful for operations that are available on SSE2 in 128-bit, on AVX2 in
5956 // 256-bit and on AVX512BW in 512-bit. The argument VT is the type used for
5957 // deciding if/how to split Ops. Ops elements do *not* have to be of type VT.
5958 // The argument Builder is a function that will be applied on each split part:
5959 // SDValue Builder(SelectionDAG &G, const SDLoc &DL, ArrayRef<SDValue> Ops)
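//
// Example usage (a sketch modelled on the PMADDWD lowering later in this
// file; the lambda name is illustrative):
//   auto PMADDWDBuilder = [](SelectionDAG &G, const SDLoc &DL,
//                            ArrayRef<SDValue> Ops) {
//     MVT OpVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
//     return G.getNode(X86ISD::VPMADDWD, DL, OpVT, Ops);
//   };
//   SDValue Res = SplitOpsAndApply(DAG, Subtarget, DL, VT, {N0, N1},
//                                  PMADDWDBuilder);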
5960 template <typename F>
5961 SDValue SplitOpsAndApply(SelectionDAG &DAG, const X86Subtarget &Subtarget,
5962 const SDLoc &DL, EVT VT, ArrayRef<SDValue> Ops,
5963 F Builder, bool CheckBWI = true) {
5964 assert(Subtarget.hasSSE2() && "Target assumed to support at least SSE2");
5965 unsigned NumSubs = 1;
5966 if ((CheckBWI && Subtarget.useBWIRegs()) ||
5967 (!CheckBWI && Subtarget.useAVX512Regs())) {
5968 if (VT.getSizeInBits() > 512) {
5969 NumSubs = VT.getSizeInBits() / 512;
5970 assert((VT.getSizeInBits() % 512) == 0 && "Illegal vector size");
5971 }
5972 } else if (Subtarget.hasAVX2()) {
5973 if (VT.getSizeInBits() > 256) {
5974 NumSubs = VT.getSizeInBits() / 256;
5975 assert((VT.getSizeInBits() % 256) == 0 && "Illegal vector size");
5976 }
5977 } else {
5978 if (VT.getSizeInBits() > 128) {
5979 NumSubs = VT.getSizeInBits() / 128;
5980 assert((VT.getSizeInBits() % 128) == 0 && "Illegal vector size");
5981 }
5982 }
5984 if (NumSubs == 1)
5985 return Builder(DAG, DL, Ops);
5987 SmallVector<SDValue, 4> Subs;
5988 for (unsigned i = 0; i != NumSubs; ++i) {
5989 SmallVector<SDValue, 2> SubOps;
5990 for (SDValue Op : Ops) {
5991 EVT OpVT = Op.getValueType();
5992 unsigned NumSubElts = OpVT.getVectorNumElements() / NumSubs;
5993 unsigned SizeSub = OpVT.getSizeInBits() / NumSubs;
5994 SubOps.push_back(extractSubVector(Op, i * NumSubElts, DAG, DL, SizeSub));
5995 }
5996 Subs.push_back(Builder(DAG, DL, SubOps));
5997 }
5998 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Subs);
5999 }
6001 /// Insert i1-subvector to i1-vector.
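/// i1 vectors live in k-registers, so this is lowered with mask shifts
/// (KSHIFTL/KSHIFTR) and OR rather than element shuffles; e.g. (illustrative)
/// inserting a v8i1 subvector at index 8 of a zero v16i1 vector reduces to a
/// single KSHIFTL by 8 of the zero-extended subvector.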
6002 static SDValue insert1BitVector(SDValue Op, SelectionDAG &DAG,
6003 const X86Subtarget &Subtarget) {
6004 SDLoc dl(Op);
6006 SDValue Vec = Op.getOperand(0);
6007 SDValue SubVec = Op.getOperand(1);
6008 SDValue Idx = Op.getOperand(2);
6009 unsigned IdxVal = Op.getConstantOperandVal(2);
6011 // Inserting undef is a nop. We can just return the original vector.
6012 if (SubVec.isUndef())
6013 return Vec;
6015 if (IdxVal == 0 && Vec.isUndef()) // the operation is legal
6016 return Op;
6018 MVT OpVT = Op.getSimpleValueType();
6019 unsigned NumElems = OpVT.getVectorNumElements();
6020 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
6022 // Extend to natively supported kshift.
6023 MVT WideOpVT = OpVT;
6024 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
6025 WideOpVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
6027 // Inserting into the lsbs of a zero vector is legal. ISel will insert shifts
6028 // if needed.
6029 if (IdxVal == 0 && ISD::isBuildVectorAllZeros(Vec.getNode())) {
6030 // May need to promote to a legal type.
6031 Op = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
6032 DAG.getConstant(0, dl, WideOpVT),
6033 SubVec, ZeroIdx);
6034 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
6035 }
6037 MVT SubVecVT = SubVec.getSimpleValueType();
6038 unsigned SubVecNumElems = SubVecVT.getVectorNumElements();
6039 assert(IdxVal + SubVecNumElems <= NumElems &&
6040 IdxVal % SubVecVT.getSizeInBits() == 0 &&
6041 "Unexpected index value in INSERT_SUBVECTOR");
6043 SDValue Undef = DAG.getUNDEF(WideOpVT);
6045 if (IdxVal == 0) {
6046 // Zero lower bits of the Vec
6047 SDValue ShiftBits = DAG.getTargetConstant(SubVecNumElems, dl, MVT::i8);
6048 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec,
6049 ZeroIdx);
6050 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
6051 Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
6052 // Merge them together, SubVec should be zero extended.
6053 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
6054 DAG.getConstant(0, dl, WideOpVT),
6055 SubVec, ZeroIdx);
6056 Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
6057 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
6058 }
6060 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
6061 Undef, SubVec, ZeroIdx);
6063 if (Vec.isUndef()) {
6064 assert(IdxVal != 0 && "Unexpected index");
6065 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6066 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
6067 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
6068 }
6070 if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
6071 assert(IdxVal != 0 && "Unexpected index");
6072 NumElems = WideOpVT.getVectorNumElements();
6073 unsigned ShiftLeft = NumElems - SubVecNumElems;
6074 unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
6075 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6076 DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
6077 if (ShiftRight != 0)
6078 SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
6079 DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
6080 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
6081 }
6083 // Simple case when we put subvector in the upper part
6084 if (IdxVal + SubVecNumElems == NumElems) {
6085 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6086 DAG.getTargetConstant(IdxVal, dl, MVT::i8));
6087 if (SubVecNumElems * 2 == NumElems) {
6088 // Special case, use legal zero extending insert_subvector. This allows
6089 // isel to optimize when bits are known zero.
6090 Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVecVT, Vec, ZeroIdx);
6091 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
6092 DAG.getConstant(0, dl, WideOpVT),
6093 Vec, ZeroIdx);
6094 } else {
6095 // Otherwise use explicit shifts to zero the bits.
6096 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT,
6097 Undef, Vec, ZeroIdx);
6098 NumElems = WideOpVT.getVectorNumElements();
6099 SDValue ShiftBits = DAG.getTargetConstant(NumElems - IdxVal, dl, MVT::i8);
6100 Vec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec, ShiftBits);
6101 Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec, ShiftBits);
6102 }
6103 Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
6104 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
6105 }
6107 // Inserting into the middle is more complicated.
6109 NumElems = WideOpVT.getVectorNumElements();
6111 // Widen the vector if needed.
6112 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideOpVT, Undef, Vec, ZeroIdx);
6114 unsigned ShiftLeft = NumElems - SubVecNumElems;
6115 unsigned ShiftRight = NumElems - SubVecNumElems - IdxVal;
6117 // Do an optimization for the most frequently used types.
6118 if (WideOpVT != MVT::v64i1 || Subtarget.is64Bit()) {
6119 APInt Mask0 = APInt::getBitsSet(NumElems, IdxVal, IdxVal + SubVecNumElems);
6120 Mask0.flipAllBits();
6121 SDValue CMask0 = DAG.getConstant(Mask0, dl, MVT::getIntegerVT(NumElems));
6122 SDValue VMask0 = DAG.getNode(ISD::BITCAST, dl, WideOpVT, CMask0);
6123 Vec = DAG.getNode(ISD::AND, dl, WideOpVT, Vec, VMask0);
6124 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6125 DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
6126 SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
6127 DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
6128 Op = DAG.getNode(ISD::OR, dl, WideOpVT, Vec, SubVec);
6130 // Reduce to original width if needed.
6131 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, Op, ZeroIdx);
6132 }
6134 // Clear the upper bits of the subvector and move it to its insert position.
6135 SubVec = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, SubVec,
6136 DAG.getTargetConstant(ShiftLeft, dl, MVT::i8));
6137 SubVec = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, SubVec,
6138 DAG.getTargetConstant(ShiftRight, dl, MVT::i8));
6140 // Isolate the bits below the insertion point.
6141 unsigned LowShift = NumElems - IdxVal;
6142 SDValue Low = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, Vec,
6143 DAG.getTargetConstant(LowShift, dl, MVT::i8));
6144 Low = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Low,
6145 DAG.getTargetConstant(LowShift, dl, MVT::i8));
6147 // Isolate the bits after the last inserted bit.
6148 unsigned HighShift = IdxVal + SubVecNumElems;
6149 SDValue High = DAG.getNode(X86ISD::KSHIFTR, dl, WideOpVT, Vec,
6150 DAG.getTargetConstant(HighShift, dl, MVT::i8));
6151 High = DAG.getNode(X86ISD::KSHIFTL, dl, WideOpVT, High,
6152 DAG.getTargetConstant(HighShift, dl, MVT::i8));
6154 // Now OR all 3 pieces together.
6155 Vec = DAG.getNode(ISD::OR, dl, WideOpVT, Low, High);
6156 SubVec = DAG.getNode(ISD::OR, dl, WideOpVT, SubVec, Vec);
6158 // Reduce to original width if needed.
6159 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OpVT, SubVec, ZeroIdx);
6160 }
6162 static SDValue concatSubVectors(SDValue V1, SDValue V2, SelectionDAG &DAG,
6163 const SDLoc &dl) {
6164 assert(V1.getValueType() == V2.getValueType() && "subvector type mismatch");
6165 EVT SubVT = V1.getValueType();
6166 EVT SubSVT = SubVT.getScalarType();
6167 unsigned SubNumElts = SubVT.getVectorNumElements();
6168 unsigned SubVectorWidth = SubVT.getSizeInBits();
6169 EVT VT = EVT::getVectorVT(*DAG.getContext(), SubSVT, 2 * SubNumElts);
6170 SDValue V = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, dl, SubVectorWidth);
6171 return insertSubVector(V, V2, SubNumElts, DAG, dl, SubVectorWidth);
6172 }
6174 /// Returns a vector of specified type with all bits set.
6175 /// Always build ones vectors as <4 x i32>, <8 x i32> or <16 x i32>.
6176 /// Then bitcast to their original type, ensuring they get CSE'd.
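/// e.g. (illustrative) getOnesVector(MVT::v4i64, DAG, dl) builds:
///   SDValue Vec = DAG.getConstant(APInt::getAllOnesValue(32), dl, MVT::v8i32);
///   return DAG.getBitcast(MVT::v4i64, Vec);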
6177 static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, const SDLoc &dl) {
6178 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
6179 "Expected a 128/256/512-bit vector type");
6181 APInt Ones = APInt::getAllOnesValue(32);
6182 unsigned NumElts = VT.getSizeInBits() / 32;
6183 SDValue Vec = DAG.getConstant(Ones, dl, MVT::getVectorVT(MVT::i32, NumElts));
6184 return DAG.getBitcast(VT, Vec);
6185 }
6187 // Convert *_EXTEND to *_EXTEND_VECTOR_INREG opcode.
6188 static unsigned getOpcode_EXTEND_VECTOR_INREG(unsigned Opcode) {
6189 switch (Opcode) {
6190 case ISD::ANY_EXTEND:
6191 case ISD::ANY_EXTEND_VECTOR_INREG:
6192 return ISD::ANY_EXTEND_VECTOR_INREG;
6193 case ISD::ZERO_EXTEND:
6194 case ISD::ZERO_EXTEND_VECTOR_INREG:
6195 return ISD::ZERO_EXTEND_VECTOR_INREG;
6196 case ISD::SIGN_EXTEND:
6197 case ISD::SIGN_EXTEND_VECTOR_INREG:
6198 return ISD::SIGN_EXTEND_VECTOR_INREG;
6199 }
6200 llvm_unreachable("Unknown opcode");
6201 }
6203 static SDValue getExtendInVec(unsigned Opcode, const SDLoc &DL, EVT VT,
6204 SDValue In, SelectionDAG &DAG) {
6205 EVT InVT = In.getValueType();
6206 assert(VT.isVector() && InVT.isVector() && "Expected vector VTs.");
6207 assert((ISD::ANY_EXTEND == Opcode || ISD::SIGN_EXTEND == Opcode ||
6208 ISD::ZERO_EXTEND == Opcode) &&
6209 "Unknown extension opcode");
6211 // For 256-bit vectors, we only need the lower (128-bit) input half.
6212 // For 512-bit vectors, we only need the lower input half or quarter.
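// e.g. (illustrative) for VT=v8i32 and In=v16i16 (both 256-bit) we extract
// the lower v8i16 half and extend that instead.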
6213 if (InVT.getSizeInBits() > 128) {
6214 assert(VT.getSizeInBits() == InVT.getSizeInBits() &&
6215 "Expected VTs to be the same size!");
6216 unsigned Scale = VT.getScalarSizeInBits() / InVT.getScalarSizeInBits();
6217 In = extractSubVector(In, 0, DAG, DL,
6218 std::max(128U, (unsigned)VT.getSizeInBits() / Scale));
6219 InVT = In.getValueType();
6220 }
6222 if (VT.getVectorNumElements() != InVT.getVectorNumElements())
6223 Opcode = getOpcode_EXTEND_VECTOR_INREG(Opcode);
6225 return DAG.getNode(Opcode, DL, VT, In);
6226 }
6228 // Match (xor X, -1) -> X.
6229 // Match extract_subvector(xor X, -1) -> extract_subvector(X).
6230 // Match concat_vectors(xor X, -1, xor Y, -1) -> concat_vectors(X, Y).
6231 static SDValue IsNOT(SDValue V, SelectionDAG &DAG, bool OneUse = false) {
6232 V = OneUse ? peekThroughOneUseBitcasts(V) : peekThroughBitcasts(V);
6233 if (V.getOpcode() == ISD::XOR &&
6234 ISD::isBuildVectorAllOnes(V.getOperand(1).getNode()))
6235 return V.getOperand(0);
6236 if (V.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
6237 (isNullConstant(V.getOperand(1)) || V.getOperand(0).hasOneUse())) {
6238 if (SDValue Not = IsNOT(V.getOperand(0), DAG)) {
6239 Not = DAG.getBitcast(V.getOperand(0).getValueType(), Not);
6240 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(Not), V.getValueType(),
6241 Not, V.getOperand(1));
6242 }
6243 }
6244 SmallVector<SDValue, 2> CatOps;
6245 if (collectConcatOps(V.getNode(), CatOps)) {
6246 for (SDValue &CatOp : CatOps) {
6247 SDValue NotCat = IsNOT(CatOp, DAG);
6248 if (!NotCat) return SDValue();
6249 CatOp = DAG.getBitcast(CatOp.getValueType(), NotCat);
6250 }
6251 return DAG.getNode(ISD::CONCAT_VECTORS, SDLoc(V), V.getValueType(), CatOps);
6252 }
6253 return SDValue();
6254 }
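// e.g. (illustrative) for MVT::v8i16 with Lo=true and Unary=false, the
// function below produces the PUNPCKLWD pattern <0, 8, 1, 9, 2, 10, 3, 11>.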
6256 void llvm::createUnpackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
6257 bool Lo, bool Unary) {
6258 assert(Mask.empty() && "Expected an empty shuffle mask vector");
6259 int NumElts = VT.getVectorNumElements();
6260 int NumEltsInLane = 128 / VT.getScalarSizeInBits();
6261 for (int i = 0; i < NumElts; ++i) {
6262 unsigned LaneStart = (i / NumEltsInLane) * NumEltsInLane;
6263 int Pos = (i % NumEltsInLane) / 2 + LaneStart;
6264 Pos += (Unary ? 0 : NumElts * (i % 2));
6265 Pos += (Lo ? 0 : NumEltsInLane / 2);
6266 Mask.push_back(Pos);
6267 }
6268 }
6270 /// Similar to unpacklo/unpackhi, but without the 128-bit lane limitation
6271 /// imposed by AVX and specific to the unary pattern. Example:
6272 /// v8iX Lo --> <0, 0, 1, 1, 2, 2, 3, 3>
6273 /// v8iX Hi --> <4, 4, 5, 5, 6, 6, 7, 7>
6274 void llvm::createSplat2ShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
6275 bool Lo) {
6276 assert(Mask.empty() && "Expected an empty shuffle mask vector");
6277 int NumElts = VT.getVectorNumElements();
6278 for (int i = 0; i < NumElts; ++i) {
6279 int Pos = i / 2;
6280 Pos += (Lo ? 0 : NumElts / 2);
6281 Mask.push_back(Pos);
6282 }
6283 }
6285 /// Returns a vector_shuffle node for an unpackl operation.
6286 static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
6287 SDValue V1, SDValue V2) {
6288 SmallVector<int, 8> Mask;
6289 createUnpackShuffleMask(VT, Mask, /* Lo = */ true, /* Unary = */ false);
6290 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
6291 }
6293 /// Returns a vector_shuffle node for an unpackh operation.
6294 static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, MVT VT,
6295 SDValue V1, SDValue V2) {
6296 SmallVector<int, 8> Mask;
6297 createUnpackShuffleMask(VT, Mask, /* Lo = */ false, /* Unary = */ false);
6298 return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
6299 }
6301 /// Return a vector_shuffle of the specified vector with a zero or undef vector.
6302 /// This produces a shuffle where the low element of V2 is swizzled into the
6303 /// zero/undef vector, landing at element Idx.
6304 /// This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
6305 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, int Idx,
6306 bool IsZero,
6307 const X86Subtarget &Subtarget,
6308 SelectionDAG &DAG) {
6309 MVT VT = V2.getSimpleValueType();
6310 SDValue V1 = IsZero
6311 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
6312 int NumElems = VT.getVectorNumElements();
6313 SmallVector<int, 16> MaskVec(NumElems);
6314 for (int i = 0; i != NumElems; ++i)
6315 // If this is the insertion idx, put the low elt of V2 here.
6316 MaskVec[i] = (i == Idx) ? NumElems : i;
6317 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, MaskVec);
6318 }
6320 static const Constant *getTargetConstantFromBasePtr(SDValue Ptr) {
6321 if (Ptr.getOpcode() == X86ISD::Wrapper ||
6322 Ptr.getOpcode() == X86ISD::WrapperRIP)
6323 Ptr = Ptr.getOperand(0);
6325 auto *CNode = dyn_cast<ConstantPoolSDNode>(Ptr);
6326 if (!CNode || CNode->isMachineConstantPoolEntry() || CNode->getOffset() != 0)
6327 return nullptr;
6329 return CNode->getConstVal();
6330 }
6332 static const Constant *getTargetConstantFromNode(LoadSDNode *Load) {
6333 if (!Load || !ISD::isNormalLoad(Load))
6334 return nullptr;
6335 return getTargetConstantFromBasePtr(Load->getBasePtr());
6336 }
6338 static const Constant *getTargetConstantFromNode(SDValue Op) {
6339 Op = peekThroughBitcasts(Op);
6340 return getTargetConstantFromNode(dyn_cast<LoadSDNode>(Op));
6341 }
6343 const Constant *
6344 X86TargetLowering::getTargetConstantFromLoad(LoadSDNode *LD) const {
6345 assert(LD && "Unexpected null LoadSDNode");
6346 return getTargetConstantFromNode(LD);
6347 }
6349 // Extract raw constant bits from constant pools.
6350 static bool getTargetConstantBitsFromNode(SDValue Op, unsigned EltSizeInBits,
6351 APInt &UndefElts,
6352 SmallVectorImpl<APInt> &EltBits,
6353 bool AllowWholeUndefs = true,
6354 bool AllowPartialUndefs = true) {
6355 assert(EltBits.empty() && "Expected an empty EltBits vector");
6357 Op = peekThroughBitcasts(Op);
6359 EVT VT = Op.getValueType();
6360 unsigned SizeInBits = VT.getSizeInBits();
6361 assert((SizeInBits % EltSizeInBits) == 0 && "Can't split constant!");
6362 unsigned NumElts = SizeInBits / EltSizeInBits;
6364 // Bitcast a source array of element bits to the target size.
6365 auto CastBitData = [&](APInt &UndefSrcElts, ArrayRef<APInt> SrcEltBits) {
6366 unsigned NumSrcElts = UndefSrcElts.getBitWidth();
6367 unsigned SrcEltSizeInBits = SrcEltBits[0].getBitWidth();
6368 assert((NumSrcElts * SrcEltSizeInBits) == SizeInBits &&
6369 "Constant bit sizes don't match");
6371 // Don't split if we don't allow undef bits.
6372 bool AllowUndefs = AllowWholeUndefs || AllowPartialUndefs;
6373 if (UndefSrcElts.getBoolValue() && !AllowUndefs)
6374 return false;
6376 // If we're already the right size, don't bother bitcasting.
6377 if (NumSrcElts == NumElts) {
6378 UndefElts = UndefSrcElts;
6379 EltBits.assign(SrcEltBits.begin(), SrcEltBits.end());
6380 return true;
6381 }
6383 // Extract all the undef/constant element data and pack into single bitsets.
6384 APInt UndefBits(SizeInBits, 0);
6385 APInt MaskBits(SizeInBits, 0);
6387 for (unsigned i = 0; i != NumSrcElts; ++i) {
6388 unsigned BitOffset = i * SrcEltSizeInBits;
6389 if (UndefSrcElts[i])
6390 UndefBits.setBits(BitOffset, BitOffset + SrcEltSizeInBits);
6391 MaskBits.insertBits(SrcEltBits[i], BitOffset);
6392 }
6394 // Split the undef/constant single bitset data into the target elements.
6395 UndefElts = APInt(NumElts, 0);
6396 EltBits.resize(NumElts, APInt(EltSizeInBits, 0));
6398 for (unsigned i = 0; i != NumElts; ++i) {
6399 unsigned BitOffset = i * EltSizeInBits;
6400 APInt UndefEltBits = UndefBits.extractBits(EltSizeInBits, BitOffset);
6402 // Only treat an element as UNDEF if all bits are UNDEF.
6403 if (UndefEltBits.isAllOnesValue()) {
6404 if (!AllowWholeUndefs)
6405 return false;
6406 UndefElts.setBit(i);
6407 continue;
6408 }
6410 // If only some bits are UNDEF then treat them as zero (or bail if not
6411 // supported).
6412 if (UndefEltBits.getBoolValue() && !AllowPartialUndefs)
6413 return false;
6415 EltBits[i] = MaskBits.extractBits(EltSizeInBits, BitOffset);
6416 }
6418 return true;
6419 };
6420 // Collect constant bits and insert into mask/undef bit masks.
6421 auto CollectConstantBits = [](const Constant *Cst, APInt &Mask, APInt &Undefs,
6422 unsigned UndefBitIndex) {
6423 if (!Cst)
6424 return false;
6425 if (isa<UndefValue>(Cst)) {
6426 Undefs.setBit(UndefBitIndex);
6427 return true;
6428 }
6429 if (auto *CInt = dyn_cast<ConstantInt>(Cst)) {
6430 Mask = CInt->getValue();
6431 return true;
6432 }
6433 if (auto *CFP = dyn_cast<ConstantFP>(Cst)) {
6434 Mask = CFP->getValueAPF().bitcastToAPInt();
6435 return true;
6436 }
6437 return false;
6438 };
6440 // Handle UNDEFs.
6441 if (Op.isUndef()) {
6442 APInt UndefSrcElts = APInt::getAllOnesValue(NumElts);
6443 SmallVector<APInt, 64> SrcEltBits(NumElts, APInt(EltSizeInBits, 0));
6444 return CastBitData(UndefSrcElts, SrcEltBits);
6445 }
6447 // Extract scalar constant bits.
6448 if (auto *Cst = dyn_cast<ConstantSDNode>(Op)) {
6449 APInt UndefSrcElts = APInt::getNullValue(1);
6450 SmallVector<APInt, 64> SrcEltBits(1, Cst->getAPIntValue());
6451 return CastBitData(UndefSrcElts, SrcEltBits);
6452 }
6453 if (auto *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
6454 APInt UndefSrcElts = APInt::getNullValue(1);
6455 APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
6456 SmallVector<APInt, 64> SrcEltBits(1, RawBits);
6457 return CastBitData(UndefSrcElts, SrcEltBits);
6458 }
6460 // Extract constant bits from build vector.
6461 if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
6462 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
6463 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6465 APInt UndefSrcElts(NumSrcElts, 0);
6466 SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
6467 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
6468 const SDValue &Src = Op.getOperand(i);
6469 if (Src.isUndef()) {
6470 UndefSrcElts.setBit(i);
6471 continue;
6472 }
6473 auto *Cst = cast<ConstantSDNode>(Src);
6474 SrcEltBits[i] = Cst->getAPIntValue().zextOrTrunc(SrcEltSizeInBits);
6475 }
6476 return CastBitData(UndefSrcElts, SrcEltBits);
6477 }
6478 if (ISD::isBuildVectorOfConstantFPSDNodes(Op.getNode())) {
6479 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
6480 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6482 APInt UndefSrcElts(NumSrcElts, 0);
6483 SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
6484 for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
6485 const SDValue &Src = Op.getOperand(i);
6486 if (Src.isUndef()) {
6487 UndefSrcElts.setBit(i);
6488 continue;
6489 }
6490 auto *Cst = cast<ConstantFPSDNode>(Src);
6491 APInt RawBits = Cst->getValueAPF().bitcastToAPInt();
6492 SrcEltBits[i] = RawBits.zextOrTrunc(SrcEltSizeInBits);
6493 }
6494 return CastBitData(UndefSrcElts, SrcEltBits);
6495 }
6497 // Extract constant bits from constant pool vector.
6498 if (auto *Cst = getTargetConstantFromNode(Op)) {
6499 Type *CstTy = Cst->getType();
6500 unsigned CstSizeInBits = CstTy->getPrimitiveSizeInBits();
6501 if (!CstTy->isVectorTy() || (CstSizeInBits % SizeInBits) != 0)
6502 return false;
6504 unsigned SrcEltSizeInBits = CstTy->getScalarSizeInBits();
6505 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6507 APInt UndefSrcElts(NumSrcElts, 0);
6508 SmallVector<APInt, 64> SrcEltBits(NumSrcElts, APInt(SrcEltSizeInBits, 0));
6509 for (unsigned i = 0; i != NumSrcElts; ++i)
6510 if (!CollectConstantBits(Cst->getAggregateElement(i), SrcEltBits[i],
6511 UndefSrcElts, i))
6512 return false;
6514 return CastBitData(UndefSrcElts, SrcEltBits);
6515 }
6517 // Extract constant bits from a broadcasted constant pool scalar.
6518 if (Op.getOpcode() == X86ISD::VBROADCAST_LOAD &&
6519 EltSizeInBits <= VT.getScalarSizeInBits()) {
6520 auto *MemIntr = cast<MemIntrinsicSDNode>(Op);
6521 if (MemIntr->getMemoryVT().getScalarSizeInBits() != VT.getScalarSizeInBits())
6522 return false;
6524 SDValue Ptr = MemIntr->getBasePtr();
6525 if (const Constant *C = getTargetConstantFromBasePtr(Ptr)) {
6526 unsigned SrcEltSizeInBits = C->getType()->getScalarSizeInBits();
6527 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6529 APInt UndefSrcElts(NumSrcElts, 0);
6530 SmallVector<APInt, 64> SrcEltBits(1, APInt(SrcEltSizeInBits, 0));
6531 if (CollectConstantBits(C, SrcEltBits[0], UndefSrcElts, 0)) {
6532 if (UndefSrcElts[0])
6533 UndefSrcElts.setBits(0, NumSrcElts);
6534 SrcEltBits.append(NumSrcElts - 1, SrcEltBits[0]);
6535 return CastBitData(UndefSrcElts, SrcEltBits);
6536 }
6537 }
6538 }
6540 // Extract constant bits from a subvector broadcast.
6541 if (Op.getOpcode() == X86ISD::SUBV_BROADCAST) {
6542 SmallVector<APInt, 16> SubEltBits;
6543 if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6544 UndefElts, SubEltBits, AllowWholeUndefs,
6545 AllowPartialUndefs)) {
6546 UndefElts = APInt::getSplat(NumElts, UndefElts);
6547 while (EltBits.size() < NumElts)
6548 EltBits.append(SubEltBits.begin(), SubEltBits.end());
6549 return true;
6550 }
6551 }
6553 // Extract a rematerialized scalar constant insertion.
6554 if (Op.getOpcode() == X86ISD::VZEXT_MOVL &&
6555 Op.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
6556 isa<ConstantSDNode>(Op.getOperand(0).getOperand(0))) {
6557 unsigned SrcEltSizeInBits = VT.getScalarSizeInBits();
6558 unsigned NumSrcElts = SizeInBits / SrcEltSizeInBits;
6560 APInt UndefSrcElts(NumSrcElts, 0);
6561 SmallVector<APInt, 64> SrcEltBits;
6562 auto *CN = cast<ConstantSDNode>(Op.getOperand(0).getOperand(0));
6563 SrcEltBits.push_back(CN->getAPIntValue().zextOrTrunc(SrcEltSizeInBits));
6564 SrcEltBits.append(NumSrcElts - 1, APInt(SrcEltSizeInBits, 0));
6565 return CastBitData(UndefSrcElts, SrcEltBits);
6566 }
6568 // Insert constant bits from a base and sub vector sources.
6569 if (Op.getOpcode() == ISD::INSERT_SUBVECTOR) {
6570 // TODO - support insert_subvector through bitcasts.
6571 if (EltSizeInBits != VT.getScalarSizeInBits())
6572 return false;
6574 APInt UndefSubElts;
6575 SmallVector<APInt, 32> EltSubBits;
6576 if (getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
6577 UndefSubElts, EltSubBits,
6578 AllowWholeUndefs, AllowPartialUndefs) &&
6579 getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6580 UndefElts, EltBits, AllowWholeUndefs,
6581 AllowPartialUndefs)) {
6582 unsigned BaseIdx = Op.getConstantOperandVal(2);
6583 UndefElts.insertBits(UndefSubElts, BaseIdx);
6584 for (unsigned i = 0, e = EltSubBits.size(); i != e; ++i)
6585 EltBits[BaseIdx + i] = EltSubBits[i];
6586 return true;
6587 }
6588 }
6590 // Extract constant bits from a subvector's source.
6591 if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
6592 // TODO - support extract_subvector through bitcasts.
6593 if (EltSizeInBits != VT.getScalarSizeInBits())
6594 return false;
6596 if (getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6597 UndefElts, EltBits, AllowWholeUndefs,
6598 AllowPartialUndefs)) {
6599 EVT SrcVT = Op.getOperand(0).getValueType();
6600 unsigned NumSrcElts = SrcVT.getVectorNumElements();
6601 unsigned NumSubElts = VT.getVectorNumElements();
6602 unsigned BaseIdx = Op.getConstantOperandVal(1);
6603 UndefElts = UndefElts.extractBits(NumSubElts, BaseIdx);
6604 if ((BaseIdx + NumSubElts) != NumSrcElts)
6605 EltBits.erase(EltBits.begin() + BaseIdx + NumSubElts, EltBits.end());
6606 if (BaseIdx != 0)
6607 EltBits.erase(EltBits.begin(), EltBits.begin() + BaseIdx);
6608 return true;
6609 }
6610 }
6612 // Extract constant bits from shuffle node sources.
6613 if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Op)) {
6614 // TODO - support shuffle through bitcasts.
6615 if (EltSizeInBits != VT.getScalarSizeInBits())
6616 return false;
6618 ArrayRef<int> Mask = SVN->getMask();
6619 if ((!AllowWholeUndefs || !AllowPartialUndefs) &&
6620 llvm::any_of(Mask, [](int M) { return M < 0; }))
6621 return false;
6623 APInt UndefElts0, UndefElts1;
6624 SmallVector<APInt, 32> EltBits0, EltBits1;
6625 if (isAnyInRange(Mask, 0, NumElts) &&
6626 !getTargetConstantBitsFromNode(Op.getOperand(0), EltSizeInBits,
6627 UndefElts0, EltBits0, AllowWholeUndefs,
6628 AllowPartialUndefs))
6629 return false;
6630 if (isAnyInRange(Mask, NumElts, 2 * NumElts) &&
6631 !getTargetConstantBitsFromNode(Op.getOperand(1), EltSizeInBits,
6632 UndefElts1, EltBits1, AllowWholeUndefs,
6633 AllowPartialUndefs))
6634 return false;
6636 UndefElts = APInt::getNullValue(NumElts);
6637 for (int i = 0; i != (int)NumElts; ++i) {
6638 int M = Mask[i];
6639 if (M < 0) {
6640 UndefElts.setBit(i);
6641 EltBits.push_back(APInt::getNullValue(EltSizeInBits));
6642 } else if (M < (int)NumElts) {
6643 if (UndefElts0[M])
6644 UndefElts.setBit(i);
6645 EltBits.push_back(EltBits0[M]);
6646 } else {
6647 if (UndefElts1[M - NumElts])
6648 UndefElts.setBit(i);
6649 EltBits.push_back(EltBits1[M - NumElts]);
6650 }
6651 }
6652 return true;
6653 }
6655 return false;
6656 }
6658 namespace llvm {
6659 namespace X86 {
6660 bool isConstantSplat(SDValue Op, APInt &SplatVal, bool AllowPartialUndefs) {
6661 APInt UndefElts;
6662 SmallVector<APInt, 16> EltBits;
6663 if (getTargetConstantBitsFromNode(Op, Op.getScalarValueSizeInBits(),
6664 UndefElts, EltBits, true,
6665 AllowPartialUndefs)) {
6666 int SplatIndex = -1;
6667 for (int i = 0, e = EltBits.size(); i != e; ++i) {
6668 if (UndefElts[i])
6669 continue;
6670 if (0 <= SplatIndex && EltBits[i] != EltBits[SplatIndex]) {
6671 return false;
6672 }
6673 SplatIndex = i;
6674 }
6676 if (0 <= SplatIndex) {
6677 SplatVal = EltBits[SplatIndex];
6678 return true;
6679 }
6680 }
6682 return false;
6683 }
6684 } // namespace X86
6685 } // namespace llvm
6687 static bool getTargetShuffleMaskIndices(SDValue MaskNode,
6688 unsigned MaskEltSizeInBits,
6689 SmallVectorImpl<uint64_t> &RawMask,
6690 APInt &UndefElts) {
6691 // Extract the raw target constant bits.
6692 SmallVector<APInt, 64> EltBits;
6693 if (!getTargetConstantBitsFromNode(MaskNode, MaskEltSizeInBits, UndefElts,
6694 EltBits, /* AllowWholeUndefs */ true,
6695 /* AllowPartialUndefs */ false))
6696 return false;
6698 // Insert the extracted elements into the mask.
6699 for (APInt Elt : EltBits)
6700 RawMask.push_back(Elt.getZExtValue());
6702 return true;
6703 }
6705 /// Create a shuffle mask that matches the PACKSS/PACKUS truncation.
6706 /// A multi-stage pack shuffle mask is created by specifying NumStages > 1.
6707 /// Note: This ignores saturation, so inputs must be checked first.
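/// e.g. (illustrative) a single-stage binary pack to v16i8 produces
/// <0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30>, i.e. the
/// even positions of the two (conceptually concatenated) v8i16 inputs.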
6708 static void createPackShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
6709 bool Unary, unsigned NumStages = 1) {
6710 assert(Mask.empty() && "Expected an empty shuffle mask vector");
6711 unsigned NumElts = VT.getVectorNumElements();
6712 unsigned NumLanes = VT.getSizeInBits() / 128;
6713 unsigned NumEltsPerLane = 128 / VT.getScalarSizeInBits();
6714 unsigned Offset = Unary ? 0 : NumElts;
6715 unsigned Repetitions = 1u << (NumStages - 1);
6716 unsigned Increment = 1u << NumStages;
6717 assert((NumEltsPerLane >> NumStages) > 0 && "Illegal packing compaction");
6719 for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
6720 for (unsigned Stage = 0; Stage != Repetitions; ++Stage) {
6721 for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += Increment)
6722 Mask.push_back(Elt + (Lane * NumEltsPerLane));
6723 for (unsigned Elt = 0; Elt != NumEltsPerLane; Elt += Increment)
6724 Mask.push_back(Elt + (Lane * NumEltsPerLane) + Offset);
6725 }
6726 }
6727 }
6729 // Split the demanded elts of a PACKSS/PACKUS node between its operands.
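// e.g. (illustrative) for a v16i8 PACKSS of two v8i16 operands, demanding
// result byte 3 demands LHS word 3, while demanding byte 11 demands RHS
// word 3.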
6730 static void getPackDemandedElts(EVT VT, const APInt &DemandedElts,
6731 APInt &DemandedLHS, APInt &DemandedRHS) {
6732 int NumLanes = VT.getSizeInBits() / 128;
6733 int NumElts = DemandedElts.getBitWidth();
6734 int NumInnerElts = NumElts / 2;
6735 int NumEltsPerLane = NumElts / NumLanes;
6736 int NumInnerEltsPerLane = NumInnerElts / NumLanes;
6738 DemandedLHS = APInt::getNullValue(NumInnerElts);
6739 DemandedRHS = APInt::getNullValue(NumInnerElts);
6741 // Map DemandedElts to the packed operands.
6742 for (int Lane = 0; Lane != NumLanes; ++Lane) {
6743 for (int Elt = 0; Elt != NumInnerEltsPerLane; ++Elt) {
6744 int OuterIdx = (Lane * NumEltsPerLane) + Elt;
6745 int InnerIdx = (Lane * NumInnerEltsPerLane) + Elt;
6746 if (DemandedElts[OuterIdx])
6747 DemandedLHS.setBit(InnerIdx);
6748 if (DemandedElts[OuterIdx + NumInnerEltsPerLane])
6749 DemandedRHS.setBit(InnerIdx);
6750 }
6751 }
6752 }
6754 // Split the demanded elts of a HADD/HSUB node between its operands.
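// e.g. (illustrative) for a 256-bit v8i32 HADD, demanding result element 1
// demands LHS elements 2 and 3 (the second pair of lane 0).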
6755 static void getHorizDemandedElts(EVT VT, const APInt &DemandedElts,
6756 APInt &DemandedLHS, APInt &DemandedRHS) {
6757 int NumLanes = VT.getSizeInBits() / 128;
6758 int NumElts = DemandedElts.getBitWidth();
6759 int NumEltsPerLane = NumElts / NumLanes;
6760 int HalfEltsPerLane = NumEltsPerLane / 2;
6762 DemandedLHS = APInt::getNullValue(NumElts);
6763 DemandedRHS = APInt::getNullValue(NumElts);
6765 // Map DemandedElts to the horizontal operands.
6766 for (int Idx = 0; Idx != NumElts; ++Idx) {
6767 if (!DemandedElts[Idx])
6768 continue;
6769 int LaneIdx = (Idx / NumEltsPerLane) * NumEltsPerLane;
6770 int LocalIdx = Idx % NumEltsPerLane;
6771 if (LocalIdx < HalfEltsPerLane) {
6772 DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 0);
6773 DemandedLHS.setBit(LaneIdx + 2 * LocalIdx + 1);
6774 } else {
6775 LocalIdx -= HalfEltsPerLane;
6776 DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 0);
6777 DemandedRHS.setBit(LaneIdx + 2 * LocalIdx + 1);
6778 }
6779 }
6780 }
6782 /// Calculates the shuffle mask corresponding to the target-specific opcode.
6783 /// If the mask could be calculated, returns it in \p Mask, returns the shuffle
6784 /// operands in \p Ops, and returns true.
6785 /// Sets \p IsUnary to true if only one source is used. Note that this will set
6786 /// IsUnary for shuffles which use a single input multiple times, and in those
6787 /// cases it will adjust the mask to only have indices within that single input.
6788 /// It is an error to call this with non-empty Mask/Ops vectors.
6789 static bool getTargetShuffleMask(SDNode *N, MVT VT, bool AllowSentinelZero,
6790 SmallVectorImpl<SDValue> &Ops,
6791 SmallVectorImpl<int> &Mask, bool &IsUnary) {
6792 unsigned NumElems = VT.getVectorNumElements();
6793 unsigned MaskEltSize = VT.getScalarSizeInBits();
6794 SmallVector<uint64_t, 32> RawMask;
6795 APInt RawUndefs;
6796 uint64_t ImmN;
6798 assert(Mask.empty() && "getTargetShuffleMask expects an empty Mask vector");
6799 assert(Ops.empty() && "getTargetShuffleMask expects an empty Ops vector");
6801 IsUnary = false;
6802 bool IsFakeUnary = false;
6803 switch (N->getOpcode()) {
6804 case X86ISD::BLENDI:
6805 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6806 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6807 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6808 DecodeBLENDMask(NumElems, ImmN, Mask);
6809 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6810 break;
6811 case X86ISD::SHUFP:
6812 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6813 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6814 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6815 DecodeSHUFPMask(NumElems, MaskEltSize, ImmN, Mask);
6816 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6817 break;
6818 case X86ISD::INSERTPS:
6819 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6820 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6821 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6822 DecodeINSERTPSMask(ImmN, Mask);
6823 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6824 break;
6825 case X86ISD::EXTRQI:
6826 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6827 if (isa<ConstantSDNode>(N->getOperand(1)) &&
6828 isa<ConstantSDNode>(N->getOperand(2))) {
6829 int BitLen = N->getConstantOperandVal(1);
6830 int BitIdx = N->getConstantOperandVal(2);
6831 DecodeEXTRQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
6832 IsUnary = true;
6833 }
6834 break;
6835 case X86ISD::INSERTQI:
6836 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6837 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6838 if (isa<ConstantSDNode>(N->getOperand(2)) &&
6839 isa<ConstantSDNode>(N->getOperand(3))) {
6840 int BitLen = N->getConstantOperandVal(2);
6841 int BitIdx = N->getConstantOperandVal(3);
6842 DecodeINSERTQIMask(NumElems, MaskEltSize, BitLen, BitIdx, Mask);
6843 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6844 }
6845 break;
6846 case X86ISD::UNPCKH:
6847 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6848 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6849 DecodeUNPCKHMask(NumElems, MaskEltSize, Mask);
6850 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6851 break;
6852 case X86ISD::UNPCKL:
6853 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6854 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6855 DecodeUNPCKLMask(NumElems, MaskEltSize, Mask);
6856 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6857 break;
6858 case X86ISD::MOVHLPS:
6859 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6860 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6861 DecodeMOVHLPSMask(NumElems, Mask);
6862 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6863 break;
6864 case X86ISD::MOVLHPS:
6865 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6866 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6867 DecodeMOVLHPSMask(NumElems, Mask);
6868 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6869 break;
6870 case X86ISD::VALIGN:
6871 assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
6872 "Only 32-bit and 64-bit elements are supported!");
6873 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6874 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6875 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6876 DecodeVALIGNMask(NumElems, ImmN, Mask);
6877 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6878 Ops.push_back(N->getOperand(1));
6879 Ops.push_back(N->getOperand(0));
6880 break;
6881 case X86ISD::PALIGNR:
6882 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6883 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6884 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6885 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6886 DecodePALIGNRMask(NumElems, ImmN, Mask);
6887 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6888 Ops.push_back(N->getOperand(1));
6889 Ops.push_back(N->getOperand(0));
6890 break;
6891 case X86ISD::VSHLDQ:
6892 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6893 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6894 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6895 DecodePSLLDQMask(NumElems, ImmN, Mask);
6896 IsUnary = true;
6897 break;
6898 case X86ISD::VSRLDQ:
6899 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6900 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6901 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6902 DecodePSRLDQMask(NumElems, ImmN, Mask);
6903 IsUnary = true;
6904 break;
6905 case X86ISD::PSHUFD:
6906 case X86ISD::VPERMILPI:
6907 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6908 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6909 DecodePSHUFMask(NumElems, MaskEltSize, ImmN, Mask);
6910 IsUnary = true;
6911 break;
6912 case X86ISD::PSHUFHW:
6913 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6914 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6915 DecodePSHUFHWMask(NumElems, ImmN, Mask);
6916 IsUnary = true;
6917 break;
6918 case X86ISD::PSHUFLW:
6919 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6920 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6921 DecodePSHUFLWMask(NumElems, ImmN, Mask);
6922 IsUnary = true;
6923 break;
6924 case X86ISD::VZEXT_MOVL:
6925 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6926 DecodeZeroMoveLowMask(NumElems, Mask);
6927 IsUnary = true;
6928 break;
6929 case X86ISD::VBROADCAST:
6930 // We only decode broadcasts of same-sized vectors; peeking through to
6931 // extracted subvectors is likely to cause hasOneUse issues with
6932 // SimplifyDemandedBits etc.
6933 if (N->getOperand(0).getValueType() == VT) {
6934 DecodeVectorBroadcast(NumElems, Mask);
6935 IsUnary = true;
6936 break;
6937 }
6938 return false;
6939 case X86ISD::VPERMILPV: {
6940 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6941 IsUnary = true;
6942 SDValue MaskNode = N->getOperand(1);
6943 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
6944 RawUndefs)) {
6945 DecodeVPERMILPMask(NumElems, MaskEltSize, RawMask, RawUndefs, Mask);
6946 break;
6947 }
6948 return false;
6949 }
6950 case X86ISD::PSHUFB: {
6951 assert(VT.getScalarType() == MVT::i8 && "Byte vector expected");
6952 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6953 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6954 IsUnary = true;
6955 SDValue MaskNode = N->getOperand(1);
6956 if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
6957 DecodePSHUFBMask(RawMask, RawUndefs, Mask);
6958 break;
6959 }
6960 return false;
6961 }
6962 case X86ISD::VPERMI:
6963 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6964 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6965 DecodeVPERMMask(NumElems, ImmN, Mask);
6966 IsUnary = true;
6967 break;
6968 case X86ISD::MOVSS:
6969 case X86ISD::MOVSD:
6970 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6971 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6972 DecodeScalarMoveMask(NumElems, /* IsLoad */ false, Mask);
6973 break;
6974 case X86ISD::VPERM2X128:
6975 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6976 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6977 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6978 DecodeVPERM2X128Mask(NumElems, ImmN, Mask);
6979 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6980 break;
6981 case X86ISD::SHUF128:
6982 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6983 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
6984 ImmN = N->getConstantOperandVal(N->getNumOperands() - 1);
6985 decodeVSHUF64x2FamilyMask(NumElems, MaskEltSize, ImmN, Mask);
6986 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
6987 break;
6988 case X86ISD::MOVSLDUP:
6989 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6990 DecodeMOVSLDUPMask(NumElems, Mask);
6991 IsUnary = true;
6992 break;
6993 case X86ISD::MOVSHDUP:
6994 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
6995 DecodeMOVSHDUPMask(NumElems, Mask);
6996 IsUnary = true;
6997 break;
6998 case X86ISD::MOVDDUP:
6999 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7000 DecodeMOVDDUPMask(NumElems, Mask);
7001 IsUnary = true;
7002 break;
7003 case X86ISD::VPERMIL2: {
7004 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7005 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7006 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7007 SDValue MaskNode = N->getOperand(2);
7008 SDValue CtrlNode = N->getOperand(3);
7009 if (ConstantSDNode *CtrlOp = dyn_cast<ConstantSDNode>(CtrlNode)) {
7010 unsigned CtrlImm = CtrlOp->getZExtValue();
7011 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
7012 RawUndefs)) {
7013 DecodeVPERMIL2PMask(NumElems, MaskEltSize, CtrlImm, RawMask, RawUndefs,
7014 Mask);
7015 break;
7016 }
7017 }
7018 return false;
7019 }
7020 case X86ISD::VPPERM: {
7021 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7022 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7023 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
7024 SDValue MaskNode = N->getOperand(2);
7025 if (getTargetShuffleMaskIndices(MaskNode, 8, RawMask, RawUndefs)) {
7026 DecodeVPPERMMask(RawMask, RawUndefs, Mask);
7027 break;
7028 }
7029 return false;
7030 }
7031 case X86ISD::VPERMV: {
7032 assert(N->getOperand(1).getValueType() == VT && "Unexpected value type");
7033 IsUnary = true;
7034 // Unlike most shuffle nodes, VPERMV's mask operand is operand 0.
7035 Ops.push_back(N->getOperand(1));
7036 SDValue MaskNode = N->getOperand(0);
7037 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
7038 RawUndefs)) {
7039 DecodeVPERMVMask(RawMask, RawUndefs, Mask);
7040 break;
7041 }
7042 return false;
7043 }
7044 case X86ISD::VPERMV3: {
7045 assert(N->getOperand(0).getValueType() == VT && "Unexpected value type");
7046 assert(N->getOperand(2).getValueType() == VT && "Unexpected value type");
7047 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(2);
7048 // Unlike most shuffle nodes, VPERMV3's mask operand is the middle one.
7049 Ops.push_back(N->getOperand(0));
7050 Ops.push_back(N->getOperand(2));
7051 SDValue MaskNode = N->getOperand(1);
7052 if (getTargetShuffleMaskIndices(MaskNode, MaskEltSize, RawMask,
7053 RawUndefs)) {
7054 DecodeVPERMV3Mask(RawMask, RawUndefs, Mask);
7055 break;
7056 }
7057 return false;
7058 }
7059 default: llvm_unreachable("unknown target shuffle node");
7060 }
7062 // Empty mask indicates the decode failed.
7063 if (Mask.empty())
7064 return false;
7066 // Check if we're getting a shuffle mask with zero'd elements.
7067 if (!AllowSentinelZero && isAnyZero(Mask))
7068 return false;
7070 // If we have a fake unary shuffle, the shuffle mask is spread across two
7071 // inputs that are actually the same node. Re-map the mask to always point
7072 // into the first input.
7073 if (IsFakeUnary)
7074 for (int &M : Mask)
7075 if (M >= (int)Mask.size())
7076 M -= Mask.size();
7078 // If we didn't already add operands in the opcode-specific code, default to
7079 // adding 1 or 2 operands starting at 0.
7080 if (Ops.empty()) {
7081 Ops.push_back(N->getOperand(0));
7082 if (!IsUnary || IsFakeUnary)
7083 Ops.push_back(N->getOperand(1));
7084 }
7086 return true;
7087 }
7089 /// Compute whether each element of a shuffle is zeroable.
7091 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
7092 /// Either it is an undef element in the shuffle mask, the element of the input
7093 /// referenced is undef, or the element of the input referenced is known to be
7094 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
7095 /// as many lanes with this technique as possible to simplify the remaining
7096 /// shuffle.
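/// e.g. (illustrative) for mask <4, 1, 2, 3> where V2 is an all-zeros build
/// vector, element 0 is KnownZero because it selects the low element of V2.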
7097 static void computeZeroableShuffleElements(ArrayRef<int> Mask,
7098 SDValue V1, SDValue V2,
7099 APInt &KnownUndef, APInt &KnownZero) {
7100 int Size = Mask.size();
7101 KnownUndef = KnownZero = APInt::getNullValue(Size);
7103 V1 = peekThroughBitcasts(V1);
7104 V2 = peekThroughBitcasts(V2);
7106 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
7107 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
7109 int VectorSizeInBits = V1.getValueSizeInBits();
7110 int ScalarSizeInBits = VectorSizeInBits / Size;
7111 assert(!(VectorSizeInBits % ScalarSizeInBits) && "Illegal shuffle mask size");
7113 for (int i = 0; i < Size; ++i) {
7114 int M = Mask[i];
7115 // Handle the easy cases.
7116 if (M < 0) {
7117 KnownUndef.setBit(i);
7118 continue;
7119 }
7120 if ((M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
7121 KnownZero.setBit(i);
7122 continue;
7123 }
7125 // Determine shuffle input and normalize the mask.
7126 SDValue V = M < Size ? V1 : V2;
7127 M %= Size;
7129 // Currently we can only search BUILD_VECTOR for UNDEF/ZERO elements.
7130 if (V.getOpcode() != ISD::BUILD_VECTOR)
7131 continue;
7133 // If the BUILD_VECTOR has fewer elements than the bitcasted portion of
7134 // the (larger) source element must be UNDEF/ZERO.
7135 if ((Size % V.getNumOperands()) == 0) {
7136 int Scale = Size / V->getNumOperands();
7137 SDValue Op = V.getOperand(M / Scale);
7138 if (Op.isUndef())
7139 KnownUndef.setBit(i);
7140 if (X86::isZeroNode(Op))
7141 KnownZero.setBit(i);
7142 else if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op)) {
7143 APInt Val = Cst->getAPIntValue();
7144 Val = Val.extractBits(ScalarSizeInBits, (M % Scale) * ScalarSizeInBits);
7145 if (Val == 0)
7146 KnownZero.setBit(i);
7147 } else if (ConstantFPSDNode *Cst = dyn_cast<ConstantFPSDNode>(Op)) {
7148 APInt Val = Cst->getValueAPF().bitcastToAPInt();
7149 Val = Val.extractBits(ScalarSizeInBits, (M % Scale) * ScalarSizeInBits);
7150 if (Val == 0)
7151 KnownZero.setBit(i);
7152 }
7153 continue;
7154 }
7156 // If the BUILD_VECTOR has more elements than all the (smaller) source
7157 // elements must be UNDEF or ZERO.
7158 if ((V.getNumOperands() % Size) == 0) {
7159 int Scale = V->getNumOperands() / Size;
7160 bool AllUndef = true;
7161 bool AllZero = true;
7162 for (int j = 0; j < Scale; ++j) {
7163 SDValue Op = V.getOperand((M * Scale) + j);
7164 AllUndef &= Op.isUndef();
7165 AllZero &= X86::isZeroNode(Op);
7166 }
7167 if (AllUndef)
7168 KnownUndef.setBit(i);
7169 else if (AllZero)
7170 KnownZero.setBit(i);
7171 }
7172 }
7173 }
7176 /// Decode a target shuffle mask and inputs and see if any values are
7177 /// known to be undef or zero from their inputs.
7178 /// Returns true if the target shuffle mask was decoded.
7179 /// FIXME: Merge this with computeZeroableShuffleElements?
7180 static bool getTargetShuffleAndZeroables(SDValue N, SmallVectorImpl<int> &Mask,
7181 SmallVectorImpl<SDValue> &Ops,
7182 APInt &KnownUndef, APInt &KnownZero) {
7183 bool IsUnary;
7184 if (!isTargetShuffle(N.getOpcode()))
7185 return false;
7187 MVT VT = N.getSimpleValueType();
7188 if (!getTargetShuffleMask(N.getNode(), VT, true, Ops, Mask, IsUnary))
7189 return false;
7191 int Size = Mask.size();
7192 SDValue V1 = Ops[0];
7193 SDValue V2 = IsUnary ? V1 : Ops[1];
7194 KnownUndef = KnownZero = APInt::getNullValue(Size);
7196 V1 = peekThroughBitcasts(V1);
7197 V2 = peekThroughBitcasts(V2);
7199 assert((VT.getSizeInBits() % Size) == 0 &&
7200 "Illegal split of shuffle value type");
7201 unsigned EltSizeInBits = VT.getSizeInBits() / Size;
7203 // Extract known constant input data.
7204 APInt UndefSrcElts[2];
7205 SmallVector<APInt, 32> SrcEltBits[2];
7206 bool IsSrcConstant[2] = {
7207 getTargetConstantBitsFromNode(V1, EltSizeInBits, UndefSrcElts[0],
7208 SrcEltBits[0], true, false),
7209 getTargetConstantBitsFromNode(V2, EltSizeInBits, UndefSrcElts[1],
7210 SrcEltBits[1], true, false)};
7212 for (int i = 0; i < Size; ++i) {
7213 int M = Mask[i];
7215 // Already decoded as SM_SentinelZero / SM_SentinelUndef.
7216 if (M < 0) {
7217 assert(isUndefOrZero(M) && "Unknown shuffle sentinel value!");
7218 if (SM_SentinelUndef == M)
7219 KnownUndef.setBit(i);
7220 if (SM_SentinelZero == M)
7221 KnownZero.setBit(i);
7222 continue;
7223 }
7225 // Determine shuffle input and normalize the mask.
7226 unsigned SrcIdx = M / Size;
7227 SDValue V = M < Size ? V1 : V2;
7228 M %= Size;
7230 // We are referencing an UNDEF input.
7231 if (V.isUndef()) {
7232 KnownUndef.setBit(i);
7233 continue;
7234 }
7236 // SCALAR_TO_VECTOR - only the first element is defined, and the rest UNDEF.
7237 // TODO: We currently only set UNDEF for integer types - floats use the same
7238 // registers as vectors and many of the scalar folded loads rely on the
7239 // SCALAR_TO_VECTOR pattern.
7240 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
7241 (Size % V.getValueType().getVectorNumElements()) == 0) {
7242 int Scale = Size / V.getValueType().getVectorNumElements();
7243 int Idx = M / Scale;
7244 if (Idx != 0 && !VT.isFloatingPoint())
7245 KnownUndef.setBit(i);
7246 else if (Idx == 0 && X86::isZeroNode(V.getOperand(0)))
7247 KnownZero.setBit(i);
7248 continue;
7249 }
7251 // INSERT_SUBVECTOR - to widen vectors we often insert them into UNDEF
7252 // base vectors.
7253 if (V.getOpcode() == ISD::INSERT_SUBVECTOR) {
7254 SDValue Vec = V.getOperand(0);
7255 int NumVecElts = Vec.getValueType().getVectorNumElements();
7256 if (Vec.isUndef() && Size == NumVecElts) {
7257 int Idx = V.getConstantOperandVal(2);
7258 int NumSubElts = V.getOperand(1).getValueType().getVectorNumElements();
7259 if (M < Idx || (Idx + NumSubElts) <= M)
7260 KnownUndef.setBit(i);
7261 continue;
7262 }
7263 }
7265 // Attempt to extract from the source's constant bits.
7266 if (IsSrcConstant[SrcIdx]) {
7267 if (UndefSrcElts[SrcIdx][M])
7268 KnownUndef.setBit(i);
7269 else if (SrcEltBits[SrcIdx][M] == 0)
7270 KnownZero.setBit(i);
7271 continue;
7272 }
7273 }
7274 assert(VT.getVectorNumElements() == (unsigned)Size &&
7275 "Different mask size from vector size!");
7276 return true;
7277 }
7279 // Replace target shuffle mask elements with known undef/zero sentinels.
7280 static void resolveTargetShuffleFromZeroables(SmallVectorImpl<int> &Mask,
7281 const APInt &KnownUndef,
7282 const APInt &KnownZero,
7283 bool ResolveKnownZeros = true) {
7284 unsigned NumElts = Mask.size();
7285 assert(KnownUndef.getBitWidth() == NumElts &&
7286 KnownZero.getBitWidth() == NumElts && "Shuffle mask size mismatch");
7288 for (unsigned i = 0; i != NumElts; ++i) {
7289 if (KnownUndef[i])
7290 Mask[i] = SM_SentinelUndef;
7291 else if (ResolveKnownZeros && KnownZero[i])
7292 Mask[i] = SM_SentinelZero;
7293 }
7294 }
7296 // Extract target shuffle mask sentinel elements to known undef/zero bitmasks.
7297 static void resolveZeroablesFromTargetShuffle(const SmallVectorImpl<int> &Mask,
7298 APInt &KnownUndef,
7299 APInt &KnownZero) {
7300 unsigned NumElts = Mask.size();
7301 KnownUndef = KnownZero = APInt::getNullValue(NumElts);
7303 for (unsigned i = 0; i != NumElts; ++i) {
7304 int M = Mask[i];
7305 if (SM_SentinelUndef == M)
7306 KnownUndef.setBit(i);
7307 if (SM_SentinelZero == M)
7308 KnownZero.setBit(i);
7309 }
7310 }
7312 // Forward declaration (for getFauxShuffleMask recursive check).
7313 // TODO: Use DemandedElts variant.
7314 static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
7315 SmallVectorImpl<int> &Mask,
7316 const SelectionDAG &DAG, unsigned Depth,
7317 bool ResolveKnownElts);
7319 // Attempt to decode ops that could be represented as a shuffle mask.
7320 // The decoded shuffle mask may contain a different number of elements than the
7321 // destination value type.
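// e.g. (illustrative) a 128-bit ISD::OR whose operands are byte-wise disjoint
// blends can be decoded as a 16-element byte blend mask even though the node
// itself is not a shuffle.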
7322 static bool getFauxShuffleMask(SDValue N, const APInt &DemandedElts,
7323 SmallVectorImpl<int> &Mask,
7324 SmallVectorImpl<SDValue> &Ops,
7325 const SelectionDAG &DAG, unsigned Depth,
7326 bool ResolveKnownElts) {
7327 Mask.clear();
7328 Ops.clear();
7330 MVT VT = N.getSimpleValueType();
7331 unsigned NumElts = VT.getVectorNumElements();
7332 unsigned NumSizeInBits = VT.getSizeInBits();
7333 unsigned NumBitsPerElt = VT.getScalarSizeInBits();
7334 if ((NumBitsPerElt % 8) != 0 || (NumSizeInBits % 8) != 0)
7335 return false;
7336 assert(NumElts == DemandedElts.getBitWidth() && "Unexpected vector size");
7337 unsigned NumSizeInBytes = NumSizeInBits / 8;
7338 unsigned NumBytesPerElt = NumBitsPerElt / 8;
7340 unsigned Opcode = N.getOpcode();
7341 switch (Opcode) {
7342 case ISD::VECTOR_SHUFFLE: {
7343 // Don't treat ISD::VECTOR_SHUFFLE as a target shuffle so decode it here.
7344 ArrayRef<int> ShuffleMask = cast<ShuffleVectorSDNode>(N)->getMask();
7345 if (isUndefOrInRange(ShuffleMask, 0, 2 * NumElts)) {
7346 Mask.append(ShuffleMask.begin(), ShuffleMask.end());
7347 Ops.push_back(N.getOperand(0));
7348 Ops.push_back(N.getOperand(1));
7349 return true;
7350 }
7351 return false;
7352 }
7353 case ISD::AND:
7354 case X86ISD::ANDNP: {
7355 // Attempt to decode as a per-byte mask.
7356 APInt UndefElts;
7357 SmallVector<APInt, 32> EltBits;
7358 SDValue N0 = N.getOperand(0);
7359 SDValue N1 = N.getOperand(1);
7360 bool IsAndN = (X86ISD::ANDNP == Opcode);
7361 uint64_t ZeroMask = IsAndN ? 255 : 0;
7362 if (!getTargetConstantBitsFromNode(IsAndN ? N0 : N1, 8, UndefElts, EltBits))
7363 return false;
7364 for (int i = 0, e = (int)EltBits.size(); i != e; ++i) {
7365 if (UndefElts[i]) {
7366 Mask.push_back(SM_SentinelUndef);
7367 continue;
7368 }
7369 const APInt &ByteBits = EltBits[i];
7370 if (ByteBits != 0 && ByteBits != 255)
7371 return false;
7372 Mask.push_back(ByteBits == ZeroMask ? SM_SentinelZero : i);
7373 }
7374 Ops.push_back(IsAndN ? N1 : N0);
7375 return true;
7376 }
7377 case ISD::OR: {
7378 // Inspect each operand at the byte level. We can merge these into a
7379 // blend shuffle mask if for each byte at least one is masked out (zero).
7380 KnownBits Known0 =
7381 DAG.computeKnownBits(N.getOperand(0), DemandedElts, Depth + 1);
7382 KnownBits Known1 =
7383 DAG.computeKnownBits(N.getOperand(1), DemandedElts, Depth + 1);
7384 if (Known0.One.isNullValue() && Known1.One.isNullValue()) {
7385 bool IsByteMask = true;
7386 APInt ZeroMask = APInt::getNullValue(NumBytesPerElt);
7387 APInt SelectMask = APInt::getNullValue(NumBytesPerElt);
7388 for (unsigned i = 0; i != NumBytesPerElt && IsByteMask; ++i) {
7389 unsigned LHS = Known0.Zero.extractBits(8, i * 8).getZExtValue();
7390 unsigned RHS = Known1.Zero.extractBits(8, i * 8).getZExtValue();
7391 if (LHS == 255 && RHS == 0)
7392 SelectMask.setBit(i);
7393 else if (LHS == 255 && RHS == 255)
7394 ZeroMask.setBit(i);
7395 else if (!(LHS == 0 && RHS == 255))
7396 IsByteMask = false;
7397 }
7398 if (IsByteMask) {
7399 for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt) {
7400 for (unsigned j = 0; j != NumBytesPerElt; ++j) {
7401 unsigned Ofs = (SelectMask[j] ? NumSizeInBytes : 0);
7402 int Idx = (ZeroMask[j] ? (int)SM_SentinelZero : (i + j + Ofs));
7403 Mask.push_back(Idx);
7404 }
7405 }
7406 Ops.push_back(N.getOperand(0));
7407 Ops.push_back(N.getOperand(1));
7408 return true;
7409 }
7410 }
7412 // Handle OR(SHUFFLE,SHUFFLE) case where one source is zero and the other
7413 // is a valid shuffle index.
7414 SDValue N0 = peekThroughOneUseBitcasts(N.getOperand(0));
7415 SDValue N1 = peekThroughOneUseBitcasts(N.getOperand(1));
7416 if (!N0.getValueType().isVector() || !N1.getValueType().isVector())
7417 return false;
7418 SmallVector<int, 64> SrcMask0, SrcMask1;
7419 SmallVector<SDValue, 2> SrcInputs0, SrcInputs1;
7420 if (!getTargetShuffleInputs(N0, SrcInputs0, SrcMask0, DAG, Depth + 1,
7421 true) ||
7422 !getTargetShuffleInputs(N1, SrcInputs1, SrcMask1, DAG, Depth + 1,
7423 true))
7424 return false;
7426 // Shuffle inputs must be the same size as the result.
7427 if (llvm::any_of(SrcInputs0, [VT](SDValue Op) {
7428 return VT.getSizeInBits() != Op.getValueSizeInBits();
7429 }))
7430 return false;
7431 if (llvm::any_of(SrcInputs1, [VT](SDValue Op) {
7432 return VT.getSizeInBits() != Op.getValueSizeInBits();
7433 }))
7434 return false;
7436 size_t MaskSize = std::max(SrcMask0.size(), SrcMask1.size());
7437 SmallVector<int, 64> Mask0, Mask1;
7438 narrowShuffleMaskElts(MaskSize / SrcMask0.size(), SrcMask0, Mask0);
7439 narrowShuffleMaskElts(MaskSize / SrcMask1.size(), SrcMask1, Mask1);
7440 for (size_t i = 0; i != MaskSize; ++i) {
7441 if (Mask0[i] == SM_SentinelUndef && Mask1[i] == SM_SentinelUndef)
7442 Mask.push_back(SM_SentinelUndef);
7443 else if (Mask0[i] == SM_SentinelZero && Mask1[i] == SM_SentinelZero)
7444 Mask.push_back(SM_SentinelZero);
7445 else if (Mask1[i] == SM_SentinelZero)
7446 Mask.push_back(Mask0[i]);
7447 else if (Mask0[i] == SM_SentinelZero)
7448 Mask.push_back(Mask1[i] + (int)(MaskSize * SrcInputs0.size()));
7449 else
7450 return false;
7451 }
7452 Ops.append(SrcInputs0.begin(), SrcInputs0.end());
7453 Ops.append(SrcInputs1.begin(), SrcInputs1.end());
7454 return true;
7455 }
7456 case ISD::INSERT_SUBVECTOR: {
7457 SDValue Src = N.getOperand(0);
7458 SDValue Sub = N.getOperand(1);
7459 EVT SubVT = Sub.getValueType();
7460 unsigned NumSubElts = SubVT.getVectorNumElements();
7461 if (!N->isOnlyUserOf(Sub.getNode()))
7462 return false;
7463 uint64_t InsertIdx = N.getConstantOperandVal(2);
7464 // Handle INSERT_SUBVECTOR(SRC0, EXTRACT_SUBVECTOR(SRC1)).
7465 if (Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
7466 Sub.getOperand(0).getValueType() == VT) {
7467 uint64_t ExtractIdx = Sub.getConstantOperandVal(1);
7468 for (int i = 0; i != (int)NumElts; ++i)
7469 Mask.push_back(i);
7470 for (int i = 0; i != (int)NumSubElts; ++i)
7471 Mask[InsertIdx + i] = NumElts + ExtractIdx + i;
7472 Ops.push_back(Src);
7473 Ops.push_back(Sub.getOperand(0));
7474 return true;
7475 }
7476 // Handle INSERT_SUBVECTOR(SRC0, SHUFFLE(SRC1)).
7477 SmallVector<int, 64> SubMask;
7478 SmallVector<SDValue, 2> SubInputs;
7479 if (!getTargetShuffleInputs(peekThroughOneUseBitcasts(Sub), SubInputs,
7480 SubMask, DAG, Depth + 1, ResolveKnownElts))
7481 return false;
7483 // Subvector shuffle inputs must not be larger than the subvector.
7484 if (llvm::any_of(SubInputs, [SubVT](SDValue SubInput) {
7485 return SubVT.getSizeInBits() < SubInput.getValueSizeInBits();
7486 }))
7487 return false;
7489 if (SubMask.size() != NumSubElts) {
7490 assert(((SubMask.size() % NumSubElts) == 0 ||
7491 (NumSubElts % SubMask.size()) == 0) && "Illegal submask scale");
7492 if ((NumSubElts % SubMask.size()) == 0) {
7493 int Scale = NumSubElts / SubMask.size();
7494 SmallVector<int,64> ScaledSubMask;
7495 narrowShuffleMaskElts(Scale, SubMask, ScaledSubMask);
7496 SubMask = ScaledSubMask;
7498 int Scale = SubMask.size() / NumSubElts;
7499 NumSubElts = SubMask.size();
7505 Ops.append(SubInputs.begin(), SubInputs.end());
7506 for (int i = 0; i != (int)NumElts; ++i)
7508 for (int i = 0; i != (int)NumSubElts; ++i) {
7511 int InputIdx = M / NumSubElts;
7512 M = (NumElts * (1 + InputIdx)) + (M % NumSubElts);
7514 Mask[i + InsertIdx] = M;
7518 case X86ISD::PINSRB:
7519 case X86ISD::PINSRW:
7520 case ISD::SCALAR_TO_VECTOR:
7521 case ISD::INSERT_VECTOR_ELT: {
7522 // Match against an insert_vector_elt/scalar_to_vector of an extract from a
7523 // vector, for matching src/dst vector types.
7524 SDValue Scl = N.getOperand(Opcode == ISD::SCALAR_TO_VECTOR ? 0 : 1);
7526 unsigned DstIdx = 0;
7527 if (Opcode != ISD::SCALAR_TO_VECTOR) {
7528 // Check we have an in-range constant insertion index.
7529 if (!isa<ConstantSDNode>(N.getOperand(2)) ||
7530 N.getConstantOperandAPInt(2).uge(NumElts))
7532 DstIdx = N.getConstantOperandVal(2);
7534 // Attempt to recognise an INSERT*(VEC, 0, DstIdx) shuffle pattern.
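// e.g. inserting zero into element 2 of a v4i32 vector yields the mask
// <0,1,Z,3>, where Z denotes SM_SentinelZero.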
7535 if (X86::isZeroNode(Scl)) {
7536 Ops.push_back(N.getOperand(0));
7537 for (unsigned i = 0; i != NumElts; ++i)
7538 Mask.push_back(i == DstIdx ? SM_SentinelZero : (int)i);
7543 // Peek through trunc/aext/zext.
7544 // TODO: aext shouldn't require SM_SentinelZero padding.
7545 // TODO: handle shift of scalars.
7546 unsigned MinBitsPerElt = Scl.getScalarValueSizeInBits();
7547 while (Scl.getOpcode() == ISD::TRUNCATE ||
7548 Scl.getOpcode() == ISD::ANY_EXTEND ||
7549 Scl.getOpcode() == ISD::ZERO_EXTEND) {
7550 Scl = Scl.getOperand(0);
7552 std::min<unsigned>(MinBitsPerElt, Scl.getScalarValueSizeInBits());
7554 if ((MinBitsPerElt % 8) != 0)
7557 // Attempt to find the source vector the scalar was extracted from.
7559 if ((Scl.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
7560 Scl.getOpcode() == X86ISD::PEXTRW ||
7561 Scl.getOpcode() == X86ISD::PEXTRB) &&
7562 Scl.getOperand(0).getValueSizeInBits() == NumSizeInBits) {
7565 if (!SrcExtract || !isa<ConstantSDNode>(SrcExtract.getOperand(1)))
7568 SDValue SrcVec = SrcExtract.getOperand(0);
7569 EVT SrcVT = SrcVec.getValueType();
7570 if (!SrcVT.getScalarType().isByteSized())
7572 unsigned SrcIdx = SrcExtract.getConstantOperandVal(1);
7573 unsigned SrcByte = SrcIdx * (SrcVT.getScalarSizeInBits() / 8);
7574 unsigned DstByte = DstIdx * NumBytesPerElt;
7576 std::min<unsigned>(MinBitsPerElt, SrcVT.getScalarSizeInBits());
7578 // Create 'identity' byte level shuffle mask and then add inserted bytes.
7579 if (Opcode == ISD::SCALAR_TO_VECTOR) {
7580 Ops.push_back(SrcVec);
7581 Mask.append(NumSizeInBytes, SM_SentinelUndef);
7583 Ops.push_back(SrcVec);
7584 Ops.push_back(N.getOperand(0));
7585 for (int i = 0; i != (int)NumSizeInBytes; ++i)
7586 Mask.push_back(NumSizeInBytes + i);
7589 unsigned MinBytesPerElts = MinBitsPerElt / 8;
7590 MinBytesPerElts = std::min(MinBytesPerElts, NumBytesPerElt);
7591 for (unsigned i = 0; i != MinBytesPerElts; ++i)
7592 Mask[DstByte + i] = SrcByte + i;
7593 for (unsigned i = MinBytesPerElts; i < NumBytesPerElt; ++i)
7594 Mask[DstByte + i] = SM_SentinelZero;
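// e.g. inserting an 8-bit scalar (zero-extended in a 32-bit element) copies
// one source byte to DstByte and zeros the remaining three bytes of the
// destination element.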
7597 case X86ISD::PACKSS:
7598 case X86ISD::PACKUS: {
7599 SDValue N0 = N.getOperand(0);
7600 SDValue N1 = N.getOperand(1);
7601 assert(N0.getValueType().getVectorNumElements() == (NumElts / 2) &&
7602 N1.getValueType().getVectorNumElements() == (NumElts / 2) &&
7603 "Unexpected input value type");
7605 APInt EltsLHS, EltsRHS;
7606 getPackDemandedElts(VT, DemandedElts, EltsLHS, EltsRHS);
7608 // If we know input saturation won't happen, we can treat this
7609 // as a truncation shuffle.
7610 if (Opcode == X86ISD::PACKSS) {
7611 if ((!N0.isUndef() &&
7612 DAG.ComputeNumSignBits(N0, EltsLHS, Depth + 1) <= NumBitsPerElt) ||
7614 DAG.ComputeNumSignBits(N1, EltsRHS, Depth + 1) <= NumBitsPerElt))
7617 APInt ZeroMask = APInt::getHighBitsSet(2 * NumBitsPerElt, NumBitsPerElt);
7618 if ((!N0.isUndef() &&
7619 !DAG.MaskedValueIsZero(N0, ZeroMask, EltsLHS, Depth + 1)) ||
7621 !DAG.MaskedValueIsZero(N1, ZeroMask, EltsRHS, Depth + 1)))
7625 bool IsUnary = (N0 == N1);
7631 createPackShuffleMask(VT, Mask, IsUnary);
7634 case X86ISD::VTRUNC: {
7635 SDValue Src = N.getOperand(0);
7636 EVT SrcVT = Src.getValueType();
7637 // Truncated source must be a simple vector.
7638 if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
7639 (SrcVT.getScalarSizeInBits() % 8) != 0)
7641 unsigned NumSrcElts = SrcVT.getVectorNumElements();
7642 unsigned NumBitsPerSrcElt = SrcVT.getScalarSizeInBits();
7643 unsigned Scale = NumBitsPerSrcElt / NumBitsPerElt;
7644 assert((NumBitsPerSrcElt % NumBitsPerElt) == 0 && "Illegal truncation");
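// e.g. VTRUNC v2i64 -> v4i32 decodes to the mask <0,2,Z,Z>: the low half of
// each source element is kept and the trailing elements are zeroed.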
7645 for (unsigned i = 0; i != NumSrcElts; ++i)
7646 Mask.push_back(i * Scale);
7647 Mask.append(NumElts - NumSrcElts, SM_SentinelZero);
7652 case X86ISD::VSRLI: {
7653 uint64_t ShiftVal = N.getConstantOperandVal(1);
7654 // Out of range bit shifts are guaranteed to be zero.
7655 if (NumBitsPerElt <= ShiftVal) {
7656 Mask.append(NumElts, SM_SentinelZero);
7660 // We can only decode 'whole byte' bit shifts as shuffles.
7661 if ((ShiftVal % 8) != 0)
7664 uint64_t ByteShift = ShiftVal / 8;
7665 Ops.push_back(N.getOperand(0));
7667 // Clear mask to all zeros and insert the shifted byte indices.
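// e.g. for v2i64, VSHLI by 16 bits decodes per 8-byte element to the byte
// mask [Z,Z,0,1,2,3,4,5], and VSRLI by 16 bits to [2,3,4,5,6,7,Z,Z].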
7668 Mask.append(NumSizeInBytes, SM_SentinelZero);
7670 if (X86ISD::VSHLI == Opcode) {
7671 for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt)
7672 for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
7673 Mask[i + j] = i + j - ByteShift;
7675 for (unsigned i = 0; i != NumSizeInBytes; i += NumBytesPerElt)
7676 for (unsigned j = ByteShift; j != NumBytesPerElt; ++j)
7677 Mask[i + j - ByteShift] = i + j;
7681 case X86ISD::VROTLI:
7682 case X86ISD::VROTRI: {
7683 // We can only decode 'whole byte' bit rotates as shuffles.
7684 uint64_t RotateVal = N.getConstantOperandAPInt(1).urem(NumBitsPerElt);
7685 if ((RotateVal % 8) != 0)
7687 Ops.push_back(N.getOperand(0));
7688 int Offset = RotateVal / 8;
7689 Offset = (X86ISD::VROTLI == Opcode ? NumBytesPerElt - Offset : Offset);
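// e.g. VROTLI v4i32 by 8 bits gives the per-element byte mask [3,0,1,2]:
// the rotate moves the top byte of each element into the low position.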
7690 for (int i = 0; i != (int)NumElts; ++i) {
7691 int BaseIdx = i * NumBytesPerElt;
7692 for (int j = 0; j != (int)NumBytesPerElt; ++j) {
7693 Mask.push_back(BaseIdx + ((Offset + j) % NumBytesPerElt));
7698 case X86ISD::VBROADCAST: {
7699 SDValue Src = N.getOperand(0);
7700 if (!Src.getSimpleValueType().isVector())
7703 Mask.append(NumElts, 0);
7706 case ISD::ZERO_EXTEND:
7707 case ISD::ANY_EXTEND:
7708 case ISD::ZERO_EXTEND_VECTOR_INREG:
7709 case ISD::ANY_EXTEND_VECTOR_INREG: {
7710 SDValue Src = N.getOperand(0);
7711 EVT SrcVT = Src.getValueType();
7713 // Extended source must be a simple vector.
7714 if (!SrcVT.isSimple() || (SrcVT.getSizeInBits() % 128) != 0 ||
7715 (SrcVT.getScalarSizeInBits() % 8) != 0)
7719 (ISD::ANY_EXTEND == Opcode || ISD::ANY_EXTEND_VECTOR_INREG == Opcode);
7720 DecodeZeroExtendMask(SrcVT.getScalarSizeInBits(), NumBitsPerElt, NumElts,
7730 /// Removes unused/repeated shuffle source inputs and adjusts the shuffle mask.
7731 static void resolveTargetShuffleInputsAndMask(SmallVectorImpl<SDValue> &Inputs,
7732 SmallVectorImpl<int> &Mask) {
7733 int MaskWidth = Mask.size();
7734 SmallVector<SDValue, 16> UsedInputs;
7735 for (int i = 0, e = Inputs.size(); i < e; ++i) {
7736 int lo = UsedInputs.size() * MaskWidth;
7737 int hi = lo + MaskWidth;
7739 // Strip UNDEF input usage.
7740 if (Inputs[i].isUndef())
7742 if ((lo <= M) && (M < hi))
7743 M = SM_SentinelUndef;
7745 // Check for unused inputs.
7746 if (none_of(Mask, [lo, hi](int i) { return (lo <= i) && (i < hi); })) {
7753 // Check for repeated inputs.
7754 bool IsRepeat = false;
7755 for (int j = 0, ue = UsedInputs.size(); j != ue; ++j) {
7756 if (UsedInputs[j] != Inputs[i])
7760 M = (M < hi) ? ((M - lo) + (j * MaskWidth)) : (M - MaskWidth);
7767 UsedInputs.push_back(Inputs[i]);
7769 Inputs = UsedInputs;
7772 /// Calls getTargetShuffleAndZeroables to resolve a target shuffle mask's inputs
7773 /// and then sets the SM_SentinelUndef and SM_SentinelZero values.
7774 /// Returns true if the target shuffle mask was decoded.
7775 static bool getTargetShuffleInputs(SDValue Op, const APInt &DemandedElts,
7776 SmallVectorImpl<SDValue> &Inputs,
7777 SmallVectorImpl<int> &Mask,
7778 APInt &KnownUndef, APInt &KnownZero,
7779 const SelectionDAG &DAG, unsigned Depth,
7780 bool ResolveKnownElts) {
7781 EVT VT = Op.getValueType();
7782 if (!VT.isSimple() || !VT.isVector())
7785 if (getTargetShuffleAndZeroables(Op, Mask, Inputs, KnownUndef, KnownZero)) {
7786 if (ResolveKnownElts)
7787 resolveTargetShuffleFromZeroables(Mask, KnownUndef, KnownZero);
7790 if (getFauxShuffleMask(Op, DemandedElts, Mask, Inputs, DAG, Depth,
7791 ResolveKnownElts)) {
7792 resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
7798 static bool getTargetShuffleInputs(SDValue Op, SmallVectorImpl<SDValue> &Inputs,
7799 SmallVectorImpl<int> &Mask,
7800 const SelectionDAG &DAG, unsigned Depth = 0,
7801 bool ResolveKnownElts = true) {
7802 EVT VT = Op.getValueType();
7803 if (!VT.isSimple() || !VT.isVector())
7806 APInt KnownUndef, KnownZero;
7807 unsigned NumElts = Op.getValueType().getVectorNumElements();
7808 APInt DemandedElts = APInt::getAllOnesValue(NumElts);
7809 return getTargetShuffleInputs(Op, DemandedElts, Inputs, Mask, KnownUndef,
7810 KnownZero, DAG, Depth, ResolveKnownElts);
7813 /// Returns the scalar element that will make up the i'th
7814 /// element of the result of the vector shuffle.
7815 static SDValue getShuffleScalarElt(SDValue Op, unsigned Index,
7816 SelectionDAG &DAG, unsigned Depth) {
7817 if (Depth >= SelectionDAG::MaxRecursionDepth)
7818 return SDValue(); // Limit search depth.
7820 EVT VT = Op.getValueType();
7821 unsigned Opcode = Op.getOpcode();
7822 unsigned NumElems = VT.getVectorNumElements();
7824 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
7825 if (auto *SV = dyn_cast<ShuffleVectorSDNode>(Op)) {
7826 int Elt = SV->getMaskElt(Index);
7829 return DAG.getUNDEF(VT.getVectorElementType());
7831 SDValue Src = (Elt < (int)NumElems) ? SV->getOperand(0) : SV->getOperand(1);
7832 return getShuffleScalarElt(Src, Elt % NumElems, DAG, Depth + 1);
7835 // Recurse into target specific vector shuffles to find scalars.
7836 if (isTargetShuffle(Opcode)) {
7837 MVT ShufVT = VT.getSimpleVT();
7838 MVT ShufSVT = ShufVT.getVectorElementType();
7839 int NumElems = (int)ShufVT.getVectorNumElements();
7840 SmallVector<int, 16> ShuffleMask;
7841 SmallVector<SDValue, 16> ShuffleOps;
7844 if (!getTargetShuffleMask(Op.getNode(), ShufVT, true, ShuffleOps,
7845 ShuffleMask, IsUnary))
7848 int Elt = ShuffleMask[Index];
7849 if (Elt == SM_SentinelZero)
7850 return ShufSVT.isInteger() ? DAG.getConstant(0, SDLoc(Op), ShufSVT)
7851 : DAG.getConstantFP(+0.0, SDLoc(Op), ShufSVT);
7852 if (Elt == SM_SentinelUndef)
7853 return DAG.getUNDEF(ShufSVT);
7855 assert(0 <= Elt && Elt < (2 * NumElems) && "Shuffle index out of range");
7856 SDValue Src = (Elt < NumElems) ? ShuffleOps[0] : ShuffleOps[1];
7857 return getShuffleScalarElt(Src, Elt % NumElems, DAG, Depth + 1);
7860 // Recurse into insert_subvector base/sub vector to find scalars.
7861 if (Opcode == ISD::INSERT_SUBVECTOR) {
7862 SDValue Vec = Op.getOperand(0);
7863 SDValue Sub = Op.getOperand(1);
7864 uint64_t SubIdx = Op.getConstantOperandVal(2);
7865 unsigned NumSubElts = Sub.getValueType().getVectorNumElements();
7867 if (SubIdx <= Index && Index < (SubIdx + NumSubElts))
7868 return getShuffleScalarElt(Sub, Index - SubIdx, DAG, Depth + 1);
7869 return getShuffleScalarElt(Vec, Index, DAG, Depth + 1);
7872 // Recurse into concat_vectors sub vector to find scalars.
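// e.g. for a v8i32 concat of two v4i32 subvectors, Index 5 maps to
// element 1 (5 % 4) of operand 1 (5 / 4).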
7873 if (Opcode == ISD::CONCAT_VECTORS) {
7874 EVT SubVT = Op.getOperand(0).getValueType();
7875 unsigned NumSubElts = SubVT.getVectorNumElements();
7876 uint64_t SubIdx = Index / NumSubElts;
7877 uint64_t SubElt = Index % NumSubElts;
7878 return getShuffleScalarElt(Op.getOperand(SubIdx), SubElt, DAG, Depth + 1);
7881 // Recurse into extract_subvector src vector to find scalars.
7882 if (Opcode == ISD::EXTRACT_SUBVECTOR) {
7883 SDValue Src = Op.getOperand(0);
7884 uint64_t SrcIdx = Op.getConstantOperandVal(1);
7885 return getShuffleScalarElt(Src, Index + SrcIdx, DAG, Depth + 1);
7888 // We only peek through bitcasts of the same vector width.
7889 if (Opcode == ISD::BITCAST) {
7890 SDValue Src = Op.getOperand(0);
7891 EVT SrcVT = Src.getValueType();
7892 if (SrcVT.isVector() && SrcVT.getVectorNumElements() == NumElems)
7893 return getShuffleScalarElt(Src, Index, DAG, Depth + 1);
7897 // Actual nodes that may contain scalar elements
7899 // For insert_vector_elt - either return the index matching scalar or recurse
7900 // into the base vector.
7901 if (Opcode == ISD::INSERT_VECTOR_ELT &&
7902 isa<ConstantSDNode>(Op.getOperand(2))) {
7903 if (Op.getConstantOperandAPInt(2) == Index)
7904 return Op.getOperand(1);
7905 return getShuffleScalarElt(Op.getOperand(0), Index, DAG, Depth + 1);
7908 if (Opcode == ISD::SCALAR_TO_VECTOR)
7909 return (Index == 0) ? Op.getOperand(0)
7910 : DAG.getUNDEF(VT.getVectorElementType());
7912 if (Opcode == ISD::BUILD_VECTOR)
7913 return Op.getOperand(Index);
7918 // Use PINSRB/PINSRW/PINSRD to create a build vector.
7919 static SDValue LowerBuildVectorAsInsert(SDValue Op, unsigned NonZeros,
7920 unsigned NumNonZero, unsigned NumZero,
7922 const X86Subtarget &Subtarget) {
7923 MVT VT = Op.getSimpleValueType();
7924 unsigned NumElts = VT.getVectorNumElements();
7925 assert(((VT == MVT::v8i16 && Subtarget.hasSSE2()) ||
7926 ((VT == MVT::v16i8 || VT == MVT::v4i32) && Subtarget.hasSSE41())) &&
7927 "Illegal vector insertion");
7933 for (unsigned i = 0; i < NumElts; ++i) {
7934 bool IsNonZero = (NonZeros & (1 << i)) != 0;
7938 // If the build vector contains zeros or our first insertion is not the
7939 // first index, then insert into a zero vector to break any register
7940 // dependency; otherwise use SCALAR_TO_VECTOR.
7943 if (NumZero || 0 != i)
7944 V = getZeroVector(VT, Subtarget, DAG, dl);
7946 assert(0 == i && "Expected insertion into zero-index");
7947 V = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
7948 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, V);
7949 V = DAG.getBitcast(VT, V);
7953 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, V, Op.getOperand(i),
7954 DAG.getIntPtrConstant(i, dl));
7960 /// Custom lower build_vector of v16i8.
7961 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
7962 unsigned NumNonZero, unsigned NumZero,
7964 const X86Subtarget &Subtarget) {
7965 if (NumNonZero > 8 && !Subtarget.hasSSE41())
7968 // SSE4.1 - use PINSRB to insert each byte directly.
7969 if (Subtarget.hasSSE41())
7970 return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG,
7976 // Pre-SSE4.1 - merge byte pairs and insert with PINSRW.
7977 for (unsigned i = 0; i < 16; i += 2) {
7978 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
7979 bool NextIsNonZero = (NonZeros & (1 << (i + 1))) != 0;
7980 if (!ThisIsNonZero && !NextIsNonZero)
7983 // FIXME: Investigate combining the first 4 bytes as an i32 instead.
7985 if (ThisIsNonZero) {
7986 if (NumZero || NextIsNonZero)
7987 Elt = DAG.getZExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
7989 Elt = DAG.getAnyExtOrTrunc(Op.getOperand(i), dl, MVT::i32);
7992 if (NextIsNonZero) {
7993 SDValue NextElt = Op.getOperand(i + 1);
7994 if (i == 0 && NumZero)
7995 NextElt = DAG.getZExtOrTrunc(NextElt, dl, MVT::i32);
7997 NextElt = DAG.getAnyExtOrTrunc(NextElt, dl, MVT::i32);
7998 NextElt = DAG.getNode(ISD::SHL, dl, MVT::i32, NextElt,
7999 DAG.getConstant(8, dl, MVT::i8));
8001 Elt = DAG.getNode(ISD::OR, dl, MVT::i32, NextElt, Elt);
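// Elt now holds the byte pair as (lo | (hi << 8)), matching the
// little-endian layout of a 16-bit element.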
8006 // If our first insertion is not the first index or zeros are needed, then
8007 // insert into a zero vector. Otherwise, use SCALAR_TO_VECTOR (leaves high
8008 // elements undefined).
8010 if (i != 0 || NumZero)
8011 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
8013 V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Elt);
8014 V = DAG.getBitcast(MVT::v8i16, V);
8018 Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Elt);
8019 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, Elt,
8020 DAG.getIntPtrConstant(i / 2, dl));
8023 return DAG.getBitcast(MVT::v16i8, V);
8026 /// Custom lower build_vector of v8i16.
8027 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
8028 unsigned NumNonZero, unsigned NumZero,
8030 const X86Subtarget &Subtarget) {
8031 if (NumNonZero > 4 && !Subtarget.hasSSE41())
8034 // Use PINSRW to insert each element directly.
8035 return LowerBuildVectorAsInsert(Op, NonZeros, NumNonZero, NumZero, DAG,
8039 /// Custom lower build_vector of v4i32 or v4f32.
8040 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
8041 const X86Subtarget &Subtarget) {
8042 // If this is a splat of a pair of elements, use MOVDDUP (unless the target
8043 // has XOP; in that case defer lowering to potentially use VPERMIL2PS).
8044 // Because we're creating a less complicated build vector here, we may enable
8045 // further folding of the MOVDDUP via shuffle transforms.
8046 if (Subtarget.hasSSE3() && !Subtarget.hasXOP() &&
8047 Op.getOperand(0) == Op.getOperand(2) &&
8048 Op.getOperand(1) == Op.getOperand(3) &&
8049 Op.getOperand(0) != Op.getOperand(1)) {
8051 MVT VT = Op.getSimpleValueType();
8052 MVT EltVT = VT.getVectorElementType();
8053 // Create a new build vector with the first 2 elements followed by undef
8054 // padding, bitcast to v2f64, duplicate, and bitcast back.
8055 SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
8056 DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
8057 SDValue NewBV = DAG.getBitcast(MVT::v2f64, DAG.getBuildVector(VT, DL, Ops));
8058 SDValue Dup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, NewBV);
8059 return DAG.getBitcast(VT, Dup);
8062 // Find all zeroable elements.
8063 std::bitset<4> Zeroable, Undefs;
8064 for (int i = 0; i < 4; ++i) {
8065 SDValue Elt = Op.getOperand(i);
8066 Undefs[i] = Elt.isUndef();
8067 Zeroable[i] = (Elt.isUndef() || X86::isZeroNode(Elt));
8069 assert(Zeroable.size() - Zeroable.count() > 1 &&
8070 "We expect at least two non-zero elements!");
8072 // We only know how to deal with build_vector nodes where elements are either
8073 // zeroable or extract_vector_elt with constant index.
8074 SDValue FirstNonZero;
8075 unsigned FirstNonZeroIdx;
8076 for (unsigned i = 0; i < 4; ++i) {
8079 SDValue Elt = Op.getOperand(i);
8080 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
8081 !isa<ConstantSDNode>(Elt.getOperand(1)))
8083 // Make sure that this node is extracting from a 128-bit vector.
8084 MVT VT = Elt.getOperand(0).getSimpleValueType();
8085 if (!VT.is128BitVector())
8087 if (!FirstNonZero.getNode()) {
8089 FirstNonZeroIdx = i;
8093 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
8094 SDValue V1 = FirstNonZero.getOperand(0);
8095 MVT VT = V1.getSimpleValueType();
8097 // See if this build_vector can be lowered as a blend with zero.
8099 unsigned EltMaskIdx, EltIdx;
8101 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
8102 if (Zeroable[EltIdx]) {
8103 // The zero vector will be on the right hand side.
8104 Mask[EltIdx] = EltIdx+4;
8108 Elt = Op->getOperand(EltIdx);
8109 // By construction, Elt is an EXTRACT_VECTOR_ELT with constant index.
8110 EltMaskIdx = Elt.getConstantOperandVal(1);
8111 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
8113 Mask[EltIdx] = EltIdx;
8117 // Let the shuffle legalizer deal with blend operations.
8118 SDValue VZeroOrUndef = (Zeroable == Undefs)
8120 : getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
8121 if (V1.getSimpleValueType() != VT)
8122 V1 = DAG.getBitcast(VT, V1);
8123 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZeroOrUndef, Mask);
8126 // See if we can lower this build_vector to an INSERTPS.
8127 if (!Subtarget.hasSSE41())
8130 SDValue V2 = Elt.getOperand(0);
8131 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
8134 bool CanFold = true;
8135 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
8139 SDValue Current = Op->getOperand(i);
8140 SDValue SrcVector = Current->getOperand(0);
8143 CanFold = (SrcVector == V1) && (Current.getConstantOperandAPInt(1) == i);
8149 assert(V1.getNode() && "Expected at least two non-zero elements!");
8150 if (V1.getSimpleValueType() != MVT::v4f32)
8151 V1 = DAG.getBitcast(MVT::v4f32, V1);
8152 if (V2.getSimpleValueType() != MVT::v4f32)
8153 V2 = DAG.getBitcast(MVT::v4f32, V2);
8155 // Ok, we can emit an INSERTPS instruction.
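// The INSERTPS immediate encodes bits [7:6] = source element (CountS),
// bits [5:4] = destination element (CountD) and bits [3:0] = zero mask.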
8156 unsigned ZMask = Zeroable.to_ulong();
8158 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
8159 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
8161 SDValue Result = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
8162 DAG.getIntPtrConstant(InsertPSMask, DL, true));
8163 return DAG.getBitcast(VT, Result);
8166 /// Return a vector logical shift node.
8167 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp, unsigned NumBits,
8168 SelectionDAG &DAG, const TargetLowering &TLI,
8170 assert(VT.is128BitVector() && "Unknown type for VShift");
8171 MVT ShVT = MVT::v16i8;
8172 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
8173 SrcOp = DAG.getBitcast(ShVT, SrcOp);
8174 assert(NumBits % 8 == 0 && "Only support byte sized shifts");
8175 SDValue ShiftVal = DAG.getTargetConstant(NumBits / 8, dl, MVT::i8);
8176 return DAG.getBitcast(VT, DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
8179 static SDValue LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, const SDLoc &dl,
8180 SelectionDAG &DAG) {
8182 // Check if the scalar load can be widened into a vector load, and if
8183 // the address is "base + cst", see whether the cst can be "absorbed" into
8184 // the shuffle mask.
8185 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
8186 SDValue Ptr = LD->getBasePtr();
8187 if (!ISD::isNormalLoad(LD) || !LD->isSimple())
8189 EVT PVT = LD->getValueType(0);
8190 if (PVT != MVT::i32 && PVT != MVT::f32)
8195 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
8196 FI = FINode->getIndex();
8198 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
8199 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
8200 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
8201 Offset = Ptr.getConstantOperandVal(1);
8202 Ptr = Ptr.getOperand(0);
8207 // FIXME: 256-bit vector instructions don't require strict alignment;
8208 // improve this code to support them better.
8209 Align RequiredAlign(VT.getSizeInBits() / 8);
8210 SDValue Chain = LD->getChain();
8211 // Make sure the stack object alignment is at least 16 or 32.
8212 MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
8213 MaybeAlign InferredAlign = DAG.InferPtrAlign(Ptr);
8214 if (!InferredAlign || *InferredAlign < RequiredAlign) {
8215 if (MFI.isFixedObjectIndex(FI)) {
8216 // Can't change the alignment. FIXME: It's possible to compute
8217 // the exact stack offset and reference FI + adjust offset instead,
8218 // if someone *really* cares about this; that's the way to implement it.
8221 MFI.setObjectAlignment(FI, RequiredAlign);
8225 // (Offset % 16 or 32) must be a multiple of 4. The address is then
8226 // Ptr + (Offset & ~15).
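// e.g. a 4-byte scalar loaded at Offset 20 from a 16-byte-aligned base is
// widened to a vector load at StartOffset 16, with the scalar becoming
// element (20 - 16) / 4 == 1 of the splat shuffle.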
8229 if ((Offset % RequiredAlign.value()) & 3)
8231 int64_t StartOffset = Offset & ~int64_t(RequiredAlign.value() - 1);
8234 Ptr = DAG.getNode(ISD::ADD, DL, Ptr.getValueType(), Ptr,
8235 DAG.getConstant(StartOffset, DL, Ptr.getValueType()));
8238 int EltNo = (Offset - StartOffset) >> 2;
8239 unsigned NumElems = VT.getVectorNumElements();
8241 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
8242 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
8243 LD->getPointerInfo().getWithOffset(StartOffset));
8245 SmallVector<int, 8> Mask(NumElems, EltNo);
8247 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), Mask);
8253 // Recurse to find a LoadSDNode source and the accumulated ByteOffset.
8254 static bool findEltLoadSrc(SDValue Elt, LoadSDNode *&Ld, int64_t &ByteOffset) {
8255 if (ISD::isNON_EXTLoad(Elt.getNode())) {
8256 auto *BaseLd = cast<LoadSDNode>(Elt);
8257 if (!BaseLd->isSimple())
8264 switch (Elt.getOpcode()) {
8267 case ISD::SCALAR_TO_VECTOR:
8268 return findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset);
8270 if (auto *IdxC = dyn_cast<ConstantSDNode>(Elt.getOperand(1))) {
8271 uint64_t Idx = IdxC->getZExtValue();
8272 if ((Idx % 8) == 0 && findEltLoadSrc(Elt.getOperand(0), Ld, ByteOffset)) {
8273 ByteOffset += Idx / 8;
8278 case ISD::EXTRACT_VECTOR_ELT:
8279 if (auto *IdxC = dyn_cast<ConstantSDNode>(Elt.getOperand(1))) {
8280 SDValue Src = Elt.getOperand(0);
8281 unsigned SrcSizeInBits = Src.getScalarValueSizeInBits();
8282 unsigned DstSizeInBits = Elt.getScalarValueSizeInBits();
8283 if (DstSizeInBits == SrcSizeInBits && (SrcSizeInBits % 8) == 0 &&
8284 findEltLoadSrc(Src, Ld, ByteOffset)) {
8285 uint64_t Idx = IdxC->getZExtValue();
8286 ByteOffset += Idx * (SrcSizeInBits / 8);
8296 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
8297 /// elements can be replaced by a single large load which has the same value as
8298 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
8300 /// Example: <load i32 *a, load i32 *a+4, zero, undef> -> zextload a
8301 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
8302 const SDLoc &DL, SelectionDAG &DAG,
8303 const X86Subtarget &Subtarget,
8304 bool isAfterLegalize) {
8305 if ((VT.getScalarSizeInBits() % 8) != 0)
8308 unsigned NumElems = Elts.size();
8310 int LastLoadedElt = -1;
8311 APInt LoadMask = APInt::getNullValue(NumElems);
8312 APInt ZeroMask = APInt::getNullValue(NumElems);
8313 APInt UndefMask = APInt::getNullValue(NumElems);
8315 SmallVector<LoadSDNode*, 8> Loads(NumElems, nullptr);
8316 SmallVector<int64_t, 8> ByteOffsets(NumElems, 0);
8318 // For each element in the initializer, see if we've found a load, zero or an undef.
8320 for (unsigned i = 0; i < NumElems; ++i) {
8321 SDValue Elt = peekThroughBitcasts(Elts[i]);
8324 if (Elt.isUndef()) {
8325 UndefMask.setBit(i);
8328 if (X86::isZeroNode(Elt) || ISD::isBuildVectorAllZeros(Elt.getNode())) {
8333 // Each loaded element must be the correct fractional portion of the
8334 // requested vector load.
8335 unsigned EltSizeInBits = Elt.getValueSizeInBits();
8336 if ((NumElems * EltSizeInBits) != VT.getSizeInBits())
8339 if (!findEltLoadSrc(Elt, Loads[i], ByteOffsets[i]) || ByteOffsets[i] < 0)
8341 unsigned LoadSizeInBits = Loads[i]->getValueSizeInBits(0);
8342 if (((ByteOffsets[i] * 8) + EltSizeInBits) > LoadSizeInBits)
8348 assert((ZeroMask.countPopulation() + UndefMask.countPopulation() +
8349 LoadMask.countPopulation()) == NumElems &&
8350 "Incomplete element masks");
8352 // Handle Special Cases - all undef or undef/zero.
8353 if (UndefMask.countPopulation() == NumElems)
8354 return DAG.getUNDEF(VT);
8356 // FIXME: Should we return this as a BUILD_VECTOR instead?
8357 if ((ZeroMask.countPopulation() + UndefMask.countPopulation()) == NumElems)
8358 return VT.isInteger() ? DAG.getConstant(0, DL, VT)
8359 : DAG.getConstantFP(0.0, DL, VT);
8361 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8362 int FirstLoadedElt = LoadMask.countTrailingZeros();
8363 SDValue EltBase = peekThroughBitcasts(Elts[FirstLoadedElt]);
8364 EVT EltBaseVT = EltBase.getValueType();
8365 assert(EltBaseVT.getSizeInBits() == EltBaseVT.getStoreSizeInBits() &&
8366 "Register/Memory size mismatch");
8367 LoadSDNode *LDBase = Loads[FirstLoadedElt];
8368 assert(LDBase && "Did not find base load for merging consecutive loads");
8369 unsigned BaseSizeInBits = EltBaseVT.getStoreSizeInBits();
8370 unsigned BaseSizeInBytes = BaseSizeInBits / 8;
8371 int LoadSizeInBits = (1 + LastLoadedElt - FirstLoadedElt) * BaseSizeInBits;
8372 assert((BaseSizeInBits % 8) == 0 && "Sub-byte element loads detected");
8374 // TODO: Support offsetting the base load.
8375 if (ByteOffsets[FirstLoadedElt] != 0)
8378 // Check to see if the element's load is consecutive to the base load
8379 // or offset from a previous (already checked) load.
8380 auto CheckConsecutiveLoad = [&](LoadSDNode *Base, int EltIdx) {
8381 LoadSDNode *Ld = Loads[EltIdx];
8382 int64_t ByteOffset = ByteOffsets[EltIdx];
8383 if (ByteOffset && (ByteOffset % BaseSizeInBytes) == 0) {
8384 int64_t BaseIdx = EltIdx - (ByteOffset / BaseSizeInBytes);
8385 return (0 <= BaseIdx && BaseIdx < (int)NumElems && LoadMask[BaseIdx] &&
8386 Loads[BaseIdx] == Ld && ByteOffsets[BaseIdx] == 0);
8388 return DAG.areNonVolatileConsecutiveLoads(Ld, Base, BaseSizeInBytes,
8389 EltIdx - FirstLoadedElt);
8392 // Consecutive loads can contain UNDEFs but not ZERO elements.
8393 // Consecutive loads with UNDEF and ZERO elements require an
8394 // additional shuffle stage to clear the ZERO elements.
8395 bool IsConsecutiveLoad = true;
8396 bool IsConsecutiveLoadWithZeros = true;
8397 for (int i = FirstLoadedElt + 1; i <= LastLoadedElt; ++i) {
8399 if (!CheckConsecutiveLoad(LDBase, i)) {
8400 IsConsecutiveLoad = false;
8401 IsConsecutiveLoadWithZeros = false;
8404 } else if (ZeroMask[i]) {
8405 IsConsecutiveLoad = false;
8409 auto CreateLoad = [&DAG, &DL, &Loads](EVT VT, LoadSDNode *LDBase) {
8410 auto MMOFlags = LDBase->getMemOperand()->getFlags();
8411 assert(LDBase->isSimple() &&
8412 "Cannot merge volatile or atomic loads.");
8414 DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
8415 LDBase->getPointerInfo(), LDBase->getOriginalAlign(),
8417 for (auto *LD : Loads)
8419 DAG.makeEquivalentMemoryOrdering(LD, NewLd);
8423 // Check if the base load is entirely dereferenceable.
8424 bool IsDereferenceable = LDBase->getPointerInfo().isDereferenceable(
8425 VT.getSizeInBits() / 8, *DAG.getContext(), DAG.getDataLayout());
8427 // LOAD - all consecutive load/undefs (must start/end with a load or be
8428 // entirely dereferenceable). If we have found an entire vector of loads and
8429 // undefs, then return a large load of the entire vector width starting at the
8430 // base pointer. If the vector contains zeros, then attempt to shuffle those elements.
8432 if (FirstLoadedElt == 0 &&
8433 (LastLoadedElt == (int)(NumElems - 1) || IsDereferenceable) &&
8434 (IsConsecutiveLoad || IsConsecutiveLoadWithZeros)) {
8435 if (isAfterLegalize && !TLI.isOperationLegal(ISD::LOAD, VT))
8438 // Don't create 256-bit non-temporal aligned loads without AVX2 as these
8439 // will lower to regular temporal loads and use the cache.
8440 if (LDBase->isNonTemporal() && LDBase->getAlignment() >= 32 &&
8441 VT.is256BitVector() && !Subtarget.hasInt256())
8445 return DAG.getBitcast(VT, Elts[FirstLoadedElt]);
8448 return CreateLoad(VT, LDBase);
8450 // IsConsecutiveLoadWithZeros - we need to create a shuffle of the loaded
8451 // vector and a zero vector to clear out the zero elements.
8452 if (!isAfterLegalize && VT.isVector()) {
8453 unsigned NumMaskElts = VT.getVectorNumElements();
8454 if ((NumMaskElts % NumElems) == 0) {
8455 unsigned Scale = NumMaskElts / NumElems;
8456 SmallVector<int, 4> ClearMask(NumMaskElts, -1);
8457 for (unsigned i = 0; i < NumElems; ++i) {
8460 int Offset = ZeroMask[i] ? NumMaskElts : 0;
8461 for (unsigned j = 0; j != Scale; ++j)
8462 ClearMask[(i * Scale) + j] = (i * Scale) + j + Offset;
8464 SDValue V = CreateLoad(VT, LDBase);
8465 SDValue Z = VT.isInteger() ? DAG.getConstant(0, DL, VT)
8466 : DAG.getConstantFP(0.0, DL, VT);
8467 return DAG.getVectorShuffle(VT, DL, V, Z, ClearMask);
8472 // If the upper half of a ymm/zmm load is undef then just load the lower half.
8473 if (VT.is256BitVector() || VT.is512BitVector()) {
8474 unsigned HalfNumElems = NumElems / 2;
8475 if (UndefMask.extractBits(HalfNumElems, HalfNumElems).isAllOnesValue()) {
8477 EVT::getVectorVT(*DAG.getContext(), VT.getScalarType(), HalfNumElems);
8479 EltsFromConsecutiveLoads(HalfVT, Elts.drop_back(HalfNumElems), DL,
8480 DAG, Subtarget, isAfterLegalize);
8482 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT),
8483 HalfLD, DAG.getIntPtrConstant(0, DL));
8487 // VZEXT_LOAD - consecutive 32/64-bit load/undefs followed by zeros/undefs.
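// e.g. <load i32 *p, zero, undef, zero> can become a single scalar load
// that implicitly zeroes the upper elements (typically a MOVD/MOVQ).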
8488 if (IsConsecutiveLoad && FirstLoadedElt == 0 &&
8489 (LoadSizeInBits == 32 || LoadSizeInBits == 64) &&
8490 ((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()))) {
8491 MVT VecSVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(LoadSizeInBits)
8492 : MVT::getIntegerVT(LoadSizeInBits);
8493 MVT VecVT = MVT::getVectorVT(VecSVT, VT.getSizeInBits() / LoadSizeInBits);
8494 // Allow v4f32 on SSE1 only targets.
8495 // FIXME: Add more isel patterns so we can just use VT directly.
8496 if (!Subtarget.hasSSE2() && VT == MVT::v4f32)
8498 if (TLI.isTypeLegal(VecVT)) {
8499 SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
8500 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
8501 SDValue ResNode = DAG.getMemIntrinsicNode(
8502 X86ISD::VZEXT_LOAD, DL, Tys, Ops, VecSVT, LDBase->getPointerInfo(),
8503 LDBase->getOriginalAlign(), MachineMemOperand::MOLoad);
8504 for (auto *LD : Loads)
8506 DAG.makeEquivalentMemoryOrdering(LD, ResNode);
8507 return DAG.getBitcast(VT, ResNode);
8511 // BROADCAST - match the smallest possible repetition pattern, load that
8512 // scalar/subvector element and then broadcast to the entire vector.
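// e.g. a v8i32 build vector of loads <a,b,a,b,a,b,a,b> matches a 64-bit
// repetition and can be lowered as a broadcast of the 8-byte pair.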
8513 if (ZeroMask.isNullValue() && isPowerOf2_32(NumElems) && Subtarget.hasAVX() &&
8514 (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector())) {
8515 for (unsigned SubElems = 1; SubElems < NumElems; SubElems *= 2) {
8516 unsigned RepeatSize = SubElems * BaseSizeInBits;
8517 unsigned ScalarSize = std::min(RepeatSize, 64u);
8518 if (!Subtarget.hasAVX2() && ScalarSize < 32)
8522 SmallVector<SDValue, 8> RepeatedLoads(SubElems, DAG.getUNDEF(EltBaseVT));
8523 for (unsigned i = 0; i != NumElems && Match; ++i) {
8526 SDValue Elt = peekThroughBitcasts(Elts[i]);
8527 if (RepeatedLoads[i % SubElems].isUndef())
8528 RepeatedLoads[i % SubElems] = Elt;
8530 Match &= (RepeatedLoads[i % SubElems] == Elt);
8533 // We must have loads at both ends of the repetition.
8534 Match &= !RepeatedLoads.front().isUndef();
8535 Match &= !RepeatedLoads.back().isUndef();
8540 VT.isInteger() && (RepeatSize != 64 || TLI.isTypeLegal(MVT::i64))
8541 ? EVT::getIntegerVT(*DAG.getContext(), ScalarSize)
8542 : EVT::getFloatingPointVT(ScalarSize);
8543 if (RepeatSize > ScalarSize)
8544 RepeatVT = EVT::getVectorVT(*DAG.getContext(), RepeatVT,
8545 RepeatSize / ScalarSize);
8547 EVT::getVectorVT(*DAG.getContext(), RepeatVT.getScalarType(),
8548 VT.getSizeInBits() / ScalarSize);
8549 if (TLI.isTypeLegal(BroadcastVT)) {
8550 if (SDValue RepeatLoad = EltsFromConsecutiveLoads(
8551 RepeatVT, RepeatedLoads, DL, DAG, Subtarget, isAfterLegalize)) {
8552 unsigned Opcode = RepeatSize > ScalarSize ? X86ISD::SUBV_BROADCAST
8553 : X86ISD::VBROADCAST;
8554 SDValue Broadcast = DAG.getNode(Opcode, DL, BroadcastVT, RepeatLoad);
8555 return DAG.getBitcast(VT, Broadcast);
8564 // Combine a vector op (shuffles etc.) that is equal to build_vector load1,
8565 // load2, load3, load4, <0, 1, 2, 3> into a vector load if the load addresses
8566 // are consecutive, non-overlapping, and in the right order.
8567 static SDValue combineToConsecutiveLoads(EVT VT, SDValue Op, const SDLoc &DL,
8569 const X86Subtarget &Subtarget,
8570 bool isAfterLegalize) {
8571 SmallVector<SDValue, 64> Elts;
8572 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
8573 if (SDValue Elt = getShuffleScalarElt(Op, i, DAG, 0)) {
8574 Elts.push_back(Elt);
8579 assert(Elts.size() == VT.getVectorNumElements());
8580 return EltsFromConsecutiveLoads(VT, Elts, DL, DAG, Subtarget,
8584 static Constant *getConstantVector(MVT VT, const APInt &SplatValue,
8585 unsigned SplatBitSize, LLVMContext &C) {
8586 unsigned ScalarSize = VT.getScalarSizeInBits();
8587 unsigned NumElm = SplatBitSize / ScalarSize;
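// e.g. a 64-bit splat pattern in a v8f32 vector gives NumElm == 2: the
// returned constant holds one instance of the repeating pattern, which the
// caller then broadcasts across the full vector.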
8589 SmallVector<Constant *, 32> ConstantVec;
8590 for (unsigned i = 0; i < NumElm; i++) {
8591 APInt Val = SplatValue.extractBits(ScalarSize, ScalarSize * i);
8593 if (VT.isFloatingPoint()) {
8594 if (ScalarSize == 32) {
8595 Const = ConstantFP::get(C, APFloat(APFloat::IEEEsingle(), Val));
8597 assert(ScalarSize == 64 && "Unsupported floating point scalar size");
8598 Const = ConstantFP::get(C, APFloat(APFloat::IEEEdouble(), Val));
8601 Const = Constant::getIntegerValue(Type::getIntNTy(C, ScalarSize), Val);
8602 ConstantVec.push_back(Const);
8604 return ConstantVector::get(ArrayRef<Constant *>(ConstantVec));
8607 static bool isFoldableUseOfShuffle(SDNode *N) {
8608 for (auto *U : N->uses()) {
8609 unsigned Opc = U->getOpcode();
8610 // VPERMV/VPERMV3 shuffles can never fold their index operands.
8611 if (Opc == X86ISD::VPERMV && U->getOperand(0).getNode() == N)
8613 if (Opc == X86ISD::VPERMV3 && U->getOperand(1).getNode() == N)
8615 if (isTargetShuffle(Opc))
8617 if (Opc == ISD::BITCAST) // Ignore bitcasts
8618 return isFoldableUseOfShuffle(U);
8625 // Check if the current node of a build vector is a zero-extended vector.
8626 // If so, return the value that is being zero-extended.
8627 // For example: (0,0,0,a,0,0,0,a,0,0,0,a,0,0,0,a) returns a.
8628 // NumElt - return the number of zero-extended identical values.
8629 // EltType - return the type of the value including the zero extension.
8630 static SDValue isSplatZeroExtended(const BuildVectorSDNode *Op,
8631 unsigned &NumElt, MVT &EltType) {
8632 SDValue ExtValue = Op->getOperand(0);
8633 unsigned NumElts = Op->getNumOperands();
8634 unsigned Delta = NumElts;
8636 for (unsigned i = 1; i < NumElts; i++) {
8637 if (Op->getOperand(i) == ExtValue) {
8641 if (!(Op->getOperand(i).isUndef() || isNullConstant(Op->getOperand(i))))
8644 if (!isPowerOf2_32(Delta) || Delta == 1)
8647 for (unsigned i = Delta; i < NumElts; i++) {
8648 if (i % Delta == 0) {
8649 if (Op->getOperand(i) != ExtValue)
8651 } else if (!(isNullConstant(Op->getOperand(i)) ||
8652 Op->getOperand(i).isUndef()))
8655 unsigned EltSize = Op->getSimpleValueType(0).getScalarSizeInBits();
8656 unsigned ExtVTSize = EltSize * Delta;
8657 EltType = MVT::getIntegerVT(ExtVTSize);
8658 NumElt = NumElts / Delta;
8662 /// Attempt to use the vbroadcast instruction to generate a splat value
8663 /// from a splat BUILD_VECTOR which uses:
8664 /// a. A single scalar load, or a constant.
8665 /// b. Repeated pattern of constants (e.g. <0,1,0,1> or <0,1,2,3,0,1,2,3>).
8667 /// The VBROADCAST node is returned when a pattern is found,
8668 /// or SDValue() otherwise.
8669 static SDValue lowerBuildVectorAsBroadcast(BuildVectorSDNode *BVOp,
8670 const X86Subtarget &Subtarget,
8671 SelectionDAG &DAG) {
8672 // VBROADCAST requires AVX.
8673 // TODO: Splats could be generated for non-AVX CPUs using SSE
8674 // instructions, but there's less potential gain for only 128-bit vectors.
8675 if (!Subtarget.hasAVX())
8678 MVT VT = BVOp->getSimpleValueType(0);
8681 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
8682 "Unsupported vector type for broadcast.");
8684 BitVector UndefElements;
8685 SDValue Ld = BVOp->getSplatValue(&UndefElements);
8687 // Attempt to use VBROADCASTM
8688 // From this pattern:
8689 // a. t0 = (zext_i64 (bitcast_i8 v2i1 X))
8690 // b. t1 = (build_vector t0 t0)
8692 // Create (VBROADCASTM v2i1 X)
8693 if (Subtarget.hasCDI() && (VT.is512BitVector() || Subtarget.hasVLX())) {
8694 MVT EltType = VT.getScalarType();
8695 unsigned NumElts = VT.getVectorNumElements();
8697 SDValue ZeroExtended = isSplatZeroExtended(BVOp, NumElts, EltType);
8698 if ((ZeroExtended && ZeroExtended.getOpcode() == ISD::BITCAST) ||
8699 (Ld && Ld.getOpcode() == ISD::ZERO_EXTEND &&
8700 Ld.getOperand(0).getOpcode() == ISD::BITCAST)) {
8702 BOperand = ZeroExtended.getOperand(0);
8704 BOperand = Ld.getOperand(0).getOperand(0);
8705 MVT MaskVT = BOperand.getSimpleValueType();
8706 if ((EltType == MVT::i64 && MaskVT == MVT::v8i1) || // for broadcastmb2q
8707 (EltType == MVT::i32 && MaskVT == MVT::v16i1)) { // for broadcastmw2d
8709 DAG.getNode(X86ISD::VBROADCASTM, dl,
8710 MVT::getVectorVT(EltType, NumElts), BOperand);
8711 return DAG.getBitcast(VT, Brdcst);
8716 unsigned NumElts = VT.getVectorNumElements();
8717 unsigned NumUndefElts = UndefElements.count();
8718 if (!Ld || (NumElts - NumUndefElts) <= 1) {
8719 APInt SplatValue, Undef;
8720 unsigned SplatBitSize;
8722 // Check if this is a repeated constant pattern suitable for broadcasting.
8723 if (BVOp->isConstantSplat(SplatValue, Undef, SplatBitSize, HasUndef) &&
8724 SplatBitSize > VT.getScalarSizeInBits() &&
8725 SplatBitSize < VT.getSizeInBits()) {
8726 // Avoid replacing with a broadcast when the value is used by a shuffle
8727 // instruction, to preserve the present custom lowering of shuffles.
8728 if (isFoldableUseOfShuffle(BVOp))
8730 // Replace the BUILD_VECTOR with a broadcast of the repeated constants.
8731 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8732 LLVMContext *Ctx = DAG.getContext();
8733 MVT PVT = TLI.getPointerTy(DAG.getDataLayout());
8734 if (Subtarget.hasAVX()) {
8735 if (SplatBitSize == 32 || SplatBitSize == 64 ||
8736 (SplatBitSize < 32 && Subtarget.hasAVX2())) {
8737 // Splatted value can fit in one INTEGER constant in constant pool.
8738 // Load the constant and broadcast it.
8739 MVT CVT = MVT::getIntegerVT(SplatBitSize);
8740 Type *ScalarTy = Type::getIntNTy(*Ctx, SplatBitSize);
8741 Constant *C = Constant::getIntegerValue(ScalarTy, SplatValue);
8742 SDValue CP = DAG.getConstantPool(C, PVT);
8743 unsigned Repeat = VT.getSizeInBits() / SplatBitSize;
8745 Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
8747 DAG.getVTList(MVT::getVectorVT(CVT, Repeat), MVT::Other);
8748 SDValue Ops[] = {DAG.getEntryNode(), CP};
8749 MachinePointerInfo MPI =
8750 MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
8751 SDValue Brdcst = DAG.getMemIntrinsicNode(
8752 X86ISD::VBROADCAST_LOAD, dl, Tys, Ops, CVT, MPI, Alignment,
8753 MachineMemOperand::MOLoad);
8754 return DAG.getBitcast(VT, Brdcst);
8756 if (SplatBitSize > 64) {
8757 // Load the vector of constants and broadcast it.
8758 MVT CVT = VT.getScalarType();
8759 Constant *VecC = getConstantVector(VT, SplatValue, SplatBitSize,
8761 SDValue VCP = DAG.getConstantPool(VecC, PVT);
8762 unsigned NumElm = SplatBitSize / VT.getScalarSizeInBits();
8763 Align Alignment = cast<ConstantPoolSDNode>(VCP)->getAlign();
8765 MVT::getVectorVT(CVT, NumElm), dl, DAG.getEntryNode(), VCP,
8766 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
8768 SDValue Brdcst = DAG.getNode(X86ISD::SUBV_BROADCAST, dl, VT, Ld);
8769 return DAG.getBitcast(VT, Brdcst);
8774 // If we are moving a scalar into a vector (Ld must be set and all elements
8775 // but 1 are undef) and that operation is not obviously supported by
8776 // vmovd/vmovq/vmovss/vmovsd, then keep trying to form a broadcast.
8777 // That's better than general shuffling and may eliminate a load to GPR and
8778 // move from scalar to vector register.
8779 if (!Ld || NumElts - NumUndefElts != 1)
8781 unsigned ScalarSize = Ld.getValueSizeInBits();
8782 if (!(UndefElements[0] || (ScalarSize != 32 && ScalarSize != 64)))
8786 bool ConstSplatVal =
8787 (Ld.getOpcode() == ISD::Constant || Ld.getOpcode() == ISD::ConstantFP);
8788 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
8790 // Make sure that all of the users of a non-constant load are from the
8791 // BUILD_VECTOR node.
8792 // FIXME: Is the use count needed for the non-constant, non-load case?
8793 if (!ConstSplatVal && !IsLoad && !BVOp->isOnlyUserOf(Ld.getNode()))
8796 unsigned ScalarSize = Ld.getValueSizeInBits();
8797 bool IsGE256 = (VT.getSizeInBits() >= 256);
8799 // When optimizing for size, generate up to 5 extra bytes for a broadcast
8800 // instruction to save 8 or more bytes of constant pool data.
8801 // TODO: If multiple splats are generated to load the same constant,
8802 // it may be detrimental to overall size. There needs to be a way to detect
8803 // that condition to know if this is truly a size win.
8804 bool OptForSize = DAG.shouldOptForSize();
8806 // Handle broadcasting a single constant scalar from the constant pool into a vector.
8808 // On Sandybridge (no AVX2), it is still better to load a constant vector
8809 // from the constant pool and not to broadcast it from a scalar.
8810 // But override that restriction when optimizing for size.
8811 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
8812 if (ConstSplatVal && (Subtarget.hasAVX2() || OptForSize)) {
8813 EVT CVT = Ld.getValueType();
8814 assert(!CVT.isVector() && "Must not broadcast a vector type");
8816 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
8817 // For size optimization, also splat v2f64 and v2i64, and for size opt
8818 // with AVX2, also splat i8 and i16.
8819 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
8820 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
8821 (OptForSize && (ScalarSize == 64 || Subtarget.hasAVX2()))) {
8822 const Constant *C = nullptr;
8823 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
8824 C = CI->getConstantIntValue();
8825 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
8826 C = CF->getConstantFPValue();
8828 assert(C && "Invalid constant type");
8830 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8832 DAG.getConstantPool(C, TLI.getPointerTy(DAG.getDataLayout()));
8833 Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
8835 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
8836 SDValue Ops[] = {DAG.getEntryNode(), CP};
8837 MachinePointerInfo MPI =
8838 MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
8839 return DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops, CVT,
8840 MPI, Alignment, MachineMemOperand::MOLoad);
8844 // Handle AVX2 in-register broadcasts.
8845 if (!IsLoad && Subtarget.hasInt256() &&
8846 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
8847 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
8849 // The scalar source must be a normal load.
8853 // Make sure the non-chain result is only used by this build vector.
8854 if (!Ld->hasNUsesOfValue(NumElts - NumUndefElts, 0))
8857 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
8858 (Subtarget.hasVLX() && ScalarSize == 64)) {
8859 auto *LN = cast<LoadSDNode>(Ld);
8860 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
8861 SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
8863 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
8864 LN->getMemoryVT(), LN->getMemOperand());
8865 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BCast.getValue(1));
8869 // The integer check is needed for the 64-bit into 128-bit case, so that it
8870 // doesn't match double, since there is no vbroadcastsd for xmm.
8871 if (Subtarget.hasInt256() && Ld.getValueType().isInteger() &&
8872 (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)) {
8873 auto *LN = cast<LoadSDNode>(Ld);
8874 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
8875 SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
8877 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
8878 LN->getMemoryVT(), LN->getMemOperand());
8879 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BCast.getValue(1));
8883 // Unsupported broadcast.
8887 /// For an EXTRACT_VECTOR_ELT with a constant index return the real
8888 /// underlying vector and index.
8890 /// Modifies \p ExtractedFromVec to the real vector and returns the real index.
8892 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
8894 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
8895 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
8898 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
8900 // (extract_vector_elt (v8f32 %1), Constant<6>)
8902 // (extract_vector_elt (vector_shuffle<2,u,u,u>
8903 // (extract_subvector (v8f32 %0), Constant<4>),
8906 // In this case the vector is the extract_subvector expression and the index
8907 // is 2, as specified by the shuffle.
8908 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
8909 SDValue ShuffleVec = SVOp->getOperand(0);
8910 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
8911 assert(ShuffleVecVT.getVectorElementType() ==
8912 ExtractedFromVec.getSimpleValueType().getVectorElementType());
8914 int ShuffleIdx = SVOp->getMaskElt(Idx);
8915 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
8916 ExtractedFromVec = ShuffleVec;
8922 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
8923 MVT VT = Op.getSimpleValueType();
8925 // Skip if insert_vec_elt is not supported.
8926 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
8927 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
8931 unsigned NumElems = Op.getNumOperands();
8935 SmallVector<unsigned, 4> InsertIndices;
8936 SmallVector<int, 8> Mask(NumElems, -1);
8938 for (unsigned i = 0; i != NumElems; ++i) {
8939 unsigned Opc = Op.getOperand(i).getOpcode();
8941 if (Opc == ISD::UNDEF)
8944 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
8945 // Quit if more than one element needs inserting.
8946 if (InsertIndices.size() > 1)
8949 InsertIndices.push_back(i);
8953 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
8954 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
8956 // Quit if non-constant index.
8957 if (!isa<ConstantSDNode>(ExtIdx))
8959 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
8961 // Quit if extracted from vector of different type.
8962 if (ExtractedFromVec.getValueType() != VT)
8965 if (!VecIn1.getNode())
8966 VecIn1 = ExtractedFromVec;
8967 else if (VecIn1 != ExtractedFromVec) {
8968 if (!VecIn2.getNode())
8969 VecIn2 = ExtractedFromVec;
8970 else if (VecIn2 != ExtractedFromVec)
8971 // Quit if there are more than 2 vectors to shuffle.
8975 if (ExtractedFromVec == VecIn1)
8977 else if (ExtractedFromVec == VecIn2)
8978 Mask[i] = Idx + NumElems;
8981 if (!VecIn1.getNode())
8984 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
8985 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, Mask);
8987 for (unsigned Idx : InsertIndices)
8988 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
8989 DAG.getIntPtrConstant(Idx, DL));
8994 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
8995 static SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG,
8996 const X86Subtarget &Subtarget) {
8998 MVT VT = Op.getSimpleValueType();
8999 assert((VT.getVectorElementType() == MVT::i1) &&
9000 "Unexpected type in LowerBUILD_VECTORvXi1!");
9003 if (ISD::isBuildVectorAllZeros(Op.getNode()) ||
9004 ISD::isBuildVectorAllOnes(Op.getNode()))
9007 uint64_t Immediate = 0;
9008 SmallVector<unsigned, 16> NonConstIdx;
9009 bool IsSplat = true;
9010 bool HasConstElts = false;
9012 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
9013 SDValue In = Op.getOperand(idx);
9016 if (auto *InC = dyn_cast<ConstantSDNode>(In)) {
9017 Immediate |= (InC->getZExtValue() & 0x1) << idx;
9018 HasConstElts = true;
9020 NonConstIdx.push_back(idx);
9024 else if (In != Op.getOperand(SplatIdx))
9028 // For a splat, use "(select i1 splat_elt, all-ones, all-zeroes)".
9030 // The build_vector allows the scalar element to be larger than the vector
9031 // element type. We need to mask it to use as a condition unless we know
9032 // the upper bits are zero.
9033 // FIXME: Use computeKnownBits instead of checking specific opcode?
9034 SDValue Cond = Op.getOperand(SplatIdx);
9035 assert(Cond.getValueType() == MVT::i8 && "Unexpected VT!");
9036 if (Cond.getOpcode() != ISD::SETCC)
9037 Cond = DAG.getNode(ISD::AND, dl, MVT::i8, Cond,
9038 DAG.getConstant(1, dl, MVT::i8));
9040 // Perform the select in the scalar domain so we can use cmov.
9041 if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
9042 SDValue Select = DAG.getSelect(dl, MVT::i32, Cond,
9043 DAG.getAllOnesConstant(dl, MVT::i32),
9044 DAG.getConstant(0, dl, MVT::i32));
9045 Select = DAG.getBitcast(MVT::v32i1, Select);
9046 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Select, Select);
9048 MVT ImmVT = MVT::getIntegerVT(std::max((unsigned)VT.getSizeInBits(), 8U));
9049 SDValue Select = DAG.getSelect(dl, ImmVT, Cond,
9050 DAG.getAllOnesConstant(dl, ImmVT),
9051 DAG.getConstant(0, dl, ImmVT));
9052 MVT VecVT = VT.getSizeInBits() >= 8 ? VT : MVT::v8i1;
9053 Select = DAG.getBitcast(VecVT, Select);
9054 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Select,
9055 DAG.getIntPtrConstant(0, dl));
9059 // insert elements one by one
9062 if (VT == MVT::v64i1 && !Subtarget.is64Bit()) {
9063 SDValue ImmL = DAG.getConstant(Lo_32(Immediate), dl, MVT::i32);
9064 SDValue ImmH = DAG.getConstant(Hi_32(Immediate), dl, MVT::i32);
9065 ImmL = DAG.getBitcast(MVT::v32i1, ImmL);
9066 ImmH = DAG.getBitcast(MVT::v32i1, ImmH);
9067 DstVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, ImmL, ImmH);
9069 MVT ImmVT = MVT::getIntegerVT(std::max((unsigned)VT.getSizeInBits(), 8U));
9070 SDValue Imm = DAG.getConstant(Immediate, dl, ImmVT);
9071 MVT VecVT = VT.getSizeInBits() >= 8 ? VT : MVT::v8i1;
9072 DstVec = DAG.getBitcast(VecVT, Imm);
9073 DstVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, DstVec,
9074 DAG.getIntPtrConstant(0, dl));
9077 DstVec = DAG.getUNDEF(VT);
9079 for (unsigned i = 0, e = NonConstIdx.size(); i != e; ++i) {
9080 unsigned InsertIdx = NonConstIdx[i];
9081 DstVec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
9082 Op.getOperand(InsertIdx),
9083 DAG.getIntPtrConstant(InsertIdx, dl));
9088 /// This is a helper function of LowerToHorizontalOp().
9089 /// This function checks that the build_vector \p N in input implements a
9090 /// 128-bit partial horizontal operation on a 256-bit vector, but that operation
9091 /// may not match the layout of an x86 256-bit horizontal instruction.
9092 /// In other words, if this returns true, then some extraction/insertion will
9093 /// be required to produce a valid horizontal instruction.
9095 /// Parameter \p Opcode defines the kind of horizontal operation to match.
9096 /// For example, if \p Opcode is equal to ISD::ADD, then this function
9097 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
9098 /// is equal to ISD::SUB, then this function checks if this is a horizontal
9101 /// This function only analyzes elements of \p N whose indices are
9102 /// in range [BaseIdx, LastIdx).
9104 /// TODO: This function was originally used to match both real and fake partial
9105 /// horizontal operations, but the index-matching logic is incorrect for that.
9106 /// See the corrected implementation in isHopBuildVector(). Can we reduce this
9107 /// code because it is only used for partial h-op matching now?
9108 static bool isHorizontalBinOpPart(const BuildVectorSDNode *N, unsigned Opcode,
9110 unsigned BaseIdx, unsigned LastIdx,
9111 SDValue &V0, SDValue &V1) {
9112 EVT VT = N->getValueType(0);
9113 assert(VT.is256BitVector() && "Only use for matching partial 256-bit h-ops");
9114 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
9115 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
9116 "Invalid Vector in input!");
9118 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
9119 bool CanFold = true;
9120 unsigned ExpectedVExtractIdx = BaseIdx;
9121 unsigned NumElts = LastIdx - BaseIdx;
9122 V0 = DAG.getUNDEF(VT);
9123 V1 = DAG.getUNDEF(VT);
9125 // Check if N implements a horizontal binop.
9126 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
9127 SDValue Op = N->getOperand(i + BaseIdx);
9130 if (Op->isUndef()) {
9131 // Update the expected vector extract index.
9132 if (i * 2 == NumElts)
9133 ExpectedVExtractIdx = BaseIdx;
9134 ExpectedVExtractIdx += 2;
9138 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
9143 SDValue Op0 = Op.getOperand(0);
9144 SDValue Op1 = Op.getOperand(1);
9146 // Try to match the following pattern:
9147 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
9148 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
9149 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
9150 Op0.getOperand(0) == Op1.getOperand(0) &&
9151 isa<ConstantSDNode>(Op0.getOperand(1)) &&
9152 isa<ConstantSDNode>(Op1.getOperand(1)));
9156 unsigned I0 = Op0.getConstantOperandVal(1);
9157 unsigned I1 = Op1.getConstantOperandVal(1);
9159 if (i * 2 < NumElts) {
9161 V0 = Op0.getOperand(0);
9162 if (V0.getValueType() != VT)
9167 V1 = Op0.getOperand(0);
9168 if (V1.getValueType() != VT)
9171 if (i * 2 == NumElts)
9172 ExpectedVExtractIdx = BaseIdx;
9175 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
9176 if (I0 == ExpectedVExtractIdx)
9177 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
9178 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
9179 // Try to match the following dag sequence:
9180 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
9181 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
9182 } else
9183 CanFold = false;
9185 ExpectedVExtractIdx += 2;
9186 }
9188 return CanFold;
9189 }
9191 /// Emit a sequence of two 128-bit horizontal add/sub followed by
9192 /// a concat_vector.
9194 /// This is a helper function of LowerToHorizontalOp().
9195 /// This function expects two 256-bit vectors called V0 and V1.
9196 /// At first, each vector is split into two separate 128-bit vectors.
9197 /// Then, the resulting 128-bit vectors are used to implement two
9198 /// horizontal binary operations.
9200 /// The kind of horizontal binary operation is defined by \p X86Opcode.
9202 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed as inputs
9203 /// to the two new horizontal binops.
9204 /// When Mode is set, the first horizontal binop dag node would take as input
9205 /// the lower 128-bit of V0 and the upper 128-bit of V0. The second
9206 /// horizontal binop dag node would take as input the lower 128-bit of V1
9207 /// and the upper 128-bit of V1.
9209 /// HADD V0_LO, V0_HI
9210 /// HADD V1_LO, V1_HI
9212 /// Otherwise, the first horizontal binop dag node takes as input the lower
9213 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
9214 /// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
9216 /// HADD V0_LO, V1_LO
9217 /// HADD V0_HI, V1_HI
9219 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
9220 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
9221 /// the upper 128-bits of the result.
9222 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
9223 const SDLoc &DL, SelectionDAG &DAG,
9224 unsigned X86Opcode, bool Mode,
9225 bool isUndefLO, bool isUndefHI) {
9226 MVT VT = V0.getSimpleValueType();
9227 assert(VT.is256BitVector() && VT == V1.getSimpleValueType() &&
9228 "Invalid nodes in input!");
9230 unsigned NumElts = VT.getVectorNumElements();
9231 SDValue V0_LO = extract128BitVector(V0, 0, DAG, DL);
9232 SDValue V0_HI = extract128BitVector(V0, NumElts/2, DAG, DL);
9233 SDValue V1_LO = extract128BitVector(V1, 0, DAG, DL);
9234 SDValue V1_HI = extract128BitVector(V1, NumElts/2, DAG, DL);
9235 MVT NewVT = V0_LO.getSimpleValueType();
9237 SDValue LO = DAG.getUNDEF(NewVT);
9238 SDValue HI = DAG.getUNDEF(NewVT);
9240 if (Mode) {
9241 // Don't emit a horizontal binop if the result is expected to be UNDEF.
9242 if (!isUndefLO && !V0->isUndef())
9243 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
9244 if (!isUndefHI && !V1->isUndef())
9245 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
9246 } else {
9247 // Don't emit a horizontal binop if the result is expected to be UNDEF.
9248 if (!isUndefLO && (!V0_LO->isUndef() || !V1_LO->isUndef()))
9249 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
9251 if (!isUndefHI && (!V0_HI->isUndef() || !V1_HI->isUndef()))
9252 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
9253 }
9255 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
9256 }
9258 /// Returns true iff \p BV builds a vector whose result is equivalent to
9259 /// the result of an ADDSUB/SUBADD operation.
9260 /// If true is returned then the operands of ADDSUB = Opnd0 +- Opnd1
9261 /// (SUBADD = Opnd0 -+ Opnd1) operation are written to the parameters
9262 /// \p Opnd0 and \p Opnd1.
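///
/// Illustrative example (not part of the original comment): a v4f32
/// build_vector matches the ADDSUB form when
///   BV[0] = fsub (extractelt A, 0), (extractelt B, 0)
///   BV[1] = fadd (extractelt A, 1), (extractelt B, 1)
///   BV[2] = fsub (extractelt A, 2), (extractelt B, 2)
///   BV[3] = fadd (extractelt A, 3), (extractelt B, 3)
/// i.e. even lanes subtract and odd lanes add, giving Opnd0 == A,
/// Opnd1 == B and IsSubAdd == false.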
9263 static bool isAddSubOrSubAdd(const BuildVectorSDNode *BV,
9264 const X86Subtarget &Subtarget, SelectionDAG &DAG,
9265 SDValue &Opnd0, SDValue &Opnd1,
9266 unsigned &NumExtracts,
9267 bool &IsSubAdd) {
9269 MVT VT = BV->getSimpleValueType(0);
9270 if (!Subtarget.hasSSE3() || !VT.isFloatingPoint())
9271 return false;
9273 unsigned NumElts = VT.getVectorNumElements();
9274 SDValue InVec0 = DAG.getUNDEF(VT);
9275 SDValue InVec1 = DAG.getUNDEF(VT);
9279 // Odd-numbered elements in the input build vector are obtained from
9280 // adding/subtracting two integer/float elements.
9281 // Even-numbered elements in the input build vector are obtained from
9282 // subtracting/adding two integer/float elements.
9283 unsigned Opc[2] = {0, 0};
9284 for (unsigned i = 0, e = NumElts; i != e; ++i) {
9285 SDValue Op = BV->getOperand(i);
9287 // Skip 'undef' values.
9288 unsigned Opcode = Op.getOpcode();
9289 if (Opcode == ISD::UNDEF)
9290 continue;
9292 // Early exit if we found an unexpected opcode.
9293 if (Opcode != ISD::FADD && Opcode != ISD::FSUB)
9294 return false;
9296 SDValue Op0 = Op.getOperand(0);
9297 SDValue Op1 = Op.getOperand(1);
9299 // Try to match the following pattern:
9300 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
9301 // Early exit if we cannot match that sequence.
9302 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9303 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9304 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
9305 Op0.getOperand(1) != Op1.getOperand(1))
9306 return false;
9308 unsigned I0 = Op0.getConstantOperandVal(1);
9309 if (I0 != i)
9310 return false;
9312 // We found a valid add/sub node; make sure it's the same opcode as previous
9313 // elements for this parity.
9314 if (Opc[i % 2] != 0 && Opc[i % 2] != Opcode)
9315 return false;
9316 Opc[i % 2] = Opcode;
9318 // Update InVec0 and InVec1.
9319 if (InVec0.isUndef()) {
9320 InVec0 = Op0.getOperand(0);
9321 if (InVec0.getSimpleValueType() != VT)
9322 return false;
9323 }
9324 if (InVec1.isUndef()) {
9325 InVec1 = Op1.getOperand(0);
9326 if (InVec1.getSimpleValueType() != VT)
9327 return false;
9328 }
9330 // Make sure that the input operands of each add/sub node always
9331 // come from the same pair of vectors.
9332 if (InVec0 != Op0.getOperand(0)) {
9333 if (Opcode == ISD::FSUB)
9334 return false;
9336 // FADD is commutable. Try to commute the operands
9337 // and then test again.
9338 std::swap(Op0, Op1);
9339 if (InVec0 != Op0.getOperand(0))
9340 return false;
9341 }
9343 if (InVec1 != Op1.getOperand(0))
9344 return false;
9346 // Increment the number of extractions done.
9347 ++NumExtracts;
9348 }
9350 // Ensure we have found an opcode for both parities and that they are
9351 // different. Don't try to fold this build_vector into an ADDSUB/SUBADD if the
9352 // inputs are undef.
9353 if (!Opc[0] || !Opc[1] || Opc[0] == Opc[1] ||
9354 InVec0.isUndef() || InVec1.isUndef())
9355 return false;
9357 IsSubAdd = Opc[0] == ISD::FADD;
9358 Opnd0 = InVec0;
9359 Opnd1 = InVec1;
9360 return true;
9361 }
9364 /// Returns true if it is possible to fold MUL and an idiom that has already
9365 /// been recognized as ADDSUB/SUBADD(\p Opnd0, \p Opnd1) into
9366 /// FMADDSUB/FMSUBADD(x, y, \p Opnd2). If (and only if) true is returned, the
9367 /// operands of FMADDSUB/FMSUBADD are written to parameters \p Opnd0, \p Opnd1
9368 /// and \p Opnd2.
9369 /// Prior to calling this function it should be known that there is some
9370 /// SDNode that potentially can be replaced with an X86ISD::ADDSUB operation
9371 /// using \p Opnd0 and \p Opnd1 as operands. Also, this method is called
9372 /// before replacement of such SDNode with ADDSUB operation. Thus the number
9373 /// of \p Opnd0 uses is expected to be equal to 2.
9374 /// For example, this function may be called for the following IR:
9375 /// %AB = fmul fast <2 x double> %A, %B
9376 /// %Sub = fsub fast <2 x double> %AB, %C
9377 /// %Add = fadd fast <2 x double> %AB, %C
9378 /// %Addsub = shufflevector <2 x double> %Sub, <2 x double> %Add,
9379 /// <2 x i32> <i32 0, i32 3>
9380 /// There is a def for %Addsub here, which potentially can be replaced by
9381 /// X86ISD::ADDSUB operation:
9382 /// %Addsub = X86ISD::ADDSUB %AB, %C
9383 /// and such ADDSUB can further be replaced with FMADDSUB:
9384 /// %Addsub = FMADDSUB %A, %B, %C.
9386 /// The main reason why this method is called before the replacement of the
9387 /// recognized ADDSUB idiom with ADDSUB operation is that such replacement
9388 /// is illegal sometimes. E.g. 512-bit ADDSUB is not available, while 512-bit
9389 /// FMADDSUB is.
9390 static bool isFMAddSubOrFMSubAdd(const X86Subtarget &Subtarget,
9391 SelectionDAG &DAG,
9392 SDValue &Opnd0, SDValue &Opnd1, SDValue &Opnd2,
9393 unsigned ExpectedUses) {
9394 if (Opnd0.getOpcode() != ISD::FMUL ||
9395 !Opnd0->hasNUsesOfValue(ExpectedUses, 0) || !Subtarget.hasAnyFMA())
9396 return false;
9398 // FIXME: These checks must match the similar ones in
9399 // DAGCombiner::visitFADDForFMACombine. It would be good to have one
9400 // function that would answer if it is Ok to fuse MUL + ADD to FMADD
9401 // or MUL + ADDSUB to FMADDSUB.
9402 const TargetOptions &Options = DAG.getTarget().Options;
9403 bool AllowFusion =
9404 (Options.AllowFPOpFusion == FPOpFusion::Fast || Options.UnsafeFPMath);
9405 if (!AllowFusion)
9406 return false;
9408 Opnd2 = Opnd1;
9409 Opnd1 = Opnd0.getOperand(1);
9410 Opnd0 = Opnd0.getOperand(0);
9412 return true;
9413 }
9415 /// Try to fold a build_vector that performs an 'addsub', 'fmaddsub' or
9416 /// 'fsubadd' operation into an X86ISD::ADDSUB, X86ISD::FMADDSUB or
9417 /// X86ISD::FMSUBADD node.
9418 static SDValue lowerToAddSubOrFMAddSub(const BuildVectorSDNode *BV,
9419 const X86Subtarget &Subtarget,
9420 SelectionDAG &DAG) {
9421 SDValue Opnd0, Opnd1;
9422 unsigned NumExtracts;
9423 bool IsSubAdd;
9424 if (!isAddSubOrSubAdd(BV, Subtarget, DAG, Opnd0, Opnd1, NumExtracts,
9425 IsSubAdd))
9426 return SDValue();
9428 MVT VT = BV->getSimpleValueType(0);
9429 SDLoc DL(BV);
9431 // Try to generate X86ISD::FMADDSUB node here.
9432 SDValue Opnd2;
9433 if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, NumExtracts)) {
9434 unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
9435 return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
9436 }
9438 // We only support ADDSUB.
9439 if (IsSubAdd)
9440 return SDValue();
9442 // Do not generate X86ISD::ADDSUB node for 512-bit types even though
9443 // the ADDSUB idiom has been successfully recognized. There are no known
9444 // X86 targets with 512-bit ADDSUB instructions!
9445 // 512-bit ADDSUB idiom recognition was needed only as part of the FMADDSUB
9446 // idiom recognition above.
9447 if (VT.is512BitVector())
9448 return SDValue();
9450 return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
9451 }
9453 static bool isHopBuildVector(const BuildVectorSDNode *BV, SelectionDAG &DAG,
9454 unsigned &HOpcode, SDValue &V0, SDValue &V1) {
9455 // Initialize outputs to known values.
9456 MVT VT = BV->getSimpleValueType(0);
9457 HOpcode = ISD::DELETED_NODE;
9458 V0 = DAG.getUNDEF(VT);
9459 V1 = DAG.getUNDEF(VT);
9461 // x86 256-bit horizontal ops are defined in a non-obvious way. Each 128-bit
9462 // half of the result is calculated independently from the 128-bit halves of
9463 // the inputs, so that makes the index-checking logic below more complicated.
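// For example (illustrative), 256-bit integer HADD(X, Y) produces
//   [ X0+X1, X2+X3, Y0+Y1, Y2+Y3 | X4+X5, X6+X7, Y4+Y5, Y6+Y7 ]
// so the element indices expected from each source restart in the upper
// 128-bit chunk instead of running linearly from 0 to NumElts.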
9464 unsigned NumElts = VT.getVectorNumElements();
9465 unsigned GenericOpcode = ISD::DELETED_NODE;
9466 unsigned Num128BitChunks = VT.is256BitVector() ? 2 : 1;
9467 unsigned NumEltsIn128Bits = NumElts / Num128BitChunks;
9468 unsigned NumEltsIn64Bits = NumEltsIn128Bits / 2;
9469 for (unsigned i = 0; i != Num128BitChunks; ++i) {
9470 for (unsigned j = 0; j != NumEltsIn128Bits; ++j) {
9471 // Ignore undef elements.
9472 SDValue Op = BV->getOperand(i * NumEltsIn128Bits + j);
9473 if (Op.isUndef())
9474 continue;
9476 // If there's an opcode mismatch, we're done.
9477 if (HOpcode != ISD::DELETED_NODE && Op.getOpcode() != GenericOpcode)
9478 return false;
9480 // Initialize horizontal opcode.
9481 if (HOpcode == ISD::DELETED_NODE) {
9482 GenericOpcode = Op.getOpcode();
9483 switch (GenericOpcode) {
9484 case ISD::ADD: HOpcode = X86ISD::HADD; break;
9485 case ISD::SUB: HOpcode = X86ISD::HSUB; break;
9486 case ISD::FADD: HOpcode = X86ISD::FHADD; break;
9487 case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
9488 default: return false;
9489 }
9490 }
9492 SDValue Op0 = Op.getOperand(0);
9493 SDValue Op1 = Op.getOperand(1);
9494 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9495 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
9496 Op0.getOperand(0) != Op1.getOperand(0) ||
9497 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
9498 !isa<ConstantSDNode>(Op1.getOperand(1)) || !Op.hasOneUse())
9499 return false;
9501 // The source vector is chosen based on which 64-bit half of the
9502 // destination vector is being calculated.
9503 if (j < NumEltsIn64Bits) {
9504 if (V0.isUndef())
9505 V0 = Op0.getOperand(0);
9506 } else {
9507 if (V1.isUndef())
9508 V1 = Op0.getOperand(0);
9509 }
9511 SDValue SourceVec = (j < NumEltsIn64Bits) ? V0 : V1;
9512 if (SourceVec != Op0.getOperand(0))
9513 return false;
9515 // op (extract_vector_elt A, I), (extract_vector_elt A, I+1)
9516 unsigned ExtIndex0 = Op0.getConstantOperandVal(1);
9517 unsigned ExtIndex1 = Op1.getConstantOperandVal(1);
9518 unsigned ExpectedIndex = i * NumEltsIn128Bits +
9519 (j % NumEltsIn64Bits) * 2;
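// Worked example (illustrative): for v8i32, NumEltsIn128Bits == 4 and
// NumEltsIn64Bits == 2, so chunk i == 1, element j == 2 expects
// ExpectedIndex == 1 * 4 + (2 % 2) * 2 == 4, i.e. the first even/odd
// pair of the upper 128-bit half of the source vector.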
9520 if (ExpectedIndex == ExtIndex0 && ExtIndex1 == ExtIndex0 + 1)
9521 continue;
9523 // If this is not a commutative op, this does not match.
9524 if (GenericOpcode != ISD::ADD && GenericOpcode != ISD::FADD)
9525 return false;
9527 // Addition is commutative, so try swapping the extract indexes.
9528 // op (extract_vector_elt A, I+1), (extract_vector_elt A, I)
9529 if (ExpectedIndex == ExtIndex1 && ExtIndex0 == ExtIndex1 + 1)
9530 continue;
9532 // Extract indexes do not match horizontal requirement.
9533 return false;
9534 }
9535 }
9536 // We matched. Opcode and operands are returned by reference as arguments.
9537 return true;
9538 }
9540 static SDValue getHopForBuildVector(const BuildVectorSDNode *BV,
9541 SelectionDAG &DAG, unsigned HOpcode,
9542 SDValue V0, SDValue V1) {
9543 // If either input vector is not the same size as the build vector,
9544 // extract/insert the low bits to the correct size.
9545 // This is free (examples: zmm --> xmm, xmm --> ymm).
9546 MVT VT = BV->getSimpleValueType(0);
9547 unsigned Width = VT.getSizeInBits();
9548 if (V0.getValueSizeInBits() > Width)
9549 V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), Width);
9550 else if (V0.getValueSizeInBits() < Width)
9551 V0 = insertSubVector(DAG.getUNDEF(VT), V0, 0, DAG, SDLoc(BV), Width);
9553 if (V1.getValueSizeInBits() > Width)
9554 V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), Width);
9555 else if (V1.getValueSizeInBits() < Width)
9556 V1 = insertSubVector(DAG.getUNDEF(VT), V1, 0, DAG, SDLoc(BV), Width);
9558 unsigned NumElts = VT.getVectorNumElements();
9559 APInt DemandedElts = APInt::getAllOnesValue(NumElts);
9560 for (unsigned i = 0; i != NumElts; ++i)
9561 if (BV->getOperand(i).isUndef())
9562 DemandedElts.clearBit(i);
9564 // If we don't need the upper xmm, then perform as a xmm hop.
9565 unsigned HalfNumElts = NumElts / 2;
9566 if (VT.is256BitVector() && DemandedElts.lshr(HalfNumElts) == 0) {
9567 MVT HalfVT = VT.getHalfNumVectorElementsVT();
9568 V0 = extractSubVector(V0, 0, DAG, SDLoc(BV), 128);
9569 V1 = extractSubVector(V1, 0, DAG, SDLoc(BV), 128);
9570 SDValue Half = DAG.getNode(HOpcode, SDLoc(BV), HalfVT, V0, V1);
9571 return insertSubVector(DAG.getUNDEF(VT), Half, 0, DAG, SDLoc(BV), 256);
9572 }
9574 return DAG.getNode(HOpcode, SDLoc(BV), VT, V0, V1);
9575 }
9577 /// Lower BUILD_VECTOR to a horizontal add/sub operation if possible.
9578 static SDValue LowerToHorizontalOp(const BuildVectorSDNode *BV,
9579 const X86Subtarget &Subtarget,
9580 SelectionDAG &DAG) {
9581 // We need at least 2 non-undef elements to make this worthwhile by default.
9582 unsigned NumNonUndefs =
9583 count_if(BV->op_values(), [](SDValue V) { return !V.isUndef(); });
9584 if (NumNonUndefs < 2)
9585 return SDValue();
9587 // There are 4 sets of horizontal math operations distinguished by type:
9588 // int/FP at 128-bit/256-bit. Each type was introduced with a different
9589 // subtarget feature. Try to match those "native" patterns first.
9590 MVT VT = BV->getSimpleValueType(0);
9591 if (((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget.hasSSE3()) ||
9592 ((VT == MVT::v8i16 || VT == MVT::v4i32) && Subtarget.hasSSSE3()) ||
9593 ((VT == MVT::v8f32 || VT == MVT::v4f64) && Subtarget.hasAVX()) ||
9594 ((VT == MVT::v16i16 || VT == MVT::v8i32) && Subtarget.hasAVX2())) {
9595 unsigned HOpcode;
9596 SDValue V0, V1;
9597 if (isHopBuildVector(BV, DAG, HOpcode, V0, V1))
9598 return getHopForBuildVector(BV, DAG, HOpcode, V0, V1);
9599 }
9601 // Try harder to match 256-bit ops by using extract/concat.
9602 if (!Subtarget.hasAVX() || !VT.is256BitVector())
9603 return SDValue();
9605 // Count the number of UNDEF operands in each half of the input build_vector.
9606 unsigned NumElts = VT.getVectorNumElements();
9607 unsigned Half = NumElts / 2;
9608 unsigned NumUndefsLO = 0;
9609 unsigned NumUndefsHI = 0;
9610 for (unsigned i = 0, e = Half; i != e; ++i)
9611 if (BV->getOperand(i)->isUndef())
9612 NumUndefsLO++;
9614 for (unsigned i = Half, e = NumElts; i != e; ++i)
9615 if (BV->getOperand(i)->isUndef())
9616 NumUndefsHI++;
9618 SDLoc DL(BV);
9619 SDValue InVec0, InVec1;
9620 if (VT == MVT::v8i32 || VT == MVT::v16i16) {
9621 SDValue InVec2, InVec3;
9622 unsigned X86Opcode;
9623 bool CanFold = true;
9625 if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
9626 isHorizontalBinOpPart(BV, ISD::ADD, DAG, Half, NumElts, InVec2,
9627 InVec3) &&
9628 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
9629 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
9630 X86Opcode = X86ISD::HADD;
9631 else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, Half, InVec0,
9632 InVec1) &&
9633 isHorizontalBinOpPart(BV, ISD::SUB, DAG, Half, NumElts, InVec2,
9634 InVec3) &&
9635 ((InVec0.isUndef() || InVec2.isUndef()) || InVec0 == InVec2) &&
9636 ((InVec1.isUndef() || InVec3.isUndef()) || InVec1 == InVec3))
9637 X86Opcode = X86ISD::HSUB;
9638 else
9639 CanFold = false;
9641 if (CanFold) {
9642 // Do not try to expand this build_vector into a pair of horizontal
9643 // add/sub if we can emit a pair of scalar add/sub.
9644 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
9645 return SDValue();
9647 // Convert this build_vector into a pair of horizontal binops followed by
9648 // a concat vector. We must adjust the outputs from the partial horizontal
9649 // matching calls above to account for undefined vector halves.
9650 SDValue V0 = InVec0.isUndef() ? InVec2 : InVec0;
9651 SDValue V1 = InVec1.isUndef() ? InVec3 : InVec1;
9652 assert((!V0.isUndef() || !V1.isUndef()) && "Horizontal-op of undefs?");
9653 bool isUndefLO = NumUndefsLO == Half;
9654 bool isUndefHI = NumUndefsHI == Half;
9655 return ExpandHorizontalBinOp(V0, V1, DL, DAG, X86Opcode, false, isUndefLO,
9656 isUndefHI);
9657 }
9658 }
9660 if (VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
9661 VT == MVT::v16i16) {
9662 unsigned X86Opcode;
9663 if (isHorizontalBinOpPart(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
9664 X86Opcode = X86ISD::HADD;
9665 else if (isHorizontalBinOpPart(BV, ISD::SUB, DAG, 0, NumElts, InVec0,
9666 InVec1))
9667 X86Opcode = X86ISD::HSUB;
9668 else if (isHorizontalBinOpPart(BV, ISD::FADD, DAG, 0, NumElts, InVec0,
9669 InVec1))
9670 X86Opcode = X86ISD::FHADD;
9671 else if (isHorizontalBinOpPart(BV, ISD::FSUB, DAG, 0, NumElts, InVec0,
9672 InVec1))
9673 X86Opcode = X86ISD::FHSUB;
9674 else
9675 return SDValue();
9677 // Don't try to expand this build_vector into a pair of horizontal add/sub
9678 // if we can simply emit a pair of scalar add/sub.
9679 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
9680 return SDValue();
9682 // Convert this build_vector into two horizontal add/sub followed by
9683 // a concat vector.
9684 bool isUndefLO = NumUndefsLO == Half;
9685 bool isUndefHI = NumUndefsHI == Half;
9686 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
9687 isUndefLO, isUndefHI);
9688 }
9690 return SDValue();
9691 }
9693 static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
9694 SelectionDAG &DAG);
9696 /// If a BUILD_VECTOR's source elements all apply the same bit operation and
9697 /// one of their operands is constant, lower to a pair of BUILD_VECTORs and
9698 /// just apply the bit operation to the vectors.
9699 /// NOTE: It's not in our interest to start making a general-purpose vectorizer
9700 /// from this, but enough scalar bit operations are created by the later
9701 /// legalization + scalarization stages to need basic support.
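///
/// Illustrative example (not part of the original comment), assuming v4i32:
///   (build_vector (and a, 1), (and b, 2), (and c, 4), (and d, 8))
/// becomes
///   (and (build_vector a, b, c, d), (build_vector 1, 2, 4, 8))
/// where the all-constant operand is cheap to materialize as a vector.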
9702 static SDValue lowerBuildVectorToBitOp(BuildVectorSDNode *Op,
9703 const X86Subtarget &Subtarget,
9704 SelectionDAG &DAG) {
9705 SDLoc DL(Op);
9706 MVT VT = Op->getSimpleValueType(0);
9707 unsigned NumElems = VT.getVectorNumElements();
9708 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
9710 // Check that all elements have the same opcode.
9711 // TODO: Should we allow UNDEFS and if so how many?
9712 unsigned Opcode = Op->getOperand(0).getOpcode();
9713 for (unsigned i = 1; i < NumElems; ++i)
9714 if (Opcode != Op->getOperand(i).getOpcode())
9715 return SDValue();
9717 // TODO: We may be able to add support for other Ops (ADD/SUB + shifts).
9718 bool IsShift = false;
9719 switch (Opcode) {
9720 default:
9721 return SDValue();
9722 case ISD::SHL:
9723 case ISD::SRL:
9724 case ISD::SRA:
9725 IsShift = true;
9726 break;
9727 case ISD::AND:
9728 case ISD::XOR:
9729 case ISD::OR:
9730 // Don't do this if the buildvector is a splat - we'd replace one
9731 // constant with an entire vector.
9732 if (Op->getSplatValue())
9733 return SDValue();
9734 if (!TLI.isOperationLegalOrPromote(Opcode, VT))
9735 return SDValue();
9736 break;
9737 }
9739 SmallVector<SDValue, 4> LHSElts, RHSElts;
9740 for (SDValue Elt : Op->ops()) {
9741 SDValue LHS = Elt.getOperand(0);
9742 SDValue RHS = Elt.getOperand(1);
9744 // We expect the canonicalized RHS operand to be the constant.
9745 if (!isa<ConstantSDNode>(RHS))
9746 return SDValue();
9748 // Extend shift amounts.
9749 if (RHS.getValueSizeInBits() != VT.getScalarSizeInBits()) {
9750 if (!IsShift)
9751 return SDValue();
9752 RHS = DAG.getZExtOrTrunc(RHS, DL, VT.getScalarType());
9753 }
9755 LHSElts.push_back(LHS);
9756 RHSElts.push_back(RHS);
9757 }
9759 // Limit to shifts by uniform immediates.
9760 // TODO: Only accept vXi8/vXi64 special cases?
9761 // TODO: Permit non-uniform XOP/AVX2/MULLO cases?
9762 if (IsShift && any_of(RHSElts, [&](SDValue V) { return RHSElts[0] != V; }))
9763 return SDValue();
9765 SDValue LHS = DAG.getBuildVector(VT, DL, LHSElts);
9766 SDValue RHS = DAG.getBuildVector(VT, DL, RHSElts);
9767 SDValue Res = DAG.getNode(Opcode, DL, VT, LHS, RHS);
9769 if (!IsShift)
9770 return Res;
9772 // Immediately lower the shift to ensure the constant build vector doesn't
9773 // get converted to a constant pool before the shift is lowered.
9774 return LowerShift(Res, Subtarget, DAG);
9775 }
9777 /// Create a vector constant without a load. SSE/AVX provide the bare minimum
9778 /// functionality to do this, so it's all zeros, all ones, or some derivation
9779 /// that is cheap to calculate.
9780 static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG,
9781 const X86Subtarget &Subtarget) {
9782 SDLoc DL(Op);
9783 MVT VT = Op.getSimpleValueType();
9785 // Vectors containing all zeros can be matched by pxor and xorps.
9786 if (ISD::isBuildVectorAllZeros(Op.getNode()))
9787 return Op;
9789 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
9790 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
9791 // vpcmpeqd on 256-bit vectors.
9792 if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
9793 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
9794 return Op;
9796 return getOnesVector(VT, DAG, DL);
9797 }
9799 return SDValue();
9800 }
9802 /// Look for opportunities to create a VPERMV/VPERMILPV/PSHUFB variable permute
9803 /// from a vector of source values and a vector of extraction indices.
9804 /// The vectors might be manipulated to match the type of the permute op.
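/// Conceptually (illustrative), the result computes, for each lane i,
///   Dst[i] = SrcVec[IndicesVec[i]]
/// with out-of-range index behaviour defined by the permute op chosen below.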
9805 static SDValue createVariablePermute(MVT VT, SDValue SrcVec, SDValue IndicesVec,
9806 SDLoc &DL, SelectionDAG &DAG,
9807 const X86Subtarget &Subtarget) {
9808 MVT ShuffleVT = VT;
9809 EVT IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
9810 unsigned NumElts = VT.getVectorNumElements();
9811 unsigned SizeInBits = VT.getSizeInBits();
9813 // Adjust IndicesVec to match VT size.
9814 assert(IndicesVec.getValueType().getVectorNumElements() >= NumElts &&
9815 "Illegal variable permute mask size");
9816 if (IndicesVec.getValueType().getVectorNumElements() > NumElts)
9817 IndicesVec = extractSubVector(IndicesVec, 0, DAG, SDLoc(IndicesVec),
9818 NumElts * VT.getScalarSizeInBits());
9819 IndicesVec = DAG.getZExtOrTrunc(IndicesVec, SDLoc(IndicesVec), IndicesVT);
9821 // Handle a SrcVec that doesn't match the VT type.
9822 if (SrcVec.getValueSizeInBits() != SizeInBits) {
9823 if ((SrcVec.getValueSizeInBits() % SizeInBits) == 0) {
9824 // Handle larger SrcVec by treating it as a larger permute.
9825 unsigned Scale = SrcVec.getValueSizeInBits() / SizeInBits;
9826 VT = MVT::getVectorVT(VT.getScalarType(), Scale * NumElts);
9827 IndicesVT = EVT(VT).changeVectorElementTypeToInteger();
9828 IndicesVec = widenSubVector(IndicesVT.getSimpleVT(), IndicesVec, false,
9829 Subtarget, DAG, SDLoc(IndicesVec));
9830 SDValue NewSrcVec =
9831 createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
9832 if (NewSrcVec)
9833 return extractSubVector(NewSrcVec, 0, DAG, DL, SizeInBits);
9834 return SDValue();
9835 } else if (SrcVec.getValueSizeInBits() < SizeInBits) {
9836 // Widen smaller SrcVec to match VT.
9837 SrcVec = widenSubVector(VT, SrcVec, false, Subtarget, DAG, SDLoc(SrcVec));
9838 } else
9839 return SDValue();
9840 }
9842 auto ScaleIndices = [&DAG](SDValue Idx, uint64_t Scale) {
9843 assert(isPowerOf2_64(Scale) && "Illegal variable permute shuffle scale");
9844 EVT SrcVT = Idx.getValueType();
9845 unsigned NumDstBits = SrcVT.getScalarSizeInBits() / Scale;
9846 uint64_t IndexScale = 0;
9847 uint64_t IndexOffset = 0;
9849 // If we're scaling a smaller permute op, then we need to repeat the
9850 // indices, scaling and offsetting them as well.
9851 // e.g. v4i32 -> v16i8 (Scale = 4)
9852 // IndexScale = v4i32 Splat(4 << 24 | 4 << 16 | 4 << 8 | 4)
9853 // IndexOffset = v4i32 Splat(3 << 24 | 2 << 16 | 1 << 8 | 0)
9854 for (uint64_t i = 0; i != Scale; ++i) {
9855 IndexScale |= Scale << (i * NumDstBits);
9856 IndexOffset |= i << (i * NumDstBits);
9857 }
9859 Idx = DAG.getNode(ISD::MUL, SDLoc(Idx), SrcVT, Idx,
9860 DAG.getConstant(IndexScale, SDLoc(Idx), SrcVT));
9861 Idx = DAG.getNode(ISD::ADD, SDLoc(Idx), SrcVT, Idx,
9862 DAG.getConstant(IndexOffset, SDLoc(Idx), SrcVT));
9863 return Idx;
9864 };
9866 unsigned Opcode = 0;
9867 switch (VT.SimpleTy) {
9868 default:
9869 break;
9870 case MVT::v16i8:
9871 if (Subtarget.hasSSSE3())
9872 Opcode = X86ISD::PSHUFB;
9873 break;
9874 case MVT::v8i16:
9875 if (Subtarget.hasVLX() && Subtarget.hasBWI())
9876 Opcode = X86ISD::VPERMV;
9877 else if (Subtarget.hasSSSE3()) {
9878 Opcode = X86ISD::PSHUFB;
9879 ShuffleVT = MVT::v16i8;
9880 }
9881 break;
9882 case MVT::v4f32:
9883 case MVT::v4i32:
9884 if (Subtarget.hasAVX()) {
9885 Opcode = X86ISD::VPERMILPV;
9886 ShuffleVT = MVT::v4f32;
9887 } else if (Subtarget.hasSSSE3()) {
9888 Opcode = X86ISD::PSHUFB;
9889 ShuffleVT = MVT::v16i8;
9890 }
9891 break;
9892 case MVT::v2f64:
9893 case MVT::v2i64:
9894 if (Subtarget.hasAVX()) {
9895 // VPERMILPD selects using bit#1 of the index vector, so scale IndicesVec.
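// (Adding the index vector to itself is a shift left by one: it moves index
// bit#0 into bit#1, where VPERMILPD reads it; e.g. index 1 becomes 2.)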
9896 IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
9897 Opcode = X86ISD::VPERMILPV;
9898 ShuffleVT = MVT::v2f64;
9899 } else if (Subtarget.hasSSE41()) {
9900 // SSE41 can compare v2i64 - select between indices 0 and 1.
9901 return DAG.getSelectCC(
9902 DL, IndicesVec,
9903 getZeroVector(IndicesVT.getSimpleVT(), Subtarget, DAG, DL),
9904 DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {0, 0}),
9905 DAG.getVectorShuffle(VT, DL, SrcVec, SrcVec, {1, 1}),
9906 ISD::CondCode::SETEQ);
9907 }
9908 break;
9909 case MVT::v32i8:
9910 if (Subtarget.hasVLX() && Subtarget.hasVBMI())
9911 Opcode = X86ISD::VPERMV;
9912 else if (Subtarget.hasXOP()) {
9913 SDValue LoSrc = extract128BitVector(SrcVec, 0, DAG, DL);
9914 SDValue HiSrc = extract128BitVector(SrcVec, 16, DAG, DL);
9915 SDValue LoIdx = extract128BitVector(IndicesVec, 0, DAG, DL);
9916 SDValue HiIdx = extract128BitVector(IndicesVec, 16, DAG, DL);
9917 return DAG.getNode(
9918 ISD::CONCAT_VECTORS, DL, VT,
9919 DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, LoIdx),
9920 DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, LoSrc, HiSrc, HiIdx));
9921 } else if (Subtarget.hasAVX()) {
9922 SDValue Lo = extract128BitVector(SrcVec, 0, DAG, DL);
9923 SDValue Hi = extract128BitVector(SrcVec, 16, DAG, DL);
9924 SDValue LoLo = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Lo);
9925 SDValue HiHi = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Hi, Hi);
9926 auto PSHUFBBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
9927 ArrayRef<SDValue> Ops) {
9928 // Permute Lo and Hi and then select based on index range.
9929 // This works as PSHUFB uses bits[3:0] to permute elements and we don't
9930 // care about bit[7] as it's just an index vector.
9931 SDValue Idx = Ops[2];
9932 EVT VT = Idx.getValueType();
9933 return DAG.getSelectCC(DL, Idx, DAG.getConstant(15, DL, VT),
9934 DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[1], Idx),
9935 DAG.getNode(X86ISD::PSHUFB, DL, VT, Ops[0], Idx),
9936 ISD::CondCode::SETGT);
9937 };
9938 SDValue Ops[] = {LoLo, HiHi, IndicesVec};
9939 return SplitOpsAndApply(DAG, Subtarget, DL, MVT::v32i8, Ops,
9940 PSHUFBBuilder);
9941 }
9942 break;
9943 case MVT::v16i16:
9944 if (Subtarget.hasVLX() && Subtarget.hasBWI())
9945 Opcode = X86ISD::VPERMV;
9946 else if (Subtarget.hasAVX()) {
9947 // Scale to v32i8 and perform as v32i8.
9948 IndicesVec = ScaleIndices(IndicesVec, 2);
9949 return DAG.getBitcast(
9950 VT, createVariablePermute(
9951 MVT::v32i8, DAG.getBitcast(MVT::v32i8, SrcVec),
9952 DAG.getBitcast(MVT::v32i8, IndicesVec), DL, DAG, Subtarget));
9953 }
9954 break;
9955 case MVT::v8f32:
9956 case MVT::v8i32:
9957 if (Subtarget.hasAVX2())
9958 Opcode = X86ISD::VPERMV;
9959 else if (Subtarget.hasAVX()) {
9960 SrcVec = DAG.getBitcast(MVT::v8f32, SrcVec);
9961 SDValue LoLo = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
9962 {0, 1, 2, 3, 0, 1, 2, 3});
9963 SDValue HiHi = DAG.getVectorShuffle(MVT::v8f32, DL, SrcVec, SrcVec,
9964 {4, 5, 6, 7, 4, 5, 6, 7});
9965 if (Subtarget.hasXOP())
9966 return DAG.getBitcast(
9967 VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v8f32, LoLo, HiHi,
9968 IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
9969 // Permute Lo and Hi and then select based on index range.
9970 // This works as VPERMILPS only uses index bits[0:1] to permute elements.
9971 SDValue Res = DAG.getSelectCC(
9972 DL, IndicesVec, DAG.getConstant(3, DL, MVT::v8i32),
9973 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, HiHi, IndicesVec),
9974 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, LoLo, IndicesVec),
9975 ISD::CondCode::SETGT);
9976 return DAG.getBitcast(VT, Res);
9977 }
9978 break;
9979 case MVT::v4f64:
9980 case MVT::v4i64:
9981 if (Subtarget.hasAVX512()) {
9982 if (!Subtarget.hasVLX()) {
9983 MVT WidenSrcVT = MVT::getVectorVT(VT.getScalarType(), 8);
9984 SrcVec = widenSubVector(WidenSrcVT, SrcVec, false, Subtarget, DAG,
9985 SDLoc(SrcVec));
9986 IndicesVec = widenSubVector(MVT::v8i64, IndicesVec, false, Subtarget,
9987 DAG, SDLoc(IndicesVec));
9988 SDValue Res = createVariablePermute(WidenSrcVT, SrcVec, IndicesVec, DL,
9989 DAG, Subtarget);
9990 return extract256BitVector(Res, 0, DAG, DL);
9991 }
9992 Opcode = X86ISD::VPERMV;
9993 } else if (Subtarget.hasAVX()) {
9994 SrcVec = DAG.getBitcast(MVT::v4f64, SrcVec);
9995 SDValue LoLo =
9996 DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {0, 1, 0, 1});
9997 SDValue HiHi =
9998 DAG.getVectorShuffle(MVT::v4f64, DL, SrcVec, SrcVec, {2, 3, 2, 3});
9999 // VPERMIL2PD selects with bit#1 of the index vector, so scale IndicesVec.
10000 IndicesVec = DAG.getNode(ISD::ADD, DL, IndicesVT, IndicesVec, IndicesVec);
10001 if (Subtarget.hasXOP())
10002 return DAG.getBitcast(
10003 VT, DAG.getNode(X86ISD::VPERMIL2, DL, MVT::v4f64, LoLo, HiHi,
10004 IndicesVec, DAG.getTargetConstant(0, DL, MVT::i8)));
10005 // Permute Lo and Hi and then select based on index range.
10006 // This works as VPERMILPD only uses index bit[1] to permute elements.
10007 SDValue Res = DAG.getSelectCC(
10008 DL, IndicesVec, DAG.getConstant(2, DL, MVT::v4i64),
10009 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, HiHi, IndicesVec),
10010 DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v4f64, LoLo, IndicesVec),
10011 ISD::CondCode::SETGT);
10012 return DAG.getBitcast(VT, Res);
10013 }
10014 break;
10015 case MVT::v64i8:
10016 if (Subtarget.hasVBMI())
10017 Opcode = X86ISD::VPERMV;
10018 break;
10019 case MVT::v32i16:
10020 if (Subtarget.hasBWI())
10021 Opcode = X86ISD::VPERMV;
10022 break;
10023 case MVT::v16f32:
10024 case MVT::v16i32:
10025 case MVT::v8f64:
10026 case MVT::v8i64:
10027 if (Subtarget.hasAVX512())
10028 Opcode = X86ISD::VPERMV;
10029 break;
10030 }
10032 if (!Opcode)
10033 return SDValue();
10034 assert((VT.getSizeInBits() == ShuffleVT.getSizeInBits()) &&
10035 (VT.getScalarSizeInBits() % ShuffleVT.getScalarSizeInBits()) == 0 &&
10036 "Illegal variable permute shuffle type");
10038 uint64_t Scale = VT.getScalarSizeInBits() / ShuffleVT.getScalarSizeInBits();
10039 if (Scale > 1)
10040 IndicesVec = ScaleIndices(IndicesVec, Scale);
10042 EVT ShuffleIdxVT = EVT(ShuffleVT).changeVectorElementTypeToInteger();
10043 IndicesVec = DAG.getBitcast(ShuffleIdxVT, IndicesVec);
10045 SrcVec = DAG.getBitcast(ShuffleVT, SrcVec);
10046 SDValue Res = Opcode == X86ISD::VPERMV
10047 ? DAG.getNode(Opcode, DL, ShuffleVT, IndicesVec, SrcVec)
10048 : DAG.getNode(Opcode, DL, ShuffleVT, SrcVec, IndicesVec);
10049 return DAG.getBitcast(VT, Res);
10050 }
10052 // Tries to lower a BUILD_VECTOR composed of extract-extract chains that can be
10053 // reasoned to be a permutation of a vector by indices in a non-constant vector.
10054 // (build_vector (extract_elt V, (extract_elt I, 0)),
10055 // (extract_elt V, (extract_elt I, 1)),
10056 // ...
10057 // ->
10058 // (vpermv I, V)
10060 // TODO: Handle undefs
10061 // TODO: Utilize pshufb and zero mask blending to support more efficient
10062 // construction of vectors with constant-0 elements.
10063 static SDValue
10064 LowerBUILD_VECTORAsVariablePermute(SDValue V, SelectionDAG &DAG,
10065 const X86Subtarget &Subtarget) {
10066 SDValue SrcVec, IndicesVec;
10067 // Check for a match of the permute source vector and permute index elements.
10068 // This is done by checking that the i-th build_vector operand is of the form:
10069 // (extract_elt SrcVec, (extract_elt IndicesVec, i)).
10070 for (unsigned Idx = 0, E = V.getNumOperands(); Idx != E; ++Idx) {
10071 SDValue Op = V.getOperand(Idx);
10072 if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
10073 return SDValue();
10075 // If this is the first extract encountered in V, set the source vector,
10076 // otherwise verify the extract is from the previously defined source
10077 // vector.
10078 if (!SrcVec)
10079 SrcVec = Op.getOperand(0);
10080 else if (SrcVec != Op.getOperand(0))
10081 return SDValue();
10082 SDValue ExtractedIndex = Op->getOperand(1);
10083 // Peek through extends.
10084 if (ExtractedIndex.getOpcode() == ISD::ZERO_EXTEND ||
10085 ExtractedIndex.getOpcode() == ISD::SIGN_EXTEND)
10086 ExtractedIndex = ExtractedIndex.getOperand(0);
10087 if (ExtractedIndex.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
10088 return SDValue();
10090 // If this is the first extract from the index vector candidate, set the
10091 // indices vector, otherwise verify the extract is from the previously
10092 // defined indices vector.
10093 if (!IndicesVec)
10094 IndicesVec = ExtractedIndex.getOperand(0);
10095 else if (IndicesVec != ExtractedIndex.getOperand(0))
10096 return SDValue();
10098 auto *PermIdx = dyn_cast<ConstantSDNode>(ExtractedIndex.getOperand(1));
10099 if (!PermIdx || PermIdx->getAPIntValue() != Idx)
10100 return SDValue();
10101 }
10103 SDLoc DL(V);
10104 MVT VT = V.getSimpleValueType();
10105 return createVariablePermute(VT, SrcVec, IndicesVec, DL, DAG, Subtarget);
10106 }
10108 SDValue
10109 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
10110 SDLoc dl(Op);
10112 MVT VT = Op.getSimpleValueType();
10113 MVT EltVT = VT.getVectorElementType();
10114 unsigned NumElems = Op.getNumOperands();
10116 // Generate vectors for predicate vectors.
10117 if (VT.getVectorElementType() == MVT::i1 && Subtarget.hasAVX512())
10118 return LowerBUILD_VECTORvXi1(Op, DAG, Subtarget);
10120 if (SDValue VectorConstant = materializeVectorConstant(Op, DAG, Subtarget))
10121 return VectorConstant;
10123 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(Op.getNode());
10124 if (SDValue AddSub = lowerToAddSubOrFMAddSub(BV, Subtarget, DAG))
10125 return AddSub;
10126 if (SDValue HorizontalOp = LowerToHorizontalOp(BV, Subtarget, DAG))
10127 return HorizontalOp;
10128 if (SDValue Broadcast = lowerBuildVectorAsBroadcast(BV, Subtarget, DAG))
10129 return Broadcast;
10130 if (SDValue BitOp = lowerBuildVectorToBitOp(BV, Subtarget, DAG))
10131 return BitOp;
10133 unsigned EVTBits = EltVT.getSizeInBits();
10135 unsigned NumZero = 0;
10136 unsigned NumNonZero = 0;
10137 uint64_t NonZeros = 0;
10138 bool IsAllConstants = true;
10139 SmallSet<SDValue, 8> Values;
10140 unsigned NumConstants = NumElems;
10141 for (unsigned i = 0; i < NumElems; ++i) {
10142 SDValue Elt = Op.getOperand(i);
10143 if (Elt.isUndef())
10144 continue;
10145 Values.insert(Elt);
10146 if (!isa<ConstantSDNode>(Elt) && !isa<ConstantFPSDNode>(Elt)) {
10147 IsAllConstants = false;
10148 NumConstants--;
10149 }
10150 if (X86::isZeroNode(Elt))
10151 NumZero++;
10152 else {
10153 assert(i < sizeof(NonZeros) * 8); // Make sure the shift is within range.
10154 NonZeros |= ((uint64_t)1 << i);
10155 NumNonZero++;
10156 }
10157 }
10159 // All undef vector. Return an UNDEF. All zero vectors were handled above.
10160 if (NumNonZero == 0)
10161 return DAG.getUNDEF(VT);
10163 // If we are inserting one variable into a vector of non-zero constants, try
10164 // to avoid loading each constant element as a scalar. Load the constants as a
10165 // vector and then insert the variable scalar element. If insertion is not
10166 // supported, fall back to a shuffle to get the scalar blended with the
10167 // constants. Insertion into a zero vector is handled as a special-case
10168 // somewhere below here.
10169 if (NumConstants == NumElems - 1 && NumNonZero != 1 &&
10170 (isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT) ||
10171 isOperationLegalOrCustom(ISD::VECTOR_SHUFFLE, VT))) {
10172 // Create an all-constant vector. The variable element in the old
10173 // build vector is replaced by undef in the constant vector. Save the
10174 // variable scalar element and its index for use in the insertelement.
10175 LLVMContext &Context = *DAG.getContext();
10176 Type *EltType = Op.getValueType().getScalarType().getTypeForEVT(Context);
10177 SmallVector<Constant *, 16> ConstVecOps(NumElems, UndefValue::get(EltType));
10178 SDValue VarElt;
10179 SDValue InsIndex;
10180 for (unsigned i = 0; i != NumElems; ++i) {
10181 SDValue Elt = Op.getOperand(i);
10182 if (auto *C = dyn_cast<ConstantSDNode>(Elt))
10183 ConstVecOps[i] = ConstantInt::get(Context, C->getAPIntValue());
10184 else if (auto *C = dyn_cast<ConstantFPSDNode>(Elt))
10185 ConstVecOps[i] = ConstantFP::get(Context, C->getValueAPF());
10186 else if (!Elt.isUndef()) {
10187 assert(!VarElt.getNode() && !InsIndex.getNode() &&
10188 "Expected one variable element in this vector");
10189 VarElt = Elt;
10190 InsIndex = DAG.getVectorIdxConstant(i, dl);
10191 }
10192 }
10193 Constant *CV = ConstantVector::get(ConstVecOps);
10194 SDValue DAGConstVec = DAG.getConstantPool(CV, VT);
10196 // The constants we just created may not be legal (e.g., floating point). We
10197 // must lower the vector right here because we cannot guarantee that we'll
10198 // legalize it before loading it. This is also why we could not just create
10199 // a new build vector here. If the build vector contains illegal constants,
10200 // it could get split back up into a series of insert elements.
10201 // TODO: Improve this by using shorter loads with broadcast/VZEXT_LOAD.
10202 SDValue LegalDAGConstVec = LowerConstantPool(DAGConstVec, DAG);
10203 MachineFunction &MF = DAG.getMachineFunction();
10204 MachinePointerInfo MPI = MachinePointerInfo::getConstantPool(MF);
10205 SDValue Ld = DAG.getLoad(VT, dl, DAG.getEntryNode(), LegalDAGConstVec, MPI);
10206 unsigned InsertC = cast<ConstantSDNode>(InsIndex)->getZExtValue();
10207 unsigned NumEltsInLow128Bits = 128 / VT.getScalarSizeInBits();
10208 if (InsertC < NumEltsInLow128Bits)
10209 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Ld, VarElt, InsIndex);
10211 // There's no good way to insert into the high elements of a >128-bit
10212 // vector, so use shuffles to avoid an extract/insert sequence.
10213 assert(VT.getSizeInBits() > 128 && "Invalid insertion index?");
10214 assert(Subtarget.hasAVX() && "Must have AVX with >16-byte vector");
10215 SmallVector<int, 8> ShuffleMask;
10216 unsigned NumElts = VT.getVectorNumElements();
10217 for (unsigned i = 0; i != NumElts; ++i)
10218 ShuffleMask.push_back(i == InsertC ? NumElts : i);
10219 SDValue S2V = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, VarElt);
10220 return DAG.getVectorShuffle(VT, dl, Ld, S2V, ShuffleMask);
10223 // Special case for single non-zero, non-undef, element.
10224 if (NumNonZero == 1) {
10225 unsigned Idx = countTrailingZeros(NonZeros);
10226 SDValue Item = Op.getOperand(Idx);
10228 // If we have a constant or non-constant insertion into the low element of
10229 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
10230 // the rest of the elements. This will be matched as movd/movq/movss/movsd
10231 // depending on what the source datatype is.
10232 if (Idx == 0) {
10233 if (NumZero == 0)
10234 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
10236 if (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
10237 (EltVT == MVT::i64 && Subtarget.is64Bit())) {
10238 assert((VT.is128BitVector() || VT.is256BitVector() ||
10239 VT.is512BitVector()) &&
10240 "Expected an SSE value type!");
10241 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
10242 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
10243 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
10244 }
10246 // We can't directly insert an i8 or i16 into a vector, so zero extend
10247 // it to i32 first.
10248 if (EltVT == MVT::i16 || EltVT == MVT::i8) {
10249 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
10250 MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits()/32);
10251 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, Item);
10252 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
10253 return DAG.getBitcast(VT, Item);
10254 }
10255 }
10257 // Is it a vector logical left shift?
10258 if (NumElems == 2 && Idx == 1 &&
10259 X86::isZeroNode(Op.getOperand(0)) &&
10260 !X86::isZeroNode(Op.getOperand(1))) {
10261 unsigned NumBits = VT.getSizeInBits();
10262 return getVShift(true, VT,
10263 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
10264 VT, Op.getOperand(1)),
10265 NumBits/2, DAG, *this, dl);
10266 }
10268 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
10269 return SDValue();
10271 // Otherwise, if this is a vector with i32 or f32 elements, and the element
10272 // is a non-constant being inserted into an element other than the low one,
10273 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
10274 // movd/movss) to move this into the low element, then shuffle it into
10275 // place.
10276 if (EVTBits == 32) {
10277 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
10278 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
10279 }
10280 }
10282 // Splat is obviously ok. Let legalizer expand it to a shuffle.
10283 if (Values.size() == 1) {
10284 if (EVTBits == 32) {
10285 // Instead of a shuffle like this:
10286 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
10287 // Check if it's possible to issue this instead.
10288 // shuffle (vload ptr), undef, <1, 1, 1, 1>
10289 unsigned Idx = countTrailingZeros(NonZeros);
10290 SDValue Item = Op.getOperand(Idx);
10291 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
10292 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
10293 }
10294 return SDValue();
10295 }
10297 // A vector full of immediates; various special cases are already
10298 // handled, so this is best done with a single constant-pool load.
10299 if (IsAllConstants)
10300 return SDValue();
10302 if (SDValue V = LowerBUILD_VECTORAsVariablePermute(Op, DAG, Subtarget))
10303 return V;
10305 // See if we can use a vector load to get all of the elements.
10306 {
10307 SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
10308 if (SDValue LD =
10309 EltsFromConsecutiveLoads(VT, Ops, dl, DAG, Subtarget, false))
10310 return LD;
10311 }
10313 // If this is a splat of pairs of 32-bit elements, we can use a narrower
10314 // build_vector and broadcast it.
10315 // TODO: We could probably generalize this more.
10316 if (Subtarget.hasAVX2() && EVTBits == 32 && Values.size() == 2) {
10317 SDValue Ops[4] = { Op.getOperand(0), Op.getOperand(1),
10318 DAG.getUNDEF(EltVT), DAG.getUNDEF(EltVT) };
10319 auto CanSplat = [](SDValue Op, unsigned NumElems, ArrayRef<SDValue> Ops) {
10320 // Make sure all the even/odd operands match.
10321 for (unsigned i = 2; i != NumElems; ++i)
10322 if (Ops[i % 2] != Op.getOperand(i))
10323 return false;
10324 return true;
10325 };
10326 if (CanSplat(Op, NumElems, Ops)) {
10327 MVT WideEltVT = VT.isFloatingPoint() ? MVT::f64 : MVT::i64;
10328 MVT NarrowVT = MVT::getVectorVT(EltVT, 4);
10329 // Create a new build vector and cast to v2i64/v2f64.
10330 SDValue NewBV = DAG.getBitcast(MVT::getVectorVT(WideEltVT, 2),
10331 DAG.getBuildVector(NarrowVT, dl, Ops));
10332 // Broadcast from v2i64/v2f64 and cast to final VT.
10333 MVT BcastVT = MVT::getVectorVT(WideEltVT, NumElems/2);
10334 return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, dl, BcastVT,
10335 NewBV));
10336 }
10337 }
10339 // For AVX-length vectors, build the individual 128-bit pieces and use
10340 // shuffles to put them in place.
10341 if (VT.getSizeInBits() > 128) {
10342 MVT HVT = MVT::getVectorVT(EltVT, NumElems/2);
10344 // Build both the lower and upper subvector.
10345 SDValue Lower =
10346 DAG.getBuildVector(HVT, dl, Op->ops().slice(0, NumElems / 2));
10347 SDValue Upper = DAG.getBuildVector(
10348 HVT, dl, Op->ops().slice(NumElems / 2, NumElems /2));
10350 // Recreate the wider vector with the lower and upper part.
10351 return concatSubVectors(Lower, Upper, DAG, dl);
10352 }
10354 // Let legalizer expand 2-wide build_vectors.
10355 if (EVTBits == 64) {
10356 if (NumNonZero == 1) {
10357 // One half is zero or undef.
10358 unsigned Idx = countTrailingZeros(NonZeros);
10359 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
10360 Op.getOperand(Idx));
10361 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
10362 }
10363 return SDValue();
10364 }
10366 // If element VT is < 32 bits, convert it to inserts into a zero vector.
10367 if (EVTBits == 8 && NumElems == 16)
10368 if (SDValue V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero,
10369 DAG, Subtarget))
10370 return V;
10372 if (EVTBits == 16 && NumElems == 8)
10373 if (SDValue V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero,
10374 DAG, Subtarget))
10375 return V;
10377 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
10378 if (EVTBits == 32 && NumElems == 4)
10379 if (SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget))
10380 return V;
10382 // If element VT is == 32 bits, turn it into a number of shuffles.
10383 if (NumElems == 4 && NumZero > 0) {
10384 SmallVector<SDValue, 8> Ops(NumElems);
10385 for (unsigned i = 0; i < 4; ++i) {
10386 bool isZero = !(NonZeros & (1ULL << i));
10387 if (isZero)
10388 Ops[i] = getZeroVector(VT, Subtarget, DAG, dl);
10389 else
10390 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
10391 }
10393 for (unsigned i = 0; i < 2; ++i) {
10394 switch ((NonZeros >> (i*2)) & 0x3) {
10395 default: llvm_unreachable("Unexpected NonZero count");
10396 case 0:
10397 Ops[i] = Ops[i*2]; // Must be a zero vector.
10398 break;
10399 case 1:
10400 Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2+1], Ops[i*2]);
10401 break;
10402 case 2:
10403 Ops[i] = getMOVL(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
10404 break;
10405 case 3:
10406 Ops[i] = getUnpackl(DAG, dl, VT, Ops[i*2], Ops[i*2+1]);
10407 break;
10408 }
10409 }
10411 bool Reverse1 = (NonZeros & 0x3) == 2;
10412 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
10413 int MaskVec[] = {
10414 Reverse1 ? 1 : 0,
10415 Reverse1 ? 0 : 1,
10416 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
10417 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
10418 };
10419 return DAG.getVectorShuffle(VT, dl, Ops[0], Ops[1], MaskVec);
10420 }
10422 assert(Values.size() > 1 && "Expected non-undef and non-splat vector");
10424 // Check for a build vector that is mostly a shuffle plus a few insertions.
10425 if (SDValue Sh = buildFromShuffleMostly(Op, DAG))
10426 return Sh;
10428 // For SSE 4.1, use insertps to put the high elements into the low element.
10429 if (Subtarget.hasSSE41()) {
10430 SDValue Result;
10431 if (!Op.getOperand(0).isUndef())
10432 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
10433 else
10434 Result = DAG.getUNDEF(VT);
10436 for (unsigned i = 1; i < NumElems; ++i) {
10437 if (Op.getOperand(i).isUndef()) continue;
10438 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
10439 Op.getOperand(i), DAG.getIntPtrConstant(i, dl));
10440 }
10441 return Result;
10442 }
10444 // Otherwise, expand into a number of unpckl*, start by extending each of
10445 // our (non-undef) elements to the full vector width with the element in the
10446 // bottom slot of the vector (which generates no code for SSE).
10447 SmallVector<SDValue, 8> Ops(NumElems);
10448 for (unsigned i = 0; i < NumElems; ++i) {
10449 if (!Op.getOperand(i).isUndef())
10450 Ops[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
10451 else
10452 Ops[i] = DAG.getUNDEF(VT);
10453 }
10455 // Next, we iteratively mix elements, e.g. for v4f32:
10456 // Step 1: unpcklps 0, 1 ==> X: <?, ?, 1, 0>
10457 // : unpcklps 2, 3 ==> Y: <?, ?, 3, 2>
10458 // Step 2: unpcklpd X, Y ==> <3, 2, 1, 0>
10459 for (unsigned Scale = 1; Scale < NumElems; Scale *= 2) {
10460 // Generate scaled UNPCKL shuffle mask.
10461 SmallVector<int, 16> Mask;
10462 for (unsigned i = 0; i != Scale; ++i)
10463 Mask.push_back(i);
10464 for (unsigned i = 0; i != Scale; ++i)
10465 Mask.push_back(NumElems+i);
10466 Mask.append(NumElems - Mask.size(), SM_SentinelUndef);
10468 for (unsigned i = 0, e = NumElems / (2 * Scale); i != e; ++i)
10469 Ops[i] = DAG.getVectorShuffle(VT, dl, Ops[2*i], Ops[(2*i)+1], Mask);
10470 }
10471 return Ops[0];
10472 }
10474 // 256-bit AVX can use the vinsertf128 instruction
10475 // to create 256-bit vectors from two other 128-bit ones.
10476 // TODO: Detect subvector broadcast here instead of DAG combine?
10477 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG,
10478 const X86Subtarget &Subtarget) {
10479 SDLoc dl(Op);
10480 MVT ResVT = Op.getSimpleValueType();
10482 assert((ResVT.is256BitVector() ||
10483 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
10485 unsigned NumOperands = Op.getNumOperands();
10486 unsigned NumZero = 0;
10487 unsigned NumNonZero = 0;
10488 unsigned NonZeros = 0;
10489 for (unsigned i = 0; i != NumOperands; ++i) {
10490 SDValue SubVec = Op.getOperand(i);
10491 if (SubVec.isUndef())
10492 continue;
10493 if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
10494 ++NumZero;
10495 else {
10496 assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
10497 NonZeros |= 1 << i;
10498 ++NumNonZero;
10499 }
10500 }
10502 // If we have more than 2 non-zeros, build each half separately.
10503 if (NumNonZero > 2) {
10504 MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
10505 ArrayRef<SDUse> Ops = Op->ops();
10506 SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
10507 Ops.slice(0, NumOperands/2));
10508 SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
10509 Ops.slice(NumOperands/2));
10510 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
10511 }
10513 // Otherwise, build it up through insert_subvectors.
10514 SDValue Vec = NumZero ? getZeroVector(ResVT, Subtarget, DAG, dl)
10515 : DAG.getUNDEF(ResVT);
10517 MVT SubVT = Op.getOperand(0).getSimpleValueType();
10518 unsigned NumSubElems = SubVT.getVectorNumElements();
10519 for (unsigned i = 0; i != NumOperands; ++i) {
10520 if ((NonZeros & (1 << i)) == 0)
10521 continue;
10523 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec,
10524 Op.getOperand(i),
10525 DAG.getIntPtrConstant(i * NumSubElems, dl));
10526 }
10528 return Vec;
10529 }
10531 // Returns true if the given node is a type promotion (by concatenating i1
10532 // zeros) of the result of a node that already zeros all upper bits of
10533 // k-register.
10534 // TODO: Merge this with LowerAVXCONCAT_VECTORS?
10535 static SDValue LowerCONCAT_VECTORSvXi1(SDValue Op,
10536 const X86Subtarget &Subtarget,
10537 SelectionDAG & DAG) {
10538 SDLoc dl(Op);
10539 MVT ResVT = Op.getSimpleValueType();
10540 unsigned NumOperands = Op.getNumOperands();
10542 assert(NumOperands > 1 && isPowerOf2_32(NumOperands) &&
10543 "Unexpected number of operands in CONCAT_VECTORS");
10545 uint64_t Zeros = 0;
10546 uint64_t NonZeros = 0;
10547 for (unsigned i = 0; i != NumOperands; ++i) {
10548 SDValue SubVec = Op.getOperand(i);
10549 if (SubVec.isUndef())
10550 continue;
10551 assert(i < sizeof(NonZeros) * CHAR_BIT); // Ensure the shift is in range.
10552 if (ISD::isBuildVectorAllZeros(SubVec.getNode()))
10553 Zeros |= (uint64_t)1 << i;
10554 else
10555 NonZeros |= (uint64_t)1 << i;
10556 }
10558 unsigned NumElems = ResVT.getVectorNumElements();
10560 // If we are inserting a non-zero vector and there are zeros in the LSBs
10561 // and undef in the MSBs, we need to emit a KSHIFTL. The generic lowering
10562 // to insert_subvector will give us two kshifts.
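// For example (illustrative), concat(v2i1 zero, v2i1 X, undef, undef) can
// be lowered as one KSHIFTL of X by 2: the shift itself supplies the two
// zero bits below X, and the undef upper bits fall out of the extract below.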
10563 if (isPowerOf2_64(NonZeros) && Zeros != 0 && NonZeros > Zeros &&
10564 Log2_64(NonZeros) != NumOperands - 1) {
10565 MVT ShiftVT = ResVT;
10566 if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8)
10567 ShiftVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
10568 unsigned Idx = Log2_64(NonZeros);
10569 SDValue SubVec = Op.getOperand(Idx);
10570 unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
10571 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ShiftVT,
10572 DAG.getUNDEF(ShiftVT), SubVec,
10573 DAG.getIntPtrConstant(0, dl));
10574 Op = DAG.getNode(X86ISD::KSHIFTL, dl, ShiftVT, SubVec,
10575 DAG.getTargetConstant(Idx * SubVecNumElts, dl, MVT::i8));
10576 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResVT, Op,
10577 DAG.getIntPtrConstant(0, dl));
10580 // If there are zero or one non-zeros we can handle this very simply.
10581 if (NonZeros == 0 || isPowerOf2_64(NonZeros)) {
10582 SDValue Vec = Zeros ? DAG.getConstant(0, dl, ResVT) : DAG.getUNDEF(ResVT);
10583 if (!NonZeros)
10584 return Vec;
10585 unsigned Idx = Log2_64(NonZeros);
10586 SDValue SubVec = Op.getOperand(Idx);
10587 unsigned SubVecNumElts = SubVec.getSimpleValueType().getVectorNumElements();
10588 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, SubVec,
10589 DAG.getIntPtrConstant(Idx * SubVecNumElts, dl));
10590 }
10592 if (NumOperands > 2) {
10593 MVT HalfVT = ResVT.getHalfNumVectorElementsVT();
10594 ArrayRef<SDUse> Ops = Op->ops();
10595 SDValue Lo = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
10596 Ops.slice(0, NumOperands/2));
10597 SDValue Hi = DAG.getNode(ISD::CONCAT_VECTORS, dl, HalfVT,
10598 Ops.slice(NumOperands/2));
10599 return DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Lo, Hi);
10600 }
10602 assert(countPopulation(NonZeros) == 2 && "Simple cases not handled?");
10604 if (ResVT.getVectorNumElements() >= 16)
10605 return Op; // The operation is legal with KUNPCK
10607 SDValue Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT,
10608 DAG.getUNDEF(ResVT), Op.getOperand(0),
10609 DAG.getIntPtrConstant(0, dl));
10610 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResVT, Vec, Op.getOperand(1),
10611 DAG.getIntPtrConstant(NumElems/2, dl));
10612 }
10614 static SDValue LowerCONCAT_VECTORS(SDValue Op,
10615 const X86Subtarget &Subtarget,
10616 SelectionDAG &DAG) {
10617 MVT VT = Op.getSimpleValueType();
10618 if (VT.getVectorElementType() == MVT::i1)
10619 return LowerCONCAT_VECTORSvXi1(Op, Subtarget, DAG);
10621 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
10622 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
10623 Op.getNumOperands() == 4)));
10625 // AVX can use the vinsertf128 instruction to create 256-bit vectors
10626 // from two other 128-bit ones.
10628 // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
10629 return LowerAVXCONCAT_VECTORS(Op, DAG, Subtarget);
10630 }
10632 //===----------------------------------------------------------------------===//
10633 // Vector shuffle lowering
10635 // This is an experimental code path for lowering vector shuffles on x86. It is
10636 // designed to handle arbitrary vector shuffles and blends, gracefully
10637 // degrading performance as necessary. It works hard to recognize idiomatic
10638 // shuffles and lower them to optimal instruction patterns without leaving
10639 // a framework that allows reasonably efficient handling of all vector shuffle
10640 // patterns.
10641 //===----------------------------------------------------------------------===//
10643 /// Tiny helper function to identify a no-op mask.
10645 /// This is a somewhat boring predicate function. It checks whether the mask
10646 /// array input, which is assumed to be a single-input shuffle mask of the kind
10647 /// used by the X86 shuffle instructions (not a fully general
10648 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
10649 /// in-place shuffle are 'no-op's.
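/// For example (illustrative), {0, 1, -1, 3} is a no-op mask for a
/// 4-element shuffle, while {0, 0, 2, 3} is not.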
10650 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
10651 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
10652 assert(Mask[i] >= -1 && "Out of bound mask element!");
10653 if (Mask[i] >= 0 && Mask[i] != i)
10654 return false;
10655 }
10656 return true;
10657 }
10659 /// Test whether there are elements crossing LaneSizeInBits lanes in this
10660 /// shuffle mask.
10662 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
10663 /// and we routinely test for these.
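///
/// For example (illustrative), for v8f32 with 128-bit lanes, a mask with
/// Mask[0] == 4 pulls an upper-lane element into the lower lane and is
/// lane-crossing, while Mask[0] == 1 stays within the lane.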
10664 static bool isLaneCrossingShuffleMask(unsigned LaneSizeInBits,
10665 unsigned ScalarSizeInBits,
10666 ArrayRef<int> Mask) {
10667 assert(LaneSizeInBits && ScalarSizeInBits &&
10668 (LaneSizeInBits % ScalarSizeInBits) == 0 &&
10669 "Illegal shuffle lane size");
10670 int LaneSize = LaneSizeInBits / ScalarSizeInBits;
10671 int Size = Mask.size();
10672 for (int i = 0; i < Size; ++i)
10673 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
10674 return true;
10675 return false;
10676 }
10678 /// Test whether there are elements crossing 128-bit lanes in this
10679 /// shuffle mask.
10680 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
10681 return isLaneCrossingShuffleMask(128, VT.getScalarSizeInBits(), Mask);
10682 }
10684 /// Test whether a shuffle mask is equivalent within each sub-lane.
10686 /// This checks a shuffle mask to see if it is performing the same
10687 /// lane-relative shuffle in each sub-lane. This trivially implies
10688 /// that it is also not lane-crossing. It may however involve a blend from the
10689 /// same lane of a second vector.
10691 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
10692 /// non-trivial to compute in the face of undef lanes. The representation is
10693 /// suitable for use with existing 128-bit shuffles as entries from the second
10694 /// vector have been remapped to [LaneSize, 2*LaneSize).
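///
/// For example (illustrative), the v8i32 mask {0, 8, 1, 9, 4, 12, 5, 13}
/// repeats as RepeatedMask == {0, 4, 1, 5} in each 128-bit lane (an
/// unpcklps-style pattern), with second-vector entries remapped into [4, 8).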
10695 static bool isRepeatedShuffleMask(unsigned LaneSizeInBits, MVT VT,
10696 ArrayRef<int> Mask,
10697 SmallVectorImpl<int> &RepeatedMask) {
10698 auto LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
10699 RepeatedMask.assign(LaneSize, -1);
10700 int Size = Mask.size();
10701 for (int i = 0; i < Size; ++i) {
10702 assert(Mask[i] == SM_SentinelUndef || Mask[i] >= 0);
10703 if (Mask[i] < 0)
10704 continue;
10705 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
10706 // This entry crosses lanes, so there is no way to model this shuffle.
10707 return false;
10709 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
10710 // Adjust second vector indices to start at LaneSize instead of Size.
10711 int LocalM = Mask[i] < Size ? Mask[i] % LaneSize
10712 : Mask[i] % LaneSize + LaneSize;
10713 if (RepeatedMask[i % LaneSize] < 0)
10714 // This is the first non-undef entry in this slot of a 128-bit lane.
10715 RepeatedMask[i % LaneSize] = LocalM;
10716 else if (RepeatedMask[i % LaneSize] != LocalM)
10717 // Found a mismatch with the repeated mask.
10718 return false;
10719 }
10720 return true;
10721 }
10723 /// Test whether a shuffle mask is equivalent within each 128-bit lane.
10724 static bool
10725 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
10726 SmallVectorImpl<int> &RepeatedMask) {
10727 return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
10728 }
10730 static bool
10731 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask) {
10732 SmallVector<int, 32> RepeatedMask;
10733 return isRepeatedShuffleMask(128, VT, Mask, RepeatedMask);
10734 }
10736 /// Test whether a shuffle mask is equivalent within each 256-bit lane.
10737 static bool
10738 is256BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
10739 SmallVectorImpl<int> &RepeatedMask) {
10740 return isRepeatedShuffleMask(256, VT, Mask, RepeatedMask);
10741 }
10743 /// Test whether a target shuffle mask is equivalent within each sub-lane.
10744 /// Unlike isRepeatedShuffleMask we must respect SM_SentinelZero.
10745 static bool isRepeatedTargetShuffleMask(unsigned LaneSizeInBits, MVT VT,
10746 ArrayRef<int> Mask,
10747 SmallVectorImpl<int> &RepeatedMask) {
10748 int LaneSize = LaneSizeInBits / VT.getScalarSizeInBits();
10749 RepeatedMask.assign(LaneSize, SM_SentinelUndef);
10750 int Size = Mask.size();
10751 for (int i = 0; i < Size; ++i) {
10752 assert(isUndefOrZero(Mask[i]) || (Mask[i] >= 0));
10753 if (Mask[i] == SM_SentinelUndef)
10754 continue;
10755 if (Mask[i] == SM_SentinelZero) {
10756 if (!isUndefOrZero(RepeatedMask[i % LaneSize]))
10757 return false;
10758 RepeatedMask[i % LaneSize] = SM_SentinelZero;
10759 continue;
10760 }
10761 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
10762 // This entry crosses lanes, so there is no way to model this shuffle.
10763 return false;
10765 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
10766 // Adjust second vector indices to start at LaneSize instead of Size.
10767 int LocalM =
10768 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + LaneSize;
10769 if (RepeatedMask[i % LaneSize] == SM_SentinelUndef)
10770 // This is the first non-undef entry in this slot of a 128-bit lane.
10771 RepeatedMask[i % LaneSize] = LocalM;
10772 else if (RepeatedMask[i % LaneSize] != LocalM)
10773 // Found a mismatch with the repeated mask.
10774 return false;
10775 }
10777 return true;
10778 }
10779 /// Checks whether a shuffle mask is equivalent to an explicit list of
10780 /// arguments.
10782 /// This is a fast way to test a shuffle mask against a fixed pattern:
10784 /// if (isShuffleEquivalent(V1, V2, Mask, {3, 2, 1, 0})) { ... }
10786 /// It returns true if the mask is exactly as wide as the argument list, and
10787 /// each element of the mask is either -1 (signifying undef) or the value given
10788 /// in the argument.
10789 static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
10790 ArrayRef<int> ExpectedMask) {
10791 if (Mask.size() != ExpectedMask.size())
10792 return false;
10794 int Size = Mask.size();
10796 // If the values are build vectors, we can look through them to find
10797 // equivalent inputs that make the shuffles equivalent.
10798 auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
10799 auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
10801 for (int i = 0; i < Size; ++i) {
10802 assert(Mask[i] >= -1 && "Out of bound mask element!");
10803 if (Mask[i] >= 0 && Mask[i] != ExpectedMask[i]) {
10804 auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
10805 auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
10806 if (!MaskBV || !ExpectedBV ||
10807 MaskBV->getOperand(Mask[i] % Size) !=
10808 ExpectedBV->getOperand(ExpectedMask[i] % Size))
10809 return false;
10810 }
10811 }
10813 return true;
10814 }
10816 /// Checks whether a target shuffle mask is equivalent to an explicit pattern.
10818 /// The masks must be exactly the same width.
10820 /// If an element in Mask matches SM_SentinelUndef (-1) then the corresponding
10821 /// value in ExpectedMask is always accepted. Otherwise the indices must match.
10823 /// SM_SentinelZero is accepted as a valid negative index but must match in
10824 /// both.
10825 static bool isTargetShuffleEquivalent(ArrayRef<int> Mask,
10826 ArrayRef<int> ExpectedMask,
10827 SDValue V1 = SDValue(),
10828 SDValue V2 = SDValue()) {
10829 int Size = Mask.size();
10830 if (Size != (int)ExpectedMask.size())
10831 return false;
10832 assert(isUndefOrZeroOrInRange(ExpectedMask, 0, 2 * Size) &&
10833 "Illegal target shuffle mask");
10835 // Check for out-of-range target shuffle mask indices.
10836 if (!isUndefOrZeroOrInRange(Mask, 0, 2 * Size))
10837 return false;
10839 // If the values are build vectors, we can look through them to find
10840 // equivalent inputs that make the shuffles equivalent.
10841 auto *BV1 = dyn_cast_or_null<BuildVectorSDNode>(V1);
10842 auto *BV2 = dyn_cast_or_null<BuildVectorSDNode>(V2);
10843 BV1 = ((BV1 && Size != (int)BV1->getNumOperands()) ? nullptr : BV1);
10844 BV2 = ((BV2 && Size != (int)BV2->getNumOperands()) ? nullptr : BV2);
10846 for (int i = 0; i < Size; ++i) {
10847 if (Mask[i] == SM_SentinelUndef || Mask[i] == ExpectedMask[i])
10848 continue;
10849 if (0 <= Mask[i] && 0 <= ExpectedMask[i]) {
10850 auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
10851 auto *ExpectedBV = ExpectedMask[i] < Size ? BV1 : BV2;
10852 if (MaskBV && ExpectedBV &&
10853 MaskBV->getOperand(Mask[i] % Size) ==
10854 ExpectedBV->getOperand(ExpectedMask[i] % Size))
10855 continue;
10856 }
10857 // TODO - handle SM_Sentinel equivalences.
10858 return false;
10859 }
10861 return true;
10862 }
10863 // Attempt to create a shuffle mask from a VSELECT condition mask.
10864 static bool createShuffleMaskFromVSELECT(SmallVectorImpl<int> &Mask,
10865 SDValue Cond) {
10866 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
10867 return false;
10869 unsigned Size = Cond.getValueType().getVectorNumElements();
10870 Mask.resize(Size, SM_SentinelUndef);
10872 for (int i = 0; i != (int)Size; ++i) {
10873 SDValue CondElt = Cond.getOperand(i);
10874 Mask[i] = i;
10875 // Arbitrarily choose from the 2nd operand if the select condition element
10876 // is undef.
10877 // TODO: Can we do better by matching patterns such as even/odd?
10878 if (CondElt.isUndef() || isNullConstant(CondElt))
10879 Mask[i] += Size;
10880 }
10882 return true;
10883 }
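// For example, a v4i32 VSELECT whose condition vector is <-1, 0, -1, 0>
// selects operand 1 in the true (-1) lanes and operand 2 elsewhere, producing
// the shuffle mask <0, 5, 2, 7>.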
10885 // Check if the shuffle mask is suitable for the AVX vpunpcklwd or vpunpckhwd
10886 // instruction.
10887 static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT) {
10888 if (VT != MVT::v8i32 && VT != MVT::v8f32)
10889 return false;
10891 SmallVector<int, 8> Unpcklwd;
10892 createUnpackShuffleMask(MVT::v8i16, Unpcklwd, /* Lo = */ true,
10893 /* Unary = */ false);
10894 SmallVector<int, 8> Unpckhwd;
10895 createUnpackShuffleMask(MVT::v8i16, Unpckhwd, /* Lo = */ false,
10896 /* Unary = */ false);
10897 bool IsUnpackwdMask = (isTargetShuffleEquivalent(Mask, Unpcklwd) ||
10898 isTargetShuffleEquivalent(Mask, Unpckhwd));
10899 return IsUnpackwdMask;
10900 }
10902 static bool is128BitUnpackShuffleMask(ArrayRef<int> Mask) {
10903 // Create 128-bit vector type based on mask size.
10904 MVT EltVT = MVT::getIntegerVT(128 / Mask.size());
10905 MVT VT = MVT::getVectorVT(EltVT, Mask.size());
10907 // We can't assume a canonical shuffle mask, so try the commuted version too.
10908 SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
10909 ShuffleVectorSDNode::commuteMask(CommutedMask);
10911 // Match any of unary/binary or low/high.
10912 for (unsigned i = 0; i != 4; ++i) {
10913 SmallVector<int, 16> UnpackMask;
10914 createUnpackShuffleMask(VT, UnpackMask, (i >> 1) % 2, i % 2);
10915 if (isTargetShuffleEquivalent(Mask, UnpackMask) ||
10916 isTargetShuffleEquivalent(CommutedMask, UnpackMask))
10917 return true;
10918 }
10920 return false;
10921 }
10922 /// Return true if a shuffle mask chooses elements identically in its top and
10923 /// bottom halves. For example, any splat mask has the same top and bottom
10924 /// halves. If an element is undefined in only one half of the mask, the halves
10925 /// are not considered identical.
10926 static bool hasIdenticalHalvesShuffleMask(ArrayRef<int> Mask) {
10927 assert(Mask.size() % 2 == 0 && "Expecting even number of elements in mask");
10928 unsigned HalfSize = Mask.size() / 2;
10929 for (unsigned i = 0; i != HalfSize; ++i) {
10930 if (Mask[i] != Mask[i + HalfSize])
10931 return false;
10932 }
10933 return true;
10934 }
10936 /// Get a 4-lane 8-bit shuffle immediate for a mask.
10938 /// This helper function produces an 8-bit shuffle immediate corresponding to
10939 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
10940 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions, for
10941 /// example.
10943 /// NB: We rely heavily on "undef" masks preserving the input lane.
10944 static unsigned getV4X86ShuffleImm(ArrayRef<int> Mask) {
10945 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
10946 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
10947 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
10948 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
10949 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
10951 unsigned Imm = 0;
10952 Imm |= (Mask[0] < 0 ? 0 : Mask[0]) << 0;
10953 Imm |= (Mask[1] < 0 ? 1 : Mask[1]) << 2;
10954 Imm |= (Mask[2] < 0 ? 2 : Mask[2]) << 4;
10955 Imm |= (Mask[3] < 0 ? 3 : Mask[3]) << 6;
10956 return Imm;
10957 }
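// For example, the mask <3, 1, 2, 0> encodes as
//   Imm = (3 << 0) | (1 << 2) | (2 << 4) | (0 << 6) = 0x27,
// so PSHUFD with immediate 0x27 swaps the outermost pair of elements and
// leaves the middle two in place.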
10959 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask, const SDLoc &DL,
10960 SelectionDAG &DAG) {
10961 return DAG.getTargetConstant(getV4X86ShuffleImm(Mask), DL, MVT::i8);
10962 }
10964 // The shuffle result takes the form:
10965 // 0*a[0] 0*a[1] ... 0*a[n], n >= 0, where the a[i] are source elements in
10966 // ascending order. Each element of Zeroable corresponds to a particular
10967 // element of Mask, as described in computeZeroableShuffleElements.
10969 // The function looks for a sub-mask whose nonzero elements are in
10970 // increasing order. If such a sub-mask exists, the function returns true.
10971 static bool isNonZeroElementsInOrder(const APInt &Zeroable,
10972 ArrayRef<int> Mask, const EVT &VectorType,
10973 bool &IsZeroSideLeft) {
10974 int NextElement = -1;
10975 // Check if the Mask's nonzero elements are in increasing order.
10976 for (int i = 0, e = Mask.size(); i < e; i++) {
10977 // Check that the mask's zeroed elements are built from only zeros.
10978 assert(Mask[i] >= -1 && "Out of bound mask element!");
10979 if (Mask[i] < 0)
10980 return false;
10981 if (Zeroable[i])
10982 continue;
10983 // Find the lowest non zero element
10984 if (NextElement < 0) {
10985 NextElement = Mask[i] != 0 ? VectorType.getVectorNumElements() : 0;
10986 IsZeroSideLeft = NextElement != 0;
10987 }
10988 // Exit if the mask's non zero elements are not in increasing order.
10989 if (NextElement != Mask[i])
10990 return false;
10991 NextElement++;
10992 }
10994 return true;
10995 }
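// For example, the v4i32 mask <0, zz, 1, zz> (elements 1 and 3 zeroable) has
// its nonzero elements 0 and 1 in increasing order, so this returns true with
// IsZeroSideLeft == false (the first nonzero mask element is 0).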
10996 /// Try to lower a shuffle with a single PSHUFB of V1 or V2.
10997 static SDValue lowerShuffleWithPSHUFB(const SDLoc &DL, MVT VT,
10998 ArrayRef<int> Mask, SDValue V1,
10999 SDValue V2, const APInt &Zeroable,
11000 const X86Subtarget &Subtarget,
11001 SelectionDAG &DAG) {
11002 int Size = Mask.size();
11003 int LaneSize = 128 / VT.getScalarSizeInBits();
11004 const int NumBytes = VT.getSizeInBits() / 8;
11005 const int NumEltBytes = VT.getScalarSizeInBits() / 8;
11007 assert((Subtarget.hasSSSE3() && VT.is128BitVector()) ||
11008 (Subtarget.hasAVX2() && VT.is256BitVector()) ||
11009 (Subtarget.hasBWI() && VT.is512BitVector()));
11011 SmallVector<SDValue, 64> PSHUFBMask(NumBytes);
11012 // Sign bit set in i8 mask means zero element.
11013 SDValue ZeroMask = DAG.getConstant(0x80, DL, MVT::i8);
11015 SDValue V;
11016 for (int i = 0; i < NumBytes; ++i) {
11017 int M = Mask[i / NumEltBytes];
11018 if (M < 0) {
11019 PSHUFBMask[i] = DAG.getUNDEF(MVT::i8);
11020 continue;
11021 }
11022 if (Zeroable[i / NumEltBytes]) {
11023 PSHUFBMask[i] = ZeroMask;
11024 continue;
11025 }
11027 // We can only use a single input of V1 or V2.
11028 SDValue SrcV = (M >= Size ? V2 : V1);
11029 if (V && V != SrcV)
11030 return SDValue();
11031 V = SrcV;
11032 M %= Size;
11034 // PSHUFB can't cross lanes, ensure this doesn't happen.
11035 if ((M / LaneSize) != ((i / NumEltBytes) / LaneSize))
11036 return SDValue();
11038 M = M % LaneSize;
11039 M = M * NumEltBytes + (i % NumEltBytes);
11040 PSHUFBMask[i] = DAG.getConstant(M, DL, MVT::i8);
11041 }
11042 assert(V && "Failed to find a source input");
11044 MVT I8VT = MVT::getVectorVT(MVT::i8, NumBytes);
11045 return DAG.getBitcast(
11046 VT, DAG.getNode(X86ISD::PSHUFB, DL, I8VT, DAG.getBitcast(I8VT, V),
11047 DAG.getBuildVector(I8VT, DL, PSHUFBMask)));
11048 }
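// For example, lowering the v8i16 mask <4, 4, 4, 4, zz, zz, zz, zz> builds the
// byte-level PSHUFB mask <8, 9, 8, 9, 8, 9, 8, 9, 0x80 x 8>: each i16 index M
// expands to bytes 2*M and 2*M+1, and 0x80 zeroes the corresponding byte.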
11050 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
11051 const X86Subtarget &Subtarget, SelectionDAG &DAG,
11052 const SDLoc &dl);
11054 // X86 has a dedicated shuffle pattern that can be lowered to VEXPAND.
11055 static SDValue lowerShuffleToEXPAND(const SDLoc &DL, MVT VT,
11056 const APInt &Zeroable,
11057 ArrayRef<int> Mask, SDValue &V1,
11058 SDValue &V2, SelectionDAG &DAG,
11059 const X86Subtarget &Subtarget) {
11060 bool IsLeftZeroSide = true;
11061 if (!isNonZeroElementsInOrder(Zeroable, Mask, V1.getValueType(),
11062 IsLeftZeroSide))
11063 return SDValue();
11064 unsigned VEXPANDMask = (~Zeroable).getZExtValue();
11065 MVT IntegerType =
11066 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
11067 SDValue MaskNode = DAG.getConstant(VEXPANDMask, DL, IntegerType);
11068 unsigned NumElts = VT.getVectorNumElements();
11069 assert((NumElts == 4 || NumElts == 8 || NumElts == 16) &&
11070 "Unexpected number of vector elements");
11071 SDValue VMask = getMaskNode(MaskNode, MVT::getVectorVT(MVT::i1, NumElts),
11072 Subtarget, DAG, DL);
11073 SDValue ZeroVector = getZeroVector(VT, Subtarget, DAG, DL);
11074 SDValue ExpandedVector = IsLeftZeroSide ? V2 : V1;
11075 return DAG.getNode(X86ISD::EXPAND, DL, VT, ExpandedVector, ZeroVector, VMask);
11076 }
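// For example, the v4i32 shuffle <0, zz, 1, zz> has Zeroable = 0b1010, so
// VEXPANDMask = ~Zeroable = 0b0101: consecutive source elements 0 and 1 are
// expanded into result positions 0 and 2 and the remaining lanes are zeroed.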
11078 static bool matchShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
11079 unsigned &UnpackOpcode, bool IsUnary,
11080 ArrayRef<int> TargetMask, const SDLoc &DL,
11081 SelectionDAG &DAG,
11082 const X86Subtarget &Subtarget) {
11083 int NumElts = VT.getVectorNumElements();
11085 bool Undef1 = true, Undef2 = true, Zero1 = true, Zero2 = true;
11086 for (int i = 0; i != NumElts; i += 2) {
11087 int M1 = TargetMask[i + 0];
11088 int M2 = TargetMask[i + 1];
11089 Undef1 &= (SM_SentinelUndef == M1);
11090 Undef2 &= (SM_SentinelUndef == M2);
11091 Zero1 &= isUndefOrZero(M1);
11092 Zero2 &= isUndefOrZero(M2);
11093 }
11094 assert(!((Undef1 || Zero1) && (Undef2 || Zero2)) &&
11095 "Zeroable shuffle detected");
11097 // Attempt to match the target mask against the unpack lo/hi mask patterns.
11098 SmallVector<int, 64> Unpckl, Unpckh;
11099 createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, IsUnary);
11100 if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
11101 UnpackOpcode = X86ISD::UNPCKL;
11102 V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
11103 V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
11104 return true;
11105 }
11107 createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, IsUnary);
11108 if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
11109 UnpackOpcode = X86ISD::UNPCKH;
11110 V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
11111 V1 = (Undef1 ? DAG.getUNDEF(VT) : V1);
11112 return true;
11113 }
11115 // If an unary shuffle, attempt to match as an unpack lo/hi with zero.
11116 if (IsUnary && (Zero1 || Zero2)) {
11117 // Don't bother if we can blend instead.
11118 if ((Subtarget.hasSSE41() || VT == MVT::v2i64 || VT == MVT::v2f64) &&
11119 isSequentialOrUndefOrZeroInRange(TargetMask, 0, NumElts, 0))
11120 return false;
11122 bool MatchLo = true, MatchHi = true;
11123 for (int i = 0; (i != NumElts) && (MatchLo || MatchHi); ++i) {
11124 int M = TargetMask[i];
11126 // Ignore if the input is known to be zero or the index is undef.
11127 if ((((i & 1) == 0) && Zero1) || (((i & 1) == 1) && Zero2) ||
11128 (M == SM_SentinelUndef))
11131 MatchLo &= (M == Unpckl[i]);
11132 MatchHi &= (M == Unpckh[i]);
11133 }
11135 if (MatchLo || MatchHi) {
11136 UnpackOpcode = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
11137 V2 = Zero2 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
11138 V1 = Zero1 ? getZeroVector(VT, Subtarget, DAG, DL) : V1;
11139 return true;
11140 }
11141 }
11143 // If a binary shuffle, commute and try again.
11144 if (!IsUnary) {
11145 ShuffleVectorSDNode::commuteMask(Unpckl);
11146 if (isTargetShuffleEquivalent(TargetMask, Unpckl)) {
11147 UnpackOpcode = X86ISD::UNPCKL;
11148 std::swap(V1, V2);
11149 return true;
11150 }
11152 ShuffleVectorSDNode::commuteMask(Unpckh);
11153 if (isTargetShuffleEquivalent(TargetMask, Unpckh)) {
11154 UnpackOpcode = X86ISD::UNPCKH;
11155 std::swap(V1, V2);
11156 return true;
11157 }
11158 }
11160 return false;
11161 }
11163 // X86 has dedicated unpack instructions that can handle specific blend
11164 // operations: UNPCKH and UNPCKL.
11165 static SDValue lowerShuffleWithUNPCK(const SDLoc &DL, MVT VT,
11166 ArrayRef<int> Mask, SDValue V1, SDValue V2,
11167 SelectionDAG &DAG) {
11168 SmallVector<int, 8> Unpckl;
11169 createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, /* Unary = */ false);
11170 if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
11171 return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
11173 SmallVector<int, 8> Unpckh;
11174 createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, /* Unary = */ false);
11175 if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
11176 return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
11178 // Commute and try again.
11179 ShuffleVectorSDNode::commuteMask(Unpckl);
11180 if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
11181 return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1);
11183 ShuffleVectorSDNode::commuteMask(Unpckh);
11184 if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
11185 return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1);
11187 return SDValue();
11188 }
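// For example, the v4i32 mask <0, 4, 1, 5> matches the binary UNPCKL pattern
// (interleaving the low halves of V1 and V2), while <2, 6, 3, 7> matches
// UNPCKH on the high halves.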
11190 /// Check if the mask can be mapped to a preliminary 64-bit shuffle (vperm)
11191 /// followed by a 256-bit unpack.
11192 static SDValue lowerShuffleWithUNPCK256(const SDLoc &DL, MVT VT,
11193 ArrayRef<int> Mask, SDValue V1,
11194 SDValue V2, SelectionDAG &DAG) {
11195 SmallVector<int, 32> Unpckl, Unpckh;
11196 createSplat2ShuffleMask(VT, Unpckl, /* Lo */ true);
11197 createSplat2ShuffleMask(VT, Unpckh, /* Lo */ false);
11199 unsigned UnpackOpcode;
11200 if (isShuffleEquivalent(V1, V2, Mask, Unpckl))
11201 UnpackOpcode = X86ISD::UNPCKL;
11202 else if (isShuffleEquivalent(V1, V2, Mask, Unpckh))
11203 UnpackOpcode = X86ISD::UNPCKH;
11204 else
11205 return SDValue();
11207 // This is a "natural" unpack operation (rather than the 128-bit sectored
11208 // operation implemented by AVX). We need to rearrange 64-bit chunks of the
11209 // input in order to use the x86 instruction.
11210 V1 = DAG.getVectorShuffle(MVT::v4f64, DL, DAG.getBitcast(MVT::v4f64, V1),
11211 DAG.getUNDEF(MVT::v4f64), {0, 2, 1, 3});
11212 V1 = DAG.getBitcast(VT, V1);
11213 return DAG.getNode(UnpackOpcode, DL, VT, V1, V1);
11214 }
11216 // Check if the mask can be mapped to a TRUNCATE or VTRUNC, truncating the
11217 // source into the lower elements and zeroing the upper elements.
11218 // TODO: Merge with matchShuffleAsVPMOV.
11219 static bool matchShuffleAsVTRUNC(MVT &SrcVT, MVT &DstVT, MVT VT,
11220 ArrayRef<int> Mask, const APInt &Zeroable,
11221 const X86Subtarget &Subtarget) {
11222 if (!VT.is512BitVector() && !Subtarget.hasVLX())
11223 return false;
11225 unsigned NumElts = Mask.size();
11226 unsigned EltSizeInBits = VT.getScalarSizeInBits();
11227 unsigned MaxScale = 64 / EltSizeInBits;
11229 for (unsigned Scale = 2; Scale <= MaxScale; Scale += Scale) {
11230 unsigned SrcEltBits = EltSizeInBits * Scale;
11231 if (SrcEltBits < 32 && !Subtarget.hasBWI())
11232 continue;
11233 unsigned NumSrcElts = NumElts / Scale;
11234 if (!isSequentialOrUndefInRange(Mask, 0, NumSrcElts, 0, Scale))
11235 continue;
11236 unsigned UpperElts = NumElts - NumSrcElts;
11237 if (!Zeroable.extractBits(UpperElts, NumSrcElts).isAllOnesValue())
11238 continue;
11239 SrcVT = MVT::getIntegerVT(EltSizeInBits * Scale);
11240 SrcVT = MVT::getVectorVT(SrcVT, NumSrcElts);
11241 DstVT = MVT::getIntegerVT(EltSizeInBits);
11242 if ((NumSrcElts * EltSizeInBits) >= 128) {
11244 DstVT = MVT::getVectorVT(DstVT, NumSrcElts);
11245 } else {
11247 DstVT = MVT::getVectorVT(DstVT, 128 / EltSizeInBits);
11248 }
11249 return true;
11250 }
11252 return false;
11253 }
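// For example, for VT = v16i8 the mask <0, 2, 4, 6, 8, 10, 12, 14, zz x 8>
// matches at Scale = 2: SrcVT becomes v8i16 and DstVT v16i8, i.e. the low 8
// words are truncated into the low 8 bytes and the upper bytes are zeroed.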
11255 static bool matchShuffleAsVPMOV(ArrayRef<int> Mask, bool SwappedOps,
11256 int Delta) {
11257 int Size = (int)Mask.size();
11258 int Split = Size / Delta;
11259 int TruncatedVectorStart = SwappedOps ? Size : 0;
11261 // Match for mask starting with e.g.: <8, 10, 12, 14,... or <0, 2, 4, 6,...
11262 if (!isSequentialOrUndefInRange(Mask, 0, Split, TruncatedVectorStart, Delta))
11263 return false;
11265 // The rest of the mask should not refer to the truncated vector's elements.
11266 if (isAnyInRange(Mask.slice(Split, Size - Split), TruncatedVectorStart,
11267 TruncatedVectorStart + Size))
11268 return false;
11270 return true;
11271 }
11273 // Try to lower trunc+vector_shuffle to a vpmovdb or a vpmovdw instruction.
11275 // An example is the following:
11277 // t0: ch = EntryToken
11278 // t2: v4i64,ch = CopyFromReg t0, Register:v4i64 %0
11279 // t25: v4i32 = truncate t2
11280 // t41: v8i16 = bitcast t25
11281 // t21: v8i16 = BUILD_VECTOR undef:i16, undef:i16, undef:i16, undef:i16,
11282 // Constant:i16<0>, Constant:i16<0>, Constant:i16<0>, Constant:i16<0>
11283 // t51: v8i16 = vector_shuffle<0,2,4,6,12,13,14,15> t41, t21
11284 // t18: v2i64 = bitcast t51
11286 // Without avx512vl, this is lowered to:
11288 // vpmovqd %zmm0, %ymm0
11289 // vpshufb {{.*#+}} xmm0 =
11290 // xmm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero
11292 // But when avx512vl is available, one can just use a single vpmovdw
11293 // instruction.
11294 static SDValue lowerShuffleWithVPMOV(const SDLoc &DL, ArrayRef<int> Mask,
11295 MVT VT, SDValue V1, SDValue V2,
11296 SelectionDAG &DAG,
11297 const X86Subtarget &Subtarget) {
11298 if (VT != MVT::v16i8 && VT != MVT::v8i16)
11299 return SDValue();
11301 if (Mask.size() != VT.getVectorNumElements())
11302 return SDValue();
11304 bool SwappedOps = false;
11306 if (!ISD::isBuildVectorAllZeros(V2.getNode())) {
11307 if (!ISD::isBuildVectorAllZeros(V1.getNode()))
11308 return SDValue();
11310 std::swap(V1, V2);
11311 SwappedOps = true;
11312 }
11314 // Look for:
11315 //
11316 // bitcast (truncate <8 x i32> %vec to <8 x i16>) to <16 x i8>
11317 // bitcast (truncate <4 x i64> %vec to <4 x i32>) to <8 x i16>
11319 // and similar ones.
11320 if (V1.getOpcode() != ISD::BITCAST)
11321 return SDValue();
11322 if (V1.getOperand(0).getOpcode() != ISD::TRUNCATE)
11323 return SDValue();
11325 SDValue Src = V1.getOperand(0).getOperand(0);
11326 MVT SrcVT = Src.getSimpleValueType();
11328 // The vptrunc** instructions truncating 128 bit and 256 bit vectors
11329 // are only available with avx512vl.
11330 if (!SrcVT.is512BitVector() && !Subtarget.hasVLX())
11331 return SDValue();
11333 // Down Convert Word to Byte is only available with avx512bw. The case with
11334 // 256-bit output doesn't contain a shuffle and is therefore not handled here.
11335 if (SrcVT.getVectorElementType() == MVT::i16 && VT == MVT::v16i8 &&
11336 !Subtarget.hasBWI())
11337 return SDValue();
11339 // The first half/quarter of the mask should refer to every second/fourth
11340 // element of the vector truncated and bitcasted.
11341 if (!matchShuffleAsVPMOV(Mask, SwappedOps, 2) &&
11342 !matchShuffleAsVPMOV(Mask, SwappedOps, 4))
11343 return SDValue();
11345 return DAG.getNode(X86ISD::VTRUNC, DL, VT, Src);
11346 }
11348 /// Check whether a compaction lowering can be done by dropping even
11349 /// elements and compute how many times even elements must be dropped.
11351 /// This handles shuffles which take every Nth element where N is a power of
11352 /// two. Example shuffle masks:
11354 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
11355 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
11356 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
11357 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
11358 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
11359 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
11361 /// Any of these lanes can of course be undef.
11363 /// This routine only supports N <= 3.
11364 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
11365 /// for larger N.
11367 /// \returns N above, or the number of times even elements must be dropped if
11368 /// there is such a number. Otherwise returns zero.
11369 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask,
11370 bool IsSingleInput) {
11371 // The modulus for the shuffle vector entries is based on whether this is
11372 // a single input or not.
11373 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
11374 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
11375 "We should only be called with masks with a power-of-2 size!");
11377 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
11379 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
11380 // and 2^3 simultaneously. This is because we may have ambiguity with
11381 // partially undef inputs.
11382 bool ViableForN[3] = {true, true, true};
11384 for (int i = 0, e = Mask.size(); i < e; ++i) {
11385 // Ignore undef lanes, we'll optimistically collapse them to the pattern we
11386 // want.
11387 if (Mask[i] < 0)
11388 continue;
11390 bool IsAnyViable = false;
11391 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
11392 if (ViableForN[j]) {
11393 uint64_t N = j + 1;
11395 // The shuffle mask must be equal to (i * 2^N) % M.
11396 if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
11397 IsAnyViable = true;
11398 else
11399 ViableForN[j] = false;
11400 }
11401 // Early exit if we exhaust the possible powers of two.
11402 if (!IsAnyViable)
11403 break;
11404 }
11406 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
11407 if (ViableForN[j])
11408 return j + 1;
11410 // Return 0 as there is no viable power of two.
11411 return 0;
11412 }
11414 // X86 has dedicated pack instructions that can handle specific truncation
11415 // operations: PACKSS and PACKUS.
11416 // Checks for compaction shuffle masks if MaxStages > 1.
11417 // TODO: Add support for matching multiple PACKSS/PACKUS stages.
11418 static bool matchShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1, SDValue &V2,
11419 unsigned &PackOpcode, ArrayRef<int> TargetMask,
11420 SelectionDAG &DAG,
11421 const X86Subtarget &Subtarget,
11422 unsigned MaxStages = 1) {
11423 unsigned NumElts = VT.getVectorNumElements();
11424 unsigned BitSize = VT.getScalarSizeInBits();
11425 assert(0 < MaxStages && MaxStages <= 3 && (BitSize << MaxStages) <= 64 &&
11426 "Illegal maximum compaction");
11428 auto MatchPACK = [&](SDValue N1, SDValue N2, MVT PackVT) {
11429 unsigned NumSrcBits = PackVT.getScalarSizeInBits();
11430 unsigned NumPackedBits = NumSrcBits - BitSize;
11431 SDValue VV1 = DAG.getBitcast(PackVT, N1);
11432 SDValue VV2 = DAG.getBitcast(PackVT, N2);
11433 if (Subtarget.hasSSE41() || BitSize == 8) {
11434 APInt ZeroMask = APInt::getHighBitsSet(NumSrcBits, NumPackedBits);
11435 if ((N1.isUndef() || DAG.MaskedValueIsZero(VV1, ZeroMask)) &&
11436 (N2.isUndef() || DAG.MaskedValueIsZero(VV2, ZeroMask))) {
11437 V1 = VV1;
11438 V2 = VV2;
11439 SrcVT = PackVT;
11440 PackOpcode = X86ISD::PACKUS;
11441 return true;
11442 }
11443 }
11444 if ((N1.isUndef() || DAG.ComputeNumSignBits(VV1) > NumPackedBits) &&
11445 (N2.isUndef() || DAG.ComputeNumSignBits(VV2) > NumPackedBits)) {
11446 V1 = VV1;
11447 V2 = VV2;
11448 SrcVT = PackVT;
11449 PackOpcode = X86ISD::PACKSS;
11450 return true;
11451 }
11452 return false;
11453 };
11455 // Attempt to match against wider and wider compaction patterns.
11456 for (unsigned NumStages = 1; NumStages <= MaxStages; ++NumStages) {
11457 MVT PackSVT = MVT::getIntegerVT(BitSize << NumStages);
11458 MVT PackVT = MVT::getVectorVT(PackSVT, NumElts >> NumStages);
11460 // Try binary shuffle.
11461 SmallVector<int, 32> BinaryMask;
11462 createPackShuffleMask(VT, BinaryMask, false, NumStages);
11463 if (isTargetShuffleEquivalent(TargetMask, BinaryMask, V1, V2))
11464 if (MatchPACK(V1, V2, PackVT))
11465 return true;
11467 // Try unary shuffle.
11468 SmallVector<int, 32> UnaryMask;
11469 createPackShuffleMask(VT, UnaryMask, true, NumStages);
11470 if (isTargetShuffleEquivalent(TargetMask, UnaryMask, V1))
11471 if (MatchPACK(V1, V1, PackVT))
11472 return true;
11473 }
11475 return false;
11476 }
11478 static SDValue lowerShuffleWithPACK(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
11479 SDValue V1, SDValue V2, SelectionDAG &DAG,
11480 const X86Subtarget &Subtarget) {
11481 MVT PackVT;
11482 unsigned PackOpcode;
11483 unsigned SizeBits = VT.getSizeInBits();
11484 unsigned EltBits = VT.getScalarSizeInBits();
11485 unsigned MaxStages = Log2_32(64 / EltBits);
11486 if (!matchShuffleWithPACK(VT, PackVT, V1, V2, PackOpcode, Mask, DAG,
11487 Subtarget, MaxStages))
11488 return SDValue();
11490 unsigned CurrentEltBits = PackVT.getScalarSizeInBits();
11491 unsigned NumStages = Log2_32(CurrentEltBits / EltBits);
11493 // Don't lower multi-stage packs on AVX512, truncation is better.
11494 if (NumStages != 1 && SizeBits == 128 && Subtarget.hasVLX())
11495 return SDValue();
11497 // Pack to the largest type possible:
11498 // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
11499 unsigned MaxPackBits = 16;
11500 if (CurrentEltBits > 16 &&
11501 (PackOpcode == X86ISD::PACKSS || Subtarget.hasSSE41()))
11502 MaxPackBits = 32;
11504 // Repeatedly pack down to the target size.
11505 SDValue Res;
11506 for (unsigned i = 0; i != NumStages; ++i) {
11507 unsigned SrcEltBits = std::min(MaxPackBits, CurrentEltBits);
11508 unsigned NumSrcElts = SizeBits / SrcEltBits;
11509 MVT SrcSVT = MVT::getIntegerVT(SrcEltBits);
11510 MVT DstSVT = MVT::getIntegerVT(SrcEltBits / 2);
11511 MVT SrcVT = MVT::getVectorVT(SrcSVT, NumSrcElts);
11512 MVT DstVT = MVT::getVectorVT(DstSVT, NumSrcElts * 2);
11513 Res = DAG.getNode(PackOpcode, DL, DstVT, DAG.getBitcast(SrcVT, V1),
11514 DAG.getBitcast(SrcVT, V2));
11515 V1 = V2 = Res;
11516 CurrentEltBits /= 2;
11517 }
11518 assert(Res && Res.getValueType() == VT &&
11519 "Failed to lower compaction shuffle");
11520 return Res;
11521 }
11523 /// Try to emit a bitmask instruction for a shuffle.
11525 /// This handles cases where we can model a blend exactly as a bitmask due to
11526 /// one of the inputs being zeroable.
11527 static SDValue lowerShuffleAsBitMask(const SDLoc &DL, MVT VT, SDValue V1,
11528 SDValue V2, ArrayRef<int> Mask,
11529 const APInt &Zeroable,
11530 const X86Subtarget &Subtarget,
11531 SelectionDAG &DAG) {
11532 MVT MaskVT = VT;
11533 MVT EltVT = VT.getVectorElementType();
11534 SDValue Zero, AllOnes;
11535 // Use f64 if i64 isn't legal.
11536 if (EltVT == MVT::i64 && !Subtarget.is64Bit()) {
11537 EltVT = MVT::f64;
11538 MaskVT = MVT::getVectorVT(EltVT, Mask.size());
11539 }
11541 MVT LogicVT = VT;
11542 if (EltVT == MVT::f32 || EltVT == MVT::f64) {
11543 Zero = DAG.getConstantFP(0.0, DL, EltVT);
11544 APFloat AllOnesValue = APFloat::getAllOnesValue(
11545 SelectionDAG::EVTToAPFloatSemantics(EltVT), EltVT.getSizeInBits());
11546 AllOnes = DAG.getConstantFP(AllOnesValue, DL, EltVT);
11547 LogicVT =
11548 MVT::getVectorVT(EltVT == MVT::f64 ? MVT::i64 : MVT::i32, Mask.size());
11549 } else {
11550 Zero = DAG.getConstant(0, DL, EltVT);
11551 AllOnes = DAG.getAllOnesConstant(DL, EltVT);
11552 }
11554 SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
11555 SDValue V;
11556 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11557 if (Mask[i] < 0)
11558 continue;
11559 if (Mask[i] % Size != i)
11560 return SDValue(); // Not a blend.
11561 if (!V)
11562 V = Mask[i] < Size ? V1 : V2;
11563 else if (V != (Mask[i] < Size ? V1 : V2))
11564 return SDValue(); // Can only let one input through the mask.
11566 VMaskOps[i] = AllOnes;
11567 }
11568 if (!V)
11569 return SDValue(); // No non-zeroable elements!
11571 SDValue VMask = DAG.getBuildVector(MaskVT, DL, VMaskOps);
11572 VMask = DAG.getBitcast(LogicVT, VMask);
11573 V = DAG.getBitcast(LogicVT, V);
11574 SDValue And = DAG.getNode(ISD::AND, DL, LogicVT, V, VMask);
11575 return DAG.getBitcast(VT, And);
11576 }
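// For example, the v4i32 shuffle <0, zz, 2, zz> is a blend of V1 with zeros,
// so it lowers to AND(V1, <-1, 0, -1, 0>): elements 0 and 2 pass through and
// elements 1 and 3 are cleared.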
11578 /// Try to emit a blend instruction for a shuffle using bit math.
11580 /// This is used as a fallback approach when first class blend instructions are
11581 /// unavailable. Currently it is only suitable for integer vectors, but could
11582 /// be generalized for floating point vectors if desirable.
11583 static SDValue lowerShuffleAsBitBlend(const SDLoc &DL, MVT VT, SDValue V1,
11584 SDValue V2, ArrayRef<int> Mask,
11585 SelectionDAG &DAG) {
11586 assert(VT.isInteger() && "Only supports integer vector types!");
11587 MVT EltVT = VT.getVectorElementType();
11588 SDValue Zero = DAG.getConstant(0, DL, EltVT);
11589 SDValue AllOnes = DAG.getAllOnesConstant(DL, EltVT);
11590 SmallVector<SDValue, 16> MaskOps;
11591 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11592 if (Mask[i] >= 0 && Mask[i] != i && Mask[i] != i + Size)
11593 return SDValue(); // Shuffled input!
11594 MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
11597 SDValue V1Mask = DAG.getBuildVector(VT, DL, MaskOps);
11598 V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
11599 V2 = DAG.getNode(X86ISD::ANDNP, DL, VT, V1Mask, V2);
11600 return DAG.getNode(ISD::OR, DL, VT, V1, V2);
11601 }
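// For example, the v4i32 mask <0, 5, 2, 7> builds V1Mask = <-1, 0, -1, 0> and
// computes OR(AND(V1, V1Mask), ANDNP(V1Mask, V2)), taking elements 0 and 2
// from V1 and elements 1 and 3 from V2.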
11603 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
11604 SDValue PreservedSrc,
11605 const X86Subtarget &Subtarget,
11606 SelectionDAG &DAG);
11608 static bool matchShuffleAsBlend(SDValue V1, SDValue V2,
11609 MutableArrayRef<int> Mask,
11610 const APInt &Zeroable, bool &ForceV1Zero,
11611 bool &ForceV2Zero, uint64_t &BlendMask) {
11612 bool V1IsZeroOrUndef =
11613 V1.isUndef() || ISD::isBuildVectorAllZeros(V1.getNode());
11614 bool V2IsZeroOrUndef =
11615 V2.isUndef() || ISD::isBuildVectorAllZeros(V2.getNode());
11617 BlendMask = 0;
11618 ForceV1Zero = false, ForceV2Zero = false;
11619 assert(Mask.size() <= 64 && "Shuffle mask too big for blend mask");
11621 // Attempt to generate the binary blend mask. If an input is zero then
11622 // we can use any lane.
11623 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11624 int M = Mask[i];
11625 if (M == SM_SentinelUndef)
11626 continue;
11627 if (M == i)
11628 continue;
11629 if (M == i + Size) {
11630 BlendMask |= 1ull << i;
11631 continue;
11632 }
11633 if (Zeroable[i]) {
11634 if (V1IsZeroOrUndef) {
11635 ForceV1Zero = true;
11636 Mask[i] = i;
11637 continue;
11638 }
11639 if (V2IsZeroOrUndef) {
11640 ForceV2Zero = true;
11641 BlendMask |= 1ull << i;
11642 Mask[i] = i + Size;
11643 continue;
11644 }
11645 }
11646 return false;
11647 }
11649 return true;
11650 }
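// For example, the v4i32 mask <0, 5, 2, 7> is a blend that takes elements 1
// and 3 from V2, so BlendMask = 0b1010.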
11651 static uint64_t scaleVectorShuffleBlendMask(uint64_t BlendMask, int Size,
11652 int Scale) {
11653 uint64_t ScaledMask = 0;
11654 for (int i = 0; i != Size; ++i)
11655 if (BlendMask & (1ull << i))
11656 ScaledMask |= ((1ull << Scale) - 1) << (i * Scale);
11658 return ScaledMask;
11659 }
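// For example, scaling BlendMask = 0b0101 (Size = 4) by Scale = 2 yields
// 0b00110011: each selected element expands to Scale consecutive mask bits.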
11660 /// Try to emit a blend instruction for a shuffle.
11662 /// This doesn't do any checks for the availability of instructions for blending
11663 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
11664 /// be matched in the backend with the type given. What it does check for is
11665 /// that the shuffle mask is a blend, or convertible into a blend with zero.
11666 static SDValue lowerShuffleAsBlend(const SDLoc &DL, MVT VT, SDValue V1,
11667 SDValue V2, ArrayRef<int> Original,
11668 const APInt &Zeroable,
11669 const X86Subtarget &Subtarget,
11670 SelectionDAG &DAG) {
11671 uint64_t BlendMask = 0;
11672 bool ForceV1Zero = false, ForceV2Zero = false;
11673 SmallVector<int, 64> Mask(Original.begin(), Original.end());
11674 if (!matchShuffleAsBlend(V1, V2, Mask, Zeroable, ForceV1Zero, ForceV2Zero,
11675 BlendMask))
11676 return SDValue();
11678 // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
11679 if (ForceV1Zero)
11680 V1 = getZeroVector(VT, Subtarget, DAG, DL);
11681 if (ForceV2Zero)
11682 V2 = getZeroVector(VT, Subtarget, DAG, DL);
11684 switch (VT.SimpleTy) {
11685 case MVT::v4i64:
11686 case MVT::v8i32:
11687 assert(Subtarget.hasAVX2() && "256-bit integer blends require AVX2!");
11688 LLVM_FALLTHROUGH;
11689 case MVT::v4f64:
11690 case MVT::v8f32:
11691 assert(Subtarget.hasAVX() && "256-bit float blends require AVX!");
11692 LLVM_FALLTHROUGH;
11693 case MVT::v2f64:
11694 case MVT::v2i64:
11695 case MVT::v4f32:
11696 case MVT::v4i32:
11697 case MVT::v8i16:
11698 assert(Subtarget.hasSSE41() && "128-bit blends require SSE41!");
11699 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
11700 DAG.getTargetConstant(BlendMask, DL, MVT::i8));
11701 case MVT::v16i16: {
11702 assert(Subtarget.hasAVX2() && "v16i16 blends require AVX2!");
11703 SmallVector<int, 8> RepeatedMask;
11704 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
11705 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
11706 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
11707 BlendMask = 0;
11708 for (int i = 0; i < 8; ++i)
11709 if (RepeatedMask[i] >= 8)
11710 BlendMask |= 1ull << i;
11711 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
11712 DAG.getTargetConstant(BlendMask, DL, MVT::i8));
11714 // Use PBLENDW for lower/upper lanes and then blend lanes.
11715 // TODO - we should allow 2 PBLENDW here and leave shuffle combine to
11716 // merge to VSELECT where useful.
11717 uint64_t LoMask = BlendMask & 0xFF;
11718 uint64_t HiMask = (BlendMask >> 8) & 0xFF;
11719 if (LoMask == 0 || LoMask == 255 || HiMask == 0 || HiMask == 255) {
11720 SDValue Lo = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
11721 DAG.getTargetConstant(LoMask, DL, MVT::i8));
11722 SDValue Hi = DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
11723 DAG.getTargetConstant(HiMask, DL, MVT::i8));
11724 return DAG.getVectorShuffle(
11725 MVT::v16i16, DL, Lo, Hi,
11726 {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31});
11727 }
11728 break;
11729 }
11730 case MVT::v32i8:
11731 assert(Subtarget.hasAVX2() && "256-bit byte-blends require AVX2!");
11732 LLVM_FALLTHROUGH;
11733 case MVT::v16i8: {
11734 assert(Subtarget.hasSSE41() && "128-bit byte-blends require SSE41!");
11736 // Attempt to lower to a bitmask if we can. VPAND is faster than VPBLENDVB.
11737 if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
11738 Subtarget, DAG))
11739 return Masked;
11741 if (Subtarget.hasBWI() && Subtarget.hasVLX()) {
11742 MVT IntegerType =
11743 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
11744 SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
11745 return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
11746 }
11748 // If we have VPTERNLOG, we can use that as a bit blend.
11749 if (Subtarget.hasVLX())
11750 if (SDValue BitBlend =
11751 lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
11752 return BitBlend;
11754 // Scale the blend by the number of bytes per element.
11755 int Scale = VT.getScalarSizeInBits() / 8;
11757 // This form of blend is always done on bytes. Compute the byte vector
11758 // type.
11759 MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11761 // x86 allows load folding with blendvb from the 2nd source operand. But
11762 // we are still using LLVM select here (see comment below), so that's V1.
11763 // If V2 can be load-folded and V1 cannot be load-folded, then commute to
11764 // allow that load-folding possibility.
11765 if (!ISD::isNormalLoad(V1.getNode()) && ISD::isNormalLoad(V2.getNode())) {
11766 ShuffleVectorSDNode::commuteMask(Mask);
11767 std::swap(V1, V2);
11768 }
11770 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
11771 // mix of LLVM's code generator and the x86 backend. We tell the code
11772 // generator that boolean values in the elements of an x86 vector register
11773 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
11774 // mapping a select to operand #1, and 'false' mapping to operand #2. The
11775 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
11776 // of the element (the remaining are ignored) and 0 in that high bit would
11777 // mean operand #1 while 1 in the high bit would mean operand #2. So while
11778 // the LLVM model for boolean values in vector elements gets the relevant
11779 // bit set, it is set backwards and over constrained relative to x86's
11780 // actual model.
11781 SmallVector<SDValue, 32> VSELECTMask;
11782 for (int i = 0, Size = Mask.size(); i < Size; ++i)
11783 for (int j = 0; j < Scale; ++j)
11784 VSELECTMask.push_back(
11785 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
11786 : DAG.getConstant(Mask[i] < Size ? -1 : 0, DL,
11787 MVT::i8));
11789 V1 = DAG.getBitcast(BlendVT, V1);
11790 V2 = DAG.getBitcast(BlendVT, V2);
11791 return DAG.getBitcast(
11792 VT,
11793 DAG.getSelect(DL, BlendVT, DAG.getBuildVector(BlendVT, DL, VSELECTMask),
11794 V1, V2));
11795 }
11796 case MVT::v16f32:
11797 case MVT::v8f64:
11798 case MVT::v8i64:
11799 case MVT::v16i32:
11800 case MVT::v32i16:
11801 case MVT::v64i8: {
11803 bool OptForSize = DAG.shouldOptForSize();
11804 if (!OptForSize) {
11805 if (SDValue Masked = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
11806 Subtarget, DAG))
11807 return Masked;
11808 }
11810 // Otherwise load an immediate into a GPR, cast to k-register, and use a
11811 // masked select.
11812 MVT IntegerType =
11813 MVT::getIntegerVT(std::max((int)VT.getVectorNumElements(), 8));
11814 SDValue MaskNode = DAG.getConstant(BlendMask, DL, IntegerType);
11815 return getVectorMaskingNode(V2, MaskNode, V1, Subtarget, DAG);
11816 }
11817 default:
11818 llvm_unreachable("Not a supported integer vector type!");
11819 }
11820 }
11822 /// Try to lower as a blend of elements from two inputs followed by
11823 /// a single-input permutation.
11825 /// This matches the pattern where we can blend elements from two inputs and
11826 /// then reduce the shuffle to a single-input permutation.
11827 static SDValue lowerShuffleAsBlendAndPermute(const SDLoc &DL, MVT VT,
11828 SDValue V1, SDValue V2,
11829 ArrayRef<int> Mask,
11830 SelectionDAG &DAG,
11831 bool ImmBlends = false) {
11832 // We build up the blend mask while checking whether a blend is a viable way
11833 // to reduce the shuffle.
11834 SmallVector<int, 32> BlendMask(Mask.size(), -1);
11835 SmallVector<int, 32> PermuteMask(Mask.size(), -1);
11837 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
11838 if (Mask[i] < 0)
11839 continue;
11841 assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
11843 if (BlendMask[Mask[i] % Size] < 0)
11844 BlendMask[Mask[i] % Size] = Mask[i];
11845 else if (BlendMask[Mask[i] % Size] != Mask[i])
11846 return SDValue(); // Can't blend in the needed input!
11848 PermuteMask[i] = Mask[i] % Size;
11849 }
11851 // If only immediate blends, then bail if the blend mask can't be widened to
11852 // i16.
11853 unsigned EltSize = VT.getScalarSizeInBits();
11854 if (ImmBlends && EltSize == 8 && !canWidenShuffleElements(BlendMask))
11855 return SDValue();
11857 SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
11858 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
11861 /// Try to lower as an unpack of elements from two inputs followed by
11862 /// a single-input permutation.
11864 /// This matches the pattern where we can unpack elements from two inputs and
11865 /// then reduce the shuffle to a single-input (wider) permutation.
11866 static SDValue lowerShuffleAsUNPCKAndPermute(const SDLoc &DL, MVT VT,
11867 SDValue V1, SDValue V2,
11868 ArrayRef<int> Mask,
11869 SelectionDAG &DAG) {
11870 int NumElts = Mask.size();
11871 int NumLanes = VT.getSizeInBits() / 128;
11872 int NumLaneElts = NumElts / NumLanes;
11873 int NumHalfLaneElts = NumLaneElts / 2;
11875 bool MatchLo = true, MatchHi = true;
11876 SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
11878 // Determine UNPCKL/UNPCKH type and operand order.
11879 for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
11880 for (int Elt = 0; Elt != NumLaneElts; ++Elt) {
11881 int M = Mask[Lane + Elt];
11882 if (M < 0)
11883 continue;
11885 SDValue &Op = Ops[Elt & 1];
11886 if (M < NumElts && (Op.isUndef() || Op == V1))
11887 Op = V1;
11888 else if (NumElts <= M && (Op.isUndef() || Op == V2))
11889 Op = V2;
11890 else
11891 return SDValue();
11893 int Lo = Lane, Mid = Lane + NumHalfLaneElts, Hi = Lane + NumLaneElts;
11894 MatchLo &= isUndefOrInRange(M, Lo, Mid) ||
11895 isUndefOrInRange(M, NumElts + Lo, NumElts + Mid);
11896 MatchHi &= isUndefOrInRange(M, Mid, Hi) ||
11897 isUndefOrInRange(M, NumElts + Mid, NumElts + Hi);
11898 if (!MatchLo && !MatchHi)
11899 return SDValue();
11900 }
11901 }
11902 assert((MatchLo ^ MatchHi) && "Failed to match UNPCKLO/UNPCKHI");
11904 // Now check that each pair of elts come from the same unpack pair
11905 // and set the permute mask based on each pair.
11906 // TODO - Investigate cases where we permute individual elements.
11907 SmallVector<int, 32> PermuteMask(NumElts, -1);
11908 for (int Lane = 0; Lane != NumElts; Lane += NumLaneElts) {
11909 for (int Elt = 0; Elt != NumLaneElts; Elt += 2) {
11910 int M0 = Mask[Lane + Elt + 0];
11911 int M1 = Mask[Lane + Elt + 1];
11912 if (0 <= M0 && 0 <= M1 &&
11913 (M0 % NumHalfLaneElts) != (M1 % NumHalfLaneElts))
11914 return SDValue();
11915 if (0 <= M0)
11916 PermuteMask[Lane + Elt + 0] = Lane + (2 * (M0 % NumHalfLaneElts));
11917 if (0 <= M1)
11918 PermuteMask[Lane + Elt + 1] = Lane + (2 * (M1 % NumHalfLaneElts)) + 1;
11919 }
11920 }
11922 unsigned UnpckOp = MatchLo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
11923 SDValue Unpck = DAG.getNode(UnpckOp, DL, VT, Ops);
11924 return DAG.getVectorShuffle(VT, DL, Unpck, DAG.getUNDEF(VT), PermuteMask);
11925 }
11927 /// Helper to form a PALIGNR-based rotate+permute, merging 2 inputs and then
11928 /// permuting the elements of the result in place.
11929 static SDValue lowerShuffleAsByteRotateAndPermute(
11930 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
11931 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
11932 if ((VT.is128BitVector() && !Subtarget.hasSSSE3()) ||
11933 (VT.is256BitVector() && !Subtarget.hasAVX2()) ||
11934 (VT.is512BitVector() && !Subtarget.hasBWI()))
11935 return SDValue();
11937 // We don't currently support lane crossing permutes.
11938 if (is128BitLaneCrossingShuffleMask(VT, Mask))
11939 return SDValue();
11941 int Scale = VT.getScalarSizeInBits() / 8;
11942 int NumLanes = VT.getSizeInBits() / 128;
11943 int NumElts = VT.getVectorNumElements();
11944 int NumEltsPerLane = NumElts / NumLanes;
11946 // Determine range of mask elts.
11947 bool Blend1 = true;
11948 bool Blend2 = true;
11949 std::pair<int, int> Range1 = std::make_pair(INT_MAX, INT_MIN);
11950 std::pair<int, int> Range2 = std::make_pair(INT_MAX, INT_MIN);
11951 for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
11952 for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
11953 int M = Mask[Lane + Elt];
11954 if (M < 0)
11955 continue;
11956 if (M < NumElts) {
11957 Blend1 &= (M == (Lane + Elt));
11958 assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
11959 M = M % NumEltsPerLane;
11960 Range1.first = std::min(Range1.first, M);
11961 Range1.second = std::max(Range1.second, M);
11962 } else {
11963 M -= NumElts;
11964 Blend2 &= (M == (Lane + Elt));
11965 assert(Lane <= M && M < (Lane + NumEltsPerLane) && "Out of range mask");
11966 M = M % NumEltsPerLane;
11967 Range2.first = std::min(Range2.first, M);
11968 Range2.second = std::max(Range2.second, M);
11969 }
11970 }
11971 }
11973 // Bail if we don't need both elements.
11974 // TODO - it might be worth doing this for unary shuffles if the permute
11976 if (!(0 <= Range1.first && Range1.second < NumEltsPerLane) ||
11977 !(0 <= Range2.first && Range2.second < NumEltsPerLane))
11978 return SDValue();
11980 if (VT.getSizeInBits() > 128 && (Blend1 || Blend2))
11981 return SDValue();
11983 // Rotate the 2 ops so we can access both ranges, then permute the result.
11984 auto RotateAndPermute = [&](SDValue Lo, SDValue Hi, int RotAmt, int Ofs) {
11985 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
11986 SDValue Rotate = DAG.getBitcast(
11987 VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, DAG.getBitcast(ByteVT, Hi),
11988 DAG.getBitcast(ByteVT, Lo),
11989 DAG.getTargetConstant(Scale * RotAmt, DL, MVT::i8)));
11990 SmallVector<int, 64> PermMask(NumElts, SM_SentinelUndef);
11991 for (int Lane = 0; Lane != NumElts; Lane += NumEltsPerLane) {
11992 for (int Elt = 0; Elt != NumEltsPerLane; ++Elt) {
11993 int M = Mask[Lane + Elt];
11994 if (M < 0)
11995 continue;
11996 if (M < NumElts)
11997 PermMask[Lane + Elt] = Lane + ((M + Ofs - RotAmt) % NumEltsPerLane);
11998 else
11999 PermMask[Lane + Elt] = Lane + ((M - Ofs - RotAmt) % NumEltsPerLane);
12000 }
12001 }
12002 return DAG.getVectorShuffle(VT, DL, Rotate, DAG.getUNDEF(VT), PermMask);
12003 };
12005 // Check if the ranges are small enough to rotate from either direction.
12006 if (Range2.second < Range1.first)
12007 return RotateAndPermute(V1, V2, Range1.first, 0);
12008 if (Range1.second < Range2.first)
12009 return RotateAndPermute(V2, V1, Range2.first, NumElts);
12011 return SDValue();
12012 }
12013 /// Generic routine to decompose a shuffle and blend into independent
12014 /// blends and permutes.
12016 /// This matches the extremely common pattern for handling combined
12017 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
12018 /// operations. It will try to pick the best arrangement of shuffles and
12019 /// blends.
12020 static SDValue lowerShuffleAsDecomposedShuffleBlend(
12021 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
12022 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
12023 // Shuffle the input elements into the desired positions in V1 and V2 and
12024 // blend them together.
12025 SmallVector<int, 32> V1Mask(Mask.size(), -1);
12026 SmallVector<int, 32> V2Mask(Mask.size(), -1);
12027 SmallVector<int, 32> BlendMask(Mask.size(), -1);
12028 for (int i = 0, Size = Mask.size(); i < Size; ++i)
12029 if (Mask[i] >= 0 && Mask[i] < Size) {
12030 V1Mask[i] = Mask[i];
12031 BlendMask[i] = i;
12032 } else if (Mask[i] >= Size) {
12033 V2Mask[i] = Mask[i] - Size;
12034 BlendMask[i] = i + Size;
12035 }
12037 // Try to lower with the simpler initial blend/unpack/rotate strategies unless
12038 // one of the input shuffles would be a no-op. We prefer to shuffle inputs as
12039 // the shuffle may be able to fold with a load or other benefit. However, when
12040 // we'll have to do 2x as many shuffles in order to achieve this, a 2-input
12041 // pre-shuffle first is a better strategy.
12042 if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask)) {
12043 // Only prefer immediate blends to unpack/rotate.
12044 if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
12045 DAG, true))
12046 return BlendPerm;
12047 if (SDValue UnpackPerm = lowerShuffleAsUNPCKAndPermute(DL, VT, V1, V2, Mask,
12048 DAG))
12049 return UnpackPerm;
12050 if (SDValue RotatePerm = lowerShuffleAsByteRotateAndPermute(
12051 DL, VT, V1, V2, Mask, Subtarget, DAG))
12052 return RotatePerm;
12053 // Unpack/rotate failed - try again with variable blends.
12054 if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask,
12055 DAG))
12056 return BlendPerm;
12057 }
12059 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
12060 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
12061 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
12062 }
12064 /// Try to lower a vector shuffle as a bit rotation.
12066 /// Look for a repeated rotation pattern in each sub group.
12067 /// Returns a ISD::ROTL element rotation amount or -1 if failed.
12068 static int matchShuffleAsBitRotate(ArrayRef<int> Mask, int NumSubElts) {
12069 int NumElts = Mask.size();
12070 assert((NumElts % NumSubElts) == 0 && "Illegal shuffle mask");
12072 int RotateAmt = -1;
12073 for (int i = 0; i != NumElts; i += NumSubElts) {
12074 for (int j = 0; j != NumSubElts; ++j) {
12075 int M = Mask[i + j];
12076 if (M < 0)
12077 continue;
12078 if (!isInRange(M, i, i + NumSubElts))
12079 return -1;
12080 int Offset = (NumSubElts - (M - (i + j))) % NumSubElts;
12081 if (0 <= RotateAmt && Offset != RotateAmt)
12082 return -1;
12083 RotateAmt = Offset;
12084 }
12085 }
12086 return RotateAmt;
12087 }
12089 static int matchShuffleAsBitRotate(MVT &RotateVT, int EltSizeInBits,
12090 const X86Subtarget &Subtarget,
12091 ArrayRef<int> Mask) {
12092 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
12093 assert(EltSizeInBits < 64 && "Can't rotate 64-bit integers");
12095 // AVX512 only has vXi32/vXi64 rotates, so limit the rotation sub group size.
12096 int MinSubElts = Subtarget.hasAVX512() ? std::max(32 / EltSizeInBits, 2) : 2;
12097 int MaxSubElts = 64 / EltSizeInBits;
12098 for (int NumSubElts = MinSubElts; NumSubElts <= MaxSubElts; NumSubElts *= 2) {
12099 int RotateAmt = matchShuffleAsBitRotate(Mask, NumSubElts);
12100 if (RotateAmt < 0)
12101 continue;
12103 int NumElts = Mask.size();
12104 MVT RotateSVT = MVT::getIntegerVT(EltSizeInBits * NumSubElts);
12105 RotateVT = MVT::getVectorVT(RotateSVT, NumElts / NumSubElts);
12106 return RotateAmt * EltSizeInBits;
12107 }
12109 return -1;
12110 }
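// For example, the v16i8 mask <3, 0, 1, 2, 7, 4, 5, 6, ...> repeats with
// Offset = 1 in every 4-byte group, so it matches a v4i32 rotation by
// 1 * 8 = 8 bits (each dword's bytes rotate by one position).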
12112 /// Lower shuffle using X86ISD::VROTLI rotations.
12113 static SDValue lowerShuffleAsBitRotate(const SDLoc &DL, MVT VT, SDValue V1,
12114 ArrayRef<int> Mask,
12115 const X86Subtarget &Subtarget,
12116 SelectionDAG &DAG) {
12117 // Only XOP + AVX512 targets have bit rotation instructions.
12118 // If we at least have SSSE3 (PSHUFB) then we shouldn't attempt to use this.
12119 bool IsLegal =
12120 (VT.is128BitVector() && Subtarget.hasXOP()) || Subtarget.hasAVX512();
12121 if (!IsLegal && Subtarget.hasSSE3())
12122 return SDValue();
12124 MVT RotateVT;
12125 int RotateAmt = matchShuffleAsBitRotate(RotateVT, VT.getScalarSizeInBits(),
12126 Subtarget, Mask);
12127 if (RotateAmt < 0)
12128 return SDValue();
12130 // For pre-SSSE3 targets, if we are shuffling vXi8 elts then ISD::ROTL,
12131 // expanded to OR(SRL,SHL), will be more efficient, but if they can
12132 // widen to vXi16 or more then the existing lowering should be better.
12133 if (!IsLegal) {
12134 if ((RotateAmt % 16) == 0)
12135 return SDValue();
12136 // TODO: Use getTargetVShiftByConstNode.
12137 unsigned ShlAmt = RotateAmt;
12138 unsigned SrlAmt = RotateVT.getScalarSizeInBits() - RotateAmt;
12139 V1 = DAG.getBitcast(RotateVT, V1);
12140 SDValue SHL = DAG.getNode(X86ISD::VSHLI, DL, RotateVT, V1,
12141 DAG.getTargetConstant(ShlAmt, DL, MVT::i8));
12142 SDValue SRL = DAG.getNode(X86ISD::VSRLI, DL, RotateVT, V1,
12143 DAG.getTargetConstant(SrlAmt, DL, MVT::i8));
12144 SDValue Rot = DAG.getNode(ISD::OR, DL, RotateVT, SHL, SRL);
12145 return DAG.getBitcast(VT, Rot);
12149 DAG.getNode(X86ISD::VROTLI, DL, RotateVT, DAG.getBitcast(RotateVT, V1),
12150 DAG.getTargetConstant(RotateAmt, DL, MVT::i8));
12151 return DAG.getBitcast(VT, Rot);
12152 }
12154 /// Try to match a vector shuffle as an element rotation.
12156 /// This is used to support PALIGNR for SSSE3 or VALIGND/Q for AVX512.
12157 static int matchShuffleAsElementRotate(SDValue &V1, SDValue &V2,
12158 ArrayRef<int> Mask) {
12159 int NumElts = Mask.size();
12161 // We need to detect various ways of spelling a rotation:
12162 // [11, 12, 13, 14, 15, 0, 1, 2]
12163 // [-1, 12, 13, 14, -1, -1, 1, -1]
12164 // [-1, -1, -1, -1, -1, -1, 1, 2]
12165 // [ 3, 4, 5, 6, 7, 8, 9, 10]
12166 // [-1, 4, 5, 6, -1, -1, 9, -1]
12167 // [-1, 4, 5, 6, -1, -1, -1, -1]
12168 int Rotation = 0;
12169 SDValue Lo, Hi;
12170 for (int i = 0; i < NumElts; ++i) {
12171 int M = Mask[i];
12172 assert((M == SM_SentinelUndef || (0 <= M && M < (2*NumElts))) &&
12173 "Unexpected mask index.");
12174 if (M < 0)
12175 continue;
12177 // Determine where a rotated vector would have started.
12178 int StartIdx = i - (M % NumElts);
12179 if (StartIdx == 0)
12180 // The identity rotation isn't interesting, stop.
12181 return -1;
12183 // If we found the tail of a vector the rotation must be the missing
12184 // front. If we found the head of a vector, it must be how much of the
12185 // head.
12186 int CandidateRotation = StartIdx < 0 ? -StartIdx : NumElts - StartIdx;
12188 if (Rotation == 0)
12189 Rotation = CandidateRotation;
12190 else if (Rotation != CandidateRotation)
12191 // The rotations don't match, so we can't match this mask.
12192 return -1;
12194 // Compute which value this mask is pointing at.
12195 SDValue MaskV = M < NumElts ? V1 : V2;
12197 // Compute which of the two target values this index should be assigned
12198 // to. This reflects whether the high elements are remaining or the low
12199 // elements are remaining.
12200 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
12202 // Either set up this value if we've not encountered it before, or check
12203 // that it remains consistent.
12204 if (!TargetV)
12205 TargetV = MaskV;
12206 else if (TargetV != MaskV)
12207 // This may be a rotation, but it pulls from the inputs in some
12208 // unsupported interleaving.
12209 return -1;
12210 }
12212 // Check that we successfully analyzed the mask, and normalize the results.
12213 assert(Rotation != 0 && "Failed to locate a viable rotation!");
12214 assert((Lo || Hi) && "Failed to find a rotated input vector!");
12215 if (!Lo)
12216 Lo = Hi;
12217 else if (!Hi)
12218 Hi = Lo;
12220 V1 = Lo;
12221 V2 = Hi;
12223 return Rotation;
12224 }
12226 /// Try to lower a vector shuffle as a byte rotation.
12228 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
12229 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
12230 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
12231 /// try to generically lower a vector shuffle through such a pattern. It
12232 /// does not check for the profitability of lowering either as PALIGNR or
12233 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
12234 /// This matches shuffle vectors that look like:
12236 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
12238 /// Essentially it concatenates V1 and V2, shifts right by some number of
12239 /// elements, and takes the low elements as the result. Note that while this is
12240 /// specified as a *right shift* because x86 is little-endian, it is a *left
12241 /// rotate* of the vector lanes.
12242 static int matchShuffleAsByteRotate(MVT VT, SDValue &V1, SDValue &V2,
12243 ArrayRef<int> Mask) {
12244 // Don't accept any shuffles with zero elements.
12245 if (isAnyZero(Mask))
12246 return -1;
12248 // PALIGNR works on 128-bit lanes.
12249 SmallVector<int, 16> RepeatedMask;
12250 if (!is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedMask))
12251 return -1;
12253 int Rotation = matchShuffleAsElementRotate(V1, V2, RepeatedMask);
12254 if (Rotation <= 0)
12255 return -1;
12257 // PALIGNR rotates bytes, so we need to scale the
12258 // rotation based on how many bytes are in the vector lane.
12259 int NumElts = RepeatedMask.size();
12260 int Scale = 16 / NumElts;
12261 return Rotation * Scale;
12262 }
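// For example, a v8i16 rotation of 3 elements scales to a PALIGNR immediate
// of 3 * (16 / 8) = 6 bytes.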
12264 static SDValue lowerShuffleAsByteRotate(const SDLoc &DL, MVT VT, SDValue V1,
12265 SDValue V2, ArrayRef<int> Mask,
12266 const X86Subtarget &Subtarget,
12267 SelectionDAG &DAG) {
12268 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
12270 SDValue Lo = V1, Hi = V2;
12271 int ByteRotation = matchShuffleAsByteRotate(VT, Lo, Hi, Mask);
12272 if (ByteRotation <= 0)
12273 return SDValue();
12275 // Cast the inputs to i8 vector of correct length to match PALIGNR or
12276 // PSLLDQ/PSRLDQ.
12277 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
12278 Lo = DAG.getBitcast(ByteVT, Lo);
12279 Hi = DAG.getBitcast(ByteVT, Hi);
12281 // SSSE3 targets can use the palignr instruction.
12282 if (Subtarget.hasSSSE3()) {
12283 assert((!VT.is512BitVector() || Subtarget.hasBWI()) &&
12284 "512-bit PALIGNR requires BWI instructions");
12285 return DAG.getBitcast(
12286 VT, DAG.getNode(X86ISD::PALIGNR, DL, ByteVT, Lo, Hi,
12287 DAG.getTargetConstant(ByteRotation, DL, MVT::i8)));
12290 assert(VT.is128BitVector() &&
12291 "Rotate-based lowering only supports 128-bit lowering!");
12292 assert(Mask.size() <= 16 &&
12293 "Can shuffle at most 16 bytes in a 128-bit vector!");
12294 assert(ByteVT == MVT::v16i8 &&
12295 "SSE2 rotate lowering only needed for v16i8!");
12297 // Default SSE2 implementation
12298 int LoByteShift = 16 - ByteRotation;
12299 int HiByteShift = ByteRotation;
12301 SDValue LoShift =
12302 DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Lo,
12303 DAG.getTargetConstant(LoByteShift, DL, MVT::i8));
12304 SDValue HiShift =
12305 DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Hi,
12306 DAG.getTargetConstant(HiByteShift, DL, MVT::i8));
12307 return DAG.getBitcast(VT,
12308 DAG.getNode(ISD::OR, DL, MVT::v16i8, LoShift, HiShift));
12309 }
12311 /// Try to lower a vector shuffle as a dword/qword rotation.
12313 /// AVX512 has VALIGND/VALIGNQ instructions that will do an arbitrary
12314 /// rotation of the concatenation of two vectors; this routine will
12315 /// try to generically lower a vector shuffle through such a pattern.
12317 /// Essentially it concatenates V1 and V2, shifts right by some number of
12318 /// elements, and takes the low elements as the result. Note that while this is
12319 /// specified as a *right shift* because x86 is little-endian, it is a *left
12320 /// rotate* of the vector lanes.
12321 static SDValue lowerShuffleAsVALIGN(const SDLoc &DL, MVT VT, SDValue V1,
12322 SDValue V2, ArrayRef<int> Mask,
12323 const X86Subtarget &Subtarget,
12324 SelectionDAG &DAG) {
12325 assert((VT.getScalarType() == MVT::i32 || VT.getScalarType() == MVT::i64) &&
12326 "Only 32-bit and 64-bit elements are supported!");
12328 // 128/256-bit vectors are only supported with VLX.
12329 assert((Subtarget.hasVLX() || (!VT.is128BitVector() && !VT.is256BitVector()))
12330 && "VLX required for 128/256-bit vectors");
12332 SDValue Lo = V1, Hi = V2;
12333 int Rotation = matchShuffleAsElementRotate(Lo, Hi, Mask);
12334 if (Rotation <= 0)
12335 return SDValue();
12337 return DAG.getNode(X86ISD::VALIGN, DL, VT, Lo, Hi,
12338 DAG.getTargetConstant(Rotation, DL, MVT::i8));
12339 }
12341 /// Try to lower a vector shuffle as a byte shift sequence.
12342 static SDValue lowerShuffleAsByteShiftMask(const SDLoc &DL, MVT VT, SDValue V1,
12343 SDValue V2, ArrayRef<int> Mask,
12344 const APInt &Zeroable,
12345 const X86Subtarget &Subtarget,
12346 SelectionDAG &DAG) {
12347 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
12348 assert(VT.is128BitVector() && "Only 128-bit vectors supported");
12350 // We need a shuffle that has zeros at one/both ends and a sequential
12351 // shuffle from one source within.
12352 unsigned ZeroLo = Zeroable.countTrailingOnes();
12353 unsigned ZeroHi = Zeroable.countLeadingOnes();
12354 if (!ZeroLo && !ZeroHi)
12355 return SDValue();
12357 unsigned NumElts = Mask.size();
12358 unsigned Len = NumElts - (ZeroLo + ZeroHi);
12359 if (!isSequentialOrUndefInRange(Mask, ZeroLo, Len, Mask[ZeroLo]))
12360 return SDValue();
12362 unsigned Scale = VT.getScalarSizeInBits() / 8;
12363 ArrayRef<int> StubMask = Mask.slice(ZeroLo, Len);
12364 if (!isUndefOrInRange(StubMask, 0, NumElts) &&
12365 !isUndefOrInRange(StubMask, NumElts, 2 * NumElts))
12366 return SDValue();
12368 SDValue Res = Mask[ZeroLo] < (int)NumElts ? V1 : V2;
12369 Res = DAG.getBitcast(MVT::v16i8, Res);
12371 // Use VSHLDQ/VSRLDQ ops to zero the ends of a vector and leave an
12372 // inner sequential set of elements, possibly offset:
12373 // 01234567 --> zzzzzz01 --> 1zzzzzzz
12374 // 01234567 --> 4567zzzz --> zzzzz456
12375 // 01234567 --> z0123456 --> 3456zzzz --> zz3456zz
12377 unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
12378 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
12379 DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
12380 Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
12381 DAG.getTargetConstant(Scale * ZeroHi, DL, MVT::i8));
12382 } else if (ZeroHi == 0) {
12383 unsigned Shift = Mask[ZeroLo] % NumElts;
12384 Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
12385 DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
12386 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
12387 DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
12388 } else if (!Subtarget.hasSSSE3()) {
12389 // If we don't have PSHUFB then its worth avoiding an AND constant mask
12390 // by performing 3 byte shifts. Shuffle combining can kick in above that.
12391 // TODO: There may be some cases where VSH{LR}DQ+PAND is still better.
12392 unsigned Shift = (NumElts - 1) - (Mask[ZeroLo + Len - 1] % NumElts);
12393 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
12394 DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
12395 Shift += Mask[ZeroLo] % NumElts;
12396 Res = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v16i8, Res,
12397 DAG.getTargetConstant(Scale * Shift, DL, MVT::i8));
12398 Res = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, Res,
12399 DAG.getTargetConstant(Scale * ZeroLo, DL, MVT::i8));
12403 return DAG.getBitcast(VT, Res);
/// Try to lower a vector shuffle as a bit shift (shifts in zeros).
///
/// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
/// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
/// matches elements from one of the input vectors shuffled to the left or
/// right with zeroable elements 'shifted in'. It handles both the strictly
/// bit-wise element shifts and the byte shift across an entire 128-bit double
/// quad word lane.
///
/// PSLL : (little-endian) left bit shift.
/// [ zz, 0, zz,  2 ]
/// [ -1, 4, zz, -1 ]
/// PSRL : (little-endian) right bit shift.
/// [  1, zz,  3, zz]
/// [ -1, -1,  7, zz]
/// PSLLDQ : (little-endian) left byte shift
/// [ zz,  0,  1,  2,  3,  4,  5,  6]
/// [ zz, zz, -1, -1,  2,  3,  4, -1]
/// [ zz, zz, zz, zz, zz, zz, -1,  1]
/// PSRLDQ : (little-endian) right byte shift
/// [  5, 6,  7, zz, zz, zz, zz, zz]
/// [ -1, 5,  6,  7, zz, zz, zz, zz]
/// [  1, 2, -1, -1, -1, -1, zz, zz]
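/// (Here 'zz' denotes an element that must be zeroable and -1 an undef
/// element that may take any value.)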
static int matchShuffleAsShift(MVT &ShiftVT, unsigned &Opcode,
                               unsigned ScalarSizeInBits, ArrayRef<int> Mask,
                               int MaskOffset, const APInt &Zeroable,
                               const X86Subtarget &Subtarget) {
  int Size = Mask.size();
  unsigned SizeInBits = Size * ScalarSizeInBits;

  auto CheckZeros = [&](int Shift, int Scale, bool Left) {
    for (int i = 0; i < Size; i += Scale)
      for (int j = 0; j < Shift; ++j)
        if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
          return false;

    return true;
  };

  auto MatchShift = [&](int Shift, int Scale, bool Left) {
    for (int i = 0; i != Size; i += Scale) {
      unsigned Pos = Left ? i + Shift : i;
      unsigned Low = Left ? i : i + Shift;
      unsigned Len = Scale - Shift;
      if (!isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset))
        return -1;
    }

    int ShiftEltBits = ScalarSizeInBits * Scale;
    bool ByteShift = ShiftEltBits > 64;
    Opcode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
                  : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
    int ShiftAmt = Shift * ScalarSizeInBits / (ByteShift ? 8 : 1);

    // Normalize the scale for byte shifts to still produce an i64 element
    // type.
    Scale = ByteShift ? Scale / 2 : Scale;

    // We need to round trip through the appropriate type for the shift.
    MVT ShiftSVT = MVT::getIntegerVT(ScalarSizeInBits * Scale);
    ShiftVT = ByteShift ? MVT::getVectorVT(MVT::i8, SizeInBits / 8)
                        : MVT::getVectorVT(ShiftSVT, Size / Scale);
    return (int)ShiftAmt;
  };

  // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
  // keep doubling the size of the integer elements up to that. We can
  // then shift the elements of the integer vector by whole multiples of
  // their width within the elements of the larger integer vector. Test each
  // multiple to see if we can find a match with the moved element indices
  // and that the shifted in elements are all zeroable.
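  // For example, the v8i16 mask [zz, 0, 1, 2, 3, 4, 5, 6] fails at element
  // scales 2 and 4 but matches at Scale == 8 (a single 128-bit "element")
  // shifted left by one i16, i.e. VSHLDQ (PSLLDQ) by 2 bytes.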
  unsigned MaxWidth = ((SizeInBits == 512) && !Subtarget.hasBWI() ? 64 : 128);
  for (int Scale = 2; Scale * ScalarSizeInBits <= MaxWidth; Scale *= 2)
    for (int Shift = 1; Shift != Scale; ++Shift)
      for (bool Left : {true, false})
        if (CheckZeros(Shift, Scale, Left)) {
          int ShiftAmt = MatchShift(Shift, Scale, Left);
          if (0 < ShiftAmt)
            return ShiftAmt;
        }

  // No match.
  return -1;
}
static SDValue lowerShuffleAsShift(const SDLoc &DL, MVT VT, SDValue V1,
                                   SDValue V2, ArrayRef<int> Mask,
                                   const APInt &Zeroable,
                                   const X86Subtarget &Subtarget,
                                   SelectionDAG &DAG) {
  int Size = Mask.size();
  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");

  MVT ShiftVT;
  SDValue V = V1;
  unsigned Opcode;

  // Try to match shuffle against V1 shift.
  int ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
                                     Mask, 0, Zeroable, Subtarget);

  // If V1 failed, try to match shuffle against V2 shift.
  if (ShiftAmt < 0) {
    ShiftAmt = matchShuffleAsShift(ShiftVT, Opcode, VT.getScalarSizeInBits(),
                                   Mask, Size, Zeroable, Subtarget);
    V = V2;
  }

  if (ShiftAmt < 0)
    return SDValue();

  assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
         "Illegal integer vector type");
  V = DAG.getBitcast(ShiftVT, V);
  V = DAG.getNode(Opcode, DL, ShiftVT, V,
                  DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
  return DAG.getBitcast(VT, V);
}
// EXTRQ: Extract Len elements from lower half of source, starting at Idx.
// Remainder of lower half result is zero and upper half is all undef.
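// For example, the v8i16 mask [0, 1, 2, zz, -1, -1, -1, -1] extracts the
// first three elements of V1: BitLen == 48 and BitIdx == 0.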
static bool matchShuffleAsEXTRQ(MVT VT, SDValue &V1, SDValue &V2,
                                ArrayRef<int> Mask, uint64_t &BitLen,
                                uint64_t &BitIdx, const APInt &Zeroable) {
  int Size = Mask.size();
  int HalfSize = Size / 2;
  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
  assert(!Zeroable.isAllOnesValue() && "Fully zeroable shuffle mask");

  // Upper half must be undefined.
  if (!isUndefUpperHalf(Mask))
    return false;

  // Determine the extraction length from the part of the
  // lower half that isn't zeroable.
  int Len = HalfSize;
  for (; Len > 0; --Len)
    if (!Zeroable[Len - 1])
      break;
  assert(Len > 0 && "Zeroable shuffle mask");

  // Attempt to match first Len sequential elements from the lower half.
  SDValue Src;
  int Idx = -1;
  for (int i = 0; i != Len; ++i) {
    int M = Mask[i];
    if (M == SM_SentinelUndef)
      continue;
    SDValue &V = (M < Size ? V1 : V2);
    M = M % Size;

    // The extracted elements must start at a valid index and all mask
    // elements must be in the lower half.
    if (i > M || M >= HalfSize)
      return false;

    if (Idx < 0 || (Src == V && Idx == (M - i))) {
      Src = V;
      Idx = M - i;
      continue;
    }
    return false;
  }

  if (!Src || Idx < 0)
    return false;

  assert((Idx + Len) <= HalfSize && "Illegal extraction mask");
  BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
  BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
  V1 = Src;
  return true;
}
// INSERTQ: Extract lowest Len elements from lower half of second source and
// insert over first source, starting at Idx.
// { A[0], .., A[Idx-1], B[0], .., B[Len-1], A[Idx+Len], .., UNDEF, ... }
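// For example, the v8i16 mask [0, 8, 9, 3, -1, -1, -1, -1] inserts the low
// two elements of V2 over V1 at element 1: BitLen == 32 and BitIdx == 16.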
static bool matchShuffleAsINSERTQ(MVT VT, SDValue &V1, SDValue &V2,
                                  ArrayRef<int> Mask, uint64_t &BitLen,
                                  uint64_t &BitIdx) {
  int Size = Mask.size();
  int HalfSize = Size / 2;
  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");

  // Upper half must be undefined.
  if (!isUndefUpperHalf(Mask))
    return false;

  for (int Idx = 0; Idx != HalfSize; ++Idx) {
    SDValue Base;

    // Attempt to match first source from mask before insertion point.
    if (isUndefInRange(Mask, 0, Idx)) {
      /* EMPTY */
    } else if (isSequentialOrUndefInRange(Mask, 0, Idx, 0)) {
      Base = V1;
    } else if (isSequentialOrUndefInRange(Mask, 0, Idx, Size)) {
      Base = V2;
    } else {
      continue;
    }

    // Extend the extraction length looking to match both the insertion of
    // the second source and the remaining elements of the first.
    for (int Hi = Idx + 1; Hi <= HalfSize; ++Hi) {
      SDValue Insert;
      int Len = Hi - Idx;

      // Match insertion.
      if (isSequentialOrUndefInRange(Mask, Idx, Len, 0)) {
        Insert = V1;
      } else if (isSequentialOrUndefInRange(Mask, Idx, Len, Size)) {
        Insert = V2;
      } else {
        continue;
      }

      // Match the remaining elements of the lower half.
      if (isUndefInRange(Mask, Hi, HalfSize - Hi)) {
        /* EMPTY */
      } else if ((!Base || (Base == V1)) &&
                 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi, Hi)) {
        Base = V1;
      } else if ((!Base || (Base == V2)) &&
                 isSequentialOrUndefInRange(Mask, Hi, HalfSize - Hi,
                                            Size + Hi)) {
        Base = V2;
      } else {
        continue;
      }

      BitLen = (Len * VT.getScalarSizeInBits()) & 0x3f;
      BitIdx = (Idx * VT.getScalarSizeInBits()) & 0x3f;
      V1 = Base;
      V2 = Insert;
      return true;
    }
  }

  return false;
}
/// Try to lower a vector shuffle using SSE4a EXTRQ/INSERTQ.
static SDValue lowerShuffleWithSSE4A(const SDLoc &DL, MVT VT, SDValue V1,
                                     SDValue V2, ArrayRef<int> Mask,
                                     const APInt &Zeroable, SelectionDAG &DAG) {
  uint64_t BitLen, BitIdx;
  if (matchShuffleAsEXTRQ(VT, V1, V2, Mask, BitLen, BitIdx, Zeroable))
    return DAG.getNode(X86ISD::EXTRQI, DL, VT, V1,
                       DAG.getTargetConstant(BitLen, DL, MVT::i8),
                       DAG.getTargetConstant(BitIdx, DL, MVT::i8));

  if (matchShuffleAsINSERTQ(VT, V1, V2, Mask, BitLen, BitIdx))
    return DAG.getNode(X86ISD::INSERTQI, DL, VT, V1 ? V1 : DAG.getUNDEF(VT),
                       V2 ? V2 : DAG.getUNDEF(VT),
                       DAG.getTargetConstant(BitLen, DL, MVT::i8),
                       DAG.getTargetConstant(BitIdx, DL, MVT::i8));

  return SDValue();
}
/// Lower a vector shuffle as a zero or any extension.
///
/// Given a specific number of elements, element bit width, and extension
/// stride, produce either a zero or any extension based on the available
/// features of the subtarget. The extended elements are consecutive and can
/// start from an offset element index in the input; to avoid excess shuffling
/// the offset must either be in the bottom lane or at the start of a higher
/// lane. All extended elements must come from the same lane.
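///
/// For example, with Scale == 4 a v16i8 input has four consecutive bytes
/// zero-extended into the i32 lanes of the result (e.g. via PMOVZXBD on
/// SSE4.1 targets).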
static SDValue lowerShuffleAsSpecificZeroOrAnyExtend(
    const SDLoc &DL, MVT VT, int Scale, int Offset, bool AnyExt, SDValue InputV,
    ArrayRef<int> Mask, const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  assert(Scale > 1 && "Need a scale to extend.");
  int EltBits = VT.getScalarSizeInBits();
  int NumElements = VT.getVectorNumElements();
  int NumEltsPerLane = 128 / EltBits;
  int OffsetLane = Offset / NumEltsPerLane;
  assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
         "Only 8, 16, and 32 bit elements can be extended.");
  assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
  assert(0 <= Offset && "Extension offset must be positive.");
  assert((Offset < NumEltsPerLane || Offset % NumEltsPerLane == 0) &&
         "Extension offset must be in the first lane or start an upper lane.");

  // Check that an index is in same lane as the base offset.
  auto SafeOffset = [&](int Idx) {
    return OffsetLane == (Idx / NumEltsPerLane);
  };

  // Shift along an input so that the offset base moves to the first element.
  auto ShuffleOffset = [&](SDValue V) {
    if (!Offset)
      return V;

    SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
    for (int i = 0; i * Scale < NumElements; ++i) {
      int SrcIdx = i + Offset;
      ShMask[i] = SafeOffset(SrcIdx) ? SrcIdx : -1;
    }
    return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), ShMask);
  };

  // Found a valid a/zext mask! Try various lowering strategies based on the
  // input type and available ISA extensions.
  if (Subtarget.hasSSE41()) {
    // Not worth offsetting 128-bit vectors if scale == 2, a pattern using
    // PUNPCK will catch this in a later shuffle match.
    if (Offset && Scale == 2 && VT.is128BitVector())
      return SDValue();
    MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
                                 NumElements / Scale);
    InputV = ShuffleOffset(InputV);
    InputV = getExtendInVec(AnyExt ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND, DL,
                            ExtVT, InputV, DAG);
    return DAG.getBitcast(VT, InputV);
  }

  assert(VT.is128BitVector() && "Only 128-bit vectors can be extended.");

  // For any extends we can cheat for larger element sizes and use shuffle
  // instructions that can fold with a load and/or copy.
  if (AnyExt && EltBits == 32) {
    int PSHUFDMask[4] = {Offset, -1, SafeOffset(Offset + 1) ? Offset + 1 : -1,
                         -1};
    return DAG.getBitcast(
        VT, DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
                        DAG.getBitcast(MVT::v4i32, InputV),
                        getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
  }
  if (AnyExt && EltBits == 16 && Scale > 2) {
    int PSHUFDMask[4] = {Offset / 2, -1,
                         SafeOffset(Offset + 1) ? (Offset + 1) / 2 : -1, -1};
    InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
                         DAG.getBitcast(MVT::v4i32, InputV),
                         getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
    int PSHUFWMask[4] = {1, -1, -1, -1};
    unsigned OddEvenOp = (Offset & 1) ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
    return DAG.getBitcast(
        VT, DAG.getNode(OddEvenOp, DL, MVT::v8i16,
                        DAG.getBitcast(MVT::v8i16, InputV),
                        getV4X86ShuffleImm8ForMask(PSHUFWMask, DL, DAG)));
  }

  // The SSE4A EXTRQ instruction can efficiently extend the first 2 lanes
  // to 64-bits.
  if ((Scale * EltBits) == 64 && EltBits < 32 && Subtarget.hasSSE4A()) {
    assert(NumElements == (int)Mask.size() && "Unexpected shuffle mask size!");
    assert(VT.is128BitVector() && "Unexpected vector width!");

    int LoIdx = Offset * EltBits;
    SDValue Lo = DAG.getBitcast(
        MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
                                DAG.getTargetConstant(EltBits, DL, MVT::i8),
                                DAG.getTargetConstant(LoIdx, DL, MVT::i8)));

    if (isUndefUpperHalf(Mask) || !SafeOffset(Offset + 1))
      return DAG.getBitcast(VT, Lo);

    int HiIdx = (Offset + 1) * EltBits;
    SDValue Hi = DAG.getBitcast(
        MVT::v2i64, DAG.getNode(X86ISD::EXTRQI, DL, VT, InputV,
                                DAG.getTargetConstant(EltBits, DL, MVT::i8),
                                DAG.getTargetConstant(HiIdx, DL, MVT::i8)));
    return DAG.getBitcast(VT,
                          DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, Lo, Hi));
  }

  // If this would require more than 2 unpack instructions to expand, use
  // pshufb when available. We can only use more than 2 unpack instructions
  // when zero extending i8 elements which also makes it easier to use pshufb.
  if (Scale > 4 && EltBits == 8 && Subtarget.hasSSSE3()) {
    assert(NumElements == 16 && "Unexpected byte vector width!");
    SDValue PSHUFBMask[16];
    for (int i = 0; i < 16; ++i) {
      int Idx = Offset + (i / Scale);
      if (i % Scale == 0 && SafeOffset(Idx)) {
        PSHUFBMask[i] = DAG.getConstant(Idx, DL, MVT::i8);
        continue;
      }
      PSHUFBMask[i] =
          AnyExt ? DAG.getUNDEF(MVT::i8) : DAG.getConstant(0x80, DL, MVT::i8);
    }
    InputV = DAG.getBitcast(MVT::v16i8, InputV);
    return DAG.getBitcast(
        VT, DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
                        DAG.getBuildVector(MVT::v16i8, DL, PSHUFBMask)));
  }

  // If we are extending from an offset, ensure we start on a boundary that
  // we can unpack from.
  int AlignToUnpack = Offset % (NumElements / Scale);
  if (AlignToUnpack) {
    SmallVector<int, 8> ShMask((unsigned)NumElements, -1);
    for (int i = AlignToUnpack; i < NumElements; ++i)
      ShMask[i - AlignToUnpack] = i;
    InputV = DAG.getVectorShuffle(VT, DL, InputV, DAG.getUNDEF(VT), ShMask);
    Offset -= AlignToUnpack;
  }

  // Otherwise emit a sequence of unpacks.
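  // Each unpack interleaves the input with zeros (or undef for any-extends),
  // doubling the element size each time, e.g. v16i8 -> v8i16 -> v4i32.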
  do {
    unsigned UnpackLoHi = X86ISD::UNPCKL;
    if (Offset >= (NumElements / 2)) {
      UnpackLoHi = X86ISD::UNPCKH;
      Offset -= (NumElements / 2);
    }

    MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
    SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
                         : getZeroVector(InputVT, Subtarget, DAG, DL);
    InputV = DAG.getBitcast(InputVT, InputV);
    InputV = DAG.getNode(UnpackLoHi, DL, InputVT, InputV, Ext);
    Scale /= 2;
    EltBits *= 2;
    NumElements /= 2;
  } while (Scale > 1);
  return DAG.getBitcast(VT, InputV);
}
/// Try to lower a vector shuffle as a zero extension on any microarch.
///
/// This routine will try to do everything in its power to cleverly lower
/// a shuffle which happens to match the pattern of a zero extend. It doesn't
/// check for the profitability of this lowering, it tries to aggressively
/// match this pattern. It will use all of the micro-architectural details it
/// can to emit an efficient lowering. It handles both blends with all-zero
/// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
/// masking out later).
///
/// The reason we have dedicated lowering for zext-style shuffles is that they
/// are both incredibly common and often quite performance sensitive.
static SDValue lowerShuffleAsZeroOrAnyExtend(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const APInt &Zeroable, const X86Subtarget &Subtarget,
    SelectionDAG &DAG) {
  int Bits = VT.getSizeInBits();
  int NumLanes = Bits / 128;
  int NumElements = VT.getVectorNumElements();
  int NumEltsPerLane = NumElements / NumLanes;
  assert(VT.getScalarSizeInBits() <= 32 &&
         "Exceeds 32-bit integer zero extension limit");
  assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");

  // Define a helper function to check a particular ext-scale and lower to it
  // if valid.
  auto Lower = [&](int Scale) -> SDValue {
    SDValue InputV;
    bool AnyExt = true;
    int Offset = 0;
    int Matches = 0;
    for (int i = 0; i < NumElements; ++i) {
      int M = Mask[i];
      if (M < 0)
        continue; // Valid anywhere but doesn't tell us anything.
      if (i % Scale != 0) {
        // Each of the extended elements needs to be zeroable.
        if (!Zeroable[i])
          return SDValue();

        // We no longer are in the anyext case.
        AnyExt = false;
        continue;
      }

      // Each of the base elements needs to be consecutive indices into the
      // same input vector.
      SDValue V = M < NumElements ? V1 : V2;
      M = M % NumElements;
      if (!InputV) {
        InputV = V;
        Offset = M - (i / Scale);
      } else if (InputV != V)
        return SDValue(); // Flip-flopping inputs.

      // Offset must start in the lowest 128-bit lane or at the start of an
      // upper lane.
      // FIXME: Is it ever worth allowing a negative base offset?
      if (!((0 <= Offset && Offset < NumEltsPerLane) ||
            (Offset % NumEltsPerLane) == 0))
        return SDValue();

      // If we are offsetting, all referenced entries must come from the same
      // lane.
      if (Offset && (Offset / NumEltsPerLane) != (M / NumEltsPerLane))
        return SDValue();

      if ((M % NumElements) != (Offset + (i / Scale)))
        return SDValue(); // Non-consecutive strided elements.
      Matches++;
    }

    // If we fail to find an input, we have a zero-shuffle which should always
    // have already been handled.
    // FIXME: Maybe handle this here in case during blending we end up with one?
    if (!InputV)
      return SDValue();

    // If we are offsetting, don't extend if we only match a single input, we
    // can always do better by using a basic PSHUF or PUNPCK.
    if (Offset != 0 && Matches < 2)
      return SDValue();

    return lowerShuffleAsSpecificZeroOrAnyExtend(DL, VT, Scale, Offset, AnyExt,
                                                 InputV, Mask, Subtarget, DAG);
  };

  // The widest scale possible for extending is to a 64-bit integer.
  assert(Bits % 64 == 0 &&
         "The number of bits in a vector must be divisible by 64 on x86!");
  int NumExtElements = Bits / 64;

  // Each iteration, try extending the elements half as much, but into twice as
  // many elements.
  for (; NumExtElements < NumElements; NumExtElements *= 2) {
    assert(NumElements % NumExtElements == 0 &&
           "The input vector size must be divisible by the extended size.");
    if (SDValue V = Lower(NumElements / NumExtElements))
      return V;
  }

  // General extends failed, but 128-bit vectors may be able to use MOVQ.
  if (Bits != 128)
    return SDValue();

  // Returns one of the source operands if the shuffle can be reduced to a
  // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
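  // For example, the v2i64 mask [0, zz] (or [2, zz] when sourcing from V2)
  // reduces to a single MOVQ.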
  auto CanZExtLowHalf = [&]() {
    for (int i = NumElements / 2; i != NumElements; ++i)
      if (!Zeroable[i])
        return SDValue();
    if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
      return V1;
    if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
      return V2;
    return SDValue();
  };

  if (SDValue V = CanZExtLowHalf()) {
    V = DAG.getBitcast(MVT::v2i64, V);
    V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
    return DAG.getBitcast(VT, V);
  }

  // No viable ext lowering found.
  return SDValue();
}
/// Try to get a scalar value for a specific element of a vector.
///
/// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
                                              SelectionDAG &DAG) {
  MVT VT = V.getSimpleValueType();
  MVT EltVT = VT.getVectorElementType();
  V = peekThroughBitcasts(V);

  // If the bitcasts shift the element size, we can't extract an equivalent
  // element from it.
  MVT NewVT = V.getSimpleValueType();
  if (!NewVT.isVector() ||
      NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
    return SDValue();

  if (V.getOpcode() == ISD::BUILD_VECTOR ||
      (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR)) {
    // Ensure the scalar operand is the same size as the destination.
    // FIXME: Add support for scalar truncation where possible.
    SDValue S = V.getOperand(Idx);
    if (EltVT.getSizeInBits() == S.getSimpleValueType().getSizeInBits())
      return DAG.getBitcast(EltVT, S);
  }

  return SDValue();
}
/// Helper to test for a load that can be folded with x86 shuffles.
///
/// This is particularly important because the set of instructions varies
/// significantly based on whether the operand is a load or not.
static bool isShuffleFoldableLoad(SDValue V) {
  V = peekThroughBitcasts(V);
  return ISD::isNON_EXTLoad(V.getNode());
}
/// Try to lower insertion of a single element into a zero vector.
///
/// This is a common pattern that we have especially efficient patterns to lower
/// across all subtarget feature sets.
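///
/// For example, a v4f32 mask that takes its low element from V2 and whose
/// remaining lanes are zeroable can lower to a single VZEXT_MOVL-style
/// "move scalar and zero the rest" pattern.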
static SDValue lowerShuffleAsElementInsertion(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const APInt &Zeroable, const X86Subtarget &Subtarget,
    SelectionDAG &DAG) {
  MVT ExtVT = VT;
  MVT EltVT = VT.getVectorElementType();

  int V2Index =
      find_if(Mask, [&Mask](int M) { return M >= (int)Mask.size(); }) -
      Mask.begin();
  bool IsV1Zeroable = true;
  for (int i = 0, Size = Mask.size(); i < Size; ++i)
    if (i != V2Index && !Zeroable[i]) {
      IsV1Zeroable = false;
      break;
    }

  // Check for a single input from a SCALAR_TO_VECTOR node.
  // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
  // all the smarts here sunk into that routine. However, the current
  // lowering of BUILD_VECTOR makes that nearly impossible until the old
  // vector shuffle lowering is dead.
  SDValue V2S = getScalarValueForVectorElement(V2, Mask[V2Index] - Mask.size(),
                                               DAG);
  if (V2S && DAG.getTargetLoweringInfo().isTypeLegal(V2S.getValueType())) {
    // We need to zext the scalar if it is smaller than an i32.
    V2S = DAG.getBitcast(EltVT, V2S);
    if (EltVT == MVT::i8 || EltVT == MVT::i16) {
      // Using zext to expand a narrow element won't work for non-zero
      // insertions.
      if (!IsV1Zeroable)
        return SDValue();

      // Zero-extend directly to i32.
      ExtVT = MVT::getVectorVT(MVT::i32, ExtVT.getSizeInBits() / 32);
      V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
    }
    V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
  } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
             EltVT == MVT::i16) {
    // Either not inserting from the low element of the input or the input
    // element size is too small to use VZEXT_MOVL to clear the high bits.
    return SDValue();
  }

  if (!IsV1Zeroable) {
    // If V1 can't be treated as a zero vector we have fewer options to lower
    // this. We can't support integer vectors or non-zero targets cheaply, and
    // the V1 elements can't be permuted in any way.
    assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
    if (!VT.isFloatingPoint() || V2Index != 0)
      return SDValue();
    SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
    V1Mask[V2Index] = -1;
    if (!isNoopShuffleMask(V1Mask))
      return SDValue();
    if (!VT.is128BitVector())
      return SDValue();

    // Otherwise, use MOVSD or MOVSS.
    assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
           "Only two types of floating point element types to handle!");
    return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
                       ExtVT, V1, V2);
  }

  // This lowering only works for the low element with floating point vectors.
  if (VT.isFloatingPoint() && V2Index != 0)
    return SDValue();

  V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
  if (ExtVT != VT)
    V2 = DAG.getBitcast(VT, V2);

  if (V2Index != 0) {
    // If we have 4 or fewer lanes we can cheaply shuffle the element into
    // the desired position. Otherwise it is more efficient to do a vector
    // shift left. We know that we can do a vector shift left because all
    // the inputs are zero.
    if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
      SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
      V2Shuffle[V2Index] = 0;
      V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
    } else {
      V2 = DAG.getBitcast(MVT::v16i8, V2);
      V2 = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v16i8, V2,
                       DAG.getTargetConstant(
                           V2Index * EltVT.getSizeInBits() / 8, DL, MVT::i8));
      V2 = DAG.getBitcast(VT, V2);
    }
  }
  return V2;
}
/// Try to lower broadcast of a single - truncated - integer element,
/// coming from a scalar_to_vector/build_vector node \p V0 with larger elements.
///
/// This assumes we have AVX2.
static SDValue lowerShuffleAsTruncBroadcast(const SDLoc &DL, MVT VT, SDValue V0,
                                            int BroadcastIdx,
                                            const X86Subtarget &Subtarget,
                                            SelectionDAG &DAG) {
  assert(Subtarget.hasAVX2() &&
         "We can only lower integer broadcasts with AVX2!");

  MVT EltVT = VT.getVectorElementType();
  MVT V0VT = V0.getSimpleValueType();

  assert(VT.isInteger() && "Unexpected non-integer trunc broadcast!");
  assert(V0VT.isVector() && "Unexpected non-vector vector-sized value!");

  MVT V0EltVT = V0VT.getVectorElementType();
  if (!V0EltVT.isInteger())
    return SDValue();

  const unsigned EltSize = EltVT.getSizeInBits();
  const unsigned V0EltSize = V0EltVT.getSizeInBits();

  // This is only a truncation if the original element type is larger.
  if (V0EltSize <= EltSize)
    return SDValue();

  assert(((V0EltSize % EltSize) == 0) &&
         "Scalar type sizes must all be powers of 2 on x86!");

  const unsigned V0Opc = V0.getOpcode();
  const unsigned Scale = V0EltSize / EltSize;
  const unsigned V0BroadcastIdx = BroadcastIdx / Scale;

  if ((V0Opc != ISD::SCALAR_TO_VECTOR || V0BroadcastIdx != 0) &&
      V0Opc != ISD::BUILD_VECTOR)
    return SDValue();

  SDValue Scalar = V0.getOperand(V0BroadcastIdx);

  // If we're extracting non-least-significant bits, shift so we can truncate.
  // Hopefully, we can fold away the trunc/srl/load into the broadcast.
  // Even if we can't (and !isShuffleFoldableLoad(Scalar)), prefer
  // vpbroadcast+vmovd+shr to vpshufb(m)+vmovd.
  if (const int OffsetIdx = BroadcastIdx % Scale)
    Scalar = DAG.getNode(ISD::SRL, DL, Scalar.getValueType(), Scalar,
                         DAG.getConstant(OffsetIdx * EltSize, DL, MVT::i8));

  return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
                     DAG.getNode(ISD::TRUNCATE, DL, EltVT, Scalar));
}
/// Test whether this can be lowered with a single SHUFPS instruction.
///
/// This is used to disable more specialized lowerings when the shufps lowering
/// will happen to be efficient.
static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
  // This routine only handles 128-bit shufps.
  assert(Mask.size() == 4 && "Unsupported mask size!");
  assert(Mask[0] >= -1 && Mask[0] < 8 && "Out of bound mask element!");
  assert(Mask[1] >= -1 && Mask[1] < 8 && "Out of bound mask element!");
  assert(Mask[2] >= -1 && Mask[2] < 8 && "Out of bound mask element!");
  assert(Mask[3] >= -1 && Mask[3] < 8 && "Out of bound mask element!");

  // To lower with a single SHUFPS we need to have the low half and high half
  // each requiring a single input.
  if (Mask[0] >= 0 && Mask[1] >= 0 && (Mask[0] < 4) != (Mask[1] < 4))
    return false;
  if (Mask[2] >= 0 && Mask[3] >= 0 && (Mask[2] < 4) != (Mask[3] < 4))
    return false;

  return true;
}
/// If we are extracting two 128-bit halves of a vector and shuffling the
/// result, match that to a 256-bit AVX2 vperm* instruction to avoid a
/// multi-shuffle lowering.
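///
/// For example, rather than extracting both xmm halves of a ymm register and
/// blending them with a narrow shuffle, this can often be a single
/// VPERMPS/VPERMQ-style shuffle of the wide source followed by a free
/// ymm -> xmm extract.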
static SDValue lowerShuffleOfExtractsAsVperm(const SDLoc &DL, SDValue N0,
                                             SDValue N1, ArrayRef<int> Mask,
                                             SelectionDAG &DAG) {
  MVT VT = N0.getSimpleValueType();
  assert((VT.is128BitVector() &&
          (VT.getScalarSizeInBits() == 32 || VT.getScalarSizeInBits() == 64)) &&
         "VPERM* family of shuffles requires 32-bit or 64-bit elements");

  // Check that both sources are extracts of the same source vector.
  if (!N0.hasOneUse() || !N1.hasOneUse() ||
      N0.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
      N1.getOpcode() != ISD::EXTRACT_SUBVECTOR ||
      N0.getOperand(0) != N1.getOperand(0))
    return SDValue();

  SDValue WideVec = N0.getOperand(0);
  MVT WideVT = WideVec.getSimpleValueType();
  if (!WideVT.is256BitVector())
    return SDValue();

  // Match extracts of each half of the wide source vector. Commute the shuffle
  // if the extract of the low half is N1.
  unsigned NumElts = VT.getVectorNumElements();
  SmallVector<int, 4> NewMask(Mask.begin(), Mask.end());
  const APInt &ExtIndex0 = N0.getConstantOperandAPInt(1);
  const APInt &ExtIndex1 = N1.getConstantOperandAPInt(1);
  if (ExtIndex1 == 0 && ExtIndex0 == NumElts)
    ShuffleVectorSDNode::commuteMask(NewMask);
  else if (ExtIndex0 != 0 || ExtIndex1 != NumElts)
    return SDValue();

  // Final bailout: if the mask is simple, we are better off using an extract
  // and a simple narrow shuffle. Prefer extract+unpack(h/l)ps to vpermps
  // because that avoids a constant load from memory.
  if (NumElts == 4 &&
      (isSingleSHUFPSMask(NewMask) || is128BitUnpackShuffleMask(NewMask)))
    return SDValue();

  // Extend the shuffle mask with undef elements.
  NewMask.append(NumElts, -1);

  // shuf (extract X, 0), (extract X, 4), M --> extract (shuf X, undef, M'), 0
  SDValue Shuf = DAG.getVectorShuffle(WideVT, DL, WideVec, DAG.getUNDEF(WideVT),
                                      NewMask);
  // This is free: ymm -> xmm.
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Shuf,
                     DAG.getIntPtrConstant(0, DL));
}
/// Try to lower broadcast of a single element.
///
/// For convenience, this code also bundles all of the subtarget feature set
/// filtering. While a little annoying to re-dispatch on type here, there isn't
/// a convenient way to factor it out.
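///
/// For example, a splat mask such as <0,0,0,0> can become a single VBROADCAST
/// (or MOVDDUP for v2f64 on SSE3), ideally folding the source load into the
/// broadcast itself.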
static SDValue lowerShuffleAsBroadcast(const SDLoc &DL, MVT VT, SDValue V1,
                                       SDValue V2, ArrayRef<int> Mask,
                                       const X86Subtarget &Subtarget,
                                       SelectionDAG &DAG) {
  if (!((Subtarget.hasSSE3() && VT == MVT::v2f64) ||
        (Subtarget.hasAVX() && VT.isFloatingPoint()) ||
        (Subtarget.hasAVX2() && VT.isInteger())))
    return SDValue();

  // With MOVDDUP (v2f64) we can broadcast from a register or a load, otherwise
  // we can only broadcast from a register with AVX2.
  unsigned NumEltBits = VT.getScalarSizeInBits();
  unsigned Opcode = (VT == MVT::v2f64 && !Subtarget.hasAVX2())
                        ? X86ISD::MOVDDUP
                        : X86ISD::VBROADCAST;
  bool BroadcastFromReg = (Opcode == X86ISD::MOVDDUP) || Subtarget.hasAVX2();

  // Check that the mask is a broadcast.
  int BroadcastIdx = getSplatIndex(Mask);
  if (BroadcastIdx < 0)
    return SDValue();
  assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
                                            "a sorted mask where the broadcast "
                                            "comes from V1.");

  // Go up the chain of (vector) values to find a scalar load that we can
  // combine with the broadcast.
  // TODO: Combine this logic with findEltLoadSrc() used by
  // EltsFromConsecutiveLoads().
  int BitOffset = BroadcastIdx * NumEltBits;
  SDValue V = V1;
  for (;;) {
    switch (V.getOpcode()) {
    case ISD::BITCAST: {
      V = V.getOperand(0);
      continue;
    }
    case ISD::CONCAT_VECTORS: {
      int OpBitWidth = V.getOperand(0).getValueSizeInBits();
      int OpIdx = BitOffset / OpBitWidth;
      V = V.getOperand(OpIdx);
      BitOffset %= OpBitWidth;
      continue;
    }
    case ISD::EXTRACT_SUBVECTOR: {
      // The extraction index adds to the existing offset.
      unsigned EltBitWidth = V.getScalarValueSizeInBits();
      unsigned Idx = V.getConstantOperandVal(1);
      unsigned BeginOffset = Idx * EltBitWidth;
      BitOffset += BeginOffset;
      V = V.getOperand(0);
      continue;
    }
    case ISD::INSERT_SUBVECTOR: {
      SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
      int EltBitWidth = VOuter.getScalarValueSizeInBits();
      int Idx = (int)V.getConstantOperandVal(2);
      int NumSubElts = (int)VInner.getSimpleValueType().getVectorNumElements();
      int BeginOffset = Idx * EltBitWidth;
      int EndOffset = BeginOffset + NumSubElts * EltBitWidth;
      if (BeginOffset <= BitOffset && BitOffset < EndOffset) {
        BitOffset -= BeginOffset;
        V = VInner;
      } else {
        V = VOuter;
      }
      continue;
    }
    }
    break;
  }
  assert((BitOffset % NumEltBits) == 0 && "Illegal bit-offset");
  BroadcastIdx = BitOffset / NumEltBits;
  // Do we need to bitcast the source to retrieve the original broadcast index?
  bool BitCastSrc = V.getScalarValueSizeInBits() != NumEltBits;

  // Check if this is a broadcast of a scalar. We special case lowering
  // for scalars so that we can more effectively fold with loads.
  // If the original value has a larger element type than the shuffle, the
  // broadcast element is in essence truncated. Make that explicit to ease
  // folding.
  if (BitCastSrc && VT.isInteger())
    if (SDValue TruncBroadcast = lowerShuffleAsTruncBroadcast(
            DL, VT, V, BroadcastIdx, Subtarget, DAG))
      return TruncBroadcast;

  // Also check the simpler case, where we can directly reuse the scalar.
  if (!BitCastSrc &&
      ((V.getOpcode() == ISD::BUILD_VECTOR && V.hasOneUse()) ||
       (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0))) {
    V = V.getOperand(BroadcastIdx);

    // If we can't broadcast from a register, check that the input is a load.
    if (!BroadcastFromReg && !isShuffleFoldableLoad(V))
      return SDValue();
  } else if (ISD::isNormalLoad(V.getNode()) &&
             cast<LoadSDNode>(V)->isSimple()) {
    // We do not check for one-use of the vector load because a broadcast load
    // is expected to be a win for code size, register pressure, and possibly
    // uops even if the original vector load is not eliminated.

    // Reduce the vector load and shuffle to a broadcasted scalar load.
    LoadSDNode *Ld = cast<LoadSDNode>(V);
    SDValue BaseAddr = Ld->getOperand(1);
    MVT SVT = VT.getScalarType();
    unsigned Offset = BroadcastIdx * SVT.getStoreSize();
    assert((int)(Offset * 8) == BitOffset && "Unexpected bit-offset");
    SDValue NewAddr = DAG.getMemBasePlusOffset(BaseAddr, Offset, DL);

    // Directly form VBROADCAST_LOAD if we're using VBROADCAST opcode rather
    // than MOVDDUP.
    // FIXME: Should we add VBROADCAST_LOAD isel patterns for pre-AVX?
    if (Opcode == X86ISD::VBROADCAST) {
      SDVTList Tys = DAG.getVTList(VT, MVT::Other);
      SDValue Ops[] = {Ld->getChain(), NewAddr};
      V = DAG.getMemIntrinsicNode(
          X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, SVT,
          DAG.getMachineFunction().getMachineMemOperand(
              Ld->getMemOperand(), Offset, SVT.getStoreSize()));
      DAG.makeEquivalentMemoryOrdering(Ld, V);
      return DAG.getBitcast(VT, V);
    }
    assert(SVT == MVT::f64 && "Unexpected VT!");
    V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
                    DAG.getMachineFunction().getMachineMemOperand(
                        Ld->getMemOperand(), Offset, SVT.getStoreSize()));
    DAG.makeEquivalentMemoryOrdering(Ld, V);
  } else if (!BroadcastFromReg) {
    // We can't broadcast from a vector register.
    return SDValue();
  } else if (BitOffset != 0) {
    // We can only broadcast from the zero-element of a vector register,
    // but it can be advantageous to broadcast from the zero-element of a
    // subvector.
    if (!VT.is256BitVector() && !VT.is512BitVector())
      return SDValue();

    // VPERMQ/VPERMPD can perform the cross-lane shuffle directly.
    if (VT == MVT::v4f64 || VT == MVT::v4i64)
      return SDValue();

    // Only broadcast the zero-element of a 128-bit subvector.
    if ((BitOffset % 128) != 0)
      return SDValue();

    assert((BitOffset % V.getScalarValueSizeInBits()) == 0 &&
           "Unexpected bit-offset");
    assert((V.getValueSizeInBits() == 256 || V.getValueSizeInBits() == 512) &&
           "Unexpected vector size");
    unsigned ExtractIdx = BitOffset / V.getScalarValueSizeInBits();
    V = extract128BitVector(V, ExtractIdx, DAG, DL);
  }

  if (Opcode == X86ISD::MOVDDUP && !V.getValueType().isVector())
    V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
                    DAG.getBitcast(MVT::f64, V));

  // If this is a scalar, do the broadcast on this type and bitcast.
  if (!V.getValueType().isVector()) {
    assert(V.getScalarValueSizeInBits() == NumEltBits &&
           "Unexpected scalar size");
    MVT BroadcastVT = MVT::getVectorVT(V.getSimpleValueType(),
                                       VT.getVectorNumElements());
    return DAG.getBitcast(VT, DAG.getNode(Opcode, DL, BroadcastVT, V));
  }

  // We only support broadcasting from 128-bit vectors to minimize the
  // number of patterns we need to deal with in isel. So extract down to
  // 128-bits, removing as many bitcasts as possible.
  if (V.getValueSizeInBits() > 128)
    V = extract128BitVector(peekThroughBitcasts(V), 0, DAG, DL);

  // Otherwise cast V to a vector with the same element type as VT, but
  // possibly narrower than VT. Then perform the broadcast.
  unsigned NumSrcElts = V.getValueSizeInBits() / NumEltBits;
  MVT CastVT = MVT::getVectorVT(VT.getVectorElementType(), NumSrcElts);
  return DAG.getNode(Opcode, DL, VT, DAG.getBitcast(CastVT, V));
}
// Check for whether we can use INSERTPS to perform the shuffle. We only use
// INSERTPS when the V1 elements are already in the correct locations
// because otherwise we can just always use two SHUFPS instructions which
// are much smaller to encode than a SHUFPS and an INSERTPS. We can also
// perform INSERTPS if a single V1 element is out of place and all V2
// elements are zeroable.
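// The 8-bit INSERTPS immediate encodes the source element in bits [7:6], the
// destination element in bits [5:4], and a zero mask in bits [3:0].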
static bool matchShuffleAsInsertPS(SDValue &V1, SDValue &V2,
                                   unsigned &InsertPSMask,
                                   const APInt &Zeroable,
                                   ArrayRef<int> Mask, SelectionDAG &DAG) {
  assert(V1.getSimpleValueType().is128BitVector() && "Bad operand type!");
  assert(V2.getSimpleValueType().is128BitVector() && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  // Attempt to match INSERTPS with one element from VA or VB being
  // inserted into VA (or undef). If successful, V1, V2 and InsertPSMask
  // will be set to the matched values.
  auto matchAsInsertPS = [&](SDValue VA, SDValue VB,
                             ArrayRef<int> CandidateMask) {
    unsigned ZMask = 0;
    int VADstIndex = -1;
    int VBDstIndex = -1;
    bool VAUsedInPlace = false;

    for (int i = 0; i < 4; ++i) {
      // Synthesize a zero mask from the zeroable elements (includes undefs).
      if (Zeroable[i]) {
        ZMask |= 1 << i;
        continue;
      }

      // Flag if we use any VA inputs in place.
      if (i == CandidateMask[i]) {
        VAUsedInPlace = true;
        continue;
      }

      // We can only insert a single non-zeroable element.
      if (VADstIndex >= 0 || VBDstIndex >= 0)
        return false;

      if (CandidateMask[i] < 4) {
        // VA input out of place for insertion.
        VADstIndex = i;
      } else {
        // VB input for insertion.
        VBDstIndex = i;
      }
    }

    // Don't bother if we have no (non-zeroable) element for insertion.
    if (VADstIndex < 0 && VBDstIndex < 0)
      return false;

    // Determine element insertion src/dst indices. The src index is from the
    // start of the inserted vector, not the start of the concatenated vector.
    unsigned VBSrcIndex = 0;
    if (VADstIndex >= 0) {
      // If we have a VA input out of place, we use VA as the V2 element
      // insertion and don't use the original V2 at all.
      VBSrcIndex = CandidateMask[VADstIndex];
      VBDstIndex = VADstIndex;
      VB = VA;
    } else {
      VBSrcIndex = CandidateMask[VBDstIndex] - 4;
    }

    // If no V1 inputs are used in place, then the result is created only from
    // the zero mask and the V2 insertion - so remove V1 dependency.
    if (!VAUsedInPlace)
      VA = DAG.getUNDEF(MVT::v4f32);

    // Update V1, V2 and InsertPSMask accordingly.
    V1 = VA;
    V2 = VB;

    // Insert the V2 element into the desired position.
    InsertPSMask = VBSrcIndex << 6 | VBDstIndex << 4 | ZMask;
    assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
    return true;
  };

  if (matchAsInsertPS(V1, V2, Mask))
    return true;

  // Commute and try again.
  SmallVector<int, 4> CommutedMask(Mask.begin(), Mask.end());
  ShuffleVectorSDNode::commuteMask(CommutedMask);
  if (matchAsInsertPS(V2, V1, CommutedMask))
    return true;

  return false;
}
static SDValue lowerShuffleAsInsertPS(const SDLoc &DL, SDValue V1, SDValue V2,
                                      ArrayRef<int> Mask, const APInt &Zeroable,
                                      SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");

  // Attempt to match the insertps pattern.
  unsigned InsertPSMask;
  if (!matchShuffleAsInsertPS(V1, V2, InsertPSMask, Zeroable, Mask, DAG))
    return SDValue();

  // Insert the V2 element into the desired position.
  return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
                     DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
}
/// Try to lower a shuffle as a permute of the inputs followed by an
/// UNPCK instruction.
///
/// This specifically targets cases where we end up with alternating between
/// the two inputs, and so can permute them into something that feeds a single
/// UNPCK instruction. Note that this routine only targets integer vectors
/// because for floating point vectors we have a generalized SHUFPS lowering
/// strategy that handles everything that doesn't *exactly* match an unpack,
/// making this clever lowering unnecessary.
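///
/// For example, the v4i32 mask <0, 6, 1, 7> can permute V2 to <2, 3, u, u>
/// and then use a single UNPCKL to interleave V1 and the permuted V2.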
static SDValue lowerShuffleAsPermuteAndUnpack(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  assert(!VT.isFloatingPoint() &&
         "This routine only supports integer vectors.");
  assert(VT.is128BitVector() &&
         "This routine only works on 128-bit vectors.");
  assert(!V2.isUndef() &&
         "This routine should only be used when blending two inputs.");
  assert(Mask.size() >= 2 && "Single element masks are invalid.");

  int Size = Mask.size();

  int NumLoInputs =
      count_if(Mask, [Size](int M) { return M >= 0 && M % Size < Size / 2; });
  int NumHiInputs =
      count_if(Mask, [Size](int M) { return M % Size >= Size / 2; });

  bool UnpackLo = NumLoInputs >= NumHiInputs;

  auto TryUnpack = [&](int ScalarSize, int Scale) {
    SmallVector<int, 16> V1Mask((unsigned)Size, -1);
    SmallVector<int, 16> V2Mask((unsigned)Size, -1);

    for (int i = 0; i < Size; ++i) {
      if (Mask[i] < 0)
        continue;

      // Each element of the unpack contains Scale elements from this mask.
      int UnpackIdx = i / Scale;

      // We only handle the case where V1 feeds the first slots of the unpack.
      // We rely on canonicalization to ensure this is the case.
      if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
        return SDValue();

      // Setup the mask for this input. The indexing is tricky as we have to
      // handle the unpack stride.
      SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
      VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
          Mask[i] % Size;
    }

    // If we will have to shuffle both inputs to use the unpack, check whether
    // we can just unpack first and shuffle the result. If so, skip this unpack.
    if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
        !isNoopShuffleMask(V2Mask))
      return SDValue();

    // Shuffle the inputs into place.
    V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
    V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);

    // Cast the inputs to the type we will use to unpack them.
    MVT UnpackVT =
        MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), Size / Scale);
    V1 = DAG.getBitcast(UnpackVT, V1);
    V2 = DAG.getBitcast(UnpackVT, V2);

    // Unpack the inputs and cast the result back to the desired type.
    return DAG.getBitcast(
        VT, DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
                        UnpackVT, V1, V2));
  };

  // We try each unpack from the largest to the smallest to try and find one
  // that fits this mask.
  int OrigScalarSize = VT.getScalarSizeInBits();
  for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2)
    if (SDValue Unpack = TryUnpack(ScalarSize, ScalarSize / OrigScalarSize))
      return Unpack;

  // If we're shuffling with a zero vector then we're better off not doing
  // VECTOR_SHUFFLE(UNPCK()) as we lose track of those zero elements.
  if (ISD::isBuildVectorAllZeros(V1.getNode()) ||
      ISD::isBuildVectorAllZeros(V2.getNode()))
    return SDValue();

  // If none of the unpack-rooted lowerings worked (or were profitable) try an
  // initial unpack.
  if (NumLoInputs == 0 || NumHiInputs == 0) {
    assert((NumLoInputs > 0 || NumHiInputs > 0) &&
           "We have to have *some* inputs!");
    int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;

    // FIXME: We could consider the total complexity of the permute of each
    // possible unpacking. Or at the least we should consider how many
    // half-crossings are created.
    // FIXME: We could consider commuting the unpacks.

    SmallVector<int, 32> PermMask((unsigned)Size, -1);
    for (int i = 0; i < Size; ++i) {
      if (Mask[i] < 0)
        continue;

      assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");

      PermMask[i] =
          2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
    }
    return DAG.getVectorShuffle(
        VT, DL, DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL,
                            DL, VT, V1, V2),
        DAG.getUNDEF(VT), PermMask);
  }

  return SDValue();
}
/// Handle lowering of 2-lane 64-bit floating point shuffles.
///
/// This is the basis function for the 2-lane 64-bit shuffles as we have full
/// support for floating point shuffles but not integer shuffles. These
/// instructions will incur a domain crossing penalty on some chips though so
/// it is better to avoid lowering through this for integer vectors where
/// possible.
static SDValue lowerV2F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
  assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");

  if (V2.isUndef()) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2f64, V1, V2,
                                                    Mask, Subtarget, DAG))
      return Broadcast;

    // Straight shuffle of a single input vector. Simulate this by using the
    // single input as both of the "inputs" to this instruction.
    unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);

    if (Subtarget.hasAVX()) {
      // If we have AVX, we can use VPERMILPS which will allow folding a load
      // into the shuffle.
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
                         DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
    }

    return DAG.getNode(
        X86ISD::SHUFP, DL, MVT::v2f64,
        Mask[0] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
        Mask[1] == SM_SentinelUndef ? DAG.getUNDEF(MVT::v2f64) : V1,
        DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
  }
  assert(Mask[0] >= 0 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[1] >= 0 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[0] < 2 && "We sort V1 to be the first input.");
  assert(Mask[1] >= 2 && "We sort V2 to be the second input.");

  if (Subtarget.hasAVX2())
    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
      return Extract;

  // When loading a scalar and then shuffling it into a vector we can often do
  // the insertion cheaply.
  if (SDValue Insertion = lowerShuffleAsElementInsertion(
          DL, MVT::v2f64, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return Insertion;
  // Try inverting the insertion since for v2 masks it is easy to do and we
  // can't reliably sort the mask one way or the other.
  int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
                        Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
  if (SDValue Insertion = lowerShuffleAsElementInsertion(
          DL, MVT::v2f64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
    return Insertion;

  // Try to use one of the special instruction patterns to handle two common
  // blend patterns if a zero-blend above didn't work.
  if (isShuffleEquivalent(V1, V2, Mask, {0, 3}) ||
      isShuffleEquivalent(V1, V2, Mask, {1, 3}))
    if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
      // We can either use a special instruction to load over the low double or
      // to move just the low double.
      return DAG.getNode(
          X86ISD::MOVSD, DL, MVT::v2f64, V2,
          DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));

  if (Subtarget.hasSSE41())
    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2f64, Mask, V1, V2, DAG))
    return V;

  unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
  return DAG.getNode(X86ISD::SHUFP, DL, MVT::v2f64, V1, V2,
                     DAG.getTargetConstant(SHUFPDMask, DL, MVT::i8));
}
/// Handle lowering of 2-lane 64-bit integer shuffles.
///
/// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
/// the integer unit to minimize domain crossing penalties. However, for blends
/// it falls back to the floating point shuffle operation with appropriate bit
/// casting.
static SDValue lowerV2I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
  assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");

  if (V2.isUndef()) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v2i64, V1, V2,
                                                    Mask, Subtarget, DAG))
      return Broadcast;

    // Straight shuffle of a single input vector. For everything from SSE2
    // onward this has a single fast instruction with no scary immediates.
    // We have to map the mask as it is actually a v4i32 shuffle instruction.
    V1 = DAG.getBitcast(MVT::v4i32, V1);
    int WidenedMask[4] = {
        std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
        std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
    return DAG.getBitcast(
        MVT::v2i64,
        DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
                    getV4X86ShuffleImm8ForMask(WidenedMask, DL, DAG)));
  }
  assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[0] < 2 && "We sort V1 to be the first input.");
  assert(Mask[1] >= 2 && "We sort V2 to be the second input.");

  if (Subtarget.hasAVX2())
    if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
      return Extract;

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // When loading a scalar and then shuffling it into a vector we can often do
  // the insertion cheaply.
  if (SDValue Insertion = lowerShuffleAsElementInsertion(
          DL, MVT::v2i64, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return Insertion;
  // Try inverting the insertion since for v2 masks it is easy to do and we
  // can't reliably sort the mask one way or the other.
  int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
  if (SDValue Insertion = lowerShuffleAsElementInsertion(
          DL, MVT::v2i64, V2, V1, InverseMask, Zeroable, Subtarget, DAG))
    return Insertion;

  // We have different paths for blend lowering, but they all must use the
  // *exact* same predicate.
  bool IsBlendSupported = Subtarget.hasSSE41();
  if (IsBlendSupported)
    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v2i64, Mask, V1, V2, DAG))
    return V;

  // Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
  if (Subtarget.hasSSSE3()) {
    if (Subtarget.hasVLX())
      if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v2i64, V1, V2, Mask,
                                                Subtarget, DAG))
        return Rotate;

    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v2i64, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Rotate;
  }

  // If we have direct support for blends, we should lower by decomposing into
  // a permute. That will be faster than the domain cross.
  if (IsBlendSupported)
    return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2, Mask,
                                                Subtarget, DAG);

  // We implement this with SHUFPD which is pretty lame because it will likely
  // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
  // However, all the alternatives are still more cycles and newer chips don't
  // have this problem. It would be really nice if x86 had better shuffles here.
  V1 = DAG.getBitcast(MVT::v2f64, V1);
  V2 = DAG.getBitcast(MVT::v2f64, V2);
  return DAG.getBitcast(MVT::v2i64,
                        DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
}
/// Lower a vector shuffle using the SHUFPS instruction.
///
/// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
/// It makes no assumptions about whether this is the *best* lowering, it simply
/// uses it.
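///
/// SHUFPS fills the low half of the result with two selected elements of its
/// first operand and the high half with two selected elements of its second,
/// so each half of the mask must draw from a single source.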
13816 static SDValue lowerShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
13817 ArrayRef<int> Mask, SDValue V1,
13818 SDValue V2, SelectionDAG &DAG) {
13819 SDValue LowV = V1, HighV = V2;
13820 SmallVector<int, 4> NewMask(Mask.begin(), Mask.end());
13821 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
13823 if (NumV2Elements == 1) {
13824 int V2Index = find_if(Mask, [](int M) { return M >= 4; }) - Mask.begin();
13826 // Compute the index adjacent to V2Index and in the same half by toggling
13828 int V2AdjIndex = V2Index ^ 1;
13830 if (Mask[V2AdjIndex] < 0) {
13831 // Handles all the cases where we have a single V2 element and an undef.
13832 // This will only ever happen in the high lanes because we commute the
13833 // vector otherwise.
13835 std::swap(LowV, HighV);
13836 NewMask[V2Index] -= 4;
13838 // Handle the case where the V2 element ends up adjacent to a V1 element.
13839 // To make this work, blend them together as the first step.
13840 int V1Index = V2AdjIndex;
13841 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
13842 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
13843 getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
13845 // Now proceed to reconstruct the final blend as we have the necessary
13846 // high or low half formed.
13853 NewMask[V1Index] = 2; // We put the V1 element in V2[2].
13854 NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
13856 } else if (NumV2Elements == 2) {
13857 if (Mask[0] < 4 && Mask[1] < 4) {
13858 // Handle the easy case where we have V1 in the low lanes and V2 in the
13862 } else if (Mask[2] < 4 && Mask[3] < 4) {
13863 // We also handle the reversed case because this utility may get called
13864 // when we detect a SHUFPS pattern but can't easily commute the shuffle to
13865 // arrange things in the right direction.
13871 // We have a mixture of V1 and V2 in both low and high lanes. Rather than
13872 // trying to place elements directly, just blend them and set up the final
13873 // shuffle to place them.
13875 // The first two blend mask elements are for V1, the second two are for
13877 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
13878 Mask[2] < 4 ? Mask[2] : Mask[3],
13879 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
13880 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
13881 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
13882 getV4X86ShuffleImm8ForMask(BlendMask, DL, DAG));
13884 // Now we do a normal shuffle of V1 by giving V1 as both operands to
13885 // the shuffle.
13886 LowV = HighV = V1;
13887 NewMask[0] = Mask[0] < 4 ? 0 : 2;
13888 NewMask[1] = Mask[0] < 4 ? 2 : 0;
13889 NewMask[2] = Mask[2] < 4 ? 1 : 3;
13890 NewMask[3] = Mask[2] < 4 ? 3 : 1;
13891 }
13892 }
13893 return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
13894 getV4X86ShuffleImm8ForMask(NewMask, DL, DAG));
13895 }
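// Worked example (illustrative): for Mask = <0, 4, 2, 6>, V2 appears in both
// halves, so BlendMask = {0, 2, 0, 2} and the first SHUFP forms
// [V1[0], V1[2], V2[0], V2[2]]; the final unary SHUFP with
// NewMask = {0, 2, 1, 3} then produces exactly <0, 4, 2, 6>.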
13897 /// Lower 4-lane 32-bit floating point shuffles.
13899 /// Uses instructions exclusively from the floating point unit to minimize
13900 /// domain crossing penalties, as these are sufficient to implement all v4f32
13901 /// shuffles.
13902 static SDValue lowerV4F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13903 const APInt &Zeroable, SDValue V1, SDValue V2,
13904 const X86Subtarget &Subtarget,
13905 SelectionDAG &DAG) {
13906 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
13907 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
13908 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
13910 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
13912 if (NumV2Elements == 0) {
13913 // Check for being able to broadcast a single element.
13914 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f32, V1, V2,
13915 Mask, Subtarget, DAG))
13916 return Broadcast;
13918 // Use even/odd duplicate instructions for masks that match their pattern.
13919 if (Subtarget.hasSSE3()) {
13920 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
13921 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
13922 if (isShuffleEquivalent(V1, V2, Mask, {1, 1, 3, 3}))
13923 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
13924 }
13926 if (Subtarget.hasAVX()) {
13927 // If we have AVX, we can use VPERMILPS which will allow folding a load
13928 // into the shuffle.
13929 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
13930 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
13931 }
13933 // Use MOVLHPS/MOVHLPS to simulate unary shuffles. These are only valid
13934 // in SSE1 because otherwise they are widened to v2f64 and never get here.
13935 if (!Subtarget.hasSSE2()) {
13936 if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1}))
13937 return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V1);
13938 if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 2, 3}))
13939 return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V1, V1);
13940 }
13942 // Otherwise, use a straight shuffle of a single input vector. We pass the
13943 // input vector to both operands to simulate this with a SHUFPS.
13944 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
13945 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
13946 }
13948 if (Subtarget.hasAVX2())
13949 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
13950 return Extract;
13952 // There are special ways we can lower some single-element blends. However, we
13953 // have custom ways we can lower more complex single-element blends below that
13954 // we defer to if both this and BLENDPS fail to match, so restrict this to
13955 // when the V2 input is targeting element 0 of the mask -- that is the fast
13956 // case here.
13957 if (NumV2Elements == 1 && Mask[0] >= 4)
13958 if (SDValue V = lowerShuffleAsElementInsertion(
13959 DL, MVT::v4f32, V1, V2, Mask, Zeroable, Subtarget, DAG))
13960 return V;
13962 if (Subtarget.hasSSE41()) {
13963 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
13964 Zeroable, Subtarget, DAG))
13965 return Blend;
13967 // Use INSERTPS if we can complete the shuffle efficiently.
13968 if (SDValue V = lowerShuffleAsInsertPS(DL, V1, V2, Mask, Zeroable, DAG))
13969 return V;
13971 if (!isSingleSHUFPSMask(Mask))
13972 if (SDValue BlendPerm = lowerShuffleAsBlendAndPermute(DL, MVT::v4f32, V1,
13973 V2, Mask, DAG))
13974 return BlendPerm;
13975 }
13977 // Use low/high mov instructions. These are only valid in SSE1 because
13978 // otherwise they are widened to v2f64 and never get here.
13979 if (!Subtarget.hasSSE2()) {
13980 if (isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5}))
13981 return DAG.getNode(X86ISD::MOVLHPS, DL, MVT::v4f32, V1, V2);
13982 if (isShuffleEquivalent(V1, V2, Mask, {2, 3, 6, 7}))
13983 return DAG.getNode(X86ISD::MOVHLPS, DL, MVT::v4f32, V2, V1);
13984 }
13986 // Use dedicated unpack instructions for masks that match their pattern.
13987 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f32, Mask, V1, V2, DAG))
13988 return V;
13990 // Otherwise fall back to a SHUFPS lowering strategy.
13991 return lowerShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
13992 }
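// E.g. (illustrative): the even-duplicate mask {0, 0, 2, 2} is caught above
// as MOVSLDUP on SSE3+, while on plain SSE1 the same mask falls through to
// the trailing unary SHUFPS (shufps $0xA0 with V1 as both operands).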
13994 /// Lower 4-lane i32 vector shuffles.
13996 /// We try to handle these with integer-domain shuffles where we can, but for
13997 /// blends we use the floating point domain blend instructions.
13998 static SDValue lowerV4I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
13999 const APInt &Zeroable, SDValue V1, SDValue V2,
14000 const X86Subtarget &Subtarget,
14001 SelectionDAG &DAG) {
14002 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
14003 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
14004 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
14006 // Whenever we can lower this as a zext, that instruction is strictly faster
14007 // than any alternative. It also allows us to fold memory operands into the
14008 // shuffle in many cases.
14009 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2, Mask,
14010 Zeroable, Subtarget, DAG))
14011 return ZExt;
14013 int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
14015 if (NumV2Elements == 0) {
14016 // Try to use broadcast unless the mask only has one non-undef element.
14017 if (count_if(Mask, [](int M) { return M >= 0 && M < 4; }) > 1) {
14018 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i32, V1, V2,
14019 Mask, Subtarget, DAG))
14020 return Broadcast;
14021 }
14023 // Straight shuffle of a single input vector. For everything from SSE2
14024 // onward this has a single fast instruction with no scary immediates.
14025 // We coerce the shuffle pattern to be compatible with UNPCK instructions
14026 // but we aren't actually going to use the UNPCK instruction because doing
14027 // so prevents folding a load into this instruction or making a copy.
14028 const int UnpackLoMask[] = {0, 0, 1, 1};
14029 const int UnpackHiMask[] = {2, 2, 3, 3};
14030 if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 1, 1}))
14031 Mask = UnpackLoMask;
14032 else if (isShuffleEquivalent(V1, V2, Mask, {2, 2, 3, 3}))
14033 Mask = UnpackHiMask;
14035 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
14036 getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
14037 }
14039 if (Subtarget.hasAVX2())
14040 if (SDValue Extract = lowerShuffleOfExtractsAsVperm(DL, V1, V2, Mask, DAG))
14041 return Extract;
14043 // Try to use shift instructions.
14044 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask,
14045 Zeroable, Subtarget, DAG))
14046 return Shift;
14048 // There are special ways we can lower some single-element blends.
14049 if (NumV2Elements == 1)
14050 if (SDValue V = lowerShuffleAsElementInsertion(
14051 DL, MVT::v4i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
14052 return V;
14054 // We have different paths for blend lowering, but they all must use the
14055 // *exact* same predicate.
14056 bool IsBlendSupported = Subtarget.hasSSE41();
14057 if (IsBlendSupported)
14058 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
14059 Zeroable, Subtarget, DAG))
14060 return Blend;
14062 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask,
14063 Zeroable, Subtarget, DAG))
14064 return Masked;
14066 // Use dedicated unpack instructions for masks that match their pattern.
14067 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i32, Mask, V1, V2, DAG))
14068 return V;
14070 // Try to use byte rotation instructions.
14071 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
14072 if (Subtarget.hasSSSE3()) {
14073 if (Subtarget.hasVLX())
14074 if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v4i32, V1, V2, Mask,
14075 Subtarget, DAG))
14076 return Rotate;
14078 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i32, V1, V2, Mask,
14079 Subtarget, DAG))
14080 return Rotate;
14081 }
14083 // Assume that a single SHUFPS is faster than an alternative sequence of
14084 // multiple instructions (even if the CPU has a domain penalty).
14085 // If some CPU is harmed by the domain switch, we can fix it in a later pass.
14086 if (!isSingleSHUFPSMask(Mask)) {
14087 // If we have direct support for blends, we should lower by decomposing into
14088 // a permute. That will be faster than the domain cross.
14089 if (IsBlendSupported)
14090 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2, Mask,
14091 Subtarget, DAG);
14093 // Try to lower by permuting the inputs into an unpack instruction.
14094 if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v4i32, V1, V2,
14095 Mask, Subtarget, DAG))
14096 return Unpack;
14097 }
14099 // We implement this with SHUFPS because it can blend from two vectors.
14100 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
14101 // up the inputs, bypassing domain shift penalties that we would incur if we
14102 // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
14103 // relevant.
14104 SDValue CastV1 = DAG.getBitcast(MVT::v4f32, V1);
14105 SDValue CastV2 = DAG.getBitcast(MVT::v4f32, V2);
14106 SDValue ShufPS = DAG.getVectorShuffle(MVT::v4f32, DL, CastV1, CastV2, Mask);
14107 return DAG.getBitcast(MVT::v4i32, ShufPS);
14108 }
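// E.g. (illustrative): when the earlier checks do not fire, the even-extract
// mask <0, 2, 4, 6> is a single-SHUFPS mask and reaches this point, becoming
// one 'shufps $0x88' on the v4f32 bitcasts -- cheaper than an integer-domain
// PSHUFD+blend sequence.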
14110 /// Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
14111 /// shuffle lowering, and the most complex part.
14113 /// The lowering strategy is to try to form pairs of input lanes which are
14114 /// targeted at the same half of the final vector, and then use a dword shuffle
14115 /// to place them onto the right half, and finally unpack the paired lanes into
14116 /// their final position.
14118 /// The exact breakdown of how to form these dword pairs and align them on the
14119 /// correct sides is really tricky. See the comments within the function for
14120 /// more of the details.
14122 /// This code also handles repeated 128-bit lanes of v8i16 shuffles, but each
14123 /// lane must shuffle the *exact* same way. In fact, you must pass a v8 Mask to
14124 /// this routine for it to work correctly. To shuffle a 256-bit or 512-bit i16
14125 /// vector, form the analogous 128-bit 8-element Mask.
14126 static SDValue lowerV8I16GeneralSingleInputShuffle(
14127 const SDLoc &DL, MVT VT, SDValue V, MutableArrayRef<int> Mask,
14128 const X86Subtarget &Subtarget, SelectionDAG &DAG) {
14129 assert(VT.getVectorElementType() == MVT::i16 && "Bad input type!");
14130 MVT PSHUFDVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
14132 assert(Mask.size() == 8 && "Shuffle mask length doesn't match!");
14133 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
14134 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
14136 // Attempt to directly match PSHUFLW or PSHUFHW.
14137 if (isUndefOrInRange(LoMask, 0, 4) &&
14138 isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
14139 return DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
14140 getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
14141 }
14142 if (isUndefOrInRange(HiMask, 4, 8) &&
14143 isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
14144 for (int i = 0; i != 4; ++i)
14145 HiMask[i] = (HiMask[i] < 0 ? HiMask[i] : (HiMask[i] - 4));
14146 return DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
14147 getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
14148 }
14150 SmallVector<int, 4> LoInputs;
14151 copy_if(LoMask, std::back_inserter(LoInputs), [](int M) { return M >= 0; });
14152 array_pod_sort(LoInputs.begin(), LoInputs.end());
14153 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
14154 SmallVector<int, 4> HiInputs;
14155 copy_if(HiMask, std::back_inserter(HiInputs), [](int M) { return M >= 0; });
14156 array_pod_sort(HiInputs.begin(), HiInputs.end());
14157 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
14158 int NumLToL = llvm::lower_bound(LoInputs, 4) - LoInputs.begin();
14159 int NumHToL = LoInputs.size() - NumLToL;
14160 int NumLToH = llvm::lower_bound(HiInputs, 4) - HiInputs.begin();
14161 int NumHToH = HiInputs.size() - NumLToH;
14162 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
14163 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
14164 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
14165 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
14167 // If we are shuffling values from one half - check how many different DWORD
14168 // pairs we need to create. If only 1 or 2 then we can perform this as a
14169 // PSHUFLW/PSHUFHW + PSHUFD instead of the PSHUFD+PSHUFLW+PSHUFHW chain below.
14170 auto ShuffleDWordPairs = [&](ArrayRef<int> PSHUFHalfMask,
14171 ArrayRef<int> PSHUFDMask, unsigned ShufWOp) {
14172 V = DAG.getNode(ShufWOp, DL, VT, V,
14173 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
14174 V = DAG.getBitcast(PSHUFDVT, V);
14175 V = DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, V,
14176 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG));
14177 return DAG.getBitcast(VT, V);
14178 };
14180 if ((NumHToL + NumHToH) == 0 || (NumLToL + NumLToH) == 0) {
14181 int PSHUFDMask[4] = { -1, -1, -1, -1 };
14182 SmallVector<std::pair<int, int>, 4> DWordPairs;
14183 int DOffset = ((NumHToL + NumHToH) == 0 ? 0 : 2);
14185 // Collect the different DWORD pairs.
14186 for (int DWord = 0; DWord != 4; ++DWord) {
14187 int M0 = Mask[2 * DWord + 0];
14188 int M1 = Mask[2 * DWord + 1];
14189 M0 = (M0 >= 0 ? M0 % 4 : M0);
14190 M1 = (M1 >= 0 ? M1 % 4 : M1);
14191 if (M0 < 0 && M1 < 0)
14192 continue;
14194 bool Match = false;
14195 for (int j = 0, e = DWordPairs.size(); j < e; ++j) {
14196 auto &DWordPair = DWordPairs[j];
14197 if ((M0 < 0 || isUndefOrEqual(DWordPair.first, M0)) &&
14198 (M1 < 0 || isUndefOrEqual(DWordPair.second, M1))) {
14199 DWordPair.first = (M0 >= 0 ? M0 : DWordPair.first);
14200 DWordPair.second = (M1 >= 0 ? M1 : DWordPair.second);
14201 PSHUFDMask[DWord] = DOffset + j;
14202 Match = true;
14203 break;
14204 }
14205 }
14206 if (!Match) {
14207 PSHUFDMask[DWord] = DOffset + DWordPairs.size();
14208 DWordPairs.push_back(std::make_pair(M0, M1));
14209 }
14210 }
14212 if (DWordPairs.size() <= 2) {
14213 DWordPairs.resize(2, std::make_pair(-1, -1));
14214 int PSHUFHalfMask[4] = {DWordPairs[0].first, DWordPairs[0].second,
14215 DWordPairs[1].first, DWordPairs[1].second};
14216 if ((NumHToL + NumHToH) == 0)
14217 return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFLW);
14218 if ((NumLToL + NumLToH) == 0)
14219 return ShuffleDWordPairs(PSHUFHalfMask, PSHUFDMask, X86ISD::PSHUFHW);
14220 }
14221 }
14223 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
14224 // such inputs we can swap two of the dwords across the half mark and end up
14225 // with <=2 inputs to each half in each half. Once there, we can fall through
14226 // to the generic code below. For example:
14228 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
14229 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
14231 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
14232 // and an existing 2-into-2 on the other half. In this case we may have to
14233 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
14234 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
14235 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
14236 // because any other situation (including a 3-into-1 or 1-into-3 in the other
14237 // half than the one we target for fixing) will be fixed when we re-enter this
14238 // path. We will also combine away any sequence of PSHUFD instructions that
14239 // result into a single instruction. Here is an example of the tricky case:
14241 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
14242 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
14244 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
14246 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
14247 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
14249 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
14250 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
14252 // The result is fine to be handled by the generic logic.
14253 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
14254 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
14255 int AOffset, int BOffset) {
14256 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
14257 "Must call this with A having 3 or 1 inputs from the A half.");
14258 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
14259 "Must call this with B having 1 or 3 inputs from the B half.");
14260 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
14261 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
14263 bool ThreeAInputs = AToAInputs.size() == 3;
14265 // Compute the index of dword with only one word among the three inputs in
14266 // a half by taking the sum of the half with three inputs and subtracting
14267 // the sum of the actual three inputs. The difference is the remaining
14268 // word.
14269 int ADWord = 0, BDWord = 0;
14270 int &TripleDWord = ThreeAInputs ? ADWord : BDWord;
14271 int &OneInputDWord = ThreeAInputs ? BDWord : ADWord;
14272 int TripleInputOffset = ThreeAInputs ? AOffset : BOffset;
14273 ArrayRef<int> TripleInputs = ThreeAInputs ? AToAInputs : BToAInputs;
14274 int OneInput = ThreeAInputs ? BToAInputs[0] : AToAInputs[0];
14275 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
14276 int TripleNonInputIdx =
14277 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
14278 TripleDWord = TripleNonInputIdx / 2;
14280 // We use xor with one to compute the adjacent DWord to whichever one the
14281 // OneInput is in.
14282 OneInputDWord = (OneInput / 2) ^ 1;
14284 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
14285 // and BToA inputs. If there is also such a problem with the BToB and AToB
14286 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
14287 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
14288 // is essential that we don't *create* a 3<-1 as then we might oscillate.
14289 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
14290 // Compute how many inputs will be flipped by swapping these DWords. We
14291 // need to balance this to ensure we don't form a 3-1 shuffle in the other
14292 // half.
14294 int NumFlippedAToBInputs =
14295 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
14296 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
14297 int NumFlippedBToBInputs =
14298 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
14299 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
14300 if ((NumFlippedAToBInputs == 1 &&
14301 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
14302 (NumFlippedBToBInputs == 1 &&
14303 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
14304 // We choose whether to fix the A half or B half based on whether that
14305 // half has zero flipped inputs. At zero, we may not be able to fix it
14306 // with that half. We also bias towards fixing the B half because that
14307 // will more commonly be the high half, and we have to bias one way.
14308 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
14309 ArrayRef<int> Inputs) {
14310 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
14311 bool IsFixIdxInput = is_contained(Inputs, PinnedIdx ^ 1);
14312 // Determine whether the free index is in the flipped dword or the
14313 // unflipped dword based on where the pinned index is. We use this bit
14314 // in an xor to conditionally select the adjacent dword.
14315 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
14316 bool IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
14317 if (IsFixIdxInput == IsFixFreeIdxInput)
14318 FixFreeIdx += 1;
14319 IsFixFreeIdxInput = is_contained(Inputs, FixFreeIdx);
14320 assert(IsFixIdxInput != IsFixFreeIdxInput &&
14321 "We need to be changing the number of flipped inputs!");
14322 int PSHUFHalfMask[] = {0, 1, 2, 3};
14323 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
14324 V = DAG.getNode(
14325 FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
14326 MVT::getVectorVT(MVT::i16, V.getValueSizeInBits() / 16), V,
14327 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DL, DAG));
14329 for (int &M : Mask)
14330 if (M >= 0 && M == FixIdx)
14331 M = FixFreeIdx;
14332 else if (M >= 0 && M == FixFreeIdx)
14333 M = FixIdx;
14334 };
14335 if (NumFlippedBToBInputs != 0) {
14336 int BPinnedIdx =
14337 BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
14338 FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
14339 } else {
14340 assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
14341 int APinnedIdx = ThreeAInputs ? TripleNonInputIdx : OneInput;
14342 FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
14343 }
14344 }
14345 }
14347 int PSHUFDMask[] = {0, 1, 2, 3};
14348 PSHUFDMask[ADWord] = BDWord;
14349 PSHUFDMask[BDWord] = ADWord;
14350 V = DAG.getBitcast(
14351 VT,
14352 DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
14353 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
14355 // Adjust the mask to match the new locations of A and B.
14356 for (int &M : Mask)
14357 if (M >= 0 && M/2 == ADWord)
14358 M = 2 * BDWord + M % 2;
14359 else if (M >= 0 && M/2 == BDWord)
14360 M = 2 * ADWord + M % 2;
14362 // Recurse back into this routine to re-compute state now that this isn't
14363 // a 3 and 1 problem.
14364 return lowerV8I16GeneralSingleInputShuffle(DL, VT, V, Mask, Subtarget, DAG);
14365 };
14366 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
14367 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
14368 if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
14369 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
14371 // At this point there are at most two inputs to the low and high halves from
14372 // each half. That means the inputs can always be grouped into dwords and
14373 // those dwords can then be moved to the correct half with a dword shuffle.
14374 // We use at most one low and one high word shuffle to collect these paired
14375 // inputs into dwords, and finally a dword shuffle to place them.
14376 int PSHUFLMask[4] = {-1, -1, -1, -1};
14377 int PSHUFHMask[4] = {-1, -1, -1, -1};
14378 int PSHUFDMask[4] = {-1, -1, -1, -1};
14380 // First fix the masks for all the inputs that are staying in their
14381 // original halves. This will then dictate the targets of the cross-half
14382 // shuffles.
14383 auto fixInPlaceInputs =
14384 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
14385 MutableArrayRef<int> SourceHalfMask,
14386 MutableArrayRef<int> HalfMask, int HalfOffset) {
14387 if (InPlaceInputs.empty())
14388 return;
14389 if (InPlaceInputs.size() == 1) {
14390 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
14391 InPlaceInputs[0] - HalfOffset;
14392 PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
14393 return;
14394 }
14395 if (IncomingInputs.empty()) {
14396 // Just fix all of the in place inputs.
14397 for (int Input : InPlaceInputs) {
14398 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
14399 PSHUFDMask[Input / 2] = Input / 2;
14400 }
14401 return;
14402 }
14404 assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
14405 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
14406 InPlaceInputs[0] - HalfOffset;
14407 // Put the second input next to the first so that they are packed into
14408 // a dword. We find the adjacent index by toggling the low bit.
14409 int AdjIndex = InPlaceInputs[0] ^ 1;
14410 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
14411 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
14412 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
14413 };
14414 fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
14415 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
14417 // Now gather the cross-half inputs and place them into a free dword of
14418 // their target half.
14419 // FIXME: This operation could almost certainly be simplified dramatically to
14420 // look more like the 3-1 fixing operation.
14421 auto moveInputsToRightHalf = [&PSHUFDMask](
14422 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
14423 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
14424 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
14425 int DestOffset) {
14426 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
14427 return SourceHalfMask[Word] >= 0 && SourceHalfMask[Word] != Word;
14428 };
14429 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
14430 int Word) {
14431 int LowWord = Word & ~1;
14432 int HighWord = Word | 1;
14433 return isWordClobbered(SourceHalfMask, LowWord) ||
14434 isWordClobbered(SourceHalfMask, HighWord);
14435 };
14437 if (IncomingInputs.empty())
14438 return;
14440 if (ExistingInputs.empty()) {
14441 // Map any dwords with inputs from them into the right half.
14442 for (int Input : IncomingInputs) {
14443 // If the source half mask maps over the inputs, turn those into
14444 // swaps and use the swapped lane.
14445 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
14446 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] < 0) {
14447 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
14448 Input - SourceOffset;
14449 // We have to swap the uses in our half mask in one sweep.
14450 for (int &M : HalfMask)
14451 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
14452 M = Input;
14453 else if (M == Input)
14454 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
14455 } else {
14456 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
14457 Input - SourceOffset &&
14458 "Previous placement doesn't match!");
14459 }
14460 // Note that this correctly re-maps both when we do a swap and when
14461 // we observe the other side of the swap above. We rely on that to
14462 // avoid swapping the members of the input list directly.
14463 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
14464 }
14466 // Map the input's dword into the correct half.
14467 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] < 0)
14468 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
14469 else
14470 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
14471 Input / 2 &&
14472 "Previous placement doesn't match!");
14473 }
14475 // And just directly shift any other-half mask elements to be same-half
14476 // as we will have mirrored the dword containing the element into the
14477 // same position within that half.
14478 for (int &M : HalfMask)
14479 if (M >= SourceOffset && M < SourceOffset + 4) {
14480 M = M - SourceOffset + DestOffset;
14481 assert(M >= 0 && "This should never wrap below zero!");
14482 }
14483 return;
14484 }
14486 // Ensure we have the input in a viable dword of its current half. This
14487 // is particularly tricky because the original position may be clobbered
14488 // by inputs being moved and *staying* in that half.
14489 if (IncomingInputs.size() == 1) {
14490 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
14491 int InputFixed = find(SourceHalfMask, -1) - std::begin(SourceHalfMask) +
14492 SourceOffset;
14493 SourceHalfMask[InputFixed - SourceOffset] =
14494 IncomingInputs[0] - SourceOffset;
14495 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
14496 InputFixed);
14497 IncomingInputs[0] = InputFixed;
14498 }
14499 } else if (IncomingInputs.size() == 2) {
14500 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
14501 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
14502 // We have two non-adjacent or clobbered inputs we need to extract from
14503 // the source half. To do this, we need to map them into some adjacent
14504 // dword slot in the source mask.
14505 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
14506 IncomingInputs[1] - SourceOffset};
14508 // If there is a free slot in the source half mask adjacent to one of
14509 // the inputs, place the other input in it. We use (Index XOR 1) to
14510 // compute an adjacent index.
14511 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
14512 SourceHalfMask[InputsFixed[0] ^ 1] < 0) {
14513 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
14514 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
14515 InputsFixed[1] = InputsFixed[0] ^ 1;
14516 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
14517 SourceHalfMask[InputsFixed[1] ^ 1] < 0) {
14518 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
14519 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
14520 InputsFixed[0] = InputsFixed[1] ^ 1;
14521 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] < 0 &&
14522 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] < 0) {
14523 // The two inputs are in the same DWord but it is clobbered and the
14524 // adjacent DWord isn't used at all. Move both inputs to the free
14525 // slot.
14526 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
14527 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
14528 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
14529 InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
14530 } else {
14531 // The only way we hit this point is if there is no clobbering
14532 // (because there are no off-half inputs to this half) and there is no
14533 // free slot adjacent to one of the inputs. In this case, we have to
14534 // swap an input with a non-input.
14535 for (int i = 0; i < 4; ++i)
14536 assert((SourceHalfMask[i] < 0 || SourceHalfMask[i] == i) &&
14537 "We can't handle any clobbers here!");
14538 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
14539 "Cannot have adjacent inputs here!");
14541 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
14542 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
14544 // We also have to update the final source mask in this case because
14545 // it may need to undo the above swap.
14546 for (int &M : FinalSourceHalfMask)
14547 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
14548 M = InputsFixed[1] + SourceOffset;
14549 else if (M == InputsFixed[1] + SourceOffset)
14550 M = (InputsFixed[0] ^ 1) + SourceOffset;
14552 InputsFixed[1] = InputsFixed[0] ^ 1;
14553 }
14555 // Point everything at the fixed inputs.
14556 for (int &M : HalfMask)
14557 if (M == IncomingInputs[0])
14558 M = InputsFixed[0] + SourceOffset;
14559 else if (M == IncomingInputs[1])
14560 M = InputsFixed[1] + SourceOffset;
14562 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
14563 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
14564 }
14565 } else {
14566 llvm_unreachable("Unhandled input size!");
14567 }
14569 // Now hoist the DWord down to the right half.
14570 int FreeDWord = (PSHUFDMask[DestOffset / 2] < 0 ? 0 : 1) + DestOffset / 2;
14571 assert(PSHUFDMask[FreeDWord] < 0 && "DWord not free");
14572 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
14573 for (int &M : HalfMask)
14574 for (int Input : IncomingInputs)
14575 if (M == Input)
14576 M = FreeDWord * 2 + Input % 2;
14577 };
14578 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
14579 /*SourceOffset*/ 4, /*DestOffset*/ 0);
14580 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
14581 /*SourceOffset*/ 0, /*DestOffset*/ 4);
14583 // Now enact all the shuffles we've computed to move the inputs into their
14584 // target halves.
14585 if (!isNoopShuffleMask(PSHUFLMask))
14586 V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
14587 getV4X86ShuffleImm8ForMask(PSHUFLMask, DL, DAG));
14588 if (!isNoopShuffleMask(PSHUFHMask))
14589 V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
14590 getV4X86ShuffleImm8ForMask(PSHUFHMask, DL, DAG));
14591 if (!isNoopShuffleMask(PSHUFDMask))
14592 V = DAG.getBitcast(
14593 VT,
14594 DAG.getNode(X86ISD::PSHUFD, DL, PSHUFDVT, DAG.getBitcast(PSHUFDVT, V),
14595 getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
14597 // At this point, each half should contain all its inputs, and we can then
14598 // just shuffle them into their final position.
14599 assert(count_if(LoMask, [](int M) { return M >= 4; }) == 0 &&
14600 "Failed to lift all the high half inputs to the low mask!");
14601 assert(count_if(HiMask, [](int M) { return M >= 0 && M < 4; }) == 0 &&
14602 "Failed to lift all the low half inputs to the high mask!");
14604 // Do a half shuffle for the low mask.
14605 if (!isNoopShuffleMask(LoMask))
14606 V = DAG.getNode(X86ISD::PSHUFLW, DL, VT, V,
14607 getV4X86ShuffleImm8ForMask(LoMask, DL, DAG));
14609 // Do a half shuffle with the high mask after shifting its values down.
14610 for (int &M : HiMask)
14611 if (M >= 0)
14612 M -= 4;
14613 if (!isNoopShuffleMask(HiMask))
14614 V = DAG.getNode(X86ISD::PSHUFHW, DL, VT, V,
14615 getV4X86ShuffleImm8ForMask(HiMask, DL, DAG));
14617 return V;
14618 }
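// Worked example (illustrative): for the single-input mask
// [0, 1, 4, 5, 2, 3, 6, 7] the word pairs are already adjacent, so both half
// shuffles become no-ops and the whole lowering collapses to a single PSHUFD
// with the dword mask {0, 2, 1, 3}.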
14620 /// Helper to form a PSHUFB-based shuffle+blend, opportunistically avoiding the
14621 /// blend if only one input is used.
14622 static SDValue lowerShuffleAsBlendOfPSHUFBs(
14623 const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
14624 const APInt &Zeroable, SelectionDAG &DAG, bool &V1InUse, bool &V2InUse) {
14625 assert(!is128BitLaneCrossingShuffleMask(VT, Mask) &&
14626 "Lane crossing shuffle masks not supported");
14628 int NumBytes = VT.getSizeInBits() / 8;
14629 int Size = Mask.size();
14630 int Scale = NumBytes / Size;
14632 SmallVector<SDValue, 64> V1Mask(NumBytes, DAG.getUNDEF(MVT::i8));
14633 SmallVector<SDValue, 64> V2Mask(NumBytes, DAG.getUNDEF(MVT::i8));
14634 V1InUse = false;
14635 V2InUse = false;
14637 for (int i = 0; i < NumBytes; ++i) {
14638 int M = Mask[i / Scale];
14639 if (M < 0)
14640 continue;
14642 const int ZeroMask = 0x80;
14643 int V1Idx = M < Size ? M * Scale + i % Scale : ZeroMask;
14644 int V2Idx = M < Size ? ZeroMask : (M - Size) * Scale + i % Scale;
14645 if (Zeroable[i / Scale])
14646 V1Idx = V2Idx = ZeroMask;
14648 V1Mask[i] = DAG.getConstant(V1Idx, DL, MVT::i8);
14649 V2Mask[i] = DAG.getConstant(V2Idx, DL, MVT::i8);
14650 V1InUse |= (ZeroMask != V1Idx);
14651 V2InUse |= (ZeroMask != V2Idx);
14652 }
14654 MVT ShufVT = MVT::getVectorVT(MVT::i8, NumBytes);
14655 if (V1InUse)
14656 V1 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V1),
14657 DAG.getBuildVector(ShufVT, DL, V1Mask));
14658 if (V2InUse)
14659 V2 = DAG.getNode(X86ISD::PSHUFB, DL, ShufVT, DAG.getBitcast(ShufVT, V2),
14660 DAG.getBuildVector(ShufVT, DL, V2Mask));
14662 // If we need shuffled inputs from both, blend the two.
14663 SDValue V;
14664 if (V1InUse && V2InUse)
14665 V = DAG.getNode(ISD::OR, DL, ShufVT, V1, V2);
14666 else
14667 V = V1InUse ? V1 : V2;
14669 // Cast the result back to the correct type.
14670 return DAG.getBitcast(VT, V);
14671 }
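// E.g. (illustrative): for a v8i16 mask <0, 8, 2, 10, 4, 12, 6, 14>, V1's
// byte mask is {0,1, 0x80,0x80, 4,5, 0x80,0x80, ...} and V2's is the
// complement; since PSHUFB writes a zero for any control byte with the high
// bit set, the trailing OR acts as the blend of the two shuffled inputs.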
14673 /// Generic lowering of 8-lane i16 shuffles.
14675 /// This handles both single-input shuffles and combined shuffle/blends with
14676 /// two inputs. The single input shuffles are immediately delegated to
14677 /// a dedicated lowering routine.
14679 /// The blends are lowered in one of three fundamental ways. If there are few
14680 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
14681 /// of the input is significantly cheaper when lowered as an interleaving of
14682 /// the two inputs, try to interleave them. Otherwise, blend the low and high
14683 /// halves of the inputs separately (making them have relatively few inputs)
14684 /// and then concatenate them.
14685 static SDValue lowerV8I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
14686 const APInt &Zeroable, SDValue V1, SDValue V2,
14687 const X86Subtarget &Subtarget,
14688 SelectionDAG &DAG) {
14689 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
14690 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
14691 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
14693 // Whenever we can lower this as a zext, that instruction is strictly faster
14694 // than any alternative.
14695 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i16, V1, V2, Mask,
14696 Zeroable, Subtarget, DAG))
14697 return ZExt;
14699 int NumV2Inputs = count_if(Mask, [](int M) { return M >= 8; });
14701 if (NumV2Inputs == 0) {
14702 // Try to use shift instructions.
14703 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V1, Mask,
14704 Zeroable, Subtarget, DAG))
14705 return Shift;
14707 // Check for being able to broadcast a single element.
14708 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i16, V1, V2,
14709 Mask, Subtarget, DAG))
14710 return Broadcast;
14712 // Try to use bit rotation instructions.
14713 if (SDValue Rotate = lowerShuffleAsBitRotate(DL, MVT::v8i16, V1, Mask,
14714 Subtarget, DAG))
14715 return Rotate;
14717 // Use dedicated unpack instructions for masks that match their pattern.
14718 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
14719 return V;
14721 // Use dedicated pack instructions for masks that match their pattern.
14722 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
14723 Subtarget))
14724 return V;
14726 // Try to use byte rotation instructions.
14727 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V1, Mask,
14728 Subtarget, DAG))
14729 return Rotate;
14731 // Make a copy of the mask so it can be modified.
14732 SmallVector<int, 8> MutableMask(Mask.begin(), Mask.end());
14733 return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v8i16, V1, MutableMask,
14734 Subtarget, DAG);
14735 }
14737 assert(llvm::any_of(Mask, [](int M) { return M >= 0 && M < 8; }) &&
14738 "All single-input shuffles should be canonicalized to be V1-input "
14739 "shuffles.");
14741 // Try to use shift instructions.
14742 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask,
14743 Zeroable, Subtarget, DAG))
14744 return Shift;
14746 // See if we can use SSE4A Extraction / Insertion.
14747 if (Subtarget.hasSSE4A())
14748 if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v8i16, V1, V2, Mask,
14749 Zeroable, DAG))
14750 return V;
14752 // There are special ways we can lower some single-element blends.
14753 if (NumV2Inputs == 1)
14754 if (SDValue V = lowerShuffleAsElementInsertion(
14755 DL, MVT::v8i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
14756 return V;
14758 // We have different paths for blend lowering, but they all must use the
14759 // *exact* same predicate.
14760 bool IsBlendSupported = Subtarget.hasSSE41();
14761 if (IsBlendSupported)
14762 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
14763 Zeroable, Subtarget, DAG))
14764 return Blend;
14766 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask,
14767 Zeroable, Subtarget, DAG))
14768 return Masked;
14770 // Use dedicated unpack instructions for masks that match their pattern.
14771 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i16, Mask, V1, V2, DAG))
14772 return V;
14774 // Use dedicated pack instructions for masks that match their pattern.
14775 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v8i16, Mask, V1, V2, DAG,
14776 Subtarget))
14777 return V;
14779 // Try to use byte rotation instructions.
14780 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i16, V1, V2, Mask,
14781 Subtarget, DAG))
14782 return Rotate;
14784 if (SDValue BitBlend =
14785 lowerShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
14786 return BitBlend;
14788 // Try to use byte shift instructions to mask.
14789 if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v8i16, V1, V2, Mask,
14790 Zeroable, Subtarget, DAG))
14791 return V;
14793 // Attempt to lower using compaction, SSE41 is necessary for PACKUSDW.
14794 // We could use SIGN_EXTEND_INREG+PACKSSDW for older targets but this seems to
14795 // be slower than a PSHUFLW+PSHUFHW+PSHUFD chain.
14796 int NumEvenDrops = canLowerByDroppingEvenElements(Mask, false);
14797 if ((NumEvenDrops == 1 || NumEvenDrops == 2) && Subtarget.hasSSE41() &&
14798 !Subtarget.hasVLX()) {
14799 SmallVector<SDValue, 8> DWordClearOps(4, DAG.getConstant(0, DL, MVT::i32));
14800 for (unsigned i = 0; i != 4; i += 1 << (NumEvenDrops - 1))
14801 DWordClearOps[i] = DAG.getConstant(0xFFFF, DL, MVT::i32);
14802 SDValue DWordClearMask = DAG.getBuildVector(MVT::v4i32, DL, DWordClearOps);
14803 V1 = DAG.getNode(ISD::AND, DL, MVT::v4i32, DAG.getBitcast(MVT::v4i32, V1),
14804 DWordClearMask);
14805 V2 = DAG.getNode(ISD::AND, DL, MVT::v4i32, DAG.getBitcast(MVT::v4i32, V2),
14806 DWordClearMask);
14807 // Now pack things back together.
14808 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v8i16, V1, V2);
14809 if (NumEvenDrops == 2) {
14810 Result = DAG.getBitcast(MVT::v4i32, Result);
14811 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v8i16, Result, Result);
14812 }
14813 return Result;
14814 }
14816 // Try to lower by permuting the inputs into an unpack instruction.
14817 if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(DL, MVT::v8i16, V1, V2,
14818 Mask, Subtarget, DAG))
14819 return Unpack;
14821 // If we can't directly blend but can use PSHUFB, that will be better as it
14822 // can both shuffle and set up the inefficient blend.
14823 if (!IsBlendSupported && Subtarget.hasSSSE3()) {
14824 bool V1InUse, V2InUse;
14825 return lowerShuffleAsBlendOfPSHUFBs(DL, MVT::v8i16, V1, V2, Mask,
14826 Zeroable, DAG, V1InUse, V2InUse);
14827 }
14829 // We can always bit-blend if we have to so the fallback strategy is to
14830 // decompose into single-input permutes and blends.
14831 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
14832 Mask, Subtarget, DAG);
14833 }
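// Illustrative note on the compaction path above: a truncating mask such as
// <0, 2, 4, 6, 8, 10, 12, 14> has NumEvenDrops == 1, so both inputs are ANDed
// with a {0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF} dword-clear mask and a single
// PACKUSDW produces the result; clearing the high words first makes the
// unsigned saturation behave as a plain truncation.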
14835 static SDValue lowerShuffleWithPERMV(const SDLoc &DL, MVT VT,
14836 ArrayRef<int> Mask, SDValue V1,
14837 SDValue V2, SelectionDAG &DAG) {
14838 MVT MaskEltVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
14839 MVT MaskVecVT = MVT::getVectorVT(MaskEltVT, VT.getVectorNumElements());
14841 SDValue MaskNode = getConstVector(Mask, MaskVecVT, DAG, DL, true);
14842 if (V2.isUndef())
14843 return DAG.getNode(X86ISD::VPERMV, DL, VT, MaskNode, V1);
14845 return DAG.getNode(X86ISD::VPERMV3, DL, VT, V1, MaskNode, V2);
14846 }
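// E.g. (illustrative): on the v16i8 caller below (guarded by VBMI+VLX), the
// unary VPERMV node typically selects VPERMB and the binary VPERMV3 a
// VPERMT2B-style instruction, with the shuffle mask materialized as a
// constant vector operand.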
14848 /// Generic lowering of v16i8 shuffles.
14850 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
14851 /// detect any complexity reducing interleaving. If that doesn't help, it uses
14852 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
14853 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them
14854 /// back together.
14855 static SDValue lowerV16I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
14856 const APInt &Zeroable, SDValue V1, SDValue V2,
14857 const X86Subtarget &Subtarget,
14858 SelectionDAG &DAG) {
14859 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
14860 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
14861 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
14863 // Try to use shift instructions.
14864 if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask,
14865 Zeroable, Subtarget, DAG))
14866 return Shift;
14868 // Try to use byte rotation instructions.
14869 if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i8, V1, V2, Mask,
14870 Subtarget, DAG))
14871 return Rotate;
14873 // Use dedicated pack instructions for masks that match their pattern.
14874 if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i8, Mask, V1, V2, DAG,
14875 Subtarget))
14876 return V;
14878 // Try to use a zext lowering.
14879 if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v16i8, V1, V2, Mask,
14880 Zeroable, Subtarget, DAG))
14881 return ZExt;
14883 // See if we can use SSE4A Extraction / Insertion.
14884 if (Subtarget.hasSSE4A())
14885 if (SDValue V = lowerShuffleWithSSE4A(DL, MVT::v16i8, V1, V2, Mask,
14886 Zeroable, DAG))
14887 return V;
14889 int NumV2Elements = count_if(Mask, [](int M) { return M >= 16; });
14891 // For single-input shuffles, there are some nicer lowering tricks we can use.
14892 if (NumV2Elements == 0) {
14893 // Check for being able to broadcast a single element.
14894 if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i8, V1, V2,
14895 Mask, Subtarget, DAG))
14896 return Broadcast;
14898 // Try to use bit rotation instructions.
14899 if (SDValue Rotate = lowerShuffleAsBitRotate(DL, MVT::v16i8, V1, Mask,
14900 Subtarget, DAG))
14901 return Rotate;
14903 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
14904 return V;
14906 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
14907 // Notably, this handles splat and partial-splat shuffles more efficiently.
14908 // However, it only makes sense if the pre-duplication shuffle simplifies
14909 // things significantly. Currently, this means we need to be able to
14910 // express the pre-duplication shuffle as an i16 shuffle.
14912 // FIXME: We should check for other patterns which can be widened into an
14913 // i16 shuffle as well.
14914 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
14915 for (int i = 0; i < 16; i += 2)
14916 if (Mask[i] >= 0 && Mask[i + 1] >= 0 && Mask[i] != Mask[i + 1])
14917 return false;
14918 return true;
14919 };
14921 auto tryToWidenViaDuplication = [&]() -> SDValue {
14922 if (!canWidenViaDuplication(Mask))
14923 return SDValue();
14924 SmallVector<int, 4> LoInputs;
14925 copy_if(Mask, std::back_inserter(LoInputs),
14926 [](int M) { return M >= 0 && M < 8; });
14927 array_pod_sort(LoInputs.begin(), LoInputs.end());
14928 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
14929 LoInputs.end());
14930 SmallVector<int, 4> HiInputs;
14931 copy_if(Mask, std::back_inserter(HiInputs), [](int M) { return M >= 8; });
14932 array_pod_sort(HiInputs.begin(), HiInputs.end());
14933 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
14934 HiInputs.end());
14936 bool TargetLo = LoInputs.size() >= HiInputs.size();
14937 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
14938 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
14940 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
14941 SmallDenseMap<int, int, 8> LaneMap;
14942 for (int I : InPlaceInputs) {
14943 PreDupI16Shuffle[I/2] = I/2;
14944 LaneMap[I] = I;
14945 }
14946 int j = TargetLo ? 0 : 4, je = j + 4;
14947 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
14948 // Check if j is already a shuffle of this input. This happens when
14949 // there are two adjacent bytes after we move the low one.
14950 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
14951 // If we haven't yet mapped the input, search for a slot into which
14952 // we can map it.
14953 while (j < je && PreDupI16Shuffle[j] >= 0)
14954 ++j;
14956 if (j == je)
14957 // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
14958 return SDValue();
14960 // Map this input with the i16 shuffle.
14961 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
14962 }
14964 // Update the lane map based on the mapping we ended up with.
14965 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
14966 }
14967 V1 = DAG.getBitcast(
14968 MVT::v16i8,
14969 DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
14970 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
14972 // Unpack the bytes to form the i16s that will be shuffled into place.
14973 bool EvenInUse = false, OddInUse = false;
14974 for (int i = 0; i < 16; i += 2) {
14975 EvenInUse |= (Mask[i + 0] >= 0);
14976 OddInUse |= (Mask[i + 1] >= 0);
14977 if (EvenInUse && OddInUse)
14978 break;
14979 }
14980 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
14981 MVT::v16i8, EvenInUse ? V1 : DAG.getUNDEF(MVT::v16i8),
14982 OddInUse ? V1 : DAG.getUNDEF(MVT::v16i8));
14984 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
14985 for (int i = 0; i < 16; ++i)
14986 if (Mask[i] >= 0) {
14987 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
14988 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
14989 if (PostDupI16Shuffle[i / 2] < 0)
14990 PostDupI16Shuffle[i / 2] = MappedMask;
14991 else
14992 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
14993 "Conflicting entries in the original shuffle!");
14994 }
14995 return DAG.getBitcast(
14996 MVT::v16i8,
14997 DAG.getVectorShuffle(MVT::v8i16, DL, DAG.getBitcast(MVT::v8i16, V1),
14998 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
14999 };
15000 if (SDValue V = tryToWidenViaDuplication())
15001 return V;
15002 }
15004 if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v16i8, V1, V2, Mask,
15005 Zeroable, Subtarget, DAG))
15006 return Masked;
15008 // Use dedicated unpack instructions for masks that match their pattern.
15009 if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i8, Mask, V1, V2, DAG))
15010 return V;
15012 // Try to use byte shift instructions to mask.
15013 if (SDValue V = lowerShuffleAsByteShiftMask(DL, MVT::v16i8, V1, V2, Mask,
15014 Zeroable, Subtarget, DAG))
15015 return V;
15017 // Check for compaction patterns.
15018 bool IsSingleInput = V2.isUndef();
15019 int NumEvenDrops = canLowerByDroppingEvenElements(Mask, IsSingleInput);
15021 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
15022 // with PSHUFB. It is important to do this before we attempt to generate any
15023 // blends but after all of the single-input lowerings. If the single input
15024 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
15025 // want to preserve that and we can DAG combine any longer sequences into
15026 // a PSHUFB in the end. But once we start blending from multiple inputs,
15027 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
15028 // and there are *very* few patterns that would actually be faster than the
15029 // PSHUFB approach because of its ability to zero lanes.
15031 // If the mask is a binary compaction, we can more efficiently perform this
15032 // as a PACKUS(AND(),AND()) - which is quicker than UNPACK(PSHUFB(),PSHUFB()).
15034 // FIXME: The only exceptions to the above are blends which are exact
15035 // interleavings with direct instructions supporting them. We currently don't
15036 // handle those well here.
15037 if (Subtarget.hasSSSE3() && (IsSingleInput || NumEvenDrops != 1)) {
15038 bool V1InUse = false;
15039 bool V2InUse = false;
15041 SDValue PSHUFB = lowerShuffleAsBlendOfPSHUFBs(
15042 DL, MVT::v16i8, V1, V2, Mask, Zeroable, DAG, V1InUse, V2InUse);
15044 // If both V1 and V2 are in use and we can use a direct blend or an unpack,
15045 // do so. This avoids using them to handle blends-with-zero which is
15046 // important as a single pshufb is significantly faster for that.
15047 if (V1InUse && V2InUse) {
15048 if (Subtarget.hasSSE41())
15049 if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i8, V1, V2, Mask,
15050 Zeroable, Subtarget, DAG))
15051 return Blend;
15053 // We can use an unpack to do the blending rather than an or in some
15054 // cases. Even though the or may be (very minorly) more efficient, we
15055 // preference this lowering because there are common cases where part of
15056 // the complexity of the shuffles goes away when we do the final blend as
15057 // an unpack.
15058 // FIXME: It might be worth trying to detect if the unpack-feeding
15059 // shuffles will both be pshufb, in which case we shouldn't bother with
15060 // the unpack.
15061 if (SDValue Unpack = lowerShuffleAsPermuteAndUnpack(
15062 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
15063 return Unpack;
15065 // If we have VBMI we can use one VPERM instead of multiple PSHUFBs.
15066 if (Subtarget.hasVBMI() && Subtarget.hasVLX())
15067 return lowerShuffleWithPERMV(DL, MVT::v16i8, Mask, V1, V2, DAG);
15069 // Use PALIGNR+Permute if possible - permute might become PSHUFB but the
15070 // PALIGNR will be cheaper than the second PSHUFB+OR.
15071 if (SDValue V = lowerShuffleAsByteRotateAndPermute(
15072 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
15073 return V;
15074 }
15076 return PSHUFB;
15077 }
15079 // There are special ways we can lower some single-element blends.
15080 if (NumV2Elements == 1)
15081 if (SDValue V = lowerShuffleAsElementInsertion(
15082 DL, MVT::v16i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
15083 return V;
15085 if (SDValue Blend = lowerShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
15086 return Blend;
15088 // Check whether a compaction lowering can be done. This handles shuffles
15089 // which take every Nth element for some even N. See the helper function for
15090 // details.
15091 //
15092 // We special case these as they can be particularly efficiently handled with
15093 // the PACKUSWB instruction on x86 and they show up in common patterns of
15094 // rearranging bytes to truncate wide elements.
15095 if (NumEvenDrops) {
15096 // NumEvenDrops is the power of two stride of the elements. Another way of
15097 // thinking about it is that we need to drop the even elements this many
15098 // times to get the original input.
15100 // First we need to zero all the dropped bytes.
15101 assert(NumEvenDrops <= 3 &&
15102 "No support for dropping even elements more than 3 times.");
15103 SmallVector<SDValue, 8> WordClearOps(8, DAG.getConstant(0, DL, MVT::i16));
15104 for (unsigned i = 0; i != 8; i += 1 << (NumEvenDrops - 1))
15105 WordClearOps[i] = DAG.getConstant(0xFF, DL, MVT::i16);
15106 SDValue WordClearMask = DAG.getBuildVector(MVT::v8i16, DL, WordClearOps);
15107 V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, DAG.getBitcast(MVT::v8i16, V1),
15108 WordClearMask);
15109 if (!IsSingleInput)
15110 V2 = DAG.getNode(ISD::AND, DL, MVT::v8i16, DAG.getBitcast(MVT::v8i16, V2),
15111 WordClearMask);
15113 // Now pack things back together.
15114 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1,
15115 IsSingleInput ? V1 : V2);
15116 for (int i = 1; i < NumEvenDrops; ++i) {
15117 Result = DAG.getBitcast(MVT::v8i16, Result);
15118 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
15119 }
15120 return Result;
15121 }
15123 // Handle multi-input cases by blending single-input shuffles.
15124 if (NumV2Elements > 0)
15125 return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v16i8, V1, V2, Mask,
15126 Subtarget, DAG);
15128 // The fallback path for single-input shuffles widens this into two v8i16
15129 // vectors with unpacks, shuffles those, and then pulls them back together
15130 // with a pack.
15131 SDValue V = V1;
15133 std::array<int, 8> LoBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
15134 std::array<int, 8> HiBlendMask = {{-1, -1, -1, -1, -1, -1, -1, -1}};
15135 for (int i = 0; i < 16; ++i)
15136 if (Mask[i] >= 0)
15137 (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
15139 SDValue VLoHalf, VHiHalf;
15140 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
15141 // them out and avoid using UNPCK{L,H} to extract the elements of V as
15142 // half-vectors.
15143 if (none_of(LoBlendMask, [](int M) { return M >= 0 && M % 2 == 1; }) &&
15144 none_of(HiBlendMask, [](int M) { return M >= 0 && M % 2 == 1; })) {
15145 // Use a mask to drop the high bytes.
15146 VLoHalf = DAG.getBitcast(MVT::v8i16, V);
15147 VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
15148 DAG.getConstant(0x00FF, DL, MVT::v8i16));
15150 // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
15151 VHiHalf = DAG.getUNDEF(MVT::v8i16);
15153 // Squash the masks to point directly into VLoHalf.
15154 for (int &M : LoBlendMask)
15155 if (M >= 0)
15156 M /= 2;
15157 for (int &M : HiBlendMask)
15158 if (M >= 0)
15159 M /= 2;
15160 } else {
15161 // Otherwise just unpack the low half of V into VLoHalf and the high half into
15162 // VHiHalf so that we can blend them as i16s.
15163 SDValue Zero = getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
15165 VLoHalf = DAG.getBitcast(
15166 MVT::v8i16, DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
15167 VHiHalf = DAG.getBitcast(
15168 MVT::v8i16, DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
15169 }
15171 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
15172 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);
15174 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
15175 }
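// Sketch of this fallback (illustrative): V is unpacked against zero into two
// v8i16 halves holding zero-extended bytes, each half is shuffled as i16
// elements via the blend masks built above, and the final PACKUS drops the
// known-zero high bytes to reassemble the v16i8 result.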
15177 /// Dispatching routine to lower various 128-bit x86 vector shuffles.
15179 /// This routine breaks down the specific type of 128-bit shuffle and
15180 /// dispatches to the lowering routines accordingly.
15181 static SDValue lower128BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
15182 MVT VT, SDValue V1, SDValue V2,
15183 const APInt &Zeroable,
15184 const X86Subtarget &Subtarget,
15185 SelectionDAG &DAG) {
15186 switch (VT.SimpleTy) {
15187 case MVT::v2i64:
15188 return lowerV2I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
15189 case MVT::v2f64:
15190 return lowerV2F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
15191 case MVT::v4i32:
15192 return lowerV4I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
15193 case MVT::v4f32:
15194 return lowerV4F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
15195 case MVT::v8i16:
15196 return lowerV8I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
15197 case MVT::v16i8:
15198 return lowerV16I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
15200 default:
15201 llvm_unreachable("Unimplemented!");
15202 }
15203 }
15205 /// Generic routine to split vector shuffle into half-sized shuffles.
15207 /// This routine just extracts two subvectors, shuffles them independently, and
15208 /// then concatenates them back together. This should work effectively with all
15209 /// AVX vector shuffle types.
15210 static SDValue splitAndLowerShuffle(const SDLoc &DL, MVT VT, SDValue V1,
15211 SDValue V2, ArrayRef<int> Mask,
15212 SelectionDAG &DAG) {
15213 assert(VT.getSizeInBits() >= 256 &&
15214 "Only for 256-bit or wider vector shuffles!");
15215 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
15216 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
15218 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
15219 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
15221 int NumElements = VT.getVectorNumElements();
15222 int SplitNumElements = NumElements / 2;
15223 MVT ScalarVT = VT.getVectorElementType();
15224 MVT SplitVT = MVT::getVectorVT(ScalarVT, SplitNumElements);
15226 // Use splitVector/extractSubVector so that split build-vectors just build two
15227 // narrower build vectors. This helps shuffling with splats and zeros.
15228 auto SplitVector = [&](SDValue V) {
15229 SDValue LoV, HiV;
15230 std::tie(LoV, HiV) = splitVector(peekThroughBitcasts(V), DAG, DL);
15231 return std::make_pair(DAG.getBitcast(SplitVT, LoV),
15232 DAG.getBitcast(SplitVT, HiV));
15233 };
15235 SDValue LoV1, HiV1, LoV2, HiV2;
15236 std::tie(LoV1, HiV1) = SplitVector(V1);
15237 std::tie(LoV2, HiV2) = SplitVector(V2);
15239 // Now create two 4-way blends of these half-width vectors.
15240 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
15241 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
15242 SmallVector<int, 32> V1BlendMask((unsigned)SplitNumElements, -1);
15243 SmallVector<int, 32> V2BlendMask((unsigned)SplitNumElements, -1);
15244 SmallVector<int, 32> BlendMask((unsigned)SplitNumElements, -1);
15245 for (int i = 0; i < SplitNumElements; ++i) {
15246 int M = HalfMask[i];
15247 if (M >= NumElements) {
15248 if (M >= NumElements + SplitNumElements)
15249 UseHiV2 = true;
15250 else
15251 UseLoV2 = true;
15252 V2BlendMask[i] = M - NumElements;
15253 BlendMask[i] = SplitNumElements + i;
15254 } else if (M >= 0) {
15255 if (M >= SplitNumElements)
15259 V1BlendMask[i] = M;
15264 // Because the lowering happens after all combining takes place, we need to
15265 // manually combine these blend masks as much as possible so that we create
15266 // a minimal number of high-level vector shuffle nodes.
15268 // First try just blending the halves of V1 or V2.
15269 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
15270 return DAG.getUNDEF(SplitVT);
15271 if (!UseLoV2 && !UseHiV2)
15272 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
15273 if (!UseLoV1 && !UseHiV1)
15274 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
15276 SDValue V1Blend, V2Blend;
15277 if (UseLoV1 && UseHiV1) {
15279 DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
15281 // We only use half of V1 so map the usage down into the final blend mask.
15282 V1Blend = UseLoV1 ? LoV1 : HiV1;
15283 for (int i = 0; i < SplitNumElements; ++i)
15284 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
15285 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
15287 if (UseLoV2 && UseHiV2) {
15289 DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
15291 // We only use half of V2 so map the usage down into the final blend mask.
15292 V2Blend = UseLoV2 ? LoV2 : HiV2;
15293 for (int i = 0; i < SplitNumElements; ++i)
15294 if (BlendMask[i] >= SplitNumElements)
15295 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
15297 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
15299 SDValue Lo = HalfBlend(LoMask);
15300 SDValue Hi = HalfBlend(HiMask);
15301 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
/// Either split a vector in halves or decompose the shuffles and the
/// blend.
///
/// This is provided as a good fallback for many lowerings of non-single-input
/// shuffles with more than one 128-bit lane. In those cases, we want to select
/// between splitting the shuffle into 128-bit components and stitching those
/// back together vs. extracting the single-input shuffles and blending those
/// results.
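/// For example, a v8i32 mask <0, 0, 0, 0, 8, 8, 8, 8> broadcasts one element
/// from each input and is lowered as two broadcasts plus a blend, while a
/// mask that reads only one 128-bit lane of each input is split into two
/// half-width shuffles.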
static SDValue lowerShuffleAsSplitOrBlend(const SDLoc &DL, MVT VT, SDValue V1,
                                          SDValue V2, ArrayRef<int> Mask,
                                          const X86Subtarget &Subtarget,
                                          SelectionDAG &DAG) {
  assert(!V2.isUndef() && "This routine must not be used to lower single-input "
         "shuffles as it could then recurse on itself.");
  int Size = Mask.size();

  // If this can be modeled as a broadcast of two elements followed by a blend,
  // prefer that lowering. This is especially important because broadcasts can
  // often fold with memory operands.
  auto DoBothBroadcast = [&] {
    int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
    for (int M : Mask)
      if (M >= Size) {
        if (V2BroadcastIdx < 0)
          V2BroadcastIdx = M - Size;
        else if (M - Size != V2BroadcastIdx)
          return false;
      } else if (M >= 0) {
        if (V1BroadcastIdx < 0)
          V1BroadcastIdx = M;
        else if (M != V1BroadcastIdx)
          return false;
      }
    return true;
  };
  if (DoBothBroadcast())
    return lowerShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
                                                Subtarget, DAG);

  // If the inputs all stem from a single 128-bit lane of each input, then we
  // split them rather than blending because the split will decompose to
  // unusually few instructions.
  int LaneCount = VT.getSizeInBits() / 128;
  int LaneSize = Size / LaneCount;
  SmallBitVector LaneInputs[2];
  LaneInputs[0].resize(LaneCount, false);
  LaneInputs[1].resize(LaneCount, false);
  for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0)
      LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
  if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
    return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);

  // Otherwise, just fall back to decomposed shuffles and a blend. This requires
  // that the decomposed single-input shuffles don't end up here.
  return lowerShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, Subtarget,
                                              DAG);
}
// Lower as SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
// TODO: Extend to support v8f32 (+ 512-bit shuffles).
static SDValue lowerShuffleAsLanePermuteAndSHUFP(const SDLoc &DL, MVT VT,
                                                 SDValue V1, SDValue V2,
                                                 ArrayRef<int> Mask,
                                                 SelectionDAG &DAG) {
  assert(VT == MVT::v4f64 && "Only for v4f64 shuffles");

  int LHSMask[4] = {-1, -1, -1, -1};
  int RHSMask[4] = {-1, -1, -1, -1};
  unsigned SHUFPMask = 0;

  // As SHUFPD uses a single LHS/RHS element per lane, we can always
  // perform the shuffle once the lanes have been shuffled in place.
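  // e.g. given Mask {1, 5, 2, 7} we build LHSMask {-1, 1, 2, -1} and
  // RHSMask {-1, 5, -1, 7}; SHUFPD with immediate 0b1011 then picks the
  // required element from each pre-shuffled lane.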
  for (int i = 0; i != 4; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;
    int LaneBase = i & ~1;
    auto &LaneMask = (i & 1) ? RHSMask : LHSMask;
    LaneMask[LaneBase + (M & 1)] = M;
    SHUFPMask |= (M & 1) << i;
  }

  SDValue LHS = DAG.getVectorShuffle(VT, DL, V1, V2, LHSMask);
  SDValue RHS = DAG.getVectorShuffle(VT, DL, V1, V2, RHSMask);
  return DAG.getNode(X86ISD::SHUFP, DL, VT, LHS, RHS,
                     DAG.getTargetConstant(SHUFPMask, DL, MVT::i8));
}
/// Lower a vector shuffle crossing multiple 128-bit lanes as
/// a lane permutation followed by a per-lane permutation.
///
/// This is mainly for cases where we can have non-repeating permutes
/// in each lane.
///
/// TODO: This is very similar to lowerShuffleAsLanePermuteAndRepeatedMask,
/// we should investigate merging them.
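/// e.g. the v4f64 mask <2, 3, 3, 2> first broadcasts the upper 128-bit lane
/// into both destination lanes (lane mask <2, 3, 2, 3>), then applies the
/// per-lane permute <0, 1, 3, 2>.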
static SDValue lowerShuffleAsLanePermuteAndPermute(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    SelectionDAG &DAG, const X86Subtarget &Subtarget) {
  int NumElts = VT.getVectorNumElements();
  int NumLanes = VT.getSizeInBits() / 128;
  int NumEltsPerLane = NumElts / NumLanes;

  SmallVector<int, 4> SrcLaneMask(NumLanes, SM_SentinelUndef);
  SmallVector<int, 16> PermMask(NumElts, SM_SentinelUndef);

  for (int i = 0; i != NumElts; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;

    // Ensure that each lane comes from a single source lane.
    int SrcLane = M / NumEltsPerLane;
    int DstLane = i / NumEltsPerLane;
    if (!isUndefOrEqual(SrcLaneMask[DstLane], SrcLane))
      return SDValue();
    SrcLaneMask[DstLane] = SrcLane;

    PermMask[i] = (DstLane * NumEltsPerLane) + (M % NumEltsPerLane);
  }

  // Make sure we set all elements of the lane mask, to avoid undef propagation.
  SmallVector<int, 16> LaneMask(NumElts, SM_SentinelUndef);
  for (int DstLane = 0; DstLane != NumLanes; ++DstLane) {
    int SrcLane = SrcLaneMask[DstLane];
    if (0 <= SrcLane)
      for (int j = 0; j != NumEltsPerLane; ++j) {
        LaneMask[(DstLane * NumEltsPerLane) + j] =
            (SrcLane * NumEltsPerLane) + j;
      }
  }

  // If we're only shuffling a single lowest lane and the rest are identity
  // then don't bother.
  // TODO - isShuffleMaskInputInPlace could be extended to something like this.
  int NumIdentityLanes = 0;
  bool OnlyShuffleLowestLane = true;
  for (int i = 0; i != NumLanes; ++i) {
    if (isSequentialOrUndefInRange(PermMask, i * NumEltsPerLane, NumEltsPerLane,
                                   i * NumEltsPerLane))
      NumIdentityLanes++;
    else if (SrcLaneMask[i] != 0 && SrcLaneMask[i] != NumLanes)
      OnlyShuffleLowestLane = false;
  }
  if (OnlyShuffleLowestLane && NumIdentityLanes == (NumLanes - 1))
    return SDValue();

  SDValue LanePermute = DAG.getVectorShuffle(VT, DL, V1, V2, LaneMask);
  return DAG.getVectorShuffle(VT, DL, LanePermute, DAG.getUNDEF(VT), PermMask);
}
/// Lower a vector shuffle crossing multiple 128-bit lanes by shuffling one
/// source with a lane permutation.
///
/// This lowering strategy results in four instructions in the worst case for a
/// single-input cross lane shuffle which is lower than any other fully general
/// cross-lane shuffle strategy I'm aware of. Special cases for each particular
/// shuffle pattern should be handled prior to trying this lowering.
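/// e.g. for the v4f64 mask <3, 2, 1, 0> the 128-bit halves are first swapped
/// with a <2, 3, 0, 1> lane permute, leaving only the in-lane shuffle
/// <5, 4, 7, 6> of V1 against the flipped vector.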
static SDValue lowerShuffleAsLanePermuteAndShuffle(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    SelectionDAG &DAG, const X86Subtarget &Subtarget) {
  // FIXME: This should probably be generalized for 512-bit vectors as well.
  assert(VT.is256BitVector() && "Only for 256-bit vector shuffles!");
  int Size = Mask.size();
  int LaneSize = Size / 2;

  // Fold to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
  // Only do this if the elements aren't all from the lower lane,
  // otherwise we're (probably) better off doing a split.
  if (VT == MVT::v4f64 &&
      !all_of(Mask, [LaneSize](int M) { return M < LaneSize; }))
    if (SDValue V =
            lowerShuffleAsLanePermuteAndSHUFP(DL, VT, V1, V2, Mask, DAG))
      return V;

  // If there are only inputs from one 128-bit lane, splitting will in fact be
  // less expensive. The flags track whether the given lane contains an element
  // that crosses to another lane.
  if (!Subtarget.hasAVX2()) {
    bool LaneCrossing[2] = {false, false};
    for (int i = 0; i < Size; ++i)
      if (Mask[i] >= 0 && ((Mask[i] % Size) / LaneSize) != (i / LaneSize))
        LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
    if (!LaneCrossing[0] || !LaneCrossing[1])
      return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
  } else {
    bool LaneUsed[2] = {false, false};
    for (int i = 0; i < Size; ++i)
      if (Mask[i] >= 0)
        LaneUsed[(Mask[i] % Size) / LaneSize] = true;
    if (!LaneUsed[0] || !LaneUsed[1])
      return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
  }

  // TODO - we could support shuffling V2 in the Flipped input.
  assert(V2.isUndef() &&
         "This last part of this routine only works on single input shuffles");

  SmallVector<int, 32> InLaneMask(Mask.begin(), Mask.end());
  for (int i = 0; i < Size; ++i) {
    int &M = InLaneMask[i];
    if (M < 0)
      continue;
    if (((M % Size) / LaneSize) != (i / LaneSize))
      M = (M % LaneSize) + ((i / LaneSize) * LaneSize) + Size;
  }
  assert(!is128BitLaneCrossingShuffleMask(VT, InLaneMask) &&
         "In-lane shuffle mask expected");

  // Flip the lanes, and shuffle the results which should now be in-lane.
  MVT PVT = VT.isFloatingPoint() ? MVT::v4f64 : MVT::v4i64;
  SDValue Flipped = DAG.getBitcast(PVT, V1);
  Flipped =
      DAG.getVectorShuffle(PVT, DL, Flipped, DAG.getUNDEF(PVT), {2, 3, 0, 1});
  Flipped = DAG.getBitcast(VT, Flipped);
  return DAG.getVectorShuffle(VT, DL, V1, Flipped, InLaneMask);
}
/// Handle lowering 2-lane 128-bit shuffles.
static SDValue lowerV2X128Shuffle(const SDLoc &DL, MVT VT, SDValue V1,
                                  SDValue V2, ArrayRef<int> Mask,
                                  const APInt &Zeroable,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  // With AVX2, use VPERMQ/VPERMPD for unary shuffles to allow memory folding.
  if (Subtarget.hasAVX2() && V2.isUndef())
    return SDValue();

  bool V2IsZero = !V2.isUndef() && ISD::isBuildVectorAllZeros(V2.getNode());

  SmallVector<int, 4> WidenedMask;
  if (!canWidenShuffleElements(Mask, Zeroable, V2IsZero, WidenedMask))
    return SDValue();

  bool IsLowZero = (Zeroable & 0x3) == 0x3;
  bool IsHighZero = (Zeroable & 0xc) == 0xc;

  // Try to use an insert into a zero vector.
  if (WidenedMask[0] == 0 && IsHighZero) {
    MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
    SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
                              DAG.getIntPtrConstant(0, DL));
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                       getZeroVector(VT, Subtarget, DAG, DL), LoV,
                       DAG.getIntPtrConstant(0, DL));
  }

  // TODO: If minimizing size and one of the inputs is a zero vector and the
  // zero vector has only one use, we could use a VPERM2X128 to save the
  // instruction bytes needed to explicitly generate the zero vector.

  // Blends are faster and handle all the non-lane-crossing cases.
  if (SDValue Blend = lowerShuffleAsBlend(DL, VT, V1, V2, Mask, Zeroable,
                                          Subtarget, DAG))
    return Blend;

  // If either input operand is a zero vector, use VPERM2X128 because its mask
  // allows us to replace the zero input with an implicit zero.
  if (!IsLowZero && !IsHighZero) {
    // Check for patterns which can be matched with a single insert of a 128-bit
    // subvector.
    bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask, {0, 1, 0, 1});
    if (OnlyUsesV1 || isShuffleEquivalent(V1, V2, Mask, {0, 1, 4, 5})) {

      // With AVX1, use vperm2f128 (below) to allow load folding. Otherwise,
      // this will likely become vinsertf128 which can't fold a 256-bit memop.
      if (!isa<LoadSDNode>(peekThroughBitcasts(V1))) {
        MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
        SDValue SubVec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
                                     OnlyUsesV1 ? V1 : V2,
                                     DAG.getIntPtrConstant(0, DL));
        return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
                           DAG.getIntPtrConstant(2, DL));
      }
    }

    // Try to use SHUF128 if possible.
    if (Subtarget.hasVLX()) {
      if (WidenedMask[0] < 2 && WidenedMask[1] >= 2) {
        unsigned PermMask = ((WidenedMask[0] % 2) << 0) |
                            ((WidenedMask[1] % 2) << 1);
        return DAG.getNode(X86ISD::SHUF128, DL, VT, V1, V2,
                           DAG.getTargetConstant(PermMask, DL, MVT::i8));
      }
    }
  }

  // Otherwise form a 128-bit permutation. After accounting for undefs,
  // convert the 64-bit shuffle mask selection values into 128-bit
  // selection bits by dividing the indexes by 2 and shifting into positions
  // defined by a vperm2*128 instruction's immediate control byte.

  // The immediate permute control byte looks like this:
  //    [1:0] - select 128 bits from sources for low half of destination
  //    [2]   - ignore
  //    [3]   - zero low half of destination
  //    [5:4] - select 128 bits from sources for high half of destination
  //    [6]   - ignore
  //    [7]   - zero high half of destination
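  //
  // e.g. a widened mask <0, 3> (low half of V1, high half of V2) encodes as
  // PermMask 0x30, while a zeroable low half with WidenedMask[1] == 2 (low
  // half of V2) encodes as 0x28.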

  assert((WidenedMask[0] >= 0 || IsLowZero) &&
         (WidenedMask[1] >= 0 || IsHighZero) && "Undef half?");

  unsigned PermMask = 0;
  PermMask |= IsLowZero ? 0x08 : (WidenedMask[0] << 0);
  PermMask |= IsHighZero ? 0x80 : (WidenedMask[1] << 4);

  // Check the immediate mask and replace unused sources with undef.
  if ((PermMask & 0x0a) != 0x00 && (PermMask & 0xa0) != 0x00)
    V1 = DAG.getUNDEF(VT);
  if ((PermMask & 0x0a) != 0x02 && (PermMask & 0xa0) != 0x20)
    V2 = DAG.getUNDEF(VT);

  return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
                     DAG.getTargetConstant(PermMask, DL, MVT::i8));
}
/// Lower a vector shuffle by first fixing the 128-bit lanes and then
/// shuffling each lane.
///
/// This attempts to create a repeated lane shuffle where each lane uses one
/// or two of the lanes of the inputs. The lanes of the input vectors are
/// shuffled in one or two independent shuffles to get the lanes into the
/// position needed by the final shuffle.
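/// e.g. for the v8f32 mask <0, 1, 12, 13, 0, 1, 12, 13> both destination
/// lanes want elements 0-1 of source lane 0 and 0-1 of source lane 3, so the
/// source lanes are shuffled into position once and a single repeated in-lane
/// mask finishes the job.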
static SDValue lowerShuffleAsLanePermuteAndRepeatedMask(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  assert(!V2.isUndef() && "This is only useful with multiple inputs.");

  if (is128BitLaneRepeatedShuffleMask(VT, Mask))
    return SDValue();

  int NumElts = Mask.size();
  int NumLanes = VT.getSizeInBits() / 128;
  int NumLaneElts = 128 / VT.getScalarSizeInBits();
  SmallVector<int, 16> RepeatMask(NumLaneElts, -1);
  SmallVector<std::array<int, 2>, 2> LaneSrcs(NumLanes, {{-1, -1}});

  // First pass will try to fill in the RepeatMask from lanes that need two
  // sources.
  for (int Lane = 0; Lane != NumLanes; ++Lane) {
    int Srcs[2] = {-1, -1};
    SmallVector<int, 16> InLaneMask(NumLaneElts, -1);
    for (int i = 0; i != NumLaneElts; ++i) {
      int M = Mask[(Lane * NumLaneElts) + i];
      if (M < 0)
        continue;
      // Determine which of the possible input lanes (NumLanes from each source)
      // this element comes from. Assign that as one of the sources for this
      // lane. We can assign up to 2 sources for this lane. If we run out of
      // sources we can't do anything.
      int LaneSrc = M / NumLaneElts;
      int Src;
      if (Srcs[0] < 0 || Srcs[0] == LaneSrc)
        Src = 0;
      else if (Srcs[1] < 0 || Srcs[1] == LaneSrc)
        Src = 1;
      else
        return SDValue();

      Srcs[Src] = LaneSrc;
      InLaneMask[i] = (M % NumLaneElts) + Src * NumElts;
    }

    // If this lane has two sources, see if it fits with the repeat mask so far.
    if (Srcs[1] < 0)
      continue;

    LaneSrcs[Lane][0] = Srcs[0];
    LaneSrcs[Lane][1] = Srcs[1];

    auto MatchMasks = [](ArrayRef<int> M1, ArrayRef<int> M2) {
      assert(M1.size() == M2.size() && "Unexpected mask size");
      for (int i = 0, e = M1.size(); i != e; ++i)
        if (M1[i] >= 0 && M2[i] >= 0 && M1[i] != M2[i])
          return false;
      return true;
    };

    auto MergeMasks = [](ArrayRef<int> Mask, MutableArrayRef<int> MergedMask) {
      assert(Mask.size() == MergedMask.size() && "Unexpected mask size");
      for (int i = 0, e = MergedMask.size(); i != e; ++i) {
        int M = Mask[i];
        if (M < 0)
          continue;
        assert((MergedMask[i] < 0 || MergedMask[i] == M) &&
               "Unexpected mask element");
        MergedMask[i] = M;
      }
    };

    if (MatchMasks(InLaneMask, RepeatMask)) {
      // Merge this lane mask into the final repeat mask.
      MergeMasks(InLaneMask, RepeatMask);
      continue;
    }

    // Didn't find a match. Swap the operands and try again.
    std::swap(LaneSrcs[Lane][0], LaneSrcs[Lane][1]);
    ShuffleVectorSDNode::commuteMask(InLaneMask);

    if (MatchMasks(InLaneMask, RepeatMask)) {
      // Merge this lane mask into the final repeat mask.
      MergeMasks(InLaneMask, RepeatMask);
      continue;
    }

    // Couldn't find a match with the operands in either order.
    return SDValue();
  }

  // Now handle any lanes with only one source.
  for (int Lane = 0; Lane != NumLanes; ++Lane) {
    // If this lane has already been processed, skip it.
    if (LaneSrcs[Lane][0] >= 0)
      continue;

    for (int i = 0; i != NumLaneElts; ++i) {
      int M = Mask[(Lane * NumLaneElts) + i];
      if (M < 0)
        continue;

      // If RepeatMask isn't defined yet we can define it ourself.
      if (RepeatMask[i] < 0)
        RepeatMask[i] = M % NumLaneElts;

      if (RepeatMask[i] < NumElts) {
        if (RepeatMask[i] != M % NumLaneElts)
          return SDValue();
        LaneSrcs[Lane][0] = M / NumLaneElts;
      } else {
        if (RepeatMask[i] != ((M % NumLaneElts) + NumElts))
          return SDValue();
        LaneSrcs[Lane][1] = M / NumLaneElts;
      }
    }

    if (LaneSrcs[Lane][0] < 0 && LaneSrcs[Lane][1] < 0)
      return SDValue();
  }

  SmallVector<int, 16> NewMask(NumElts, -1);
  for (int Lane = 0; Lane != NumLanes; ++Lane) {
    int Src = LaneSrcs[Lane][0];
    for (int i = 0; i != NumLaneElts; ++i) {
      int M = -1;
      if (Src >= 0)
        M = Src * NumLaneElts + i;
      NewMask[Lane * NumLaneElts + i] = M;
    }
  }
  SDValue NewV1 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
  // Ensure we didn't get back the shuffle we started with.
  // FIXME: This is a hack to make up for some splat handling code in
  // getVectorShuffle.
  if (isa<ShuffleVectorSDNode>(NewV1) &&
      cast<ShuffleVectorSDNode>(NewV1)->getMask() == Mask)
    return SDValue();

  for (int Lane = 0; Lane != NumLanes; ++Lane) {
    int Src = LaneSrcs[Lane][1];
    for (int i = 0; i != NumLaneElts; ++i) {
      int M = -1;
      if (Src >= 0)
        M = Src * NumLaneElts + i;
      NewMask[Lane * NumLaneElts + i] = M;
    }
  }
  SDValue NewV2 = DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
  // Ensure we didn't get back the shuffle we started with.
  // FIXME: This is a hack to make up for some splat handling code in
  // getVectorShuffle.
  if (isa<ShuffleVectorSDNode>(NewV2) &&
      cast<ShuffleVectorSDNode>(NewV2)->getMask() == Mask)
    return SDValue();

  for (int i = 0; i != NumElts; ++i) {
    NewMask[i] = RepeatMask[i % NumLaneElts];
    if (NewMask[i] < 0)
      continue;

    NewMask[i] += (i / NumLaneElts) * NumLaneElts;
  }
  return DAG.getVectorShuffle(VT, DL, NewV1, NewV2, NewMask);
}
/// If the input shuffle mask results in a vector that is undefined in all upper
/// or lower half elements and that mask accesses only 2 halves of the
/// shuffle's operands, return true. A mask of half the width with mask indexes
/// adjusted to access the extracted halves of the original shuffle operands is
/// returned in HalfMask. HalfIdx1 and HalfIdx2 return whether the upper or
/// lower half of each input operand is accessed.
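/// e.g. the v8i32 mask <u, u, u, u, 12, 13, 4, 5> yields the v4i32 half mask
/// <0, 1, 4, 5> with HalfIdx1 == 3 (upper half of V2) and HalfIdx2 == 1
/// (upper half of V1).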
static bool
getHalfShuffleMask(ArrayRef<int> Mask, MutableArrayRef<int> HalfMask,
                   int &HalfIdx1, int &HalfIdx2) {
  assert((Mask.size() == HalfMask.size() * 2) &&
         "Expected input mask to be twice as long as output");

  // Exactly one half of the result must be undef to allow narrowing.
  bool UndefLower = isUndefLowerHalf(Mask);
  bool UndefUpper = isUndefUpperHalf(Mask);
  if (UndefLower == UndefUpper)
    return false;

  unsigned HalfNumElts = HalfMask.size();
  unsigned MaskIndexOffset = UndefLower ? HalfNumElts : 0;
  HalfIdx1 = -1;
  HalfIdx2 = -1;
  for (unsigned i = 0; i != HalfNumElts; ++i) {
    int M = Mask[i + MaskIndexOffset];
    if (M < 0) {
      HalfMask[i] = M;
      continue;
    }

    // Determine which of the 4 half vectors this element is from.
    // i.e. 0 = Lower V1, 1 = Upper V1, 2 = Lower V2, 3 = Upper V2.
    int HalfIdx = M / HalfNumElts;

    // Determine the element index into its half vector source.
    int HalfElt = M % HalfNumElts;

    // We can shuffle with up to 2 half vectors, set the new 'half'
    // shuffle mask accordingly.
    if (HalfIdx1 < 0 || HalfIdx1 == HalfIdx) {
      HalfMask[i] = HalfElt;
      HalfIdx1 = HalfIdx;
      continue;
    }
    if (HalfIdx2 < 0 || HalfIdx2 == HalfIdx) {
      HalfMask[i] = HalfElt + HalfNumElts;
      HalfIdx2 = HalfIdx;
      continue;
    }

    // Too many half vectors referenced.
    return false;
  }

  return true;
}
/// Given the output values from getHalfShuffleMask(), create a half width
/// shuffle of extracted vectors followed by an insert back to full width.
static SDValue getShuffleHalfVectors(const SDLoc &DL, SDValue V1, SDValue V2,
                                     ArrayRef<int> HalfMask, int HalfIdx1,
                                     int HalfIdx2, bool UndefLower,
                                     SelectionDAG &DAG, bool UseConcat = false) {
  assert(V1.getValueType() == V2.getValueType() && "Different sized vectors?");
  assert(V1.getValueType().isSimple() && "Expecting only simple types");

  MVT VT = V1.getSimpleValueType();
  MVT HalfVT = VT.getHalfNumVectorElementsVT();
  unsigned HalfNumElts = HalfVT.getVectorNumElements();

  auto getHalfVector = [&](int HalfIdx) {
    if (HalfIdx < 0)
      return DAG.getUNDEF(HalfVT);
    SDValue V = (HalfIdx < 2 ? V1 : V2);
    HalfIdx = (HalfIdx % 2) * HalfNumElts;
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V,
                       DAG.getIntPtrConstant(HalfIdx, DL));
  };

  // ins undef, (shuf (ext V1, HalfIdx1), (ext V2, HalfIdx2), HalfMask), Offset
  SDValue Half1 = getHalfVector(HalfIdx1);
  SDValue Half2 = getHalfVector(HalfIdx2);
  SDValue V = DAG.getVectorShuffle(HalfVT, DL, Half1, Half2, HalfMask);
  if (UseConcat) {
    SDValue Op0 = V;
    SDValue Op1 = DAG.getUNDEF(HalfVT);
    if (UndefLower)
      std::swap(Op0, Op1);
    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Op0, Op1);
  }

  unsigned Offset = UndefLower ? HalfNumElts : 0;
  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V,
                     DAG.getIntPtrConstant(Offset, DL));
}
/// Lower shuffles where an entire half of a 256 or 512-bit vector is UNDEF.
/// This allows for fast cases such as subvector extraction/insertion
/// or shuffling smaller vector types which can lower more efficiently.
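/// e.g. a v8f32 shuffle <4, 5, 6, 7, u, u, u, u> is simply the upper 128-bit
/// half of V1 extracted and reinserted at element 0.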
static SDValue lowerShuffleWithUndefHalf(const SDLoc &DL, MVT VT, SDValue V1,
                                         SDValue V2, ArrayRef<int> Mask,
                                         const X86Subtarget &Subtarget,
                                         SelectionDAG &DAG) {
  assert((VT.is256BitVector() || VT.is512BitVector()) &&
         "Expected 256-bit or 512-bit vector");

  bool UndefLower = isUndefLowerHalf(Mask);
  if (!UndefLower && !isUndefUpperHalf(Mask))
    return SDValue();

  assert((!UndefLower || !isUndefUpperHalf(Mask)) &&
         "Completely undef shuffle mask should have been simplified already");

  // Upper half is undef and lower half is whole upper subvector.
  // e.g. vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
  MVT HalfVT = VT.getHalfNumVectorElementsVT();
  unsigned HalfNumElts = HalfVT.getVectorNumElements();
  if (!UndefLower &&
      isSequentialOrUndefInRange(Mask, 0, HalfNumElts, HalfNumElts)) {
    SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
                             DAG.getIntPtrConstant(HalfNumElts, DL));
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
                       DAG.getIntPtrConstant(0, DL));
  }

  // Lower half is undef and upper half is whole lower subvector.
  // e.g. vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
  if (UndefLower &&
      isSequentialOrUndefInRange(Mask, HalfNumElts, HalfNumElts, 0)) {
    SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, V1,
                             DAG.getIntPtrConstant(0, DL));
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), Hi,
                       DAG.getIntPtrConstant(HalfNumElts, DL));
  }

  int HalfIdx1, HalfIdx2;
  SmallVector<int, 8> HalfMask(HalfNumElts);
  if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2))
    return SDValue();

  assert(HalfMask.size() == HalfNumElts && "Unexpected shuffle mask length");

  // Only shuffle the halves of the inputs when useful.
  unsigned NumLowerHalves =
      (HalfIdx1 == 0 || HalfIdx1 == 2) + (HalfIdx2 == 0 || HalfIdx2 == 2);
  unsigned NumUpperHalves =
      (HalfIdx1 == 1 || HalfIdx1 == 3) + (HalfIdx2 == 1 || HalfIdx2 == 3);
  assert(NumLowerHalves + NumUpperHalves <= 2 && "Only 1 or 2 halves allowed");

  // Determine the larger pattern of undef/halves, then decide if it's worth
  // splitting the shuffle based on subtarget capabilities and types.
  unsigned EltWidth = VT.getVectorElementType().getSizeInBits();
  if (!UndefLower) {
    // XXXXuuuu: no insert is needed.
    // Always extract lowers when setting lower - these are all free subreg ops.
    if (NumUpperHalves == 0)
      return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
                                   UndefLower, DAG);

    if (NumUpperHalves == 1) {
      // AVX2 has efficient 32/64-bit element cross-lane shuffles.
      if (Subtarget.hasAVX2()) {
        // extract128 + vunpckhps/vshufps, is better than vblend + vpermps.
        if (EltWidth == 32 && NumLowerHalves && HalfVT.is128BitVector() &&
            !is128BitUnpackShuffleMask(HalfMask) &&
            (!isSingleSHUFPSMask(HalfMask) ||
             Subtarget.hasFastVariableShuffle()))
          return SDValue();
        // If this is a unary shuffle (assume that the 2nd operand is
        // canonicalized to undef), then we can use vpermpd. Otherwise, we
        // are better off extracting the upper half of 1 operand and using a
        // narrow shuffle.
        if (EltWidth == 64 && V2.isUndef())
          return SDValue();
      }
      // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
      if (Subtarget.hasAVX512() && VT.is512BitVector())
        return SDValue();
      // Extract + narrow shuffle is better than the wide alternative.
      return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
                                   UndefLower, DAG);
    }

    // Don't extract both uppers, instead shuffle and then extract.
    assert(NumUpperHalves == 2 && "Half vector count went wrong");
    return SDValue();
  }

  // UndefLower - uuuuXXXX: an insert to high half is required if we split this.
  if (NumUpperHalves == 0) {
    // AVX2 has efficient 64-bit element cross-lane shuffles.
    // TODO: Refine to account for unary shuffle, splat, and other masks?
    if (Subtarget.hasAVX2() && EltWidth == 64)
      return SDValue();
    // AVX512 has efficient cross-lane shuffles for all legal 512-bit types.
    if (Subtarget.hasAVX512() && VT.is512BitVector())
      return SDValue();
    // Narrow shuffle + insert is better than the wide alternative.
    return getShuffleHalfVectors(DL, V1, V2, HalfMask, HalfIdx1, HalfIdx2,
                                 UndefLower, DAG);
  }

  // NumUpperHalves != 0: don't bother with extract, shuffle, and then insert.
  return SDValue();
}
/// Test whether the specified input (0 or 1) is in-place blended by the
/// given mask.
///
/// This returns true if the elements from a particular input are already in the
/// slot required by the given mask and require no permutation.
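/// e.g. for Mask <0, 4, 2, 5>, input 0 is already in place (elements 0 and 2
/// stay in slots 0 and 2) while input 1 is not.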
static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
  assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
  int Size = Mask.size();
  for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
      return false;

  return true;
}
/// Handle case where shuffle sources are coming from the same 128-bit lane and
/// every lane can be represented as the same repeating mask - allowing us to
/// shuffle the sources with the repeating shuffle and then permute the result
/// to the destination lanes.
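/// e.g. the v8f32 mask <6, 7, 4, 5, 2, 3, 0, 1> becomes the in-lane repeating
/// shuffle <2, 3, 0, 1, 6, 7, 4, 5> followed by the 64-bit sub-lane permute
/// <4, 5, 6, 7, 0, 1, 2, 3> on AVX2.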
static SDValue lowerShuffleAsRepeatedMaskAndLanePermute(
    const SDLoc &DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget &Subtarget, SelectionDAG &DAG) {
  int NumElts = VT.getVectorNumElements();
  int NumLanes = VT.getSizeInBits() / 128;
  int NumLaneElts = NumElts / NumLanes;

  // On AVX2 we may be able to just shuffle the lowest elements and then
  // broadcast the result.
  if (Subtarget.hasAVX2()) {
    for (unsigned BroadcastSize : {16, 32, 64}) {
      if (BroadcastSize <= VT.getScalarSizeInBits())
        continue;
      int NumBroadcastElts = BroadcastSize / VT.getScalarSizeInBits();

      // Attempt to match a repeating pattern every NumBroadcastElts,
      // accounting for UNDEFs but only references the lowest 128-bit
      // lane of the inputs.
      auto FindRepeatingBroadcastMask = [&](SmallVectorImpl<int> &RepeatMask) {
        for (int i = 0; i != NumElts; i += NumBroadcastElts)
          for (int j = 0; j != NumBroadcastElts; ++j) {
            int M = Mask[i + j];
            if (M < 0)
              continue;
            int &R = RepeatMask[j];
            if (0 != ((M % NumElts) / NumLaneElts))
              return false;
            if (0 <= R && R != M)
              return false;
            R = M;
          }
        return true;
      };

      SmallVector<int, 8> RepeatMask((unsigned)NumElts, -1);
      if (!FindRepeatingBroadcastMask(RepeatMask))
        continue;

      // Shuffle the (lowest) repeated elements in place for broadcast.
      SDValue RepeatShuf = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatMask);

      // Shuffle the actual broadcast.
      SmallVector<int, 8> BroadcastMask((unsigned)NumElts, -1);
      for (int i = 0; i != NumElts; i += NumBroadcastElts)
        for (int j = 0; j != NumBroadcastElts; ++j)
          BroadcastMask[i + j] = j;
      return DAG.getVectorShuffle(VT, DL, RepeatShuf, DAG.getUNDEF(VT),
                                  BroadcastMask);
    }
  }

  // Bail if the shuffle mask doesn't cross 128-bit lanes.
  if (!is128BitLaneCrossingShuffleMask(VT, Mask))
    return SDValue();

  // Bail if we already have a repeated lane shuffle mask.
  SmallVector<int, 8> RepeatedShuffleMask;
  if (is128BitLaneRepeatedShuffleMask(VT, Mask, RepeatedShuffleMask))
    return SDValue();

  // On AVX2 targets we can permute 256-bit vectors as 64-bit sub-lanes
  // (with PERMQ/PERMPD), otherwise we can only permute whole 128-bit lanes.
  int SubLaneScale = Subtarget.hasAVX2() && VT.is256BitVector() ? 2 : 1;
  int NumSubLanes = NumLanes * SubLaneScale;
  int NumSubLaneElts = NumLaneElts / SubLaneScale;

  // Check that all the sources are coming from the same lane and see if we can
  // form a repeating shuffle mask (local to each sub-lane). At the same time,
  // determine the source sub-lane for each destination sub-lane.
  int TopSrcSubLane = -1;
  SmallVector<int, 8> Dst2SrcSubLanes((unsigned)NumSubLanes, -1);
  SmallVector<int, 8> RepeatedSubLaneMasks[2] = {
      SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef),
      SmallVector<int, 8>((unsigned)NumSubLaneElts, SM_SentinelUndef)};

  for (int DstSubLane = 0; DstSubLane != NumSubLanes; ++DstSubLane) {
    // Extract the sub-lane mask, check that it all comes from the same lane
    // and normalize the mask entries to come from the first lane.
    int SrcLane = -1;
    SmallVector<int, 8> SubLaneMask((unsigned)NumSubLaneElts, -1);
    for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
      int M = Mask[(DstSubLane * NumSubLaneElts) + Elt];
      if (M < 0)
        continue;
      int Lane = (M % NumElts) / NumLaneElts;
      if ((0 <= SrcLane) && (SrcLane != Lane))
        return SDValue();
      SrcLane = Lane;
      int LocalM = (M % NumLaneElts) + (M < NumElts ? 0 : NumElts);
      SubLaneMask[Elt] = LocalM;
    }

    // Whole sub-lane is UNDEF.
    if (SrcLane < 0)
      continue;

    // Attempt to match against the candidate repeated sub-lane masks.
    for (int SubLane = 0; SubLane != SubLaneScale; ++SubLane) {
      auto MatchMasks = [NumSubLaneElts](ArrayRef<int> M1, ArrayRef<int> M2) {
        for (int i = 0; i != NumSubLaneElts; ++i) {
          if (M1[i] < 0 || M2[i] < 0)
            continue;
          if (M1[i] != M2[i])
            return false;
        }
        return true;
      };

      auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane];
      if (!MatchMasks(SubLaneMask, RepeatedSubLaneMask))
        continue;

      // Merge the sub-lane mask into the matching repeated sub-lane mask.
      for (int i = 0; i != NumSubLaneElts; ++i) {
        int M = SubLaneMask[i];
        if (M < 0)
          continue;
        assert((RepeatedSubLaneMask[i] < 0 || RepeatedSubLaneMask[i] == M) &&
               "Unexpected mask element");
        RepeatedSubLaneMask[i] = M;
      }

      // Track the top most source sub-lane - by setting the remaining to UNDEF
      // we can greatly simplify shuffle matching.
      int SrcSubLane = (SrcLane * SubLaneScale) + SubLane;
      TopSrcSubLane = std::max(TopSrcSubLane, SrcSubLane);
      Dst2SrcSubLanes[DstSubLane] = SrcSubLane;
      break;
    }

    // Bail if we failed to find a matching repeated sub-lane mask.
    if (Dst2SrcSubLanes[DstSubLane] < 0)
      return SDValue();
  }
  assert(0 <= TopSrcSubLane && TopSrcSubLane < NumSubLanes &&
         "Unexpected source lane");

  // Create a repeating shuffle mask for the entire vector.
  SmallVector<int, 8> RepeatedMask((unsigned)NumElts, -1);
  for (int SubLane = 0; SubLane <= TopSrcSubLane; ++SubLane) {
    int Lane = SubLane / SubLaneScale;
    auto &RepeatedSubLaneMask = RepeatedSubLaneMasks[SubLane % SubLaneScale];
    for (int Elt = 0; Elt != NumSubLaneElts; ++Elt) {
      int M = RepeatedSubLaneMask[Elt];
      if (M < 0)
        continue;
      int Idx = (SubLane * NumSubLaneElts) + Elt;
      RepeatedMask[Idx] = M + (Lane * NumLaneElts);
    }
  }
  SDValue RepeatedShuffle = DAG.getVectorShuffle(VT, DL, V1, V2, RepeatedMask);

  // Shuffle each source sub-lane to its destination.
  SmallVector<int, 8> SubLaneMask((unsigned)NumElts, -1);
  for (int i = 0; i != NumElts; i += NumSubLaneElts) {
    int SrcSubLane = Dst2SrcSubLanes[i / NumSubLaneElts];
    if (SrcSubLane < 0)
      continue;
    for (int j = 0; j != NumSubLaneElts; ++j)
      SubLaneMask[i + j] = j + (SrcSubLane * NumSubLaneElts);
  }

  return DAG.getVectorShuffle(VT, DL, RepeatedShuffle, DAG.getUNDEF(VT),
                              SubLaneMask);
}
static bool matchShuffleWithSHUFPD(MVT VT, SDValue &V1, SDValue &V2,
                                   bool &ForceV1Zero, bool &ForceV2Zero,
                                   unsigned &ShuffleImm, ArrayRef<int> Mask,
                                   const APInt &Zeroable) {
  int NumElts = VT.getVectorNumElements();
  assert(VT.getScalarSizeInBits() == 64 &&
         (NumElts == 2 || NumElts == 4 || NumElts == 8) &&
         "Unexpected data type for VSHUFPD");
  assert(isUndefOrZeroOrInRange(Mask, 0, 2 * NumElts) &&
         "Illegal shuffle mask");

  bool ZeroLane[2] = { true, true };
  for (int i = 0; i < NumElts; ++i)
    ZeroLane[i & 1] &= Zeroable[i];

  // Mask for V8F64: 0/1,  8/9,  2/3,  10/11, 4/5, ..
  // Mask for V4F64: 0/1,  4/5,  2/3,  6/7..
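  // e.g. for v4f64, Mask <0, 5, 2, 7> matches directly with ShuffleImm
  // 0b1010, while <4, 1, 6, 3> only matches after commuting V1 and V2.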
  ShuffleImm = 0;
  bool ShufpdMask = true;
  bool CommutableMask = true;
  for (int i = 0; i < NumElts; ++i) {
    if (Mask[i] == SM_SentinelUndef || ZeroLane[i & 1])
      continue;
    if (Mask[i] < 0)
      return false;
    int Val = (i & 6) + NumElts * (i & 1);
    int CommutVal = (i & 0xe) + NumElts * ((i & 1) ^ 1);
    if (Mask[i] < Val || Mask[i] > Val + 1)
      ShufpdMask = false;
    if (Mask[i] < CommutVal || Mask[i] > CommutVal + 1)
      CommutableMask = false;
    ShuffleImm |= (Mask[i] % 2) << i;
  }

  if (!ShufpdMask && !CommutableMask)
    return false;

  if (!ShufpdMask && CommutableMask)
    std::swap(V1, V2);

  ForceV1Zero = ZeroLane[0];
  ForceV2Zero = ZeroLane[1];
  return true;
}
static SDValue lowerShuffleWithSHUFPD(const SDLoc &DL, MVT VT, SDValue V1,
                                      SDValue V2, ArrayRef<int> Mask,
                                      const APInt &Zeroable,
                                      const X86Subtarget &Subtarget,
                                      SelectionDAG &DAG) {
  assert((VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v8f64) &&
         "Unexpected data type for VSHUFPD");

  unsigned Immediate = 0;
  bool ForceV1Zero = false, ForceV2Zero = false;
  if (!matchShuffleWithSHUFPD(VT, V1, V2, ForceV1Zero, ForceV2Zero, Immediate,
                              Mask, Zeroable))
    return SDValue();

  // Create a REAL zero vector - ISD::isBuildVectorAllZeros allows UNDEFs.
  if (ForceV1Zero)
    V1 = getZeroVector(VT, Subtarget, DAG, DL);
  if (ForceV2Zero)
    V2 = getZeroVector(VT, Subtarget, DAG, DL);

  return DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
                     DAG.getTargetConstant(Immediate, DL, MVT::i8));
}
// Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
// by zeroable elements in the remaining 24 elements. Turn this into two
// vmovqb instructions shuffled together.
static SDValue lowerShuffleAsVTRUNCAndUnpack(const SDLoc &DL, MVT VT,
                                             SDValue V1, SDValue V2,
                                             ArrayRef<int> Mask,
                                             const APInt &Zeroable,
                                             SelectionDAG &DAG) {
  assert(VT == MVT::v32i8 && "Unexpected type!");

  // The first 8 indices should be every 8th element.
  if (!isSequentialOrUndefInRange(Mask, 0, 8, 0, 8))
    return SDValue();

  // Remaining elements need to be zeroable.
  if (Zeroable.countLeadingOnes() < (Mask.size() - 8))
    return SDValue();

  V1 = DAG.getBitcast(MVT::v4i64, V1);
  V2 = DAG.getBitcast(MVT::v4i64, V2);

  V1 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V1);
  V2 = DAG.getNode(X86ISD::VTRUNC, DL, MVT::v16i8, V2);

  // The VTRUNCs will put 0s in the upper 12 bytes. Use them to put zeroes in
  // the upper bits of the result using an unpckldq.
  SDValue Unpack = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2,
                                        { 0, 1, 2, 3, 16, 17, 18, 19,
                                          4, 5, 6, 7, 20, 21, 22, 23 });
  // Insert the unpckldq into a zero vector to widen to v32i8.
  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v32i8,
                     DAG.getConstant(0, DL, MVT::v32i8), Unpack,
                     DAG.getIntPtrConstant(0, DL));
}
/// Handle lowering of 4-lane 64-bit floating point shuffles.
///
/// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
/// isn't available.
static SDValue lowerV4F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4f64, V1, V2, Mask, Zeroable,
                                     Subtarget, DAG))
    return V;

  if (V2.isUndef()) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4f64, V1, V2,
                                                    Mask, Subtarget, DAG))
      return Broadcast;

    // Use low duplicate instructions for masks that match their pattern.
    if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2}))
      return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);

    if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
      // Non-half-crossing single input shuffles can be lowered with an
      // interleaved permutation.
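      // e.g. the in-lane swap <1, 0, 3, 2> encodes as VPERMILPMask 0b0101.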
      unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
                              ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
                         DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
    }

    // With AVX2 we have direct support for this permutation.
    if (Subtarget.hasAVX2())
      return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
                         getV4X86ShuffleImm8ForMask(Mask, DL, DAG));

    // Try to create an in-lane repeating shuffle mask and then shuffle the
    // results into the target lanes.
    if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
            DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
      return V;

    // Try to permute the lanes and then use a per-lane permute.
    if (SDValue V = lowerShuffleAsLanePermuteAndPermute(DL, MVT::v4f64, V1, V2,
                                                        Mask, DAG, Subtarget))
      return V;

    // Otherwise, fall back.
    return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v4f64, V1, V2, Mask,
                                               DAG, Subtarget);
  }

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4f64, Mask, V1, V2, DAG))
    return V;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  // Check if the blend happens to exactly fit that of SHUFPD.
  if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v4f64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Op;

  // If we have lane crossing shuffles AND they don't all come from the lower
  // lane elements, lower to SHUFPD(VPERM2F128(V1, V2), VPERM2F128(V1, V2)).
  // TODO: Handle BUILD_VECTOR sources which getVectorShuffle currently
  // canonicalizes to a blend of splat which isn't necessary for this combine.
  if (is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask) &&
      !all_of(Mask, [](int M) { return M < 2 || (4 <= M && M < 6); }) &&
      (V1.getOpcode() != ISD::BUILD_VECTOR) &&
      (V2.getOpcode() != ISD::BUILD_VECTOR))
    if (SDValue Op = lowerShuffleAsLanePermuteAndSHUFP(DL, MVT::v4f64, V1, V2,
                                                       Mask, DAG))
      return Op;

  // If we have one input in place, then we can permute the other input and
  // blend the result.
  if (isShuffleMaskInputInPlace(0, Mask) || isShuffleMaskInputInPlace(1, Mask))
    return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2, Mask,
                                                Subtarget, DAG);

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
    return V;

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle. However, if we have AVX2 and either inputs are already in place,
  // we will be able to shuffle even across lanes the other input in a single
  // instruction so skip this pattern.
  if (!(Subtarget.hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
                                isShuffleMaskInputInPlace(1, Mask))))
    if (SDValue V = lowerShuffleAsLanePermuteAndRepeatedMask(
            DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
      return V;

  // If we have VLX support, we can use VEXPAND.
  if (Subtarget.hasVLX())
    if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4f64, Zeroable, Mask, V1, V2,
                                         DAG, Subtarget))
      return V;

  // If we have AVX2 then we always want to lower with a blend because at v4 we
  // can fully permute the elements.
  if (Subtarget.hasAVX2())
    return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2, Mask,
                                                Subtarget, DAG);

  // Otherwise fall back on generic lowering.
  return lowerShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask,
                                    Subtarget, DAG);
}
/// Handle lowering of 4-lane 64-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v4i64 shuffling.
static SDValue lowerV4I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
  assert(Subtarget.hasAVX2() && "We can only lower v4i64 with AVX2!");

  if (SDValue V = lowerV2X128Shuffle(DL, MVT::v4i64, V1, V2, Mask, Zeroable,
                                     Subtarget, DAG))
    return V;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v4i64, V1, V2, Mask,
                                                  Subtarget, DAG))
    return Broadcast;

  if (V2.isUndef()) {
    // When the shuffle is mirrored between the 128-bit lanes of the unit, we
    // can use lower latency instructions that will operate on both lanes.
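    // e.g. the mirrored mask <1, 0, 3, 2> repeats <1, 0> per lane, which
    // widens to the v8i32 PSHUFD mask <2, 3, 0, 1> applied in both lanes.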
    SmallVector<int, 2> RepeatedMask;
    if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
      SmallVector<int, 4> PSHUFDMask;
      narrowShuffleMaskElts(2, RepeatedMask, PSHUFDMask);
      return DAG.getBitcast(
          MVT::v4i64,
          DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
                      DAG.getBitcast(MVT::v8i32, V1),
                      getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
    }

    // AVX2 provides a direct instruction for permuting a single input across
    // lanes.
    return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DL, DAG));
  }

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // If we have VLX support, we can use VALIGN or VEXPAND.
  if (Subtarget.hasVLX()) {
    if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v4i64, V1, V2, Mask,
                                              Subtarget, DAG))
      return Rotate;

    if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v4i64, Zeroable, Mask, V1, V2,
                                         DAG, Subtarget))
      return V;
  }

  // Try to use PALIGNR.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v4i64, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v4i64, Mask, V1, V2, DAG))
    return V;

  // If we have one input in place, then we can permute the other input and
  // blend the result.
  if (isShuffleMaskInputInPlace(0, Mask) || isShuffleMaskInputInPlace(1, Mask))
    return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2, Mask,
                                                Subtarget, DAG);

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
    return V;

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle. However, if we have AVX2 and either inputs are already in place,
  // we will be able to shuffle even across lanes the other input in a single
  // instruction so skip this pattern.
  if (!isShuffleMaskInputInPlace(0, Mask) &&
      !isShuffleMaskInputInPlace(1, Mask))
    if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
            DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
      return Result;

  // Otherwise fall back on generic blend lowering.
  return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2, Mask,
                                              Subtarget, DAG);
}
/// Handle lowering of 8-lane 32-bit floating point shuffles.
///
/// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
/// isn't available.
static SDValue lowerV8F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8f32, V1, V2, Mask,
                                                  Subtarget, DAG))
    return Broadcast;

  // If the shuffle mask is repeated in each 128-bit lane, we have many more
  // options to efficiently lower the shuffle.
  SmallVector<int, 4> RepeatedMask;
  if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
    assert(RepeatedMask.size() == 4 &&
           "Repeated masks must be half the mask width!");

    // Use even/odd duplicate instructions for masks that match their pattern.
    if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
      return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
    if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
      return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);

    if (V2.isUndef())
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));

    // Use dedicated unpack instructions for masks that match their pattern.
    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8f32, Mask, V1, V2, DAG))
      return V;

    // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
    // have already handled any direct blends.
    return lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
  }

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
    return V;

  // If we have a single input shuffle with different shuffle patterns in the
  // two 128-bit lanes use the variable mask to VPERMILPS.
  if (V2.isUndef()) {
    if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask)) {
      SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
      return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v8f32, V1, VPermMask);
    }
    if (Subtarget.hasAVX2()) {
      SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
      return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32, VPermMask, V1);
    }
    // Otherwise, fall back.
    return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v8f32, V1, V2, Mask,
                                               DAG, Subtarget);
  }

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
          DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
    return Result;

  // If we have VLX support, we can use VEXPAND.
  if (Subtarget.hasVLX())
    if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f32, Zeroable, Mask, V1, V2,
                                         DAG, Subtarget))
      return V;

  // For non-AVX512, if the mask is of 16-bit elements in lane then try to
  // split, since after the split we get more efficient code using vpunpcklwd
  // and vpunpckhwd instructions than vblend.
  if (!Subtarget.hasAVX512() && isUnpackWdShuffleMask(Mask, MVT::v8f32))
    if (SDValue V = lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
                                               Subtarget, DAG))
      return V;

  // If we have AVX2 then we always want to lower with a blend because at v8 we
  // can fully permute the elements.
  if (Subtarget.hasAVX2())
    return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2, Mask,
                                                Subtarget, DAG);

  // Otherwise fall back on generic lowering.
  return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask,
                                    Subtarget, DAG);
}
/// Handle lowering of 8-lane 32-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v8i32 shuffling.
static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
  assert(Subtarget.hasAVX2() && "We can only lower v8i32 with AVX2!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2, Mask,
                                                   Zeroable, Subtarget, DAG))
    return ZExt;

  // For non-AVX512, if the mask is of 16-bit elements in lane then try to
  // split, since after the split we get more efficient code than vblend by
  // using vpunpcklwd and vpunpckhwd instructions.
  if (isUnpackWdShuffleMask(Mask, MVT::v8i32) && !V2.isUndef() &&
      !Subtarget.hasAVX512())
    if (SDValue V = lowerShuffleAsSplitOrBlend(DL, MVT::v8i32, V1, V2, Mask,
                                               Subtarget, DAG))
      return V;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v8i32, V1, V2, Mask,
                                                  Subtarget, DAG))
    return Broadcast;

  // If the shuffle mask is repeated in each 128-bit lane we can use more
  // efficient instructions that mirror the shuffles across the two 128-bit
  // lanes.
  SmallVector<int, 4> RepeatedMask;
  bool Is128BitLaneRepeatedShuffle =
      is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask);
  if (Is128BitLaneRepeatedShuffle) {
    assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
    if (V2.isUndef())
      return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));

    // Use dedicated unpack instructions for masks that match their pattern.
    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v8i32, Mask, V1, V2, DAG))
      return V;
  }

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // If we have VLX support, we can use VALIGN or EXPAND.
  if (Subtarget.hasVLX()) {
    if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v8i32, V1, V2, Mask,
                                              Subtarget, DAG))
      return Rotate;

    if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i32, Zeroable, Mask, V1, V2,
                                         DAG, Subtarget))
      return V;
  }

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i32, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
    return V;

  if (V2.isUndef()) {
    // Try to produce a fixed cross-128-bit lane permute followed by unpack
    // because that should be faster than the variable permute alternatives.
    if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v8i32, Mask, V1, V2, DAG))
      return V;

    // If the shuffle patterns aren't repeated but it's a single input, directly
    // generate a cross-lane VPERMD instruction.
    SDValue VPermMask = getConstVector(Mask, MVT::v8i32, DAG, DL, true);
    return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8i32, VPermMask, V1);
  }

  // Assume that a single SHUFPS is faster than an alternative sequence of
  // multiple instructions (even if the CPU has a domain penalty).
  // If some CPU is harmed by the domain switch, we can fix it in a later pass.
  if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
    SDValue CastV1 = DAG.getBitcast(MVT::v8f32, V1);
    SDValue CastV2 = DAG.getBitcast(MVT::v8f32, V2);
    SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask,
                                            CastV1, CastV2, DAG);
    return DAG.getBitcast(MVT::v8i32, ShufPS);
  }

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
          DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
    return Result;

  // Otherwise fall back on generic blend lowering.
  return lowerShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2, Mask,
                                              Subtarget, DAG);
}
/// Handle lowering of 16-lane 16-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v16i16 shuffling.
static SDValue lowerV16I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  const APInt &Zeroable, SDValue V1, SDValue V2,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
  assert(Subtarget.hasAVX2() && "We can only lower v16i16 with AVX2!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
          DL, MVT::v16i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return ZExt;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v16i16, V1, V2, Mask,
                                                  Subtarget, DAG))
    return Broadcast;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i16, Mask, V1, V2, DAG))
    return V;

  // Use dedicated pack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v16i16, Mask, V1, V2, DAG,
                                       Subtarget))
    return V;

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i16, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
    return V;

  if (V2.isUndef()) {
    // Try to use bit rotation instructions.
    if (SDValue Rotate =
            lowerShuffleAsBitRotate(DL, MVT::v16i16, V1, Mask, Subtarget, DAG))
      return Rotate;

    // Try to produce a fixed cross-128-bit lane permute followed by unpack
    // because that should be faster than the variable permute alternatives.
    if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v16i16, Mask, V1, V2, DAG))
      return V;

    // There are no generalized cross-lane shuffle operations available on i16
    // element types.
    if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask)) {
      if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
              DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
        return V;

      return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v16i16, V1, V2, Mask,
                                                 DAG, Subtarget);
    }

    SmallVector<int, 8> RepeatedMask;
    if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
      // As this is a single-input shuffle, the repeated mask should be
      // a strictly valid v8i16 mask that we can pass through to the v8i16
      // lowering to handle even the v16 case.
      return lowerV8I16GeneralSingleInputShuffle(
          DL, MVT::v16i16, V1, RepeatedMask, Subtarget, DAG);
    }
  }

  if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v16i16, Mask, V1, V2,
                                              Zeroable, Subtarget, DAG))
    return PSHUFB;

  // AVX512BWVL can lower to VPERMW.
  if (Subtarget.hasBWI() && Subtarget.hasVLX())
    return lowerShuffleWithPERMV(DL, MVT::v16i16, Mask, V1, V2, DAG);

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
          DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
    return Result;

  // Try to permute the lanes and then use a per-lane permute.
  if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
          DL, MVT::v16i16, V1, V2, Mask, DAG, Subtarget))
    return V;

  // Otherwise fall back on generic lowering.
  return lowerShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask,
                                    Subtarget, DAG);
}
/// Handle lowering of 32-lane 8-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v32i8 shuffling.
static SDValue lowerV32I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
  assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
  assert(Subtarget.hasAVX2() && "We can only lower v32i8 with AVX2!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2, Mask,
                                                   Zeroable, Subtarget, DAG))
    return ZExt;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, MVT::v32i8, V1, V2, Mask,
                                                  Subtarget, DAG))
    return Broadcast;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i8, Mask, V1, V2, DAG))
    return V;

  // Use dedicated pack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v32i8, Mask, V1, V2, DAG,
                                       Subtarget))
    return V;

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i8, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  // Try to use bit rotation instructions.
  if (V2.isUndef())
    if (SDValue Rotate =
            lowerShuffleAsBitRotate(DL, MVT::v32i8, V1, Mask, Subtarget, DAG))
      return Rotate;

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
    return V;

  // There are no generalized cross-lane shuffle operations available on i8
  // element types.
  if (V2.isUndef() && is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask)) {
    // Try to produce a fixed cross-128-bit lane permute followed by unpack
    // because that should be faster than the variable permute alternatives.
    if (SDValue V = lowerShuffleWithUNPCK256(DL, MVT::v32i8, Mask, V1, V2, DAG))
      return V;

    if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
            DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
      return V;

    return lowerShuffleAsLanePermuteAndShuffle(DL, MVT::v32i8, V1, V2, Mask,
                                               DAG, Subtarget);
  }

  if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i8, Mask, V1, V2,
                                              Zeroable, Subtarget, DAG))
    return PSHUFB;

  // AVX512VBMIVL can lower to VPERMB.
  if (Subtarget.hasVBMI() && Subtarget.hasVLX())
    return lowerShuffleWithPERMV(DL, MVT::v32i8, Mask, V1, V2, DAG);

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
          DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
    return Result;

  // Try to permute the lanes and then use a per-lane permute.
  if (SDValue V = lowerShuffleAsLanePermuteAndPermute(
          DL, MVT::v32i8, V1, V2, Mask, DAG, Subtarget))
    return V;

  // Look for {0, 8, 16, 24, 32, 40, 48, 56} in the first 8 elements, followed
  // by zeroable elements in the remaining 24 elements. Turn this into two
  // vmovqb instructions shuffled together.
  if (Subtarget.hasVLX())
    if (SDValue V = lowerShuffleAsVTRUNCAndUnpack(DL, MVT::v32i8, V1, V2,
                                                  Mask, Zeroable, DAG))
      return V;

  // Otherwise fall back on generic lowering.
  return lowerShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask,
                                    Subtarget, DAG);
}

/// High-level routine to lower various 256-bit x86 vector shuffles.
///
/// This routine either breaks down the specific type of a 256-bit x86 vector
/// shuffle or splits it into two 128-bit shuffles and fuses the results back
/// together based on the available instructions.
static SDValue lower256BitShuffle(const SDLoc &DL, ArrayRef<int> Mask, MVT VT,
                                  SDValue V1, SDValue V2, const APInt &Zeroable,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  // If we have a single input to the zero element, insert that into V1 if we
  // can do so cheaply.
  int NumElts = VT.getVectorNumElements();
  int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });

  if (NumV2Elements == 1 && Mask[0] >= NumElts)
    if (SDValue Insertion = lowerShuffleAsElementInsertion(
            DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
      return Insertion;

  // Handle special cases where the lower or upper half is UNDEF.
  if (SDValue V =
          lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
    return V;

  // There is a really nice hard cut-over between AVX1 and AVX2 that means we
  // can check for those subtargets here and avoid much of the subtarget
  // querying in the per-vector-type lowering routines. With AVX1 we have
  // essentially *zero* ability to manipulate a 256-bit vector with integer
  // types. Since we'll use floating point types there eventually, just
  // immediately cast everything to a float and operate entirely in that domain.
  if (VT.isInteger() && !Subtarget.hasAVX2()) {
    int ElementBits = VT.getScalarSizeInBits();
    if (ElementBits < 32) {
      // No floating point type available, if we can't use the bit operations
      // for masking/blending then decompose into 128-bit vectors.
      if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
                                            Subtarget, DAG))
        return V;
      if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
        return V;
      return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
    }

    MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
                                VT.getVectorNumElements());
    V1 = DAG.getBitcast(FpVT, V1);
    V2 = DAG.getBitcast(FpVT, V2);
    return DAG.getBitcast(VT, DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
  }

  switch (VT.SimpleTy) {
  case MVT::v4f64:
    return lowerV4F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v4i64:
    return lowerV4I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v8f32:
    return lowerV8F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v8i32:
    return lowerV8I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v16i16:
    return lowerV16I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v32i8:
    return lowerV32I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);

  default:
    llvm_unreachable("Not a valid 256-bit x86 vector type!");
  }
}

/// Try to lower a vector shuffle as a series of 128-bit shuffles.
static SDValue lowerV4X128Shuffle(const SDLoc &DL, MVT VT, ArrayRef<int> Mask,
                                  const APInt &Zeroable, SDValue V1, SDValue V2,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  assert(VT.getScalarSizeInBits() == 64 &&
         "Unexpected element type size for 128bit shuffle.");

  // Handling a 256-bit vector requires VLX, and lowerV2X128VectorShuffle() is
  // most probably a better solution for that case.
  assert(VT.is512BitVector() && "Unexpected vector size for 512bit shuffle.");

  // TODO - use Zeroable like we do for lowerV2X128VectorShuffle?
  SmallVector<int, 4> Widened128Mask;
  if (!canWidenShuffleElements(Mask, Widened128Mask))
    return SDValue();
  assert(Widened128Mask.size() == 4 && "Shuffle widening mismatch");

  // Try to use an insert into a zero vector.
  if (Widened128Mask[0] == 0 && (Zeroable & 0xf0) == 0xf0 &&
      (Widened128Mask[1] == 1 || (Zeroable & 0x0c) == 0x0c)) {
    unsigned NumElts = ((Zeroable & 0x0c) == 0x0c) ? 2 : 4;
    MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
    SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
                              DAG.getIntPtrConstant(0, DL));
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                       getZeroVector(VT, Subtarget, DAG, DL), LoV,
                       DAG.getIntPtrConstant(0, DL));
  }

  // Check for patterns which can be matched with a single insert of a 256-bit
  // subvector.
  bool OnlyUsesV1 = isShuffleEquivalent(V1, V2, Mask, {0, 1, 2, 3, 0, 1, 2, 3});
  if (OnlyUsesV1 ||
      isShuffleEquivalent(V1, V2, Mask, {0, 1, 2, 3, 8, 9, 10, 11})) {
    MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 4);
    SDValue SubVec =
        DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, OnlyUsesV1 ? V1 : V2,
                    DAG.getIntPtrConstant(0, DL));
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, V1, SubVec,
                       DAG.getIntPtrConstant(4, DL));
  }

  // See if this is an insertion of the lower 128-bits of V2 into V1.
  bool IsInsert = true;
  int V2Index = -1;
  for (int i = 0; i < 4; ++i) {
    assert(Widened128Mask[i] >= -1 && "Illegal shuffle sentinel value");
    if (Widened128Mask[i] < 0)
      continue;

    // Make sure all V1 subvectors are in place.
    if (Widened128Mask[i] < 4) {
      if (Widened128Mask[i] != i) {
        IsInsert = false;
        break;
      }
    } else {
      // Make sure we only have a single V2 index and it's the lowest 128-bits.
      if (V2Index >= 0 || Widened128Mask[i] != 4) {
        IsInsert = false;
        break;
      }
      V2Index = i;
    }
  }
  if (IsInsert && V2Index >= 0) {
    MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(), 2);
    SDValue Subvec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
                                 DAG.getIntPtrConstant(0, DL));
    return insert128BitVector(V1, Subvec, V2Index * 2, DAG, DL);
  }

  // See if we can widen to a 256-bit lane shuffle; we're going to lose 128-lane
  // UNDEF info by lowering to X86ISD::SHUF128 anyway, so by widening where
  // possible we at least ensure the lanes stay sequential to help later
  // combines.
  SmallVector<int, 2> Widened256Mask;
  if (canWidenShuffleElements(Widened128Mask, Widened256Mask)) {
    Widened128Mask.clear();
    narrowShuffleMaskElts(2, Widened256Mask, Widened128Mask);
  }

  // Try to lower to vshuf64x2/vshuf32x4.
  SDValue Ops[2] = {DAG.getUNDEF(VT), DAG.getUNDEF(VT)};
  unsigned PermMask = 0;
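  // Each pair of bits in the immediate selects a source 128-bit lane for one
  // result lane, mirroring the VSHUF64x2 encoding: result lanes 0-1 are taken
  // from Ops[0] and lanes 2-3 from Ops[1], with each 2-bit field giving the
  // lane index within that operand.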
  // Ensure elements came from the same Op.
  for (int i = 0; i < 4; ++i) {
    assert(Widened128Mask[i] >= -1 && "Illegal shuffle sentinel value");
    if (Widened128Mask[i] < 0)
      continue;

    SDValue Op = Widened128Mask[i] >= 4 ? V2 : V1;
    unsigned OpIndex = i / 2;
    if (Ops[OpIndex].isUndef())
      Ops[OpIndex] = Op;
    else if (Ops[OpIndex] != Op)
      return SDValue();

    // Convert the 128-bit shuffle mask selection values into 128-bit selection
    // bits defined by a vshuf64x2 instruction's immediate control byte.
    PermMask |= (Widened128Mask[i] % 4) << (i * 2);
  }

  return DAG.getNode(X86ISD::SHUF128, DL, VT, Ops[0], Ops[1],
                     DAG.getTargetConstant(PermMask, DL, MVT::i8));
}

/// Handle lowering of 8-lane 64-bit floating point shuffles.
static SDValue lowerV8F64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");

  if (V2.isUndef()) {
    // Use low duplicate instructions for masks that match their pattern.
    if (isShuffleEquivalent(V1, V2, Mask, {0, 0, 2, 2, 4, 4, 6, 6}))
      return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v8f64, V1);

    if (!is128BitLaneCrossingShuffleMask(MVT::v8f64, Mask)) {
      // Non-half-crossing single input shuffles can be lowered with an
      // interleaved permutation.
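      // Each bit of the VPERMILPD immediate picks the even (0) or odd (1)
      // double within the corresponding 128-bit pair, so bit i is set below
      // exactly when element i wants the odd member of its pair.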
      unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
                              ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3) |
                              ((Mask[4] == 5) << 4) | ((Mask[5] == 5) << 5) |
                              ((Mask[6] == 7) << 6) | ((Mask[7] == 7) << 7);
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f64, V1,
                         DAG.getTargetConstant(VPERMILPMask, DL, MVT::i8));
    }

    SmallVector<int, 4> RepeatedMask;
    if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask))
      return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8f64, V1,
                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));
  }

  if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8f64, Mask, Zeroable, V1,
                                           V2, Subtarget, DAG))
    return Shuf128;

  if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG))
    return Unpck;

  // Check if the blend happens to exactly fit that of SHUFPD.
  if (SDValue Op = lowerShuffleWithSHUFPD(DL, MVT::v8f64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Op;

  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8f64, Zeroable, Mask, V1, V2,
                                       DAG, Subtarget))
    return V;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8f64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  return lowerShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, DAG);
}

/// Handle lowering of 16-lane 32-bit floating point shuffles.
static SDValue lowerV16F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  const APInt &Zeroable, SDValue V1, SDValue V2,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");

  // If the shuffle mask is repeated in each 128-bit lane, we have many more
  // options to efficiently lower the shuffle.
  SmallVector<int, 4> RepeatedMask;
  if (is128BitLaneRepeatedShuffleMask(MVT::v16f32, Mask, RepeatedMask)) {
    assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");

    // Use even/odd duplicate instructions for masks that match their pattern.
    if (isShuffleEquivalent(V1, V2, RepeatedMask, {0, 0, 2, 2}))
      return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v16f32, V1);
    if (isShuffleEquivalent(V1, V2, RepeatedMask, {1, 1, 3, 3}))
      return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v16f32, V1);

    if (V2.isUndef())
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v16f32, V1,
                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));

    // Use dedicated unpack instructions for masks that match their pattern.
    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG))
      return V;

    if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16f32, V1, V2, Mask,
                                            Zeroable, Subtarget, DAG))
      return Blend;

    // Otherwise, fall back to a SHUFPS sequence.
    return lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask, V1, V2, DAG);
  }

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v16f32, V1, V2, Mask, Subtarget, DAG))
    return V;

  // If we have a single input shuffle with different shuffle patterns in the
  // 128-bit lanes and don't lane cross, use variable mask VPERMILPS.
  if (V2.isUndef() &&
      !is128BitLaneCrossingShuffleMask(MVT::v16f32, Mask)) {
    SDValue VPermMask = getConstVector(Mask, MVT::v16i32, DAG, DL, true);
    return DAG.getNode(X86ISD::VPERMILPV, DL, MVT::v16f32, V1, VPermMask);
  }

  // If we have AVX512F support, we can use VEXPAND.
  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16f32, Zeroable, Mask,
                                       V1, V2, DAG, Subtarget))
    return V;

  return lowerShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, DAG);
}

/// Handle lowering of 8-lane 64-bit integer shuffles.
static SDValue lowerV8I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");

  if (V2.isUndef()) {
    // When the shuffle is mirrored between the 128-bit lanes of the unit, we
    // can use lower-latency instructions that will operate on all four
    // 128-bit lanes.
    SmallVector<int, 2> Repeated128Mask;
    if (is128BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated128Mask)) {
      SmallVector<int, 4> PSHUFDMask;
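      // Rewrite the repeated 2-element i64 mask as a 4-element i32 mask so a
      // single PSHUFD on the v16i32 bitcast applies it across all four
      // 128-bit lanes at once.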
      narrowShuffleMaskElts(2, Repeated128Mask, PSHUFDMask);
      return DAG.getBitcast(
          MVT::v8i64,
          DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32,
                      DAG.getBitcast(MVT::v16i32, V1),
                      getV4X86ShuffleImm8ForMask(PSHUFDMask, DL, DAG)));
    }

    SmallVector<int, 4> Repeated256Mask;
    if (is256BitLaneRepeatedShuffleMask(MVT::v8i64, Mask, Repeated256Mask))
      return DAG.getNode(X86ISD::VPERMI, DL, MVT::v8i64, V1,
                         getV4X86ShuffleImm8ForMask(Repeated256Mask, DL, DAG));
  }

  if (SDValue Shuf128 = lowerV4X128Shuffle(DL, MVT::v8i64, Mask, Zeroable, V1,
                                           V2, Subtarget, DAG))
    return Shuf128;

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v8i64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // Try to use VALIGN.
  if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v8i64, V1, V2, Mask,
                                            Subtarget, DAG))
    return Rotate;

  // Try to use PALIGNR.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v8i64, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  if (SDValue Unpck = lowerShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG))
    return Unpck;

  // If we have AVX512F support, we can use VEXPAND.
  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v8i64, Zeroable, Mask, V1, V2,
                                       DAG, Subtarget))
    return V;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v8i64, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  return lowerShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, DAG);
}

/// Handle lowering of 16-lane 32-bit integer shuffles.
static SDValue lowerV16I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  const APInt &Zeroable, SDValue V1, SDValue V2,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
          DL, MVT::v16i32, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return ZExt;

  // If the shuffle mask is repeated in each 128-bit lane we can use more
  // efficient instructions that mirror the shuffles across the four 128-bit
  // lanes.
  SmallVector<int, 4> RepeatedMask;
  bool Is128BitLaneRepeatedShuffle =
      is128BitLaneRepeatedShuffleMask(MVT::v16i32, Mask, RepeatedMask);
  if (Is128BitLaneRepeatedShuffle) {
    assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
    if (V2.isUndef())
      return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v16i32, V1,
                         getV4X86ShuffleImm8ForMask(RepeatedMask, DL, DAG));

    // Use dedicated unpack instructions for masks that match their pattern.
    if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG))
      return V;
  }

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v16i32, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // Try to use VALIGN.
  if (SDValue Rotate = lowerShuffleAsVALIGN(DL, MVT::v16i32, V1, V2, Mask,
                                            Subtarget, DAG))
    return Rotate;

  // Try to use byte rotation instructions.
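  // (512-bit VPALIGNR is only available with AVX512BW.)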
  if (Subtarget.hasBWI())
    if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v16i32, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Rotate;

  // Assume that a single SHUFPS is faster than using a permv shuffle.
  // If some CPU is harmed by the domain switch, we can fix it in a later pass.
  if (Is128BitLaneRepeatedShuffle && isSingleSHUFPSMask(RepeatedMask)) {
    SDValue CastV1 = DAG.getBitcast(MVT::v16f32, V1);
    SDValue CastV2 = DAG.getBitcast(MVT::v16f32, V2);
    SDValue ShufPS = lowerShuffleWithSHUFPS(DL, MVT::v16f32, RepeatedMask,
                                            CastV1, CastV2, DAG);
    return DAG.getBitcast(MVT::v16i32, ShufPS);
  }

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v16i32, V1, V2, Mask, Subtarget, DAG))
    return V;

  // If we have AVX512F support, we can use VEXPAND.
  if (SDValue V = lowerShuffleToEXPAND(DL, MVT::v16i32, Zeroable, Mask, V1, V2,
                                       DAG, Subtarget))
    return V;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v16i32, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  return lowerShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, DAG);
}

/// Handle lowering of 32-lane 16-bit integer shuffles.
static SDValue lowerV32I16Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  const APInt &Zeroable, SDValue V1, SDValue V2,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
  assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
  assert(Subtarget.hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
          DL, MVT::v32i16, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return ZExt;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v32i16, Mask, V1, V2, DAG))
    return V;

  // Use dedicated pack instructions for masks that match their pattern.
  if (SDValue V =
          lowerShuffleWithPACK(DL, MVT::v32i16, Mask, V1, V2, DAG, Subtarget))
    return V;

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v32i16, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v32i16, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  if (V2.isUndef()) {
    // Try to use bit rotation instructions.
    if (SDValue Rotate =
            lowerShuffleAsBitRotate(DL, MVT::v32i16, V1, Mask, Subtarget, DAG))
      return Rotate;

    SmallVector<int, 8> RepeatedMask;
    if (is128BitLaneRepeatedShuffleMask(MVT::v32i16, Mask, RepeatedMask)) {
      // As this is a single-input shuffle, the repeated mask should be
      // a strictly valid v8i16 mask that we can pass through to the v8i16
      // lowering to handle even the v32 case.
      return lowerV8I16GeneralSingleInputShuffle(DL, MVT::v32i16, V1,
                                                 RepeatedMask, Subtarget, DAG);
    }
  }

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v32i16, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v32i16, Mask, V1, V2,
                                              Zeroable, Subtarget, DAG))
    return PSHUFB;

  return lowerShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, DAG);
}

/// Handle lowering of 64-lane 8-bit integer shuffles.
static SDValue lowerV64I8Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                 const APInt &Zeroable, SDValue V1, SDValue V2,
                                 const X86Subtarget &Subtarget,
                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
  assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
  assert(Subtarget.hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerShuffleAsZeroOrAnyExtend(
          DL, MVT::v64i8, V1, V2, Mask, Zeroable, Subtarget, DAG))
    return ZExt;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithUNPCK(DL, MVT::v64i8, Mask, V1, V2, DAG))
    return V;

  // Use dedicated pack instructions for masks that match their pattern.
  if (SDValue V = lowerShuffleWithPACK(DL, MVT::v64i8, Mask, V1, V2, DAG,
                                       Subtarget))
    return V;

  // Try to use shift instructions.
  if (SDValue Shift = lowerShuffleAsShift(DL, MVT::v64i8, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Shift;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerShuffleAsByteRotate(DL, MVT::v64i8, V1, V2, Mask,
                                                Subtarget, DAG))
    return Rotate;

  // Try to use bit rotation instructions.
  if (V2.isUndef())
    if (SDValue Rotate =
            lowerShuffleAsBitRotate(DL, MVT::v64i8, V1, Mask, Subtarget, DAG))
      return Rotate;

  // Lower as AND if possible.
  if (SDValue Masked = lowerShuffleAsBitMask(DL, MVT::v64i8, V1, V2, Mask,
                                             Zeroable, Subtarget, DAG))
    return Masked;

  if (SDValue PSHUFB = lowerShuffleWithPSHUFB(DL, MVT::v64i8, Mask, V1, V2,
                                              Zeroable, Subtarget, DAG))
    return PSHUFB;

  // VBMI can use VPERMV/VPERMV3 byte shuffles.
  if (Subtarget.hasVBMI())
    return lowerShuffleWithPERMV(DL, MVT::v64i8, Mask, V1, V2, DAG);

  // Try to create an in-lane repeating shuffle mask and then shuffle the
  // results into the target lanes.
  if (SDValue V = lowerShuffleAsRepeatedMaskAndLanePermute(
          DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
    return V;

  if (SDValue Blend = lowerShuffleAsBlend(DL, MVT::v64i8, V1, V2, Mask,
                                          Zeroable, Subtarget, DAG))
    return Blend;

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (!V2.isUndef())
    if (SDValue Result = lowerShuffleAsLanePermuteAndRepeatedMask(
            DL, MVT::v64i8, V1, V2, Mask, Subtarget, DAG))
      return Result;

  // FIXME: Implement direct support for this type!
  return splitAndLowerShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
}

/// High-level routine to lower various 512-bit x86 vector shuffles.
///
/// This routine either breaks down the specific type of a 512-bit x86 vector
/// shuffle or splits it into two 256-bit shuffles and fuses the results back
/// together based on the available instructions.
static SDValue lower512BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                  MVT VT, SDValue V1, SDValue V2,
                                  const APInt &Zeroable,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG) {
  assert(Subtarget.hasAVX512() &&
         "Cannot lower 512-bit vectors w/o basic ISA!");

  // If we have a single input to the zero element, insert that into V1 if we
  // can do so cheaply.
  int NumElts = Mask.size();
  int NumV2Elements = count_if(Mask, [NumElts](int M) { return M >= NumElts; });

  if (NumV2Elements == 1 && Mask[0] >= NumElts)
    if (SDValue Insertion = lowerShuffleAsElementInsertion(
            DL, VT, V1, V2, Mask, Zeroable, Subtarget, DAG))
      return Insertion;

  // Handle special cases where the lower or upper half is UNDEF.
  if (SDValue V =
          lowerShuffleWithUndefHalf(DL, VT, V1, V2, Mask, Subtarget, DAG))
    return V;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, Mask,
                                                  Subtarget, DAG))
    return Broadcast;

  if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI()) {
    // Try using bit ops for masking and blending before falling back to
    // splitting.
    if (SDValue V = lowerShuffleAsBitMask(DL, VT, V1, V2, Mask, Zeroable,
                                          Subtarget, DAG))
      return V;
    if (SDValue V = lowerShuffleAsBitBlend(DL, VT, V1, V2, Mask, DAG))
      return V;

    return splitAndLowerShuffle(DL, VT, V1, V2, Mask, DAG);
  }

  // Dispatch to each element type for lowering. If we don't have support for
  // specific element type shuffles at 512 bits, immediately split them and
  // lower them. Each lowering routine of a given type is allowed to assume that
  // the requisite ISA extensions for that element type are available.
  switch (VT.SimpleTy) {
  case MVT::v8f64:
    return lowerV8F64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v16f32:
    return lowerV16F32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v8i64:
    return lowerV8I64Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v16i32:
    return lowerV16I32Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v32i16:
    return lowerV32I16Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);
  case MVT::v64i8:
    return lowerV64I8Shuffle(DL, Mask, Zeroable, V1, V2, Subtarget, DAG);

  default:
    llvm_unreachable("Not a valid 512-bit x86 vector type!");
  }
}

static SDValue lower1BitShuffleAsKSHIFTR(const SDLoc &DL, ArrayRef<int> Mask,
                                         MVT VT, SDValue V1, SDValue V2,
                                         const X86Subtarget &Subtarget,
                                         SelectionDAG &DAG) {
  // Shuffle should be unary.
  if (!V2.isUndef())
    return SDValue();

  int ShiftAmt = -1;
  int NumElts = Mask.size();
  for (int i = 0; i != NumElts; ++i) {
    int M = Mask[i];
    assert((M == SM_SentinelUndef || (0 <= M && M < NumElts)) &&
           "Unexpected mask index.");
    if (M < 0)
      continue;

    // The first non-undef element determines our shift amount.
    if (ShiftAmt < 0) {
      ShiftAmt = M - i;
      // Need to be shifting right.
      if (ShiftAmt <= 0)
        return SDValue();
    }
    // All non-undef elements must shift by the same amount.
    if (ShiftAmt != M - i)
      return SDValue();
  }
  assert(ShiftAmt >= 0 && "All undef?");

  // Great, we found a shift right.
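  // KSHIFT operates on whole mask registers: the 8-bit form needs AVX512DQ
  // and the 16-bit form is the AVX512F baseline, so narrower masks are first
  // widened with an INSERT_SUBVECTOR.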
  MVT WideVT = VT;
  if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
    WideVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
  SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideVT,
                            DAG.getUNDEF(WideVT), V1,
                            DAG.getIntPtrConstant(0, DL));
  Res = DAG.getNode(X86ISD::KSHIFTR, DL, WideVT, Res,
                    DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
                     DAG.getIntPtrConstant(0, DL));
}

// Determine if this shuffle can be implemented with a KSHIFT instruction.
// Returns the shift amount if possible or -1 if not. This is a simplified
// version of matchShuffleAsShift.
static int match1BitShuffleAsKSHIFT(unsigned &Opcode, ArrayRef<int> Mask,
                                    int MaskOffset, const APInt &Zeroable) {
  int Size = Mask.size();
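  // CheckZeros verifies that every element shifted in from the given side is
  // zeroable; MatchShift verifies that the surviving elements form a single
  // in-order run taken from the source at MaskOffset.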
  auto CheckZeros = [&](int Shift, bool Left) {
    for (int j = 0; j < Shift; ++j)
      if (!Zeroable[j + (Left ? 0 : (Size - Shift))])
        return false;

    return true;
  };

  auto MatchShift = [&](int Shift, bool Left) {
    unsigned Pos = Left ? Shift : 0;
    unsigned Low = Left ? 0 : Shift;
    unsigned Len = Size - Shift;
    return isSequentialOrUndefInRange(Mask, Pos, Len, Low + MaskOffset);
  };

  for (int Shift = 1; Shift != Size; ++Shift)
    for (bool Left : {true, false})
      if (CheckZeros(Shift, Left) && MatchShift(Shift, Left)) {
        Opcode = Left ? X86ISD::KSHIFTL : X86ISD::KSHIFTR;
        return Shift;
      }

  return -1;
}

// Lower vXi1 vector shuffles.
// There is no dedicated instruction on AVX-512 that shuffles the masks.
// The only way to shuffle bits is to sign-extend the mask vector to a SIMD
// vector, shuffle, and then truncate it back.
static SDValue lower1BitShuffle(const SDLoc &DL, ArrayRef<int> Mask,
                                MVT VT, SDValue V1, SDValue V2,
                                const APInt &Zeroable,
                                const X86Subtarget &Subtarget,
                                SelectionDAG &DAG) {
  assert(Subtarget.hasAVX512() &&
         "Cannot lower 512-bit vectors w/o basic ISA!");

  int NumElts = Mask.size();

  // Try to recognize shuffles that are just padding a subvector with zeros.
  int SubvecElts = 0;
  int Src = -1;
  for (int i = 0; i != NumElts; ++i) {
    if (Mask[i] >= 0) {
      // Grab the source from the first valid mask. All subsequent elements need
      // to use this same source.
      if (Src < 0)
        Src = Mask[i] / NumElts;
      if (Src != (Mask[i] / NumElts) || (Mask[i] % NumElts) != i)
        break;
    }

    ++SubvecElts;
  }
  assert(SubvecElts != NumElts && "Identity shuffle?");

  // Clip to a power of 2.
  SubvecElts = PowerOf2Floor(SubvecElts);

  // Make sure the number of zeroable bits in the top at least covers the bits
  // not covered by the subvector.
  if ((int)Zeroable.countLeadingOnes() >= (NumElts - SubvecElts)) {
    assert(Src >= 0 && "Expected a source!");
    MVT ExtractVT = MVT::getVectorVT(MVT::i1, SubvecElts);
    SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT,
                                  Src == 0 ? V1 : V2,
                                  DAG.getIntPtrConstant(0, DL));
    return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                       DAG.getConstant(0, DL, VT),
                       Extract, DAG.getIntPtrConstant(0, DL));
  }

  // Try a simple shift right with undef elements. Later we'll try with zeros.
  if (SDValue Shift = lower1BitShuffleAsKSHIFTR(DL, Mask, VT, V1, V2, Subtarget,
                                                DAG))
    return Shift;

  // Try to match KSHIFTs.
  unsigned Offset = 0;
  for (SDValue V : { V1, V2 }) {
    unsigned Opcode;
    int ShiftAmt = match1BitShuffleAsKSHIFT(Opcode, Mask, Offset, Zeroable);
    if (ShiftAmt >= 0) {
      MVT WideVT = VT;
      if ((!Subtarget.hasDQI() && NumElts == 8) || NumElts < 8)
        WideVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
      SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideVT,
                                DAG.getUNDEF(WideVT), V,
                                DAG.getIntPtrConstant(0, DL));
      // Widened right shifts need two shifts to ensure we shift in zeroes.
      if (Opcode == X86ISD::KSHIFTR && WideVT != VT) {
        int WideElts = WideVT.getVectorNumElements();
        // Shift left to put the original vector in the MSBs of the new size.
        Res = DAG.getNode(X86ISD::KSHIFTL, DL, WideVT, Res,
                          DAG.getTargetConstant(WideElts - NumElts, DL, MVT::i8));
        // Increase the shift amount to account for the left shift.
        ShiftAmt += WideElts - NumElts;
      }

      Res = DAG.getNode(Opcode, DL, WideVT, Res,
                        DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
                         DAG.getIntPtrConstant(0, DL));
    }
    Offset += NumElts; // Increment for next iteration.
  }

  MVT ExtVT;
  switch (VT.SimpleTy) {
  default:
    llvm_unreachable("Expected a vector of i1 elements");
  case MVT::v2i1:
    ExtVT = MVT::v2i64;
    break;
  case MVT::v4i1:
    ExtVT = MVT::v4i32;
    break;
  case MVT::v8i1:
    // Take 512-bit type, more shuffles on KNL. If we have VLX use a 256-bit
    // shuffle.
    ExtVT = Subtarget.hasVLX() ? MVT::v8i32 : MVT::v8i64;
    break;
  case MVT::v16i1:
    // Take 512-bit type, unless we are avoiding 512-bit types and have the
    // 256-bit operation available.
    ExtVT = Subtarget.canExtendTo512DQ() ? MVT::v16i32 : MVT::v16i16;
    break;
  case MVT::v32i1:
    // Take 512-bit type, unless we are avoiding 512-bit types and have the
    // 256-bit operation available.
    assert(Subtarget.hasBWI() && "Expected AVX512BW support");
    ExtVT = Subtarget.canExtendTo512BW() ? MVT::v32i16 : MVT::v32i8;
    break;
  case MVT::v64i1:
    // Fall back to scalarization. FIXME: We can do better if the shuffle
    // can be partitioned cleanly.
    if (!Subtarget.useBWIRegs())
      return SDValue();
    ExtVT = MVT::v64i8;
    break;
  }

  V1 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V1);
  V2 = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, V2);

  SDValue Shuffle = DAG.getVectorShuffle(ExtVT, DL, V1, V2, Mask);
  // i1 was sign-extended, so a sign-bit compare against zero recreates the
  // mask.
  int NumElems = VT.getVectorNumElements();
  if ((Subtarget.hasBWI() && (NumElems >= 32)) ||
      (Subtarget.hasDQI() && (NumElems < 32)))
    return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, ExtVT),
                        Shuffle, ISD::SETGT);

  return DAG.getNode(ISD::TRUNCATE, DL, VT, Shuffle);
}

/// Helper function that returns true if the shuffle mask should be
/// commuted to improve canonicalization.
static bool canonicalizeShuffleMaskWithCommute(ArrayRef<int> Mask) {
  int NumElements = Mask.size();

  int NumV1Elements = 0, NumV2Elements = 0;
  for (int M : Mask)
    if (M < 0)
      continue;
    else if (M < NumElements)
      ++NumV1Elements;
    else
      ++NumV2Elements;

  // Commute the shuffle as needed such that more elements come from V1 than
  // V2. This allows us to match the shuffle pattern strictly on how many
  // elements come from V1 without handling the symmetric cases.
  if (NumV2Elements > NumV1Elements)
    return true;

  assert(NumV1Elements > 0 && "No V1 indices");

  if (NumV2Elements == 0)
    return false;

  // When the number of V1 and V2 elements are the same, try to minimize the
  // number of uses of V2 in the low half of the vector. When that is tied,
  // ensure that the sum of indices for V1 is equal to or lower than the sum of
  // indices for V2. When those are equal, try to ensure that the number of odd
  // indices for V1 is lower than the number of odd indices for V2.
  if (NumV1Elements == NumV2Elements) {
    int LowV1Elements = 0, LowV2Elements = 0;
    for (int M : Mask.slice(0, NumElements / 2))
      if (M >= NumElements)
        ++LowV2Elements;
      else if (M >= 0)
        ++LowV1Elements;
    if (LowV2Elements > LowV1Elements)
      return true;
    if (LowV2Elements == LowV1Elements) {
      int SumV1Indices = 0, SumV2Indices = 0;
      for (int i = 0, Size = Mask.size(); i < Size; ++i)
        if (Mask[i] >= NumElements)
          SumV2Indices += i;
        else if (Mask[i] >= 0)
          SumV1Indices += i;
      if (SumV2Indices < SumV1Indices)
        return true;
      if (SumV2Indices == SumV1Indices) {
        int NumV1OddIndices = 0, NumV2OddIndices = 0;
        for (int i = 0, Size = Mask.size(); i < Size; ++i)
          if (Mask[i] >= NumElements)
            NumV2OddIndices += i % 2;
          else if (Mask[i] >= 0)
            NumV1OddIndices += i % 2;
        if (NumV2OddIndices < NumV1OddIndices)
          return true;
      }
    }
  }

  return false;
}

/// Top-level lowering for x86 vector shuffles.
///
/// This handles decomposition, canonicalization, and lowering of all x86
/// vector shuffles. Most of the specific lowering strategies are encapsulated
/// above in helper routines. The canonicalization attempts to widen shuffles
/// to involve fewer lanes of wider elements, consolidate symmetric patterns
/// s.t. only one of the two inputs needs to be tested, etc.
static SDValue lowerVECTOR_SHUFFLE(SDValue Op, const X86Subtarget &Subtarget,
                                   SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> OrigMask = SVOp->getMask();
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  MVT VT = Op.getSimpleValueType();
  int NumElements = VT.getVectorNumElements();
  SDLoc DL(Op);
  bool Is1BitVector = (VT.getVectorElementType() == MVT::i1);

  assert((VT.getSizeInBits() != 64 || Is1BitVector) &&
         "Can't lower MMX shuffles");

  bool V1IsUndef = V1.isUndef();
  bool V2IsUndef = V2.isUndef();
  if (V1IsUndef && V2IsUndef)
    return DAG.getUNDEF(VT);

  // When we create a shuffle node we put the UNDEF node to second operand,
  // but in some cases the first operand may be transformed to UNDEF.
  // In this case we should just commute the node.
  if (V1IsUndef)
    return DAG.getCommutedVectorShuffle(*SVOp);

  // Check for non-undef masks pointing at an undef vector and make the masks
  // undef as well. This makes it easier to match the shuffle based solely on
  // the mask.
  if (V2IsUndef &&
      any_of(OrigMask, [NumElements](int M) { return M >= NumElements; })) {
    SmallVector<int, 8> NewMask(OrigMask.begin(), OrigMask.end());
    for (int &M : NewMask)
      if (M >= NumElements)
        M = -1;
    return DAG.getVectorShuffle(VT, DL, V1, V2, NewMask);
  }

  // Check for illegal shuffle mask element index values.
  int MaskUpperLimit = OrigMask.size() * (V2IsUndef ? 1 : 2);
  (void)MaskUpperLimit;
  assert(llvm::all_of(OrigMask,
                      [&](int M) { return -1 <= M && M < MaskUpperLimit; }) &&
         "Out of bounds shuffle index");

  // We actually see shuffles that are entirely re-arrangements of a set of
  // zero inputs. This mostly happens while decomposing complex shuffles into
  // simple ones. Directly lower these as a buildvector of zeros.
  APInt KnownUndef, KnownZero;
  computeZeroableShuffleElements(OrigMask, V1, V2, KnownUndef, KnownZero);

  APInt Zeroable = KnownUndef | KnownZero;
  if (Zeroable.isAllOnesValue())
    return getZeroVector(VT, Subtarget, DAG, DL);

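  // Remember whether V2 is a build vector of all zeros; the widening logic
  // below can then fold the zero input directly into the widened mask.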
  bool V2IsZero = !V2IsUndef && ISD::isBuildVectorAllZeros(V2.getNode());

  // Try to collapse shuffles into using a vector type with fewer elements but
  // wider element types. We cap this to not form integers or floating point
  // elements wider than 64 bits, but it might be interesting to form i128
  // integers to handle flipping the low and high halves of AVX 256-bit vectors.
  SmallVector<int, 16> WidenedMask;
  if (VT.getScalarSizeInBits() < 64 && !Is1BitVector &&
      canWidenShuffleElements(OrigMask, Zeroable, V2IsZero, WidenedMask)) {
    // Shuffle mask widening should not interfere with a broadcast opportunity
    // by obfuscating the operands with bitcasts.
    // TODO: Avoid lowering directly from this top-level function: make this
    // a query (canLowerAsBroadcast) and defer lowering to the type-based calls.
    if (SDValue Broadcast = lowerShuffleAsBroadcast(DL, VT, V1, V2, OrigMask,
                                                    Subtarget, DAG))
      return Broadcast;

    MVT NewEltVT = VT.isFloatingPoint()
                       ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
                       : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
    int NewNumElts = NumElements / 2;
    MVT NewVT = MVT::getVectorVT(NewEltVT, NewNumElts);
    // Make sure that the new vector type is legal. For example, v2f64 isn't
    // legal on SSE1.
    if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
      if (V2IsZero) {
        // Modify the new Mask to take all zeros from the all-zero vector.
        // Choose indices that are blend-friendly.
        bool UsedZeroVector = false;
        assert(find(WidenedMask, SM_SentinelZero) != WidenedMask.end() &&
               "V2's non-undef elements are used?!");
        for (int i = 0; i != NewNumElts; ++i)
          if (WidenedMask[i] == SM_SentinelZero) {
            WidenedMask[i] = i + NewNumElts;
            UsedZeroVector = true;
          }

        // Ensure all elements of V2 are zero - isBuildVectorAllZeros permits
        // some elements to be undef.
        if (UsedZeroVector)
          V2 = getZeroVector(NewVT, Subtarget, DAG, DL);
      }
      V1 = DAG.getBitcast(NewVT, V1);
      V2 = DAG.getBitcast(NewVT, V2);
      return DAG.getBitcast(
          VT, DAG.getVectorShuffle(NewVT, DL, V1, V2, WidenedMask));
    }
  }

  // Commute the shuffle if it will improve canonicalization.
  SmallVector<int, 64> Mask(OrigMask.begin(), OrigMask.end());
  if (canonicalizeShuffleMaskWithCommute(Mask)) {
    ShuffleVectorSDNode::commuteMask(Mask);
    std::swap(V1, V2);
  }

  if (SDValue V = lowerShuffleWithVPMOV(DL, Mask, VT, V1, V2, DAG, Subtarget))
    return V;

  // For each vector width, delegate to a specialized lowering routine.
  if (VT.is128BitVector())
    return lower128BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);

  if (VT.is256BitVector())
    return lower256BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);

  if (VT.is512BitVector())
    return lower512BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);

  if (Is1BitVector)
    return lower1BitShuffle(DL, Mask, VT, V1, V2, Zeroable, Subtarget, DAG);

  llvm_unreachable("Unimplemented!");
}

/// Try to lower a VSELECT instruction to a vector shuffle.
static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
                                           const X86Subtarget &Subtarget,
                                           SelectionDAG &DAG) {
  SDValue Cond = Op.getOperand(0);
  SDValue LHS = Op.getOperand(1);
  SDValue RHS = Op.getOperand(2);
  MVT VT = Op.getSimpleValueType();

  // Only non-legal VSELECTs reach this lowering; convert those into generic
  // shuffles and re-use the shuffle lowering path for blends.
  SmallVector<int, 32> Mask;
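  // createShuffleMaskFromVSELECT only succeeds when the condition is a build
  // vector of constants, i.e. exactly the cases expressible as a fixed blend
  // mask.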
  if (createShuffleMaskFromVSELECT(Mask, Cond))
    return DAG.getVectorShuffle(VT, SDLoc(Op), LHS, RHS, Mask);

  return SDValue();
}

SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue Cond = Op.getOperand(0);
  SDValue LHS = Op.getOperand(1);
  SDValue RHS = Op.getOperand(2);

  // A vselect where all conditions and data are constants can be optimized into
  // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
  if (ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()) &&
      ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
      ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
    return SDValue();

  // Try to lower this to a blend-style vector shuffle. This can handle all
  // constant condition cases.
  if (SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG))
    return BlendOp;

  // If this VSELECT has a vector of i1 as a mask, it will be directly matched
  // with patterns on the mask registers on AVX-512.
  MVT CondVT = Cond.getSimpleValueType();
  unsigned CondEltSize = Cond.getScalarValueSizeInBits();
  if (CondEltSize == 1)
    return Op;

  // Variable blends are only legal from SSE4.1 onward.
  if (!Subtarget.hasSSE41())
    return SDValue();

  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
  unsigned EltSize = VT.getScalarSizeInBits();
  unsigned NumElts = VT.getVectorNumElements();

  // Expand v32i16/v64i8 without BWI.
  if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
    return SDValue();

  // If the VSELECT is on a 512-bit type, we have to convert a non-i1 condition
  // into an i1 condition so that we can use the mask-based 512-bit blend
  // instructions.
  if (VT.getSizeInBits() == 512) {
    // Build a mask by testing the condition against zero.
    MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
    SDValue Mask = DAG.getSetCC(dl, MaskVT, Cond,
                                DAG.getConstant(0, dl, CondVT),
                                ISD::SETNE);
    // Now return a new VSELECT using the mask.
    return DAG.getSelect(dl, VT, Mask, LHS, RHS);
  }

  // SEXT/TRUNC cases where the mask doesn't match the destination size.
  if (CondEltSize != EltSize) {
    // If we don't have a sign splat, rely on the expansion.
    if (CondEltSize != DAG.ComputeNumSignBits(Cond))
      return SDValue();

    MVT NewCondSVT = MVT::getIntegerVT(EltSize);
    MVT NewCondVT = MVT::getVectorVT(NewCondSVT, NumElts);
    Cond = DAG.getSExtOrTrunc(Cond, dl, NewCondVT);
    return DAG.getNode(ISD::VSELECT, dl, VT, Cond, LHS, RHS);
  }

  // Only some types will be legal on some subtargets. If we can emit a legal
  // VSELECT-matching blend, return Op, but if we need to expand, return
  // a null value.
  switch (VT.SimpleTy) {
  default:
    // Most of the vector types have blends past SSE4.1.
    return Op;

  case MVT::v32i8:
    // The byte blends for AVX vectors were introduced only in AVX2.
    if (Subtarget.hasAVX2())
      return Op;

    return SDValue();

  case MVT::v8i16:
  case MVT::v16i16: {
    // Bitcast everything to the vXi8 type and use a vXi8 vselect.
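    // There is no variable blend at i16 granularity; PBLENDVB operates on
    // bytes, and since each i16 condition element is all-ones or all-zeros,
    // splitting it into two i8 lanes preserves the select semantics.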
    MVT CastVT = MVT::getVectorVT(MVT::i8, NumElts * 2);
    Cond = DAG.getBitcast(CastVT, Cond);
    LHS = DAG.getBitcast(CastVT, LHS);
    RHS = DAG.getBitcast(CastVT, RHS);
    SDValue Select = DAG.getNode(ISD::VSELECT, dl, CastVT, Cond, LHS, RHS);
    return DAG.getBitcast(VT, Select);
  }
  }
}

static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  SDValue Vec = Op.getOperand(0);
  SDValue Idx = Op.getOperand(1);
  assert(isa<ConstantSDNode>(Idx) && "Constant index expected");
  SDLoc dl(Op);

  if (!Vec.getSimpleValueType().is128BitVector())
    return SDValue();

  if (VT.getSizeInBits() == 8) {
    // If IdxVal is 0, it's cheaper to do a move instead of a pextrb, unless
    // we're going to zero extend the register or fold the store.
    if (llvm::isNullConstant(Idx) && !MayFoldIntoZeroExtend(Op) &&
        !MayFoldIntoStore(Op))
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i8,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                     DAG.getBitcast(MVT::v4i32, Vec), Idx));

    SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32, Vec, Idx);
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
  }

  if (VT == MVT::f32) {
    // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
    // the result back to an FR32 register. It's only worth matching if the
    // result has a single use which is a store or a bitcast to i32. And in
    // the case of a store, it's not worth it if the index is a constant 0,
    // because a MOVSSmr can be used instead, which is smaller and faster.
    if (!Op.hasOneUse())
      return SDValue();

    SDNode *User = *Op.getNode()->use_begin();
    if ((User->getOpcode() != ISD::STORE || isNullConstant(Idx)) &&
        (User->getOpcode() != ISD::BITCAST ||
         User->getValueType(0) != MVT::i32))
      return SDValue();

    SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  DAG.getBitcast(MVT::v4i32, Vec), Idx);
    return DAG.getBitcast(MVT::f32, Extract);
  }

  if (VT == MVT::i32 || VT == MVT::i64)
    // ExtractPS/pextrq works with constant index.
    return Op;

  return SDValue();
}

/// Extract one bit from mask vector, like v16i1 or v8i1.
/// AVX-512 feature.
static SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG,
                                        const X86Subtarget &Subtarget) {
  SDValue Vec = Op.getOperand(0);
  SDLoc dl(Vec);
  MVT VecVT = Vec.getSimpleValueType();
  SDValue Idx = Op.getOperand(1);
  auto *IdxC = dyn_cast<ConstantSDNode>(Idx);
  MVT EltVT = Op.getSimpleValueType();

  assert((VecVT.getVectorNumElements() <= 16 || Subtarget.hasBWI()) &&
         "Unexpected vector type in ExtractBitFromMaskVector");

  // A variable index can't be handled in mask registers;
  // extend the vector to VR512/128.
  if (!IdxC) {
    unsigned NumElts = VecVT.getVectorNumElements();
    // Extending v8i1/v16i1 to 512-bit gets better performance on KNL
    // than extending to 128/256-bit.
    MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
    MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
    SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec);
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ExtEltVT, Ext, Idx);
    return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
  }

  unsigned IdxVal = IdxC->getZExtValue();
  if (IdxVal == 0) // the operation is legal
    return Op;

  // Extend to natively supported kshift.
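  // (An 8-bit KSHIFT needs AVX512DQ; without it, widen to at least v16i1.)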
  unsigned NumElems = VecVT.getVectorNumElements();
  MVT WideVecVT = VecVT;
  if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
    WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
                      DAG.getUNDEF(WideVecVT), Vec,
                      DAG.getIntPtrConstant(0, dl));
  }

  // Use kshiftr instruction to move to the lower element.
  Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
                    DAG.getTargetConstant(IdxVal, dl, MVT::i8));

  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
                     DAG.getIntPtrConstant(0, dl));
}

SDValue
X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue Vec = Op.getOperand(0);
  MVT VecVT = Vec.getSimpleValueType();
  SDValue Idx = Op.getOperand(1);
  auto *IdxC = dyn_cast<ConstantSDNode>(Idx);

  if (VecVT.getVectorElementType() == MVT::i1)
    return ExtractBitFromMaskVector(Op, DAG, Subtarget);

  if (!IdxC) {
    // It's more profitable to go through memory (1 cycle throughput)
    // than using VMOVD + VPERMV/PSHUFB sequence (2/3 cycles throughput).
    // The IACA tool was used to get the performance estimation
    // (https://software.intel.com/en-us/articles/intel-architecture-code-analyzer)
    //
    // example: extractelement <16 x i8> %a, i32 %i
    //
    // Block Throughput: 3.00 Cycles
    // Throughput Bottleneck: Port5
    //
    // | Num Of |  Ports pressure in cycles |    |
    // |  Uops  | 0 - DV | 5 |  6  |  7  |    |
    // ---------------------------------------------
    // |   1    |        | 1.0 |   |   | CP | vmovd xmm1, edi
    // |   1    |        | 1.0 |   |   | CP | vpshufb xmm0, xmm0, xmm1
    // |   2    | 1.0    | 1.0 |   |   | CP | vpextrb eax, xmm0, 0x0
    // Total Num Of Uops: 4
    //
    //
    // Block Throughput: 1.00 Cycles
    // Throughput Bottleneck: PORT2_AGU, PORT3_AGU, Port4
    //
    // |    |  Ports pressure in cycles   |  |
    // |Uops| 1 | 2 - D  |3 -  D  | 4 | 5 |  |
    // ---------------------------------------------------------
    // |2^  |   | 0.5    | 0.5    |1.0|   |CP| vmovaps xmmword ptr [rsp-0x18], xmm0
    // |1   |0.5|        |        |   |0.5|  | lea rax, ptr [rsp-0x18]
    // |1   |   |0.5, 0.5|0.5, 0.5|   |   |CP| mov al, byte ptr [rdi+rax*1]
    // Total Num Of Uops: 4
    return SDValue();
  }

  unsigned IdxVal = IdxC->getZExtValue();

  // If this is a 256-bit vector result, first extract the 128-bit vector and
  // then extract the element from the 128-bit vector.
  if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
    // Get the 128-bit vector.
    Vec = extract128BitVector(Vec, IdxVal, DAG, dl);
    MVT EltVT = VecVT.getVectorElementType();

    unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
    assert(isPowerOf2_32(ElemsPerChunk) && "Elements per chunk not power of 2");

    // Find IdxVal modulo ElemsPerChunk. Since ElemsPerChunk is a power of 2
    // this can be done with a mask.
    IdxVal &= ElemsPerChunk - 1;
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
                       DAG.getIntPtrConstant(IdxVal, dl));
  }

  assert(VecVT.is128BitVector() && "Unexpected vector length");

  MVT VT = Op.getSimpleValueType();

  if (VT.getSizeInBits() == 16) {
    // If IdxVal is 0, it's cheaper to do a move instead of a pextrw, unless
    // we're going to zero extend the register or fold the store (SSE41 only).
    if (IdxVal == 0 && !MayFoldIntoZeroExtend(Op) &&
        !(Subtarget.hasSSE41() && MayFoldIntoStore(Op)))
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                     DAG.getBitcast(MVT::v4i32, Vec), Idx));

    SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32, Vec, Idx);
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Extract);
  }

  if (Subtarget.hasSSE41())
    if (SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG))
      return Res;

  // TODO: We only extract a single element from v16i8, we can probably afford
  // to be more aggressive here before using the default approach of spilling to
  // stack.
  if (VT.getSizeInBits() == 8 && Op->isOnlyUserOf(Vec.getNode())) {
    // Extract either the lowest i32 or any i16, and extract the sub-byte.
    int DWordIdx = IdxVal / 4;
    if (DWordIdx == 0) {
      SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                DAG.getBitcast(MVT::v4i32, Vec),
                                DAG.getIntPtrConstant(DWordIdx, dl));
      int ShiftVal = (IdxVal % 4) * 8;
      if (ShiftVal != 0)
        Res = DAG.getNode(ISD::SRL, dl, MVT::i32, Res,
                          DAG.getConstant(ShiftVal, dl, MVT::i8));
      return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
    }

    int WordIdx = IdxVal / 2;
    SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
                              DAG.getBitcast(MVT::v8i16, Vec),
                              DAG.getIntPtrConstant(WordIdx, dl));
    int ShiftVal = (IdxVal % 2) * 8;
    if (ShiftVal != 0)
      Res = DAG.getNode(ISD::SRL, dl, MVT::i16, Res,
                        DAG.getConstant(ShiftVal, dl, MVT::i8));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
  }

  if (VT.getSizeInBits() == 32) {
    if (IdxVal == 0)
      return Op;

    // SHUFPS the element to the lowest double word, then movss.
    int Mask[4] = { static_cast<int>(IdxVal), -1, -1, -1 };
    Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
                       DAG.getIntPtrConstant(0, dl));
  }

  if (VT.getSizeInBits() == 64) {
    // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
    // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
    // to match extract_elt for f64.
    if (IdxVal == 0)
      return Op;

    // UNPCKHPD the element to the lowest double word, then movsd.
    // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
    // to a f64mem, the whole operation is folded into a single MOVHPDmr.
    int Mask[2] = { 1, -1 };
    Vec = DAG.getVectorShuffle(VecVT, dl, Vec, DAG.getUNDEF(VecVT), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
                       DAG.getIntPtrConstant(0, dl));
  }

  return SDValue();
}

/// Insert one bit to mask vector, like v16i1 or v8i1.
/// AVX-512 feature.
static SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  SDValue Vec = Op.getOperand(0);
  SDValue Elt = Op.getOperand(1);
  SDValue Idx = Op.getOperand(2);
  MVT VecVT = Vec.getSimpleValueType();

  if (!isa<ConstantSDNode>(Idx)) {
    // Non-constant index. Extend source and destination,
    // insert element and then truncate the result.
    unsigned NumElts = VecVT.getVectorNumElements();
    MVT ExtEltVT = (NumElts <= 8) ? MVT::getIntegerVT(128 / NumElts) : MVT::i8;
    MVT ExtVecVT = MVT::getVectorVT(ExtEltVT, NumElts);
    SDValue ExtOp =
        DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
                    DAG.getNode(ISD::SIGN_EXTEND, dl, ExtVecVT, Vec),
                    DAG.getNode(ISD::SIGN_EXTEND, dl, ExtEltVT, Elt), Idx);
    return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
  }

  // Copy into a k-register, extract to v1i1 and insert_subvector.
  SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i1, Elt);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VecVT, Vec, EltInVec, Idx);
}

SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  MVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();

  if (EltVT == MVT::i1)
    return InsertBitToMaskVector(Op, DAG, Subtarget);

  SDLoc dl(Op);
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2 = Op.getOperand(2);

  auto *N2C = dyn_cast<ConstantSDNode>(N2);
  if (!N2C || N2C->getAPIntValue().uge(NumElts))
    return SDValue();
  uint64_t IdxVal = N2C->getZExtValue();
  bool IsZeroElt = X86::isZeroNode(N1);
  bool IsAllOnesElt = VT.isInteger() && llvm::isAllOnesConstant(N1);

  // If we are inserting an element, see if we can do this more efficiently with
  // a blend shuffle with a rematerializable vector than a costly integer
  // insertion.
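  // Illustrative note (not in the original source): inserting zero at index 2
  // of a v4i32 builds BlendMask {0, 1, 6, 3}; element 6 selects lane 2 of the
  // all-zeros vector, so the insert becomes a single blend.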
  if ((IsZeroElt || IsAllOnesElt) && Subtarget.hasSSE41() &&
      16 <= EltVT.getSizeInBits()) {
    SmallVector<int, 8> BlendMask;
    for (unsigned i = 0; i != NumElts; ++i)
      BlendMask.push_back(i == IdxVal ? i + NumElts : i);
    SDValue CstVector = IsZeroElt ? getZeroVector(VT, Subtarget, DAG, dl)
                                  : getOnesVector(VT, DAG, dl);
    return DAG.getVectorShuffle(VT, dl, N0, CstVector, BlendMask);
  }
  // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
  // into that, and then insert the subvector back into the result.
  if (VT.is256BitVector() || VT.is512BitVector()) {
    // With a 256-bit vector, we can insert into the zero element efficiently
    // using a blend if we have AVX or AVX2 and the right data type.
    if (VT.is256BitVector() && IdxVal == 0) {
      // TODO: It is worthwhile to cast integer to floating point and back
      // and incur a domain crossing penalty if that's what we'll end up
      // doing anyway after extracting to a 128-bit vector.
      if ((Subtarget.hasAVX() && (EltVT == MVT::f64 || EltVT == MVT::f32)) ||
          (Subtarget.hasAVX2() && EltVT == MVT::i32)) {
        SDValue N1Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
        return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec,
                           DAG.getTargetConstant(1, dl, MVT::i8));
      }
    }

    // Get the desired 128-bit vector chunk.
    SDValue V = extract128BitVector(N0, IdxVal, DAG, dl);

    // Insert the element into the desired chunk.
    unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
    assert(isPowerOf2_32(NumEltsIn128));
    // Since NumEltsIn128 is a power of 2 we can use a mask instead of a modulo.
    unsigned IdxIn128 = IdxVal & (NumEltsIn128 - 1);

    V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
                    DAG.getIntPtrConstant(IdxIn128, dl));

    // Insert the changed part back into the bigger vector.
    return insert128BitVector(N0, V, IdxVal, DAG, dl);
  }
  assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");

  // This will be just movd/movq/movss/movsd.
  if (IdxVal == 0 && ISD::isBuildVectorAllZeros(N0.getNode())) {
    if (EltVT == MVT::i32 || EltVT == MVT::f32 || EltVT == MVT::f64 ||
        EltVT == MVT::i64) {
      N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, N1);
      return getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
    }

    // We can't directly insert an i8 or i16 into a vector, so zero extend
    // it to i32 first.
    if (EltVT == MVT::i16 || EltVT == MVT::i8) {
      N1 = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, N1);
      MVT ShufVT = MVT::getVectorVT(MVT::i32, VT.getSizeInBits() / 32);
      N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, ShufVT, N1);
      N1 = getShuffleVectorZeroOrUndef(N1, 0, true, Subtarget, DAG);
      return DAG.getBitcast(VT, N1);
    }
  }
  // Transform it so it matches pinsr{b,w} which expects a GR32 as its second
  // argument. SSE41 required for pinsrb.
  if (VT == MVT::v8i16 || (VT == MVT::v16i8 && Subtarget.hasSSE41())) {
    unsigned Opc;
    if (VT == MVT::v8i16) {
      assert(Subtarget.hasSSE2() && "SSE2 required for PINSRW");
      Opc = X86ISD::PINSRW;
    } else {
      assert(VT == MVT::v16i8 && "PINSRB requires v16i8 vector");
      assert(Subtarget.hasSSE41() && "SSE41 required for PINSRB");
      Opc = X86ISD::PINSRB;
    }

    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getIntPtrConstant(IdxVal, dl);
    return DAG.getNode(Opc, dl, VT, N0, N1, N2);
  }
  if (Subtarget.hasSSE41()) {
    if (EltVT == MVT::f32) {
      // Bits [7:6] of the constant are the source select. This will always be
      // zero here. The DAG Combiner may combine an extract_elt index into
      // these bits. For example (insert (extract, 3), 2) could be matched by
      // putting the '3' into bits [7:6] of X86ISD::INSERTPS.
      // Bits [5:4] of the constant are the destination select. This is the
      // value of the incoming immediate.
      // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
      // combine either bitwise AND or insert of float 0.0 to set these bits.
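      // Illustrative note (not in the original source): inserting into lane 2
      // yields the immediate (2 << 4) == 0x20, i.e. source select 0,
      // destination select 2, and an empty zero mask.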
      bool MinSize = DAG.getMachineFunction().getFunction().hasMinSize();
      if (IdxVal == 0 && (!MinSize || !MayFoldLoad(N1))) {
        // If this is an insertion of 32-bits into the low 32-bits of
        // a vector, we prefer to generate a blend with immediate rather
        // than an insertps. Blends are simpler operations in hardware and so
        // will always have equal or better performance than insertps.
        // But if optimizing for size and there's a load folding opportunity,
        // generate insertps because blendps does not have a 32-bit memory
        // operand form.
        N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
        return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1,
                           DAG.getTargetConstant(1, dl, MVT::i8));
      }
      // Create this as a scalar to vector.
      N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
      return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1,
                         DAG.getTargetConstant(IdxVal << 4, dl, MVT::i8));
    }

    // PINSR* works with constant index.
    if (EltVT == MVT::i32 || EltVT == MVT::i64)
      return Op;
  }

  return SDValue();
}
static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT OpVT = Op.getSimpleValueType();

  // It's always cheaper to replace a xor+movd with xorps, and it simplifies
  // further combines.
  if (X86::isZeroNode(Op.getOperand(0)))
    return getZeroVector(OpVT, Subtarget, DAG, dl);

  // If this is a 256-bit vector result, first insert into a 128-bit
  // vector and then insert into the 256-bit vector.
  if (!OpVT.is128BitVector()) {
    // Insert into a 128-bit vector.
    unsigned SizeFactor = OpVT.getSizeInBits() / 128;
    MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
                                 OpVT.getVectorNumElements() / SizeFactor);

    Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));

    // Insert the 128-bit vector.
    return insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
  }
  assert(OpVT.is128BitVector() && OpVT.isInteger() && OpVT != MVT::v2i64 &&
         "Expected an SSE type!");

  // Pass through a v4i32 SCALAR_TO_VECTOR as that's what we use in tblgen.
  if (OpVT == MVT::v4i32)
    return Op;

  SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
  return DAG.getBitcast(
      OpVT, DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
}
// Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
// simple superregister reference or explicit instructions to insert
// the upper bits of a vector.
static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {
  assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1);

  return insert1BitVector(Op, DAG, Subtarget);
}
static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget &Subtarget,
                                      SelectionDAG &DAG) {
  assert(Op.getSimpleValueType().getVectorElementType() == MVT::i1 &&
         "Only vXi1 extract_subvectors need custom lowering");

  SDLoc dl(Op);
  SDValue Vec = Op.getOperand(0);
  uint64_t IdxVal = Op.getConstantOperandVal(1);

  if (IdxVal == 0) // the operation is legal
    return Op;

  MVT VecVT = Vec.getSimpleValueType();
  unsigned NumElems = VecVT.getVectorNumElements();

  // Extend to a natively supported kshift.
  MVT WideVecVT = VecVT;
  if ((!Subtarget.hasDQI() && NumElems == 8) || NumElems < 8) {
    WideVecVT = Subtarget.hasDQI() ? MVT::v8i1 : MVT::v16i1;
    Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVecVT,
                      DAG.getUNDEF(WideVecVT), Vec,
                      DAG.getIntPtrConstant(0, dl));
  }

  // Shift to the LSB.
  Vec = DAG.getNode(X86ISD::KSHIFTR, dl, WideVecVT, Vec,
                    DAG.getTargetConstant(IdxVal, dl, MVT::i8));

  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, Op.getValueType(), Vec,
                     DAG.getIntPtrConstant(0, dl));
}
// Returns the appropriate wrapper opcode for a global reference.
unsigned X86TargetLowering::getGlobalWrapperKind(
    const GlobalValue *GV, const unsigned char OpFlags) const {
  // References to absolute symbols are never PC-relative.
  if (GV && GV->isAbsoluteSymbolRef())
    return X86ISD::Wrapper;

  CodeModel::Model M = getTargetMachine().getCodeModel();
  if (Subtarget.isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    return X86ISD::WrapperRIP;

  // GOTPCREL references must always use RIP.
  if (OpFlags == X86II::MO_GOTPCREL)
    return X86ISD::WrapperRIP;

  return X86ISD::Wrapper;
}
// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form an addressing mode. These wrapped nodes will be selected
// into MOV32ri.
SDValue
X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result = DAG.getTargetConstantPool(
      CP->getConstVal(), PtrVT, CP->getAlign(), CP->getOffset(), OpFlag);
  SDLoc DL(CP);
  Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);
  // With PIC, the address is actually $g + Offset.
  if (OpFlag) {
    Result =
        DAG.getNode(ISD::ADD, DL, PtrVT,
                    DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);
  }

  return Result;
}
SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = Subtarget.classifyLocalReference(nullptr);

  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), PtrVT, OpFlag);
  SDLoc DL(JT);
  Result = DAG.getNode(getGlobalWrapperKind(), DL, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
  if (OpFlag)
    Result =
        DAG.getNode(ISD::ADD, DL, PtrVT,
                    DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), Result);

  return Result;
}
SDValue X86TargetLowering::LowerExternalSymbol(SDValue Op,
                                               SelectionDAG &DAG) const {
  return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
}

SDValue
X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
  // Create the TargetBlockAddress node.
  unsigned char OpFlags =
      Subtarget.classifyBlockAddressReference();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
  SDLoc dl(Op);
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result = DAG.getTargetBlockAddress(BA, PtrVT, Offset, OpFlags);
  Result = DAG.getNode(getGlobalWrapperKind(), dl, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
  if (isGlobalRelativeToPICBase(OpFlags)) {
    Result = DAG.getNode(ISD::ADD, dl, PtrVT,
                         DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
  }

  return Result;
}
/// Creates target global address or external symbol nodes for calls or
/// other uses.
SDValue X86TargetLowering::LowerGlobalOrExternal(SDValue Op, SelectionDAG &DAG,
                                                 bool ForCall) const {
  // Unpack the global address or external symbol.
  const SDLoc &dl = SDLoc(Op);
  const GlobalValue *GV = nullptr;
  int64_t Offset = 0;
  const char *ExternalSym = nullptr;
  if (const auto *G = dyn_cast<GlobalAddressSDNode>(Op)) {
    GV = G->getGlobal();
    Offset = G->getOffset();
  } else {
    const auto *ES = cast<ExternalSymbolSDNode>(Op);
    ExternalSym = ES->getSymbol();
  }

  // Calculate some flags for address lowering.
  const Module &Mod = *DAG.getMachineFunction().getFunction().getParent();
  unsigned char OpFlags;
  if (ForCall)
    OpFlags = Subtarget.classifyGlobalFunctionReference(GV, Mod);
  else
    OpFlags = Subtarget.classifyGlobalReference(GV, Mod);
  bool HasPICReg = isGlobalRelativeToPICBase(OpFlags);
  bool NeedsLoad = isGlobalStubReference(OpFlags);

  CodeModel::Model M = DAG.getTarget().getCodeModel();
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDValue Result;

  if (GV) {
    // Create a target global address if this is a global. If possible, fold the
    // offset into the global address reference. Otherwise, ADD it on later.
    int64_t GlobalOffset = 0;
    if (OpFlags == X86II::MO_NO_FLAG &&
        X86::isOffsetSuitableForCodeModel(Offset, M)) {
      std::swap(GlobalOffset, Offset);
    }
    Result = DAG.getTargetGlobalAddress(GV, dl, PtrVT, GlobalOffset, OpFlags);
  } else {
    // If this is not a global address, it must be an external symbol.
    Result = DAG.getTargetExternalSymbol(ExternalSym, PtrVT, OpFlags);
  }

  // If this is a direct call, avoid the wrapper if we don't need to do any
  // loads or adds. This allows SDAG ISel to match direct calls.
  if (ForCall && !NeedsLoad && !HasPICReg && Offset == 0)
    return Result;

  Result = DAG.getNode(getGlobalWrapperKind(GV, OpFlags), dl, PtrVT, Result);

  // With PIC, the address is actually $g + Offset.
  if (HasPICReg) {
    Result = DAG.getNode(ISD::ADD, dl, PtrVT,
                         DAG.getNode(X86ISD::GlobalBaseReg, dl, PtrVT), Result);
  }

  // For globals that require a load from a stub to get the address, emit the
  // load.
  if (NeedsLoad)
    Result = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, dl, PtrVT, Result,
                         DAG.getConstant(Offset, dl, PtrVT));

  return Result;
}

SDValue
X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
  return LowerGlobalOrExternal(Op, DAG, /*ForCall=*/false);
}

static SDValue
GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
           SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
           unsigned char OperandFlags, bool LocalDynamic = false) {
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDLoc dl(GA);
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(),
                                           OperandFlags);

  X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
                                           : X86ISD::TLSADDR;

  if (InFlag) {
    SDValue Ops[] = { Chain, TGA, *InFlag };
    Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
  } else {
    SDValue Ops[] = { Chain, TGA };
    Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
  }

  // TLSADDR will be codegen'ed as a call. Inform MFI that the function has
  // calls.
  MFI.setAdjustsStack(true);
  MFI.setHasCalls(true);

  SDValue Flag = Chain.getValue(1);
  return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
}
// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit.
static SDValue
LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const EVT PtrVT) {
  SDValue InFlag;
  SDLoc dl(GA); // ? function entry point might be better
  SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
                                   DAG.getNode(X86ISD::GlobalBaseReg,
                                               SDLoc(), PtrVT), InFlag);
  InFlag = Chain.getValue(1);

  return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
}
// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit.
static SDValue
LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const EVT PtrVT) {
  return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
                    X86::RAX, X86II::MO_TLSGD);
}
static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
                                           SelectionDAG &DAG, const EVT PtrVT,
                                           bool is64Bit) {
  SDLoc dl(GA);

  // Get the start address of the TLS block for this module.
  X86MachineFunctionInfo *MFI = DAG.getMachineFunction()
                                    .getInfo<X86MachineFunctionInfo>();
  MFI->incNumLocalDynamicTLSAccesses();

  SDValue Base;
  if (is64Bit) {
    Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
                      X86II::MO_TLSLD, /*LocalDynamic=*/true);
  } else {
    SDValue InFlag;
    SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
        DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
    InFlag = Chain.getValue(1);
    Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
                      X86II::MO_TLSLDM, /*LocalDynamic=*/true);
  }

  // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
  // of Base.

  // Build x@dtpoff.
  unsigned char OperandFlags = X86II::MO_DTPOFF;
  unsigned WrapperKind = X86ISD::Wrapper;
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(), OperandFlags);
  SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);

  // Add x@dtpoff with the base.
  return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
}
// Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                   const EVT PtrVT, TLSModel::Model model,
                                   bool is64Bit, bool isPIC) {
  SDLoc dl(GA);

  // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
  Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
                                                         is64Bit ? 257 : 256));

  SDValue ThreadPointer =
      DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0, dl),
                  MachinePointerInfo(Ptr));

  unsigned char OperandFlags = 0;
  // Most TLS accesses are not RIP relative, even on x86-64. One exception is
  // initial exec on 64-bit.
  unsigned WrapperKind = X86ISD::Wrapper;
  if (model == TLSModel::LocalExec) {
    OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
  } else if (model == TLSModel::InitialExec) {
    if (is64Bit) {
      OperandFlags = X86II::MO_GOTTPOFF;
      WrapperKind = X86ISD::WrapperRIP;
    } else {
      OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
    }
  } else {
    llvm_unreachable("Unexpected model");
  }

  // emit "addl x@ntpoff,%eax" (local exec)
  // or   "addl x@indntpoff,%eax" (initial exec)
  // or   "addl x@gotntpoff(%ebx),%eax" (initial exec, 32-bit pic)
  SDValue TGA =
      DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
                                 GA->getOffset(), OperandFlags);
  SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);

  if (model == TLSModel::InitialExec) {
    if (isPIC && !is64Bit) {
      Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
                           DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
                           Offset);
    }

    Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  }

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}

SDValue
X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
  GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);

  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  const GlobalValue *GV = GA->getGlobal();
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  bool PositionIndependent = isPositionIndependent();

  if (Subtarget.isTargetELF()) {
    TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
    switch (model) {
    case TLSModel::GeneralDynamic:
      if (Subtarget.is64Bit())
        return LowerToTLSGeneralDynamicModel64(GA, DAG, PtrVT);
      return LowerToTLSGeneralDynamicModel32(GA, DAG, PtrVT);
    case TLSModel::LocalDynamic:
      return LowerToTLSLocalDynamicModel(GA, DAG, PtrVT,
                                         Subtarget.is64Bit());
    case TLSModel::InitialExec:
    case TLSModel::LocalExec:
      return LowerToTLSExecModel(GA, DAG, PtrVT, model, Subtarget.is64Bit(),
                                 PositionIndependent);
    }
    llvm_unreachable("Unknown TLS model.");
  }
  if (Subtarget.isTargetDarwin()) {
    // Darwin only has one model of TLS. Lower to that.
    unsigned char OpFlag = 0;
    unsigned WrapperKind = Subtarget.isPICStyleRIPRel() ?
                           X86ISD::WrapperRIP : X86ISD::Wrapper;

    // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
    // global base reg.
    bool PIC32 = PositionIndependent && !Subtarget.is64Bit();
    if (PIC32)
      OpFlag = X86II::MO_TLVP_PIC_BASE;
    else
      OpFlag = X86II::MO_TLVP;
    SDLoc DL(Op);
    SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
                                                GA->getValueType(0),
                                                GA->getOffset(), OpFlag);
    SDValue Offset = DAG.getNode(WrapperKind, DL, PtrVT, Result);

    // With PIC32, the address is actually $g + Offset.
    if (PIC32)
      Offset = DAG.getNode(ISD::ADD, DL, PtrVT,
                           DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
                           Offset);

    // Lowering the machine isd will make sure everything is in the right
    // location.
    SDValue Chain = DAG.getEntryNode();
    SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
    Chain = DAG.getCALLSEQ_START(Chain, 0, 0, DL);
    SDValue Args[] = { Chain, Offset };
    Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, DL, true),
                               DAG.getIntPtrConstant(0, DL, true),
                               Chain.getValue(1), DL);

    // TLSCALL will be codegen'ed as a call. Inform MFI that the function has
    // calls.
    MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
    MFI.setAdjustsStack(true);

    // And our return value (tls address) is in the standard call return value
    // location.
    unsigned Reg = Subtarget.is64Bit() ? X86::RAX : X86::EAX;
    return DAG.getCopyFromReg(Chain, DL, Reg, PtrVT, Chain.getValue(1));
  }
  if (Subtarget.isOSWindows()) {
    // Just use the implicit TLS architecture.
    // Need to generate something similar to:
    //   mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
    //                                  ; from TEB
    //   mov ecx, dword [rel _tls_index]: Load index (from C runtime)
    //   mov rcx, qword [rdx+rcx*8]
    //   mov eax, .tls$:tlsvar
    //   [rax+rcx] contains the address
    // Windows 64bit: gs:0x58
    // Windows 32bit: fs:__tls_array
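    // Illustrative summary (not in the original source): the final address is
    //   TEB.ThreadLocalStoragePointer[_tls_index] + SECREL32(tlsvar)
    // i.e. the module's TLS block, found through the TEB slot above, plus the
    // variable's offset within the .tls section.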
    SDLoc dl(GA);
    SDValue Chain = DAG.getEntryNode();

    // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
    // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
    // use its literal value of 0x2C.
    Value *Ptr = Constant::getNullValue(Subtarget.is64Bit()
                                        ? Type::getInt8PtrTy(*DAG.getContext(),
                                                             256)
                                        : Type::getInt32PtrTy(*DAG.getContext(),
                                                              257));

    SDValue TlsArray = Subtarget.is64Bit()
                           ? DAG.getIntPtrConstant(0x58, dl)
                           : (Subtarget.isTargetWindowsGNU()
                                  ? DAG.getIntPtrConstant(0x2C, dl)
                                  : DAG.getExternalSymbol("_tls_array", PtrVT));

    SDValue ThreadPointer =
        DAG.getLoad(PtrVT, dl, Chain, TlsArray, MachinePointerInfo(Ptr));

    SDValue res;
    if (GV->getThreadLocalMode() == GlobalVariable::LocalExecTLSModel) {
      res = ThreadPointer;
    } else {
      // Load the _tls_index variable.
      SDValue IDX = DAG.getExternalSymbol("_tls_index", PtrVT);
      if (Subtarget.is64Bit())
        IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, PtrVT, Chain, IDX,
                             MachinePointerInfo(), MVT::i32);
      else
        IDX = DAG.getLoad(PtrVT, dl, Chain, IDX, MachinePointerInfo());

      auto &DL = DAG.getDataLayout();
      SDValue Scale =
          DAG.getConstant(Log2_64_Ceil(DL.getPointerSize()), dl, MVT::i8);
      IDX = DAG.getNode(ISD::SHL, dl, PtrVT, IDX, Scale);

      res = DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, IDX);
    }

    res = DAG.getLoad(PtrVT, dl, Chain, res, MachinePointerInfo());

    // Get the offset of the start of the .tls section.
    SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                             GA->getValueType(0),
                                             GA->getOffset(), X86II::MO_SECREL);
    SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA);

    // The address of the thread local variable is the add of the thread
    // pointer with the offset of the variable.
    return DAG.getNode(ISD::ADD, dl, PtrVT, res, Offset);
  }
  llvm_unreachable("TLS not implemented for this target.");
}
/// Lower SRA_PARTS and friends, which return two i32 values
/// and take a 2 x i32 value to shift plus a shift amount.
/// TODO: Can this be moved to general expansion code?
static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  MVT VT = Op.getSimpleValueType();
  unsigned VTBits = VT.getSizeInBits();
  SDLoc dl(Op);
  bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt = Op.getOperand(2);
  // ISD::FSHL and ISD::FSHR have defined overflow behavior but ISD::SHL and
  // ISD::SRA/L nodes haven't. Insert an AND to be safe; it's optimized away
  // during isel.
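  // Illustrative note (not in the original source): for i32 parts the AND
  // keeps the SHL/SRL amount in [0, 31], where SDAG shifts are well defined;
  // amounts of 32 or more are handled by the explicit test and select below.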
  SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
                                  DAG.getConstant(VTBits - 1, dl, MVT::i8));
  SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
                                     DAG.getConstant(VTBits - 1, dl, MVT::i8))
                       : DAG.getConstant(0, dl, VT);

  SDValue Tmp2, Tmp3;
  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Tmp2 = DAG.getNode(ISD::FSHL, dl, VT, ShOpHi, ShOpLo, ShAmt);
    Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
  } else {
    Tmp2 = DAG.getNode(ISD::FSHR, dl, VT, ShOpHi, ShOpLo, ShAmt);
    Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
  }

  // If the shift amount is larger than or equal to the width of a part we
  // can't rely on the results of shld/shrd. Insert a test and select the
  // appropriate values for large shift amounts.
  SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
                                DAG.getConstant(VTBits, dl, MVT::i8));
  SDValue Cond = DAG.getSetCC(dl, MVT::i8, AndNode,
                              DAG.getConstant(0, dl, MVT::i8), ISD::SETNE);

  SDValue Hi, Lo;
  if (Op.getOpcode() == ISD::SHL_PARTS) {
    Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
    Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
  } else {
    Lo = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp3, Tmp2);
    Hi = DAG.getNode(ISD::SELECT, dl, VT, Cond, Tmp1, Tmp3);
  }

  return DAG.getMergeValues({ Lo, Hi }, dl);
}
static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget,
                                SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  assert((Op.getOpcode() == ISD::FSHL || Op.getOpcode() == ISD::FSHR) &&
         "Unexpected funnel shift opcode!");

  SDLoc DL(Op);
  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue Amt = Op.getOperand(2);

  bool IsFSHR = Op.getOpcode() == ISD::FSHR;

  if (VT.isVector()) {
    assert(Subtarget.hasVBMI2() && "Expected VBMI2");

    if (IsFSHR)
      std::swap(Op0, Op1);

    APInt APIntShiftAmt;
    if (X86::isConstantSplat(Amt, APIntShiftAmt)) {
      uint64_t ShiftAmt = APIntShiftAmt.urem(VT.getScalarSizeInBits());
      return DAG.getNode(IsFSHR ? X86ISD::VSHRD : X86ISD::VSHLD, DL, VT, Op0,
                         Op1, DAG.getTargetConstant(ShiftAmt, DL, MVT::i8));
    }

    return DAG.getNode(IsFSHR ? X86ISD::VSHRDV : X86ISD::VSHLDV, DL, VT,
                       Op0, Op1, Amt);
  }
  assert(
      (VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) &&
      "Unexpected funnel shift type!");

  // Expand slow SHLD/SHRD cases if we are not optimizing for size.
  bool OptForSize = DAG.shouldOptForSize();
  bool ExpandFunnel = !OptForSize && Subtarget.isSHLDSlow();

  // fshl(x,y,z) -> (((aext(x) << bw) | zext(y)) << (z & (bw-1))) >> bw.
  // fshr(x,y,z) -> (((aext(x) << bw) | zext(y)) >> (z & (bw-1))).
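  // Illustrative note (not in the original source): for i8, bw == 8, so
  // fshl(x,y,5) builds the 16-bit value x:y in an i32, shifts it left by
  // 5 (z & 7), then takes bits [15:8]; the truncate below returns the i8.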
  if ((VT == MVT::i8 || (ExpandFunnel && VT == MVT::i16)) &&
      !isa<ConstantSDNode>(Amt)) {
    unsigned EltSizeInBits = VT.getScalarSizeInBits();
    SDValue Mask = DAG.getConstant(EltSizeInBits - 1, DL, Amt.getValueType());
    SDValue HiShift = DAG.getConstant(EltSizeInBits, DL, Amt.getValueType());
    Op0 = DAG.getAnyExtOrTrunc(Op0, DL, MVT::i32);
    Op1 = DAG.getZExtOrTrunc(Op1, DL, MVT::i32);
    Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt, Mask);
    SDValue Res = DAG.getNode(ISD::SHL, DL, MVT::i32, Op0, HiShift);
    Res = DAG.getNode(ISD::OR, DL, MVT::i32, Res, Op1);
    if (IsFSHR) {
      Res = DAG.getNode(ISD::SRL, DL, MVT::i32, Res, Amt);
    } else {
      Res = DAG.getNode(ISD::SHL, DL, MVT::i32, Res, Amt);
      Res = DAG.getNode(ISD::SRL, DL, MVT::i32, Res, HiShift);
    }
    return DAG.getZExtOrTrunc(Res, DL, VT);
  }

  if (VT == MVT::i8 || ExpandFunnel)
    return SDValue();

  // i16 needs to modulo the shift amount, but i32/i64 have implicit modulo.
  if (VT == MVT::i16) {
    Amt = DAG.getNode(ISD::AND, DL, Amt.getValueType(), Amt,
                      DAG.getConstant(15, DL, Amt.getValueType()));
    unsigned FSHOp = (IsFSHR ? X86ISD::FSHR : X86ISD::FSHL);
    return DAG.getNode(FSHOp, DL, VT, Op0, Op1, Amt);
  }

  return Op;
}
// Try to use a packed vector operation to handle i64 on 32-bit targets when
// AVX512DQ is enabled.
static SDValue LowerI64IntToFP_AVX512DQ(SDValue Op, SelectionDAG &DAG,
                                        const X86Subtarget &Subtarget) {
  assert((Op.getOpcode() == ISD::SINT_TO_FP ||
          Op.getOpcode() == ISD::STRICT_SINT_TO_FP ||
          Op.getOpcode() == ISD::STRICT_UINT_TO_FP ||
          Op.getOpcode() == ISD::UINT_TO_FP) &&
         "Unexpected opcode!");
  bool IsStrict = Op->isStrictFPOpcode();
  unsigned OpNo = IsStrict ? 1 : 0;
  SDValue Src = Op.getOperand(OpNo);
  MVT SrcVT = Src.getSimpleValueType();
  MVT VT = Op.getSimpleValueType();

  if (!Subtarget.hasDQI() || SrcVT != MVT::i64 || Subtarget.is64Bit() ||
      (VT != MVT::f32 && VT != MVT::f64))
    return SDValue();

  // Pack the i64 into a vector, do the operation and extract.

  // Using 256-bit to ensure the result is 128 bits for the f32 case.
  unsigned NumElts = Subtarget.hasVLX() ? 4 : 8;
  MVT VecInVT = MVT::getVectorVT(MVT::i64, NumElts);
  MVT VecVT = MVT::getVectorVT(VT, NumElts);

  SDLoc dl(Op);
  SDValue InVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecInVT, Src);
  if (IsStrict) {
    SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, {VecVT, MVT::Other},
                                 {Op.getOperand(0), InVec});
    SDValue Chain = CvtVec.getValue(1);
    SDValue Value = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
                                DAG.getIntPtrConstant(0, dl));
    return DAG.getMergeValues({Value, Chain}, dl);
  }

  SDValue CvtVec = DAG.getNode(Op.getOpcode(), dl, VecVT, InVec);

  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, CvtVec,
                     DAG.getIntPtrConstant(0, dl));
}
static bool useVectorCast(unsigned Opcode, MVT FromVT, MVT ToVT,
                          const X86Subtarget &Subtarget) {
  switch (Opcode) {
  case ISD::SINT_TO_FP:
    // TODO: Handle wider types with AVX/AVX512.
    if (!Subtarget.hasSSE2() || FromVT != MVT::v4i32)
      return false;
    // CVTDQ2PS or (V)CVTDQ2PD
    return ToVT == MVT::v4f32 || (Subtarget.hasAVX() && ToVT == MVT::v4f64);

  case ISD::UINT_TO_FP:
    // TODO: Handle wider types and i64 elements.
    if (!Subtarget.hasAVX512() || FromVT != MVT::v4i32)
      return false;
    // VCVTUDQ2PS or VCVTUDQ2PD
    return ToVT == MVT::v4f32 || ToVT == MVT::v4f64;

  default:
    return false;
  }
}
/// Given a scalar cast operation that is extracted from a vector, try to
/// vectorize the cast op followed by extraction. This will avoid an expensive
/// round-trip between XMM and GPR.
static SDValue vectorizeExtractedCast(SDValue Cast, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  // TODO: This could be enhanced to handle smaller integer types by peeking
  // through an extend.
  SDValue Extract = Cast.getOperand(0);
  MVT DestVT = Cast.getSimpleValueType();
  if (Extract.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      !isa<ConstantSDNode>(Extract.getOperand(1)))
    return SDValue();

  // See if we have a 128-bit vector cast op for this type of cast.
  SDValue VecOp = Extract.getOperand(0);
  MVT FromVT = VecOp.getSimpleValueType();
  unsigned NumEltsInXMM = 128 / FromVT.getScalarSizeInBits();
  MVT Vec128VT = MVT::getVectorVT(FromVT.getScalarType(), NumEltsInXMM);
  MVT ToVT = MVT::getVectorVT(DestVT, NumEltsInXMM);
  if (!useVectorCast(Cast.getOpcode(), Vec128VT, ToVT, Subtarget))
    return SDValue();

  // If we are extracting from a non-zero element, first shuffle the source
  // vector to allow extracting from element zero.
  SDLoc DL(Cast);
  if (!isNullConstant(Extract.getOperand(1))) {
    SmallVector<int, 16> Mask(FromVT.getVectorNumElements(), -1);
    Mask[0] = Extract.getConstantOperandVal(1);
    VecOp = DAG.getVectorShuffle(FromVT, DL, VecOp, DAG.getUNDEF(FromVT), Mask);
  }
  // If the source vector is wider than 128-bits, extract the low part. Do not
  // create an unnecessarily wide vector cast op.
  if (FromVT != Vec128VT)
    VecOp = extract128BitVector(VecOp, 0, DAG, DL);

  // cast (extelt V, 0) --> extelt (cast (extract_subv V)), 0
  // cast (extelt V, C) --> extelt (cast (extract_subv (shuffle V, [C...]))), 0
  SDValue VCast = DAG.getNode(Cast.getOpcode(), DL, ToVT, VecOp);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, DestVT, VCast,
                     DAG.getIntPtrConstant(0, DL));
}
/// Given a scalar cast to FP with a cast to integer operand (almost an ftrunc),
/// try to vectorize the cast ops. This will avoid an expensive round-trip
/// between XMM and GPR.
static SDValue lowerFPToIntToFP(SDValue CastToFP, SelectionDAG &DAG,
                                const X86Subtarget &Subtarget) {
  // TODO: Allow FP_TO_UINT.
  SDValue CastToInt = CastToFP.getOperand(0);
  MVT VT = CastToFP.getSimpleValueType();
  if (CastToInt.getOpcode() != ISD::FP_TO_SINT || VT.isVector())
    return SDValue();

  MVT IntVT = CastToInt.getSimpleValueType();
  SDValue X = CastToInt.getOperand(0);
  MVT SrcVT = X.getSimpleValueType();
  if (SrcVT != MVT::f32 && SrcVT != MVT::f64)
    return SDValue();

  // See if we have 128-bit vector cast instructions for this type of cast.
  // We need cvttps2dq/cvttpd2dq and cvtdq2ps/cvtdq2pd.
  if (!Subtarget.hasSSE2() || (VT != MVT::f32 && VT != MVT::f64) ||
      IntVT != MVT::i32)
    return SDValue();

  unsigned SrcSize = SrcVT.getSizeInBits();
  unsigned IntSize = IntVT.getSizeInBits();
  unsigned VTSize = VT.getSizeInBits();
  MVT VecSrcVT = MVT::getVectorVT(SrcVT, 128 / SrcSize);
  MVT VecIntVT = MVT::getVectorVT(IntVT, 128 / IntSize);
  MVT VecVT = MVT::getVectorVT(VT, 128 / VTSize);

  // We need target-specific opcodes if this is v2f64 -> v4i32 -> v2f64.
  unsigned ToIntOpcode =
      SrcSize != IntSize ? X86ISD::CVTTP2SI : (unsigned)ISD::FP_TO_SINT;
  unsigned ToFPOpcode =
      IntSize != VTSize ? X86ISD::CVTSI2P : (unsigned)ISD::SINT_TO_FP;

  // sint_to_fp (fp_to_sint X) --> extelt (sint_to_fp (fp_to_sint (s2v X))), 0
  //
  // We are not defining the high elements (for example, zeroing them) because
  // that could nullify any performance advantage that we hoped to gain from
  // this vector op hack. We do not expect any adverse effects (like denorm
  // penalties) with cast ops.
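  // Illustrative note (not in the original source): for f64 -> i32 -> f64 the
  // vector types are v2f64/v4i32/v2f64, so this selects CVTTPD2DQ
  // (X86ISD::CVTTP2SI) and CVTDQ2PD (X86ISD::CVTSI2P), with the scalar kept
  // in lane 0 throughout.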
  SDLoc DL(CastToFP);
  SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
  SDValue VecX = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecSrcVT, X);
  SDValue VCastToInt = DAG.getNode(ToIntOpcode, DL, VecIntVT, VecX);
  SDValue VCastToFP = DAG.getNode(ToFPOpcode, DL, VecVT, VCastToInt);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, VCastToFP, ZeroIdx);
}
static SDValue lowerINT_TO_FP_vXi64(SDValue Op, SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {
  SDLoc DL(Op);
  bool IsStrict = Op->isStrictFPOpcode();
  MVT VT = Op->getSimpleValueType(0);
  SDValue Src = Op->getOperand(IsStrict ? 1 : 0);

  if (Subtarget.hasDQI()) {
    assert(!Subtarget.hasVLX() && "Unexpected features");

    assert((Src.getSimpleValueType() == MVT::v2i64 ||
            Src.getSimpleValueType() == MVT::v4i64) &&
           "Unsupported custom type");

    // With AVX512DQ, but not VLX we need to widen to get a 512-bit result type.
    assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v4f64) &&
           "Unexpected VT!");
    MVT WideVT = VT == MVT::v4f32 ? MVT::v8f32 : MVT::v8f64;

    // Need to concat with a zero vector for strict fp to avoid spurious
    // exceptions.
    SDValue Tmp = IsStrict ? DAG.getConstant(0, DL, MVT::v8i64)
                           : DAG.getUNDEF(MVT::v8i64);
    Src = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v8i64, Tmp, Src,
                      DAG.getIntPtrConstant(0, DL));
    SDValue Res, Chain;
    if (IsStrict) {
      Res = DAG.getNode(Op.getOpcode(), DL, {WideVT, MVT::Other},
                        {Op->getOperand(0), Src});
      Chain = Res.getValue(1);
    } else {
      Res = DAG.getNode(Op.getOpcode(), DL, WideVT, Src);
    }

    Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
                      DAG.getIntPtrConstant(0, DL));

    if (IsStrict)
      return DAG.getMergeValues({Res, Chain}, DL);
    return Res;
  }

  bool IsSigned = Op->getOpcode() == ISD::SINT_TO_FP ||
                  Op->getOpcode() == ISD::STRICT_SINT_TO_FP;
  if (VT != MVT::v4f32 || IsSigned)
    return SDValue();

  SDValue Zero = DAG.getConstant(0, DL, MVT::v4i64);
  SDValue One = DAG.getConstant(1, DL, MVT::v4i64);
  SDValue Sign = DAG.getNode(ISD::OR, DL, MVT::v4i64,
                             DAG.getNode(ISD::SRL, DL, MVT::v4i64, Src, One),
                             DAG.getNode(ISD::AND, DL, MVT::v4i64, Src, One));
  SDValue IsNeg = DAG.getSetCC(DL, MVT::v4i64, Src, Zero, ISD::SETLT);
  SDValue SignSrc = DAG.getSelect(DL, MVT::v4i64, IsNeg, Sign, Src);
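  // Illustrative note (not in the original source): inputs with the top bit
  // set cannot be converted directly with a signed conversion, so they are
  // halved with round-to-odd ((x >> 1) | (x & 1)), converted, and doubled by
  // the FADD below; the select picks the doubled result only when needed.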
  SmallVector<SDValue, 4> SignCvts(4);
  SmallVector<SDValue, 4> Chains(4);
  for (int i = 0; i != 4; ++i) {
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i64, SignSrc,
                              DAG.getIntPtrConstant(i, DL));
    if (IsStrict) {
      SignCvts[i] =
          DAG.getNode(ISD::STRICT_SINT_TO_FP, DL, {MVT::f32, MVT::Other},
                      {Op.getOperand(0), Elt});
      Chains[i] = SignCvts[i].getValue(1);
    } else {
      SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, DL, MVT::f32, Elt);
    }
  }
  SDValue SignCvt = DAG.getBuildVector(VT, DL, SignCvts);

  SDValue Slow, Chain;
  if (IsStrict) {
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Chains);
    Slow = DAG.getNode(ISD::STRICT_FADD, DL, {MVT::v4f32, MVT::Other},
                       {Chain, SignCvt, SignCvt});
    Chain = Slow.getValue(1);
  } else {
    Slow = DAG.getNode(ISD::FADD, DL, MVT::v4f32, SignCvt, SignCvt);
  }

  IsNeg = DAG.getNode(ISD::TRUNCATE, DL, MVT::v4i32, IsNeg);
  SDValue Cvt = DAG.getSelect(DL, MVT::v4f32, IsNeg, Slow, SignCvt);

  if (IsStrict)
    return DAG.getMergeValues({Cvt, Chain}, DL);

  return Cvt;
}
SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
                                           SelectionDAG &DAG) const {
  bool IsStrict = Op->isStrictFPOpcode();
  unsigned OpNo = IsStrict ? 1 : 0;
  SDValue Src = Op.getOperand(OpNo);
  SDValue Chain = IsStrict ? Op->getOperand(0) : DAG.getEntryNode();
  MVT SrcVT = Src.getSimpleValueType();
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);

  if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
    return Extract;

  if (SDValue R = lowerFPToIntToFP(Op, DAG, Subtarget))
    return R;

  if (SrcVT.isVector()) {
    if (SrcVT == MVT::v2i32 && VT == MVT::v2f64) {
      // Note: Since v2f64 is a legal type, we don't need to zero extend the
      // source for strict FP.
      if (IsStrict)
        return DAG.getNode(
            X86ISD::STRICT_CVTSI2P, dl, {VT, MVT::Other},
            {Chain, DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
                                DAG.getUNDEF(SrcVT))});
      return DAG.getNode(X86ISD::CVTSI2P, dl, VT,
                         DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
                                     DAG.getUNDEF(SrcVT)));
    }
    if (SrcVT == MVT::v2i64 || SrcVT == MVT::v4i64)
      return lowerINT_TO_FP_vXi64(Op, DAG, Subtarget);

    return SDValue();
  }

  assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
         "Unknown SINT_TO_FP to lower!");

  bool UseSSEReg = isScalarFPTypeInSSEReg(VT);

  // These are really Legal; return the operand so the caller accepts it as
  // Legal.
  if (SrcVT == MVT::i32 && UseSSEReg)
    return Op;
  if (SrcVT == MVT::i64 && UseSSEReg && Subtarget.is64Bit())
    return Op;

  if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
    return V;

  // SSE doesn't have an i16 conversion so we need to promote.
  if (SrcVT == MVT::i16 && (UseSSEReg || VT == MVT::f128)) {
    SDValue Ext = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::i32, Src);
    if (IsStrict)
      return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
                         {Chain, Ext});

    return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Ext);
  }

  if (VT == MVT::f128)
    return LowerF128Call(Op, DAG, RTLIB::getSINTTOFP(SrcVT, VT));

  SDValue ValueToStore = Src;
  if (SrcVT == MVT::i64 && Subtarget.hasSSE2() && !Subtarget.is64Bit())
    // Bitcasting to f64 here allows us to do a single 64-bit store from
    // an SSE register, avoiding the store forwarding penalty that would come
    // with two 32-bit stores.
    ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);

  unsigned Size = SrcVT.getStoreSize();
  Align Alignment(Size);
  MachineFunction &MF = DAG.getMachineFunction();
  auto PtrVT = getPointerTy(MF.getDataLayout());
  int SSFI = MF.getFrameInfo().CreateStackObject(Size, Alignment, false);
  MachinePointerInfo MPI =
      MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
  Chain = DAG.getStore(Chain, dl, ValueToStore, StackSlot, MPI, Alignment);
  std::pair<SDValue, SDValue> Tmp =
      BuildFILD(VT, SrcVT, dl, Chain, StackSlot, MPI, Alignment, DAG);

  if (IsStrict)
    return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);

  return Tmp.first;
}
std::pair<SDValue, SDValue> X86TargetLowering::BuildFILD(
    EVT DstVT, EVT SrcVT, const SDLoc &DL, SDValue Chain, SDValue Pointer,
    MachinePointerInfo PtrInfo, Align Alignment, SelectionDAG &DAG) const {
  // Build the FILD.
  SDVTList Tys;
  bool useSSE = isScalarFPTypeInSSEReg(DstVT);
  if (useSSE)
    Tys = DAG.getVTList(MVT::f80, MVT::Other);
  else
    Tys = DAG.getVTList(DstVT, MVT::Other);

  SDValue FILDOps[] = {Chain, Pointer};
  SDValue Result =
      DAG.getMemIntrinsicNode(X86ISD::FILD, DL, Tys, FILDOps, SrcVT, PtrInfo,
                              Alignment, MachineMemOperand::MOLoad);
  Chain = Result.getValue(1);

  if (useSSE) {
    MachineFunction &MF = DAG.getMachineFunction();
    unsigned SSFISize = DstVT.getStoreSize();
    int SSFI =
        MF.getFrameInfo().CreateStackObject(SSFISize, Align(SSFISize), false);
    auto PtrVT = getPointerTy(MF.getDataLayout());
    SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
    Tys = DAG.getVTList(MVT::Other);
    SDValue FSTOps[] = {Chain, Result, StackSlot};
    MachineMemOperand *StoreMMO = DAG.getMachineFunction().getMachineMemOperand(
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI),
        MachineMemOperand::MOStore, SSFISize, Align(SSFISize));

    Chain =
        DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys, FSTOps, DstVT, StoreMMO);
    Result = DAG.getLoad(
        DstVT, DL, Chain, StackSlot,
        MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI));
    Chain = Result.getValue(1);
  }

  return { Result, Chain };
}
/// Horizontal vector math instructions may be slower than normal math with
/// shuffles. Limit horizontal op codegen based on size/speed trade-offs, uarch
/// implementation, and likely shuffle complexity of the alternate sequence.
static bool shouldUseHorizontalOp(bool IsSingleSource, SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {
  bool IsOptimizingSize = DAG.shouldOptForSize();
  bool HasFastHOps = Subtarget.hasFastHorizontalOps();
  return !IsSingleSource || IsOptimizingSize || HasFastHOps;
}
/// 64-bit unsigned integer to double expansion.
static SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {
  // This algorithm is not obvious. Here is what we're trying to output:
  /*
     movq       %rax,  %xmm0
     punpckldq  (c0),  %xmm0  // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
     subpd      (c1),  %xmm0  // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
     #ifdef __SSE3__
       haddpd   %xmm0, %xmm0
     #else
       pshufd   $0x4e, %xmm0, %xmm1
       addpd    %xmm1, %xmm0
     #endif
  */
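  // Illustrative note (not in the original source): 0x43300000 and 0x45300000
  // are the high words of the doubles 2^52 and 2^84, so after punpckldq the
  // two lanes hold exactly 2^52 + lo32 and 2^84 + hi32 * 2^32. Subtracting
  // c1 = { 2^52, 2^84 } leaves { lo32, hi32 * 2^32 }, and the final add
  // recombines them into the full 64-bit value.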
  bool IsStrict = Op->isStrictFPOpcode();
  unsigned OpNo = IsStrict ? 1 : 0;
  SDLoc dl(Op);
  LLVMContext *Context = DAG.getContext();

  // Build some magic constants.
  static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
  Constant *C0 = ConstantDataVector::get(*Context, CV0);
  auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
  SDValue CPIdx0 = DAG.getConstantPool(C0, PtrVT, Align(16));

  SmallVector<Constant*,2> CV1;
  CV1.push_back(
    ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
                                      APInt(64, 0x4330000000000000ULL))));
  CV1.push_back(
    ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble(),
                                      APInt(64, 0x4530000000000000ULL))));
  Constant *C1 = ConstantVector::get(CV1);
  SDValue CPIdx1 = DAG.getConstantPool(C1, PtrVT, Align(16));

  // Load the 64-bit value into an XMM register.
  SDValue XR1 =
      DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Op.getOperand(OpNo));
  SDValue CLod0 =
      DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
                  MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
                  /* Alignment = */ 16);
  SDValue Unpck1 =
      getUnpackl(DAG, dl, MVT::v4i32, DAG.getBitcast(MVT::v4i32, XR1), CLod0);

  SDValue CLod1 =
      DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
                  MachinePointerInfo::getConstantPool(DAG.getMachineFunction()),
                  /* Alignment = */ 16);
  SDValue XR2F = DAG.getBitcast(MVT::v2f64, Unpck1);
  SDValue Sub;
  SDValue Chain;
  // TODO: Are there any fast-math-flags to propagate here?
  if (IsStrict) {
    Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::v2f64, MVT::Other},
                      {Op.getOperand(0), XR2F, CLod1});
    Chain = Sub.getValue(1);
  } else
    Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);

  SDValue Result;
  if (!IsStrict && Subtarget.hasSSE3() &&
      shouldUseHorizontalOp(true, DAG, Subtarget)) {
    // FIXME: Do we need a STRICT version of FHADD?
    Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
  } else {
    SDValue Shuffle = DAG.getVectorShuffle(MVT::v2f64, dl, Sub, Sub, {1,-1});
    if (IsStrict) {
      Result = DAG.getNode(ISD::STRICT_FADD, dl, {MVT::v2f64, MVT::Other},
                           {Chain, Shuffle, Sub});
      Chain = Result.getValue(1);
    } else
      Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64, Shuffle, Sub);
  }
  Result = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
                       DAG.getIntPtrConstant(0, dl));
  if (IsStrict)
    return DAG.getMergeValues({Result, Chain}, dl);

  return Result;
}
/// 32-bit unsigned integer to float expansion.
static SDValue LowerUINT_TO_FP_i32(SDValue Op, SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {
  unsigned OpNo = Op.getNode()->isStrictFPOpcode() ? 1 : 0;
  SDLoc dl(Op);
  // FP constant to bias correct the final result.
  SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl,
                                   MVT::f64);

  // Load the 32-bit value into an XMM register.
  SDValue Load =
      DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Op.getOperand(OpNo));

  // Zero out the upper parts of the register.
  Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);

  // Or the load with the bias.
  SDValue Or = DAG.getNode(
      ISD::OR, dl, MVT::v2i64,
      DAG.getBitcast(MVT::v2i64, Load),
      DAG.getBitcast(MVT::v2i64,
                     DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2f64, Bias)));
  Or =
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                  DAG.getBitcast(MVT::v2f64, Or), DAG.getIntPtrConstant(0, dl));

  if (Op.getNode()->isStrictFPOpcode()) {
    // Subtract the bias.
    // TODO: Are there any fast-math-flags to propagate here?
    SDValue Chain = Op.getOperand(0);
    SDValue Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::f64, MVT::Other},
                              {Chain, Or, Bias});

    if (Op.getValueType() == Sub.getValueType())
      return Sub;

    // Handle final rounding.
    std::pair<SDValue, SDValue> ResultPair = DAG.getStrictFPExtendOrRound(
        Sub, Sub.getValue(1), dl, Op.getSimpleValueType());

    return DAG.getMergeValues({ResultPair.first, ResultPair.second}, dl);
  }

  // Subtract the bias.
  // TODO: Are there any fast-math-flags to propagate here?
  SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);

  // Handle final rounding.
  return DAG.getFPExtendOrRound(Sub, dl, Op.getSimpleValueType());
}
static SDValue lowerUINT_TO_FP_v2i32(SDValue Op, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget,
                                     const SDLoc &DL) {
  if (Op.getSimpleValueType() != MVT::v2f64)
    return SDValue();

  bool IsStrict = Op->isStrictFPOpcode();

  SDValue N0 = Op.getOperand(IsStrict ? 1 : 0);
  assert(N0.getSimpleValueType() == MVT::v2i32 && "Unexpected input type");

  if (Subtarget.hasAVX512()) {
    if (!Subtarget.hasVLX()) {
      // Let generic type legalization widen this.
      if (!IsStrict)
        return SDValue();
      // Otherwise pad the integer input with 0s and widen the operation.
      N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
                       DAG.getConstant(0, DL, MVT::v2i32));
      SDValue Res = DAG.getNode(Op->getOpcode(), DL, {MVT::v4f64, MVT::Other},
                                {Op.getOperand(0), N0});
      SDValue Chain = Res.getValue(1);
      Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2f64, Res,
                        DAG.getIntPtrConstant(0, DL));
      return DAG.getMergeValues({Res, Chain}, DL);
    }

    // Legalize to v4i32 type.
    N0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
                     DAG.getUNDEF(MVT::v2i32));
    if (IsStrict)
      return DAG.getNode(X86ISD::STRICT_CVTUI2P, DL, {MVT::v2f64, MVT::Other},
                         {Op.getOperand(0), N0});
    return DAG.getNode(X86ISD::CVTUI2P, DL, MVT::v2f64, N0);
  }

  // Zero extend to 2i64, OR with the floating point representation of 2^52.
  // This gives us the floating point equivalent of 2^52 + the i32 integer
  // since double has 52 bits of mantissa. Then subtract 2^52 in floating
  // point, leaving just our i32 integers in double format.
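  // Illustrative note (not in the original source): for an input lane of 7,
  // zext gives 0x0000000000000007; OR-ing in 0x4330000000000000 produces the
  // bit pattern of the double 2^52 + 7, and subtracting 2^52 yields exactly
  // 7.0. This works because any u32 fits in the low 32 mantissa bits.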
  SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v2i64, N0);
  SDValue VBias =
      DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), DL, MVT::v2f64);
  SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v2i64, ZExtIn,
                           DAG.getBitcast(MVT::v2i64, VBias));
  Or = DAG.getBitcast(MVT::v2f64, Or);

  if (IsStrict)
    return DAG.getNode(ISD::STRICT_FSUB, DL, {MVT::v2f64, MVT::Other},
                       {Op.getOperand(0), Or, VBias});
  return DAG.getNode(ISD::FSUB, DL, MVT::v2f64, Or, VBias);
}
static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  SDLoc DL(Op);
  bool IsStrict = Op->isStrictFPOpcode();
  SDValue V = Op->getOperand(IsStrict ? 1 : 0);
  MVT VecIntVT = V.getSimpleValueType();
  assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
         "Unsupported custom type");

  if (Subtarget.hasAVX512()) {
    // With AVX512, but not VLX we need to widen to get a 512-bit result type.
    assert(!Subtarget.hasVLX() && "Unexpected features");
    MVT VT = Op->getSimpleValueType(0);

    // v8i32->v8f64 is legal with AVX512 so just return it.
    if (VT == MVT::v8f64)
      return SDValue();

    assert((VT == MVT::v4f32 || VT == MVT::v8f32 || VT == MVT::v4f64) &&
           "Unexpected VT!");
    MVT WideVT = VT == MVT::v4f64 ? MVT::v8f64 : MVT::v16f32;
    MVT WideIntVT = VT == MVT::v4f64 ? MVT::v8i32 : MVT::v16i32;
    // Need to concat with a zero vector for strict fp to avoid spurious
    // exceptions.
    SDValue Tmp =
        IsStrict ? DAG.getConstant(0, DL, WideIntVT) : DAG.getUNDEF(WideIntVT);
    V = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideIntVT, Tmp, V,
                    DAG.getIntPtrConstant(0, DL));
    SDValue Res, Chain;
    if (IsStrict) {
      Res = DAG.getNode(ISD::STRICT_UINT_TO_FP, DL, {WideVT, MVT::Other},
                        {Op->getOperand(0), V});
      Chain = Res.getValue(1);
    } else {
      Res = DAG.getNode(ISD::UINT_TO_FP, DL, WideVT, V);
    }

    Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
                      DAG.getIntPtrConstant(0, DL));

    if (IsStrict)
      return DAG.getMergeValues({Res, Chain}, DL);
    return Res;
  }

  if (Subtarget.hasAVX() && VecIntVT == MVT::v4i32 &&
      Op->getSimpleValueType(0) == MVT::v4f64) {
    SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::v4i64, V);
    Constant *Bias = ConstantFP::get(
        *DAG.getContext(),
        APFloat(APFloat::IEEEdouble(), APInt(64, 0x4330000000000000ULL)));
    auto PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
    SDValue CPIdx = DAG.getConstantPool(Bias, PtrVT, Align(8));
    SDVTList Tys = DAG.getVTList(MVT::v4f64, MVT::Other);
    SDValue Ops[] = {DAG.getEntryNode(), CPIdx};
    SDValue VBias = DAG.getMemIntrinsicNode(
        X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::f64,
        MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), Align(8),
        MachineMemOperand::MOLoad);

    SDValue Or = DAG.getNode(ISD::OR, DL, MVT::v4i64, ZExtIn,
                             DAG.getBitcast(MVT::v4i64, VBias));
    Or = DAG.getBitcast(MVT::v4f64, Or);

    if (IsStrict)
      return DAG.getNode(ISD::STRICT_FSUB, DL, {MVT::v4f64, MVT::Other},
                         {Op.getOperand(0), Or, VBias});
    return DAG.getNode(ISD::FSUB, DL, MVT::v4f64, Or, VBias);
  }

  // The algorithm is the following:
  // #ifdef __SSE4_1__
  //   uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
  //   uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
  //                                 (uint4) 0x53000000, 0xaa);
  // #else
  //   uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
  //   uint4 hi = (v >> 16) | (uint4) 0x53000000;
  // #endif
  //   float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
  //   return (float4) lo + fhi;
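  //
  // Illustrative note (not in the original source): 0x4b000000 is the float
  // 2^23 and 0x53000000 is 2^39, so "lo" holds 2^23 + low16 and "hi" holds
  // 2^39 + high16 * 2^16 exactly. The constant 0x53000080 below encodes
  // 2^39 + 2^23, so fhi == high16 * 2^16 - 2^23 and lo + fhi reconstructs
  // the full 32-bit value.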
19823 bool Is128 = VecIntVT == MVT::v4i32;
19824 MVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
19825 // If we convert to something else than the supported type, e.g., to v4f64,
19827 if (VecFloatVT != Op->getSimpleValueType(0))
19830 // In the #idef/#else code, we have in common:
19831 // - The vector of constants:
19837 // Create the splat vector for 0x4b000000.
19838 SDValue VecCstLow = DAG.getConstant(0x4b000000, DL, VecIntVT);
19839 // Create the splat vector for 0x53000000.
19840 SDValue VecCstHigh = DAG.getConstant(0x53000000, DL, VecIntVT);
19842 // Create the right shift.
19843 SDValue VecCstShift = DAG.getConstant(16, DL, VecIntVT);
19844 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
19847 if (Subtarget.hasSSE41()) {
19848 MVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
19849 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
19850 SDValue VecCstLowBitcast = DAG.getBitcast(VecI16VT, VecCstLow);
19851 SDValue VecBitcast = DAG.getBitcast(VecI16VT, V);
19852 // Low will be bitcasted right away, so do not bother bitcasting back to its
19854 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
19855 VecCstLowBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
19856 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
19857 // (uint4) 0x53000000, 0xaa);
19858 SDValue VecCstHighBitcast = DAG.getBitcast(VecI16VT, VecCstHigh);
19859 SDValue VecShiftBitcast = DAG.getBitcast(VecI16VT, HighShift);
19860 // High will be bitcasted right away, so do not bother bitcasting back to
19861 // its original type.
19862 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
19863 VecCstHighBitcast, DAG.getTargetConstant(0xaa, DL, MVT::i8));
19865 SDValue VecCstMask = DAG.getConstant(0xffff, DL, VecIntVT);
19866 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
19867 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
19868 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
19870 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
19871 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
19874 // Create the vector constant for (0x1.0p39f + 0x1.0p23f).
19875 SDValue VecCstFSub = DAG.getConstantFP(
19876 APFloat(APFloat::IEEEsingle(), APInt(32, 0x53000080)), DL, VecFloatVT);
19878 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
19879 // NOTE: By using fsub of a positive constant instead of fadd of a negative
19880 // constant, we avoid reassociation in MachineCombiner when unsafe-fp-math is
19881 // enabled. See PR24512.
19882 SDValue HighBitcast = DAG.getBitcast(VecFloatVT, High);
19883 // TODO: Are there any fast-math-flags to propagate here?
19885 SDValue LowBitcast = DAG.getBitcast(VecFloatVT, Low);
19886 // return (float4) lo + fhi;
19887 if (IsStrict) {
19888 SDValue FHigh = DAG.getNode(ISD::STRICT_FSUB, DL, {VecFloatVT, MVT::Other},
19889 {Op.getOperand(0), HighBitcast, VecCstFSub});
19890 return DAG.getNode(ISD::STRICT_FADD, DL, {VecFloatVT, MVT::Other},
19891 {FHigh.getValue(1), LowBitcast, FHigh});
19892 }
19894 SDValue FHigh =
19895 DAG.getNode(ISD::FSUB, DL, VecFloatVT, HighBitcast, VecCstFSub);
19896 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
19897 }
19899 static SDValue lowerUINT_TO_FP_vec(SDValue Op, SelectionDAG &DAG,
19900 const X86Subtarget &Subtarget) {
19901 unsigned OpNo = Op.getNode()->isStrictFPOpcode() ? 1 : 0;
19902 SDValue N0 = Op.getOperand(OpNo);
19903 MVT SrcVT = N0.getSimpleValueType();
19904 SDLoc dl(Op);
19906 switch (SrcVT.SimpleTy) {
19907 default:
19908 llvm_unreachable("Custom UINT_TO_FP is not supported!");
19909 case MVT::v2i32:
19910 return lowerUINT_TO_FP_v2i32(Op, DAG, Subtarget, dl);
19911 case MVT::v4i32:
19912 case MVT::v8i32:
19913 return lowerUINT_TO_FP_vXi32(Op, DAG, Subtarget);
19914 case MVT::v2i64:
19915 case MVT::v4i64:
19916 return lowerINT_TO_FP_vXi64(Op, DAG, Subtarget);
19917 }
19918 }
19920 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
19921 SelectionDAG &DAG) const {
19922 bool IsStrict = Op->isStrictFPOpcode();
19923 unsigned OpNo = IsStrict ? 1 : 0;
19924 SDValue Src = Op.getOperand(OpNo);
19925 SDLoc dl(Op);
19926 auto PtrVT = getPointerTy(DAG.getDataLayout());
19927 MVT SrcVT = Src.getSimpleValueType();
19928 MVT DstVT = Op->getSimpleValueType(0);
19929 SDValue Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
19931 if (DstVT == MVT::f128)
19932 return LowerF128Call(Op, DAG, RTLIB::getUINTTOFP(SrcVT, DstVT));
19934 if (DstVT.isVector())
19935 return lowerUINT_TO_FP_vec(Op, DAG, Subtarget);
19937 if (SDValue Extract = vectorizeExtractedCast(Op, DAG, Subtarget))
19938 return Extract;
19940 if (Subtarget.hasAVX512() && isScalarFPTypeInSSEReg(DstVT) &&
19941 (SrcVT == MVT::i32 || (SrcVT == MVT::i64 && Subtarget.is64Bit()))) {
19942 // Conversions from unsigned i32 to f32/f64 are legal,
19943 // using VCVTUSI2SS/SD. Same for i64 in 64-bit mode.
19944 return Op;
19945 }
19947 // Promote i32 to i64 and use a signed conversion on 64-bit targets.
19948 if (SrcVT == MVT::i32 && Subtarget.is64Bit()) {
19949 Src = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Src);
19950 if (IsStrict)
19951 return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {DstVT, MVT::Other},
19952 {Chain, Src});
19953 return DAG.getNode(ISD::SINT_TO_FP, dl, DstVT, Src);
19954 }
19956 if (SDValue V = LowerI64IntToFP_AVX512DQ(Op, DAG, Subtarget))
19957 return V;
19959 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
19960 return LowerUINT_TO_FP_i64(Op, DAG, Subtarget);
19961 if (SrcVT == MVT::i32 && X86ScalarSSEf64 && DstVT != MVT::f80)
19962 return LowerUINT_TO_FP_i32(Op, DAG, Subtarget);
19963 if (Subtarget.is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
19964 return SDValue();
19966 // Make a 64-bit buffer, and use it to build an FILD.
19967 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64, 8);
19968 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
19969 MachinePointerInfo MPI =
19970 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SSFI);
19971 if (SrcVT == MVT::i32) {
19972 SDValue OffsetSlot = DAG.getMemBasePlusOffset(StackSlot, 4, dl);
19973 SDValue Store1 =
19974 DAG.getStore(Chain, dl, Src, StackSlot, MPI, 8 /*Align*/);
19975 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, dl, MVT::i32),
19976 OffsetSlot, MPI.getWithOffset(4), 4);
19977 std::pair<SDValue, SDValue> Tmp =
19978 BuildFILD(DstVT, MVT::i64, dl, Store2, StackSlot, MPI, Align(8), DAG);
19979 if (IsStrict)
19980 return DAG.getMergeValues({Tmp.first, Tmp.second}, dl);
19981 else
19982 return Tmp.first;
19983 }
19985 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
19986 SDValue ValueToStore = Src;
19987 if (isScalarFPTypeInSSEReg(Op.getValueType()) && !Subtarget.is64Bit()) {
19988 // Bitcasting to f64 here allows us to do a single 64-bit store from
19989 // an SSE register, avoiding the store forwarding penalty that would come
19990 // with two 32-bit stores.
19991 ValueToStore = DAG.getBitcast(MVT::f64, ValueToStore);
19992 }
19993 SDValue Store =
19994 DAG.getStore(Chain, dl, ValueToStore, StackSlot, MPI, Align(8));
19995 // For i64 source, we need to add the appropriate power of 2 if the input
19996 // was negative. This is the same as the optimization in
19997 // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
19998 // we must be careful to do the computation in x87 extended precision, not
19999 // in SSE. (The generic code can't know it's OK to do this, or how to.)
20000 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
20001 SDValue Ops[] = { Store, StackSlot };
20002 SDValue Fild =
20003 DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops, MVT::i64, MPI,
20004 Align(8), MachineMemOperand::MOLoad);
20005 Chain = Fild.getValue(1);
20008 // Check whether the sign bit is set.
20009 SDValue SignSet = DAG.getSetCC(
20010 dl, getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), MVT::i64),
20011 Op.getOperand(OpNo), DAG.getConstant(0, dl, MVT::i64), ISD::SETLT);
20013 // Build a 64 bit pair (FF, 0) in the constant pool, with FF in the hi bits.
20014 APInt FF(64, 0x5F80000000000000ULL);
20015 SDValue FudgePtr = DAG.getConstantPool(
20016 ConstantInt::get(*DAG.getContext(), FF), PtrVT);
20017 Align CPAlignment = cast<ConstantPoolSDNode>(FudgePtr)->getAlign();
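// 0x5F800000 is 2^64 as an IEEE-754 f32. If the i64 source had its sign bit
// set, FILD interpreted it as a value 2^64 too small, so the select below
// loads 2^64 (at offset 4 of the pair) to add back in; otherwise it loads
// the 0.0 stored at offset 0.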
20019 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
20020 SDValue Zero = DAG.getIntPtrConstant(0, dl);
20021 SDValue Four = DAG.getIntPtrConstant(4, dl);
20022 SDValue Offset = DAG.getSelect(dl, Zero.getValueType(), SignSet, Four, Zero);
20023 FudgePtr = DAG.getNode(ISD::ADD, dl, PtrVT, FudgePtr, Offset);
20025 // Load the value out, extending it from f32 to f80.
20026 SDValue Fudge = DAG.getExtLoad(
20027 ISD::EXTLOAD, dl, MVT::f80, Chain, FudgePtr,
20028 MachinePointerInfo::getConstantPool(DAG.getMachineFunction()), MVT::f32,
20029 CPAlignment);
20030 Chain = Fudge.getValue(1);
20031 // Extend everything to 80 bits to force it to be done on x87.
20032 // TODO: Are there any fast-math-flags to propagate here?
20033 if (IsStrict) {
20034 SDValue Add = DAG.getNode(ISD::STRICT_FADD, dl, {MVT::f80, MVT::Other},
20035 {Chain, Fild, Fudge});
20036 // STRICT_FP_ROUND can't handle equal types.
20037 if (DstVT == MVT::f80)
20038 return DAG.getMergeValues({Add, Chain}, dl);
20039 return DAG.getNode(ISD::STRICT_FP_ROUND, dl, {DstVT, MVT::Other},
20040 {Add.getValue(1), Add, DAG.getIntPtrConstant(0, dl)});
20041 }
20042 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
20043 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add,
20044 DAG.getIntPtrConstant(0, dl));
20045 }
20047 // If the given FP_TO_SINT (IsSigned) or FP_TO_UINT (!IsSigned) operation
20048 // is legal, or has an fp128 or f16 source (which needs to be promoted to f32),
20049 // just return an SDValue().
20050 // Otherwise it is assumed to be a conversion from one of f32, f64 or f80
20051 // to i16, i32 or i64, and we lower it to a legal sequence and return the
20052 // result.
20053 SDValue
20054 X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
20055 bool IsSigned, SDValue &Chain) const {
20056 bool IsStrict = Op->isStrictFPOpcode();
20057 SDLoc DL(Op);
20059 EVT DstTy = Op.getValueType();
20060 SDValue Value = Op.getOperand(IsStrict ? 1 : 0);
20061 EVT TheVT = Value.getValueType();
20062 auto PtrVT = getPointerTy(DAG.getDataLayout());
20064 if (TheVT != MVT::f32 && TheVT != MVT::f64 && TheVT != MVT::f80) {
20065 // f16 must be promoted before using the lowering in this routine.
20066 // fp128 does not use this lowering.
20067 return SDValue();
20068 }
20070 // If using FIST to compute an unsigned i64, we'll need some fixup
20071 // to handle values above the maximum signed i64. A FIST is always
20072 // used for the 32-bit subtarget, but also for f80 on a 64-bit target.
20073 bool UnsignedFixup = !IsSigned && DstTy == MVT::i64;
20075 // FIXME: This does not generate an invalid exception if the input does not
20076 // fit in i32. PR44019
20077 if (!IsSigned && DstTy != MVT::i64) {
20078 // Replace the fp-to-uint32 operation with an fp-to-sint64 FIST.
20079 // The low 32 bits of the fist result will have the correct uint32 result.
20080 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
20082 DstTy = MVT::i64;
20083 }
20084 assert(DstTy.getSimpleVT() <= MVT::i64 &&
20085 DstTy.getSimpleVT() >= MVT::i16 &&
20086 "Unknown FP_TO_INT to lower!");
20088 // We lower FP->int64 into FISTP64 followed by a load from a temporary
20089 // stack slot.
20090 MachineFunction &MF = DAG.getMachineFunction();
20091 unsigned MemSize = DstTy.getStoreSize();
20092 int SSFI =
20093 MF.getFrameInfo().CreateStackObject(MemSize, Align(MemSize), false);
20094 SDValue StackSlot = DAG.getFrameIndex(SSFI, PtrVT);
20096 Chain = IsStrict ? Op.getOperand(0) : DAG.getEntryNode();
20098 SDValue Adjust; // 0x0 or 0x80000000, for result sign bit adjustment.
20100 if (UnsignedFixup) {
20102 // Conversion to unsigned i64 is implemented with a select,
20103 // depending on whether the source value fits in the range
20104 // of a signed i64. Let Thresh be the FP equivalent of
20105 // 0x8000000000000000ULL.
20107 // Adjust = (Value < Thresh) ? 0 : 0x80000000;
20108 // FltOfs = (Value < Thresh) ? 0 : 0x80000000;
20109 // FistSrc = (Value - FltOfs);
20110 // Fist-to-mem64 FistSrc
20111 // Add 0 or 0x800...0ULL to the 64-bit result, which is equivalent
20112 // to XOR'ing the high 32 bits with Adjust.
20114 // Being a power of 2, Thresh is exactly representable in all FP formats.
20115 // For X87 we'd like to use the smallest FP type for this constant, but
20116 // for DAG type consistency we have to match the FP operand type.
20118 APFloat Thresh(APFloat::IEEEsingle(), APInt(32, 0x5f000000));
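// 0x5f000000 is 2^63 as an IEEE-754 f32; being a power of two it converts
// exactly to f64/f80 below, and values below it already fit in a signed i64.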
20119 LLVM_ATTRIBUTE_UNUSED APFloat::opStatus Status = APFloat::opOK;
20120 bool LosesInfo = false;
20121 if (TheVT == MVT::f64)
20122 // The rounding mode is irrelevant as the conversion should be exact.
20123 Status = Thresh.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven,
20124 &LosesInfo);
20125 else if (TheVT == MVT::f80)
20126 Status = Thresh.convert(APFloat::x87DoubleExtended(),
20127 APFloat::rmNearestTiesToEven, &LosesInfo);
20129 assert(Status == APFloat::opOK && !LosesInfo &&
20130 "FP conversion should have been exact");
20132 SDValue ThreshVal = DAG.getConstantFP(Thresh, DL, TheVT);
20134 EVT ResVT = getSetCCResultType(DAG.getDataLayout(),
20135 *DAG.getContext(), TheVT);
20136 SDValue Cmp;
20137 if (IsStrict) {
20138 Cmp = DAG.getSetCC(DL, ResVT, Value, ThreshVal, ISD::SETLT,
20139 Chain, /*IsSignaling*/ true);
20140 Chain = Cmp.getValue(1);
20141 } else {
20142 Cmp = DAG.getSetCC(DL, ResVT, Value, ThreshVal, ISD::SETLT);
20143 }
20145 Adjust = DAG.getSelect(DL, MVT::i64, Cmp,
20146 DAG.getConstant(0, DL, MVT::i64),
20147 DAG.getConstant(APInt::getSignMask(64),
20148 DL, MVT::i64));
20149 SDValue FltOfs = DAG.getSelect(DL, TheVT, Cmp,
20150 DAG.getConstantFP(0.0, DL, TheVT),
20151 ThreshVal);
20153 if (IsStrict) {
20154 Value = DAG.getNode(ISD::STRICT_FSUB, DL, { TheVT, MVT::Other},
20155 { Chain, Value, FltOfs });
20156 Chain = Value.getValue(1);
20157 } else
20158 Value = DAG.getNode(ISD::FSUB, DL, TheVT, Value, FltOfs);
20159 }
20161 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);
20163 // FIXME This causes a redundant load/store if the SSE-class value is already
20164 // in memory, such as if it is on the callstack.
20165 if (isScalarFPTypeInSSEReg(TheVT)) {
20166 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
20167 Chain = DAG.getStore(Chain, DL, Value, StackSlot, MPI);
20168 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
20169 SDValue Ops[] = { Chain, StackSlot };
20171 unsigned FLDSize = TheVT.getStoreSize();
20172 assert(FLDSize <= MemSize && "Stack slot not big enough");
20173 MachineMemOperand *MMO = MF.getMachineMemOperand(
20174 MPI, MachineMemOperand::MOLoad, FLDSize, Align(FLDSize));
20175 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, TheVT, MMO);
20176 Chain = Value.getValue(1);
20177 }
20179 // Build the FP_TO_INT*_IN_MEM
20180 MachineMemOperand *MMO = MF.getMachineMemOperand(
20181 MPI, MachineMemOperand::MOStore, MemSize, Align(MemSize));
20182 SDValue Ops[] = { Chain, Value, StackSlot };
20183 SDValue FIST = DAG.getMemIntrinsicNode(X86ISD::FP_TO_INT_IN_MEM, DL,
20184 DAG.getVTList(MVT::Other),
20185 Ops, DstTy, MMO);
20187 SDValue Res = DAG.getLoad(Op.getValueType(), SDLoc(Op), FIST, StackSlot, MPI);
20188 Chain = Res.getValue(1);
20190 // If we need an unsigned fixup, XOR the result with adjust.
20191 if (UnsignedFixup)
20192 Res = DAG.getNode(ISD::XOR, DL, MVT::i64, Res, Adjust);
20194 return Res;
20195 }
20197 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
20198 const X86Subtarget &Subtarget) {
20199 MVT VT = Op.getSimpleValueType();
20200 SDValue In = Op.getOperand(0);
20201 MVT InVT = In.getSimpleValueType();
20202 SDLoc dl(Op);
20203 unsigned Opc = Op.getOpcode();
20205 assert(VT.isVector() && InVT.isVector() && "Expected vector type");
20206 assert((Opc == ISD::ANY_EXTEND || Opc == ISD::ZERO_EXTEND) &&
20207 "Unexpected extension opcode");
20208 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
20209 "Expected same number of elements");
20210 assert((VT.getVectorElementType() == MVT::i16 ||
20211 VT.getVectorElementType() == MVT::i32 ||
20212 VT.getVectorElementType() == MVT::i64) &&
20213 "Unexpected element type");
20214 assert((InVT.getVectorElementType() == MVT::i8 ||
20215 InVT.getVectorElementType() == MVT::i16 ||
20216 InVT.getVectorElementType() == MVT::i32) &&
20217 "Unexpected element type");
20219 unsigned ExtendInVecOpc = getOpcode_EXTEND_VECTOR_INREG(Opc);
20221 if (VT == MVT::v32i16 && !Subtarget.hasBWI()) {
20222 assert(InVT == MVT::v32i8 && "Unexpected VT!");
20223 return splitVectorIntUnary(Op, DAG);
20224 }
20226 if (Subtarget.hasInt256())
20227 return Op;
20229 // Optimize vectors in AVX mode:
20230 //
20231 // v8i16 -> v8i32
20232 // Use vpmovzwd for 4 lower elements v8i16 -> v4i32.
20233 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
20234 // Concat upper and lower parts.
20235 //
20236 // v4i32 -> v4i64
20237 // Use vpmovzdq for 4 lower elements v4i32 -> v2i64.
20238 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
20239 // Concat upper and lower parts.
20241 MVT HalfVT = VT.getHalfNumVectorElementsVT();
20242 SDValue OpLo = DAG.getNode(ExtendInVecOpc, dl, HalfVT, In);
20244 // Short-circuit if we can determine that each 128-bit half is the same value.
20245 // Otherwise, this is difficult to match and optimize.
20246 if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(In))
20247 if (hasIdenticalHalvesShuffleMask(Shuf->getMask()))
20248 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpLo);
20250 SDValue ZeroVec = DAG.getConstant(0, dl, InVT);
20251 SDValue Undef = DAG.getUNDEF(InVT);
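// Unpacking the high half against a zero vector interleaves a zero above
// each source element, which is precisely zero-extension; for ANY_EXTEND an
// undef second operand is acceptable since the high bits are unspecified.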
20252 bool NeedZero = Opc == ISD::ZERO_EXTEND;
20253 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
20254 OpHi = DAG.getBitcast(HalfVT, OpHi);
20256 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
20257 }
20259 // Helper to split and extend a v16i1 mask to v16i8 or v16i16.
20260 static SDValue SplitAndExtendv16i1(unsigned ExtOpc, MVT VT, SDValue In,
20261 const SDLoc &dl, SelectionDAG &DAG) {
20262 assert((VT == MVT::v16i8 || VT == MVT::v16i16) && "Unexpected VT.");
20263 SDValue Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
20264 DAG.getIntPtrConstant(0, dl));
20265 SDValue Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v8i1, In,
20266 DAG.getIntPtrConstant(8, dl));
20267 Lo = DAG.getNode(ExtOpc, dl, MVT::v8i16, Lo);
20268 Hi = DAG.getNode(ExtOpc, dl, MVT::v8i16, Hi);
20269 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i16, Lo, Hi);
20270 return DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20271 }
20273 static SDValue LowerZERO_EXTEND_Mask(SDValue Op,
20274 const X86Subtarget &Subtarget,
20275 SelectionDAG &DAG) {
20276 MVT VT = Op->getSimpleValueType(0);
20277 SDValue In = Op->getOperand(0);
20278 MVT InVT = In.getSimpleValueType();
20279 assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
20280 SDLoc DL(Op);
20281 unsigned NumElts = VT.getVectorNumElements();
20283 // For all vectors except vXi8, we can just emit a sign_extend and a shift. This
20284 // avoids a constant pool load.
20285 if (VT.getVectorElementType() != MVT::i8) {
20286 SDValue Extend = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, In);
20287 return DAG.getNode(ISD::SRL, DL, VT, Extend,
20288 DAG.getConstant(VT.getScalarSizeInBits() - 1, DL, VT));
20289 }
20291 // Extend VT if BWI is not supported.
20292 MVT ExtVT = VT;
20293 if (!Subtarget.hasBWI()) {
20294 // If v16i32 is to be avoided, we'll need to split and concatenate.
20295 if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
20296 return SplitAndExtendv16i1(ISD::ZERO_EXTEND, VT, In, DL, DAG);
20298 ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
20299 }
20301 // Widen to 512-bits if VLX is not supported.
20302 MVT WideVT = ExtVT;
20303 if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
20304 NumElts *= 512 / ExtVT.getSizeInBits();
20305 InVT = MVT::getVectorVT(MVT::i1, NumElts);
20306 In = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InVT, DAG.getUNDEF(InVT),
20307 In, DAG.getIntPtrConstant(0, DL));
20308 WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(),
20309 NumElts);
20310 }
20312 SDValue One = DAG.getConstant(1, DL, WideVT);
20313 SDValue Zero = DAG.getConstant(0, DL, WideVT);
20315 SDValue SelectedVal = DAG.getSelect(DL, WideVT, In, One, Zero);
20317 // Truncate if we had to extend above.
20318 if (VT != ExtVT) {
20319 WideVT = MVT::getVectorVT(MVT::i8, NumElts);
20320 SelectedVal = DAG.getNode(ISD::TRUNCATE, DL, WideVT, SelectedVal);
20321 }
20323 // Extract back to 128/256-bit if we widened.
20324 if (WideVT != VT)
20325 SelectedVal = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, SelectedVal,
20326 DAG.getIntPtrConstant(0, DL));
20328 return SelectedVal;
20329 }
20331 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
20332 SelectionDAG &DAG) {
20333 SDValue In = Op.getOperand(0);
20334 MVT SVT = In.getSimpleValueType();
20336 if (SVT.getVectorElementType() == MVT::i1)
20337 return LowerZERO_EXTEND_Mask(Op, Subtarget, DAG);
20339 assert(Subtarget.hasAVX() && "Expected AVX support");
20340 return LowerAVXExtend(Op, DAG, Subtarget);
20341 }
20343 /// Helper to recursively truncate vector elements in half with PACKSS/PACKUS.
20344 /// It makes use of the fact that vectors with enough leading sign/zero bits
20345 /// prevent the PACKSS/PACKUS from saturating the results.
20346 /// AVX2 (Int256) sub-targets require extra shuffling as the PACK*S operates
20347 /// within each 128-bit lane.
20348 static SDValue truncateVectorWithPACK(unsigned Opcode, EVT DstVT, SDValue In,
20349 const SDLoc &DL, SelectionDAG &DAG,
20350 const X86Subtarget &Subtarget) {
20351 assert((Opcode == X86ISD::PACKSS || Opcode == X86ISD::PACKUS) &&
20352 "Unexpected PACK opcode");
20353 assert(DstVT.isVector() && "VT not a vector?");
20355 // Requires SSE2 for PACKSS (SSE41 PACKUSDW is handled below).
20356 if (!Subtarget.hasSSE2())
20357 return SDValue();
20359 EVT SrcVT = In.getValueType();
20361 // No truncation required, we might get here due to recursive calls.
20362 if (SrcVT == DstVT)
20363 return In;
20365 // We only support vector truncation to 64bits or greater from a
20366 // 128bits or greater source.
20367 unsigned DstSizeInBits = DstVT.getSizeInBits();
20368 unsigned SrcSizeInBits = SrcVT.getSizeInBits();
20369 if ((DstSizeInBits % 64) != 0 || (SrcSizeInBits % 128) != 0)
20370 return SDValue();
20372 unsigned NumElems = SrcVT.getVectorNumElements();
20373 if (!isPowerOf2_32(NumElems))
20374 return SDValue();
20376 LLVMContext &Ctx = *DAG.getContext();
20377 assert(DstVT.getVectorNumElements() == NumElems && "Illegal truncation");
20378 assert(SrcSizeInBits > DstSizeInBits && "Illegal truncation");
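// For example, a v16i16 -> v16i8 truncate with enough sign bits is a single
// PACKSSWB of the two 128-bit halves, while something like v8i32 -> v8i8
// recurses: PACK*SDW the halves down to vXi16 first, then PACK*SWB to vXi8.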
20380 EVT PackedSVT = EVT::getIntegerVT(Ctx, SrcVT.getScalarSizeInBits() / 2);
20382 // Pack to the largest type possible:
20383 // vXi64/vXi32 -> PACK*SDW and vXi16 -> PACK*SWB.
20384 EVT InVT = MVT::i16, OutVT = MVT::i8;
20385 if (SrcVT.getScalarSizeInBits() > 16 &&
20386 (Opcode == X86ISD::PACKSS || Subtarget.hasSSE41())) {
20387 InVT = MVT::i32;
20388 OutVT = MVT::i16;
20389 }
20391 // 128bit -> 64bit truncate - PACK 128-bit src in the lower subvector.
20392 if (SrcVT.is128BitVector()) {
20393 InVT = EVT::getVectorVT(Ctx, InVT, 128 / InVT.getSizeInBits());
20394 OutVT = EVT::getVectorVT(Ctx, OutVT, 128 / OutVT.getSizeInBits());
20395 In = DAG.getBitcast(InVT, In);
20396 SDValue Res = DAG.getNode(Opcode, DL, OutVT, In, DAG.getUNDEF(InVT));
20397 Res = extractSubVector(Res, 0, DAG, DL, 64);
20398 return DAG.getBitcast(DstVT, Res);
20399 }
20401 // Split lower/upper subvectors.
20402 SDValue Lo, Hi;
20403 std::tie(Lo, Hi) = splitVector(In, DAG, DL);
20405 unsigned SubSizeInBits = SrcSizeInBits / 2;
20406 InVT = EVT::getVectorVT(Ctx, InVT, SubSizeInBits / InVT.getSizeInBits());
20407 OutVT = EVT::getVectorVT(Ctx, OutVT, SubSizeInBits / OutVT.getSizeInBits());
20409 // 256bit -> 128bit truncate - PACK lower/upper 128-bit subvectors.
20410 if (SrcVT.is256BitVector() && DstVT.is128BitVector()) {
20411 Lo = DAG.getBitcast(InVT, Lo);
20412 Hi = DAG.getBitcast(InVT, Hi);
20413 SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
20414 return DAG.getBitcast(DstVT, Res);
20415 }
20417 // AVX2: 512bit -> 256bit truncate - PACK lower/upper 256-bit subvectors.
20418 // AVX2: 512bit -> 128bit truncate - PACK(PACK, PACK).
20419 if (SrcVT.is512BitVector() && Subtarget.hasInt256()) {
20420 Lo = DAG.getBitcast(InVT, Lo);
20421 Hi = DAG.getBitcast(InVT, Hi);
20422 SDValue Res = DAG.getNode(Opcode, DL, OutVT, Lo, Hi);
20424 // 256-bit PACK(ARG0, ARG1) leaves us with ((LO0,LO1),(HI0,HI1)),
20425 // so we need to shuffle to get ((LO0,HI0),(LO1,HI1)).
20426 // Scale shuffle mask to avoid bitcasts and help ComputeNumSignBits.
20427 SmallVector<int, 64> Mask;
20428 int Scale = 64 / OutVT.getScalarSizeInBits();
20429 narrowShuffleMaskElts(Scale, { 0, 2, 1, 3 }, Mask);
20430 Res = DAG.getVectorShuffle(OutVT, DL, Res, Res, Mask);
20432 if (DstVT.is256BitVector())
20433 return DAG.getBitcast(DstVT, Res);
20435 // If 512bit -> 128bit truncate another stage.
20436 EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
20437 Res = DAG.getBitcast(PackedVT, Res);
20438 return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
20439 }
20441 // Recursively pack lower/upper subvectors, concat result and pack again.
20442 assert(SrcSizeInBits >= 256 && "Expected 256-bit vector or greater");
20443 EVT PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems / 2);
20444 Lo = truncateVectorWithPACK(Opcode, PackedVT, Lo, DL, DAG, Subtarget);
20445 Hi = truncateVectorWithPACK(Opcode, PackedVT, Hi, DL, DAG, Subtarget);
20447 PackedVT = EVT::getVectorVT(Ctx, PackedSVT, NumElems);
20448 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, PackedVT, Lo, Hi);
20449 return truncateVectorWithPACK(Opcode, DstVT, Res, DL, DAG, Subtarget);
20450 }
20452 static SDValue LowerTruncateVecI1(SDValue Op, SelectionDAG &DAG,
20453 const X86Subtarget &Subtarget) {
20454 SDLoc DL(Op);
20456 MVT VT = Op.getSimpleValueType();
20457 SDValue In = Op.getOperand(0);
20458 MVT InVT = In.getSimpleValueType();
20460 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type.");
20462 // Shift LSB to MSB and use VPMOVB/W2M or TESTD/Q.
20463 unsigned ShiftInx = InVT.getScalarSizeInBits() - 1;
20464 if (InVT.getScalarSizeInBits() <= 16) {
20465 if (Subtarget.hasBWI()) {
20466 // legal, will go to VPMOVB2M, VPMOVW2M
20467 if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
20468 // We need to shift to get the lsb into sign position.
20469 // Shift packed bytes not supported natively, bitcast to word
20470 MVT ExtVT = MVT::getVectorVT(MVT::i16, InVT.getSizeInBits()/16);
20471 In = DAG.getNode(ISD::SHL, DL, ExtVT,
20472 DAG.getBitcast(ExtVT, In),
20473 DAG.getConstant(ShiftInx, DL, ExtVT));
20474 In = DAG.getBitcast(InVT, In);
20475 }
20476 return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT),
20477 In, ISD::SETGT);
20478 }
20479 // Use TESTD/Q, extended vector to packed dword/qword.
20480 assert((InVT.is256BitVector() || InVT.is128BitVector()) &&
20481 "Unexpected vector type.");
20482 unsigned NumElts = InVT.getVectorNumElements();
20483 assert((NumElts == 8 || NumElts == 16) && "Unexpected number of elements");
20484 // We need to change to a wider element type that we have support for.
20485 // For 8 element vectors this is easy, we either extend to v8i32 or v8i64.
20486 // For 16 element vectors we extend to v16i32 unless we are explicitly
20487 // trying to avoid 512-bit vectors. If we are avoiding 512-bit vectors
20488 // we need to split into two 8 element vectors which we can extend to v8i32,
20489 // truncate and concat the results. There's an additional complication if
20490 // the original type is v16i8. In that case we can't split the v16i8
20491 // directly, so we need to shuffle high elements to low and use
20492 // sign_extend_vector_inreg.
20493 if (NumElts == 16 && !Subtarget.canExtendTo512DQ()) {
20494 SDValue Lo, Hi;
20495 if (InVT == MVT::v16i8) {
20496 Lo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, MVT::v8i32, In);
20497 Hi = DAG.getVectorShuffle(
20498 MVT::v16i8, DL, In, In,
20499 {8, 9, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1});
20500 Hi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, DL, MVT::v8i32, Hi);
20501 } else {
20502 assert(InVT == MVT::v16i16 && "Unexpected VT!");
20503 Lo = extract128BitVector(In, 0, DAG, DL);
20504 Hi = extract128BitVector(In, 8, DAG, DL);
20505 }
20506 // We're split now, just emit two truncates and a concat. The two
20507 // truncates will trigger legalization to come back to this function.
20508 Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Lo);
20509 Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i1, Hi);
20510 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
20511 }
20512 // We either have 8 elements or we're allowed to use 512-bit vectors.
20513 // If we have VLX, we want to use the narrowest vector that can get the
20514 // job done so we use vXi32.
20515 MVT EltVT = Subtarget.hasVLX() ? MVT::i32 : MVT::getIntegerVT(512/NumElts);
20516 MVT ExtVT = MVT::getVectorVT(EltVT, NumElts);
20517 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
20518 InVT = ExtVT;
20519 ShiftInx = InVT.getScalarSizeInBits() - 1;
20520 }
20522 if (DAG.ComputeNumSignBits(In) < InVT.getScalarSizeInBits()) {
20523 // We need to shift to get the lsb into sign position.
20524 In = DAG.getNode(ISD::SHL, DL, InVT, In,
20525 DAG.getConstant(ShiftInx, DL, InVT));
20526 }
20527 // If we have DQI, emit a pattern that will be iseled as vpmovq2m/vpmovd2m.
20528 if (Subtarget.hasDQI())
20529 return DAG.getSetCC(DL, VT, DAG.getConstant(0, DL, InVT), In, ISD::SETGT);
20530 return DAG.getSetCC(DL, VT, In, DAG.getConstant(0, DL, InVT), ISD::SETNE);
20531 }
20533 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
20534 SDLoc DL(Op);
20535 MVT VT = Op.getSimpleValueType();
20536 SDValue In = Op.getOperand(0);
20537 MVT InVT = In.getSimpleValueType();
20538 unsigned InNumEltBits = InVT.getScalarSizeInBits();
20540 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
20541 "Invalid TRUNCATE operation");
20543 // If we're called by the type legalizer, handle a few cases.
20544 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20545 if (!TLI.isTypeLegal(InVT)) {
20546 if ((InVT == MVT::v8i64 || InVT == MVT::v16i32 || InVT == MVT::v16i64) &&
20547 VT.is128BitVector()) {
20548 assert((InVT == MVT::v16i64 || Subtarget.hasVLX()) &&
20549 "Unexpected subtarget!");
20550 // The default behavior is to truncate one step, concatenate, and then
20551 // truncate the remainder. We'd rather produce two 64-bit results and
20552 // concatenate those.
20553 SDValue Lo, Hi;
20554 std::tie(Lo, Hi) = DAG.SplitVector(In, DL);
20556 EVT LoVT, HiVT;
20557 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
20559 Lo = DAG.getNode(ISD::TRUNCATE, DL, LoVT, Lo);
20560 Hi = DAG.getNode(ISD::TRUNCATE, DL, HiVT, Hi);
20561 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
20562 }
20564 // Otherwise let default legalization handle it.
20565 return SDValue();
20566 }
20568 if (VT.getVectorElementType() == MVT::i1)
20569 return LowerTruncateVecI1(Op, DAG, Subtarget);
20571 // vpmovqb/w/d, vpmovdb/w, vpmovwb
20572 if (Subtarget.hasAVX512()) {
20573 if (InVT == MVT::v32i16 && !Subtarget.hasBWI()) {
20574 assert(VT == MVT::v32i8 && "Unexpected VT!");
20575 return splitVectorIntUnary(Op, DAG);
20576 }
20578 // word to byte only under BWI. Otherwise we have to promote to v16i32
20579 // and then truncate that. But we should only do that if we haven't been
20580 // asked to avoid 512-bit vectors. The actual promotion to v16i32 will be
20581 // handled by isel patterns.
20582 if (InVT != MVT::v16i16 || Subtarget.hasBWI() ||
20583 Subtarget.canExtendTo512DQ())
20584 return Op;
20585 }
20587 unsigned NumPackedSignBits = std::min<unsigned>(VT.getScalarSizeInBits(), 16);
20588 unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
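// PACKUSWB saturates to [0, 255], so an i16 element truncates losslessly
// only if its upper 8 bits are known zero; PACKSSWB saturates to
// [-128, 127], so 9 or more known sign bits suffice. The checks below
// verify exactly those preconditions.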
20590 // Truncate with PACKUS if we are truncating a vector with leading zero bits
20591 // that extend all the way to the packed/truncated value.
20592 // Pre-SSE41 we can only use PACKUSWB.
20593 KnownBits Known = DAG.computeKnownBits(In);
20594 if ((InNumEltBits - NumPackedZeroBits) <= Known.countMinLeadingZeros())
20595 if (SDValue V =
20596 truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget))
20597 return V;
20599 // Truncate with PACKSS if we are truncating a vector with sign-bits that
20600 // extend all the way to the packed/truncated value.
20601 if ((InNumEltBits - NumPackedSignBits) < DAG.ComputeNumSignBits(In))
20602 if (SDValue V =
20603 truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget))
20604 return V;
20606 // Handle truncation of V256 to V128 using shuffles.
20607 assert(VT.is128BitVector() && InVT.is256BitVector() && "Unexpected types!");
20609 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
20610 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
20611 if (Subtarget.hasInt256()) {
20612 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
20613 In = DAG.getBitcast(MVT::v8i32, In);
20614 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, In, ShufMask);
20615 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
20616 DAG.getIntPtrConstant(0, DL));
20617 }
20619 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
20620 DAG.getIntPtrConstant(0, DL));
20621 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
20622 DAG.getIntPtrConstant(2, DL));
20623 OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
20624 OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
20625 static const int ShufMask[] = {0, 2, 4, 6};
20626 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
20627 }
20629 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
20630 // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
20631 if (Subtarget.hasInt256()) {
20632 In = DAG.getBitcast(MVT::v32i8, In);
20634 // The PSHUFB mask:
20635 static const int ShufMask1[] = { 0, 1, 4, 5, 8, 9, 12, 13,
20636 -1, -1, -1, -1, -1, -1, -1, -1,
20637 16, 17, 20, 21, 24, 25, 28, 29,
20638 -1, -1, -1, -1, -1, -1, -1, -1 };
20639 In = DAG.getVectorShuffle(MVT::v32i8, DL, In, In, ShufMask1);
20640 In = DAG.getBitcast(MVT::v4i64, In);
20642 static const int ShufMask2[] = {0, 2, -1, -1};
20643 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, In, ShufMask2);
20644 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
20645 DAG.getIntPtrConstant(0, DL));
20646 return DAG.getBitcast(VT, In);
20647 }
20649 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
20650 DAG.getIntPtrConstant(0, DL));
20652 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
20653 DAG.getIntPtrConstant(4, DL));
20655 OpLo = DAG.getBitcast(MVT::v16i8, OpLo);
20656 OpHi = DAG.getBitcast(MVT::v16i8, OpHi);
20658 // The PSHUFB mask:
20659 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
20660 -1, -1, -1, -1, -1, -1, -1, -1};
20662 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, OpLo, ShufMask1);
20663 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, OpHi, ShufMask1);
20665 OpLo = DAG.getBitcast(MVT::v4i32, OpLo);
20666 OpHi = DAG.getBitcast(MVT::v4i32, OpHi);
20668 // The MOVLHPS Mask:
20669 static const int ShufMask2[] = {0, 1, 4, 5};
20670 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
20671 return DAG.getBitcast(MVT::v8i16, res);
20672 }
20674 if (VT == MVT::v16i8 && InVT == MVT::v16i16) {
20675 // Use an AND to zero upper bits for PACKUS.
20676 In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(255, DL, InVT));
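// With the upper byte of every i16 element cleared, PACKUS's unsigned
// saturation is the identity on each element, so the PACKUS below performs
// the truncation exactly.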
20678 SDValue InLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
20679 DAG.getIntPtrConstant(0, DL));
20680 SDValue InHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v8i16, In,
20681 DAG.getIntPtrConstant(8, DL));
20682 return DAG.getNode(X86ISD::PACKUS, DL, VT, InLo, InHi);
20683 }
20685 llvm_unreachable("All 256->128 cases should have been handled above!");
20686 }
20688 SDValue X86TargetLowering::LowerFP_TO_INT(SDValue Op, SelectionDAG &DAG) const {
20689 bool IsStrict = Op->isStrictFPOpcode();
20690 bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT ||
20691 Op.getOpcode() == ISD::STRICT_FP_TO_SINT;
20692 MVT VT = Op->getSimpleValueType(0);
20693 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
20694 MVT SrcVT = Src.getSimpleValueType();
20695 SDLoc dl(Op);
20697 if (VT.isVector()) {
20698 if (VT == MVT::v2i1 && SrcVT == MVT::v2f64) {
20699 MVT ResVT = MVT::v4i32;
20700 MVT TruncVT = MVT::v4i1;
20701 unsigned Opc;
20702 if (IsStrict)
20703 Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
20704 else
20705 Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
20707 if (!IsSigned && !Subtarget.hasVLX()) {
20708 assert(Subtarget.useAVX512Regs() && "Unexpected features!");
20709 // Widen to 512-bits.
20710 ResVT = MVT::v8i32;
20711 TruncVT = MVT::v8i1;
20712 Opc = Op.getOpcode();
20713 // Need to concat with zero vector for strict fp to avoid spurious
20714 // exceptions.
20715 // TODO: Should we just do this for non-strict as well?
20716 SDValue Tmp = IsStrict ? DAG.getConstantFP(0.0, dl, MVT::v8f64)
20717 : DAG.getUNDEF(MVT::v8f64);
20718 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8f64, Tmp, Src,
20719 DAG.getIntPtrConstant(0, dl));
20721 SDValue Res, Chain;
20722 if (IsStrict) {
20723 Res =
20724 DAG.getNode(Opc, dl, {ResVT, MVT::Other}, {Op->getOperand(0), Src});
20725 Chain = Res.getValue(1);
20726 } else {
20727 Res = DAG.getNode(Opc, dl, ResVT, Src);
20728 }
20730 Res = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Res);
20731 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i1, Res,
20732 DAG.getIntPtrConstant(0, dl));
20733 if (IsStrict)
20734 return DAG.getMergeValues({Res, Chain}, dl);
20735 return Res;
20736 }
20738 // v8f64->v8i32 is legal, but we need v8i32 to be custom for v8f32.
20739 if (VT == MVT::v8i32 && SrcVT == MVT::v8f64) {
20740 assert(!IsSigned && "Expected unsigned conversion!");
20741 assert(Subtarget.useAVX512Regs() && "Requires avx512f");
20742 return Op;
20743 }
20745 // Widen vXi32 fp_to_uint with avx512f to 512-bit source.
20746 if ((VT == MVT::v4i32 || VT == MVT::v8i32) &&
20747 (SrcVT == MVT::v4f64 || SrcVT == MVT::v4f32 || SrcVT == MVT::v8f32)) {
20748 assert(!IsSigned && "Expected unsigned conversion!");
20749 assert(Subtarget.useAVX512Regs() && !Subtarget.hasVLX() &&
20750 "Unexpected features!");
20751 MVT WideVT = SrcVT == MVT::v4f64 ? MVT::v8f64 : MVT::v16f32;
20752 MVT ResVT = SrcVT == MVT::v4f64 ? MVT::v8i32 : MVT::v16i32;
20753 // Need to concat with zero vector for strict fp to avoid spurious
20754 // exceptions.
20755 // TODO: Should we just do this for non-strict as well?
20756 SDValue Tmp =
20757 IsStrict ? DAG.getConstantFP(0.0, dl, WideVT) : DAG.getUNDEF(WideVT);
20758 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Tmp, Src,
20759 DAG.getIntPtrConstant(0, dl));
20761 SDValue Res, Chain;
20762 if (IsStrict) {
20763 Res = DAG.getNode(ISD::STRICT_FP_TO_UINT, dl, {ResVT, MVT::Other},
20764 {Op->getOperand(0), Src});
20765 Chain = Res.getValue(1);
20766 } else {
20767 Res = DAG.getNode(ISD::FP_TO_UINT, dl, ResVT, Src);
20768 }
20770 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
20771 DAG.getIntPtrConstant(0, dl));
20773 if (IsStrict)
20774 return DAG.getMergeValues({Res, Chain}, dl);
20775 return Res;
20776 }
20778 // Widen vXi64 fp_to_uint/fp_to_sint with avx512dq to 512-bit source.
20779 if ((VT == MVT::v2i64 || VT == MVT::v4i64) &&
20780 (SrcVT == MVT::v2f64 || SrcVT == MVT::v4f64 || SrcVT == MVT::v4f32)) {
20781 assert(Subtarget.useAVX512Regs() && Subtarget.hasDQI() &&
20782 !Subtarget.hasVLX() && "Unexpected features!");
20783 MVT WideVT = SrcVT == MVT::v4f32 ? MVT::v8f32 : MVT::v8f64;
20784 // Need to concat with zero vector for strict fp to avoid spurious
20785 // exceptions.
20786 // TODO: Should we just do this for non-strict as well?
20787 SDValue Tmp =
20788 IsStrict ? DAG.getConstantFP(0.0, dl, WideVT) : DAG.getUNDEF(WideVT);
20789 Src = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, WideVT, Tmp, Src,
20790 DAG.getIntPtrConstant(0, dl));
20792 SDValue Res, Chain;
20793 if (IsStrict) {
20794 Res = DAG.getNode(Op.getOpcode(), dl, {MVT::v8i64, MVT::Other},
20795 {Op->getOperand(0), Src});
20796 Chain = Res.getValue(1);
20797 } else {
20798 Res = DAG.getNode(Op.getOpcode(), dl, MVT::v8i64, Src);
20799 }
20801 Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Res,
20802 DAG.getIntPtrConstant(0, dl));
20804 if (IsStrict)
20805 return DAG.getMergeValues({Res, Chain}, dl);
20806 return Res;
20807 }
20809 if (VT == MVT::v2i64 && SrcVT == MVT::v2f32) {
20810 if (!Subtarget.hasVLX()) {
20811 // Non-strict nodes without VLX can be widened to v4f32->v4i64 by the type
20812 // legalizer and then widened again by vector op legalization.
20813 if (!IsStrict)
20814 return SDValue();
20816 SDValue Zero = DAG.getConstantFP(0.0, dl, MVT::v2f32);
20817 SDValue Tmp = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8f32,
20818 {Src, Zero, Zero, Zero});
20819 Tmp = DAG.getNode(Op.getOpcode(), dl, {MVT::v8i64, MVT::Other},
20820 {Op->getOperand(0), Tmp});
20821 SDValue Chain = Tmp.getValue(1);
20822 Tmp = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2i64, Tmp,
20823 DAG.getIntPtrConstant(0, dl));
20825 return DAG.getMergeValues({Tmp, Chain}, dl);
20826 }
20829 assert(Subtarget.hasDQI() && Subtarget.hasVLX() && "Requires AVX512DQVL");
20830 SDValue Tmp = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
20831 DAG.getUNDEF(MVT::v2f32));
20832 if (IsStrict) {
20833 unsigned Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI
20834 : X86ISD::STRICT_CVTTP2UI;
20835 return DAG.getNode(Opc, dl, {VT, MVT::Other}, {Op->getOperand(0), Tmp});
20836 }
20837 unsigned Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
20838 return DAG.getNode(Opc, dl, VT, Tmp);
20839 }
20841 return SDValue();
20842 }
20844 assert(!VT.isVector());
20846 bool UseSSEReg = isScalarFPTypeInSSEReg(SrcVT);
20848 if (!IsSigned && UseSSEReg) {
20849 // Conversions from f32/f64 with AVX512 should be legal.
20850 if (Subtarget.hasAVX512())
20851 return Op;
20853 // Use default expansion for i64.
20854 if (VT == MVT::i64)
20855 return SDValue();
20857 assert(VT == MVT::i32 && "Unexpected VT!");
20859 // Promote i32 to i64 and use a signed operation on 64-bit targets.
20860 // FIXME: This does not generate an invalid exception if the input does not
20861 // fit in i32. PR44019
20862 if (Subtarget.is64Bit()) {
20863 SDValue Res, Chain;
20864 if (IsStrict) {
20865 Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { MVT::i64, MVT::Other},
20866 { Op.getOperand(0), Src });
20867 Chain = Res.getValue(1);
20868 } else
20869 Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i64, Src);
20871 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20872 if (IsStrict)
20873 return DAG.getMergeValues({ Res, Chain }, dl);
20874 return Res;
20875 }
20877 // Use default expansion for SSE1/2 targets without SSE3. With SSE3 we can
20878 // use fisttp which will be handled later.
20879 if (!Subtarget.hasSSE3())
20880 return SDValue();
20883 // Promote i16 to i32 if we can use a SSE operation or the type is f128.
20884 // FIXME: This does not generate an invalid exception if the input does not
20885 // fit in i16. PR44019
20886 if (VT == MVT::i16 && (UseSSEReg || SrcVT == MVT::f128)) {
20887 assert(IsSigned && "Expected i16 FP_TO_UINT to have been promoted!");
20888 SDValue Res, Chain;
20889 if (IsStrict) {
20890 Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, { MVT::i32, MVT::Other},
20891 { Op.getOperand(0), Src });
20892 Chain = Res.getValue(1);
20893 } else
20894 Res = DAG.getNode(ISD::FP_TO_SINT, dl, MVT::i32, Src);
20896 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
20897 if (IsStrict)
20898 return DAG.getMergeValues({ Res, Chain }, dl);
20899 return Res;
20900 }
20902 // If this is a FP_TO_SINT using SSEReg we're done.
20903 if (UseSSEReg && IsSigned)
20904 return Op;
20906 // fp128 needs to use a libcall.
20907 if (SrcVT == MVT::f128) {
20908 RTLIB::Libcall LC;
20909 if (IsSigned)
20910 LC = RTLIB::getFPTOSINT(SrcVT, VT);
20911 else
20912 LC = RTLIB::getFPTOUINT(SrcVT, VT);
20914 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
20915 MakeLibCallOptions CallOptions;
20916 std::pair<SDValue, SDValue> Tmp = makeLibCall(DAG, LC, VT, Src, CallOptions,
20917 dl, Chain);
20919 if (IsStrict)
20920 return DAG.getMergeValues({ Tmp.first, Tmp.second }, dl);
20922 return Tmp.first;
20923 }
20925 // Fall back to X87.
20926 SDValue Chain;
20927 if (SDValue V = FP_TO_INTHelper(Op, DAG, IsSigned, Chain)) {
20928 if (IsStrict)
20929 return DAG.getMergeValues({V, Chain}, dl);
20930 return V;
20931 }
20933 llvm_unreachable("Expected FP_TO_INTHelper to handle all remaining cases.");
20934 }
20936 SDValue X86TargetLowering::LowerLRINT_LLRINT(SDValue Op,
20937 SelectionDAG &DAG) const {
20938 SDValue Src = Op.getOperand(0);
20939 MVT SrcVT = Src.getSimpleValueType();
20941 // If the source is in an SSE register, the node is Legal.
20942 if (isScalarFPTypeInSSEReg(SrcVT))
20943 return Op;
20945 return LRINT_LLRINTHelper(Op.getNode(), DAG);
20946 }
20948 SDValue X86TargetLowering::LRINT_LLRINTHelper(SDNode *N,
20949 SelectionDAG &DAG) const {
20950 EVT DstVT = N->getValueType(0);
20951 SDValue Src = N->getOperand(0);
20952 EVT SrcVT = Src.getValueType();
20954 if (SrcVT != MVT::f32 && SrcVT != MVT::f64 && SrcVT != MVT::f80) {
20955 // f16 must be promoted before using the lowering in this routine.
20956 // fp128 does not use this lowering.
20957 return SDValue();
20958 }
20960 SDLoc DL(N);
20961 SDValue Chain = DAG.getEntryNode();
20963 bool UseSSE = isScalarFPTypeInSSEReg(SrcVT);
20965 // If we're converting from SSE, the stack slot needs to hold both types.
20966 // Otherwise it only needs to hold the DstVT.
20967 EVT OtherVT = UseSSE ? SrcVT : DstVT;
20968 SDValue StackPtr = DAG.CreateStackTemporary(DstVT, OtherVT);
20969 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
20970 MachinePointerInfo MPI =
20971 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
20973 if (UseSSE) {
20974 assert(DstVT == MVT::i64 && "Invalid LRINT/LLRINT to lower!");
20975 Chain = DAG.getStore(Chain, DL, Src, StackPtr, MPI);
20976 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
20977 SDValue Ops[] = { Chain, StackPtr };
20979 Src = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, SrcVT, MPI,
20980 /*Align*/ None, MachineMemOperand::MOLoad);
20981 Chain = Src.getValue(1);
20982 }
20984 SDValue StoreOps[] = { Chain, Src, StackPtr };
20985 Chain = DAG.getMemIntrinsicNode(X86ISD::FIST, DL, DAG.getVTList(MVT::Other),
20986 StoreOps, DstVT, MPI, /*Align*/ None,
20987 MachineMemOperand::MOStore);
20989 return DAG.getLoad(DstVT, DL, Chain, StackPtr, MPI);
20990 }
20992 SDValue X86TargetLowering::LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) const {
20993 bool IsStrict = Op->isStrictFPOpcode();
20995 SDLoc DL(Op);
20996 MVT VT = Op.getSimpleValueType();
20997 SDValue In = Op.getOperand(IsStrict ? 1 : 0);
20998 MVT SVT = In.getSimpleValueType();
21000 if (VT == MVT::f128) {
21001 RTLIB::Libcall LC = RTLIB::getFPEXT(SVT, VT);
21002 return LowerF128Call(Op, DAG, LC);
21003 }
21005 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
21007 SDValue Res =
21008 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32, In, DAG.getUNDEF(SVT));
21009 if (IsStrict)
21010 return DAG.getNode(X86ISD::STRICT_VFPEXT, DL, {VT, MVT::Other},
21011 {Op->getOperand(0), Res});
21012 return DAG.getNode(X86ISD::VFPEXT, DL, VT, Res);
21013 }
21015 SDValue X86TargetLowering::LowerFP_ROUND(SDValue Op, SelectionDAG &DAG) const {
21016 bool IsStrict = Op->isStrictFPOpcode();
21018 MVT VT = Op.getSimpleValueType();
21019 SDValue In = Op.getOperand(IsStrict ? 1 : 0);
21020 MVT SVT = In.getSimpleValueType();
21022 // It's legal except when f128 is involved
21023 if (SVT != MVT::f128)
21024 return Op;
21026 RTLIB::Libcall LC = RTLIB::getFPROUND(SVT, VT);
21028 // FP_ROUND node has a second operand indicating whether it is known to be
21029 // precise. That doesn't take part in the LibCall so we can't directly use
21030 // LowerF128Call.
21032 SDLoc dl(Op);
21033 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
21034 MakeLibCallOptions CallOptions;
21035 std::pair<SDValue, SDValue> Tmp = makeLibCall(DAG, LC, VT, In, CallOptions,
21036 dl, Chain);
21038 if (IsStrict)
21039 return DAG.getMergeValues({ Tmp.first, Tmp.second }, dl);
21041 return Tmp.first;
21042 }
21044 static SDValue LowerFP16_TO_FP(SDValue Op, SelectionDAG &DAG) {
21045 bool IsStrict = Op->isStrictFPOpcode();
21046 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
21047 assert(Src.getValueType() == MVT::i16 && Op.getValueType() == MVT::f32 &&
21048 "Unexpected VT!");
21050 SDLoc dl(Op);
21051 SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16,
21052 DAG.getConstant(0, dl, MVT::v8i16), Src,
21053 DAG.getIntPtrConstant(0, dl));
21055 SDValue Chain;
21056 if (IsStrict) {
21057 Res = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {MVT::v4f32, MVT::Other},
21058 {Op.getOperand(0), Res});
21059 Chain = Res.getValue(1);
21060 } else {
21061 Res = DAG.getNode(X86ISD::CVTPH2PS, dl, MVT::v4f32, Res);
21062 }
21064 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
21065 DAG.getIntPtrConstant(0, dl));
21067 if (IsStrict)
21068 return DAG.getMergeValues({Res, Chain}, dl);
21070 return Res;
21071 }
21073 static SDValue LowerFP_TO_FP16(SDValue Op, SelectionDAG &DAG) {
21074 bool IsStrict = Op->isStrictFPOpcode();
21075 SDValue Src = Op.getOperand(IsStrict ? 1 : 0);
21076 assert(Src.getValueType() == MVT::f32 && Op.getValueType() == MVT::i16 &&
21077 "Unexpected VT!");
21079 SDLoc dl(Op);
21080 SDValue Res, Chain;
21081 if (IsStrict) {
21082 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v4f32,
21083 DAG.getConstantFP(0, dl, MVT::v4f32), Src,
21084 DAG.getIntPtrConstant(0, dl));
21085 Res = DAG.getNode(
21086 X86ISD::STRICT_CVTPS2PH, dl, {MVT::v8i16, MVT::Other},
21087 {Op.getOperand(0), Res, DAG.getTargetConstant(4, dl, MVT::i32)});
21088 Chain = Res.getValue(1);
21089 } else {
21090 // FIXME: Should we use zeros for upper elements for non-strict?
21091 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, Src);
21092 Res = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Res,
21093 DAG.getTargetConstant(4, dl, MVT::i32));
21094 }
21096 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Res,
21097 DAG.getIntPtrConstant(0, dl));
21099 if (IsStrict)
21100 return DAG.getMergeValues({Res, Chain}, dl);
21102 return Res;
21103 }
21105 /// Depending on uarch and/or optimizing for size, we might prefer to use a
21106 /// vector operation in place of the typical scalar operation.
21107 static SDValue lowerAddSubToHorizontalOp(SDValue Op, SelectionDAG &DAG,
21108 const X86Subtarget &Subtarget) {
21109 // If both operands have other uses, this is probably not profitable.
21110 SDValue LHS = Op.getOperand(0);
21111 SDValue RHS = Op.getOperand(1);
21112 if (!LHS.hasOneUse() && !RHS.hasOneUse())
21113 return SDValue();
21115 // FP horizontal add/sub were added with SSE3. Integer with SSSE3.
21116 bool IsFP = Op.getSimpleValueType().isFloatingPoint();
21117 if (IsFP && !Subtarget.hasSSE3())
21118 return SDValue();
21119 if (!IsFP && !Subtarget.hasSSSE3())
21120 return SDValue();
21122 // Extract from a common vector.
21123 if (LHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
21124 RHS.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
21125 LHS.getOperand(0) != RHS.getOperand(0) ||
21126 !isa<ConstantSDNode>(LHS.getOperand(1)) ||
21127 !isa<ConstantSDNode>(RHS.getOperand(1)) ||
21128 !shouldUseHorizontalOp(true, DAG, Subtarget))
21129 return SDValue();
21131 // Allow commuted 'hadd' ops.
21132 // TODO: Allow commuted (f)sub by negating the result of (F)HSUB?
21133 unsigned HOpcode;
21134 switch (Op.getOpcode()) {
21135 case ISD::ADD: HOpcode = X86ISD::HADD; break;
21136 case ISD::SUB: HOpcode = X86ISD::HSUB; break;
21137 case ISD::FADD: HOpcode = X86ISD::FHADD; break;
21138 case ISD::FSUB: HOpcode = X86ISD::FHSUB; break;
21139 default:
21140 llvm_unreachable("Trying to lower unsupported opcode to horizontal op");
21141 }
21142 unsigned LExtIndex = LHS.getConstantOperandVal(1);
21143 unsigned RExtIndex = RHS.getConstantOperandVal(1);
21144 if ((LExtIndex & 1) == 1 && (RExtIndex & 1) == 0 &&
21145 (HOpcode == X86ISD::HADD || HOpcode == X86ISD::FHADD))
21146 std::swap(LExtIndex, RExtIndex);
21148 if ((LExtIndex & 1) != 0 || RExtIndex != (LExtIndex + 1))
21149 return SDValue();
21151 SDValue X = LHS.getOperand(0);
21152 EVT VecVT = X.getValueType();
21153 unsigned BitWidth = VecVT.getSizeInBits();
21154 unsigned NumLanes = BitWidth / 128;
21155 unsigned NumEltsPerLane = VecVT.getVectorNumElements() / NumLanes;
21156 assert((BitWidth == 128 || BitWidth == 256 || BitWidth == 512) &&
21157 "Not expecting illegal vector widths here");
21159 // Creating a 256-bit horizontal op would be wasteful, and there is no 512-bit
21160 // equivalent, so extract the 256/512-bit source op to 128-bit if we can.
21161 SDLoc DL(Op);
21162 if (BitWidth == 256 || BitWidth == 512) {
21163 unsigned LaneIdx = LExtIndex / NumEltsPerLane;
21164 X = extract128BitVector(X, LaneIdx * NumEltsPerLane, DAG, DL);
21165 LExtIndex %= NumEltsPerLane;
21166 }
21168 // add (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hadd X, X), 0
21169 // add (extractelt (X, 1), extractelt (X, 0)) --> extractelt (hadd X, X), 0
21170 // add (extractelt (X, 2), extractelt (X, 3)) --> extractelt (hadd X, X), 1
21171 // sub (extractelt (X, 0), extractelt (X, 1)) --> extractelt (hsub X, X), 0
21172 SDValue HOp = DAG.getNode(HOpcode, DL, X.getValueType(), X, X);
21173 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, Op.getSimpleValueType(), HOp,
21174 DAG.getIntPtrConstant(LExtIndex / 2, DL));
21175 }
21177 /// Depending on uarch and/or optimizing for size, we might prefer to use a
21178 /// vector operation in place of the typical scalar operation.
21179 SDValue X86TargetLowering::lowerFaddFsub(SDValue Op, SelectionDAG &DAG) const {
21180 assert((Op.getValueType() == MVT::f32 || Op.getValueType() == MVT::f64) &&
21181 "Only expecting float/double");
21182 return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
21183 }
21185 /// ISD::FROUND is defined to round to nearest with ties rounding away from 0.
21186 /// This mode isn't supported in hardware on X86. But as long as we aren't
21187 /// compiling with trapping math, we can emulate this with
21188 /// floor(X + copysign(nextafter(0.5, 0.0), X)).
21189 static SDValue LowerFROUND(SDValue Op, SelectionDAG &DAG) {
21190 SDValue N0 = Op.getOperand(0);
21191 SDLoc dl(Op);
21192 MVT VT = Op.getSimpleValueType();
21194 // N0 += copysign(nextafter(0.5, 0.0), N0)
21195 const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
21196 bool Ignored;
21197 APFloat Point5Pred = APFloat(0.5f);
21198 Point5Pred.convert(Sem, APFloat::rmNearestTiesToEven, &Ignored);
21199 Point5Pred.next(/*nextDown*/true);
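// Adding exactly 0.5 could round up to the next integer: e.g. for
// X == 0.5 - 2^-25 in f32, X + 0.5f rounds to 1.0f and would floor to 1
// instead of 0. Using the largest value strictly below 0.5 avoids that.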
21201 SDValue Adder = DAG.getNode(ISD::FCOPYSIGN, dl, VT,
21202 DAG.getConstantFP(Point5Pred, dl, VT), N0);
21203 N0 = DAG.getNode(ISD::FADD, dl, VT, N0, Adder);
21205 // Truncate the result to remove fraction.
21206 return DAG.getNode(ISD::FTRUNC, dl, VT, N0);
21207 }
21209 /// The only differences between FABS and FNEG are the mask and the logic op.
21210 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
21211 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
21212 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
21213 "Wrong opcode for lowering FABS or FNEG.");
21215 bool IsFABS = (Op.getOpcode() == ISD::FABS);
21217 // If this is a FABS and it has an FNEG user, bail out to fold the combination
21218 // into an FNABS. We'll lower the FABS after that if it is still in use.
21219 if (IsFABS)
21220 for (SDNode *User : Op->uses())
21221 if (User->getOpcode() == ISD::FNEG)
21222 return Op;
21224 SDLoc dl(Op);
21225 MVT VT = Op.getSimpleValueType();
21227 bool IsF128 = (VT == MVT::f128);
21228 assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
21229 VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
21230 VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
21231 "Unexpected type in LowerFABSorFNEG");
21233 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
21234 // decide if we should generate a 16-byte constant mask when we only need 4 or
21235 // 8 bytes for the scalar case.
21237 // There are no scalar bitwise logical SSE/AVX instructions, so we
21238 // generate a 16-byte vector constant and logic op even for the scalar case.
21239 // Using a 16-byte mask allows folding the load of the mask with
21240 // the logic op, so it can save (~4 bytes) on code size.
21241 bool IsFakeVector = !VT.isVector() && !IsF128;
21242 MVT LogicVT = VT;
21243 if (IsFakeVector)
21244 LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
21246 unsigned EltBits = VT.getScalarSizeInBits();
21247 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
21248 APInt MaskElt = IsFABS ? APInt::getSignedMaxValue(EltBits) :
21249 APInt::getSignMask(EltBits);
21250 const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
21251 SDValue Mask = DAG.getConstantFP(APFloat(Sem, MaskElt), dl, LogicVT);
21253 SDValue Op0 = Op.getOperand(0);
21254 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
21255 unsigned LogicOp = IsFABS ? X86ISD::FAND :
21256 IsFNABS ? X86ISD::FOR :
21257 X86ISD::FXOR;
21258 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
21260 if (VT.isVector() || IsF128)
21261 return DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
21263 // For the scalar case extend to a 128-bit vector, perform the logic op,
21264 // and extract the scalar result back out.
21265 Operand = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Operand);
21266 SDValue LogicNode = DAG.getNode(LogicOp, dl, LogicVT, Operand, Mask);
21267 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, LogicNode,
21268 DAG.getIntPtrConstant(0, dl));
21269 }
21271 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
21272 SDValue Mag = Op.getOperand(0);
21273 SDValue Sign = Op.getOperand(1);
21274 SDLoc dl(Op);
21276 // If the sign operand is smaller, extend it first.
21277 MVT VT = Op.getSimpleValueType();
21278 if (Sign.getSimpleValueType().bitsLT(VT))
21279 Sign = DAG.getNode(ISD::FP_EXTEND, dl, VT, Sign);
21281 // And if it is bigger, shrink it first.
21282 if (Sign.getSimpleValueType().bitsGT(VT))
21283 Sign = DAG.getNode(ISD::FP_ROUND, dl, VT, Sign, DAG.getIntPtrConstant(1, dl));
21285 // At this point the operands and the result should have the same
21286 // type, and that won't be f80 since that is not custom lowered.
21287 bool IsF128 = (VT == MVT::f128);
21288 assert((VT == MVT::f64 || VT == MVT::f32 || VT == MVT::f128 ||
21289 VT == MVT::v2f64 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
21290 VT == MVT::v8f32 || VT == MVT::v8f64 || VT == MVT::v16f32) &&
21291 "Unexpected type in LowerFCOPYSIGN");
21293 const fltSemantics &Sem = SelectionDAG::EVTToAPFloatSemantics(VT);
21295 // Perform all scalar logic operations as 16-byte vectors because there are no
21296 // scalar FP logic instructions in SSE.
21297 // TODO: This isn't necessary. If we used scalar types, we might avoid some
21298 // unnecessary splats, but we might miss load folding opportunities. Should
21299 // this decision be based on OptimizeForSize?
21300 bool IsFakeVector = !VT.isVector() && !IsF128;
21301 MVT LogicVT = VT;
21302 if (IsFakeVector)
21303 LogicVT = (VT == MVT::f64) ? MVT::v2f64 : MVT::v4f32;
21305 // The mask constants are automatically splatted for vector types.
21306 unsigned EltSizeInBits = VT.getScalarSizeInBits();
21307 SDValue SignMask = DAG.getConstantFP(
21308 APFloat(Sem, APInt::getSignMask(EltSizeInBits)), dl, LogicVT);
21309 SDValue MagMask = DAG.getConstantFP(
21310 APFloat(Sem, APInt::getSignedMaxValue(EltSizeInBits)), dl, LogicVT);
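// copysign(mag, sign) == (mag & MagMask) | (sign & SignMask), where MagMask
// is the complement of the sign bit; the FAND/FOR nodes below compute this
// in the FP vector domain so the mask loads can fold into the logic ops.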
21312 // First, clear all bits but the sign bit from the second operand (sign).
21313 if (IsFakeVector)
21314 Sign = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Sign);
21315 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, LogicVT, Sign, SignMask);
21317 // Next, clear the sign bit from the first operand (magnitude).
21318 // TODO: If we had general constant folding for FP logic ops, this check
21319 // wouldn't be necessary.
21320 SDValue MagBits;
21321 if (ConstantFPSDNode *Op0CN = isConstOrConstSplatFP(Mag)) {
21322 APFloat APF = Op0CN->getValueAPF();
21323 APF.clearSign();
21324 MagBits = DAG.getConstantFP(APF, dl, LogicVT);
21325 } else {
21326 // If the magnitude operand wasn't a constant, we need to AND out the sign.
21327 if (IsFakeVector)
21328 Mag = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LogicVT, Mag);
21329 MagBits = DAG.getNode(X86ISD::FAND, dl, LogicVT, Mag, MagMask);
21330 }
21332 // OR the magnitude value with the sign bit.
21333 SDValue Or = DAG.getNode(X86ISD::FOR, dl, LogicVT, MagBits, SignBit);
21334 return !IsFakeVector ? Or : DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Or,
21335 DAG.getIntPtrConstant(0, dl));
21336 }
21338 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
21339 SDValue N0 = Op.getOperand(0);
21340 SDLoc dl(Op);
21341 MVT VT = Op.getSimpleValueType();
21343 MVT OpVT = N0.getSimpleValueType();
21344 assert((OpVT == MVT::f32 || OpVT == MVT::f64) &&
21345 "Unexpected type for FGETSIGN");
21347 // Lower ISD::FGETSIGN to (AND (X86ISD::MOVMSK ...) 1).
21348 MVT VecVT = (OpVT == MVT::f32 ? MVT::v4f32 : MVT::v2f64);
21349 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, N0);
21350 Res = DAG.getNode(X86ISD::MOVMSK, dl, MVT::i32, Res);
21351 Res = DAG.getZExtOrTrunc(Res, dl, VT);
21352 Res = DAG.getNode(ISD::AND, dl, VT, Res, DAG.getConstant(1, dl, VT));
21353 return Res;
21354 }
21356 /// Helper for creating a X86ISD::SETCC node.
21357 static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
21358 SelectionDAG &DAG) {
21359 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
21360 DAG.getTargetConstant(Cond, dl, MVT::i8), EFLAGS);
21361 }
21363 /// Helper for matching OR(EXTRACTELT(X,0),OR(EXTRACTELT(X,1),...))
21364 /// style scalarized (associative) reduction patterns. Partial reductions
21365 /// are supported when the pointer SrcMask is non-null.
21366 /// TODO - move this to SelectionDAG?
21367 static bool matchScalarReduction(SDValue Op, ISD::NodeType BinOp,
21368 SmallVectorImpl<SDValue> &SrcOps,
21369 SmallVectorImpl<APInt> *SrcMask = nullptr) {
21370 SmallVector<SDValue, 8> Opnds;
21371 DenseMap<SDValue, APInt> SrcOpMap;
21372 EVT VT = MVT::Other;
21374 // Recognize a special case where a vector is casted into wide integer to
21375 // test all 0s.
21376 assert(Op.getOpcode() == unsigned(BinOp) &&
21377 "Unexpected bit reduction opcode");
21378 Opnds.push_back(Op.getOperand(0));
21379 Opnds.push_back(Op.getOperand(1));
21381 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
21382 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
21383 // BFS traverse all BinOp operands.
21384 if (I->getOpcode() == unsigned(BinOp)) {
21385 Opnds.push_back(I->getOperand(0));
21386 Opnds.push_back(I->getOperand(1));
21387 // Re-evaluate the number of nodes to be traversed.
21388 e += 2; // 2 more nodes (LHS and RHS) are pushed.
21392 // Quit if a non-EXTRACT_VECTOR_ELT
21393 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
21396 // Quit if without a constant index.
21397 auto *Idx = dyn_cast<ConstantSDNode>(I->getOperand(1));
21401 SDValue Src = I->getOperand(0);
21402 DenseMap<SDValue, APInt>::iterator M = SrcOpMap.find(Src);
21403 if (M == SrcOpMap.end()) {
21404 VT = Src.getValueType();
21405 // Quit if not the same type.
21406 if (SrcOpMap.begin() != SrcOpMap.end() &&
21407 VT != SrcOpMap.begin()->first.getValueType())
21409 unsigned NumElts = VT.getVectorNumElements();
21410 APInt EltCount = APInt::getNullValue(NumElts);
21411 M = SrcOpMap.insert(std::make_pair(Src, EltCount)).first;
21412 SrcOps.push_back(Src);
21415 // Quit if element already used.
21416 unsigned CIdx = Idx->getZExtValue();
21417 if (M->second[CIdx])
21419 M->second.setBit(CIdx);
21423 // Collect the source partial masks.
21424 for (SDValue &SrcOp : SrcOps)
21425 SrcMask->push_back(SrcOpMap[SrcOp]);
21427 // Quit if not all elements are used.
21428 for (DenseMap<SDValue, APInt>::const_iterator I = SrcOpMap.begin(),
21429 E = SrcOpMap.end();
21431 if (!I->second.isAllOnesValue())
// Helper function for comparing all bits of a vector against zero.
static SDValue LowerVectorAllZero(const SDLoc &DL, SDValue V, ISD::CondCode CC,
                                  const APInt &Mask,
                                  const X86Subtarget &Subtarget,
                                  SelectionDAG &DAG, X86::CondCode &X86CC) {
  EVT VT = V.getValueType();
  assert(Mask.getBitWidth() == VT.getScalarSizeInBits() &&
         "Element Mask vs Vector bitwidth mismatch");

  assert((CC == ISD::SETEQ || CC == ISD::SETNE) && "Unsupported ISD::CondCode");
  X86CC = (CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE);

  auto MaskBits = [&](SDValue Src) {
    if (Mask.isAllOnesValue())
      return Src;
    EVT SrcVT = Src.getValueType();
    SDValue MaskValue = DAG.getConstant(Mask, DL, SrcVT);
    return DAG.getNode(ISD::AND, DL, SrcVT, Src, MaskValue);
  };

  // For sub-128-bit vector, cast to (legal) integer and compare with zero.
  if (VT.getSizeInBits() < 128) {
    EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
    if (!DAG.getTargetLoweringInfo().isTypeLegal(IntVT))
      return SDValue();
    return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
                       DAG.getBitcast(IntVT, MaskBits(V)),
                       DAG.getConstant(0, DL, IntVT));
  }

  // Quit if not splittable to 128/256-bit vector.
  if (!isPowerOf2_32(VT.getSizeInBits()))
    return SDValue();

  // Split down to 128/256-bit vector.
  unsigned TestSize = Subtarget.hasAVX() ? 256 : 128;
  while (VT.getSizeInBits() > TestSize) {
    auto Split = DAG.SplitVector(V, DL);
    VT = Split.first.getValueType();
    V = DAG.getNode(ISD::OR, DL, VT, Split.first, Split.second);
  }
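  // e.g. without AVX a v4i64 input is split once: the two v2i64 halves are
  // OR'd together and the all-zero test continues on a single 128-bit value.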
  bool UsePTEST = Subtarget.hasSSE41();
  if (UsePTEST) {
    MVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
    V = DAG.getBitcast(TestVT, MaskBits(V));
    return DAG.getNode(X86ISD::PTEST, DL, MVT::i32, V, V);
  }

  // Without PTEST, a masked v2i64 or-reduction is not faster than
  // scalarization.
  if (!Mask.isAllOnesValue() && VT.getScalarSizeInBits() > 32)
    return SDValue();
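  // Fallback: compare every byte against zero. MOVMSK gathers the 16 per-byte
  // results, so V is all-zero iff the resulting mask equals 0xFFFF.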
  V = DAG.getBitcast(MVT::v16i8, MaskBits(V));
  V = DAG.getNode(X86ISD::PCMPEQ, DL, MVT::v16i8, V,
                  getZeroVector(MVT::v16i8, Subtarget, DAG, DL));
  V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
  return DAG.getNode(X86ISD::CMP, DL, MVT::i32, V,
                     DAG.getConstant(0xFFFF, DL, MVT::i32));
}
// Check whether an OR'd reduction tree is PTEST-able, or if we can fall back
// to CMP(MOVMSK(PCMPEQB(X,0))).
static SDValue MatchVectorAllZeroTest(SDValue Op, ISD::CondCode CC,
                                      const SDLoc &DL,
                                      const X86Subtarget &Subtarget,
                                      SelectionDAG &DAG, SDValue &X86CC) {
  assert((CC == ISD::SETEQ || CC == ISD::SETNE) && "Unsupported ISD::CondCode");

  if (!Subtarget.hasSSE2() || !Op->hasOneUse())
    return SDValue();

  // Check whether we're masking/truncating an OR-reduction result, in which
  // case track the masked bits.
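  // e.g. an i1 result truncated from an i64 OR-reduction only observes bit 0,
  // so Mask narrows to the low bit and the lowering may ignore the rest.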
  APInt Mask = APInt::getAllOnesValue(Op.getScalarValueSizeInBits());
  switch (Op.getOpcode()) {
  case ISD::TRUNCATE: {
    SDValue Src = Op.getOperand(0);
    Mask = APInt::getLowBitsSet(Src.getScalarValueSizeInBits(),
                                Op.getScalarValueSizeInBits());
    Op = Src;
    break;
  }
  case ISD::AND: {
    if (auto *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
      Mask = Cst->getAPIntValue();
      Op = Op.getOperand(0);
    }
    break;
  }
  }

  SmallVector<SDValue, 8> VecIns;
  if (Op.getOpcode() == ISD::OR && matchScalarReduction(Op, ISD::OR, VecIns)) {
    EVT VT = VecIns[0].getValueType();
    assert(llvm::all_of(VecIns,
                        [VT](SDValue V) { return VT == V.getValueType(); }) &&
           "Reduction source vector mismatch");

    // Quit if less than 128-bits or not splittable to 128/256-bit vector.
    if (VT.getSizeInBits() < 128 || !isPowerOf2_32(VT.getSizeInBits()))
      return SDValue();

    // If more than one full vector is evaluated, OR them first before PTEST.
    for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1;
         Slot += 2, e += 1) {
      // Each iteration will OR 2 nodes and append the result until there is
      // only 1 node left, i.e. the final OR'd value of all vectors.
      SDValue LHS = VecIns[Slot];
      SDValue RHS = VecIns[Slot + 1];
      VecIns.push_back(DAG.getNode(ISD::OR, DL, VT, LHS, RHS));
    }

    X86::CondCode CCode;
    if (SDValue V = LowerVectorAllZero(DL, VecIns.back(), CC, Mask, Subtarget,
                                       DAG, CCode)) {
      X86CC = DAG.getTargetConstant(CCode, DL, MVT::i8);
      return V;
    }
  }

  if (Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    ISD::NodeType BinOp;
    if (SDValue Match =
            DAG.matchBinOpReduction(Op.getNode(), BinOp, {ISD::OR})) {
      X86::CondCode CCode;
      if (SDValue V =
              LowerVectorAllZero(DL, Match, CC, Mask, Subtarget, DAG, CCode)) {
        X86CC = DAG.getTargetConstant(CCode, DL, MVT::i8);
        return V;
      }
    }
  }

  return SDValue();
}
/// Return true if \c Op has a use that doesn't just read flags.
static bool hasNonFlagsUse(SDValue Op) {
  for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
       ++UI) {
    SDNode *User = *UI;
    unsigned UOpNo = UI.getOperandNo();
    if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
      // Look past the truncate.
      UOpNo = User->use_begin().getOperandNo();
      User = *User->use_begin();
    }

    if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
        !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
      return true;
  }
  return false;
}
// Transform to an x86-specific ALU node with flags if there is a chance of
// using an RMW op or only the flags are used. Otherwise, leave
// the node alone and emit a 'cmp' or 'test' instruction.
static bool isProfitableToUseFlagOp(SDValue Op) {
  for (SDNode *U : Op->uses())
    if (U->getOpcode() != ISD::CopyToReg &&
        U->getOpcode() != ISD::SETCC &&
        U->getOpcode() != ISD::STORE)
      return false;

  return true;
}
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent.
static SDValue EmitTest(SDValue Op, unsigned X86CC, const SDLoc &dl,
                        SelectionDAG &DAG, const X86Subtarget &Subtarget) {
  // CF and OF aren't always set the way we want. Determine which
  // of these we need.
  bool NeedCF = false;
  bool NeedOF = false;
  switch (X86CC) {
  default: break;
  case X86::COND_A: case X86::COND_AE:
  case X86::COND_B: case X86::COND_BE:
    NeedCF = true;
    break;
  case X86::COND_G: case X86::COND_GE:
  case X86::COND_L: case X86::COND_LE:
  case X86::COND_O: case X86::COND_NO: {
    // Check if we really need to set the
    // Overflow flag. If NoSignedWrap is present
    // that is not actually needed.
    switch (Op->getOpcode()) {
    case ISD::ADD:
    case ISD::SUB:
    case ISD::MUL:
    case ISD::SHL:
      if (Op.getNode()->getFlags().hasNoSignedWrap())
        break;
      LLVM_FALLTHROUGH;
    default:
      NeedOF = true;
      break;
    }
    break;
  }
  }
  // See if we can use the EFLAGS value from the operand instead of
  // doing a separate TEST. TEST always sets OF and CF to 0, so unless
  // we prove that the arithmetic won't overflow, we can't use OF or CF.
  if (Op.getResNo() != 0 || NeedOF || NeedCF) {
    // Emit a CMP with 0, which is the TEST pattern.
    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                       DAG.getConstant(0, dl, Op.getValueType()));
  }
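  // From here on we try to reuse the EFLAGS produced by the arithmetic node
  // itself, e.g. so that 'add; test; jne' can be selected as just 'add; jne'.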
  unsigned Opcode = 0;
  unsigned NumOperands = 0;

  SDValue ArithOp = Op;

  // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
  // which may be the result of a CAST. We use the variable 'Op', which is the
  // non-casted variable when we check for possible users.
  switch (ArithOp.getOpcode()) {
  case ISD::AND:
    // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
    // because a TEST instruction will be better.
    if (!hasNonFlagsUse(Op))
      break;

    LLVM_FALLTHROUGH;
  case ISD::ADD:
  case ISD::SUB:
  case ISD::OR:
  case ISD::XOR:
    if (!isProfitableToUseFlagOp(Op))
      break;

    // Otherwise use a regular EFLAGS-setting instruction.
    switch (ArithOp.getOpcode()) {
    default: llvm_unreachable("unexpected operator!");
    case ISD::ADD: Opcode = X86ISD::ADD; break;
    case ISD::SUB: Opcode = X86ISD::SUB; break;
    case ISD::XOR: Opcode = X86ISD::XOR; break;
    case ISD::AND: Opcode = X86ISD::AND; break;
    case ISD::OR:  Opcode = X86ISD::OR;  break;
    }

    NumOperands = 2;
    break;
  case X86ISD::ADD:
  case X86ISD::SUB:
  case X86ISD::OR:
  case X86ISD::XOR:
  case X86ISD::AND:
    return SDValue(Op.getNode(), 1);
  case ISD::SSUBO:
  case ISD::USUBO: {
    // USUBO/SSUBO will become an X86ISD::SUB and we can use its Z flag.
    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
    return DAG.getNode(X86ISD::SUB, dl, VTs, Op->getOperand(0),
                       Op->getOperand(1)).getValue(1);
  }
  default:
    break;
  }

  if (Opcode == 0) {
    // Emit a CMP with 0, which is the TEST pattern.
    return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                       DAG.getConstant(0, dl, Op.getValueType()));
  }
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
  SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);

  SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
  DAG.ReplaceAllUsesOfValueWith(SDValue(Op.getNode(), 0), New);
  return SDValue(New.getNode(), 1);
}
/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent.
static SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
                       const SDLoc &dl, SelectionDAG &DAG,
                       const X86Subtarget &Subtarget) {
  if (isNullConstant(Op1))
    return EmitTest(Op0, X86CC, dl, DAG, Subtarget);

  EVT CmpVT = Op0.getValueType();

  assert((CmpVT == MVT::i8 || CmpVT == MVT::i16 ||
          CmpVT == MVT::i32 || CmpVT == MVT::i64) && "Unexpected VT!");

  // Only promote the compare up to I32 if it is a 16 bit operation
  // with an immediate. 16 bit immediates are to be avoided.
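  // (A 16-bit immediate requires an operand-size prefix; the resulting
  // length-changing prefix is slow to decode on many Intel cores, so widening
  // to 32 bits is usually a win.)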
  if (CmpVT == MVT::i16 && !Subtarget.isAtom() &&
      !DAG.getMachineFunction().getFunction().hasMinSize()) {
    ConstantSDNode *COp0 = dyn_cast<ConstantSDNode>(Op0);
    ConstantSDNode *COp1 = dyn_cast<ConstantSDNode>(Op1);
    // Don't do this if the immediate can fit in 8-bits.
    if ((COp0 && !COp0->getAPIntValue().isSignedIntN(8)) ||
        (COp1 && !COp1->getAPIntValue().isSignedIntN(8))) {
      unsigned ExtendOp =
          isX86CCSigned(X86CC) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
      if (X86CC == X86::COND_E || X86CC == X86::COND_NE) {
        // For equality comparisons try to use SIGN_EXTEND if the input was
        // truncated from something with enough sign bits.
        if (Op0.getOpcode() == ISD::TRUNCATE) {
          SDValue In = Op0.getOperand(0);
          unsigned EffBits =
              In.getScalarValueSizeInBits() - DAG.ComputeNumSignBits(In) + 1;
          if (EffBits <= 16)
            ExtendOp = ISD::SIGN_EXTEND;
        } else if (Op1.getOpcode() == ISD::TRUNCATE) {
          SDValue In = Op1.getOperand(0);
          unsigned EffBits =
              In.getScalarValueSizeInBits() - DAG.ComputeNumSignBits(In) + 1;
          if (EffBits <= 16)
            ExtendOp = ISD::SIGN_EXTEND;
        }
      }

      CmpVT = MVT::i32;
      Op0 = DAG.getNode(ExtendOp, dl, CmpVT, Op0);
      Op1 = DAG.getNode(ExtendOp, dl, CmpVT, Op1);
    }
  }

  // Try to shrink i64 compares if the input has enough zero bits.
  // FIXME: Do this for non-constant compares for constant on LHS?
  if (CmpVT == MVT::i64 && isa<ConstantSDNode>(Op1) && !isX86CCSigned(X86CC) &&
      Op0.hasOneUse() && // Hacky way to not break CSE opportunities with sub.
      cast<ConstantSDNode>(Op1)->getAPIntValue().getActiveBits() <= 32 &&
      DAG.MaskedValueIsZero(Op0, APInt::getHighBitsSet(64, 32))) {
    CmpVT = MVT::i32;
    Op0 = DAG.getNode(ISD::TRUNCATE, dl, CmpVT, Op0);
    Op1 = DAG.getNode(ISD::TRUNCATE, dl, CmpVT, Op1);
  }

  // 0-x == y --> x+y == 0
  // 0-x != y --> x+y != 0
  if (Op0.getOpcode() == ISD::SUB && isNullConstant(Op0.getOperand(0)) &&
      Op0.hasOneUse() && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
    SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
    SDValue Add = DAG.getNode(X86ISD::ADD, dl, VTs, Op0.getOperand(1), Op1);
    return Add.getValue(1);
  }

  // x == 0-y --> x+y == 0
  // x != 0-y --> x+y != 0
  if (Op1.getOpcode() == ISD::SUB && isNullConstant(Op1.getOperand(0)) &&
      Op1.hasOneUse() && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
    SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
    SDValue Add = DAG.getNode(X86ISD::ADD, dl, VTs, Op0, Op1.getOperand(1));
    return Add.getValue(1);
  }

  // Use SUB instead of CMP to enable CSE between SUB and CMP.
  SDVTList VTs = DAG.getVTList(CmpVT, MVT::i32);
  SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, Op0, Op1);
  return Sub.getValue(1);
}
/// Check if replacement of SQRT with RSQRT should be disabled.
bool X86TargetLowering::isFsqrtCheap(SDValue Op, SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();

  // We never want to use both SQRT and RSQRT instructions for the same input.
  if (DAG.getNodeIfExists(X86ISD::FRSQRT, DAG.getVTList(VT), Op))
    return false;

  if (VT.isVector())
    return Subtarget.hasFastVectorFSQRT();
  return Subtarget.hasFastScalarFSQRT();
}
/// The minimum architected relative accuracy is 2^-12. We need one
/// Newton-Raphson step to have a good float result (24 bits of precision).
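/// (One refinement step for rsqrt computes
///  Est' = Est * (1.5 - 0.5 * Op * Est * Est).)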
SDValue X86TargetLowering::getSqrtEstimate(SDValue Op,
                                           SelectionDAG &DAG, int Enabled,
                                           int &RefinementSteps,
                                           bool &UseOneConstNR,
                                           bool Reciprocal) const {
  EVT VT = Op.getValueType();

  // SSE1 has rsqrtss and rsqrtps. AVX adds a 256-bit variant for rsqrtps.
  // It is likely not profitable to do this for f64 because a double-precision
  // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
  // instructions: convert to single, rsqrtss, convert back to double, refine
  // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
  // along with FMA, this could be a throughput win.
  // TODO: SQRT requires SSE2 to prevent the introduction of an illegal v4i32
  // after legalize types.
  if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
      (VT == MVT::v4f32 && Subtarget.hasSSE1() && Reciprocal) ||
      (VT == MVT::v4f32 && Subtarget.hasSSE2() && !Reciprocal) ||
      (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
      (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = 1;

    UseOneConstNR = false;
    // There is no FRSQRT for 512-bits, but there is RSQRT14.
    unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RSQRT14 : X86ISD::FRSQRT;
    return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
  }
  return SDValue();
}
/// The minimum architected relative accuracy is 2^-12. We need one
/// Newton-Raphson step to have a good float result (24 bits of precision).
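/// (One refinement step for the reciprocal computes
///  Est' = Est * (2.0 - Op * Est).)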
SDValue X86TargetLowering::getRecipEstimate(SDValue Op, SelectionDAG &DAG,
                                            int Enabled,
                                            int &RefinementSteps) const {
  EVT VT = Op.getValueType();

  // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
  // It is likely not profitable to do this for f64 because a double-precision
  // reciprocal estimate with refinement on x86 prior to FMA requires
  // 15 instructions: convert to single, rcpss, convert back to double, refine
  // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
  // along with FMA, this could be a throughput win.

  if ((VT == MVT::f32 && Subtarget.hasSSE1()) ||
      (VT == MVT::v4f32 && Subtarget.hasSSE1()) ||
      (VT == MVT::v8f32 && Subtarget.hasAVX()) ||
      (VT == MVT::v16f32 && Subtarget.useAVX512Regs())) {
    // Enable estimate codegen with 1 refinement step for vector division.
    // Scalar division estimates are disabled because they break too much
    // real-world code. These defaults are intended to match GCC behavior.
    if (VT == MVT::f32 && Enabled == ReciprocalEstimate::Unspecified)
      return SDValue();

    if (RefinementSteps == ReciprocalEstimate::Unspecified)
      RefinementSteps = 1;

    // There is no FRCP for 512-bits, but there is RCP14.
    unsigned Opcode = VT == MVT::v16f32 ? X86ISD::RCP14 : X86ISD::FRCP;
    return DAG.getNode(Opcode, SDLoc(Op), VT, Op);
  }
  return SDValue();
}
/// If we have at least two divisions that use the same divisor, convert to
/// multiplication by a reciprocal. This may need to be adjusted for a given
/// CPU if a division's cost is not at least twice the cost of a multiplication.
/// This is because we still need one division to calculate the reciprocal and
/// then we need two multiplies by that reciprocal as replacements for the
/// original divisions.
unsigned X86TargetLowering::combineRepeatedFPDivisors() const {
  return 2;
}

SDValue
X86TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
                                 SelectionDAG &DAG,
                                 SmallVectorImpl<SDNode *> &Created) const {
  AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
  if (isIntDivCheap(N->getValueType(0), Attr))
    return SDValue(N, 0); // Lower SDIV as SDIV

  assert((Divisor.isPowerOf2() || (-Divisor).isPowerOf2()) &&
         "Unexpected divisor!");

  // Only perform this transform if CMOV is supported otherwise the select
  // below will become a branch.
  if (!Subtarget.hasCMov())
    return SDValue();

  // fold (sdiv X, pow2)
  EVT VT = N->getValueType(0);
  // FIXME: Support i8.
  if (VT != MVT::i16 && VT != MVT::i32 &&
      !(Subtarget.is64Bit() && VT == MVT::i64))
    return SDValue();

  unsigned Lg2 = Divisor.countTrailingZeros();

  // If the divisor is 2 or -2, the default expansion is better.
  if (Lg2 == 1)
    return SDValue();

  SDLoc DL(N);
  SDValue N0 = N->getOperand(0);
  SDValue Zero = DAG.getConstant(0, DL, VT);
  APInt Lg2Mask = APInt::getLowBitsSet(VT.getSizeInBits(), Lg2);
  SDValue Pow2MinusOne = DAG.getConstant(Lg2Mask, DL, VT);
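  // e.g. for 'sdiv i32 %x, 8': if %x is negative, bias it by 7 first so the
  // arithmetic shift right by 3 rounds toward zero rather than toward
  // negative infinity.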
  // If N0 is negative, we need to add (Pow2 - 1) to it before shifting right.
  SDValue Cmp = DAG.getSetCC(DL, MVT::i8, N0, Zero, ISD::SETLT);
  SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
  SDValue CMov = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);

  Created.push_back(Cmp.getNode());
  Created.push_back(Add.getNode());
  Created.push_back(CMov.getNode());

  // Divide by pow2.
  SDValue SRA =
      DAG.getNode(ISD::SRA, DL, VT, CMov, DAG.getConstant(Lg2, DL, MVT::i8));

  // If we're dividing by a positive value, we're done. Otherwise, we must
  // negate the result.
  if (Divisor.isNonNegative())
    return SRA;

  Created.push_back(SRA.getNode());
  return DAG.getNode(ISD::SUB, DL, VT, Zero, SRA);
}
/// Result of 'and' is compared against zero. Change to a BT node if possible.
/// Returns the BT node and the condition code needed to use it.
static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC,
                            const SDLoc &dl, SelectionDAG &DAG,
                            SDValue &X86CC) {
  assert(And.getOpcode() == ISD::AND && "Expected AND node!");
  SDValue Op0 = And.getOperand(0);
  SDValue Op1 = And.getOperand(1);
  if (Op0.getOpcode() == ISD::TRUNCATE)
    Op0 = Op0.getOperand(0);
  if (Op1.getOpcode() == ISD::TRUNCATE)
    Op1 = Op1.getOperand(0);

  SDValue Src, BitNo;
  if (Op1.getOpcode() == ISD::SHL)
    std::swap(Op0, Op1);
  if (Op0.getOpcode() == ISD::SHL) {
    if (isOneConstant(Op0.getOperand(0))) {
      // If we looked past a truncate, check that it's only truncating away
      // known zeros.
      unsigned BitWidth = Op0.getValueSizeInBits();
      unsigned AndBitWidth = And.getValueSizeInBits();
      if (BitWidth > AndBitWidth) {
        KnownBits Known = DAG.computeKnownBits(Op0);
        if (Known.countMinLeadingZeros() < BitWidth - AndBitWidth)
          return SDValue();
      }
      Src = Op1;
      BitNo = Op0.getOperand(1);
    }
  } else if (Op1.getOpcode() == ISD::Constant) {
    ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
    uint64_t AndRHSVal = AndRHS->getZExtValue();
    SDValue AndLHS = Op0;

    if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
      Src = AndLHS.getOperand(0);
      BitNo = AndLHS.getOperand(1);
    } else {
      // Use BT if the immediate can't be encoded in a TEST instruction or we
      // are optimizing for size and the immediate won't fit in a byte.
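      // e.g. 'test rax, 0x800000000' needs an immediate that TEST cannot
      // encode, but the single set bit makes it expressible as 'bt rax, 35'.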
      bool OptForSize = DAG.shouldOptForSize();
      if ((!isUInt<32>(AndRHSVal) || (OptForSize && !isUInt<8>(AndRHSVal))) &&
          isPowerOf2_64(AndRHSVal)) {
        Src = AndLHS;
        BitNo = DAG.getConstant(Log2_64_Ceil(AndRHSVal), dl,
                                Src.getValueType());
      }
    }
  }

  // No patterns found, give up.
  if (!Src.getNode())
    return SDValue();

  // If Src is i8, promote it to i32 with any_extend. There is no i8 BT
  // instruction. Since the shift amount is in-range-or-undefined, we know
  // that doing a bittest on the i32 value is ok. We extend to i32 because
  // the encoding for the i16 version is larger than the i32 version.
  // Also promote i16 to i32 for performance / code size reasons.
  if (Src.getValueType() == MVT::i8 || Src.getValueType() == MVT::i16)
    Src = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Src);

  // See if we can use the 32-bit instruction instead of the 64-bit one for a
  // shorter encoding. Since the former takes the modulo 32 of BitNo and the
  // latter takes the modulo 64, this is only valid if the 5th bit of BitNo is
  // known to be zero.
  if (Src.getValueType() == MVT::i64 &&
      DAG.MaskedValueIsZero(BitNo, APInt(BitNo.getValueSizeInBits(), 32)))
    Src = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src);

  // If the operand types disagree, extend the shift amount to match. Since
  // BT ignores high bits (like shifts) we can use anyextend.
  if (Src.getValueType() != BitNo.getValueType())
    BitNo = DAG.getNode(ISD::ANY_EXTEND, dl, Src.getValueType(), BitNo);

  X86CC = DAG.getTargetConstant(CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B,
                                dl, MVT::i8);
  return DAG.getNode(X86ISD::BT, dl, MVT::i32, Src, BitNo);
}
/// Turns an ISD::CondCode into a value suitable for SSE floating-point mask
/// CMPs.
static unsigned translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
                                   SDValue &Op1, bool &IsAlwaysSignaling) {
  unsigned SSECC;
  bool Swap = false;

  // SSE Condition code mapping:
  //  0 - EQ
  //  1 - LT
  //  2 - LE
  //  3 - UNORD
  //  4 - NEQ
  //  5 - NLT
  //  6 - NLE
  //  7 - ORD
  switch (SetCCOpcode) {
  default: llvm_unreachable("Unexpected SETCC condition");
  case ISD::SETOEQ:
  case ISD::SETEQ:  SSECC = 0; break;
  case ISD::SETOGT:
  case ISD::SETGT:  Swap = true; LLVM_FALLTHROUGH;
  case ISD::SETLT:
  case ISD::SETOLT: SSECC = 1; break;
  case ISD::SETOGE:
  case ISD::SETGE:  Swap = true; LLVM_FALLTHROUGH;
  case ISD::SETLE:
  case ISD::SETOLE: SSECC = 2; break;
  case ISD::SETUO:  SSECC = 3; break;
  case ISD::SETUNE:
  case ISD::SETNE:  SSECC = 4; break;
  case ISD::SETULE: Swap = true; LLVM_FALLTHROUGH;
  case ISD::SETUGE: SSECC = 5; break;
  case ISD::SETULT: Swap = true; LLVM_FALLTHROUGH;
  case ISD::SETUGT: SSECC = 6; break;
  case ISD::SETO:   SSECC = 7; break;
  case ISD::SETUEQ: SSECC = 8; break;
  case ISD::SETONE: SSECC = 12; break;
  }
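  // Note: SSECC values of 8 and above (EQ_UQ, NEQ_OQ) only exist in the AVX
  // VCMP encodings; pre-AVX callers must synthesize SETUEQ/SETONE from two
  // legacy compares (see LowerVSETCC).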
  if (Swap)
    std::swap(Op0, Op1);

  switch (SetCCOpcode) {
  default:
    IsAlwaysSignaling = true;
    break;
  case ISD::SETEQ:
  case ISD::SETOEQ:
  case ISD::SETUEQ:
  case ISD::SETNE:
  case ISD::SETONE:
  case ISD::SETUNE:
  case ISD::SETO:
  case ISD::SETUO:
    IsAlwaysSignaling = false;
    break;
  }

  return SSECC;
}
/// Break a 256-bit integer VSETCC into two new 128-bit ones and then
/// concatenate the result back.
static SDValue splitIntVSETCC(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getValueType();

  assert(Op.getOpcode() == ISD::SETCC && "Unsupported operation");
  assert(Op.getOperand(0).getValueType().isInteger() &&
         VT == Op.getOperand(0).getValueType() && "Unsupported VTs!");
  SDLoc dl(Op);
  SDValue CC = Op.getOperand(2);

  // Extract the LHS Lo/Hi vectors
  SDValue LHS1, LHS2;
  std::tie(LHS1, LHS2) = splitVector(Op.getOperand(0), DAG, dl);

  // Extract the RHS Lo/Hi vectors
  SDValue RHS1, RHS2;
  std::tie(RHS1, RHS2) = splitVector(Op.getOperand(1), DAG, dl);

  // Issue the operation on the smaller types and concatenate the result back
  EVT LoVT, HiVT;
  std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
                     DAG.getNode(ISD::SETCC, dl, LoVT, LHS1, RHS1, CC),
                     DAG.getNode(ISD::SETCC, dl, HiVT, LHS2, RHS2, CC));
}
static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG) {

  SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDValue CC = Op.getOperand(2);
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);

  assert(VT.getVectorElementType() == MVT::i1 &&
         "Cannot set masked compare for this operation");

  ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();

  // Prefer SETGT over SETLT.
  if (SetCCOpcode == ISD::SETLT) {
    SetCCOpcode = ISD::getSetCCSwappedOperands(SetCCOpcode);
    std::swap(Op0, Op1);
  }

  return DAG.getSetCC(dl, VT, Op0, Op1, SetCCOpcode);
}
/// Given a buildvector constant, return a new vector constant with each element
/// incremented or decremented. If incrementing or decrementing would result in
/// unsigned overflow or underflow or this is not a simple vector constant,
/// return an empty value.
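/// e.g. incrementing <4 x i8> <250, 251, 252, 253> yields <251, 252, 253, 254>,
/// while incrementing any vector that contains 255 fails and returns SDValue().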
static SDValue incDecVectorConstant(SDValue V, SelectionDAG &DAG, bool IsInc) {
  auto *BV = dyn_cast<BuildVectorSDNode>(V.getNode());
  if (!BV)
    return SDValue();

  MVT VT = V.getSimpleValueType();
  MVT EltVT = VT.getVectorElementType();
  unsigned NumElts = VT.getVectorNumElements();
  SmallVector<SDValue, 8> NewVecC;
  SDLoc DL(V);
  for (unsigned i = 0; i < NumElts; ++i) {
    auto *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
    if (!Elt || Elt->isOpaque() || Elt->getSimpleValueType(0) != EltVT)
      return SDValue();

    // Avoid overflow/underflow.
    const APInt &EltC = Elt->getAPIntValue();
    if ((IsInc && EltC.isMaxValue()) || (!IsInc && EltC.isNullValue()))
      return SDValue();

    NewVecC.push_back(DAG.getConstant(EltC + (IsInc ? 1 : -1), DL, EltVT));
  }

  return DAG.getBuildVector(VT, DL, NewVecC);
}
/// As another special case, use PSUBUS[BW] when it's profitable. E.g. for
///   Op0 u<= Op1:
///     t = psubus Op0, Op1
///     pcmpeq t, <0..0>
static SDValue LowerVSETCCWithSUBUS(SDValue Op0, SDValue Op1, MVT VT,
                                    ISD::CondCode Cond, const SDLoc &dl,
                                    const X86Subtarget &Subtarget,
                                    SelectionDAG &DAG) {
  if (!Subtarget.hasSSE2())
    return SDValue();

  MVT VET = VT.getVectorElementType();
  if (VET != MVT::i8 && VET != MVT::i16)
    return SDValue();

  switch (Cond) {
  default:
    return SDValue();
  case ISD::SETULT: {
    // If the comparison is against a constant we can turn this into a
    // setule. With psubus, setule does not require a swap. This is
    // beneficial because the constant in the register is no longer
    // clobbered as the destination, so it can be hoisted out of a loop.
    // Only do this pre-AVX since vpcmp* is no longer destructive.
    if (Subtarget.hasAVX())
      return SDValue();
    SDValue ULEOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/false);
    if (!ULEOp1)
      return SDValue();
    Op1 = ULEOp1;
    break;
  }
  case ISD::SETUGT: {
    // If the comparison is against a constant, we can turn this into a setuge.
    // This is beneficial because materializing a constant 0 for the PCMPEQ is
    // probably cheaper than XOR+PCMPGT using 2 different vector constants:
    // cmpgt (xor X, SignMaskC) CmpC --> cmpeq (usubsat (CmpC+1), X), 0
    SDValue UGEOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/true);
    if (!UGEOp1)
      return SDValue();
    Op1 = UGEOp1;
    break;
  }
  // Psubus is better than flip-sign because it requires no inversion.
  case ISD::SETUGE:
    std::swap(Op0, Op1);
    break;
  case ISD::SETULE:
    break;
  }

  SDValue Result = DAG.getNode(ISD::USUBSAT, dl, VT, Op0, Op1);
  return DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
                     DAG.getConstant(0, dl, VT));
}
static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
                           SelectionDAG &DAG) {
  bool IsStrict = Op.getOpcode() == ISD::STRICT_FSETCC ||
                  Op.getOpcode() == ISD::STRICT_FSETCCS;
  SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
  SDValue Op1 = Op.getOperand(IsStrict ? 2 : 1);
  SDValue CC = Op.getOperand(IsStrict ? 3 : 2);
  MVT VT = Op->getSimpleValueType(0);
  ISD::CondCode Cond = cast<CondCodeSDNode>(CC)->get();
  bool isFP = Op1.getSimpleValueType().isFloatingPoint();
  SDLoc dl(Op);

  if (isFP) {
#ifndef NDEBUG
    MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
    assert(EltVT == MVT::f32 || EltVT == MVT::f64);
#endif

    bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
    SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();

    // If we have a strict compare with a vXi1 result and the input is 128/256
    // bits we can't use a masked compare unless we have VLX. If we use a wider
    // compare like we do for non-strict, we might trigger spurious exceptions
    // from the upper elements. Instead emit an AVX compare and convert to mask.
    unsigned Opc;
    if (Subtarget.hasAVX512() && VT.getVectorElementType() == MVT::i1 &&
        (!IsStrict || Subtarget.hasVLX() ||
         Op0.getSimpleValueType().is512BitVector())) {
      assert(VT.getVectorNumElements() <= 16);
      Opc = IsStrict ? X86ISD::STRICT_CMPM : X86ISD::CMPM;
    } else {
      Opc = IsStrict ? X86ISD::STRICT_CMPP : X86ISD::CMPP;
      // The SSE/AVX packed FP comparison nodes are defined with a
      // floating-point vector result that matches the operand type. This allows
      // them to work with an SSE1 target (integer vector types are not legal).
      VT = Op0.getSimpleValueType();
    }

    SDValue Cmp;
    bool IsAlwaysSignaling;
    unsigned SSECC = translateX86FSETCC(Cond, Op0, Op1, IsAlwaysSignaling);
    if (!Subtarget.hasAVX()) {
      // TODO: We could use following steps to handle a quiet compare with
      // signaling encodings.
      // 1. Get ordered masks from a quiet ISD::SETO
      // 2. Use the masks to mask potential unordered elements in operand A, B
      // 3. Get the compare results of masked A, B
      // 4. Calculating final result using the mask and result from 3
      // But currently, we just fall back to scalar operations.
      if (IsStrict && IsAlwaysSignaling && !IsSignaling)
        return SDValue();

      // Insert an extra signaling instruction to raise exception.
      if (IsStrict && !IsAlwaysSignaling && IsSignaling) {
        SDValue SignalCmp = DAG.getNode(
            Opc, dl, {VT, MVT::Other},
            {Chain, Op0, Op1, DAG.getTargetConstant(1, dl, MVT::i8)}); // LT_OS
        // FIXME: It seems we need to update the flags of all new strict nodes.
        // Otherwise, mayRaiseFPException in MI will return false due to
        // NoFPExcept = false by default. However, I didn't find it in other
        // patches.
        SignalCmp->setFlags(Op->getFlags());
        Chain = SignalCmp.getValue(1);
      }

      // In the two cases not handled by SSE compare predicates (SETUEQ/SETONE),
      // emit two comparisons and a logic op to tie them together.
      if (SSECC >= 8) {
        // LLVM predicate is SETUEQ or SETONE.
        unsigned CC0, CC1;
        unsigned CombineOpc;
        if (Cond == ISD::SETUEQ) {
          CC0 = 3; // UNORD
          CC1 = 0; // EQ
          CombineOpc = X86ISD::FOR;
        } else {
          assert(Cond == ISD::SETONE);
          CC0 = 7; // ORD
          CC1 = 4; // NEQ
          CombineOpc = X86ISD::FAND;
        }

        SDValue Cmp0, Cmp1;
        if (IsStrict) {
          Cmp0 = DAG.getNode(
              Opc, dl, {VT, MVT::Other},
              {Chain, Op0, Op1, DAG.getTargetConstant(CC0, dl, MVT::i8)});
          Cmp1 = DAG.getNode(
              Opc, dl, {VT, MVT::Other},
              {Chain, Op0, Op1, DAG.getTargetConstant(CC1, dl, MVT::i8)});
          Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Cmp0.getValue(1),
                              Cmp1.getValue(1));
        } else {
          Cmp0 = DAG.getNode(
              Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(CC0, dl, MVT::i8));
          Cmp1 = DAG.getNode(
              Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(CC1, dl, MVT::i8));
        }
        Cmp = DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
      } else {
        if (IsStrict) {
          Cmp = DAG.getNode(
              Opc, dl, {VT, MVT::Other},
              {Chain, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8)});
          Chain = Cmp.getValue(1);
        } else
          Cmp = DAG.getNode(
              Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
      }
    } else {
      // Handle all other FP comparisons here.
      if (IsStrict) {
        // Make a flip on already signaling CCs before setting bit 4 of AVX CC.
        SSECC |= (IsAlwaysSignaling ^ IsSignaling) << 4;
        Cmp = DAG.getNode(
            Opc, dl, {VT, MVT::Other},
            {Chain, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8)});
        Chain = Cmp.getValue(1);
      } else
        Cmp = DAG.getNode(
            Opc, dl, VT, Op0, Op1, DAG.getTargetConstant(SSECC, dl, MVT::i8));
    }

    if (VT.getSizeInBits() > Op.getSimpleValueType().getSizeInBits()) {
      // We emitted a compare with an XMM/YMM result. Finish converting to a
      // mask register using a vptestm.
      EVT CastVT = EVT(VT).changeVectorElementTypeToInteger();
      Cmp = DAG.getBitcast(CastVT, Cmp);
      Cmp = DAG.getSetCC(dl, Op.getSimpleValueType(), Cmp,
                         DAG.getConstant(0, dl, CastVT), ISD::SETNE);
    } else {
      // If this is SSE/AVX CMPP, bitcast the result back to integer to match
      // the result type of SETCC. The bitcast is expected to be optimized
      // away during combining/isel.
      Cmp = DAG.getBitcast(Op.getSimpleValueType(), Cmp);
    }

    if (IsStrict)
      return DAG.getMergeValues({Cmp, Chain}, dl);

    return Cmp;
  }
  assert(!IsStrict && "Strict SETCC only handles FP operands.");

  MVT VTOp0 = Op0.getSimpleValueType();
  assert(VTOp0 == Op1.getSimpleValueType() &&
         "Expected operands with same type!");
  assert(VT.getVectorNumElements() == VTOp0.getVectorNumElements() &&
         "Invalid number of packed elements for source and destination!");

  // The non-AVX512 code below works under the assumption that source and
  // destination types are the same.
  assert((Subtarget.hasAVX512() || (VT == VTOp0)) &&
         "Value types for source and destination must be the same!");

  // The result is boolean, but operands are int/float
  if (VT.getVectorElementType() == MVT::i1) {
    // In AVX-512 architecture setcc returns mask with i1 elements,
    // But there is no compare instruction for i8 and i16 elements in KNL.
    assert((VTOp0.getScalarSizeInBits() >= 32 || Subtarget.hasBWI()) &&
           "Unexpected operand type");
    return LowerIntVSETCC_AVX512(Op, DAG);
  }

  // Lower using XOP integer comparisons.
  if (VT.is128BitVector() && Subtarget.hasXOP()) {
    // Translate compare code to XOP PCOM compare mode.
    unsigned CmpMode = 0;
    switch (Cond) {
    default: llvm_unreachable("Unexpected SETCC condition");
    case ISD::SETULT:
    case ISD::SETLT: CmpMode = 0x00; break;
    case ISD::SETULE:
    case ISD::SETLE: CmpMode = 0x01; break;
    case ISD::SETUGT:
    case ISD::SETGT: CmpMode = 0x02; break;
    case ISD::SETUGE:
    case ISD::SETGE: CmpMode = 0x03; break;
    case ISD::SETEQ: CmpMode = 0x04; break;
    case ISD::SETNE: CmpMode = 0x05; break;
    }

    // Are we comparing unsigned or signed integers?
    unsigned Opc =
        ISD::isUnsignedIntSetCC(Cond) ? X86ISD::VPCOMU : X86ISD::VPCOM;

    return DAG.getNode(Opc, dl, VT, Op0, Op1,
                       DAG.getTargetConstant(CmpMode, dl, MVT::i8));
  }

  // (X & Y) != 0 --> (X & Y) == Y iff Y is power-of-2.
  // Revert part of the simplifySetCCWithAnd combine, to avoid an invert.
  if (Cond == ISD::SETNE && ISD::isBuildVectorAllZeros(Op1.getNode())) {
    SDValue BC0 = peekThroughBitcasts(Op0);
    if (BC0.getOpcode() == ISD::AND) {
      APInt UndefElts;
      SmallVector<APInt, 64> EltBits;
      if (getTargetConstantBitsFromNode(BC0.getOperand(1),
                                        VT.getScalarSizeInBits(), UndefElts,
                                        EltBits, false, false)) {
        if (llvm::all_of(EltBits, [](APInt &V) { return V.isPowerOf2(); })) {
          Cond = ISD::SETEQ;
          Op1 = DAG.getBitcast(VT, BC0.getOperand(1));
        }
      }
    }
  }

  // ICMP_EQ(AND(X,C),C) -> SRA(SHL(X,LOG2(C)),BW-1) iff C is power-of-2.
  if (Cond == ISD::SETEQ && Op0.getOpcode() == ISD::AND &&
      Op0.getOperand(1) == Op1 && Op0.hasOneUse()) {
    ConstantSDNode *C1 = isConstOrConstSplat(Op1);
    if (C1 && C1->getAPIntValue().isPowerOf2()) {
      unsigned BitWidth = VT.getScalarSizeInBits();
      unsigned ShiftAmt = BitWidth - C1->getAPIntValue().logBase2() - 1;

      SDValue Result = Op0.getOperand(0);
      Result = DAG.getNode(ISD::SHL, dl, VT, Result,
                           DAG.getConstant(ShiftAmt, dl, VT));
      Result = DAG.getNode(ISD::SRA, dl, VT, Result,
                           DAG.getConstant(BitWidth - 1, dl, VT));
      return Result;
    }
  }

  // Break 256-bit integer vector compare into smaller ones.
  if (VT.is256BitVector() && !Subtarget.hasInt256())
    return splitIntVSETCC(Op, DAG);

  if (VT == MVT::v32i16 || VT == MVT::v64i8) {
    assert(!Subtarget.hasBWI() && "Unexpected VT with AVX512BW!");
    return splitIntVSETCC(Op, DAG);
  }

  // If this is a SETNE against the signed minimum value, change it to SETGT.
  // If this is a SETNE against the signed maximum value, change it to SETLT,
  // which will be swapped to SETGT.
  // Otherwise we use PCMPEQ+invert.
  APInt ConstValue;
  if (Cond == ISD::SETNE &&
      ISD::isConstantSplatVector(Op1.getNode(), ConstValue)) {
    if (ConstValue.isMinSignedValue())
      Cond = ISD::SETGT;
    else if (ConstValue.isMaxSignedValue())
      Cond = ISD::SETLT;
  }

  // If both operands are known non-negative, then an unsigned compare is the
  // same as a signed compare and there's no need to flip signbits.
  // TODO: We could check for more general simplifications here since we're
  // computing known bits.
  bool FlipSigns = ISD::isUnsignedIntSetCC(Cond) &&
                   !(DAG.SignBitIsZero(Op0) && DAG.SignBitIsZero(Op1));
  // Special case: Use min/max operations for unsigned compares.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (ISD::isUnsignedIntSetCC(Cond) &&
      (FlipSigns || ISD::isTrueWhenEqual(Cond)) &&
      TLI.isOperationLegal(ISD::UMIN, VT)) {
    // If we have a constant operand, increment/decrement it and change the
    // condition to avoid an invert.
    if (Cond == ISD::SETUGT) {
      // X > C --> X >= (C+1) --> X == umax(X, C+1)
      if (SDValue UGTOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/true)) {
        Op1 = UGTOp1;
        Cond = ISD::SETUGE;
      }
    }
    if (Cond == ISD::SETULT) {
      // X < C --> X <= (C-1) --> X == umin(X, C-1)
      if (SDValue ULTOp1 = incDecVectorConstant(Op1, DAG, /*IsInc*/false)) {
        Op1 = ULTOp1;
        Cond = ISD::SETULE;
      }
    }
    bool Invert = false;
    unsigned Opc;
    switch (Cond) {
    default: llvm_unreachable("Unexpected condition code");
    case ISD::SETUGT: Invert = true; LLVM_FALLTHROUGH;
    case ISD::SETULE: Opc = ISD::UMIN; break;
    case ISD::SETULT: Invert = true; LLVM_FALLTHROUGH;
    case ISD::SETUGE: Opc = ISD::UMAX; break;
    }

    SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
    Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);

    // If the logical-not of the result is required, perform that now.
    if (Invert)
      Result = DAG.getNOT(dl, Result, VT);

    return Result;
  }
  // Try to use SUBUS and PCMPEQ.
  if (SDValue V = LowerVSETCCWithSUBUS(Op0, Op1, VT, Cond, dl, Subtarget, DAG))
    return V;

  // We are handling one of the integer comparisons here. Since SSE only has
  // GT and EQ comparisons for integer, swapping operands and multiple
  // operations may be required for some comparisons.
  unsigned Opc = (Cond == ISD::SETEQ || Cond == ISD::SETNE) ? X86ISD::PCMPEQ
                                                            : X86ISD::PCMPGT;
  bool Swap = Cond == ISD::SETLT || Cond == ISD::SETULT ||
              Cond == ISD::SETGE || Cond == ISD::SETUGE;
  bool Invert = Cond == ISD::SETNE ||
                (Cond != ISD::SETEQ && ISD::isTrueWhenEqual(Cond));

  if (Swap)
    std::swap(Op0, Op1);

  // Check that the operation in question is available (most are plain SSE2,
  // but PCMPGTQ and PCMPEQQ have different requirements).
  if (VT == MVT::v2i64) {
    if (Opc == X86ISD::PCMPGT && !Subtarget.hasSSE42()) {
      assert(Subtarget.hasSSE2() && "Don't know how to lower!");

      // Special case for sign bit test. We can use a v4i32 PCMPGT and shuffle
      // the odd elements over the even elements.
      if (!FlipSigns && !Invert && ISD::isBuildVectorAllZeros(Op0.getNode())) {
        Op0 = DAG.getConstant(0, dl, MVT::v4i32);
        Op1 = DAG.getBitcast(MVT::v4i32, Op1);

        SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
        static const int MaskHi[] = { 1, 1, 3, 3 };
        SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);

        return DAG.getBitcast(VT, Result);
      }

      if (!FlipSigns && !Invert && ISD::isBuildVectorAllOnes(Op1.getNode())) {
        Op0 = DAG.getBitcast(MVT::v4i32, Op0);
        Op1 = DAG.getConstant(-1, dl, MVT::v4i32);

        SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
        static const int MaskHi[] = { 1, 1, 3, 3 };
        SDValue Result = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);

        return DAG.getBitcast(VT, Result);
      }

      // Since SSE has no unsigned integer comparisons, we need to flip the sign
      // bits of the inputs before performing those operations. The lower
      // compare is always unsigned.
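      // 0x8000000080000000 flips the sign of both dwords in each lane (both
      // halves become unsigned compares); 0x0000000080000000 flips only the
      // low dword, whose compare must always be unsigned in the emulation.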
      SDValue SB;
      if (FlipSigns) {
        SB = DAG.getConstant(0x8000000080000000ULL, dl, MVT::v2i64);
      } else {
        SB = DAG.getConstant(0x0000000080000000ULL, dl, MVT::v2i64);
      }
      Op0 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op0, SB);
      Op1 = DAG.getNode(ISD::XOR, dl, MVT::v2i64, Op1, SB);

      // Cast everything to the right type.
      Op0 = DAG.getBitcast(MVT::v4i32, Op0);
      Op1 = DAG.getBitcast(MVT::v4i32, Op1);

      // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
      SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
      SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);

      // Create masks for only the low parts/high parts of the 64 bit integers.
      static const int MaskHi[] = { 1, 1, 3, 3 };
      static const int MaskLo[] = { 0, 0, 2, 2 };
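      // Each shuffle splats one 32-bit half of every 64-bit lane across that
      // lane (MaskHi copies elements 1,3 over 0,2), so the AND/OR below can
      // combine the per-half results into a full 64-bit lane mask.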
      SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
      SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
      SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);

      SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
      Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);

      if (Invert)
        Result = DAG.getNOT(dl, Result, MVT::v4i32);

      return DAG.getBitcast(VT, Result);
    }

    if (Opc == X86ISD::PCMPEQ && !Subtarget.hasSSE41()) {
      // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
      // pcmpeqd + pshufd + pand.
      assert(Subtarget.hasSSE2() && !FlipSigns && "Don't know how to lower!");

      // First cast everything to the right type.
      Op0 = DAG.getBitcast(MVT::v4i32, Op0);
      Op1 = DAG.getBitcast(MVT::v4i32, Op1);

      // Do the compare.
      SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);

      // Make sure the lower and upper halves are both all-ones.
      static const int Mask[] = { 1, 0, 3, 2 };
      SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
      Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);

      if (Invert)
        Result = DAG.getNOT(dl, Result, MVT::v4i32);

      return DAG.getBitcast(VT, Result);
    }
  }

  // Since SSE has no unsigned integer comparisons, we need to flip the sign
  // bits of the inputs before performing those operations.
  if (FlipSigns) {
    MVT EltVT = VT.getVectorElementType();
    SDValue SM = DAG.getConstant(APInt::getSignMask(EltVT.getSizeInBits()), dl,
                                 VT);
    Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SM);
    Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SM);
  }

  SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);

  // If the logical-not of the result is required, perform that now.
  if (Invert)
    Result = DAG.getNOT(dl, Result, VT);

  return Result;
}
// Try to select this as a KORTEST+SETCC or KTEST+SETCC if possible.
static SDValue EmitAVX512Test(SDValue Op0, SDValue Op1, ISD::CondCode CC,
                              const SDLoc &dl, SelectionDAG &DAG,
                              const X86Subtarget &Subtarget,
                              SDValue &X86CC) {
  // Only support equality comparisons.
  if (CC != ISD::SETEQ && CC != ISD::SETNE)
    return SDValue();

  // Must be a bitcast from vXi1.
  if (Op0.getOpcode() != ISD::BITCAST)
    return SDValue();

  Op0 = Op0.getOperand(0);
  MVT VT = Op0.getSimpleValueType();
  if (!(Subtarget.hasAVX512() && VT == MVT::v16i1) &&
      !(Subtarget.hasDQI() && VT == MVT::v8i1) &&
      !(Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1)))
    return SDValue();

  X86::CondCode X86Cond;
  if (isNullConstant(Op1)) {
    X86Cond = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
  } else if (isAllOnesConstant(Op1)) {
    // C flag is set for all ones.
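    // (KORTEST sets CF when the OR of its operands is all ones and ZF when it
    // is all zeros, so both constants can be tested from one instruction.)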
    X86Cond = CC == ISD::SETEQ ? X86::COND_B : X86::COND_AE;
  } else
    return SDValue();

  // If the input is an AND, we can combine its operands into the KTEST.
  bool KTestable = false;
  if (Subtarget.hasDQI() && (VT == MVT::v8i1 || VT == MVT::v16i1))
    KTestable = true;
  if (Subtarget.hasBWI() && (VT == MVT::v32i1 || VT == MVT::v64i1))
    KTestable = true;
  if (!isNullConstant(Op1))
    KTestable = false;
  if (KTestable && Op0.getOpcode() == ISD::AND && Op0.hasOneUse()) {
    SDValue LHS = Op0.getOperand(0);
    SDValue RHS = Op0.getOperand(1);
    X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
    return DAG.getNode(X86ISD::KTEST, dl, MVT::i32, LHS, RHS);
  }

  // If the input is an OR, we can combine its operands into the KORTEST.
  SDValue LHS = Op0;
  SDValue RHS = Op0;
  if (Op0.getOpcode() == ISD::OR && Op0.hasOneUse()) {
    LHS = Op0.getOperand(0);
    RHS = Op0.getOperand(1);
  }

  X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
  return DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
}
/// Emit flags for the given setcc condition and operands. Also returns the
/// corresponding X86 condition code constant in X86CC.
SDValue X86TargetLowering::emitFlagsForSetcc(SDValue Op0, SDValue Op1,
                                             ISD::CondCode CC, const SDLoc &dl,
                                             SelectionDAG &DAG,
                                             SDValue &X86CC) const {
  // Optimize to BT if possible.
  // Lower (X & (1 << N)) == 0 to BT(X, N).
  // Lower ((X >>u N) & 1) != 0 to BT(X, N).
  // Lower ((X >>s N) & 1) != 0 to BT(X, N).
  if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() && isNullConstant(Op1) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    if (SDValue BT = LowerAndToBT(Op0, CC, dl, DAG, X86CC))
      return BT;
  }

  // Try to use PTEST/PMOVMSKB for a tree of ORs equality compared with 0.
  // TODO: We could do AND tree with all 1s as well by using the C flag.
  if (isNullConstant(Op1) && (CC == ISD::SETEQ || CC == ISD::SETNE))
    if (SDValue CmpZ =
            MatchVectorAllZeroTest(Op0, CC, dl, Subtarget, DAG, X86CC))
      return CmpZ;

  // Try to lower using KORTEST or KTEST.
  if (SDValue Test = EmitAVX512Test(Op0, Op1, CC, dl, DAG, Subtarget, X86CC))
    return Test;

  // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
  // these.
  if ((isOneConstant(Op1) || isNullConstant(Op1)) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    // If the input is a setcc, then reuse the input setcc or use a new one with
    // the inverted condition.
    if (Op0.getOpcode() == X86ISD::SETCC) {
      bool Invert = (CC == ISD::SETNE) ^ isNullConstant(Op1);

      X86CC = Op0.getOperand(0);
      if (Invert) {
        X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
        CCode = X86::GetOppositeBranchCondition(CCode);
        X86CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
      }

      return Op0.getOperand(1);
    }
  }

  // Try to use the carry flag from the add in place of a separate CMP for:
  // (seteq (add X, -1), -1). Similar for setne.
  if (isAllOnesConstant(Op1) && Op0.getOpcode() == ISD::ADD &&
      Op0.getOperand(1) == Op1 && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    if (isProfitableToUseFlagOp(Op0)) {
      SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);

      SDValue New = DAG.getNode(X86ISD::ADD, dl, VTs, Op0.getOperand(0),
                                Op0.getOperand(1));
      DAG.ReplaceAllUsesOfValueWith(SDValue(Op0.getNode(), 0), New);
      X86::CondCode CCode = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
      X86CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
      return SDValue(New.getNode(), 1);
    }
  }

  X86::CondCode CondCode =
      TranslateX86CC(CC, dl, /*IsFP*/ false, Op0, Op1, DAG);
  assert(CondCode != X86::COND_INVALID && "Unexpected condition code!");

  SDValue EFLAGS = EmitCmp(Op0, Op1, CondCode, dl, DAG, Subtarget);
  X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
  return EFLAGS;
}
SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {

  bool IsStrict = Op.getOpcode() == ISD::STRICT_FSETCC ||
                  Op.getOpcode() == ISD::STRICT_FSETCCS;
  MVT VT = Op->getSimpleValueType(0);

  if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);

  assert(VT == MVT::i8 && "SetCC type must be 8-bit integer");
  SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
  SDValue Op0 = Op.getOperand(IsStrict ? 1 : 0);
  SDValue Op1 = Op.getOperand(IsStrict ? 2 : 1);
  SDLoc dl(Op);
  ISD::CondCode CC =
      cast<CondCodeSDNode>(Op.getOperand(IsStrict ? 3 : 2))->get();

  // Handle f128 first, since one possible outcome is a normal integer
  // comparison which gets handled by emitFlagsForSetcc.
  if (Op0.getValueType() == MVT::f128) {
    softenSetCCOperands(DAG, MVT::f128, Op0, Op1, CC, dl, Op0, Op1, Chain,
                        Op.getOpcode() == ISD::STRICT_FSETCCS);

    // If softenSetCCOperands returned a scalar, use it.
    if (!Op1.getNode()) {
      assert(Op0.getValueType() == Op.getValueType() &&
             "Unexpected setcc expansion!");
      if (IsStrict)
        return DAG.getMergeValues({Op0, Chain}, dl);
      return Op0;
    }
  }

  if (Op0.getSimpleValueType().isInteger()) {
    SDValue X86CC;
    SDValue EFLAGS = emitFlagsForSetcc(Op0, Op1, CC, dl, DAG, X86CC);
    SDValue Res = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
    return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
  }

  // Handle floating point.
  X86::CondCode CondCode = TranslateX86CC(CC, dl, /*IsFP*/ true, Op0, Op1, DAG);
  if (CondCode == X86::COND_INVALID)
    return SDValue();

  SDValue EFLAGS;
  if (IsStrict) {
    bool IsSignaling = Op.getOpcode() == ISD::STRICT_FSETCCS;
    EFLAGS =
        DAG.getNode(IsSignaling ? X86ISD::STRICT_FCMPS : X86ISD::STRICT_FCMP,
                    dl, {MVT::i32, MVT::Other}, {Chain, Op0, Op1});
    Chain = EFLAGS.getValue(1);
  } else {
    EFLAGS = DAG.getNode(X86ISD::FCMP, dl, MVT::i32, Op0, Op1);
  }

  SDValue X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
  SDValue Res = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, X86CC, EFLAGS);
  return IsStrict ? DAG.getMergeValues({Res, Chain}, dl) : Res;
}
SDValue X86TargetLowering::LowerSETCCCARRY(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Carry = Op.getOperand(2);
  SDValue Cond = Op.getOperand(3);
  SDLoc DL(Op);

  assert(LHS.getSimpleValueType().isInteger() && "SETCCCARRY is integer only.");
  X86::CondCode CC = TranslateIntegerX86CC(cast<CondCodeSDNode>(Cond)->get());

  // Recreate the carry if needed.
  EVT CarryVT = Carry.getValueType();
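  // Adding all-ones (-1) produces a carry-out exactly when Carry is nonzero,
  // rematerializing the carry as an EFLAGS value for the SBB below.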
  Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
                      Carry, DAG.getAllOnesConstant(DL, CarryVT));

  SDVTList VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
  SDValue Cmp = DAG.getNode(X86ISD::SBB, DL, VTs, LHS, RHS, Carry.getValue(1));
  return getSETCC(CC, Cmp.getValue(1), DL, DAG);
}
// This function returns three things: the arithmetic computation itself
// (Value), an EFLAGS result (Overflow), and a condition code (Cond). The
// flag and the condition code define the case in which the arithmetic
// computation overflows.
static std::pair<SDValue, SDValue>
getX86XALUOOp(X86::CondCode &Cond, SDValue Op, SelectionDAG &DAG) {
  assert(Op.getResNo() == 0 && "Unexpected result number!");
  SDValue Value, Overflow;
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  unsigned BaseOp = 0;
  SDLoc DL(Op);
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unknown ovf instruction!");
  case ISD::SADDO:
    BaseOp = X86ISD::ADD;
    Cond = X86::COND_O;
    break;
  case ISD::UADDO:
    BaseOp = X86ISD::ADD;
    Cond = isOneConstant(RHS) ? X86::COND_E : X86::COND_B;
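    // x + 1 wraps exactly when the result is 0, so testing ZF instead of CF
    // lets isel use INC, which does not modify the carry flag.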
    break;
  case ISD::SSUBO:
    BaseOp = X86ISD::SUB;
    Cond = X86::COND_O;
    break;
  case ISD::USUBO:
    BaseOp = X86ISD::SUB;
    Cond = X86::COND_B;
    break;
  case ISD::SMULO:
    BaseOp = X86ISD::SMUL;
    Cond = X86::COND_O;
    break;
  case ISD::UMULO:
    BaseOp = X86ISD::UMUL;
    Cond = X86::COND_O;
    break;
  }

  // Also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
  Value = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
  Overflow = Value.getValue(1);

  return std::make_pair(Value, Overflow);
}
static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
  // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
  // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
  // looks for this combo and may remove the "setcc" instruction if the "setcc"
  // has only one use.
  SDLoc DL(Op);
  X86::CondCode Cond;
  SDValue Value, Overflow;
  std::tie(Value, Overflow) = getX86XALUOOp(Cond, Op, DAG);

  SDValue SetCC = getSETCC(Cond, Overflow, DL, DAG);
  assert(Op->getValueType(1) == MVT::i8 && "Unexpected VT!");
  return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(), Value, SetCC);
}
/// Return true if opcode is an X86 logical comparison.
static bool isX86LogicalCmp(SDValue Op) {
  unsigned Opc = Op.getOpcode();
  if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
      Opc == X86ISD::FCMP)
    return true;
  if (Op.getResNo() == 1 &&
      (Opc == X86ISD::ADD || Opc == X86ISD::SUB || Opc == X86ISD::ADC ||
       Opc == X86ISD::SBB || Opc == X86ISD::SMUL || Opc == X86ISD::UMUL ||
       Opc == X86ISD::OR || Opc == X86ISD::XOR || Opc == X86ISD::AND))
    return true;

  return false;
}
static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
  if (V.getOpcode() != ISD::TRUNCATE)
    return false;

  SDValue VOp0 = V.getOperand(0);
  unsigned InBits = VOp0.getValueSizeInBits();
  unsigned Bits = V.getValueSizeInBits();
  return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
}
22948 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
22949 bool AddTest = true;
22950 SDValue Cond = Op.getOperand(0);
22951 SDValue Op1 = Op.getOperand(1);
22952 SDValue Op2 = Op.getOperand(2);
22954 MVT VT = Op1.getSimpleValueType();
22957 // Lower FP selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
22958 // are available or VBLENDV if AVX is available.
22959 // Otherwise FP cmovs get lowered into a less efficient branch sequence later.
22960 if (Cond.getOpcode() == ISD::SETCC && isScalarFPTypeInSSEReg(VT) &&
22961 VT == Cond.getOperand(0).getSimpleValueType() && Cond->hasOneUse()) {
22962 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
22963 bool IsAlwaysSignaling;
22965 translateX86FSETCC(cast<CondCodeSDNode>(Cond.getOperand(2))->get(),
22966 CondOp0, CondOp1, IsAlwaysSignaling);
22968 if (Subtarget.hasAVX512()) {
22970 DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CondOp0, CondOp1,
22971 DAG.getTargetConstant(SSECC, DL, MVT::i8));
22972 assert(!VT.isVector() && "Not a scalar type?");
22973 return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
22976 if (SSECC < 8 || Subtarget.hasAVX()) {
22977 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
22978 DAG.getTargetConstant(SSECC, DL, MVT::i8));
22980 // If we have AVX, we can use a variable vector select (VBLENDV) instead
22981 // of 3 logic instructions for size savings and potentially speed.
22982 // Unfortunately, there is no scalar form of VBLENDV.
22984 // If either operand is a +0.0 constant, don't try this. We can expect to
22985 // optimize away at least one of the logic instructions later in that
22986 // case, so that sequence would be faster than a variable blend.
22988 // BLENDV was introduced with SSE 4.1, but the 2 register form implicitly
22989 // uses XMM0 as the selection register. That may need just as many
22990 // instructions as the AND/ANDN/OR sequence due to register moves, so
22992 if (Subtarget.hasAVX() && !isNullFPConstant(Op1) &&
22993 !isNullFPConstant(Op2)) {
22994 // Convert to vectors, do a VSELECT, and convert back to scalar.
22995 // All of the conversions should be optimized away.
22996 MVT VecVT = VT == MVT::f32 ? MVT::v4f32 : MVT::v2f64;
22997 SDValue VOp1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op1);
22998 SDValue VOp2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Op2);
22999 SDValue VCmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Cmp);
23001 MVT VCmpVT = VT == MVT::f32 ? MVT::v4i32 : MVT::v2i64;
23002 VCmp = DAG.getBitcast(VCmpVT, VCmp);
23004 SDValue VSel = DAG.getSelect(DL, VecVT, VCmp, VOp1, VOp2);
23006 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
23007 VSel, DAG.getIntPtrConstant(0, DL));
23009 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
23010 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
23011 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
23015 // AVX512 fallback is to lower selects of scalar floats to masked moves.
23016 if (isScalarFPTypeInSSEReg(VT) && Subtarget.hasAVX512()) {
23017 SDValue Cmp = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1, Cond);
23018 return DAG.getNode(X86ISD::SELECTS, DL, VT, Cmp, Op1, Op2);
23021 if (Cond.getOpcode() == ISD::SETCC) {
23022 if (SDValue NewCond = LowerSETCC(Cond, DAG)) {
23024 // If the condition was updated, it's possible that the operands of the
23025 // select were also updated (for example, EmitTest has a RAUW). Refresh
23026 // the local references to the select operands in case they got stale.
23027 Op1 = Op.getOperand(1);
23028 Op2 = Op.getOperand(2);
23032 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
23033 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
23034 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
23035 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
23036 // (select (and (x , 0x1) == 0), y, (z ^ y) ) -> (-(and (x , 0x1)) & z ) ^ y
23037 // (select (and (x , 0x1) == 0), y, (z | y) ) -> (-(and (x , 0x1)) & z ) | y
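// For instance, the first rule maps to: SUB x, 1 borrows exactly when
// x == 0, SBB r, r then smears that carry into 0 or -1, and the final
// OR with y produces -1 (when x == 0) or y -- no branch or CMOV needed.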
23038 if (Cond.getOpcode() == X86ISD::SETCC &&
23039 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
23040 isNullConstant(Cond.getOperand(1).getOperand(1))) {
23041 SDValue Cmp = Cond.getOperand(1);
23042 SDValue CmpOp0 = Cmp.getOperand(0);
23043 unsigned CondCode = Cond.getConstantOperandVal(0);
23045 // Special handling for __builtin_ffs(X) - 1 pattern which looks like
23046 // (select (seteq X, 0), -1, (cttz_zero_undef X)). Disable the special
23047 // handling to keep the CMP with 0. This should be removed by
23048 // optimizeCompareInst by using the flags from the BSR/TZCNT used for the
23049 // cttz_zero_undef.
23050 auto MatchFFSMinus1 = [&](SDValue Op1, SDValue Op2) {
23051 return (Op1.getOpcode() == ISD::CTTZ_ZERO_UNDEF && Op1.hasOneUse() &&
23052 Op1.getOperand(0) == CmpOp0 && isAllOnesConstant(Op2));
23053 };
23054 if (Subtarget.hasCMov() && (VT == MVT::i32 || VT == MVT::i64) &&
23055 ((CondCode == X86::COND_NE && MatchFFSMinus1(Op1, Op2)) ||
23056 (CondCode == X86::COND_E && MatchFFSMinus1(Op2, Op1)))) {
23057 // Keep Cmp.
23058 } else if ((isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
23059 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
23060 SDValue Y = isAllOnesConstant(Op2) ? Op1 : Op2;
23062 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
23063 SDVTList CmpVTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
23065 // Apply further optimizations for special cases
23066 // (select (x != 0), -1, 0) -> neg & sbb
23067 // (select (x == 0), 0, -1) -> neg & sbb
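// e.g. for (x != 0) ? -1 : 0, "neg x" sets the carry flag iff x != 0 and
// "sbb r, r" broadcasts that carry into every bit of r, yielding 0 or -1.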
23068 if (isNullConstant(Y) &&
23069 (isAllOnesConstant(Op1) == (CondCode == X86::COND_NE))) {
23070 SDValue Zero = DAG.getConstant(0, DL, CmpOp0.getValueType());
23071 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, CmpVTs, Zero, CmpOp0);
23072 Zero = DAG.getConstant(0, DL, Op.getValueType());
23073 return DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, Neg.getValue(1));
23074 }
23076 Cmp = DAG.getNode(X86ISD::SUB, DL, CmpVTs,
23077 CmpOp0, DAG.getConstant(1, DL, CmpOp0.getValueType()));
23079 SDValue Zero = DAG.getConstant(0, DL, Op.getValueType());
23080 SDValue Res = // Res = 0 or -1.
23081 DAG.getNode(X86ISD::SBB, DL, VTs, Zero, Zero, Cmp.getValue(1));
23083 if (isAllOnesConstant(Op1) != (CondCode == X86::COND_E))
23084 Res = DAG.getNOT(DL, Res, Res.getValueType());
23086 return DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
23087 } else if (!Subtarget.hasCMov() && CondCode == X86::COND_E &&
23088 Cmp.getOperand(0).getOpcode() == ISD::AND &&
23089 isOneConstant(Cmp.getOperand(0).getOperand(1))) {
23090 SDValue Src1, Src2;
23091 // true if Op2 is XOR or OR operator and one of its operands
23092 // is equal to Op1
23093 // ( a , a op b) || ( b , a op b)
23094 auto isOrXorPattern = [&]() {
23095 if ((Op2.getOpcode() == ISD::XOR || Op2.getOpcode() == ISD::OR) &&
23096 (Op2.getOperand(0) == Op1 || Op2.getOperand(1) == Op1)) {
23097 Src1 =
23098 Op2.getOperand(0) == Op1 ? Op2.getOperand(1) : Op2.getOperand(0);
23099 Src2 = Op1;
23100 return true;
23101 }
23102 return false;
23103 };
23105 if (isOrXorPattern()) {
23106 SDValue Neg;
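// Worked instance: select((x & 1) == 0, y, z ^ y). Neg becomes (x & 1),
// Mask becomes -(x & 1) (all-ones when the low bit is set, zero
// otherwise), so (Mask & z) ^ y yields z ^ y or plain y as required.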
23107 unsigned int CmpSz = CmpOp0.getSimpleValueType().getSizeInBits();
23108 // we need a mask of all zeros or ones with the same size as the other
23109 // operands.
23110 if (CmpSz > VT.getSizeInBits())
23111 Neg = DAG.getNode(ISD::TRUNCATE, DL, VT, CmpOp0);
23112 else if (CmpSz < VT.getSizeInBits())
23113 Neg = DAG.getNode(ISD::AND, DL, VT,
23114 DAG.getNode(ISD::ANY_EXTEND, DL, VT, CmpOp0.getOperand(0)),
23115 DAG.getConstant(1, DL, VT));
23116 else
23117 Neg = CmpOp0;
23118 SDValue Mask = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
23119 Neg); // -(and (x, 0x1))
23120 SDValue And = DAG.getNode(ISD::AND, DL, VT, Mask, Src1); // Mask & z
23121 return DAG.getNode(Op2.getOpcode(), DL, VT, And, Src2); // And Op y
23122 }
23123 }
23124 }
23126 // Look past (and (setcc_carry (cmp ...)), 1).
23127 if (Cond.getOpcode() == ISD::AND &&
23128 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY &&
23129 isOneConstant(Cond.getOperand(1)))
23130 Cond = Cond.getOperand(0);
23132 // If condition flag is set by a X86ISD::CMP, then use it as the condition
23133 // setting operand in place of the X86ISD::SETCC.
23134 unsigned CondOpcode = Cond.getOpcode();
23135 if (CondOpcode == X86ISD::SETCC ||
23136 CondOpcode == X86ISD::SETCC_CARRY) {
23137 CC = Cond.getOperand(0);
23139 SDValue Cmp = Cond.getOperand(1);
23140 bool IllegalFPCMov = false;
23141 if (VT.isFloatingPoint() && !VT.isVector() &&
23142 !isScalarFPTypeInSSEReg(VT) && Subtarget.hasCMov()) // FPStack?
23143 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
23145 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
23146 Cmp.getOpcode() == X86ISD::BT) { // FIXME
23147 Cond = Cmp;
23148 AddTest = false;
23149 }
23150 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
23151 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
23152 CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) {
23153 SDValue Value;
23154 X86::CondCode X86Cond;
23155 std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
23157 CC = DAG.getTargetConstant(X86Cond, DL, MVT::i8);
23158 AddTest = false;
23159 }
23161 if (AddTest) {
23162 // Look past the truncate if the high bits are known zero.
23163 if (isTruncWithZeroHighBitsInput(Cond, DAG))
23164 Cond = Cond.getOperand(0);
23166 // We know the result of AND is compared against zero. Try to match
23167 // it to BT.
23168 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
23169 SDValue BTCC;
23170 if (SDValue BT = LowerAndToBT(Cond, ISD::SETNE, DL, DAG, BTCC)) {
23171 CC = BTCC;
23172 Cond = BT;
23173 AddTest = false;
23174 }
23175 }
23176 }
23178 if (AddTest) {
23179 CC = DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8);
23180 Cond = EmitTest(Cond, X86::COND_NE, DL, DAG, Subtarget);
23181 }
23183 // a < b ? -1 : 0 -> RES = ~setcc_carry
23184 // a < b ? 0 : -1 -> RES = setcc_carry
23185 // a >= b ? -1 : 0 -> RES = setcc_carry
23186 // a >= b ? 0 : -1 -> RES = ~setcc_carry
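// In each case the borrow produced by the SUB below is materialized as
// 0 or -1 by SETCC_CARRY (conceptually an "sbb r, r"), with an extra NOT
// when the select arms are inverted relative to the carry.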
23187 if (Cond.getOpcode() == X86ISD::SUB) {
23188 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
23190 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
23191 (isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
23192 (isNullConstant(Op1) || isNullConstant(Op2))) {
23193 SDValue Res =
23194 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
23195 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), Cond);
23196 if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
23197 return DAG.getNOT(DL, Res, Res.getValueType());
23198 return Res;
23199 }
23200 }
23202 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate
23203 // widen the cmov and push the truncate through. This avoids introducing a new
23204 // branch during isel and doesn't add any extensions.
23205 if (Op.getValueType() == MVT::i8 &&
23206 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
23207 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
23208 if (T1.getValueType() == T2.getValueType() &&
23209 // Exclude CopyFromReg to avoid partial register stalls.
23210 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
23211 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, T1.getValueType(), T2, T1,
23212 CC, Cond);
23213 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
23214 }
23215 }
23217 // Or finally, promote i8 cmovs if we have CMOV,
23218 // or i16 cmovs if it won't prevent folding a load.
23219 // FIXME: we should not limit promotion of i8 case to only when the CMOV is
23220 // legal, but EmitLoweredSelect() can not deal with these extensions
23221 // being inserted between two CMOV's. (in i16 case too TBN)
23222 // https://bugs.llvm.org/show_bug.cgi?id=40974
23223 if ((Op.getValueType() == MVT::i8 && Subtarget.hasCMov()) ||
23224 (Op.getValueType() == MVT::i16 && !MayFoldLoad(Op1) &&
23225 !MayFoldLoad(Op2))) {
23226 Op1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op1);
23227 Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op2);
23228 SDValue Ops[] = { Op2, Op1, CC, Cond };
23229 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, MVT::i32, Ops);
23230 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
23231 }
23233 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
23234 // condition is true.
23235 SDValue Ops[] = { Op2, Op1, CC, Cond };
23236 return DAG.getNode(X86ISD::CMOV, DL, Op.getValueType(), Ops);
23237 }
23239 static SDValue LowerSIGN_EXTEND_Mask(SDValue Op,
23240 const X86Subtarget &Subtarget,
23241 SelectionDAG &DAG) {
23242 MVT VT = Op->getSimpleValueType(0);
23243 SDValue In = Op->getOperand(0);
23244 MVT InVT = In.getSimpleValueType();
23245 assert(InVT.getVectorElementType() == MVT::i1 && "Unexpected input type!");
23246 MVT VTElt = VT.getVectorElementType();
23247 SDLoc dl(Op);
23249 unsigned NumElts = VT.getVectorNumElements();
23251 // Extend VT if the scalar type is i8/i16 and BWI is not supported.
23252 MVT ExtVT = VT;
23253 if (!Subtarget.hasBWI() && VTElt.getSizeInBits() <= 16) {
23254 // If v16i32 is to be avoided, we'll need to split and concatenate.
23255 if (NumElts == 16 && !Subtarget.canExtendTo512DQ())
23256 return SplitAndExtendv16i1(Op.getOpcode(), VT, In, dl, DAG);
23258 ExtVT = MVT::getVectorVT(MVT::i32, NumElts);
23259 }
23261 // Widen to 512-bits if VLX is not supported.
23262 MVT WideVT = ExtVT;
23263 if (!ExtVT.is512BitVector() && !Subtarget.hasVLX()) {
23264 NumElts *= 512 / ExtVT.getSizeInBits();
23265 InVT = MVT::getVectorVT(MVT::i1, NumElts);
23266 In = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, InVT, DAG.getUNDEF(InVT),
23267 In, DAG.getIntPtrConstant(0, dl));
23268 WideVT = MVT::getVectorVT(ExtVT.getVectorElementType(), NumElts);
23269 }
23271 SDValue V;
23272 MVT WideEltVT = WideVT.getVectorElementType();
23273 if ((Subtarget.hasDQI() && WideEltVT.getSizeInBits() >= 32) ||
23274 (Subtarget.hasBWI() && WideEltVT.getSizeInBits() <= 16)) {
23275 V = DAG.getNode(Op.getOpcode(), dl, WideVT, In);
23276 } else {
23277 SDValue NegOne = DAG.getConstant(-1, dl, WideVT);
23278 SDValue Zero = DAG.getConstant(0, dl, WideVT);
23279 V = DAG.getSelect(dl, WideVT, In, NegOne, Zero);
23280 }
23282 // Truncate if we had to extend i16/i8 above.
23283 if (VT != ExtVT) {
23284 WideVT = MVT::getVectorVT(VTElt, NumElts);
23285 V = DAG.getNode(ISD::TRUNCATE, dl, WideVT, V);
23286 }
23288 // Extract back to 128/256-bit if we widened.
23289 if (WideVT != VT)
23290 V = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, V,
23291 DAG.getIntPtrConstant(0, dl));
23293 return V;
23294 }
23296 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
23297 SelectionDAG &DAG) {
23298 SDValue In = Op->getOperand(0);
23299 MVT InVT = In.getSimpleValueType();
23301 if (InVT.getVectorElementType() == MVT::i1)
23302 return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
23304 assert(Subtarget.hasAVX() && "Expected AVX support");
23305 return LowerAVXExtend(Op, DAG, Subtarget);
23306 }
23308 // Lowering for SIGN_EXTEND_VECTOR_INREG and ZERO_EXTEND_VECTOR_INREG.
23309 // For sign extend this needs to handle all vector sizes and SSE4.1 and
23310 // non-SSE4.1 targets. For zero extend this should only handle inputs of
23311 // MVT::v64i8 when BWI is not supported, but AVX512 is.
23312 static SDValue LowerEXTEND_VECTOR_INREG(SDValue Op,
23313 const X86Subtarget &Subtarget,
23314 SelectionDAG &DAG) {
23315 SDValue In = Op->getOperand(0);
23316 MVT VT = Op->getSimpleValueType(0);
23317 MVT InVT = In.getSimpleValueType();
23319 MVT SVT = VT.getVectorElementType();
23320 MVT InSVT = InVT.getVectorElementType();
23321 assert(SVT.getSizeInBits() > InSVT.getSizeInBits());
23323 if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16)
23324 return SDValue();
23325 if (InSVT != MVT::i32 && InSVT != MVT::i16 && InSVT != MVT::i8)
23326 return SDValue();
23327 if (!(VT.is128BitVector() && Subtarget.hasSSE2()) &&
23328 !(VT.is256BitVector() && Subtarget.hasAVX()) &&
23329 !(VT.is512BitVector() && Subtarget.hasAVX512()))
23330 return SDValue();
23332 SDLoc dl(Op);
23333 unsigned Opc = Op.getOpcode();
23334 unsigned NumElts = VT.getVectorNumElements();
23336 // For 256-bit vectors, we only need the lower (128-bit) half of the input.
23337 // For 512-bit vectors, we need 128-bits or 256-bits.
23338 if (InVT.getSizeInBits() > 128) {
23339 // Input needs to be at least the same number of elements as output, and
23340 // at least 128-bits.
23341 int InSize = InSVT.getSizeInBits() * NumElts;
23342 In = extractSubVector(In, 0, DAG, dl, std::max(InSize, 128));
23343 InVT = In.getSimpleValueType();
23344 }
23346 // SSE41 targets can use the pmov[sz]x* instructions directly for 128-bit results,
23347 // so are legal and shouldn't occur here. AVX2/AVX512 pmovsx* instructions still
23348 // need to be handled here for 256/512-bit results.
23349 if (Subtarget.hasInt256()) {
23350 assert(VT.getSizeInBits() > 128 && "Unexpected 128-bit vector extension");
23352 if (InVT.getVectorNumElements() != NumElts)
23353 return DAG.getNode(Op.getOpcode(), dl, VT, In);
23355 // FIXME: Apparently we create inreg operations that could be regular
23356 // extends.
23357 unsigned ExtOpc =
23358 Opc == ISD::SIGN_EXTEND_VECTOR_INREG ? ISD::SIGN_EXTEND
23359 : ISD::ZERO_EXTEND;
23360 return DAG.getNode(ExtOpc, dl, VT, In);
23361 }
23363 // pre-AVX2 256-bit extensions need to be split into 128-bit instructions.
23364 if (Subtarget.hasAVX()) {
23365 assert(VT.is256BitVector() && "256-bit vector expected");
23366 MVT HalfVT = VT.getHalfNumVectorElementsVT();
23367 int HalfNumElts = HalfVT.getVectorNumElements();
23369 unsigned NumSrcElts = InVT.getVectorNumElements();
23370 SmallVector<int, 16> HiMask(NumSrcElts, SM_SentinelUndef);
23371 for (int i = 0; i != HalfNumElts; ++i)
23372 HiMask[i] = HalfNumElts + i;
23374 SDValue Lo = DAG.getNode(Opc, dl, HalfVT, In);
23375 SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, DAG.getUNDEF(InVT), HiMask);
23376 Hi = DAG.getNode(Opc, dl, HalfVT, Hi);
23377 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
23378 }
23380 // We should only get here for sign extend.
23381 assert(Opc == ISD::SIGN_EXTEND_VECTOR_INREG && "Unexpected opcode!");
23382 assert(VT.is128BitVector() && InVT.is128BitVector() && "Unexpected VTs");
23384 // pre-SSE41 targets unpack lower lanes and then sign-extend using SRAI.
23385 SDValue Curr = In;
23386 SDValue SignExt = Curr;
23388 // As SRAI is only available on i16/i32 types, we expand only up to i32
23389 // and handle i64 separately.
23390 if (InVT != MVT::v4i32) {
23391 MVT DestVT = VT == MVT::v2i64 ? MVT::v4i32 : VT;
23393 unsigned DestWidth = DestVT.getScalarSizeInBits();
23394 unsigned Scale = DestWidth / InSVT.getSizeInBits();
23396 unsigned InNumElts = InVT.getVectorNumElements();
23397 unsigned DestElts = DestVT.getVectorNumElements();
23399 // Build a shuffle mask that takes each input element and places it in the
23400 // MSBs of the new element size.
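// e.g. for v16i8 -> v4i32 (Scale = 4, DestElts = 4) the mask is
// <u,u,u,0, u,u,u,1, u,u,u,2, u,u,u,3>, putting each source byte into the
// most significant byte of its i32 lane; the arithmetic shift right below
// (by 24 bits in this case) then completes the sign extension.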
23401 SmallVector<int, 16> Mask(InNumElts, SM_SentinelUndef);
23402 for (unsigned i = 0; i != DestElts; ++i)
23403 Mask[i * Scale + (Scale - 1)] = i;
23405 Curr = DAG.getVectorShuffle(InVT, dl, In, In, Mask);
23406 Curr = DAG.getBitcast(DestVT, Curr);
23408 unsigned SignExtShift = DestWidth - InSVT.getSizeInBits();
23409 SignExt = DAG.getNode(X86ISD::VSRAI, dl, DestVT, Curr,
23410 DAG.getTargetConstant(SignExtShift, dl, MVT::i8));
23411 }
23413 if (VT == MVT::v2i64) {
23414 assert(Curr.getValueType() == MVT::v4i32 && "Unexpected input VT");
23415 SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
23416 SDValue Sign = DAG.getSetCC(dl, MVT::v4i32, Zero, Curr, ISD::SETGT);
23417 SignExt = DAG.getVectorShuffle(MVT::v4i32, dl, SignExt, Sign, {0, 4, 1, 5});
23418 SignExt = DAG.getBitcast(VT, SignExt);
23419 }
23421 return SignExt;
23422 }
23424 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
23425 SelectionDAG &DAG) {
23426 MVT VT = Op->getSimpleValueType(0);
23427 SDValue In = Op->getOperand(0);
23428 MVT InVT = In.getSimpleValueType();
23429 SDLoc dl(Op);
23431 if (InVT.getVectorElementType() == MVT::i1)
23432 return LowerSIGN_EXTEND_Mask(Op, Subtarget, DAG);
23434 assert(VT.isVector() && InVT.isVector() && "Expected vector type");
23435 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
23436 "Expected same number of elements");
23437 assert((VT.getVectorElementType() == MVT::i16 ||
23438 VT.getVectorElementType() == MVT::i32 ||
23439 VT.getVectorElementType() == MVT::i64) &&
23440 "Unexpected element type");
23441 assert((InVT.getVectorElementType() == MVT::i8 ||
23442 InVT.getVectorElementType() == MVT::i16 ||
23443 InVT.getVectorElementType() == MVT::i32) &&
23444 "Unexpected element type");
23446 if (VT == MVT::v32i16 && !Subtarget.hasBWI()) {
23447 assert(InVT == MVT::v32i8 && "Unexpected VT!");
23448 return splitVectorIntUnary(Op, DAG);
23449 }
23451 if (Subtarget.hasInt256())
23452 return Op;
23454 // Optimize vectors in AVX mode
23455 // Sign extend v8i16 to v8i32 and
23456 // v4i32 to v2i64
23458 // Divide input vector into two parts
23459 // for v4i32 the high shuffle mask will be {2, 3, -1, -1}
23460 // use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32
23461 // concat the vectors to original VT
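// e.g. v8i16 -> v8i32 without AVX2: OpLo extends elements 0..3, the
// shuffle moves elements 4..7 into the low half so OpHi can extend them,
// and the two v4i32 halves are concatenated back into a v8i32.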
23462 MVT HalfVT = VT.getHalfNumVectorElementsVT();
23463 SDValue OpLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, In);
23465 unsigned NumElems = InVT.getVectorNumElements();
23466 SmallVector<int,8> ShufMask(NumElems, -1);
23467 for (unsigned i = 0; i != NumElems/2; ++i)
23468 ShufMask[i] = i + NumElems/2;
23470 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
23471 OpHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, HalfVT, OpHi);
23473 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
23474 }
23476 /// Change a vector store into a pair of half-size vector stores.
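/// For example, under AVX a 32-byte v8f32 store is rewritten as two
/// 16-byte v4f32 stores at offsets 0 and 16 joined by a TokenFactor.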
23477 static SDValue splitVectorStore(StoreSDNode *Store, SelectionDAG &DAG) {
23478 SDValue StoredVal = Store->getValue();
23479 assert((StoredVal.getValueType().is256BitVector() ||
23480 StoredVal.getValueType().is512BitVector()) &&
23481 "Expecting 256/512-bit op");
23483 // Splitting volatile memory ops is not allowed unless the operation was not
23484 // legal to begin with. Assume the input store is legal (this transform is
23485 // only used for targets with AVX). Note: It is possible that we have an
23486 // illegal type like v2i128, and so we could allow splitting a volatile store
23487 // in that case if that is important.
23488 if (!Store->isSimple())
23489 return SDValue();
23491 SDLoc DL(Store);
23492 SDValue Value0, Value1;
23493 std::tie(Value0, Value1) = splitVector(StoredVal, DAG, DL);
23494 unsigned HalfOffset = Value0.getValueType().getStoreSize();
23495 SDValue Ptr0 = Store->getBasePtr();
23496 SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, HalfOffset, DL);
23497 SDValue Ch0 =
23498 DAG.getStore(Store->getChain(), DL, Value0, Ptr0, Store->getPointerInfo(),
23499 Store->getOriginalAlign(),
23500 Store->getMemOperand()->getFlags());
23501 SDValue Ch1 = DAG.getStore(Store->getChain(), DL, Value1, Ptr1,
23502 Store->getPointerInfo().getWithOffset(HalfOffset),
23503 Store->getOriginalAlign(),
23504 Store->getMemOperand()->getFlags());
23505 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Ch0, Ch1);
23506 }
23508 /// Scalarize a vector store, bitcasting to TargetVT to determine the scalar
23509 /// type.
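/// e.g. a 128-bit value stored as MVT::v2i64 becomes two i64 element
/// extracts stored at offsets 0 and 8.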
23510 static SDValue scalarizeVectorStore(StoreSDNode *Store, MVT StoreVT,
23511 SelectionDAG &DAG) {
23512 SDValue StoredVal = Store->getValue();
23513 assert(StoreVT.is128BitVector() &&
23514 StoredVal.getValueType().is128BitVector() && "Expecting 128-bit op");
23515 StoredVal = DAG.getBitcast(StoreVT, StoredVal);
23517 // Splitting volatile memory ops is not allowed unless the operation was not
23518 // legal to begin with. We are assuming the input op is legal (this transform
23519 // is only used for targets with AVX).
23520 if (!Store->isSimple())
23521 return SDValue();
23523 MVT StoreSVT = StoreVT.getScalarType();
23524 unsigned NumElems = StoreVT.getVectorNumElements();
23525 unsigned ScalarSize = StoreSVT.getStoreSize();
23527 SDLoc DL(Store);
23528 SmallVector<SDValue, 4> Stores;
23529 for (unsigned i = 0; i != NumElems; ++i) {
23530 unsigned Offset = i * ScalarSize;
23531 SDValue Ptr = DAG.getMemBasePlusOffset(Store->getBasePtr(), Offset, DL);
23532 SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, StoreSVT, StoredVal,
23533 DAG.getIntPtrConstant(i, DL));
23534 SDValue Ch = DAG.getStore(Store->getChain(), DL, Scl, Ptr,
23535 Store->getPointerInfo().getWithOffset(Offset),
23536 Store->getOriginalAlign(),
23537 Store->getMemOperand()->getFlags());
23538 Stores.push_back(Ch);
23540 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Stores);
23541 }
23543 static SDValue LowerStore(SDValue Op, const X86Subtarget &Subtarget,
23544 SelectionDAG &DAG) {
23545 StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
23546 SDLoc dl(St);
23547 SDValue StoredVal = St->getValue();
23549 // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 stores.
23550 if (StoredVal.getValueType().isVector() &&
23551 StoredVal.getValueType().getVectorElementType() == MVT::i1) {
23552 assert(StoredVal.getValueType().getVectorNumElements() <= 8 &&
23553 "Unexpected VT");
23554 assert(!St->isTruncatingStore() && "Expected non-truncating store");
23555 assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
23556 "Expected AVX512F without AVX512DQI");
23558 StoredVal = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
23559 DAG.getUNDEF(MVT::v16i1), StoredVal,
23560 DAG.getIntPtrConstant(0, dl));
23561 StoredVal = DAG.getBitcast(MVT::i16, StoredVal);
23562 StoredVal = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, StoredVal);
23564 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
23565 St->getPointerInfo(), St->getOriginalAlign(),
23566 St->getMemOperand()->getFlags());
23567 }
23569 if (St->isTruncatingStore())
23570 return SDValue();
23572 // If this is a 256-bit store of concatenated ops, we are better off splitting
23573 // that store into two 128-bit stores. This avoids spurious use of 256-bit ops
23574 // and each half can execute independently. Some cores would split the op into
23575 // halves anyway, so the concat (vinsertf128) is purely an extra op.
23576 MVT StoreVT = StoredVal.getSimpleValueType();
23577 if (StoreVT.is256BitVector() ||
23578 ((StoreVT == MVT::v32i16 || StoreVT == MVT::v64i8) &&
23579 !Subtarget.hasBWI())) {
23580 SmallVector<SDValue, 4> CatOps;
23581 if (StoredVal.hasOneUse() && collectConcatOps(StoredVal.getNode(), CatOps))
23582 return splitVectorStore(St, DAG);
23583 return SDValue();
23584 }
23586 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23587 assert(StoreVT.isVector() && StoreVT.getSizeInBits() == 64 &&
23588 "Unexpected VT");
23589 assert(TLI.getTypeAction(*DAG.getContext(), StoreVT) ==
23590 TargetLowering::TypeWidenVector && "Unexpected type action!");
23592 EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), StoreVT);
23593 StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, StoredVal,
23594 DAG.getUNDEF(StoreVT));
23596 if (Subtarget.hasSSE2()) {
23597 // Widen the vector, cast to a v2x64 type, extract the single 64-bit element
23598 // and store it.
23599 MVT StVT = Subtarget.is64Bit() && StoreVT.isInteger() ? MVT::i64 : MVT::f64;
23600 MVT CastVT = MVT::getVectorVT(StVT, 2);
23601 StoredVal = DAG.getBitcast(CastVT, StoredVal);
23602 StoredVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, StVT, StoredVal,
23603 DAG.getIntPtrConstant(0, dl));
23605 return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
23606 St->getPointerInfo(), St->getOriginalAlign(),
23607 St->getMemOperand()->getFlags());
23608 }
23609 assert(Subtarget.hasSSE1() && "Expected SSE");
23610 SDVTList Tys = DAG.getVTList(MVT::Other);
23611 SDValue Ops[] = {St->getChain(), StoredVal, St->getBasePtr()};
23612 return DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops, MVT::i64,
23613 St->getMemOperand());
23614 }
23616 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
23617 // may emit an illegal shuffle but the expansion is still better than scalar
23618 // code. We generate sext/sext_invec for SEXTLOADs if it's available, otherwise
23619 // we'll emit a shuffle and an arithmetic shift.
23620 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
23621 // TODO: It is possible to support ZExt by zeroing the undef values during
23622 // the shuffle phase or after the shuffle.
23623 static SDValue LowerLoad(SDValue Op, const X86Subtarget &Subtarget,
23624 SelectionDAG &DAG) {
23625 MVT RegVT = Op.getSimpleValueType();
23626 assert(RegVT.isVector() && "We only custom lower vector loads.");
23627 assert(RegVT.isInteger() &&
23628 "We only custom lower integer vector loads.");
23630 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
23631 SDLoc dl(Ld);
23633 // Without AVX512DQ, we need to use a scalar type for v2i1/v4i1/v8i1 loads.
23634 if (RegVT.getVectorElementType() == MVT::i1) {
23635 assert(EVT(RegVT) == Ld->getMemoryVT() && "Expected non-extending load");
23636 assert(RegVT.getVectorNumElements() <= 8 && "Unexpected VT");
23637 assert(Subtarget.hasAVX512() && !Subtarget.hasDQI() &&
23638 "Expected AVX512F without AVX512DQI");
23640 SDValue NewLd = DAG.getLoad(MVT::i8, dl, Ld->getChain(), Ld->getBasePtr(),
23641 Ld->getPointerInfo(), Ld->getOriginalAlign(),
23642 Ld->getMemOperand()->getFlags());
23644 // Replace chain users with the new chain.
23645 assert(NewLd->getNumValues() == 2 && "Loads must carry a chain!");
23647 SDValue Val = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i16, NewLd);
23648 Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, RegVT,
23649 DAG.getBitcast(MVT::v16i1, Val),
23650 DAG.getIntPtrConstant(0, dl));
23651 return DAG.getMergeValues({Val, NewLd.getValue(1)}, dl);
23652 }
23654 return SDValue();
23655 }
23657 /// Return true if node is an ISD::AND or ISD::OR of two X86ISD::SETCC nodes
23658 /// each of which has no other use apart from the AND / OR.
23659 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
23660 Opc = Op.getOpcode();
23661 if (Opc != ISD::OR && Opc != ISD::AND)
23662 return false;
23663 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
23664 Op.getOperand(0).hasOneUse() &&
23665 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
23666 Op.getOperand(1).hasOneUse());
23667 }
23669 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
23670 SDValue Chain = Op.getOperand(0);
23671 SDValue Cond = Op.getOperand(1);
23672 SDValue Dest = Op.getOperand(2);
23673 SDLoc dl(Op);
23675 if (Cond.getOpcode() == ISD::SETCC &&
23676 Cond.getOperand(0).getValueType() != MVT::f128) {
23677 SDValue LHS = Cond.getOperand(0);
23678 SDValue RHS = Cond.getOperand(1);
23679 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23681 // Special case for
23682 // setcc([su]{add,sub,mul}o == 0)
23683 // setcc([su]{add,sub,mul}o != 1)
23684 if (ISD::isOverflowIntrOpRes(LHS) &&
23685 (CC == ISD::SETEQ || CC == ISD::SETNE) &&
23686 (isNullConstant(RHS) || isOneConstant(RHS))) {
23687 SDValue Value, Overflow;
23688 X86::CondCode X86Cond;
23689 std::tie(Value, Overflow) = getX86XALUOOp(X86Cond, LHS.getValue(0), DAG);
23691 if ((CC == ISD::SETEQ) == isNullConstant(RHS))
23692 X86Cond = X86::GetOppositeBranchCondition(X86Cond);
23694 SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
23695 return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
23696 Overflow);
23697 }
23699 if (LHS.getSimpleValueType().isInteger()) {
23700 SDValue CCVal;
23701 SDValue EFLAGS = emitFlagsForSetcc(LHS, RHS, CC, SDLoc(Cond), DAG, CCVal);
23702 return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
23703 EFLAGS);
23704 }
23706 if (CC == ISD::SETOEQ) {
23707 // For FCMP_OEQ, we can emit
23708 // two branches instead of an explicit AND instruction with a
23709 // separate test. However, we only do this if this block doesn't
23710 // have a fall-through edge, because this requires an explicit
23711 // jmp when the condition is false.
23712 if (Op.getNode()->hasOneUse()) {
23713 SDNode *User = *Op.getNode()->use_begin();
23714 // Look for an unconditional branch following this conditional branch.
23715 // We need this because we need to reverse the successors in order
23716 // to implement FCMP_OEQ.
23717 if (User->getOpcode() == ISD::BR) {
23718 SDValue FalseBB = User->getOperand(1);
23720 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
23721 assert(NewBR == User);
23722 (void)NewBR;
23723 Dest = FalseBB;
23725 SDValue Cmp =
23726 DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
23727 SDValue CCVal = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
23728 Chain = DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest,
23729 CCVal, Cmp);
23730 CCVal = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
23731 return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
23732 Cmp);
23733 }
23734 }
23735 } else if (CC == ISD::SETUNE) {
23736 // For FCMP_UNE, we can emit
23737 // two branches instead of an explicit OR instruction with a
23738 // separate test.
23739 SDValue Cmp = DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
23740 SDValue CCVal = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
23741 Chain =
23742 DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal, Cmp);
23743 CCVal = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
23744 return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
23745 Cmp);
23746 }
23747 X86::CondCode X86Cond =
23748 TranslateX86CC(CC, dl, /*IsFP*/ true, LHS, RHS, DAG);
23749 SDValue Cmp = DAG.getNode(X86ISD::FCMP, SDLoc(Cond), MVT::i32, LHS, RHS);
23750 SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
23751 return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
23752 Cmp);
23753 }
23756 if (ISD::isOverflowIntrOpRes(Cond)) {
23757 SDValue Value, Overflow;
23758 X86::CondCode X86Cond;
23759 std::tie(Value, Overflow) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
23761 SDValue CCVal = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
23762 return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
23763 Overflow);
23764 }
23766 // Look past the truncate if the high bits are known zero.
23767 if (isTruncWithZeroHighBitsInput(Cond, DAG))
23768 Cond = Cond.getOperand(0);
23770 EVT CondVT = Cond.getValueType();
23772 // Add an AND with 1 if we don't already have one.
23773 if (!(Cond.getOpcode() == ISD::AND && isOneConstant(Cond.getOperand(1))))
23774 Cond =
23775 DAG.getNode(ISD::AND, dl, CondVT, Cond, DAG.getConstant(1, dl, CondVT));
23777 SDValue LHS = Cond;
23778 SDValue RHS = DAG.getConstant(0, dl, CondVT);
23780 SDValue CCVal;
23781 SDValue EFLAGS = emitFlagsForSetcc(LHS, RHS, ISD::SETNE, dl, DAG, CCVal);
23782 return DAG.getNode(X86ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
23783 EFLAGS);
23784 }
23786 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
23787 // Calls to _alloca are needed to probe the stack when allocating more than 4k
23788 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
23789 // that the guard pages used by the OS virtual memory manager are allocated in
23790 // correct sequence.
23791 SDValue
23792 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
23793 SelectionDAG &DAG) const {
23794 MachineFunction &MF = DAG.getMachineFunction();
23795 bool SplitStack = MF.shouldSplitStack();
23796 bool EmitStackProbeCall = hasStackProbeSymbol(MF);
23797 bool Lower = (Subtarget.isOSWindows() && !Subtarget.isTargetMachO()) ||
23798 SplitStack || EmitStackProbeCall;
23799 SDLoc dl(Op);
23801 // Get the inputs.
23802 SDNode *Node = Op.getNode();
23803 SDValue Chain = Op.getOperand(0);
23804 SDValue Size = Op.getOperand(1);
23805 MaybeAlign Alignment(Op.getConstantOperandVal(2));
23806 EVT VT = Node->getValueType(0);
23808 // Chain the dynamic stack allocation so that it doesn't modify the stack
23809 // pointer when other instructions are using the stack.
23810 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl);
23812 bool Is64Bit = Subtarget.is64Bit();
23813 MVT SPTy = getPointerTy(DAG.getDataLayout());
23815 SDValue Result;
23816 if (!Lower) {
23817 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23818 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
23819 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
23820 " not tell us which reg is the stack pointer!");
23822 const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();
23823 const Align StackAlign = TFI.getStackAlign();
23824 if (hasInlineStackProbe(MF)) {
23825 MachineRegisterInfo &MRI = MF.getRegInfo();
23827 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
23828 Register Vreg = MRI.createVirtualRegister(AddrRegClass);
23829 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
23830 Result = DAG.getNode(X86ISD::PROBED_ALLOCA, dl, SPTy, Chain,
23831 DAG.getRegister(Vreg, SPTy));
23832 } else {
23833 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
23834 Chain = SP.getValue(1);
23835 Result = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
23836 }
23837 if (Alignment && *Alignment > StackAlign)
23838 Result =
23839 DAG.getNode(ISD::AND, dl, VT, Result,
23840 DAG.getConstant(~(Alignment->value() - 1ULL), dl, VT));
23841 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Result); // Output chain
23842 } else if (SplitStack) {
23843 MachineRegisterInfo &MRI = MF.getRegInfo();
23845 if (Is64Bit) {
23846 // The 64 bit implementation of segmented stacks needs to clobber both r10
23847 // r11. This makes it impossible to use it along with nested parameters.
23848 const Function &F = MF.getFunction();
23849 for (const auto &A : F.args()) {
23850 if (A.hasNestAttr())
23851 report_fatal_error("Cannot use segmented stacks with functions that "
23852 "have nested arguments.");
23856 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
23857 Register Vreg = MRI.createVirtualRegister(AddrRegClass);
23858 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
23859 Result = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
23860 DAG.getRegister(Vreg, SPTy));
23861 } else {
23862 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
23863 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Size);
23864 MF.getInfo<X86MachineFunctionInfo>()->setHasWinAlloca(true);
23866 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
23867 Register SPReg = RegInfo->getStackRegister();
23868 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
23869 Chain = SP.getValue(1);
23871 if (Alignment) {
23872 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
23873 DAG.getConstant(~(Alignment->value() - 1ULL), dl, VT));
23874 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
23875 }
23877 Result = SP;
23878 }
23880 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
23881 DAG.getIntPtrConstant(0, dl, true), SDValue(), dl);
23883 SDValue Ops[2] = {Result, Chain};
23884 return DAG.getMergeValues(Ops, dl);
23885 }
23887 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
23888 MachineFunction &MF = DAG.getMachineFunction();
23889 auto PtrVT = getPointerTy(MF.getDataLayout());
23890 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
23892 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
23893 SDLoc DL(Op);
23895 if (!Subtarget.is64Bit() ||
23896 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv())) {
23897 // vastart just stores the address of the VarArgsFrameIndex slot into the
23898 // memory location argument.
23899 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
23900 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
23901 MachinePointerInfo(SV));
23902 }
23904 // __va_list_tag:
23905 // gp_offset (0 - 6 * 8)
23906 // fp_offset (48 - 48 + 8 * 16)
23907 // overflow_arg_area (point to parameters coming in memory).
23908 // reg_save_area
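// Concretely (SysV AMD64 ABI): gp_offset advances through the six integer
// argument registers (6 x 8 = 48 bytes) and fp_offset through the eight
// XMM registers (48 + 8 x 16 = 176 bytes) within reg_save_area.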
23909 SmallVector<SDValue, 8> MemOps;
23910 SDValue FIN = Op.getOperand(1);
23911 // Store gp_offset
23912 SDValue Store = DAG.getStore(
23913 Op.getOperand(0), DL,
23914 DAG.getConstant(FuncInfo->getVarArgsGPOffset(), DL, MVT::i32), FIN,
23915 MachinePointerInfo(SV));
23916 MemOps.push_back(Store);
23918 // Store fp_offset
23919 FIN = DAG.getMemBasePlusOffset(FIN, 4, DL);
23920 Store = DAG.getStore(
23921 Op.getOperand(0), DL,
23922 DAG.getConstant(FuncInfo->getVarArgsFPOffset(), DL, MVT::i32), FIN,
23923 MachinePointerInfo(SV, 4));
23924 MemOps.push_back(Store);
23926 // Store ptr to overflow_arg_area
23927 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(4, DL));
23928 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(), PtrVT);
23929 Store =
23930 DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN, MachinePointerInfo(SV, 8));
23931 MemOps.push_back(Store);
23933 // Store ptr to reg_save_area.
23934 FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getIntPtrConstant(
23935 Subtarget.isTarget64BitLP64() ? 8 : 4, DL));
23936 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(), PtrVT);
23937 Store = DAG.getStore(
23938 Op.getOperand(0), DL, RSFIN, FIN,
23939 MachinePointerInfo(SV, Subtarget.isTarget64BitLP64() ? 16 : 12));
23940 MemOps.push_back(Store);
23941 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
23942 }
23944 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
23945 assert(Subtarget.is64Bit() &&
23946 "LowerVAARG only handles 64-bit va_arg!");
23947 assert(Op.getNumOperands() == 4);
23949 MachineFunction &MF = DAG.getMachineFunction();
23950 if (Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()))
23951 // The Win64 ABI uses char* instead of a structure.
23952 return DAG.expandVAArg(Op.getNode());
23954 SDValue Chain = Op.getOperand(0);
23955 SDValue SrcPtr = Op.getOperand(1);
23956 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
23957 unsigned Align = Op.getConstantOperandVal(3);
23958 SDLoc dl(Op);
23960 EVT ArgVT = Op.getNode()->getValueType(0);
23961 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
23962 uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
23963 uint8_t ArgMode;
23965 // Decide which area this value should be read from.
23966 // TODO: Implement the AMD64 ABI in its entirety. This simple
23967 // selection mechanism works only for the basic types.
23968 assert(ArgVT != MVT::f80 && "va_arg for f80 not yet implemented");
23969 if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
23970 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
23971 } else {
23972 assert(ArgVT.isInteger() && ArgSize <= 32 /*bytes*/ &&
23973 "Unhandled argument type in LowerVAARG");
23974 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
23975 }
23977 if (ArgMode == 2) {
23978 // Sanity Check: Make sure using fp_offset makes sense.
23979 assert(!Subtarget.useSoftFloat() &&
23980 !(MF.getFunction().hasFnAttribute(Attribute::NoImplicitFloat)) &&
23981 Subtarget.hasSSE1());
23982 }
23984 // Insert VAARG_64 node into the DAG
23985 // VAARG_64 returns two values: Variable Argument Address, Chain
23986 SDValue InstOps[] = {Chain, SrcPtr, DAG.getConstant(ArgSize, dl, MVT::i32),
23987 DAG.getConstant(ArgMode, dl, MVT::i8),
23988 DAG.getConstant(Align, dl, MVT::i32)};
23989 SDVTList VTs = DAG.getVTList(getPointerTy(DAG.getDataLayout()), MVT::Other);
23990 SDValue VAARG = DAG.getMemIntrinsicNode(
23991 X86ISD::VAARG_64, dl, VTs, InstOps, MVT::i64, MachinePointerInfo(SV),
23992 /*Align=*/None, MachineMemOperand::MOLoad | MachineMemOperand::MOStore);
23993 Chain = VAARG.getValue(1);
23995 // Load the next argument and return it
23996 return DAG.getLoad(ArgVT, dl, Chain, VAARG, MachinePointerInfo());
23997 }
23999 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget &Subtarget,
24000 SelectionDAG &DAG) {
24001 // X86-64 va_list is a struct { i32, i32, i8*, i8* }, except on Windows,
24002 // where a va_list is still an i8*.
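// sizeof(__va_list_tag) is therefore 4 + 4 + 8 + 8 = 24 bytes, matching
// the 24-byte memcpy emitted below.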
24003 assert(Subtarget.is64Bit() && "This code only handles 64-bit va_copy!");
24004 if (Subtarget.isCallingConvWin64(
24005 DAG.getMachineFunction().getFunction().getCallingConv()))
24006 // Probably a Win64 va_copy.
24007 return DAG.expandVACopy(Op.getNode());
24009 SDValue Chain = Op.getOperand(0);
24010 SDValue DstPtr = Op.getOperand(1);
24011 SDValue SrcPtr = Op.getOperand(2);
24012 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
24013 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
24014 SDLoc DL(Op);
24016 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr, DAG.getIntPtrConstant(24, DL),
24017 Align(8), /*isVolatile*/ false, false, false,
24018 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
24019 }
24021 // Helper to get immediate/variable SSE shift opcode from other shift opcodes.
24022 static unsigned getTargetVShiftUniformOpcode(unsigned Opc, bool IsVariable) {
24023 switch (Opc) {
24024 case ISD::SHL:
24025 case X86ISD::VSHL:
24026 case X86ISD::VSHLI:
24027 return IsVariable ? X86ISD::VSHL : X86ISD::VSHLI;
24028 case ISD::SRL:
24029 case X86ISD::VSRL:
24030 case X86ISD::VSRLI:
24031 return IsVariable ? X86ISD::VSRL : X86ISD::VSRLI;
24032 case ISD::SRA:
24033 case X86ISD::VSRA:
24034 case X86ISD::VSRAI:
24035 return IsVariable ? X86ISD::VSRA : X86ISD::VSRAI;
24036 }
24037 llvm_unreachable("Unknown target vector shift node");
24038 }
24040 /// Handle vector element shifts where the shift amount is a constant.
24041 /// Takes immediate version of shift as input.
24042 static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
24043 SDValue SrcOp, uint64_t ShiftAmt,
24044 SelectionDAG &DAG) {
24045 MVT ElementType = VT.getVectorElementType();
24047 // Bitcast the source vector to the output type, this is mainly necessary for
24048 // vXi8/vXi64 shifts.
24049 if (VT != SrcOp.getSimpleValueType())
24050 SrcOp = DAG.getBitcast(VT, SrcOp);
24052 // Fold this packed shift into its first operand if ShiftAmt is 0.
24053 if (ShiftAmt == 0)
24054 return SrcOp;
24056 // Check for ShiftAmt >= element width
24057 if (ShiftAmt >= ElementType.getSizeInBits()) {
24058 if (Opc == X86ISD::VSRAI)
24059 ShiftAmt = ElementType.getSizeInBits() - 1;
24060 else
24061 return DAG.getConstant(0, dl, VT);
24062 }
24064 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
24065 && "Unknown target vector shift-by-constant node");
24067 // Fold this packed vector shift into a build vector if SrcOp is a
24068 // vector of Constants or UNDEFs.
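// e.g. a VSHLI of build_vector <1, 2, undef, 4> by 3 folds to the
// constant vector <8, 16, 0, 32>; undef lanes become 0 because a real
// shift would produce zeros in those bits.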
24069 if (ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
24070 SmallVector<SDValue, 8> Elts;
24071 unsigned NumElts = SrcOp->getNumOperands();
24073 switch (Opc) {
24074 default: llvm_unreachable("Unknown opcode!");
24075 case X86ISD::VSHLI:
24076 for (unsigned i = 0; i != NumElts; ++i) {
24077 SDValue CurrentOp = SrcOp->getOperand(i);
24078 if (CurrentOp->isUndef()) {
24079 // Must produce 0s in the correct bits.
24080 Elts.push_back(DAG.getConstant(0, dl, ElementType));
24081 continue;
24082 }
24083 auto *ND = cast<ConstantSDNode>(CurrentOp);
24084 const APInt &C = ND->getAPIntValue();
24085 Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), dl, ElementType));
24086 }
24087 break;
24088 case X86ISD::VSRLI:
24089 for (unsigned i = 0; i != NumElts; ++i) {
24090 SDValue CurrentOp = SrcOp->getOperand(i);
24091 if (CurrentOp->isUndef()) {
24092 // Must produce 0s in the correct bits.
24093 Elts.push_back(DAG.getConstant(0, dl, ElementType));
24094 continue;
24095 }
24096 auto *ND = cast<ConstantSDNode>(CurrentOp);
24097 const APInt &C = ND->getAPIntValue();
24098 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), dl, ElementType));
24099 }
24100 break;
24101 case X86ISD::VSRAI:
24102 for (unsigned i = 0; i != NumElts; ++i) {
24103 SDValue CurrentOp = SrcOp->getOperand(i);
24104 if (CurrentOp->isUndef()) {
24105 // All shifted in bits must be the same so use 0.
24106 Elts.push_back(DAG.getConstant(0, dl, ElementType));
24107 continue;
24108 }
24109 auto *ND = cast<ConstantSDNode>(CurrentOp);
24110 const APInt &C = ND->getAPIntValue();
24111 Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), dl, ElementType));
24112 }
24113 break;
24114 }
24116 return DAG.getBuildVector(VT, dl, Elts);
24117 }
24119 return DAG.getNode(Opc, dl, VT, SrcOp,
24120 DAG.getTargetConstant(ShiftAmt, dl, MVT::i8));
24121 }
24123 /// Handle vector element shifts where the shift amount may or may not be a
24124 /// constant. Takes immediate version of shift as input.
24125 static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
24126 SDValue SrcOp, SDValue ShAmt,
24127 const X86Subtarget &Subtarget,
24128 SelectionDAG &DAG) {
24129 MVT SVT = ShAmt.getSimpleValueType();
24130 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
24132 // Catch shift-by-constant.
24133 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
24134 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
24135 CShAmt->getZExtValue(), DAG);
24137 // Change opcode to non-immediate version.
24138 Opc = getTargetVShiftUniformOpcode(Opc, true);
24140 // Need to build a vector containing shift amount.
24141 // SSE/AVX packed shifts only use the lower 64-bit of the shift count.
24142 // +====================+============+=======================================+
24143 // | ShAmt is | HasSSE4.1? | Construct ShAmt vector as |
24144 // +====================+============+=======================================+
24145 // | i64 | Yes, No | Use ShAmt as lowest elt |
24146 // | i32 | Yes | zero-extend in-reg |
24147 // | (i32 zext(i16/i8)) | Yes | zero-extend in-reg |
24148 // | (i32 zext(i16/i8)) | No | byte-shift-in-reg |
24149 // | i16/i32 | No | v4i32 build_vector(ShAmt, 0, ud, ud)) |
24150 // +====================+============+=======================================+
24152 if (SVT == MVT::i64)
24153 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v2i64, ShAmt);
24154 else if (ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
24155 ShAmt.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
24156 (ShAmt.getOperand(0).getSimpleValueType() == MVT::i16 ||
24157 ShAmt.getOperand(0).getSimpleValueType() == MVT::i8)) {
24158 ShAmt = ShAmt.getOperand(0);
24159 MVT AmtTy = ShAmt.getSimpleValueType() == MVT::i8 ? MVT::v16i8 : MVT::v8i16;
24160 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), AmtTy, ShAmt);
24161 if (Subtarget.hasSSE41())
24162 ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
24163 MVT::v2i64, ShAmt);
24165 SDValue ByteShift = DAG.getTargetConstant(
24166 (128 - AmtTy.getScalarSizeInBits()) / 8, SDLoc(ShAmt), MVT::i8);
24167 ShAmt = DAG.getBitcast(MVT::v16i8, ShAmt);
24168 ShAmt = DAG.getNode(X86ISD::VSHLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
24169 ByteShift);
24170 ShAmt = DAG.getNode(X86ISD::VSRLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
24171 ByteShift);
24172 }
24173 } else if (Subtarget.hasSSE41() &&
24174 ShAmt.getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
24175 ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(ShAmt), MVT::v4i32, ShAmt);
24176 ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
24177 MVT::v2i64, ShAmt);
24178 } else {
24179 SDValue ShOps[4] = {ShAmt, DAG.getConstant(0, dl, SVT), DAG.getUNDEF(SVT),
24180 DAG.getUNDEF(SVT)};
24181 ShAmt = DAG.getBuildVector(MVT::v4i32, dl, ShOps);
24182 }
24184 // The return type has to be a 128-bit type with the same element
24185 // type as the input type.
24186 MVT EltVT = VT.getVectorElementType();
24187 MVT ShVT = MVT::getVectorVT(EltVT, 128 / EltVT.getSizeInBits());
24189 ShAmt = DAG.getBitcast(ShVT, ShAmt);
24190 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
24191 }
24193 /// Return Mask with the necessary casting or extending
24194 /// for \p Mask according to \p MaskVT when lowering masking intrinsics
24195 static SDValue getMaskNode(SDValue Mask, MVT MaskVT,
24196 const X86Subtarget &Subtarget, SelectionDAG &DAG,
24197 const SDLoc &dl) {
24199 if (isAllOnesConstant(Mask))
24200 return DAG.getConstant(1, dl, MaskVT);
24201 if (X86::isZeroNode(Mask))
24202 return DAG.getConstant(0, dl, MaskVT);
24204 assert(MaskVT.bitsLE(Mask.getSimpleValueType()) && "Unexpected mask size!");
24206 if (Mask.getSimpleValueType() == MVT::i64 && Subtarget.is32Bit()) {
24207 assert(MaskVT == MVT::v64i1 && "Expected v64i1 mask!");
24208 assert(Subtarget.hasBWI() && "Expected AVX512BW target!");
24209 // In 32-bit mode a bitcast of i64 is illegal, so extract and split it.
24210 SDValue Lo, Hi;
24211 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
24212 DAG.getConstant(0, dl, MVT::i32));
24213 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Mask,
24214 DAG.getConstant(1, dl, MVT::i32));
24216 Lo = DAG.getBitcast(MVT::v32i1, Lo);
24217 Hi = DAG.getBitcast(MVT::v32i1, Hi);
24219 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
24220 } else {
24221 MVT BitcastVT = MVT::getVectorVT(MVT::i1,
24222 Mask.getSimpleValueType().getSizeInBits());
24223 // In case when MaskVT equals v2i1 or v4i1, low 2 or 4 elements
24224 // are extracted by EXTRACT_SUBVECTOR.
24225 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
24226 DAG.getBitcast(BitcastVT, Mask),
24227 DAG.getIntPtrConstant(0, dl));
24228 }
24229 }
24231 /// Return (and \p Op, \p Mask) for compare instructions or
24232 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
24233 /// necessary casting or extending for \p Mask when lowering masking intrinsics
24234 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
24235 SDValue PreservedSrc,
24236 const X86Subtarget &Subtarget,
24237 SelectionDAG &DAG) {
24238 MVT VT = Op.getSimpleValueType();
24239 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
24240 unsigned OpcodeSelect = ISD::VSELECT;
24241 SDLoc dl(Op);
24243 if (isAllOnesConstant(Mask))
24244 return Op;
24246 SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
24248 if (PreservedSrc.isUndef())
24249 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
24250 return DAG.getNode(OpcodeSelect, dl, VT, VMask, Op, PreservedSrc);
24251 }
24253 /// Creates an SDNode for a predicated scalar operation.
24254 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
24255 /// The mask is coming as MVT::i8 and it should be transformed
24256 /// to MVT::v1i1 while lowering masking intrinsics.
24257 /// The main difference between ScalarMaskingNode and VectorMaskingNode is using
24258 /// "X86select" instead of "vselect". We just can't create the "vselect" node
24259 /// for a scalar instruction.
24260 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
24261 SDValue PreservedSrc,
24262 const X86Subtarget &Subtarget,
24263 SelectionDAG &DAG) {
24265 if (auto *MaskConst = dyn_cast<ConstantSDNode>(Mask))
24266 if (MaskConst->getZExtValue() & 0x1)
24267 return Op;
24269 MVT VT = Op.getSimpleValueType();
24270 SDLoc dl(Op);
24272 assert(Mask.getValueType() == MVT::i8 && "Unexpect type");
24273 SDValue IMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v1i1,
24274 DAG.getBitcast(MVT::v8i1, Mask),
24275 DAG.getIntPtrConstant(0, dl));
24276 if (Op.getOpcode() == X86ISD::FSETCCM ||
24277 Op.getOpcode() == X86ISD::FSETCCM_SAE ||
24278 Op.getOpcode() == X86ISD::VFPCLASSS)
24279 return DAG.getNode(ISD::AND, dl, VT, Op, IMask);
24281 if (PreservedSrc.isUndef())
24282 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
24283 return DAG.getNode(X86ISD::SELECTS, dl, VT, IMask, Op, PreservedSrc);
24284 }
24286 static int getSEHRegistrationNodeSize(const Function *Fn) {
24287 if (!Fn->hasPersonalityFn())
24288 report_fatal_error(
24289 "querying registration node size for function without personality");
24290 // The RegNodeSize is 6 32-bit words for SEH and 4 for C++ EH. See
24291 // WinEHStatePass for the full struct definition.
24292 switch (classifyEHPersonality(Fn->getPersonalityFn())) {
24293 case EHPersonality::MSVC_X86SEH: return 24;
24294 case EHPersonality::MSVC_CXX: return 16;
24295 default: break;
24296 }
24297 report_fatal_error(
24298 "can only recover FP for 32-bit MSVC EH personality functions");
24301 /// When the MSVC runtime transfers control to us, either to an outlined
24302 /// function or when returning to a parent frame after catching an exception, we
24303 /// recover the parent frame pointer by doing arithmetic on the incoming EBP.
24304 /// Here's the math:
24305 /// RegNodeBase = EntryEBP - RegNodeSize
24306 /// ParentFP = RegNodeBase - ParentFrameOffset
24307 /// Subtracting RegNodeSize takes us to the offset of the registration node, and
24308 /// subtracting the offset (negative on x86) takes us back to the parent FP.
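/// Worked example with hypothetical values: EntryEBP = 0x1000 and an MSVC
/// SEH personality (RegNodeSize = 24 = 0x18) give RegNodeBase = 0xfe8; a
/// ParentFrameOffset of -0x20 then yields ParentFP = 0xfe8 - (-0x20) =
/// 0x1008.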
24309 static SDValue recoverFramePointer(SelectionDAG &DAG, const Function *Fn,
24310 SDValue EntryEBP) {
24311 MachineFunction &MF = DAG.getMachineFunction();
24312 SDLoc dl;
24314 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24315 MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
24317 // It's possible that the parent function no longer has a personality function
24318 // if the exceptional code was optimized away, in which case we just return
24319 // the incoming EBP.
24320 if (!Fn->hasPersonalityFn())
24321 return EntryEBP;
24323 // Get an MCSymbol that will ultimately resolve to the frame offset of the EH
24324 // registration, or the .set_setframe offset.
24325 MCSymbol *OffsetSym =
24326 MF.getMMI().getContext().getOrCreateParentFrameOffsetSymbol(
24327 GlobalValue::dropLLVMManglingEscape(Fn->getName()));
24328 SDValue OffsetSymVal = DAG.getMCSymbol(OffsetSym, PtrVT);
24329 SDValue ParentFrameOffset =
24330 DAG.getNode(ISD::LOCAL_RECOVER, dl, PtrVT, OffsetSymVal);
24332 // Return EntryEBP + ParentFrameOffset for x64. This adjusts from RSP after
24333 // prologue to RBP in the parent function.
24334 const X86Subtarget &Subtarget =
24335 static_cast<const X86Subtarget &>(DAG.getSubtarget());
24336 if (Subtarget.is64Bit())
24337 return DAG.getNode(ISD::ADD, dl, PtrVT, EntryEBP, ParentFrameOffset);
24339 int RegNodeSize = getSEHRegistrationNodeSize(Fn);
24340 // RegNodeBase = EntryEBP - RegNodeSize
24341 // ParentFP = RegNodeBase - ParentFrameOffset
24342 SDValue RegNodeBase = DAG.getNode(ISD::SUB, dl, PtrVT, EntryEBP,
24343 DAG.getConstant(RegNodeSize, dl, PtrVT));
24344 return DAG.getNode(ISD::SUB, dl, PtrVT, RegNodeBase, ParentFrameOffset);
24345 }
24347 SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
24348 SelectionDAG &DAG) const {
24349 // Helper to detect if the operand is CUR_DIRECTION rounding mode.
24350 auto isRoundModeCurDirection = [](SDValue Rnd) {
24351 if (auto *C = dyn_cast<ConstantSDNode>(Rnd))
24352 return C->getAPIntValue() == X86::STATIC_ROUNDING::CUR_DIRECTION;
24354 return false;
24355 };
24356 auto isRoundModeSAE = [](SDValue Rnd) {
24357 if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
24358 unsigned RC = C->getZExtValue();
24359 if (RC & X86::STATIC_ROUNDING::NO_EXC) {
24360 // Clear the NO_EXC bit and check remaining bits.
24361 RC ^= X86::STATIC_ROUNDING::NO_EXC;
24362 // As a convenience we allow no other bits or explicitly
24363 // current direction.
24364 return RC == 0 || RC == X86::STATIC_ROUNDING::CUR_DIRECTION;
24365 }
24366 }
24368 return false;
24369 };
24370 auto isRoundModeSAEToX = [](SDValue Rnd, unsigned &RC) {
24371 if (auto *C = dyn_cast<ConstantSDNode>(Rnd)) {
24372 RC = C->getZExtValue();
24373 if (RC & X86::STATIC_ROUNDING::NO_EXC) {
24374 // Clear the NO_EXC bit and check remaining bits.
24375 RC ^= X86::STATIC_ROUNDING::NO_EXC;
24376 return RC == X86::STATIC_ROUNDING::TO_NEAREST_INT ||
24377 RC == X86::STATIC_ROUNDING::TO_NEG_INF ||
24378 RC == X86::STATIC_ROUNDING::TO_POS_INF ||
24379 RC == X86::STATIC_ROUNDING::TO_ZERO;
24380 }
24381 }
24383 return false;
24384 };
24386 SDLoc dl(Op);
24387 unsigned IntNo = Op.getConstantOperandVal(0);
24388 MVT VT = Op.getSimpleValueType();
24389 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
24390 if (IntrData) {
24392 switch(IntrData->Type) {
24393 case INTR_TYPE_1OP: {
24394 // We specify 2 possible opcodes for intrinsics with rounding modes.
24395 // First, we check if the intrinsic may have non-default rounding mode,
24396 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
24397 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
24398 if (IntrWithRoundingModeOpcode != 0) {
24399 SDValue Rnd = Op.getOperand(2);
24400 unsigned RC = 0;
24401 if (isRoundModeSAEToX(Rnd, RC))
24402 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
24403 Op.getOperand(1),
24404 DAG.getTargetConstant(RC, dl, MVT::i32));
24405 if (!isRoundModeCurDirection(Rnd))
24406 return SDValue();
24407 }
24408 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
24409 Op.getOperand(1));
24410 }
24411 case INTR_TYPE_1OP_SAE: {
24412 SDValue Sae = Op.getOperand(2);
24414 unsigned Opc;
24415 if (isRoundModeCurDirection(Sae))
24416 Opc = IntrData->Opc0;
24417 else if (isRoundModeSAE(Sae))
24418 Opc = IntrData->Opc1;
24419 else
24420 return SDValue();
24422 return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1));
24423 }
24424 case INTR_TYPE_2OP: {
24425 SDValue Src2 = Op.getOperand(2);
24427 // We specify 2 possible opcodes for intrinsics with rounding modes.
24428 // First, we check if the intrinsic may have non-default rounding mode,
24429 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
24430 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
24431 if (IntrWithRoundingModeOpcode != 0) {
24432 SDValue Rnd = Op.getOperand(3);
24433 unsigned RC = 0;
24434 if (isRoundModeSAEToX(Rnd, RC))
24435 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
24436 Op.getOperand(1), Src2,
24437 DAG.getTargetConstant(RC, dl, MVT::i32));
24438 if (!isRoundModeCurDirection(Rnd))
24439 return SDValue();
24440 }
24442 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
24443 Op.getOperand(1), Src2);
24444 }
24445 case INTR_TYPE_2OP_SAE: {
24446 SDValue Sae = Op.getOperand(3);
24448 unsigned Opc;
24449 if (isRoundModeCurDirection(Sae))
24450 Opc = IntrData->Opc0;
24451 else if (isRoundModeSAE(Sae))
24452 Opc = IntrData->Opc1;
24453 else
24454 return SDValue();
24456 return DAG.getNode(Opc, dl, Op.getValueType(), Op.getOperand(1),
24457 Op.getOperand(2));
24458 }
24459 case INTR_TYPE_3OP:
24460 case INTR_TYPE_3OP_IMM8: {
24461 SDValue Src1 = Op.getOperand(1);
24462 SDValue Src2 = Op.getOperand(2);
24463 SDValue Src3 = Op.getOperand(3);
24465 // We specify 2 possible opcodes for intrinsics with rounding modes.
24466 // First, we check if the intrinsic may have non-default rounding mode,
24467 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
24468 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
24469 if (IntrWithRoundingModeOpcode != 0) {
24470 SDValue Rnd = Op.getOperand(4);
24471 unsigned RC = 0;
24472 if (isRoundModeSAEToX(Rnd, RC))
24473 return DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
24474 {Src1, Src2, Src3,
24475 DAG.getTargetConstant(RC, dl, MVT::i32)});
24476 if (!isRoundModeCurDirection(Rnd))
24477 return SDValue();
24478 }
24480 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
24481 {Src1, Src2, Src3});
24482 }
24483 case INTR_TYPE_4OP:
24484 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
24485 Op.getOperand(2), Op.getOperand(3), Op.getOperand(4));
24486 case INTR_TYPE_1OP_MASK: {
24487 SDValue Src = Op.getOperand(1);
24488 SDValue PassThru = Op.getOperand(2);
24489 SDValue Mask = Op.getOperand(3);
24490 // We add rounding mode to the Node when
24491 // - RC Opcode is specified and
24492 // - RC is not "current direction".
24493 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
24494 if (IntrWithRoundingModeOpcode != 0) {
24495 SDValue Rnd = Op.getOperand(4);
24496 unsigned RC = 0;
24497 if (isRoundModeSAEToX(Rnd, RC))
24498 return getVectorMaskingNode(
24499 DAG.getNode(IntrWithRoundingModeOpcode, dl, Op.getValueType(),
24500 Src, DAG.getTargetConstant(RC, dl, MVT::i32)),
24501 Mask, PassThru, Subtarget, DAG);
24502 if (!isRoundModeCurDirection(Rnd))
24505 return getVectorMaskingNode(
24506 DAG.getNode(IntrData->Opc0, dl, VT, Src), Mask, PassThru,
24509 case INTR_TYPE_1OP_MASK_SAE: {
24510 SDValue Src = Op.getOperand(1);
24511 SDValue PassThru = Op.getOperand(2);
24512 SDValue Mask = Op.getOperand(3);
24513 SDValue Rnd = Op.getOperand(4);
24516 if (isRoundModeCurDirection(Rnd))
24517 Opc = IntrData->Opc0;
24518 else if (isRoundModeSAE(Rnd))
24519 Opc = IntrData->Opc1;
24523 return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src), Mask, PassThru,
    case INTR_TYPE_SCALAR_MASK: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue passThru = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);
      unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
      // There are 2 kinds of intrinsics in this group:
      // (1) With suppress-all-exceptions (sae) or rounding mode - 6 operands
      // (2) With rounding mode and sae - 7 operands.
      bool HasRounding = IntrWithRoundingModeOpcode != 0;
      if (Op.getNumOperands() == (5U + HasRounding)) {
        if (HasRounding) {
          SDValue Rnd = Op.getOperand(5);
          unsigned RC = 0;
          if (isRoundModeSAEToX(Rnd, RC))
            return getScalarMaskingNode(
                DAG.getNode(IntrWithRoundingModeOpcode, dl, VT, Src1, Src2,
                            DAG.getTargetConstant(RC, dl, MVT::i32)),
                Mask, passThru, Subtarget, DAG);
          if (!isRoundModeCurDirection(Rnd))
            return SDValue();
        }
        return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1,
                                                Src2),
                                    Mask, passThru, Subtarget, DAG);
      }

      assert(Op.getNumOperands() == (6U + HasRounding) &&
             "Unexpected intrinsic form");
      SDValue RoundingMode = Op.getOperand(5);
      unsigned Opc = IntrData->Opc0;
      if (HasRounding) {
        SDValue Sae = Op.getOperand(6);
        if (isRoundModeSAE(Sae))
          Opc = IntrWithRoundingModeOpcode;
        else if (!isRoundModeCurDirection(Sae))
          return SDValue();
      }
      return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1,
                                              Src2, RoundingMode),
                                  Mask, passThru, Subtarget, DAG);
    }
    case INTR_TYPE_SCALAR_MASK_RND: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue passThru = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);
      SDValue Rnd = Op.getOperand(5);

      SDValue NewOp;
      unsigned RC = 0;
      if (isRoundModeCurDirection(Rnd))
        NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
      else if (isRoundModeSAEToX(Rnd, RC))
        NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
                            DAG.getTargetConstant(RC, dl, MVT::i32));
      else
        return SDValue();

      return getScalarMaskingNode(NewOp, Mask, passThru, Subtarget, DAG);
    }
    case INTR_TYPE_SCALAR_MASK_SAE: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue passThru = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);
      SDValue Sae = Op.getOperand(5);
      unsigned Opc;
      if (isRoundModeCurDirection(Sae))
        Opc = IntrData->Opc0;
      else if (isRoundModeSAE(Sae))
        Opc = IntrData->Opc1;
      else
        return SDValue();

      return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
                                  Mask, passThru, Subtarget, DAG);
    }
    case INTR_TYPE_2OP_MASK: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue PassThru = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);
      SDValue NewOp;
      if (IntrData->Opc1 != 0) {
        SDValue Rnd = Op.getOperand(5);
        unsigned RC = 0;
        if (isRoundModeSAEToX(Rnd, RC))
          NewOp = DAG.getNode(IntrData->Opc1, dl, VT, Src1, Src2,
                              DAG.getTargetConstant(RC, dl, MVT::i32));
        else if (!isRoundModeCurDirection(Rnd))
          return SDValue();
      }
      if (!NewOp)
        NewOp = DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2);
      return getVectorMaskingNode(NewOp, Mask, PassThru, Subtarget, DAG);
    }
    case INTR_TYPE_2OP_MASK_SAE: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue PassThru = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);

      unsigned Opc = IntrData->Opc0;
      if (IntrData->Opc1 != 0) {
        SDValue Sae = Op.getOperand(5);
        if (isRoundModeSAE(Sae))
          Opc = IntrData->Opc1;
        else if (!isRoundModeCurDirection(Sae))
          return SDValue();
      }

      return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2),
                                  Mask, PassThru, Subtarget, DAG);
    }
    case INTR_TYPE_3OP_SCALAR_MASK_SAE: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue Src3 = Op.getOperand(3);
      SDValue PassThru = Op.getOperand(4);
      SDValue Mask = Op.getOperand(5);
      SDValue Sae = Op.getOperand(6);
      unsigned Opc;
      if (isRoundModeCurDirection(Sae))
        Opc = IntrData->Opc0;
      else if (isRoundModeSAE(Sae))
        Opc = IntrData->Opc1;
      else
        return SDValue();

      return getScalarMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
                                  Mask, PassThru, Subtarget, DAG);
    }
    case INTR_TYPE_3OP_MASK_SAE: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue Src3 = Op.getOperand(3);
      SDValue PassThru = Op.getOperand(4);
      SDValue Mask = Op.getOperand(5);

      unsigned Opc = IntrData->Opc0;
      if (IntrData->Opc1 != 0) {
        SDValue Sae = Op.getOperand(6);
        if (isRoundModeSAE(Sae))
          Opc = IntrData->Opc1;
        else if (!isRoundModeCurDirection(Sae))
          return SDValue();
      }
      return getVectorMaskingNode(DAG.getNode(Opc, dl, VT, Src1, Src2, Src3),
                                  Mask, PassThru, Subtarget, DAG);
    }
    case BLENDV: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue Src3 = Op.getOperand(3);

      EVT MaskVT = Src3.getValueType().changeVectorElementTypeToInteger();
      Src3 = DAG.getBitcast(MaskVT, Src3);

      // Reverse the operands to match VSELECT order.
      return DAG.getNode(IntrData->Opc0, dl, VT, Src3, Src2, Src1);
    }
    case VPERM_2OP: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);

      // Swap Src1 and Src2 in the node creation.
      return DAG.getNode(IntrData->Opc0, dl, VT, Src2, Src1);
    }
    case IFMA_OP:
      // NOTE: We need to swizzle the operands to pass the multiply operands
      // first.
      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
                         Op.getOperand(2), Op.getOperand(3), Op.getOperand(1));
    case FPCLASSS: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Imm = Op.getOperand(2);
      SDValue Mask = Op.getOperand(3);
      SDValue FPclass = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Imm);
      SDValue FPclassMask = getScalarMaskingNode(FPclass, Mask, SDValue(),
                                                 Subtarget, DAG);
      // Need to fill with zeros to ensure the bitcast will produce zeroes
      // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
      SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
                                DAG.getConstant(0, dl, MVT::v8i1),
                                FPclassMask, DAG.getIntPtrConstant(0, dl));
      return DAG.getBitcast(MVT::i8, Ins);
    }

    case CMP_MASK_CC: {
      MVT MaskVT = Op.getSimpleValueType();
      SDValue CC = Op.getOperand(3);
      // We specify 2 possible opcodes for intrinsics with rounding modes.
      // First, we check if the intrinsic may have non-default rounding mode,
      // (IntrData->Opc1 != 0), then we check the rounding mode operand.
      if (IntrData->Opc1 != 0) {
        SDValue Sae = Op.getOperand(4);
        if (isRoundModeSAE(Sae))
          return DAG.getNode(IntrData->Opc1, dl, MaskVT, Op.getOperand(1),
                             Op.getOperand(2), CC, Sae);
        if (!isRoundModeCurDirection(Sae))
          return SDValue();
      }
      // Default rounding mode.
      return DAG.getNode(IntrData->Opc0, dl, MaskVT,
                         {Op.getOperand(1), Op.getOperand(2), CC});
    }
    case CMP_MASK_SCALAR_CC: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue CC = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);

      SDValue Cmp;
      if (IntrData->Opc1 != 0) {
        SDValue Sae = Op.getOperand(5);
        if (isRoundModeSAE(Sae))
          Cmp = DAG.getNode(IntrData->Opc1, dl, MVT::v1i1, Src1, Src2, CC, Sae);
        else if (!isRoundModeCurDirection(Sae))
          return SDValue();
      }
      // Default rounding mode.
      if (!Cmp.getNode())
        Cmp = DAG.getNode(IntrData->Opc0, dl, MVT::v1i1, Src1, Src2, CC);

      SDValue CmpMask = getScalarMaskingNode(Cmp, Mask, SDValue(),
                                             Subtarget, DAG);
      // Need to fill with zeros to ensure the bitcast will produce zeroes
      // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
      SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v8i1,
                                DAG.getConstant(0, dl, MVT::v8i1),
                                CmpMask, DAG.getIntPtrConstant(0, dl));
      return DAG.getBitcast(MVT::i8, Ins);
    }
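    // For the COMI family below: (U)COMISS/(U)COMISD set ZF/PF/CF from the
    // compare, and an unordered result (a NaN operand) sets all three, so an
    // IEEE equality test must also check PF to reject NaNs.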
    case COMI: { // Comparison intrinsics
      ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
      SDValue LHS = Op.getOperand(1);
      SDValue RHS = Op.getOperand(2);
      // Some conditions require the operands to be swapped.
      if (CC == ISD::SETLT || CC == ISD::SETLE)
        std::swap(LHS, RHS);

      SDValue Comi = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
      SDValue SetCC;
      switch (CC) {
      case ISD::SETEQ: { // (ZF = 0 and PF = 0)
        SetCC = getSETCC(X86::COND_E, Comi, dl, DAG);
        SDValue SetNP = getSETCC(X86::COND_NP, Comi, dl, DAG);
        SetCC = DAG.getNode(ISD::AND, dl, MVT::i8, SetCC, SetNP);
        break;
      }
      case ISD::SETNE: { // (ZF = 1 or PF = 1)
        SetCC = getSETCC(X86::COND_NE, Comi, dl, DAG);
        SDValue SetP = getSETCC(X86::COND_P, Comi, dl, DAG);
        SetCC = DAG.getNode(ISD::OR, dl, MVT::i8, SetCC, SetP);
        break;
      }
      case ISD::SETGT: // (CF = 0 and ZF = 0)
      case ISD::SETLT: { // Condition opposite to GT. Operands swapped above.
        SetCC = getSETCC(X86::COND_A, Comi, dl, DAG);
        break;
      }
      case ISD::SETGE: // CF = 0
      case ISD::SETLE: // Condition opposite to GE. Operands swapped above.
        SetCC = getSETCC(X86::COND_AE, Comi, dl, DAG);
        break;
      default:
        llvm_unreachable("Unexpected illegal condition!");
      }
      return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
    }
    case COMI_RM: { // Comparison intrinsics with Sae
      SDValue LHS = Op.getOperand(1);
      SDValue RHS = Op.getOperand(2);
      unsigned CondVal = Op.getConstantOperandVal(3);
      SDValue Sae = Op.getOperand(4);

      SDValue FCmp;
      if (isRoundModeCurDirection(Sae))
        FCmp = DAG.getNode(X86ISD::FSETCCM, dl, MVT::v1i1, LHS, RHS,
                           DAG.getTargetConstant(CondVal, dl, MVT::i8));
      else if (isRoundModeSAE(Sae))
        FCmp = DAG.getNode(X86ISD::FSETCCM_SAE, dl, MVT::v1i1, LHS, RHS,
                           DAG.getTargetConstant(CondVal, dl, MVT::i8), Sae);
      else
        return SDValue();
      // Need to fill with zeros to ensure the bitcast will produce zeroes
      // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
      SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, MVT::v16i1,
                                DAG.getConstant(0, dl, MVT::v16i1),
                                FCmp, DAG.getIntPtrConstant(0, dl));
      return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32,
                         DAG.getBitcast(MVT::i16, Ins));
    }
    case VSHIFT:
      return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
                                 Op.getOperand(1), Op.getOperand(2), Subtarget,
                                 DAG);
    case COMPRESS_EXPAND_IN_REG: {
      SDValue Mask = Op.getOperand(3);
      SDValue DataToCompress = Op.getOperand(1);
      SDValue PassThru = Op.getOperand(2);
      if (ISD::isBuildVectorAllOnes(Mask.getNode())) // return data as is
        return Op.getOperand(1);

      // Avoid false dependency.
      if (PassThru.isUndef())
        PassThru = DAG.getConstant(0, dl, VT);

      return DAG.getNode(IntrData->Opc0, dl, VT, DataToCompress, PassThru,
                         Mask);
    }
    case FIXUPIMM:
    case FIXUPIMM_MASKZ: {
      SDValue Src1 = Op.getOperand(1);
      SDValue Src2 = Op.getOperand(2);
      SDValue Src3 = Op.getOperand(3);
      SDValue Imm = Op.getOperand(4);
      SDValue Mask = Op.getOperand(5);
      SDValue Passthru = (IntrData->Type == FIXUPIMM)
                             ? Src1
                             : getZeroVector(VT, Subtarget, DAG, dl);

      unsigned Opc = IntrData->Opc0;
      if (IntrData->Opc1 != 0) {
        SDValue Sae = Op.getOperand(6);
        if (isRoundModeSAE(Sae))
          Opc = IntrData->Opc1;
        else if (!isRoundModeCurDirection(Sae))
          return SDValue();
      }

      SDValue FixupImm = DAG.getNode(Opc, dl, VT, Src1, Src2, Src3, Imm);

      if (Opc == X86ISD::VFIXUPIMM || Opc == X86ISD::VFIXUPIMM_SAE)
        return getVectorMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);

      return getScalarMaskingNode(FixupImm, Mask, Passthru, Subtarget, DAG);
    }
    case ROUNDP: {
      assert(IntrData->Opc0 == X86ISD::VRNDSCALE && "Unexpected opcode");
      // Clear the upper bits of the rounding immediate so that the legacy
      // intrinsic can't trigger the scaling behavior of VRNDSCALE.
      auto Round = cast<ConstantSDNode>(Op.getOperand(2));
      SDValue RoundingMode =
          DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
                         Op.getOperand(1), RoundingMode);
    }
    case ROUNDS: {
      assert(IntrData->Opc0 == X86ISD::VRNDSCALES && "Unexpected opcode");
      // Clear the upper bits of the rounding immediate so that the legacy
      // intrinsic can't trigger the scaling behavior of VRNDSCALE.
      auto Round = cast<ConstantSDNode>(Op.getOperand(3));
      SDValue RoundingMode =
          DAG.getTargetConstant(Round->getZExtValue() & 0xf, dl, MVT::i32);
      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
                         Op.getOperand(1), Op.getOperand(2), RoundingMode);
    }
    case BEXTRI: {
      assert(IntrData->Opc0 == X86ISD::BEXTR && "Unexpected opcode");

      // The control is a TargetConstant, but we need to convert it to a
      // regular constant.
      uint64_t Imm = Op.getConstantOperandVal(2);
      SDValue Control = DAG.getConstant(Imm, dl, Op.getValueType());
      return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(),
                         Op.getOperand(1), Control);
    }
    // ADC/ADCX/SBB
    case ADX: {
      SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::i32);
      SDVTList VTs = DAG.getVTList(Op.getOperand(2).getValueType(), MVT::i32);

      SDValue Res;
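      // A nonzero carry-in can't be consumed directly; it is materialized into
      // EFLAGS.CF below by adding -1 to the i8 carry operand, which produces a
      // carry exactly when the carry-in was nonzero.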
      // If the carry in is zero, then we should just use ADD/SUB instead of
      // ADC/SBB.
      if (isNullConstant(Op.getOperand(1))) {
        Res = DAG.getNode(IntrData->Opc1, dl, VTs, Op.getOperand(2),
                          Op.getOperand(3));
      } else {
        SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(1),
                                    DAG.getConstant(-1, dl, MVT::i8));
        Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(2),
                          Op.getOperand(3), GenCF.getValue(1));
      }
      SDValue SetCC = getSETCC(X86::COND_B, Res.getValue(1), dl, DAG);
      SDValue Results[] = { SetCC, Res };
      return DAG.getMergeValues(Results, dl);
    }
    case CVTPD2PS_MASK:
    case CVTPD2DQ_MASK:
    case CVTQQ2PS_MASK:
    case TRUNCATE_TO_REG: {
      SDValue Src = Op.getOperand(1);
      SDValue PassThru = Op.getOperand(2);
      SDValue Mask = Op.getOperand(3);

      if (isAllOnesConstant(Mask))
        return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);

      MVT SrcVT = Src.getSimpleValueType();
      MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
      Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
      return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(),
                         {Src, PassThru, Mask});
    }
    case CVTPS2PH_MASK: {
      SDValue Src = Op.getOperand(1);
      SDValue Rnd = Op.getOperand(2);
      SDValue PassThru = Op.getOperand(3);
      SDValue Mask = Op.getOperand(4);

      if (isAllOnesConstant(Mask))
        return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src, Rnd);

      MVT SrcVT = Src.getSimpleValueType();
      MVT MaskVT = MVT::getVectorVT(MVT::i1, SrcVT.getVectorNumElements());
      Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
      return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, Rnd,
                         PassThru, Mask);
    }
    case CVTNEPS2BF16_MASK: {
      SDValue Src = Op.getOperand(1);
      SDValue PassThru = Op.getOperand(2);
      SDValue Mask = Op.getOperand(3);

      if (ISD::isBuildVectorAllOnes(Mask.getNode()))
        return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Src);

      // Break false dependency.
      if (PassThru.isUndef())
        PassThru = DAG.getConstant(0, dl, PassThru.getValueType());

      return DAG.getNode(IntrData->Opc1, dl, Op.getValueType(), Src, PassThru,
                         Mask);
    }
    default:
      break;
    }
  }

  switch (IntNo) {
  default: return SDValue(); // Don't custom lower most intrinsics.
  // ptest and testp intrinsics. The intrinsics these come from are designed to
  // return an integer value, not just an instruction, so lower them to the
  // ptest or testp pattern and a setcc for the result.
  case Intrinsic::x86_avx512_ktestc_b:
  case Intrinsic::x86_avx512_ktestc_w:
  case Intrinsic::x86_avx512_ktestc_d:
  case Intrinsic::x86_avx512_ktestc_q:
  case Intrinsic::x86_avx512_ktestz_b:
  case Intrinsic::x86_avx512_ktestz_w:
  case Intrinsic::x86_avx512_ktestz_d:
  case Intrinsic::x86_avx512_ktestz_q:
  case Intrinsic::x86_sse41_ptestz:
  case Intrinsic::x86_sse41_ptestc:
  case Intrinsic::x86_sse41_ptestnzc:
  case Intrinsic::x86_avx_ptestz_256:
  case Intrinsic::x86_avx_ptestc_256:
  case Intrinsic::x86_avx_ptestnzc_256:
  case Intrinsic::x86_avx_vtestz_ps:
  case Intrinsic::x86_avx_vtestc_ps:
  case Intrinsic::x86_avx_vtestnzc_ps:
  case Intrinsic::x86_avx_vtestz_pd:
  case Intrinsic::x86_avx_vtestc_pd:
  case Intrinsic::x86_avx_vtestnzc_pd:
  case Intrinsic::x86_avx_vtestz_ps_256:
  case Intrinsic::x86_avx_vtestc_ps_256:
  case Intrinsic::x86_avx_vtestnzc_ps_256:
  case Intrinsic::x86_avx_vtestz_pd_256:
  case Intrinsic::x86_avx_vtestc_pd_256:
  case Intrinsic::x86_avx_vtestnzc_pd_256: {
    unsigned TestOpc = X86ISD::PTEST;
    X86::CondCode X86CC;
    switch (IntNo) {
    default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
    case Intrinsic::x86_avx512_ktestc_b:
    case Intrinsic::x86_avx512_ktestc_w:
    case Intrinsic::x86_avx512_ktestc_d:
    case Intrinsic::x86_avx512_ktestc_q:
      // CF = 1
      TestOpc = X86ISD::KTEST;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_avx512_ktestz_b:
    case Intrinsic::x86_avx512_ktestz_w:
    case Intrinsic::x86_avx512_ktestz_d:
    case Intrinsic::x86_avx512_ktestz_q:
      TestOpc = X86ISD::KTEST;
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_avx_vtestz_ps:
    case Intrinsic::x86_avx_vtestz_pd:
    case Intrinsic::x86_avx_vtestz_ps_256:
    case Intrinsic::x86_avx_vtestz_pd_256:
      TestOpc = X86ISD::TESTP;
      LLVM_FALLTHROUGH;
    case Intrinsic::x86_sse41_ptestz:
    case Intrinsic::x86_avx_ptestz_256:
      // ZF = 1
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_avx_vtestc_ps:
    case Intrinsic::x86_avx_vtestc_pd:
    case Intrinsic::x86_avx_vtestc_ps_256:
    case Intrinsic::x86_avx_vtestc_pd_256:
      TestOpc = X86ISD::TESTP;
      LLVM_FALLTHROUGH;
    case Intrinsic::x86_sse41_ptestc:
    case Intrinsic::x86_avx_ptestc_256:
      // CF = 1
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_avx_vtestnzc_ps:
    case Intrinsic::x86_avx_vtestnzc_pd:
    case Intrinsic::x86_avx_vtestnzc_ps_256:
    case Intrinsic::x86_avx_vtestnzc_pd_256:
      TestOpc = X86ISD::TESTP;
      LLVM_FALLTHROUGH;
    case Intrinsic::x86_sse41_ptestnzc:
    case Intrinsic::x86_avx_ptestnzc_256:
      // ZF and CF = 0
      X86CC = X86::COND_A;
      break;
    }

    SDValue LHS = Op.getOperand(1);
    SDValue RHS = Op.getOperand(2);
    SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
    SDValue SetCC = getSETCC(X86CC, Test, dl, DAG);
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }
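  // The PCMPISTR/PCMPESTR nodes below produce three results: the index (i32),
  // the mask (v16i8), and EFLAGS (i32). The *a/*c/*o/*s/*z intrinsic variants
  // read only the EFLAGS result (getValue(2)) and extract a single flag.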
  case Intrinsic::x86_sse42_pcmpistria128:
  case Intrinsic::x86_sse42_pcmpestria128:
  case Intrinsic::x86_sse42_pcmpistric128:
  case Intrinsic::x86_sse42_pcmpestric128:
  case Intrinsic::x86_sse42_pcmpistrio128:
  case Intrinsic::x86_sse42_pcmpestrio128:
  case Intrinsic::x86_sse42_pcmpistris128:
  case Intrinsic::x86_sse42_pcmpestris128:
  case Intrinsic::x86_sse42_pcmpistriz128:
  case Intrinsic::x86_sse42_pcmpestriz128: {
    unsigned Opcode;
    X86::CondCode X86CC;
    switch (IntNo) {
    default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
    case Intrinsic::x86_sse42_pcmpistria128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_A;
      break;
    case Intrinsic::x86_sse42_pcmpestria128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_A;
      break;
    case Intrinsic::x86_sse42_pcmpistric128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_sse42_pcmpestric128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_sse42_pcmpistrio128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_O;
      break;
    case Intrinsic::x86_sse42_pcmpestrio128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_O;
      break;
    case Intrinsic::x86_sse42_pcmpistris128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_S;
      break;
    case Intrinsic::x86_sse42_pcmpestris128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_S;
      break;
    case Intrinsic::x86_sse42_pcmpistriz128:
      Opcode = X86ISD::PCMPISTR;
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_sse42_pcmpestriz128:
      Opcode = X86ISD::PCMPESTR;
      X86CC = X86::COND_E;
      break;
    }
    SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
    SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps).getValue(2);
    SDValue SetCC = getSETCC(X86CC, PCMP, dl, DAG);
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }
  case Intrinsic::x86_sse42_pcmpistri128:
  case Intrinsic::x86_sse42_pcmpestri128: {
    unsigned Opcode;
    if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
      Opcode = X86ISD::PCMPISTR;
    else
      Opcode = X86ISD::PCMPESTR;

    SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
    return DAG.getNode(Opcode, dl, VTs, NewOps);
  }

  case Intrinsic::x86_sse42_pcmpistrm128:
  case Intrinsic::x86_sse42_pcmpestrm128: {
    unsigned Opcode;
    if (IntNo == Intrinsic::x86_sse42_pcmpistrm128)
      Opcode = X86ISD::PCMPISTR;
    else
      Opcode = X86ISD::PCMPESTR;

    SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::v16i8, MVT::i32);
    return DAG.getNode(Opcode, dl, VTs, NewOps).getValue(1);
  }
  case Intrinsic::eh_sjlj_lsda: {
    MachineFunction &MF = DAG.getMachineFunction();
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    auto &Context = MF.getMMI().getContext();
    MCSymbol *S = Context.getOrCreateSymbol(Twine("GCC_except_table") +
                                            Twine(MF.getFunctionNumber()));
    return DAG.getNode(getGlobalWrapperKind(), dl, VT,
                       DAG.getMCSymbol(S, PtrVT));
  }

  case Intrinsic::x86_seh_lsda: {
    // Compute the symbol for the LSDA. We know it'll get emitted later.
    MachineFunction &MF = DAG.getMachineFunction();
    SDValue Op1 = Op.getOperand(1);
    auto *Fn = cast<Function>(cast<GlobalAddressSDNode>(Op1)->getGlobal());
    MCSymbol *LSDASym = MF.getMMI().getContext().getOrCreateLSDASymbol(
        GlobalValue::dropLLVMManglingEscape(Fn->getName()));

    // Generate a simple absolute symbol reference. This intrinsic is only
    // supported on 32-bit Windows, which isn't PIC.
    SDValue Result = DAG.getMCSymbol(LSDASym, VT);
    return DAG.getNode(X86ISD::Wrapper, dl, VT, Result);
  }

  case Intrinsic::eh_recoverfp: {
    SDValue FnOp = Op.getOperand(1);
    SDValue IncomingFPOp = Op.getOperand(2);
    GlobalAddressSDNode *GSD = dyn_cast<GlobalAddressSDNode>(FnOp);
    auto *Fn = dyn_cast_or_null<Function>(GSD ? GSD->getGlobal() : nullptr);
    if (!Fn)
      report_fatal_error(
          "llvm.eh.recoverfp must take a function as the first argument");
    return recoverFramePointer(DAG, Fn, IncomingFPOp);
  }
  case Intrinsic::localaddress: {
    // Returns one of the stack, base, or frame pointer registers, depending on
    // which is used to reference local variables.
    MachineFunction &MF = DAG.getMachineFunction();
    const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
    unsigned Reg;
    if (RegInfo->hasBasePointer(MF))
      Reg = RegInfo->getBaseRegister();
    else { // Handles the SP or FP case.
      bool CantUseFP = RegInfo->needsStackRealignment(MF);
      if (CantUseFP)
        Reg = RegInfo->getPtrSizedStackRegister(MF);
      else
        Reg = RegInfo->getPtrSizedFrameRegister(MF);
    }
    return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
  }
  case Intrinsic::x86_avx512_vp2intersect_q_512:
  case Intrinsic::x86_avx512_vp2intersect_q_256:
  case Intrinsic::x86_avx512_vp2intersect_q_128:
  case Intrinsic::x86_avx512_vp2intersect_d_512:
  case Intrinsic::x86_avx512_vp2intersect_d_256:
  case Intrinsic::x86_avx512_vp2intersect_d_128: {
    MVT MaskVT = Op.getSimpleValueType();
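    // VP2INTERSECT writes an even/odd pair of mask registers; the pair is
    // modeled here as a single MVT::Untyped value, and each half is pulled
    // out with the sub_mask_0/sub_mask_1 subregister indices below.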
    SDVTList VTs = DAG.getVTList(MVT::Untyped, MVT::Other);
    SDLoc DL(Op);

    SDValue Operation =
        DAG.getNode(X86ISD::VP2INTERSECT, DL, VTs,
                    Op->getOperand(1), Op->getOperand(2));

    SDValue Result0 = DAG.getTargetExtractSubreg(X86::sub_mask_0, DL,
                                                 MaskVT, Operation);
    SDValue Result1 = DAG.getTargetExtractSubreg(X86::sub_mask_1, DL,
                                                 MaskVT, Operation);
    return DAG.getMergeValues({Result0, Result1}, DL);
  }
  case Intrinsic::x86_mmx_pslli_w:
  case Intrinsic::x86_mmx_pslli_d:
  case Intrinsic::x86_mmx_pslli_q:
  case Intrinsic::x86_mmx_psrli_w:
  case Intrinsic::x86_mmx_psrli_d:
  case Intrinsic::x86_mmx_psrli_q:
  case Intrinsic::x86_mmx_psrai_w:
  case Intrinsic::x86_mmx_psrai_d: {
    SDLoc DL(Op);
    SDValue ShAmt = Op.getOperand(2);
    // If the argument is a constant, convert it to a target constant.
    if (auto *C = dyn_cast<ConstantSDNode>(ShAmt)) {
      // Clamp out of bounds shift amounts since they will otherwise be masked
      // to 8-bits which may make it no longer out of bounds.
      unsigned ShiftAmount = C->getAPIntValue().getLimitedValue(255);
      if (ShiftAmount == 0)
        return Op.getOperand(1);

      return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
                         Op.getOperand(0), Op.getOperand(1),
                         DAG.getTargetConstant(ShiftAmount, DL, MVT::i32));
    }
    unsigned NewIntrinsic;
    switch (IntNo) {
    default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
    case Intrinsic::x86_mmx_pslli_w:
      NewIntrinsic = Intrinsic::x86_mmx_psll_w;
      break;
    case Intrinsic::x86_mmx_pslli_d:
      NewIntrinsic = Intrinsic::x86_mmx_psll_d;
      break;
    case Intrinsic::x86_mmx_pslli_q:
      NewIntrinsic = Intrinsic::x86_mmx_psll_q;
      break;
    case Intrinsic::x86_mmx_psrli_w:
      NewIntrinsic = Intrinsic::x86_mmx_psrl_w;
      break;
    case Intrinsic::x86_mmx_psrli_d:
      NewIntrinsic = Intrinsic::x86_mmx_psrl_d;
      break;
    case Intrinsic::x86_mmx_psrli_q:
      NewIntrinsic = Intrinsic::x86_mmx_psrl_q;
      break;
    case Intrinsic::x86_mmx_psrai_w:
      NewIntrinsic = Intrinsic::x86_mmx_psra_w;
      break;
    case Intrinsic::x86_mmx_psrai_d:
      NewIntrinsic = Intrinsic::x86_mmx_psra_d;
      break;
    }

    // The vector shift intrinsics with scalars use 32b shift amounts but
    // the sse2/mmx shift instructions read 64 bits. Copy the 32 bits to an
    // MMX register.
    ShAmt = DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, ShAmt);
    return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(),
                       DAG.getConstant(NewIntrinsic, DL, MVT::i32),
                       Op.getOperand(1), ShAmt);
  }
  }
}

static SDValue getAVX2GatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
                                 SDValue Src, SDValue Mask, SDValue Base,
                                 SDValue Index, SDValue ScaleOp, SDValue Chain,
                                 const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
  // Scale must be constant.
  if (!C)
    return SDValue();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
                                        TLI.getPointerTy(DAG.getDataLayout()));
  EVT MaskVT = Mask.getValueType().changeVectorElementTypeToInteger();
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Other);
  // If source is undef or we know it won't be used, use a zero vector
  // to break register dependency.
  // TODO: use undef instead and let BreakFalseDeps deal with it?
  if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
    Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);

  // Cast mask to an integer type.
  Mask = DAG.getBitcast(MaskVT, Mask);

  MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);

  SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale};
  SDValue Res =
      DAG.getMemIntrinsicNode(X86ISD::MGATHER, dl, VTs, Ops,
                              MemIntr->getMemoryVT(), MemIntr->getMemOperand());
  return DAG.getMergeValues({Res, Res.getValue(1)}, dl);
}

static SDValue getGatherNode(SDValue Op, SelectionDAG &DAG,
                             SDValue Src, SDValue Mask, SDValue Base,
                             SDValue Index, SDValue ScaleOp, SDValue Chain,
                             const X86Subtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
  // Scale must be constant.
  if (!C)
    return SDValue();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
                                        TLI.getPointerTy(DAG.getDataLayout()));
  unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
                              VT.getVectorNumElements());
  MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);

  // We support two versions of the gather intrinsics. One with scalar mask and
  // one with vXi1 mask. Convert scalar to vXi1 if necessary.
  if (Mask.getValueType() != MaskVT)
    Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Other);
  // If source is undef or we know it won't be used, use a zero vector
  // to break register dependency.
  // TODO: use undef instead and let BreakFalseDeps deal with it?
  if (Src.isUndef() || ISD::isBuildVectorAllOnes(Mask.getNode()))
    Src = getZeroVector(Op.getSimpleValueType(), Subtarget, DAG, dl);

  MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);

  SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale};
  SDValue Res =
      DAG.getMemIntrinsicNode(X86ISD::MGATHER, dl, VTs, Ops,
                              MemIntr->getMemoryVT(), MemIntr->getMemOperand());
  return DAG.getMergeValues({Res, Res.getValue(1)}, dl);
}

static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
                              SDValue Src, SDValue Mask, SDValue Base,
                              SDValue Index, SDValue ScaleOp, SDValue Chain,
                              const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
  // Scale must be constant.
  if (!C)
    return SDValue();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
                                        TLI.getPointerTy(DAG.getDataLayout()));
  unsigned MinElts = std::min(Index.getSimpleValueType().getVectorNumElements(),
                              Src.getSimpleValueType().getVectorNumElements());
  MVT MaskVT = MVT::getVectorVT(MVT::i1, MinElts);

  // We support two versions of the scatter intrinsics. One with scalar mask and
  // one with vXi1 mask. Convert scalar to vXi1 if necessary.
  if (Mask.getValueType() != MaskVT)
    Mask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);

  MemIntrinsicSDNode *MemIntr = cast<MemIntrinsicSDNode>(Op);

  SDVTList VTs = DAG.getVTList(MVT::Other);
  SDValue Ops[] = {Chain, Src, Mask, Base, Index, Scale};
  SDValue Res =
      DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
                              MemIntr->getMemoryVT(), MemIntr->getMemOperand());
  return Res;
}

static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
                               SDValue Mask, SDValue Base, SDValue Index,
                               SDValue ScaleOp, SDValue Chain,
                               const X86Subtarget &Subtarget) {
  SDLoc dl(Op);
  auto *C = dyn_cast<ConstantSDNode>(ScaleOp);
  // Scale must be constant.
  if (!C)
    return SDValue();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), dl,
                                        TLI.getPointerTy(DAG.getDataLayout()));
  SDValue Disp = DAG.getTargetConstant(0, dl, MVT::i32);
  SDValue Segment = DAG.getRegister(0, MVT::i32);
  MVT MaskVT =
    MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
  SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
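  // The machine-node operands below follow the usual X86 memory-operand
  // layout (base, scale, index, displacement, segment), preceded by the mask
  // and followed by the chain.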
  SDValue Ops[] = {VMask, Base, Scale, Index, Disp, Segment, Chain};
  SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
  return SDValue(Res, 0);
}

/// Handles the lowering of builtin intrinsics with chain that return their
/// value into registers EDX:EAX.
/// If operand SrcReg is a valid register identifier, then operand 2 of N is
/// copied to SrcReg. The assumption is that SrcReg is an implicit input to
/// TargetOpcode.
/// Returns a Glue value which can be used to add extra copy-from-reg if the
/// expanded intrinsics implicitly defines extra registers (i.e. not just
/// EDX:EAX).
static SDValue expandIntrinsicWChainHelper(SDNode *N, const SDLoc &DL,
                                           SelectionDAG &DAG,
                                           unsigned TargetOpcode,
                                           unsigned SrcReg,
                                           const X86Subtarget &Subtarget,
                                           SmallVectorImpl<SDValue> &Results) {
  SDValue Chain = N->getOperand(0);
  SDValue Glue;
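  // The Glue value threads the CopyToReg, the machine node, and the
  // CopyFromRegs below into one indivisible sequence, so the scheduler cannot
  // clobber the implicit register operands in between.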
  if (SrcReg) {
    assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
    Chain = DAG.getCopyToReg(Chain, DL, SrcReg, N->getOperand(2), Glue);
    Glue = Chain.getValue(1);
  }

  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue N1Ops[] = {Chain, Glue};
  SDNode *N1 = DAG.getMachineNode(
      TargetOpcode, DL, Tys, ArrayRef<SDValue>(N1Ops, Glue.getNode() ? 2 : 1));
  Chain = SDValue(N1, 0);

  // Reads the content of XCR and returns it in registers EDX:EAX.
  SDValue LO, HI;
  if (Subtarget.is64Bit()) {
    LO = DAG.getCopyFromReg(Chain, DL, X86::RAX, MVT::i64, SDValue(N1, 1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
                            LO.getValue(2));
  } else {
    LO = DAG.getCopyFromReg(Chain, DL, X86::EAX, MVT::i32, SDValue(N1, 1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
                            LO.getValue(2));
  }
  Chain = HI.getValue(1);
  Glue = HI.getValue(2);

  if (Subtarget.is64Bit()) {
    // Merge the two 32-bit values into a 64-bit one.
    SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
                              DAG.getConstant(32, DL, MVT::i8));
    Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
    Results.push_back(Chain);
    return Glue;
  }

  // Use a buildpair to merge the two 32-bit values into a 64-bit one.
  SDValue Ops[] = { LO, HI };
  SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
  Results.push_back(Pair);
  Results.push_back(Chain);
  return Glue;
}

/// Handles the lowering of builtin intrinsics that read the time stamp counter
/// (x86_rdtsc and x86_rdtscp). This function is also used to custom lower
/// READCYCLECOUNTER nodes.
static void getReadTimeStampCounter(SDNode *N, const SDLoc &DL, unsigned Opcode,
                                    SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget,
                                    SmallVectorImpl<SDValue> &Results) {
  // The processor's time-stamp counter (a 64-bit MSR) is stored into the
  // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
  // and the EAX register is loaded with the low-order 32 bits.
  SDValue Glue = expandIntrinsicWChainHelper(N, DL, DAG, Opcode,
                                             /* NoRegister */0, Subtarget,
                                             Results);
  if (Opcode != X86::RDTSCP)
    return;

  SDValue Chain = Results[1];
  // Instruction RDTSCP loads the IA32:TSC_AUX_MSR (address C000_0103H) into
  // the ECX register. Add 'ecx' explicitly to the chain.
  SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32, Glue);
  Results.push_back(ecx);
  Results.push_back(ecx.getValue(1));
}

static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget &Subtarget,
                                     SelectionDAG &DAG) {
  SmallVector<SDValue, 3> Results;
  SDLoc DL(Op);
  getReadTimeStampCounter(Op.getNode(), DL, X86::RDTSC, DAG, Subtarget,
                          Results);
  return DAG.getMergeValues(Results, DL);
}

static SDValue MarkEHRegistrationNode(SDValue Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue Chain = Op.getOperand(0);
  SDValue RegNode = Op.getOperand(2);
  WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
  if (!EHInfo)
    report_fatal_error("EH registrations only live in functions using WinEH");

  // Cast the operand to an alloca, and remember the frame index.
  auto *FINode = dyn_cast<FrameIndexSDNode>(RegNode);
  if (!FINode)
    report_fatal_error("llvm.x86.seh.ehregnode expects a static alloca");
  EHInfo->EHRegNodeFrameIndex = FINode->getIndex();

  // Return the chain operand without making any DAG nodes.
  return Chain;
}

static SDValue MarkEHGuard(SDValue Op, SelectionDAG &DAG) {
  MachineFunction &MF = DAG.getMachineFunction();
  SDValue Chain = Op.getOperand(0);
  SDValue EHGuard = Op.getOperand(2);
  WinEHFuncInfo *EHInfo = MF.getWinEHFuncInfo();
  if (!EHInfo)
    report_fatal_error("EHGuard only live in functions using WinEH");

  // Cast the operand to an alloca, and remember the frame index.
  auto *FINode = dyn_cast<FrameIndexSDNode>(EHGuard);
  if (!FINode)
    report_fatal_error("llvm.x86.seh.ehguard expects a static alloca");
  EHInfo->EHGuardFrameIndex = FINode->getIndex();

  // Return the chain operand without making any DAG nodes.
  return Chain;
}

/// Emit Truncating Store with signed or unsigned saturation.
static SDValue
EmitTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl, SDValue Val,
                SDValue Ptr, EVT MemVT, MachineMemOperand *MMO,
                SelectionDAG &DAG) {
  SDVTList VTs = DAG.getVTList(MVT::Other);
  SDValue Undef = DAG.getUNDEF(Ptr.getValueType());
  SDValue Ops[] = { Chain, Val, Ptr, Undef };
  unsigned Opc = SignedSat ? X86ISD::VTRUNCSTORES : X86ISD::VTRUNCSTOREUS;
  return DAG.getMemIntrinsicNode(Opc, Dl, VTs, Ops, MemVT, MMO);
}

/// Emit Masked Truncating Store with signed or unsigned saturation.
static SDValue
EmitMaskedTruncSStore(bool SignedSat, SDValue Chain, const SDLoc &Dl,
                      SDValue Val, SDValue Ptr, SDValue Mask, EVT MemVT,
                      MachineMemOperand *MMO, SelectionDAG &DAG) {
  SDVTList VTs = DAG.getVTList(MVT::Other);
  SDValue Ops[] = { Chain, Val, Ptr, Mask };
  unsigned Opc = SignedSat ? X86ISD::VMTRUNCSTORES : X86ISD::VMTRUNCSTOREUS;
  return DAG.getMemIntrinsicNode(Opc, Dl, VTs, Ops, MemVT, MMO);
}

static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
                                      SelectionDAG &DAG) {
  unsigned IntNo = Op.getConstantOperandVal(1);
  const IntrinsicData *IntrData = getIntrinsicWithChain(IntNo);
  if (!IntrData) {
    switch (IntNo) {
    case llvm::Intrinsic::x86_seh_ehregnode:
      return MarkEHRegistrationNode(Op, DAG);
    case llvm::Intrinsic::x86_seh_ehguard:
      return MarkEHGuard(Op, DAG);
    case llvm::Intrinsic::x86_rdpkru: {
      SDLoc dl(Op);
      SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
      // Create a RDPKRU node and pass 0 to the ECX parameter.
      return DAG.getNode(X86ISD::RDPKRU, dl, VTs, Op.getOperand(0),
                         DAG.getConstant(0, dl, MVT::i32));
    }
    case llvm::Intrinsic::x86_wrpkru: {
      SDLoc dl(Op);
      // Create a WRPKRU node, pass the input to the EAX parameter, and pass 0
      // to the EDX and ECX parameters.
      return DAG.getNode(X86ISD::WRPKRU, dl, MVT::Other,
                         Op.getOperand(0), Op.getOperand(2),
                         DAG.getConstant(0, dl, MVT::i32),
                         DAG.getConstant(0, dl, MVT::i32));
    }
    case llvm::Intrinsic::x86_flags_read_u32:
    case llvm::Intrinsic::x86_flags_read_u64:
    case llvm::Intrinsic::x86_flags_write_u32:
    case llvm::Intrinsic::x86_flags_write_u64: {
      // We need a frame pointer because this will get lowered to a PUSH/POP
      // sequence.
      MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
      MFI.setHasCopyImplyingStackAdjustment(true);
      // Don't do anything here, we will expand these intrinsics out later
      // during FinalizeISel in EmitInstrWithCustomInserter.
      return Op;
    }
    case Intrinsic::x86_lwpins32:
    case Intrinsic::x86_lwpins64:
    case Intrinsic::x86_umwait:
    case Intrinsic::x86_tpause: {
      SDLoc dl(Op);
      SDValue Chain = Op->getOperand(0);
      SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
      unsigned Opcode;

      switch (IntNo) {
      default: llvm_unreachable("Impossible intrinsic");
      case Intrinsic::x86_umwait:
        Opcode = X86ISD::UMWAIT;
        break;
      case Intrinsic::x86_tpause:
        Opcode = X86ISD::TPAUSE;
        break;
      case Intrinsic::x86_lwpins32:
      case Intrinsic::x86_lwpins64:
        Opcode = X86ISD::LWPINS;
        break;
      }

      SDValue Operation =
          DAG.getNode(Opcode, dl, VTs, Chain, Op->getOperand(2),
                      Op->getOperand(3), Op->getOperand(4));
      SDValue SetCC = getSETCC(X86::COND_B, Operation.getValue(0), dl, DAG);
      return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
                         Operation.getValue(1));
    }
    case Intrinsic::x86_enqcmd:
    case Intrinsic::x86_enqcmds: {
      SDLoc dl(Op);
      SDValue Chain = Op.getOperand(0);
      SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Other);
      unsigned Opcode;
      switch (IntNo) {
      default: llvm_unreachable("Impossible intrinsic!");
      case Intrinsic::x86_enqcmd:
        Opcode = X86ISD::ENQCMD;
        break;
      case Intrinsic::x86_enqcmds:
        Opcode = X86ISD::ENQCMDS;
        break;
      }
      SDValue Operation = DAG.getNode(Opcode, dl, VTs, Chain, Op.getOperand(2),
                                      Op.getOperand(3));
      SDValue SetCC = getSETCC(X86::COND_E, Operation.getValue(0), dl, DAG);
      return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), SetCC,
                         Operation.getValue(1));
    }
    }
    return SDValue();
  }

  SDLoc dl(Op);
  switch(IntrData->Type) {
  default: llvm_unreachable("Unknown Intrinsic Type");
  case RDSEED:
  case RDRAND: {
    // Emit the node with the right value type.
    SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32, MVT::Other);
    SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));

    // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
    // Otherwise return the value from Rand, which is always 0, cast to i32.
    SDValue Ops[] = {DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
                     DAG.getConstant(1, dl, Op->getValueType(1)),
                     DAG.getTargetConstant(X86::COND_B, dl, MVT::i8),
                     SDValue(Result.getNode(), 1)};
    SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, Op->getValueType(1), Ops);

    // Return { result, isValid, chain }.
    return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
                       SDValue(Result.getNode(), 2));
  }
  case GATHER_AVX2: {
    SDValue Chain = Op.getOperand(0);
    SDValue Src = Op.getOperand(2);
    SDValue Base = Op.getOperand(3);
    SDValue Index = Op.getOperand(4);
    SDValue Mask = Op.getOperand(5);
    SDValue Scale = Op.getOperand(6);
    return getAVX2GatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
                             Scale, Chain, Subtarget);
  }
  case GATHER: {
    // gather(v1, mask, index, base, scale);
    SDValue Chain = Op.getOperand(0);
    SDValue Src = Op.getOperand(2);
    SDValue Base = Op.getOperand(3);
    SDValue Index = Op.getOperand(4);
    SDValue Mask = Op.getOperand(5);
    SDValue Scale = Op.getOperand(6);
    return getGatherNode(Op, DAG, Src, Mask, Base, Index, Scale,
                         Chain, Subtarget);
  }
  case SCATTER: {
    // scatter(base, mask, index, v1, scale);
    SDValue Chain = Op.getOperand(0);
    SDValue Base = Op.getOperand(2);
    SDValue Mask = Op.getOperand(3);
    SDValue Index = Op.getOperand(4);
    SDValue Src = Op.getOperand(5);
    SDValue Scale = Op.getOperand(6);
    return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
                          Scale, Chain, Subtarget);
  }
  case PREFETCH: {
    const APInt &HintVal = Op.getConstantOperandAPInt(6);
    assert((HintVal == 2 || HintVal == 3) &&
           "Wrong prefetch hint in intrinsic: should be 2 or 3");
    unsigned Opcode = (HintVal == 2 ? IntrData->Opc1 : IntrData->Opc0);
    SDValue Chain = Op.getOperand(0);
    SDValue Mask = Op.getOperand(2);
    SDValue Index = Op.getOperand(3);
    SDValue Base = Op.getOperand(4);
    SDValue Scale = Op.getOperand(5);
    return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain,
                           Subtarget);
  }
  // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
  case RDTSC: {
    SmallVector<SDValue, 2> Results;
    getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
                            Results);
    return DAG.getMergeValues(Results, dl);
  }
  // Read Performance Monitoring Counters.
  case RDPMC:
  // Get Extended Control Register.
  case XGETBV: {
    SmallVector<SDValue, 2> Results;

    // RDPMC uses ECX to select the index of the performance counter to read.
    // XGETBV uses ECX to select the index of the XCR register to return.
    // The result is stored into registers EDX:EAX.
    expandIntrinsicWChainHelper(Op.getNode(), dl, DAG, IntrData->Opc0, X86::ECX,
                                Subtarget, Results);
    return DAG.getMergeValues(Results, dl);
  }
  // XTEST intrinsics.
  case XTEST: {
    SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
    SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));

    SDValue SetCC = getSETCC(X86::COND_NE, InTrans, dl, DAG);
    SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
    return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
                       Ret, SDValue(InTrans.getNode(), 1));
  }
  case TRUNCATE_TO_MEM_VI8:
  case TRUNCATE_TO_MEM_VI16:
  case TRUNCATE_TO_MEM_VI32: {
    SDValue Mask = Op.getOperand(4);
    SDValue DataToTruncate = Op.getOperand(3);
    SDValue Addr = Op.getOperand(2);
    SDValue Chain = Op.getOperand(0);

    MemIntrinsicSDNode *MemIntr = dyn_cast<MemIntrinsicSDNode>(Op);
    assert(MemIntr && "Expected MemIntrinsicSDNode!");

    EVT MemVT = MemIntr->getMemoryVT();

    uint16_t TruncationOp = IntrData->Opc0;
    switch (TruncationOp) {
    case X86ISD::VTRUNC: {
      if (isAllOnesConstant(Mask)) // return just a truncate store
        return DAG.getTruncStore(Chain, dl, DataToTruncate, Addr, MemVT,
                                 MemIntr->getMemOperand());

      MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
      SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);
      SDValue Offset = DAG.getUNDEF(VMask.getValueType());

      return DAG.getMaskedStore(Chain, dl, DataToTruncate, Addr, Offset, VMask,
                                MemVT, MemIntr->getMemOperand(), ISD::UNINDEXED,
                                true /* truncating */);
    }
    case X86ISD::VTRUNCUS:
    case X86ISD::VTRUNCS: {
      bool IsSigned = (TruncationOp == X86ISD::VTRUNCS);
      if (isAllOnesConstant(Mask))
        return EmitTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr, MemVT,
                               MemIntr->getMemOperand(), DAG);

      MVT MaskVT = MVT::getVectorVT(MVT::i1, MemVT.getVectorNumElements());
      SDValue VMask = getMaskNode(Mask, MaskVT, Subtarget, DAG, dl);

      return EmitMaskedTruncSStore(IsSigned, Chain, dl, DataToTruncate, Addr,
                                   VMask, MemVT, MemIntr->getMemOperand(), DAG);
    }
    default:
      llvm_unreachable("Unsupported truncstore intrinsic");
    }
  }
  }
}

SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  unsigned Depth = Op.getConstantOperandVal(0);
  SDLoc dl(Op);
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
    SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), dl, PtrVT);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Just load the return address.
  SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), RetAddrFI,
                     MachinePointerInfo());
}

SDValue X86TargetLowering::LowerADDROFRETURNADDR(SDValue Op,
                                                 SelectionDAG &DAG) const {
  DAG.getMachineFunction().getFrameInfo().setReturnAddressIsTaken(true);
  return getReturnAddressFrameIndex(DAG);
}

SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  EVT VT = Op.getValueType();

  MFI.setFrameAddressIsTaken(true);

  if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
    // Depth > 0 makes no sense on targets which use Windows unwind codes. It
    // is not possible to crawl up the stack without looking at the unwind codes
    // simultaneously.
    int FrameAddrIndex = FuncInfo->getFAIndex();
    if (!FrameAddrIndex) {
      // Set up a frame object for the return address.
      unsigned SlotSize = RegInfo->getSlotSize();
      FrameAddrIndex = MF.getFrameInfo().CreateFixedObject(
          SlotSize, /*SPOffset=*/0, /*IsImmutable=*/false);
      FuncInfo->setFAIndex(FrameAddrIndex);
    }
    return DAG.getFrameIndex(FrameAddrIndex, VT);
  }

  unsigned FrameReg =
      RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
  SDLoc dl(Op); // FIXME: probably not meaningful.
  unsigned Depth = Op.getConstantOperandVal(0);
  assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
          (FrameReg == X86::EBP && VT == MVT::i32)) &&
         "Invalid Frame Register!");
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
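  // Each frame's saved frame pointer sits at offset 0 from that frame's
  // frame pointer, so Depth successive loads walk up Depth frames.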
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo());
  return FrameAddr;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
Register X86TargetLowering::getRegisterByName(const char* RegName, LLT VT,
                                              const MachineFunction &MF) const {
  const TargetFrameLowering &TFI = *Subtarget.getFrameLowering();

  Register Reg = StringSwitch<unsigned>(RegName)
                     .Case("esp", X86::ESP)
                     .Case("rsp", X86::RSP)
                     .Case("ebp", X86::EBP)
                     .Case("rbp", X86::RBP)
                     .Default(0);

  if (Reg == X86::EBP || Reg == X86::RBP) {
    if (!TFI.hasFP(MF))
      report_fatal_error("register " + StringRef(RegName) +
                         " is allocatable: function has no frame pointer");
#ifndef NDEBUG
    else {
      const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
      Register FrameReg = RegInfo->getPtrSizedFrameRegister(MF);
      assert((FrameReg == X86::EBP || FrameReg == X86::RBP) &&
             "Invalid Frame Register!");
    }
#endif
  }

  if (Reg)
    return Reg;

  report_fatal_error("Invalid register name global variable");
}

SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
                                                     SelectionDAG &DAG) const {
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize(), SDLoc(Op));
}

Register X86TargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  if (classifyEHPersonality(PersonalityFn) == EHPersonality::CoreCLR)
    return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;

  return Subtarget.isTarget64BitLP64() ? X86::RAX : X86::EAX;
}

Register X86TargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  // Funclet personalities don't use selectors (the runtime does the selection).
  assert(!isFuncletEHPersonality(classifyEHPersonality(PersonalityFn)));
  return Subtarget.isTarget64BitLP64() ? X86::RDX : X86::EDX;
}

bool X86TargetLowering::needsFixedCatchObjects() const {
  return Subtarget.isTargetWin64();
}

SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Offset = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  SDLoc dl(Op);

  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
  Register FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
  assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
          (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
         "Invalid Frame Register!");
  SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
  Register StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;

  SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
                                  DAG.getIntPtrConstant(RegInfo->getSlotSize(),
                                                        dl));
  StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
  Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo());
  Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);

  return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
                     DAG.getRegister(StoreAddrReg, PtrVT));
}

SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  // If the subtarget is not 64bit, we may need the global base reg
  // after isel expand pseudo, i.e., after CGBR pass ran.
  // Therefore, ask for the GlobalBaseReg now, so that the pass
  // inserts the code for us in case we need it.
  // Otherwise, we will end up in a situation where we will
  // reference a virtual register that is not defined!
  if (!Subtarget.is64Bit()) {
    const X86InstrInfo *TII = Subtarget.getInstrInfo();
    (void)TII->getGlobalBaseReg(&DAG.getMachineFunction());
  }
  return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
                     DAG.getVTList(MVT::i32, MVT::Other),
                     Op.getOperand(0), Op.getOperand(1));
}

SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
                     Op.getOperand(0), Op.getOperand(1));
}

SDValue X86TargetLowering::lowerEH_SJLJ_SETUP_DISPATCH(SDValue Op,
                                                       SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::EH_SJLJ_SETUP_DISPATCH, DL, MVT::Other,
                     Op.getOperand(0));
}

static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
  return Op.getOperand(0);
}

SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDValue Root = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
  SDLoc dl(Op);

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

  if (Subtarget.is64Bit()) {
    SDValue OutChains[6];
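    // The trampoline written by the stores below is, roughly:
    //   movabsq $FPtr, %r11   ; bytes 0-9  (opcode at 0, imm64 at 2)
    //   movabsq $Nest, %r10   ; bytes 10-19 (opcode at 10, imm64 at 12)
    //   jmpq    *%r11         ; bytes 20-22 (opcode at 20, ModRM at 22)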
    // Large code-model.
    const unsigned char JMP64r  = 0xFF; // 64-bit jmp through register opcode.
    const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.

    const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
    const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;

    const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix

    // Load the pointer to the nested function into R11.
    unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
    SDValue Addr = Trmp;
    OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr));

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(2, dl, MVT::i64));
    OutChains[1] =
        DAG.getStore(Root, dl, FPtr, Addr, MachinePointerInfo(TrmpAddr, 2),
                     /* Alignment = */ 2);

    // Load the 'nest' parameter value into R10.
    // R10 is specified in X86CallingConv.td
    OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(10, dl, MVT::i64));
    OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr, 10));

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(12, dl, MVT::i64));
    OutChains[3] =
        DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 12),
                     /* Alignment = */ 2);

    // Jump to the nested function.
    OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(20, dl, MVT::i64));
    OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, dl, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr, 20));

    unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(22, dl, MVT::i64));
    OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, dl, MVT::i8),
                                Addr, MachinePointerInfo(TrmpAddr, 22));

    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  } else {
26056 const Function *Func =
26057 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
26058 CallingConv::ID CC = Func->getCallingConv();
26063 llvm_unreachable("Unsupported calling convention");
26064 case CallingConv::C:
26065 case CallingConv::X86_StdCall: {
26066 // Pass 'nest' parameter in ECX.
26067 // Must be kept in sync with X86CallingConv.td
26068 NestReg = X86::ECX;
26070 // Check that ECX wasn't needed by an 'inreg' parameter.
26071 FunctionType *FTy = Func->getFunctionType();
26072 const AttributeList &Attrs = Func->getAttributes();
26074 if (!Attrs.isEmpty() && !Func->isVarArg()) {
26075 unsigned InRegCount = 0;
26078 for (FunctionType::param_iterator I = FTy->param_begin(),
26079 E = FTy->param_end(); I != E; ++I, ++Idx)
26080 if (Attrs.hasAttribute(Idx, Attribute::InReg)) {
26081 auto &DL = DAG.getDataLayout();
26082 // FIXME: should only count parameters that are lowered to integers.
26083 InRegCount += (DL.getTypeSizeInBits(*I) + 31) / 32;
26084 }
26086 if (InRegCount > 2) {
26087 report_fatal_error("Nest register in use - reduce number of inreg"
26088 " parameters!");
26089 }
26090 }
26091 break;
26092 }
26093 case CallingConv::X86_FastCall:
26094 case CallingConv::X86_ThisCall:
26095 case CallingConv::Fast:
26096 case CallingConv::Tail:
26097 // Pass 'nest' parameter in EAX.
26098 // Must be kept in sync with X86CallingConv.td
26099 NestReg = X86::EAX;
26100 break;
26101 }
26103 SDValue OutChains[4];
26104 SDValue Addr, Disp;
26106 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
26107 DAG.getConstant(10, dl, MVT::i32));
26108 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
26110 // This is storing the opcode for MOV32ri.
26111 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
26112 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
26113 OutChains[0] =
26114 DAG.getStore(Root, dl, DAG.getConstant(MOV32ri | N86Reg, dl, MVT::i8),
26115 Trmp, MachinePointerInfo(TrmpAddr));
26117 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
26118 DAG.getConstant(1, dl, MVT::i32));
26119 OutChains[1] =
26120 DAG.getStore(Root, dl, Nest, Addr, MachinePointerInfo(TrmpAddr, 1),
26121 /* Alignment = */ 1);
26123 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
26124 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
26125 DAG.getConstant(5, dl, MVT::i32));
26126 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, dl, MVT::i8),
26127 Addr, MachinePointerInfo(TrmpAddr, 5),
26128 /* Alignment = */ 1);
26130 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
26131 DAG.getConstant(6, dl, MVT::i32));
26132 OutChains[3] =
26133 DAG.getStore(Root, dl, Disp, Addr, MachinePointerInfo(TrmpAddr, 6),
26134 /* Alignment = */ 1);
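// For reference (added annotation): the resulting 10-byte ia32 trampoline is
//   +0: B8+r <Nest:imm32>   movl $Nest, %NestReg  (0xB9 for ECX, 0xB8 for EAX)
//   +5: E9   <rel32>        jmp  FPtr             (Disp = FPtr - (Trmp + 10))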
26136 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
26137 }
26138 }
26140 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
26141 SelectionDAG &DAG) const {
26142 /*
26143 The rounding mode is in bits 11:10 of FPSR, and has the following
26144 settings:
26145 00 Round to nearest
26146 01 Round to -inf
26147 10 Round to +inf
26148 11 Round to 0
26150 FLT_ROUNDS, on the other hand, expects the following:
26151 -1 Undefined
26152 0 Round to 0
26153 1 Round to nearest
26154 2 Round to +inf
26155 3 Round to -inf
26157 To perform the conversion, we use a packed lookup table of the four 2-bit
26158 values that we can index by FPSR[11:10]
26159 0x2d --> (0b00,10,11,01) --> (0,2,3,1) >> FPSR[11:10]
26161 (0x2d >> ((FPSR & 0xc00) >> 9)) & 3
26162 */
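// Worked example (added annotation): if FPSR[11:10] = 10 (round to +inf),
// then FPSR & 0xc00 = 0x800, 0x800 >> 9 = 4, and (0x2d >> 4) & 3 = 2, which
// is FLT_ROUNDS' encoding for round to +inf.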
26164 MachineFunction &MF = DAG.getMachineFunction();
26165 MVT VT = Op.getSimpleValueType();
26166 SDLoc DL(Op);
26168 // Save FP Control Word to stack slot
26169 int SSFI = MF.getFrameInfo().CreateStackObject(2, Align(2), false);
26170 SDValue StackSlot =
26171 DAG.getFrameIndex(SSFI, getPointerTy(DAG.getDataLayout()));
26173 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, SSFI);
26175 SDValue Chain = Op.getOperand(0);
26176 SDValue Ops[] = {Chain, StackSlot};
26177 Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
26178 DAG.getVTList(MVT::Other), Ops, MVT::i16, MPI,
26179 Align(2), MachineMemOperand::MOStore);
26181 // Load FP Control Word from stack slot
26182 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot, MPI, Align(2));
26183 Chain = CWD.getValue(1);
26185 // Mask and turn the control bits into a shift for the lookup table.
26186 SDValue Shift =
26187 DAG.getNode(ISD::SRL, DL, MVT::i16,
26188 DAG.getNode(ISD::AND, DL, MVT::i16,
26189 CWD, DAG.getConstant(0xc00, DL, MVT::i16)),
26190 DAG.getConstant(9, DL, MVT::i8));
26191 Shift = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, Shift);
26193 SDValue LUT = DAG.getConstant(0x2d, DL, MVT::i32);
26194 SDValue RetVal =
26195 DAG.getNode(ISD::AND, DL, MVT::i32,
26196 DAG.getNode(ISD::SRL, DL, MVT::i32, LUT, Shift),
26197 DAG.getConstant(3, DL, MVT::i32));
26199 RetVal = DAG.getZExtOrTrunc(RetVal, DL, VT);
26201 return DAG.getMergeValues({RetVal, Chain}, DL);
26202 }
26204 /// Lower a vector CTLZ using a natively supported vector CTLZ instruction.
26206 // i8/i16 vector implemented using dword LZCNT vector instruction
26207 // ( sub(trunc(lzcnt(zext32(x)))) ). In case zext32(x) is illegal,
26208 // split the vector, perform the operation on its Lo and Hi parts, and
26209 // concatenate the results.
26210 static SDValue LowerVectorCTLZ_AVX512CDI(SDValue Op, SelectionDAG &DAG,
26211 const X86Subtarget &Subtarget) {
26212 assert(Op.getOpcode() == ISD::CTLZ);
26213 SDLoc dl(Op);
26214 MVT VT = Op.getSimpleValueType();
26215 MVT EltVT = VT.getVectorElementType();
26216 unsigned NumElems = VT.getVectorNumElements();
26218 assert((EltVT == MVT::i8 || EltVT == MVT::i16) &&
26219 "Unsupported element type");
26221 // Split the vector; its Lo and Hi parts will be handled in the next iteration.
26222 if (NumElems > 16 ||
26223 (NumElems == 16 && !Subtarget.canExtendTo512DQ()))
26224 return splitVectorIntUnary(Op, DAG);
26226 MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
26227 assert((NewVT.is256BitVector() || NewVT.is512BitVector()) &&
26228 "Unsupported value type for operation");
26230 // Use native supported vector instruction vplzcntd.
26231 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, NewVT, Op.getOperand(0));
26232 SDValue CtlzNode = DAG.getNode(ISD::CTLZ, dl, NewVT, Op);
26233 SDValue TruncNode = DAG.getNode(ISD::TRUNCATE, dl, VT, CtlzNode);
26234 SDValue Delta = DAG.getConstant(32 - EltVT.getSizeInBits(), dl, VT);
26236 return DAG.getNode(ISD::SUB, dl, VT, TruncNode, Delta);
26237 }
26239 // Lower CTLZ using a PSHUFB lookup table implementation.
26240 static SDValue LowerVectorCTLZInRegLUT(SDValue Op, const SDLoc &DL,
26241 const X86Subtarget &Subtarget,
26242 SelectionDAG &DAG) {
26243 MVT VT = Op.getSimpleValueType();
26244 int NumElts = VT.getVectorNumElements();
26245 int NumBytes = NumElts * (VT.getScalarSizeInBits() / 8);
26246 MVT CurrVT = MVT::getVectorVT(MVT::i8, NumBytes);
26248 // Per-nibble leading zero PSHUFB lookup table.
26249 const int LUT[16] = {/* 0 */ 4, /* 1 */ 3, /* 2 */ 2, /* 3 */ 2,
26250 /* 4 */ 1, /* 5 */ 1, /* 6 */ 1, /* 7 */ 1,
26251 /* 8 */ 0, /* 9 */ 0, /* a */ 0, /* b */ 0,
26252 /* c */ 0, /* d */ 0, /* e */ 0, /* f */ 0};
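// Worked example (added annotation): for the byte 0x1A the hi nibble is 0x1,
// so LUT[0x1] = 3 and HiZ is false, giving ctlz8(0x1A) = 3. For 0x05 the hi
// nibble is zero, so HiZ is true and the result is LUT[0x0] + LUT[0x5] =
// 4 + 1 = 5 = ctlz8(0x05).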
26254 SmallVector<SDValue, 64> LUTVec;
26255 for (int i = 0; i < NumBytes; ++i)
26256 LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
26257 SDValue InRegLUT = DAG.getBuildVector(CurrVT, DL, LUTVec);
26259 // Begin by bitcasting the input to byte vector, then split those bytes
26260 // into lo/hi nibbles and use the PSHUFB LUT to perform CTLZ on each of them.
26261 // If the hi input nibble is zero then we add both results together, otherwise
26262 // we just take the hi result (by masking the lo result to zero before the
26263 // add).
26264 SDValue Op0 = DAG.getBitcast(CurrVT, Op.getOperand(0));
26265 SDValue Zero = DAG.getConstant(0, DL, CurrVT);
26267 SDValue NibbleShift = DAG.getConstant(0x4, DL, CurrVT);
26268 SDValue Lo = Op0;
26269 SDValue Hi = DAG.getNode(ISD::SRL, DL, CurrVT, Op0, NibbleShift);
26270 SDValue HiZ;
26271 if (CurrVT.is512BitVector()) {
26272 MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
26273 HiZ = DAG.getSetCC(DL, MaskVT, Hi, Zero, ISD::SETEQ);
26274 HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
26275 } else {
26276 HiZ = DAG.getSetCC(DL, CurrVT, Hi, Zero, ISD::SETEQ);
26277 }
26279 Lo = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Lo);
26280 Hi = DAG.getNode(X86ISD::PSHUFB, DL, CurrVT, InRegLUT, Hi);
26281 Lo = DAG.getNode(ISD::AND, DL, CurrVT, Lo, HiZ);
26282 SDValue Res = DAG.getNode(ISD::ADD, DL, CurrVT, Lo, Hi);
26284 // Merge result back from vXi8 back to VT, working on the lo/hi halves
26285 // of the current vector width in the same way we did for the nibbles.
26286 // If the upper half of the input element is zero then add the halves'
26287 // leading zero counts together, otherwise just use the upper half's.
26288 // Double the width of the result until we are at target width.
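// Worked example (added annotation): merging the two byte counts of the i16
// element 0x0005, the upper byte is zero (count 8) and the lower byte has
// count 5, so HiZ selects the sum 8 + 5 = 13 = ctlz16(0x0005). For 0x4005 the
// upper byte is nonzero, so only its count is kept: ctlz16(0x4005) = 1.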
26289 while (CurrVT != VT) {
26290 int CurrScalarSizeInBits = CurrVT.getScalarSizeInBits();
26291 int CurrNumElts = CurrVT.getVectorNumElements();
26292 MVT NextSVT = MVT::getIntegerVT(CurrScalarSizeInBits * 2);
26293 MVT NextVT = MVT::getVectorVT(NextSVT, CurrNumElts / 2);
26294 SDValue Shift = DAG.getConstant(CurrScalarSizeInBits, DL, NextVT);
26296 // Check if the upper half of the input element is zero.
26297 if (CurrVT.is512BitVector()) {
26298 MVT MaskVT = MVT::getVectorVT(MVT::i1, CurrVT.getVectorNumElements());
26299 HiZ = DAG.getSetCC(DL, MaskVT, DAG.getBitcast(CurrVT, Op0),
26300 DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
26301 HiZ = DAG.getNode(ISD::SIGN_EXTEND, DL, CurrVT, HiZ);
26302 } else {
26303 HiZ = DAG.getSetCC(DL, CurrVT, DAG.getBitcast(CurrVT, Op0),
26304 DAG.getBitcast(CurrVT, Zero), ISD::SETEQ);
26305 }
26306 HiZ = DAG.getBitcast(NextVT, HiZ);
26308 // Move the upper/lower halves to the lower bits as we'll be extending to
26309 // NextVT. Mask the lower result to zero if HiZ is true and add the results
26310 // together.
26311 SDValue ResNext = Res = DAG.getBitcast(NextVT, Res);
26312 SDValue R0 = DAG.getNode(ISD::SRL, DL, NextVT, ResNext, Shift);
26313 SDValue R1 = DAG.getNode(ISD::SRL, DL, NextVT, HiZ, Shift);
26314 R1 = DAG.getNode(ISD::AND, DL, NextVT, ResNext, R1);
26315 Res = DAG.getNode(ISD::ADD, DL, NextVT, R0, R1);
26316 CurrVT = NextVT;
26317 }
26319 return Res;
26320 }
26322 static SDValue LowerVectorCTLZ(SDValue Op, const SDLoc &DL,
26323 const X86Subtarget &Subtarget,
26324 SelectionDAG &DAG) {
26325 MVT VT = Op.getSimpleValueType();
26327 if (Subtarget.hasCDI() &&
26328 // vXi8 vectors need to be promoted to 512-bits for vXi32.
26329 (Subtarget.canExtendTo512DQ() || VT.getVectorElementType() != MVT::i8))
26330 return LowerVectorCTLZ_AVX512CDI(Op, DAG, Subtarget);
26332 // Decompose 256-bit ops into smaller 128-bit ops.
26333 if (VT.is256BitVector() && !Subtarget.hasInt256())
26334 return splitVectorIntUnary(Op, DAG);
26336 // Decompose 512-bit ops into smaller 256-bit ops.
26337 if (VT.is512BitVector() && !Subtarget.hasBWI())
26338 return splitVectorIntUnary(Op, DAG);
26340 assert(Subtarget.hasSSSE3() && "Expected SSSE3 support for PSHUFB");
26341 return LowerVectorCTLZInRegLUT(Op, DL, Subtarget, DAG);
26342 }
26344 static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
26345 SelectionDAG &DAG) {
26346 MVT VT = Op.getSimpleValueType();
26347 MVT OpVT = VT;
26348 unsigned NumBits = VT.getSizeInBits();
26349 SDLoc dl(Op);
26350 unsigned Opc = Op.getOpcode();
26352 if (VT.isVector())
26353 return LowerVectorCTLZ(Op, dl, Subtarget, DAG);
26355 Op = Op.getOperand(0);
26356 if (VT == MVT::i8) {
26357 // Zero extend to i32 since there is not an i8 bsr.
26358 OpVT = MVT::i32;
26359 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
26360 }
26362 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
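// For a nonzero input, BSR returns the index of the highest set bit, so
// ctlz(x) == (NumBits - 1) - bsr(x) == bsr(x) ^ (NumBits - 1); e.g. for i32,
// bsr(0x08) = 3 and ctlz32(0x08) = 28 = 3 ^ 31. The xor below relies on this
// identity (added annotation).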
26363 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
26364 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
26366 if (Opc == ISD::CTLZ) {
26367 // If src is zero (i.e. bsr sets ZF), returns NumBits.
26368 SDValue Ops[] = {Op, DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
26369 DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
26370 Op.getValue(1)};
26371 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
26372 }
26374 // Finally xor with NumBits-1.
26375 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op,
26376 DAG.getConstant(NumBits - 1, dl, OpVT));
26378 if (VT == MVT::i8)
26379 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
26380 return Op;
26381 }
26383 static SDValue LowerCTTZ(SDValue Op, const X86Subtarget &Subtarget,
26384 SelectionDAG &DAG) {
26385 MVT VT = Op.getSimpleValueType();
26386 unsigned NumBits = VT.getScalarSizeInBits();
26387 SDValue N0 = Op.getOperand(0);
26388 SDLoc dl(Op);
26390 assert(!VT.isVector() && Op.getOpcode() == ISD::CTTZ &&
26391 "Only scalar CTTZ requires custom lowering");
26393 // Issue a bsf (scan bits forward) which also sets EFLAGS.
26394 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
26395 Op = DAG.getNode(X86ISD::BSF, dl, VTs, N0);
26397 // If src is zero (i.e. bsf sets ZF), returns NumBits.
26398 SDValue Ops[] = {Op, DAG.getConstant(NumBits, dl, VT),
26399 DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
26400 Op.getValue(1)};
26401 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
26402 }
26404 static SDValue lowerAddSub(SDValue Op, SelectionDAG &DAG,
26405 const X86Subtarget &Subtarget) {
26406 MVT VT = Op.getSimpleValueType();
26407 if (VT == MVT::i16 || VT == MVT::i32)
26408 return lowerAddSubToHorizontalOp(Op, DAG, Subtarget);
26410 if (VT.getScalarType() == MVT::i1)
26411 return DAG.getNode(ISD::XOR, SDLoc(Op), VT,
26412 Op.getOperand(0), Op.getOperand(1));
26414 if (VT == MVT::v32i16 || VT == MVT::v64i8)
26415 return splitVectorIntBinary(Op, DAG);
26417 assert(Op.getSimpleValueType().is256BitVector() &&
26418 Op.getSimpleValueType().isInteger() &&
26419 "Only handle AVX 256-bit vector integer operation");
26420 return splitVectorIntBinary(Op, DAG);
26421 }
26423 static SDValue LowerADDSAT_SUBSAT(SDValue Op, SelectionDAG &DAG,
26424 const X86Subtarget &Subtarget) {
26425 MVT VT = Op.getSimpleValueType();
26426 SDValue X = Op.getOperand(0), Y = Op.getOperand(1);
26427 unsigned Opcode = Op.getOpcode();
26428 if (VT.getScalarType() == MVT::i1) {
26429 SDLoc dl(Op);
26430 switch (Opcode) {
26431 default: llvm_unreachable("Expected saturated arithmetic opcode");
26432 case ISD::UADDSAT:
26433 case ISD::SADDSAT:
26434 // *addsat i1 X, Y --> X | Y
26435 return DAG.getNode(ISD::OR, dl, VT, X, Y);
26436 case ISD::USUBSAT:
26437 case ISD::SSUBSAT:
26438 // *subsat i1 X, Y --> X & ~Y
26439 return DAG.getNode(ISD::AND, dl, VT, X, DAG.getNOT(dl, Y, VT));
26440 }
26441 }
26443 if (VT.is128BitVector()) {
26444 // Avoid the generic expansion with min/max if we don't have pminu*/pmaxu*.
26445 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
26446 EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
26447 *DAG.getContext(), VT);
26448 SDLoc DL(Op);
26449 if (Opcode == ISD::UADDSAT && !TLI.isOperationLegal(ISD::UMIN, VT)) {
26450 // uaddsat X, Y --> (X >u (X + Y)) ? -1 : X + Y
26451 SDValue Add = DAG.getNode(ISD::ADD, DL, VT, X, Y);
26452 SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Add, ISD::SETUGT);
26453 return DAG.getSelect(DL, VT, Cmp, DAG.getAllOnesConstant(DL, VT), Add);
26454 }
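// Worked example (added annotation): for i8 lanes, uaddsat 250, 10 gives
// Add = 4 (wrapped); 250 >u 4, so the select yields the saturated value 255.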
26455 if (Opcode == ISD::USUBSAT && !TLI.isOperationLegal(ISD::UMAX, VT)) {
26456 // usubsat X, Y --> (X >u Y) ? X - Y : 0
26457 SDValue Sub = DAG.getNode(ISD::SUB, DL, VT, X, Y);
26458 SDValue Cmp = DAG.getSetCC(DL, SetCCResultType, X, Y, ISD::SETUGT);
26459 return DAG.getSelect(DL, VT, Cmp, Sub, DAG.getConstant(0, DL, VT));
26460 }
26461 // Use default expansion.
26462 return SDValue();
26463 }
26465 if (VT == MVT::v32i16 || VT == MVT::v64i8)
26466 return splitVectorIntBinary(Op, DAG);
26468 assert(Op.getSimpleValueType().is256BitVector() &&
26469 Op.getSimpleValueType().isInteger() &&
26470 "Only handle AVX 256-bit vector integer operation");
26471 return splitVectorIntBinary(Op, DAG);
26472 }
26474 static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
26475 SelectionDAG &DAG) {
26476 MVT VT = Op.getSimpleValueType();
26477 if (VT == MVT::i16 || VT == MVT::i32 || VT == MVT::i64) {
26478 // Since X86 does not have CMOV for 8-bit integer, we don't convert
26479 // 8-bit integer abs to NEG and CMOV.
26480 SDLoc DL(Op);
26481 SDValue N0 = Op.getOperand(0);
26482 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
26483 DAG.getConstant(0, DL, VT), N0);
26484 SDValue Ops[] = {N0, Neg, DAG.getTargetConstant(X86::COND_GE, DL, MVT::i8),
26485 SDValue(Neg.getNode(), 1)};
26486 return DAG.getNode(X86ISD::CMOV, DL, VT, Ops);
26487 }
26489 // ABS(vXi64 X) --> VPBLENDVPD(X, 0-X, X).
26490 if ((VT == MVT::v2i64 || VT == MVT::v4i64) && Subtarget.hasSSE41()) {
26491 SDLoc DL(Op);
26492 SDValue Src = Op.getOperand(0);
26493 SDValue Sub =
26494 DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Src);
26495 return DAG.getNode(X86ISD::BLENDV, DL, VT, Src, Sub, Src);
26496 }
26498 if (VT.is256BitVector() && !Subtarget.hasInt256()) {
26499 assert(VT.isInteger() &&
26500 "Only handle AVX 256-bit vector integer operation");
26501 return splitVectorIntUnary(Op, DAG);
26502 }
26504 if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
26505 return splitVectorIntUnary(Op, DAG);
26507 // Default to expand.
26508 return SDValue();
26509 }
26511 static SDValue LowerMINMAX(SDValue Op, SelectionDAG &DAG) {
26512 MVT VT = Op.getSimpleValueType();
26514 // For AVX1 cases, split to use legal ops (everything but v4i64).
26515 if (VT.getScalarType() != MVT::i64 && VT.is256BitVector())
26516 return splitVectorIntBinary(Op, DAG);
26518 if (VT == MVT::v32i16 || VT == MVT::v64i8)
26519 return splitVectorIntBinary(Op, DAG);
26521 SDLoc DL(Op);
26522 unsigned Opcode = Op.getOpcode();
26523 SDValue N0 = Op.getOperand(0);
26524 SDValue N1 = Op.getOperand(1);
26526 // For pre-SSE41, we can perform UMIN/UMAX v8i16 by flipping the signbit,
26527 // using the SMIN/SMAX instructions and flipping the signbit back.
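// Worked example (added annotation): umin(0x8000, 0x0001) should be 0x0001.
// After xoring with 0x8000 the operands become 0x0000 (0) and 0x8001 (-32767);
// smin picks -32767, and xoring with 0x8000 again restores 0x0001.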
26528 if (VT == MVT::v8i16) {
26529 assert((Opcode == ISD::UMIN || Opcode == ISD::UMAX) &&
26530 "Unexpected MIN/MAX opcode");
26531 SDValue Sign = DAG.getConstant(APInt::getSignedMinValue(16), DL, VT);
26532 N0 = DAG.getNode(ISD::XOR, DL, VT, N0, Sign);
26533 N1 = DAG.getNode(ISD::XOR, DL, VT, N1, Sign);
26534 Opcode = (Opcode == ISD::UMIN ? ISD::SMIN : ISD::SMAX);
26535 SDValue Result = DAG.getNode(Opcode, DL, VT, N0, N1);
26536 return DAG.getNode(ISD::XOR, DL, VT, Result, Sign);
26537 }
26539 // Else, expand to a compare/select.
26540 ISD::CondCode CC;
26541 switch (Opcode) {
26542 case ISD::SMIN: CC = ISD::CondCode::SETLT; break;
26543 case ISD::SMAX: CC = ISD::CondCode::SETGT; break;
26544 case ISD::UMIN: CC = ISD::CondCode::SETULT; break;
26545 case ISD::UMAX: CC = ISD::CondCode::SETUGT; break;
26546 default: llvm_unreachable("Unknown MINMAX opcode");
26547 }
26549 SDValue Cond = DAG.getSetCC(DL, VT, N0, N1, CC);
26550 return DAG.getSelect(DL, VT, Cond, N0, N1);
26551 }
26553 static SDValue LowerMUL(SDValue Op, const X86Subtarget &Subtarget,
26554 SelectionDAG &DAG) {
26555 SDLoc dl(Op);
26556 MVT VT = Op.getSimpleValueType();
26558 if (VT.getScalarType() == MVT::i1)
26559 return DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0), Op.getOperand(1));
26561 // Decompose 256-bit ops into 128-bit ops.
26562 if (VT.is256BitVector() && !Subtarget.hasInt256())
26563 return splitVectorIntBinary(Op, DAG);
26565 if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
26566 return splitVectorIntBinary(Op, DAG);
26568 SDValue A = Op.getOperand(0);
26569 SDValue B = Op.getOperand(1);
26571 // Lower v16i8/v32i8/v64i8 mul as sign-extension to v8i16/v16i16/v32i16
26572 // vector pairs, multiply and truncate.
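// Worked example (added annotation): the i8 product 100 * 3 is computed as
// the i16 value 300 and truncated to 300 & 0xFF = 44, the correctly wrapped
// i8 result.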
26573 if (VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8) {
26574 unsigned NumElts = VT.getVectorNumElements();
26576 if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
26577 (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
26578 MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
26579 return DAG.getNode(
26580 ISD::TRUNCATE, dl, VT,
26581 DAG.getNode(ISD::MUL, dl, ExVT,
26582 DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, A),
26583 DAG.getNode(ISD::ANY_EXTEND, dl, ExVT, B)));
26584 }
26586 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
26588 // Extract the lo/hi parts and any-extend to i16.
26589 // We're going to mask off the low byte of each result element of the
26590 // pmullw, so it doesn't matter what's in the high byte of each 16-bit
26591 // element.
26592 SDValue Undef = DAG.getUNDEF(VT);
26593 SDValue ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A, Undef));
26594 SDValue AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A, Undef));
26596 SDValue BLo, BHi;
26597 if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
26598 // If the RHS is a constant, manually unpackl/unpackh.
26599 SmallVector<SDValue, 16> LoOps, HiOps;
26600 for (unsigned i = 0; i != NumElts; i += 16) {
26601 for (unsigned j = 0; j != 8; ++j) {
26602 LoOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j), dl,
26603 MVT::i16));
26604 HiOps.push_back(DAG.getAnyExtOrTrunc(B.getOperand(i + j + 8), dl,
26605 MVT::i16));
26606 }
26607 }
26609 BLo = DAG.getBuildVector(ExVT, dl, LoOps);
26610 BHi = DAG.getBuildVector(ExVT, dl, HiOps);
26611 } else {
26612 BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B, Undef));
26613 BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B, Undef));
26614 }
26616 // Multiply, mask the lower 8 bits of the lo/hi results and pack.
26617 SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
26618 SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
26619 RLo = DAG.getNode(ISD::AND, dl, ExVT, RLo, DAG.getConstant(255, dl, ExVT));
26620 RHi = DAG.getNode(ISD::AND, dl, ExVT, RHi, DAG.getConstant(255, dl, ExVT));
26621 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
26622 }
26624 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
26625 if (VT == MVT::v4i32) {
26626 assert(Subtarget.hasSSE2() && !Subtarget.hasSSE41() &&
26627 "Should not custom lower when pmulld is available!");
26629 // Extract the odd parts.
26630 static const int UnpackMask[] = { 1, -1, 3, -1 };
26631 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
26632 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
26634 // Multiply the even parts.
26635 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
26636 DAG.getBitcast(MVT::v2i64, A),
26637 DAG.getBitcast(MVT::v2i64, B));
26638 // Now multiply odd parts.
26639 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64,
26640 DAG.getBitcast(MVT::v2i64, Aodds),
26641 DAG.getBitcast(MVT::v2i64, Bodds));
26643 Evens = DAG.getBitcast(VT, Evens);
26644 Odds = DAG.getBitcast(VT, Odds);
26646 // Merge the two vectors back together with a shuffle. This expands into 2
26647 // moves.
26648 static const int ShufMask[] = { 0, 4, 2, 6 };
26649 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
26650 }
26652 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
26653 "Only know how to lower V2I64/V4I64/V8I64 multiply");
26654 assert(!Subtarget.hasDQI() && "DQI should use MULLQ");
26656 // Ahi = psrlqi(a, 32);
26657 // Bhi = psrlqi(b, 32);
26659 // AloBlo = pmuludq(a, b);
26660 // AloBhi = pmuludq(a, Bhi);
26661 // AhiBlo = pmuludq(Ahi, b);
26663 // Hi = psllqi(AloBhi + AhiBlo, 32);
26664 // return AloBlo + Hi;
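// This follows from splitting each operand at bit 32 (added annotation):
//   a * b = (alo + ahi * 2^32) * (blo + bhi * 2^32)
//         = alo*blo + (alo*bhi + ahi*blo) * 2^32  (mod 2^64),
// the ahi*bhi term vanishing because it is shifted entirely out of 64 bits.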
26665 KnownBits AKnown = DAG.computeKnownBits(A);
26666 KnownBits BKnown = DAG.computeKnownBits(B);
26668 APInt LowerBitsMask = APInt::getLowBitsSet(64, 32);
26669 bool ALoIsZero = LowerBitsMask.isSubsetOf(AKnown.Zero);
26670 bool BLoIsZero = LowerBitsMask.isSubsetOf(BKnown.Zero);
26672 APInt UpperBitsMask = APInt::getHighBitsSet(64, 32);
26673 bool AHiIsZero = UpperBitsMask.isSubsetOf(AKnown.Zero);
26674 bool BHiIsZero = UpperBitsMask.isSubsetOf(BKnown.Zero);
26676 SDValue Zero = DAG.getConstant(0, dl, VT);
26678 // Only multiply lo/hi halves that aren't known to be zero.
26679 SDValue AloBlo = Zero;
26680 if (!ALoIsZero && !BLoIsZero)
26681 AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
26683 SDValue AloBhi = Zero;
26684 if (!ALoIsZero && !BHiIsZero) {
26685 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
26686 AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
26689 SDValue AhiBlo = Zero;
26690 if (!AHiIsZero && !BLoIsZero) {
26691 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
26692 AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
26695 SDValue Hi = DAG.getNode(ISD::ADD, dl, VT, AloBhi, AhiBlo);
26696 Hi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Hi, 32, DAG);
26698 return DAG.getNode(ISD::ADD, dl, VT, AloBlo, Hi);
26699 }
26701 static SDValue LowerMULH(SDValue Op, const X86Subtarget &Subtarget,
26702 SelectionDAG &DAG) {
26703 SDLoc dl(Op);
26704 MVT VT = Op.getSimpleValueType();
26705 bool IsSigned = Op->getOpcode() == ISD::MULHS;
26706 unsigned NumElts = VT.getVectorNumElements();
26707 SDValue A = Op.getOperand(0);
26708 SDValue B = Op.getOperand(1);
26710 // Decompose 256-bit ops into 128-bit ops.
26711 if (VT.is256BitVector() && !Subtarget.hasInt256())
26712 return splitVectorIntBinary(Op, DAG);
26714 if ((VT == MVT::v32i16 || VT == MVT::v64i8) && !Subtarget.hasBWI())
26715 return splitVectorIntBinary(Op, DAG);
26717 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32) {
26718 assert((VT == MVT::v4i32 && Subtarget.hasSSE2()) ||
26719 (VT == MVT::v8i32 && Subtarget.hasInt256()) ||
26720 (VT == MVT::v16i32 && Subtarget.hasAVX512()));
26722 // PMULxD operations multiply each even value (starting at 0) of LHS with
26723 // the related value of RHS and produce a widened result.
26724 // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
26725 // => <2 x i64> <ae|cg>
26727 // In other words, to have all the results, we need to perform two PMULxD:
26728 // 1. one with the even values.
26729 // 2. one with the odd values.
26730 // To achieve #2, we need to place the odd values at even positions.
26732 // Place the odd value at an even position (basically, shift all values 1
26733 // step to the left):
26734 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1,
26735 9, -1, 11, -1, 13, -1, 15, -1};
26736 // <a|b|c|d> => <b|undef|d|undef>
26737 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, A, A,
26738 makeArrayRef(&Mask[0], NumElts));
26739 // <e|f|g|h> => <f|undef|h|undef>
26740 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, B, B,
26741 makeArrayRef(&Mask[0], NumElts));
26743 // Emit two multiplies, one for the lower 2 ints and one for the higher 2
26744 // ints.
26745 MVT MulVT = MVT::getVectorVT(MVT::i64, NumElts / 2);
26746 unsigned Opcode =
26747 (IsSigned && Subtarget.hasSSE41()) ? X86ISD::PMULDQ : X86ISD::PMULUDQ;
26748 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
26749 // => <2 x i64> <ae|cg>
26750 SDValue Mul1 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
26751 DAG.getBitcast(MulVT, A),
26752 DAG.getBitcast(MulVT, B)));
26753 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
26754 // => <2 x i64> <bf|dh>
26755 SDValue Mul2 = DAG.getBitcast(VT, DAG.getNode(Opcode, dl, MulVT,
26756 DAG.getBitcast(MulVT, Odd0),
26757 DAG.getBitcast(MulVT, Odd1)));
26759 // Shuffle it back into the right order.
26760 SmallVector<int, 16> ShufMask(NumElts);
26761 for (int i = 0; i != (int)NumElts; ++i)
26762 ShufMask[i] = (i / 2) * 2 + ((i % 2) * NumElts) + 1;
26764 SDValue Res = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, ShufMask);
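// Worked example (added annotation): for v4i32, NumElts = 4 gives ShufMask
// {1, 5, 3, 7}, i.e. the high i32 halves of Mul1 = <ae|cg> and Mul2 = <bf|dh>
// interleaved back into the original lane order <hi(ae)|hi(bf)|hi(cg)|hi(dh)>.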
26766 // If we have a signed multiply but no PMULDQ fix up the result of an
26767 // unsigned multiply.
26768 if (IsSigned && !Subtarget.hasSSE41()) {
26769 SDValue Zero = DAG.getConstant(0, dl, VT);
26770 SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
26771 DAG.getSetCC(dl, VT, Zero, A, ISD::SETGT), B);
26772 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
26773 DAG.getSetCC(dl, VT, Zero, B, ISD::SETGT), A);
26775 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
26776 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Fixup);
26777 }
26779 return Res;
26780 }
26782 // Only i8 vectors should need custom lowering after this.
26783 assert((VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
26784 (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
26785 "Unsupported vector type");
26787 // Lower v16i8/v32i8 as extension to v8i16/v16i16 vector pairs, multiply,
26788 // logical shift down the upper half and pack back to i8.
26790 // With SSE41 we can use sign/zero extend, but for pre-SSE41 we unpack
26791 // and then ashr/lshr the upper bits down to the lower bits before multiply.
26792 unsigned ExAVX = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
26794 if ((VT == MVT::v16i8 && Subtarget.hasInt256()) ||
26795 (VT == MVT::v32i8 && Subtarget.canExtendTo512BW())) {
26796 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
26797 SDValue ExA = DAG.getNode(ExAVX, dl, ExVT, A);
26798 SDValue ExB = DAG.getNode(ExAVX, dl, ExVT, B);
26799 SDValue Mul = DAG.getNode(ISD::MUL, dl, ExVT, ExA, ExB);
26800 Mul = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, Mul, 8, DAG);
26801 return DAG.getNode(ISD::TRUNCATE, dl, VT, Mul);
26802 }
26804 // For vXi8 we will unpack the low and high half of each 128-bit lane to widen
26805 // to a vXi16 type. Do the multiplies, shift the results and pack the half
26806 // lane results back together.
26808 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
26810 static const int PSHUFDMask[] = { 8, 9, 10, 11, 12, 13, 14, 15,
26811 -1, -1, -1, -1, -1, -1, -1, -1};
26813 // Extract the lo parts and zero/sign extend to i16.
26814 // Only use SSE4.1 instructions for signed v16i8 where using unpack requires
26815 // shifts to sign extend. Using unpack for unsigned only requires an xor to
26816 // create zeros and a copy due to tied register constraints pre-AVX. But using
26817 // zero_extend_vector_inreg would require an additional pshufd for the high
26818 // part.
26820 SDValue ALo, AHi;
26821 if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) {
26822 ALo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, A);
26824 AHi = DAG.getVectorShuffle(VT, dl, A, A, PSHUFDMask);
26825 AHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, AHi);
26826 } else if (IsSigned) {
26827 ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), A));
26828 AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), A));
26830 ALo = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, ALo, 8, DAG);
26831 AHi = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, AHi, 8, DAG);
26832 } else {
26833 ALo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, A,
26834 DAG.getConstant(0, dl, VT)));
26835 AHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, A,
26836 DAG.getConstant(0, dl, VT)));
26837 }
26839 SDValue BLo, BHi;
26840 if (ISD::isBuildVectorOfConstantSDNodes(B.getNode())) {
26841 // If the RHS is a constant, manually unpackl/unpackh and extend.
26842 SmallVector<SDValue, 16> LoOps, HiOps;
26843 for (unsigned i = 0; i != NumElts; i += 16) {
26844 for (unsigned j = 0; j != 8; ++j) {
26845 SDValue LoOp = B.getOperand(i + j);
26846 SDValue HiOp = B.getOperand(i + j + 8);
26848 if (IsSigned) {
26849 LoOp = DAG.getSExtOrTrunc(LoOp, dl, MVT::i16);
26850 HiOp = DAG.getSExtOrTrunc(HiOp, dl, MVT::i16);
26851 } else {
26852 LoOp = DAG.getZExtOrTrunc(LoOp, dl, MVT::i16);
26853 HiOp = DAG.getZExtOrTrunc(HiOp, dl, MVT::i16);
26854 }
26856 LoOps.push_back(LoOp);
26857 HiOps.push_back(HiOp);
26858 }
26859 }
26861 BLo = DAG.getBuildVector(ExVT, dl, LoOps);
26862 BHi = DAG.getBuildVector(ExVT, dl, HiOps);
26863 } else if (IsSigned && VT == MVT::v16i8 && Subtarget.hasSSE41()) {
26864 BLo = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, B);
26866 BHi = DAG.getVectorShuffle(VT, dl, B, B, PSHUFDMask);
26867 BHi = DAG.getNode(ISD::SIGN_EXTEND_VECTOR_INREG, dl, ExVT, BHi);
26868 } else if (IsSigned) {
26869 BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), B));
26870 BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), B));
26872 BLo = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, BLo, 8, DAG);
26873 BHi = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, BHi, 8, DAG);
26874 } else {
26875 BLo = DAG.getBitcast(ExVT, getUnpackl(DAG, dl, VT, B,
26876 DAG.getConstant(0, dl, VT)));
26877 BHi = DAG.getBitcast(ExVT, getUnpackh(DAG, dl, VT, B,
26878 DAG.getConstant(0, dl, VT)));
26879 }
26881 // Multiply, lshr the upper 8 bits down to the lower 8 bits of the lo/hi results
26882 // pack back to vXi8.
26883 SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);
26884 SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);
26885 RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, RLo, 8, DAG);
26886 RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExVT, RHi, 8, DAG);
26888 // Bitcast back to VT and then pack all the even elements from Lo and Hi.
26889 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
26890 }
26892 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
26893 assert(Subtarget.isTargetWin64() && "Unexpected target");
26894 EVT VT = Op.getValueType();
26895 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
26896 "Unexpected return type for lowering");
26900 switch (Op->getOpcode()) {
26901 default: llvm_unreachable("Unexpected request for libcall!");
26902 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
26903 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
26904 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
26905 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
26906 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
26907 case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
26908 }
26910 SDLoc dl(Op);
26911 SDValue InChain = DAG.getEntryNode();
26913 TargetLowering::ArgListTy Args;
26914 TargetLowering::ArgListEntry Entry;
26915 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
26916 EVT ArgVT = Op->getOperand(i).getValueType();
26917 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
26918 "Unexpected argument type for lowering");
26919 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
26920 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
26921 MachinePointerInfo MPI =
26922 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
26923 Entry.Node = StackPtr;
26924 InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr,
26925 MPI, /* Alignment = */ 16);
26926 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
26927 Entry.Ty = PointerType::get(ArgTy, 0);
26928 Entry.IsSExt = false;
26929 Entry.IsZExt = false;
26930 Args.push_back(Entry);
26931 }
26933 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
26934 getPointerTy(DAG.getDataLayout()));
26936 TargetLowering::CallLoweringInfo CLI(DAG);
26937 CLI.setDebugLoc(dl)
26938 .setChain(InChain)
26939 .setLibCallee(
26940 getLibcallCallingConv(LC),
26941 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()), Callee,
26942 std::move(Args))
26943 .setInRegister()
26944 .setSExtResult(isSigned)
26945 .setZExtResult(!isSigned);
26947 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
26948 return DAG.getBitcast(VT, CallInfo.first);
26949 }
26951 // Return true if the required (according to Opcode) shift-imm form is natively
26952 // supported by the Subtarget
26953 static bool SupportedVectorShiftWithImm(MVT VT, const X86Subtarget &Subtarget,
26954 unsigned Opcode) {
26955 if (VT.getScalarSizeInBits() < 16)
26956 return false;
26958 if (VT.is512BitVector() && Subtarget.hasAVX512() &&
26959 (VT.getScalarSizeInBits() > 16 || Subtarget.hasBWI()))
26960 return true;
26962 bool LShift = (VT.is128BitVector() && Subtarget.hasSSE2()) ||
26963 (VT.is256BitVector() && Subtarget.hasInt256());
26965 bool AShift = LShift && (Subtarget.hasAVX512() ||
26966 (VT != MVT::v2i64 && VT != MVT::v4i64));
26967 return (Opcode == ISD::SRA) ? AShift : LShift;
26968 }
26970 // The shift amount is a variable, but it is the same for all vector lanes.
26971 // These instructions are defined together with shift-immediate.
26972 static
26973 bool SupportedVectorShiftWithBaseAmnt(MVT VT, const X86Subtarget &Subtarget,
26974 unsigned Opcode) {
26975 return SupportedVectorShiftWithImm(VT, Subtarget, Opcode);
26976 }
26978 // Return true if the required (according to Opcode) variable-shift form is
26979 // natively supported by the Subtarget
26980 static bool SupportedVectorVarShift(MVT VT, const X86Subtarget &Subtarget,
26981 unsigned Opcode) {
26983 if (!Subtarget.hasInt256() || VT.getScalarSizeInBits() < 16)
26984 return false;
26986 // vXi16 supported only on AVX-512, BWI
26987 if (VT.getScalarSizeInBits() == 16 && !Subtarget.hasBWI())
26988 return false;
26990 if (Subtarget.hasAVX512())
26991 return true;
26993 bool LShift = VT.is128BitVector() || VT.is256BitVector();
26994 bool AShift = LShift && VT != MVT::v2i64 && VT != MVT::v4i64;
26995 return (Opcode == ISD::SRA) ? AShift : LShift;
26996 }
26998 static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
26999 const X86Subtarget &Subtarget) {
27000 MVT VT = Op.getSimpleValueType();
27001 SDLoc dl(Op);
27002 SDValue R = Op.getOperand(0);
27003 SDValue Amt = Op.getOperand(1);
27004 unsigned X86Opc = getTargetVShiftUniformOpcode(Op.getOpcode(), false);
27006 auto ArithmeticShiftRight64 = [&](uint64_t ShiftAmt) {
27007 assert((VT == MVT::v2i64 || VT == MVT::v4i64) && "Unexpected SRA type");
27008 MVT ExVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
27009 SDValue Ex = DAG.getBitcast(ExVT, R);
27011 // ashr(R, 63) === cmp_slt(R, 0)
27012 if (ShiftAmt == 63 && Subtarget.hasSSE42()) {
27013 assert((VT != MVT::v4i64 || Subtarget.hasInt256()) &&
27014 "Unsupported PCMPGT op");
27015 return DAG.getNode(X86ISD::PCMPGT, dl, VT, DAG.getConstant(0, dl, VT), R);
27016 }
27018 if (ShiftAmt >= 32) {
27019 // Splat sign to upper i32 dst, and SRA upper i32 src to lower i32.
27020 SDValue Upper =
27021 getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex, 31, DAG);
27022 SDValue Lower = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
27023 ShiftAmt - 32, DAG);
27024 if (VT == MVT::v2i64)
27025 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {5, 1, 7, 3});
27026 if (VT == MVT::v4i64)
27027 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
27028 {9, 1, 11, 3, 13, 5, 15, 7});
27029 } else {
27030 // SRA upper i32, SRL whole i64 and select lower i32.
27031 SDValue Upper = getTargetVShiftByConstNode(X86ISD::VSRAI, dl, ExVT, Ex,
27032 ShiftAmt, DAG);
27033 SDValue Lower =
27034 getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt, DAG);
27035 Lower = DAG.getBitcast(ExVT, Lower);
27036 if (VT == MVT::v2i64)
27037 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower, {4, 1, 6, 3});
27038 if (VT == MVT::v4i64)
27039 Ex = DAG.getVectorShuffle(ExVT, dl, Upper, Lower,
27040 {8, 1, 10, 3, 12, 5, 14, 7});
27041 }
27042 return DAG.getBitcast(VT, Ex);
27043 };
27045 // Optimize shl/srl/sra with constant shift amount.
27046 APInt APIntShiftAmt;
27047 if (!X86::isConstantSplat(Amt, APIntShiftAmt))
27048 return SDValue();
27050 // If the shift amount is out of range, return undef.
27051 if (APIntShiftAmt.uge(VT.getScalarSizeInBits()))
27052 return DAG.getUNDEF(VT);
27054 uint64_t ShiftAmt = APIntShiftAmt.getZExtValue();
27056 if (SupportedVectorShiftWithImm(VT, Subtarget, Op.getOpcode()))
27057 return getTargetVShiftByConstNode(X86Opc, dl, VT, R, ShiftAmt, DAG);
27059 // i64 SRA needs to be performed as partial shifts.
27060 if (((!Subtarget.hasXOP() && VT == MVT::v2i64) ||
27061 (Subtarget.hasInt256() && VT == MVT::v4i64)) &&
27062 Op.getOpcode() == ISD::SRA)
27063 return ArithmeticShiftRight64(ShiftAmt);
27065 if (VT == MVT::v16i8 || (Subtarget.hasInt256() && VT == MVT::v32i8) ||
27066 (Subtarget.hasBWI() && VT == MVT::v64i8)) {
27067 unsigned NumElts = VT.getVectorNumElements();
27068 MVT ShiftVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
27070 // Simple i8 add case
27071 if (Op.getOpcode() == ISD::SHL && ShiftAmt == 1)
27072 return DAG.getNode(ISD::ADD, dl, VT, R, R);
27074 // ashr(R, 7) === cmp_slt(R, 0)
27075 if (Op.getOpcode() == ISD::SRA && ShiftAmt == 7) {
27076 SDValue Zeros = DAG.getConstant(0, dl, VT);
27077 if (VT.is512BitVector()) {
27078 assert(VT == MVT::v64i8 && "Unexpected element type!");
27079 SDValue CMP = DAG.getSetCC(dl, MVT::v64i1, Zeros, R, ISD::SETGT);
27080 return DAG.getNode(ISD::SIGN_EXTEND, dl, VT, CMP);
27082 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
27083 }
27085 // XOP can shift v16i8 directly instead of as shift v8i16 + mask.
27086 if (VT == MVT::v16i8 && Subtarget.hasXOP())
27087 return SDValue();
27089 if (Op.getOpcode() == ISD::SHL) {
27090 // Make a large shift.
27091 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ShiftVT, R,
27092 ShiftAmt, DAG);
27093 SHL = DAG.getBitcast(VT, SHL);
27094 // Zero out the rightmost bits.
27095 APInt Mask = APInt::getHighBitsSet(8, 8 - ShiftAmt);
27096 return DAG.getNode(ISD::AND, dl, VT, SHL, DAG.getConstant(Mask, dl, VT));
27097 }
27098 if (Op.getOpcode() == ISD::SRL) {
27099 // Make a large shift.
27100 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ShiftVT, R,
27101 ShiftAmt, DAG);
27102 SRL = DAG.getBitcast(VT, SRL);
27103 // Zero out the leftmost bits.
27104 return DAG.getNode(ISD::AND, dl, VT, SRL,
27105 DAG.getConstant(uint8_t(-1U) >> ShiftAmt, dl, VT));
27106 }
27107 if (Op.getOpcode() == ISD::SRA) {
27108 // ashr(R, Amt) === sub(xor(lshr(R, Amt), Mask), Mask)
27109 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
27111 SDValue Mask = DAG.getConstant(128 >> ShiftAmt, dl, VT);
27112 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
27113 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
27114 return Res;
27115 }
27116 llvm_unreachable("Unknown shift opcode.");
27117 }
27119 return SDValue();
27120 }
27122 static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
27123 const X86Subtarget &Subtarget) {
27124 MVT VT = Op.getSimpleValueType();
27125 SDLoc dl(Op);
27126 SDValue R = Op.getOperand(0);
27127 SDValue Amt = Op.getOperand(1);
27128 unsigned Opcode = Op.getOpcode();
27129 unsigned X86OpcI = getTargetVShiftUniformOpcode(Opcode, false);
27130 unsigned X86OpcV = getTargetVShiftUniformOpcode(Opcode, true);
27132 if (SDValue BaseShAmt = DAG.getSplatValue(Amt)) {
27133 if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Opcode)) {
27134 MVT EltVT = VT.getVectorElementType();
27135 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
27136 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
27137 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
27138 else if (EltVT.bitsLT(MVT::i32))
27139 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
27141 return getTargetVShiftNode(X86OpcI, dl, VT, R, BaseShAmt, Subtarget, DAG);
27142 }
27144 // vXi8 shifts - shift as v8i16 + mask result.
27145 if (((VT == MVT::v16i8 && !Subtarget.canExtendTo512DQ()) ||
27146 (VT == MVT::v32i8 && !Subtarget.canExtendTo512BW()) ||
27147 VT == MVT::v64i8) &&
27148 !Subtarget.hasXOP()) {
27149 unsigned NumElts = VT.getVectorNumElements();
27150 MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
27151 if (SupportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, Opcode)) {
27152 unsigned LogicalOp = (Opcode == ISD::SHL ? ISD::SHL : ISD::SRL);
27153 unsigned LogicalX86Op = getTargetVShiftUniformOpcode(LogicalOp, false);
27154 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
27156 // Create the mask using vXi16 shifts. For shift-rights we need to move
27157 // the upper byte down before splatting the vXi8 mask.
27158 SDValue BitMask = DAG.getConstant(-1, dl, ExtVT);
27159 BitMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, BitMask,
27160 BaseShAmt, Subtarget, DAG);
27161 if (Opcode != ISD::SHL)
27162 BitMask = getTargetVShiftByConstNode(LogicalX86Op, dl, ExtVT, BitMask,
27163 8, DAG);
27164 BitMask = DAG.getBitcast(VT, BitMask);
27165 BitMask = DAG.getVectorShuffle(VT, dl, BitMask, BitMask,
27166 SmallVector<int, 64>(NumElts, 0));
27168 SDValue Res = getTargetVShiftNode(LogicalX86Op, dl, ExtVT,
27169 DAG.getBitcast(ExtVT, R), BaseShAmt,
27170 Subtarget, DAG);
27171 Res = DAG.getBitcast(VT, Res);
27172 Res = DAG.getNode(ISD::AND, dl, VT, Res, BitMask);
27174 if (Opcode == ISD::SRA) {
27175 // ashr(R, Amt) === sub(xor(lshr(R, Amt), SignMask), SignMask)
27176 // SignMask = lshr(SignBit, Amt) - safe to do this with PSRLW.
27177 SDValue SignMask = DAG.getConstant(0x8080, dl, ExtVT);
27178 SignMask = getTargetVShiftNode(LogicalX86Op, dl, ExtVT, SignMask,
27179 BaseShAmt, Subtarget, DAG);
27180 SignMask = DAG.getBitcast(VT, SignMask);
27181 Res = DAG.getNode(ISD::XOR, dl, VT, Res, SignMask);
27182 Res = DAG.getNode(ISD::SUB, dl, VT, Res, SignMask);
27183 }
27185 return Res;
27186 }
27187 }
27189 // Check cases (mainly 32-bit) where i64 is expanded into high and low parts.
27190 if (VT == MVT::v2i64 && Amt.getOpcode() == ISD::BITCAST &&
27191 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
27192 Amt = Amt.getOperand(0);
27193 unsigned Ratio = 64 / Amt.getScalarValueSizeInBits();
27194 std::vector<SDValue> Vals(Ratio);
27195 for (unsigned i = 0; i != Ratio; ++i)
27196 Vals[i] = Amt.getOperand(i);
27197 for (unsigned i = Ratio, e = Amt.getNumOperands(); i != e; i += Ratio) {
27198 for (unsigned j = 0; j != Ratio; ++j)
27199 if (Vals[j] != Amt.getOperand(i + j))
27200 return SDValue();
27201 }
27203 if (SupportedVectorShiftWithBaseAmnt(VT, Subtarget, Op.getOpcode()))
27204 return DAG.getNode(X86OpcV, dl, VT, R, Op.getOperand(1));
27205 }
27206 return SDValue();
27207 }
27209 // Convert a shift/rotate left amount to a multiplication scale factor.
27210 static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl,
27211 const X86Subtarget &Subtarget,
27212 SelectionDAG &DAG) {
27213 MVT VT = Amt.getSimpleValueType();
27214 if (!(VT == MVT::v8i16 || VT == MVT::v4i32 ||
27215 (Subtarget.hasInt256() && VT == MVT::v16i16) ||
27216 (!Subtarget.hasAVX512() && VT == MVT::v16i8)))
27217 return SDValue();
27219 if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
27220 SmallVector<SDValue, 8> Elts;
27221 MVT SVT = VT.getVectorElementType();
27222 unsigned SVTBits = SVT.getSizeInBits();
27223 APInt One(SVTBits, 1);
27224 unsigned NumElems = VT.getVectorNumElements();
27226 for (unsigned i = 0; i != NumElems; ++i) {
27227 SDValue Op = Amt->getOperand(i);
27228 if (Op->isUndef()) {
27229 Elts.push_back(Op);
27230 continue;
27231 }
27233 ConstantSDNode *ND = cast<ConstantSDNode>(Op);
27234 APInt C(SVTBits, ND->getZExtValue());
27235 uint64_t ShAmt = C.getZExtValue();
27236 if (ShAmt >= SVTBits) {
27237 Elts.push_back(DAG.getUNDEF(SVT));
27238 continue;
27239 }
27240 Elts.push_back(DAG.getConstant(One.shl(ShAmt), dl, SVT));
27241 }
27242 return DAG.getBuildVector(VT, dl, Elts);
27243 }
27245 // If the target doesn't support variable shifts, use either FP conversion
27246 // or integer multiplication to avoid shifting each element individually.
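// The v4i32 case below builds the float 2^Amt directly (added annotation):
// shifting Amt into the exponent field (bit 23) and adding 0x3f800000 (1.0f,
// biased exponent 127) produces a float whose exponent is 127 + Amt, i.e. the
// value 2^Amt, which FP_TO_SINT converts into the integer scale factor.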
27247 if (VT == MVT::v4i32) {
27248 Amt = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, dl, VT));
27249 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt,
27250 DAG.getConstant(0x3f800000U, dl, VT));
27251 Amt = DAG.getBitcast(MVT::v4f32, Amt);
27252 return DAG.getNode(ISD::FP_TO_SINT, dl, VT, Amt);
27253 }
27255 // AVX2 can more effectively perform this as a zext/trunc to/from v8i32.
27256 if (VT == MVT::v8i16 && !Subtarget.hasAVX2()) {
27257 SDValue Z = DAG.getConstant(0, dl, VT);
27258 SDValue Lo = DAG.getBitcast(MVT::v4i32, getUnpackl(DAG, dl, VT, Amt, Z));
27259 SDValue Hi = DAG.getBitcast(MVT::v4i32, getUnpackh(DAG, dl, VT, Amt, Z));
27260 Lo = convertShiftLeftToScale(Lo, dl, Subtarget, DAG);
27261 Hi = convertShiftLeftToScale(Hi, dl, Subtarget, DAG);
27262 if (Subtarget.hasSSE41())
27263 return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
27265 return DAG.getVectorShuffle(VT, dl, DAG.getBitcast(VT, Lo),
27266 DAG.getBitcast(VT, Hi),
27267 {0, 2, 4, 6, 8, 10, 12, 14});
27268 }
27270 return SDValue();
27271 }
27273 static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
27274 SelectionDAG &DAG) {
27275 MVT VT = Op.getSimpleValueType();
27276 SDLoc dl(Op);
27277 SDValue R = Op.getOperand(0);
27278 SDValue Amt = Op.getOperand(1);
27279 unsigned EltSizeInBits = VT.getScalarSizeInBits();
27280 bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
27282 unsigned Opc = Op.getOpcode();
27283 unsigned X86OpcV = getTargetVShiftUniformOpcode(Opc, true);
27284 unsigned X86OpcI = getTargetVShiftUniformOpcode(Opc, false);
27286 assert(VT.isVector() && "Custom lowering only for vector shifts!");
27287 assert(Subtarget.hasSSE2() && "Only custom lower when we have SSE2!");
27289 if (SDValue V = LowerScalarImmediateShift(Op, DAG, Subtarget))
27290 return V;
27292 if (SDValue V = LowerScalarVariableShift(Op, DAG, Subtarget))
27293 return V;
27295 if (SupportedVectorVarShift(VT, Subtarget, Opc))
27296 return Op;
27298 // XOP has 128-bit variable logical/arithmetic shifts.
27299 // +ve/-ve Amt = shift left/right.
27300 if (Subtarget.hasXOP() && (VT == MVT::v2i64 || VT == MVT::v4i32 ||
27301 VT == MVT::v8i16 || VT == MVT::v16i8)) {
27302 if (Opc == ISD::SRL || Opc == ISD::SRA) {
27303 SDValue Zero = DAG.getConstant(0, dl, VT);
27304 Amt = DAG.getNode(ISD::SUB, dl, VT, Zero, Amt);
27305 }
27306 if (Opc == ISD::SHL || Opc == ISD::SRL)
27307 return DAG.getNode(X86ISD::VPSHL, dl, VT, R, Amt);
27308 if (Opc == ISD::SRA)
27309 return DAG.getNode(X86ISD::VPSHA, dl, VT, R, Amt);
27310 }
27312 // 2i64 vector logical shifts can efficiently avoid scalarization - do the
27313 // shifts per-lane and then shuffle the partial results back together.
27314 if (VT == MVT::v2i64 && Opc != ISD::SRA) {
27315 // Splat the shift amounts so the scalar shifts above will catch it.
27316 SDValue Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {0, 0});
27317 SDValue Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Amt, {1, 1});
27318 SDValue R0 = DAG.getNode(Opc, dl, VT, R, Amt0);
27319 SDValue R1 = DAG.getNode(Opc, dl, VT, R, Amt1);
27320 return DAG.getVectorShuffle(VT, dl, R0, R1, {0, 3});
27321 }
27323 // i64 vector arithmetic shift can be emulated with the transform:
27324 // M = lshr(SIGN_MASK, Amt)
27325 // ashr(R, Amt) === sub(xor(lshr(R, Amt), M), M)
27326 if ((VT == MVT::v2i64 || (VT == MVT::v4i64 && Subtarget.hasInt256())) &&
27327 Opc == ISD::SRA) {
27328 SDValue S = DAG.getConstant(APInt::getSignMask(64), dl, VT);
27329 SDValue M = DAG.getNode(ISD::SRL, dl, VT, S, Amt);
27330 R = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
27331 R = DAG.getNode(ISD::XOR, dl, VT, R, M);
27332 R = DAG.getNode(ISD::SUB, dl, VT, R, M);
27333 return R;
27334 }
27336 // If possible, lower this shift as a sequence of two shifts by
27337 // constant plus a BLENDing shuffle instead of scalarizing it.
27339 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
27341 // Could be rewritten as:
27342 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
27344 // The advantage is that the two shifts from the example would be
27345 // lowered as X86ISD::VSRLI nodes in parallel before blending.
27346 if (ConstantAmt && (VT == MVT::v8i16 || VT == MVT::v4i32 ||
27347 (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
27348 SDValue Amt1, Amt2;
27349 unsigned NumElts = VT.getVectorNumElements();
27350 SmallVector<int, 8> ShuffleMask;
27351 for (unsigned i = 0; i != NumElts; ++i) {
27352 SDValue A = Amt->getOperand(i);
27353 if (A.isUndef()) {
27354 ShuffleMask.push_back(SM_SentinelUndef);
27355 continue;
27356 }
27357 if (!Amt1 || Amt1 == A) {
27358 ShuffleMask.push_back(i);
27359 Amt1 = A;
27360 continue;
27361 }
27362 if (!Amt2 || Amt2 == A) {
27363 ShuffleMask.push_back(i + NumElts);
27364 Amt2 = A;
27365 continue;
27366 }
27367 break;
27368 }
27370 // Only perform this blend if we can perform it without loading a mask.
27371 if (ShuffleMask.size() == NumElts && Amt1 && Amt2 &&
27372 (VT != MVT::v16i16 ||
27373 is128BitLaneRepeatedShuffleMask(VT, ShuffleMask)) &&
27374 (VT == MVT::v4i32 || Subtarget.hasSSE41() || Opc != ISD::SHL ||
27375 canWidenShuffleElements(ShuffleMask))) {
27376 auto *Cst1 = dyn_cast<ConstantSDNode>(Amt1);
27377 auto *Cst2 = dyn_cast<ConstantSDNode>(Amt2);
27378 if (Cst1 && Cst2 && Cst1->getAPIntValue().ult(EltSizeInBits) &&
27379 Cst2->getAPIntValue().ult(EltSizeInBits)) {
27380 SDValue Shift1 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
27381 Cst1->getZExtValue(), DAG);
27382 SDValue Shift2 = getTargetVShiftByConstNode(X86OpcI, dl, VT, R,
27383 Cst2->getZExtValue(), DAG);
27384 return DAG.getVectorShuffle(VT, dl, Shift1, Shift2, ShuffleMask);
27385 }
27386 }
27387 }
27389 // If possible, lower this packed shift into a vector multiply instead of
27390 // expanding it into a sequence of scalar shifts.
27391 if (Opc == ISD::SHL)
27392 if (SDValue Scale = convertShiftLeftToScale(Amt, dl, Subtarget, DAG))
27393 return DAG.getNode(ISD::MUL, dl, VT, R, Scale);
27395 // Constant ISD::SRL can be performed efficiently on vXi16 vectors as we
27396 // can replace with ISD::MULHU, creating scale factor from (NumEltBits - Amt).
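// Worked example (added annotation): for i16 lanes, srl X, 3 becomes
// mulhu(X, 1 << (16 - 3)) = (X * 0x2000) >> 16 = X >> 3. Amounts of zero are
// handled by the select below, since 1 << 16 is not representable in an i16
// element.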
27397 if (Opc == ISD::SRL && ConstantAmt &&
27398 (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256()))) {
27399 SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
27400 SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
27401 if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
27402 SDValue Zero = DAG.getConstant(0, dl, VT);
27403 SDValue ZAmt = DAG.getSetCC(dl, VT, Amt, Zero, ISD::SETEQ);
27404 SDValue Res = DAG.getNode(ISD::MULHU, dl, VT, R, Scale);
27405 return DAG.getSelect(dl, VT, ZAmt, R, Res);
27406 }
27407 }
27409 // Constant ISD::SRA can be performed efficiently on vXi16 vectors as we
27410 // can replace with ISD::MULHS, creating scale factor from (NumEltBits - Amt).
27411 // TODO: Special case handling for shift by 0/1, really we can afford either
27412 // of these cases in pre-SSE41/XOP/AVX512 but not both.
27413 if (Opc == ISD::SRA && ConstantAmt &&
27414 (VT == MVT::v8i16 || (VT == MVT::v16i16 && Subtarget.hasInt256())) &&
27415 ((Subtarget.hasSSE41() && !Subtarget.hasXOP() &&
27416 !Subtarget.hasAVX512()) ||
27417 DAG.isKnownNeverZero(Amt))) {
27418 SDValue EltBits = DAG.getConstant(EltSizeInBits, dl, VT);
27419 SDValue RAmt = DAG.getNode(ISD::SUB, dl, VT, EltBits, Amt);
27420 if (SDValue Scale = convertShiftLeftToScale(RAmt, dl, Subtarget, DAG)) {
27421 SDValue Amt0 =
27422 DAG.getSetCC(dl, VT, Amt, DAG.getConstant(0, dl, VT), ISD::SETEQ);
27423 SDValue Amt1 =
27424 DAG.getSetCC(dl, VT, Amt, DAG.getConstant(1, dl, VT), ISD::SETEQ);
27425 SDValue Sra1 =
27426 getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, 1, DAG);
27427 SDValue Res = DAG.getNode(ISD::MULHS, dl, VT, R, Scale);
27428 Res = DAG.getSelect(dl, VT, Amt0, R, Res);
27429 return DAG.getSelect(dl, VT, Amt1, Sra1, Res);
27430 }
27431 }
27433 // v4i32 Non Uniform Shifts.
27434 // If the shift amount is constant we can shift each lane using the SSE2
27435 // immediate shifts, else we need to zero-extend each lane to the lower i64
27436 // and shift using the SSE2 variable shifts.
27437 // The separate results can then be blended together.
27438 if (VT == MVT::v4i32) {
27439 SDValue Amt0, Amt1, Amt2, Amt3;
27440 if (ConstantAmt) {
27441 Amt0 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {0, 0, 0, 0});
27442 Amt1 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {1, 1, 1, 1});
27443 Amt2 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {2, 2, 2, 2});
27444 Amt3 = DAG.getVectorShuffle(VT, dl, Amt, DAG.getUNDEF(VT), {3, 3, 3, 3});
27445 } else {
27446 // The SSE2 shifts use the lower i64 as the same shift amount for
27447 // all lanes and the upper i64 is ignored. On AVX we're better off
27448 // just zero-extending, but for SSE just duplicating the top 16-bits is
27449 // cheaper and has the same effect for out of range values.
27450 if (Subtarget.hasAVX()) {
27451 SDValue Z = DAG.getConstant(0, dl, VT);
27452 Amt0 = DAG.getVectorShuffle(VT, dl, Amt, Z, {0, 4, -1, -1});
27453 Amt1 = DAG.getVectorShuffle(VT, dl, Amt, Z, {1, 5, -1, -1});
27454 Amt2 = DAG.getVectorShuffle(VT, dl, Amt, Z, {2, 6, -1, -1});
27455 Amt3 = DAG.getVectorShuffle(VT, dl, Amt, Z, {3, 7, -1, -1});
27456 } else {
27457 SDValue Amt01 = DAG.getBitcast(MVT::v8i16, Amt);
27458 SDValue Amt23 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
27459 {4, 5, 6, 7, -1, -1, -1, -1});
27460 Amt0 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
27461 {0, 1, 1, 1, -1, -1, -1, -1});
27462 Amt1 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt01, Amt01,
27463 {2, 3, 3, 3, -1, -1, -1, -1});
27464 Amt2 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
27465 {0, 1, 1, 1, -1, -1, -1, -1});
27466 Amt3 = DAG.getVectorShuffle(MVT::v8i16, dl, Amt23, Amt23,
27467 {2, 3, 3, 3, -1, -1, -1, -1});
27468 }
27469 }
27471 unsigned ShOpc = ConstantAmt ? Opc : X86OpcV;
27472 SDValue R0 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt0));
27473 SDValue R1 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt1));
27474 SDValue R2 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt2));
27475 SDValue R3 = DAG.getNode(ShOpc, dl, VT, R, DAG.getBitcast(VT, Amt3));
27477 // Merge the shifted lane results optimally with/without PBLENDW.
27478 // TODO - ideally shuffle combining would handle this.
27479 if (Subtarget.hasSSE41()) {
27480 SDValue R02 = DAG.getVectorShuffle(VT, dl, R0, R2, {0, -1, 6, -1});
27481 SDValue R13 = DAG.getVectorShuffle(VT, dl, R1, R3, {-1, 1, -1, 7});
27482 return DAG.getVectorShuffle(VT, dl, R02, R13, {0, 5, 2, 7});
27483 }
27484 SDValue R01 = DAG.getVectorShuffle(VT, dl, R0, R1, {0, -1, -1, 5});
27485 SDValue R23 = DAG.getVectorShuffle(VT, dl, R2, R3, {2, -1, -1, 7});
27486 return DAG.getVectorShuffle(VT, dl, R01, R23, {0, 3, 4, 7});
27487 }
27489 // It's worth extending once and using the vXi16/vXi32 shifts for smaller
27490 // types, but without AVX512 the extra overheads to get from vXi8 to vXi32
27491 // make the existing SSE solution better.
27492 // NOTE: We honor preferred vector width before promoting to 512-bits.
27493 if ((Subtarget.hasInt256() && VT == MVT::v8i16) ||
27494 (Subtarget.canExtendTo512DQ() && VT == MVT::v16i16) ||
27495 (Subtarget.canExtendTo512DQ() && VT == MVT::v16i8) ||
27496 (Subtarget.canExtendTo512BW() && VT == MVT::v32i8) ||
27497 (Subtarget.hasBWI() && Subtarget.hasVLX() && VT == MVT::v16i8)) {
27498 assert((!Subtarget.hasBWI() || VT == MVT::v32i8 || VT == MVT::v16i8) &&
27499 "Unexpected vector type");
27500 MVT EvtSVT = Subtarget.hasBWI() ? MVT::i16 : MVT::i32;
27501 MVT ExtVT = MVT::getVectorVT(EvtSVT, VT.getVectorNumElements());
27502 unsigned ExtOpc = Opc == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
27503 R = DAG.getNode(ExtOpc, dl, ExtVT, R);
27504 Amt = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Amt);
27505 return DAG.getNode(ISD::TRUNCATE, dl, VT,
27506 DAG.getNode(Opc, dl, ExtVT, R, Amt));
27507 }
27509 // Constant ISD::SRA/SRL can be performed efficiently on vXi8 vectors as we
27510 // extend to vXi16 to perform a MUL scale effectively as a MUL_LOHI.
27511 if (ConstantAmt && (Opc == ISD::SRA || Opc == ISD::SRL) &&
27512 (VT == MVT::v16i8 || (VT == MVT::v32i8 && Subtarget.hasInt256()) ||
27513 (VT == MVT::v64i8 && Subtarget.hasBWI())) &&
27514 !Subtarget.hasXOP()) {
27515 int NumElts = VT.getVectorNumElements();
27516 SDValue Cst8 = DAG.getTargetConstant(8, dl, MVT::i8);
27518 // Extend constant shift amount to vXi16 (it doesn't matter if the type
27519 // isn't legal).
27520 MVT ExVT = MVT::getVectorVT(MVT::i16, NumElts);
27521 Amt = DAG.getZExtOrTrunc(Amt, dl, ExVT);
27522 Amt = DAG.getNode(ISD::SUB, dl, ExVT, DAG.getConstant(8, dl, ExVT), Amt);
27523 Amt = DAG.getNode(ISD::SHL, dl, ExVT, DAG.getConstant(1, dl, ExVT), Amt);
27524 assert(ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()) &&
27525 "Constant build vector expected");
27527 if (VT == MVT::v16i8 && Subtarget.hasInt256()) {
27528 R = Opc == ISD::SRA ? DAG.getSExtOrTrunc(R, dl, ExVT)
27529 : DAG.getZExtOrTrunc(R, dl, ExVT);
27530 R = DAG.getNode(ISD::MUL, dl, ExVT, R, Amt);
27531 R = DAG.getNode(X86ISD::VSRLI, dl, ExVT, R, Cst8);
27532 return DAG.getZExtOrTrunc(R, dl, VT);
27533 }
27535 SmallVector<SDValue, 16> LoAmt, HiAmt;
27536 for (int i = 0; i != NumElts; i += 16) {
27537 for (int j = 0; j != 8; ++j) {
27538 LoAmt.push_back(Amt.getOperand(i + j));
27539 HiAmt.push_back(Amt.getOperand(i + j + 8));
27540 }
27541 }
27543 MVT VT16 = MVT::getVectorVT(MVT::i16, NumElts / 2);
27544 SDValue LoA = DAG.getBuildVector(VT16, dl, LoAmt);
27545 SDValue HiA = DAG.getBuildVector(VT16, dl, HiAmt);
27547 SDValue LoR = DAG.getBitcast(VT16, getUnpackl(DAG, dl, VT, R, R));
27548 SDValue HiR = DAG.getBitcast(VT16, getUnpackh(DAG, dl, VT, R, R));
27549 LoR = DAG.getNode(X86OpcI, dl, VT16, LoR, Cst8);
27550 HiR = DAG.getNode(X86OpcI, dl, VT16, HiR, Cst8);
27551 LoR = DAG.getNode(ISD::MUL, dl, VT16, LoR, LoA);
27552 HiR = DAG.getNode(ISD::MUL, dl, VT16, HiR, HiA);
27553 LoR = DAG.getNode(X86ISD::VSRLI, dl, VT16, LoR, Cst8);
27554 HiR = DAG.getNode(X86ISD::VSRLI, dl, VT16, HiR, Cst8);
27555 return DAG.getNode(X86ISD::PACKUS, dl, VT, LoR, HiR);
27556 }
27558 if (VT == MVT::v16i8 ||
27559 (VT == MVT::v32i8 && Subtarget.hasInt256() && !Subtarget.hasXOP()) ||
27560 (VT == MVT::v64i8 && Subtarget.hasBWI())) {
27561 MVT ExtVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements() / 2);
27563 auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
27564 if (VT.is512BitVector()) {
27565 // On AVX512BW targets we make use of the fact that VSELECT lowers
27566 // to a masked blend which selects bytes based just on the sign bit
27567 // extracted to a mask.
27568 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
27569 V0 = DAG.getBitcast(VT, V0);
27570 V1 = DAG.getBitcast(VT, V1);
27571 Sel = DAG.getBitcast(VT, Sel);
27572 Sel = DAG.getSetCC(dl, MaskVT, DAG.getConstant(0, dl, VT), Sel,
27573 ISD::SETGT);
27574 return DAG.getBitcast(SelVT, DAG.getSelect(dl, VT, Sel, V0, V1));
27575 } else if (Subtarget.hasSSE41()) {
27576 // On SSE41 targets we can use PBLENDVB which selects bytes based just
27577 // on the sign bit.
27578 V0 = DAG.getBitcast(VT, V0);
27579 V1 = DAG.getBitcast(VT, V1);
27580 Sel = DAG.getBitcast(VT, Sel);
27581 return DAG.getBitcast(SelVT,
27582 DAG.getNode(X86ISD::BLENDV, dl, VT, Sel, V0, V1));
27583 }
27584 // On pre-SSE41 targets we test for the sign bit by comparing to
27585 // zero - a negative value will set all bits of the lanes to true
27586 // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
27587 SDValue Z = DAG.getConstant(0, dl, SelVT);
27588 SDValue C = DAG.getNode(X86ISD::PCMPGT, dl, SelVT, Z, Sel);
27589 return DAG.getSelect(dl, SelVT, C, V0, V1);
27590 };
27592 // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
27593 // We can safely do this using i16 shifts as we're only interested in
27594 // the 3 lower bits of each byte.
27595 Amt = DAG.getBitcast(ExtVT, Amt);
27596 Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, ExtVT, Amt, 5, DAG);
27597 Amt = DAG.getBitcast(VT, Amt);
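// e.g. a vXi8 shift amount of 0..7 only occupies bits 2:0, so after the << 5
// above bit 2 sits in each byte's sign bit; every "a += a" step below then
// moves the next lower amount bit into the sign-bit position.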
27599 if (Opc == ISD::SHL || Opc == ISD::SRL) {
27600 // r = VSELECT(r, shift(r, 4), a);
27601 SDValue M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(4, dl, VT));
27602 R = SignBitSelect(VT, Amt, M, R);
27604 // a += a
27605 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
27607 // r = VSELECT(r, shift(r, 2), a);
27608 M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(2, dl, VT));
27609 R = SignBitSelect(VT, Amt, M, R);
27611 // a += a
27612 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
27614 // return VSELECT(r, shift(r, 1), a);
27615 M = DAG.getNode(Opc, dl, VT, R, DAG.getConstant(1, dl, VT));
27616 R = SignBitSelect(VT, Amt, M, R);
27617 return R;
27618 }
27620 if (Opc == ISD::SRA) {
27621 // For SRA we need to unpack each byte to the higher byte of an i16 vector
27622 // so we can correctly sign extend. We don't care what happens to the
27623 // lower byte.
27624 SDValue ALo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
27625 SDValue AHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), Amt);
27626 SDValue RLo = getUnpackl(DAG, dl, VT, DAG.getUNDEF(VT), R);
27627 SDValue RHi = getUnpackh(DAG, dl, VT, DAG.getUNDEF(VT), R);
27628 ALo = DAG.getBitcast(ExtVT, ALo);
27629 AHi = DAG.getBitcast(ExtVT, AHi);
27630 RLo = DAG.getBitcast(ExtVT, RLo);
27631 RHi = DAG.getBitcast(ExtVT, RHi);
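// e.g. unpacklo(undef, R) interleaves to {undef, r0, undef, r1, ...}, so each
// source byte lands in the high byte of an i16 element and the 16-bit
// arithmetic shifts below see its true sign bit.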
27633 // r = VSELECT(r, shift(r, 4), a);
27634 SDValue MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 4, DAG);
27635 SDValue MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 4, DAG);
27636 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
27637 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
27639 // a += a
27640 ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
27641 AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
27643 // r = VSELECT(r, shift(r, 2), a);
27644 MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 2, DAG);
27645 MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 2, DAG);
27646 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
27647 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
27649 // a += a
27650 ALo = DAG.getNode(ISD::ADD, dl, ExtVT, ALo, ALo);
27651 AHi = DAG.getNode(ISD::ADD, dl, ExtVT, AHi, AHi);
27653 // r = VSELECT(r, shift(r, 1), a);
27654 MLo = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RLo, 1, DAG);
27655 MHi = getTargetVShiftByConstNode(X86OpcI, dl, ExtVT, RHi, 1, DAG);
27656 RLo = SignBitSelect(ExtVT, ALo, MLo, RLo);
27657 RHi = SignBitSelect(ExtVT, AHi, MHi, RHi);
27659 // Logical shift the result back to the lower byte, leaving a zero upper
27660 // byte meaning that we can safely pack with PACKUSWB.
27661 RLo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RLo, 8, DAG);
27662 RHi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, RHi, 8, DAG);
27663 return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);
27664 }
27667 if (Subtarget.hasInt256() && !Subtarget.hasXOP() && VT == MVT::v16i16) {
27668 MVT ExtVT = MVT::v8i32;
27669 SDValue Z = DAG.getConstant(0, dl, VT);
27670 SDValue ALo = getUnpackl(DAG, dl, VT, Amt, Z);
27671 SDValue AHi = getUnpackh(DAG, dl, VT, Amt, Z);
27672 SDValue RLo = getUnpackl(DAG, dl, VT, Z, R);
27673 SDValue RHi = getUnpackh(DAG, dl, VT, Z, R);
27674 ALo = DAG.getBitcast(ExtVT, ALo);
27675 AHi = DAG.getBitcast(ExtVT, AHi);
27676 RLo = DAG.getBitcast(ExtVT, RLo);
27677 RHi = DAG.getBitcast(ExtVT, RHi);
27678 SDValue Lo = DAG.getNode(Opc, dl, ExtVT, RLo, ALo);
27679 SDValue Hi = DAG.getNode(Opc, dl, ExtVT, RHi, AHi);
27680 Lo = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Lo, 16, DAG);
27681 Hi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, ExtVT, Hi, 16, DAG);
27682 return DAG.getNode(X86ISD::PACKUS, dl, VT, Lo, Hi);
27683 }
27685 if (VT == MVT::v8i16) {
27686 // If we have a constant shift amount, the non-SSE41 path is best as
27687 // avoiding bitcasts makes it easier to constant fold and reduce to PBLENDW.
27688 bool UseSSE41 = Subtarget.hasSSE41() &&
27689 !ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
27691 auto SignBitSelect = [&](SDValue Sel, SDValue V0, SDValue V1) {
27692 // On SSE41 targets we can use PBLENDVB which selects bytes based just on
27693 // the sign bit.
27694 if (UseSSE41) {
27695 MVT ExtVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() * 2);
27696 V0 = DAG.getBitcast(ExtVT, V0);
27697 V1 = DAG.getBitcast(ExtVT, V1);
27698 Sel = DAG.getBitcast(ExtVT, Sel);
27699 return DAG.getBitcast(
27700 VT, DAG.getNode(X86ISD::BLENDV, dl, ExtVT, Sel, V0, V1));
27701 }
27702 // On pre-SSE41 targets we splat the sign bit - a negative value will
27703 // set all bits of the lanes to true and VSELECT uses that in
27704 // its OR(AND(V0,C),AND(V1,~C)) lowering.
27705 SDValue C =
27706 getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Sel, 15, DAG);
27707 return DAG.getSelect(dl, VT, C, V0, V1);
27708 };
27710 // Turn 'a' into a mask suitable for VSELECT: a = a << 12;
27711 if (UseSSE41) {
27712 // On SSE41 targets we need to replicate the shift mask in both
27713 // bytes for PBLENDVB.
27714 Amt = DAG.getNode(
27715 ISD::OR, dl, VT,
27716 getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 4, DAG),
27717 getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG));
27718 } else {
27719 Amt = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Amt, 12, DAG);
27720 }
27722 // r = VSELECT(r, shift(r, 8), a);
27723 SDValue M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 8, DAG);
27724 R = SignBitSelect(Amt, M, R);
27726 // a += a
27727 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
27729 // r = VSELECT(r, shift(r, 4), a);
27730 M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 4, DAG);
27731 R = SignBitSelect(Amt, M, R);
27733 // a += a
27734 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
27736 // r = VSELECT(r, shift(r, 2), a);
27737 M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 2, DAG);
27738 R = SignBitSelect(Amt, M, R);
27740 // a += a
27741 Amt = DAG.getNode(ISD::ADD, dl, VT, Amt, Amt);
27743 // return VSELECT(r, shift(r, 1), a);
27744 M = getTargetVShiftByConstNode(X86OpcI, dl, VT, R, 1, DAG);
27745 R = SignBitSelect(Amt, M, R);
27746 return R;
27747 }
27749 // Decompose 256-bit shifts into 128-bit shifts.
27750 if (VT.is256BitVector())
27751 return splitVectorIntBinary(Op, DAG);
27753 if (VT == MVT::v32i16 || VT == MVT::v64i8)
27754 return splitVectorIntBinary(Op, DAG);
27756 return SDValue();
27757 }
27759 static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
27760 SelectionDAG &DAG) {
27761 MVT VT = Op.getSimpleValueType();
27762 assert(VT.isVector() && "Custom lowering only for vector rotates!");
27764 SDLoc DL(Op);
27765 SDValue R = Op.getOperand(0);
27766 SDValue Amt = Op.getOperand(1);
27767 unsigned Opcode = Op.getOpcode();
27768 unsigned EltSizeInBits = VT.getScalarSizeInBits();
27769 int NumElts = VT.getVectorNumElements();
27771 // Check for constant splat rotation amount.
27772 APInt CstSplatValue;
27773 bool IsCstSplat = X86::isConstantSplat(Amt, CstSplatValue);
27775 // Check for splat rotate by zero.
27776 if (IsCstSplat && CstSplatValue.urem(EltSizeInBits) == 0)
27777 return R;
27779 // AVX512 implicitly uses modulo rotation amounts.
27780 if (Subtarget.hasAVX512() && 32 <= EltSizeInBits) {
27781 // Attempt to rotate by immediate.
27782 if (IsCstSplat) {
27783 unsigned RotOpc = (Opcode == ISD::ROTL ? X86ISD::VROTLI : X86ISD::VROTRI);
27784 uint64_t RotAmt = CstSplatValue.urem(EltSizeInBits);
27785 return DAG.getNode(RotOpc, DL, VT, R,
27786 DAG.getTargetConstant(RotAmt, DL, MVT::i8));
27787 }
27789 // Else, fall back on VPROLV/VPRORV.
27790 return Op;
27791 }
27793 assert((Opcode == ISD::ROTL) && "Only ROTL supported");
27795 // XOP has 128-bit vector variable + immediate rotates.
27796 // +ve/-ve Amt = rotate left/right - just need to handle ISD::ROTL.
27797 // XOP implicitly uses modulo rotation amounts.
27798 if (Subtarget.hasXOP()) {
27799 if (VT.is256BitVector())
27800 return splitVectorIntBinary(Op, DAG);
27801 assert(VT.is128BitVector() && "Only rotate 128-bit vectors!");
27803 // Attempt to rotate by immediate.
27804 if (IsCstSplat) {
27805 uint64_t RotAmt = CstSplatValue.urem(EltSizeInBits);
27806 return DAG.getNode(X86ISD::VROTLI, DL, VT, R,
27807 DAG.getTargetConstant(RotAmt, DL, MVT::i8));
27808 }
27810 // Use general rotate by variable (per-element).
27811 return Op;
27812 }
27814 // Split 256-bit integers on pre-AVX2 targets.
27815 if (VT.is256BitVector() && !Subtarget.hasAVX2())
27816 return splitVectorIntBinary(Op, DAG);
27818 assert((VT == MVT::v4i32 || VT == MVT::v8i16 || VT == MVT::v16i8 ||
27819 ((VT == MVT::v8i32 || VT == MVT::v16i16 || VT == MVT::v32i8) &&
27820 Subtarget.hasAVX2())) &&
27821 "Only vXi32/vXi16/vXi8 vector rotates supported");
27823 // Rotate by a uniform constant - expand back to shifts.
27824 if (IsCstSplat)
27825 return SDValue();
27827 bool IsSplatAmt = DAG.isSplatValue(Amt);
27829 // v16i8/v32i8: Split rotation into rot4/rot2/rot1 stages and select by
27831 if (EltSizeInBits == 8 && !IsSplatAmt) {
27832 if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()))
27833 return SDValue();
27835 // We don't need ModuloAmt here as we just peek at individual bits.
27836 MVT ExtVT = MVT::getVectorVT(MVT::i16, NumElts / 2);
27838 auto SignBitSelect = [&](MVT SelVT, SDValue Sel, SDValue V0, SDValue V1) {
27839 if (Subtarget.hasSSE41()) {
27840 // On SSE41 targets we can use PBLENDVB which selects bytes based just
27841 // on the sign bit.
27842 V0 = DAG.getBitcast(VT, V0);
27843 V1 = DAG.getBitcast(VT, V1);
27844 Sel = DAG.getBitcast(VT, Sel);
27845 return DAG.getBitcast(SelVT,
27846 DAG.getNode(X86ISD::BLENDV, DL, VT, Sel, V0, V1));
27848 // On pre-SSE41 targets we test for the sign bit by comparing to
27849 // zero - a negative value will set all bits of the lanes to true
27850 // and VSELECT uses that in its OR(AND(V0,C),AND(V1,~C)) lowering.
27851 SDValue Z = DAG.getConstant(0, DL, SelVT);
27852 SDValue C = DAG.getNode(X86ISD::PCMPGT, DL, SelVT, Z, Sel);
27853 return DAG.getSelect(DL, SelVT, C, V0, V1);
27856 // Turn 'a' into a mask suitable for VSELECT: a = a << 5;
27857 // We can safely do this using i16 shifts as we're only interested in
27858 // the 3 lower bits of each byte.
27859 Amt = DAG.getBitcast(ExtVT, Amt);
27860 Amt = DAG.getNode(ISD::SHL, DL, ExtVT, Amt, DAG.getConstant(5, DL, ExtVT));
27861 Amt = DAG.getBitcast(VT, Amt);
27863 // r = VSELECT(r, rot(r, 4), a);
27864 SDValue M;
27865 M = DAG.getNode(
27866 ISD::OR, DL, VT,
27867 DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(4, DL, VT)),
27868 DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(4, DL, VT)));
27869 R = SignBitSelect(VT, Amt, M, R);
27871 // a += a
27872 Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
27874 // r = VSELECT(r, rot(r, 2), a);
27875 M = DAG.getNode(
27876 ISD::OR, DL, VT,
27877 DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(2, DL, VT)),
27878 DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(6, DL, VT)));
27879 R = SignBitSelect(VT, Amt, M, R);
27881 // a += a
27882 Amt = DAG.getNode(ISD::ADD, DL, VT, Amt, Amt);
27884 // return VSELECT(r, rot(r, 1), a);
27885 M = DAG.getNode(
27886 ISD::OR, DL, VT,
27887 DAG.getNode(ISD::SHL, DL, VT, R, DAG.getConstant(1, DL, VT)),
27888 DAG.getNode(ISD::SRL, DL, VT, R, DAG.getConstant(7, DL, VT)));
27889 return SignBitSelect(VT, Amt, M, R);
27890 }
27892 // ISD::ROT* uses modulo rotate amounts.
27893 Amt = DAG.getNode(ISD::AND, DL, VT, Amt,
27894 DAG.getConstant(EltSizeInBits - 1, DL, VT));
27896 bool ConstantAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
27897 bool LegalVarShifts = SupportedVectorVarShift(VT, Subtarget, ISD::SHL) &&
27898 SupportedVectorVarShift(VT, Subtarget, ISD::SRL);
27900 // Fallback for splats + all supported variable shifts.
27901 // Fallback for non-constant AVX2 vXi16 as well.
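// e.g. with EltSizeInBits == 32 and Amt == 13 this forms
// (R << 13) | (R >> 19); x86's vector shifts produce 0 for counts of
// EltSizeInBits or more, which keeps the Amt == 0 case (SRL by 32) correct.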
27902 if (IsSplatAmt || LegalVarShifts || (Subtarget.hasAVX2() && !ConstantAmt)) {
27903 SDValue AmtR = DAG.getConstant(EltSizeInBits, DL, VT);
27904 AmtR = DAG.getNode(ISD::SUB, DL, VT, AmtR, Amt);
27905 SDValue SHL = DAG.getNode(ISD::SHL, DL, VT, R, Amt);
27906 SDValue SRL = DAG.getNode(ISD::SRL, DL, VT, R, AmtR);
27907 return DAG.getNode(ISD::OR, DL, VT, SHL, SRL);
27908 }
27910 // As with shifts, convert the rotation amount to a multiplication factor.
27911 SDValue Scale = convertShiftLeftToScale(Amt, DL, Subtarget, DAG);
27912 assert(Scale && "Failed to convert ROTL amount to scale");
27914 // v8i16/v16i16: perform unsigned multiply hi/lo and OR the results.
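// e.g. rotl16(0xABCD, 3): Scale = 8; MUL keeps the low 16 bits of the
// product (0x5E68) and MULHU the high 16 bits (0x0005), and their OR is
// 0x5E6D, the rotated value.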
27915 if (EltSizeInBits == 16) {
27916 SDValue Lo = DAG.getNode(ISD::MUL, DL, VT, R, Scale);
27917 SDValue Hi = DAG.getNode(ISD::MULHU, DL, VT, R, Scale);
27918 return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
27919 }
27921 // v4i32: make use of the PMULUDQ instruction to multiply 2 lanes of v4i32
27922 // to v2i64 results at a time. The upper 32-bits contain the wrapped bits
27923 // that can then be OR'd with the lower 32-bits.
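// e.g. Res02 viewed as v4i32 is {lo(r0*s0), hi(r0*s0), lo(r2*s2), hi(r2*s2)}
// and Res13 likewise for lanes 1 and 3, so shuffle {0,4,2,6} gathers the
// shifted-left halves and {1,5,3,7} the wrapped-around upper halves.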
27924 assert(VT == MVT::v4i32 && "Only v4i32 vector rotate expected");
27925 static const int OddMask[] = {1, -1, 3, -1};
27926 SDValue R13 = DAG.getVectorShuffle(VT, DL, R, R, OddMask);
27927 SDValue Scale13 = DAG.getVectorShuffle(VT, DL, Scale, Scale, OddMask);
27929 SDValue Res02 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
27930 DAG.getBitcast(MVT::v2i64, R),
27931 DAG.getBitcast(MVT::v2i64, Scale));
27932 SDValue Res13 = DAG.getNode(X86ISD::PMULUDQ, DL, MVT::v2i64,
27933 DAG.getBitcast(MVT::v2i64, R13),
27934 DAG.getBitcast(MVT::v2i64, Scale13));
27935 Res02 = DAG.getBitcast(VT, Res02);
27936 Res13 = DAG.getBitcast(VT, Res13);
27938 return DAG.getNode(ISD::OR, DL, VT,
27939 DAG.getVectorShuffle(VT, DL, Res02, Res13, {0, 4, 2, 6}),
27940 DAG.getVectorShuffle(VT, DL, Res02, Res13, {1, 5, 3, 7}));
27941 }
27943 /// Returns true if the operand type is exactly twice the native width, and
27944 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
27945 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
27946 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
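/// e.g. an i64 atomicrmw on a 32-bit target reports true here (expand via
/// cmpxchg8b), as does an i128 access on x86-64 when cmpxchg16b is available.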
27947 bool X86TargetLowering::needsCmpXchgNb(Type *MemType) const {
27948 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
27950 if (OpWidth == 64)
27951 return Subtarget.hasCmpxchg8b() && !Subtarget.is64Bit();
27952 if (OpWidth == 128)
27953 return Subtarget.hasCmpxchg16b();
27955 return false;
27956 }
27958 bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
27959 Type *MemType = SI->getValueOperand()->getType();
27961 bool NoImplicitFloatOps =
27962 SI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
27963 if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
27964 !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
27965 (Subtarget.hasSSE1() || Subtarget.hasX87()))
27966 return false;
27968 return needsCmpXchgNb(MemType);
27969 }
27971 // Note: this turns large loads into lock cmpxchg8b/16b.
27972 // TODO: In 32-bit mode, use MOVLPS when SSE1 is available?
27973 TargetLowering::AtomicExpansionKind
27974 X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
27975 Type *MemType = LI->getType();
27977 // If this is a 64-bit atomic load on a 32-bit target and SSE2 is enabled, we
27978 // can use movq to do the load. If we have X87 we can load into an 80-bit
27979 // X87 register and store it to a stack temporary.
27980 bool NoImplicitFloatOps =
27981 LI->getFunction()->hasFnAttribute(Attribute::NoImplicitFloat);
27982 if (MemType->getPrimitiveSizeInBits() == 64 && !Subtarget.is64Bit() &&
27983 !Subtarget.useSoftFloat() && !NoImplicitFloatOps &&
27984 (Subtarget.hasSSE1() || Subtarget.hasX87()))
27985 return AtomicExpansionKind::None;
27987 return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
27988 : AtomicExpansionKind::None;
27989 }
27991 TargetLowering::AtomicExpansionKind
27992 X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
27993 unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
27994 Type *MemType = AI->getType();
27996 // If the operand is too big, we must see if cmpxchg8/16b is available
27997 // and default to library calls otherwise.
27998 if (MemType->getPrimitiveSizeInBits() > NativeWidth) {
27999 return needsCmpXchgNb(MemType) ? AtomicExpansionKind::CmpXChg
28000 : AtomicExpansionKind::None;
28001 }
28003 AtomicRMWInst::BinOp Op = AI->getOperation();
28004 switch (Op) {
28005 default:
28006 llvm_unreachable("Unknown atomic operation");
28007 case AtomicRMWInst::Xchg:
28008 case AtomicRMWInst::Add:
28009 case AtomicRMWInst::Sub:
28010 // It's better to use xadd, xsub or xchg for these in all cases.
28011 return AtomicExpansionKind::None;
28012 case AtomicRMWInst::Or:
28013 case AtomicRMWInst::And:
28014 case AtomicRMWInst::Xor:
28015 // If the atomicrmw's result isn't actually used, we can just add a "lock"
28016 // prefix to a normal instruction for these operations.
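// e.g. an "atomicrmw or i32* %p, i32 1" whose result is unused can be emitted
// as a single "lock or" on the memory operand; when the old value is needed
// no plain or/and/xor instruction returns it, hence the cmpxchg expansion.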
28017 return !AI->use_empty() ? AtomicExpansionKind::CmpXChg
28018 : AtomicExpansionKind::None;
28019 case AtomicRMWInst::Nand:
28020 case AtomicRMWInst::Max:
28021 case AtomicRMWInst::Min:
28022 case AtomicRMWInst::UMax:
28023 case AtomicRMWInst::UMin:
28024 case AtomicRMWInst::FAdd:
28025 case AtomicRMWInst::FSub:
28026 // These always require a non-trivial set of data operations on x86. We must
28027 // use a cmpxchg loop.
28028 return AtomicExpansionKind::CmpXChg;
28029 }
28030 }
28032 LoadInst *
28033 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
28034 unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
28035 Type *MemType = AI->getType();
28036 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
28037 // there is no benefit in turning such RMWs into loads, and it is actually
28038 // harmful as it introduces an mfence.
28039 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
28040 return nullptr;
28042 // If this is a canonical idempotent atomicrmw w/no uses, we have a better
28043 // lowering available in lowerAtomicArith.
28044 // TODO: push more cases through this path.
28045 if (auto *C = dyn_cast<ConstantInt>(AI->getValOperand()))
28046 if (AI->getOperation() == AtomicRMWInst::Or && C->isZero() &&
28047 AI->use_empty())
28048 return nullptr;
28050 IRBuilder<> Builder(AI);
28051 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
28052 auto SSID = AI->getSyncScopeID();
28053 // We must restrict the ordering to avoid generating loads with Release or
28054 // ReleaseAcquire orderings.
28055 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
28057 // Before the load we need a fence. Here is an example lifted from
28058 // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
28059 // is required:
28060 // Thread 0:
28061 // x.store(1, relaxed);
28062 // r1 = y.fetch_add(0, release);
28063 // Thread 1:
28064 // y.fetch_add(42, acquire);
28065 // r2 = x.load(relaxed);
28066 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
28067 // lowered to just a load without a fence. An mfence flushes the store buffer,
28068 // making the optimization clearly correct.
28069 // FIXME: it is required if isReleaseOrStronger(Order) but it is not clear
28070 // otherwise, we might be able to be more aggressive on relaxed idempotent
28071 // rmw. In practice, they do not look useful, so we don't try to be
28072 // especially clever.
28073 if (SSID == SyncScope::SingleThread)
28074 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
28075 // the IR level, so we must wrap it in an intrinsic.
28076 return nullptr;
28078 if (!Subtarget.hasMFence())
28079 // FIXME: it might make sense to use a locked operation here but on a
28080 // different cache-line to prevent cache-line bouncing. In practice it
28081 // is probably a small win, and x86 processors without mfence are rare
28082 // enough that we do not bother.
28083 return nullptr;
28085 Function *MFence =
28086 llvm::Intrinsic::getDeclaration(M, Intrinsic::x86_sse2_mfence);
28087 Builder.CreateCall(MFence, {});
28089 // Finally we can emit the atomic load.
28090 LoadInst *Loaded =
28091 Builder.CreateAlignedLoad(AI->getType(), AI->getPointerOperand(),
28092 Align(AI->getType()->getPrimitiveSizeInBits()));
28093 Loaded->setAtomic(Order, SSID);
28094 AI->replaceAllUsesWith(Loaded);
28095 AI->eraseFromParent();
28097 return Loaded;
28098 }
28099 bool X86TargetLowering::lowerAtomicStoreAsStoreSDNode(const StoreInst &SI) const {
28100 if (!SI.isUnordered())
28101 return false;
28102 return ExperimentalUnorderedISEL;
28103 }
28104 bool X86TargetLowering::lowerAtomicLoadAsLoadSDNode(const LoadInst &LI) const {
28105 if (!LI.isUnordered())
28106 return false;
28107 return ExperimentalUnorderedISEL;
28108 }
28111 /// Emit a locked operation on a stack location which does not change any
28112 /// memory location, but does involve a lock prefix. Location is chosen to be
28113 /// a) very likely accessed only by a single thread to minimize cache traffic,
28114 /// and b) definitely dereferenceable. Returns the new Chain result.
28115 static SDValue emitLockedStackOp(SelectionDAG &DAG,
28116 const X86Subtarget &Subtarget,
28117 SDValue Chain, SDLoc DL) {
28118 // Implementation notes:
28119 // 1) LOCK prefix creates a full read/write reordering barrier for memory
28120 // operations issued by the current processor. As such, the location
28121 // referenced is not relevant for the ordering properties of the instruction.
28122 // See: Intel® 64 and IA-32 Architectures Software Developer’s Manual,
28123 // 8.2.3.9 Loads and Stores Are Not Reordered with Locked Instructions
28124 // 2) Using an immediate operand appears to be the best encoding choice
28125 // here since it doesn't require an extra register.
28126 // 3) OR appears to be very slightly faster than ADD. (Though, the difference
28127 // is small enough it might just be measurement noise.)
28128 // 4) When choosing offsets, there are several contributing factors:
28129 // a) If there's no redzone, we default to TOS. (We could allocate a cache
28130 // line aligned stack object to improve this case.)
28131 // b) To minimize our chances of introducing a false dependence, we prefer
28132 // to offset the stack usage from TOS slightly.
28133 // c) To minimize concerns about cross thread stack usage - in particular,
28134 // the idiomatic MyThreadPool.run([&StackVars]() {...}) pattern which
28135 // captures state in the TOS frame and accesses it from many threads -
28136 // we want to use an offset such that the offset is in a distinct cache
28137 // line from the TOS frame.
28139 // For a general discussion of the tradeoffs and benchmark results, see:
28140 // https://shipilev.net/blog/2014/on-the-fence-with-dependencies/
28142 auto &MF = DAG.getMachineFunction();
28143 auto &TFL = *Subtarget.getFrameLowering();
28144 const unsigned SPOffset = TFL.has128ByteRedZone(MF) ? -64 : 0;
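// e.g. with the x86-64 SysV red zone present the 64-bit path below emits
// roughly "lock orl $0, -64(%rsp)", keeping the probe away from the hottest
// top-of-stack slots.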
28146 if (Subtarget.is64Bit()) {
28147 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
28148 SDValue Ops[] = {
28149 DAG.getRegister(X86::RSP, MVT::i64), // Base
28150 DAG.getTargetConstant(1, DL, MVT::i8), // Scale
28151 DAG.getRegister(0, MVT::i64), // Index
28152 DAG.getTargetConstant(SPOffset, DL, MVT::i32), // Disp
28153 DAG.getRegister(0, MVT::i16), // Segment.
28154 Zero,
28155 Chain};
28156 SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
28157 MVT::Other, Ops);
28158 return SDValue(Res, 1);
28159 }
28161 SDValue Zero = DAG.getTargetConstant(0, DL, MVT::i32);
28162 SDValue Ops[] = {
28163 DAG.getRegister(X86::ESP, MVT::i32), // Base
28164 DAG.getTargetConstant(1, DL, MVT::i8), // Scale
28165 DAG.getRegister(0, MVT::i32), // Index
28166 DAG.getTargetConstant(SPOffset, DL, MVT::i32), // Disp
28167 DAG.getRegister(0, MVT::i16), // Segment.
28168 Zero,
28169 Chain};
28171 SDNode *Res = DAG.getMachineNode(X86::OR32mi8Locked, DL, MVT::i32,
28172 MVT::Other, Ops);
28173 return SDValue(Res, 1);
28174 }
28176 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget &Subtarget,
28177 SelectionDAG &DAG) {
28178 SDLoc dl(Op);
28179 AtomicOrdering FenceOrdering =
28180 static_cast<AtomicOrdering>(Op.getConstantOperandVal(1));
28181 SyncScope::ID FenceSSID =
28182 static_cast<SyncScope::ID>(Op.getConstantOperandVal(2));
28184 // The only fence that needs an instruction is a sequentially-consistent
28185 // cross-thread fence.
28186 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
28187 FenceSSID == SyncScope::System) {
28188 if (Subtarget.hasMFence())
28189 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
28191 SDValue Chain = Op.getOperand(0);
28192 return emitLockedStackOp(DAG, Subtarget, Chain, dl);
28193 }
28195 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
28196 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
28197 }
28199 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget &Subtarget,
28200 SelectionDAG &DAG) {
28201 MVT T = Op.getSimpleValueType();
28202 SDLoc DL(Op);
28203 unsigned Reg = 0;
28204 unsigned size = 0;
28205 switch(T.SimpleTy) {
28206 default: llvm_unreachable("Invalid value type!");
28207 case MVT::i8: Reg = X86::AL; size = 1; break;
28208 case MVT::i16: Reg = X86::AX; size = 2; break;
28209 case MVT::i32: Reg = X86::EAX; size = 4; break;
28210 case MVT::i64:
28211 assert(Subtarget.is64Bit() && "Node not type legal!");
28212 Reg = X86::RAX; size = 8;
28213 break;
28214 }
28215 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
28216 Op.getOperand(2), SDValue());
28217 SDValue Ops[] = { cpIn.getValue(0),
28218 Op.getOperand(1),
28219 Op.getOperand(3),
28220 DAG.getTargetConstant(size, DL, MVT::i8),
28221 cpIn.getValue(1) };
28222 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
28223 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
28224 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
28225 Ops, T, MMO);
28227 SDValue cpOut =
28228 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
28229 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
28230 MVT::i32, cpOut.getValue(2));
28231 SDValue Success = getSETCC(X86::COND_E, EFLAGS, DL, DAG);
28233 return DAG.getNode(ISD::MERGE_VALUES, DL, Op->getVTList(),
28234 cpOut, Success, EFLAGS.getValue(1));
28235 }
28237 // Create MOVMSKB, taking into account whether we need to split for AVX1.
28238 static SDValue getPMOVMSKB(const SDLoc &DL, SDValue V, SelectionDAG &DAG,
28239 const X86Subtarget &Subtarget) {
28240 MVT InVT = V.getSimpleValueType();
28242 if (InVT == MVT::v64i8) {
28243 SDValue Lo, Hi;
28244 std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
28245 Lo = getPMOVMSKB(DL, Lo, DAG, Subtarget);
28246 Hi = getPMOVMSKB(DL, Hi, DAG, Subtarget);
28247 Lo = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Lo);
28248 Hi = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Hi);
28249 Hi = DAG.getNode(ISD::SHL, DL, MVT::i64, Hi,
28250 DAG.getConstant(32, DL, MVT::i8));
28251 return DAG.getNode(ISD::OR, DL, MVT::i64, Lo, Hi);
28252 }
28253 if (InVT == MVT::v32i8 && !Subtarget.hasInt256()) {
28254 SDValue Lo, Hi;
28255 std::tie(Lo, Hi) = DAG.SplitVector(V, DL);
28256 Lo = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Lo);
28257 Hi = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Hi);
28258 Hi = DAG.getNode(ISD::SHL, DL, MVT::i32, Hi,
28259 DAG.getConstant(16, DL, MVT::i8));
28260 return DAG.getNode(ISD::OR, DL, MVT::i32, Lo, Hi);
28261 }
28263 return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
28266 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget &Subtarget,
28267 SelectionDAG &DAG) {
28268 SDValue Src = Op.getOperand(0);
28269 MVT SrcVT = Src.getSimpleValueType();
28270 MVT DstVT = Op.getSimpleValueType();
28272 // Legalize (v64i1 (bitcast i64 (X))) by splitting the i64, bitcasting each
28273 // half to v32i1 and concatenating the result.
28274 if (SrcVT == MVT::i64 && DstVT == MVT::v64i1) {
28275 assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
28276 assert(Subtarget.hasBWI() && "Expected BWI target");
28277 SDLoc dl(Op);
28278 SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
28279 DAG.getIntPtrConstant(0, dl));
28280 Lo = DAG.getBitcast(MVT::v32i1, Lo);
28281 SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32, Src,
28282 DAG.getIntPtrConstant(1, dl));
28283 Hi = DAG.getBitcast(MVT::v32i1, Hi);
28284 return DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v64i1, Lo, Hi);
28287 // Use MOVMSK for vector to scalar conversion to prevent scalarization.
28288 if ((SrcVT == MVT::v16i1 || SrcVT == MVT::v32i1) && DstVT.isScalarInteger()) {
28289 assert(!Subtarget.hasAVX512() && "Should use K-registers with AVX512");
28290 MVT SExtVT = SrcVT == MVT::v16i1 ? MVT::v16i8 : MVT::v32i8;
28291 SDLoc DL(Op);
28292 SDValue V = DAG.getSExtOrTrunc(Src, DL, SExtVT);
28293 V = getPMOVMSKB(DL, V, DAG, Subtarget);
28294 return DAG.getZExtOrTrunc(V, DL, DstVT);
28295 }
28297 assert((SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8 ||
28298 SrcVT == MVT::i64) && "Unexpected VT!");
28300 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
28301 if (!(DstVT == MVT::f64 && SrcVT == MVT::i64) &&
28302 !(DstVT == MVT::x86mmx && SrcVT.isVector()))
28303 // This conversion needs to be expanded.
28304 return SDValue();
28306 SDLoc dl(Op);
28307 if (SrcVT.isVector()) {
28308 // Widen the input vector in the case of MVT::v2i32.
28309 // Example: from MVT::v2i32 to MVT::v4i32.
28310 MVT NewVT = MVT::getVectorVT(SrcVT.getVectorElementType(),
28311 SrcVT.getVectorNumElements() * 2);
28312 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewVT, Src,
28313 DAG.getUNDEF(SrcVT));
28314 } else {
28315 assert(SrcVT == MVT::i64 && !Subtarget.is64Bit() &&
28316 "Unexpected source type in LowerBITCAST");
28317 Src = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Src);
28318 }
28320 MVT V2X64VT = DstVT == MVT::f64 ? MVT::v2f64 : MVT::v2i64;
28321 Src = DAG.getNode(ISD::BITCAST, dl, V2X64VT, Src);
28323 if (DstVT == MVT::x86mmx)
28324 return DAG.getNode(X86ISD::MOVDQ2Q, dl, DstVT, Src);
28326 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, DstVT, Src,
28327 DAG.getIntPtrConstant(0, dl));
28328 }
28330 /// Compute the horizontal sum of bytes in V for the elements of VT.
28332 /// Requires V to be a byte vector and VT to be an integer vector type with
28333 /// wider elements than V's type. The width of the elements of VT determines
28334 /// how many bytes of V are summed horizontally to produce each element of the
28335 /// result.
28336 static SDValue LowerHorizontalByteSum(SDValue V, MVT VT,
28337 const X86Subtarget &Subtarget,
28338 SelectionDAG &DAG) {
28339 SDLoc DL(V);
28340 MVT ByteVecVT = V.getSimpleValueType();
28341 MVT EltVT = VT.getVectorElementType();
28342 assert(ByteVecVT.getVectorElementType() == MVT::i8 &&
28343 "Expected value to have byte element type.");
28344 assert(EltVT != MVT::i8 &&
28345 "Horizontal byte sum only makes sense for wider elements!");
28346 unsigned VecSize = VT.getSizeInBits();
28347 assert(ByteVecVT.getSizeInBits() == VecSize && "Cannot change vector size!");
28349 // The PSADBW instruction horizontally adds all bytes and leaves the result
28350 // in i64 chunks, thus directly computing the pop count for v2i64 and v4i64.
28351 if (EltVT == MVT::i64) {
28352 SDValue Zeros = DAG.getConstant(0, DL, ByteVecVT);
28353 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
28354 V = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT, V, Zeros);
28355 return DAG.getBitcast(VT, V);
28356 }
28358 if (EltVT == MVT::i32) {
28359 // We unpack the low half and high half into i32s interleaved with zeros so
28360 // that we can use PSADBW to horizontally sum them. The most useful part of
28361 // this is that it lines up the results of two PSADBW instructions to be
28362 // two v2i64 vectors which concatenated are the 4 population counts. We can
28363 // then use PACKUSWB to shrink and concatenate them into a v4i32 again.
28364 SDValue Zeros = DAG.getConstant(0, DL, VT);
28365 SDValue V32 = DAG.getBitcast(VT, V);
28366 SDValue Low = getUnpackl(DAG, DL, VT, V32, Zeros);
28367 SDValue High = getUnpackh(DAG, DL, VT, V32, Zeros);
28369 // Do the horizontal sums into two v2i64s.
28370 Zeros = DAG.getConstant(0, DL, ByteVecVT);
28371 MVT SadVecVT = MVT::getVectorVT(MVT::i64, VecSize / 64);
28372 Low = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
28373 DAG.getBitcast(ByteVecVT, Low), Zeros);
28374 High = DAG.getNode(X86ISD::PSADBW, DL, SadVecVT,
28375 DAG.getBitcast(ByteVecVT, High), Zeros);
28377 // Merge them together.
28378 MVT ShortVecVT = MVT::getVectorVT(MVT::i16, VecSize / 16);
28379 V = DAG.getNode(X86ISD::PACKUS, DL, ByteVecVT,
28380 DAG.getBitcast(ShortVecVT, Low),
28381 DAG.getBitcast(ShortVecVT, High));
28383 return DAG.getBitcast(VT, V);
28384 }
28386 // The only element type left is i16.
28387 assert(EltVT == MVT::i16 && "Unknown how to handle type");
28389 // To obtain pop count for each i16 element starting from the pop count for
28390 // i8 elements, shift the i16s left by 8, sum as i8s, and then shift as i16s
28391 // right by 8. It is important to shift as i16s as i8 vector shift isn't
28392 // directly supported.
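// e.g. an i16 element whose (low, high) bytes hold counts (L, H): the i16
// shl by 8 yields (0, L), the byte-wise add gives (L, H + L), and the final
// i16 srl by 8 leaves H + L, the element's total pop count.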
28393 SDValue ShifterV = DAG.getConstant(8, DL, VT);
28394 SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
28395 V = DAG.getNode(ISD::ADD, DL, ByteVecVT, DAG.getBitcast(ByteVecVT, Shl),
28396 DAG.getBitcast(ByteVecVT, V));
28397 return DAG.getNode(ISD::SRL, DL, VT, DAG.getBitcast(VT, V), ShifterV);
28398 }
28400 static SDValue LowerVectorCTPOPInRegLUT(SDValue Op, const SDLoc &DL,
28401 const X86Subtarget &Subtarget,
28402 SelectionDAG &DAG) {
28403 MVT VT = Op.getSimpleValueType();
28404 MVT EltVT = VT.getVectorElementType();
28405 int NumElts = VT.getVectorNumElements();
28407 assert(EltVT == MVT::i8 && "Only vXi8 vector CTPOP lowering supported.");
28409 // Implement a lookup table in register by using an algorithm based on:
28410 // http://wm.ite.pl/articles/sse-popcount.html
28412 // The general idea is that every lower byte nibble in the input vector is an
28413 // index into an in-register pre-computed pop count table. We then split up
28414 // the input vector into two new ones: (1) a vector with only the shifted-right
28415 // higher nibbles for each byte and (2) a vector with the lower nibbles (and
28416 // masked out higher ones) for each byte. PSHUFB is used separately with both
28417 // to index the in-register table. Next, both are added and the result is an
28418 // i8 vector where each element contains the pop count for its input byte.
28419 const int LUT[16] = {/* 0 */ 0, /* 1 */ 1, /* 2 */ 1, /* 3 */ 2,
28420 /* 4 */ 1, /* 5 */ 2, /* 6 */ 2, /* 7 */ 3,
28421 /* 8 */ 1, /* 9 */ 2, /* a */ 2, /* b */ 3,
28422 /* c */ 2, /* d */ 3, /* e */ 3, /* f */ 4};
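// e.g. popcount(0xB7): the high nibble 0xB maps to LUT[0xb] = 3 and the low
// nibble 0x7 to LUT[7] = 3, so the two PSHUFB lookups below sum to 6.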
28424 SmallVector<SDValue, 64> LUTVec;
28425 for (int i = 0; i < NumElts; ++i)
28426 LUTVec.push_back(DAG.getConstant(LUT[i % 16], DL, MVT::i8));
28427 SDValue InRegLUT = DAG.getBuildVector(VT, DL, LUTVec);
28428 SDValue M0F = DAG.getConstant(0x0F, DL, VT);
28430 // High nibbles
28431 SDValue FourV = DAG.getConstant(4, DL, VT);
28432 SDValue HiNibbles = DAG.getNode(ISD::SRL, DL, VT, Op, FourV);
28434 // Low nibbles
28435 SDValue LoNibbles = DAG.getNode(ISD::AND, DL, VT, Op, M0F);
28437 // The input vector is used as the shuffle mask that index elements into the
28438 // LUT. After counting low and high nibbles, add the vector to obtain the
28439 // final pop count per i8 element.
28440 SDValue HiPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, HiNibbles);
28441 SDValue LoPopCnt = DAG.getNode(X86ISD::PSHUFB, DL, VT, InRegLUT, LoNibbles);
28442 return DAG.getNode(ISD::ADD, DL, VT, HiPopCnt, LoPopCnt);
28443 }
28445 // Please ensure that any codegen change from LowerVectorCTPOP is reflected in
28446 // updated cost models in X86TTIImpl::getIntrinsicInstrCost.
28447 static SDValue LowerVectorCTPOP(SDValue Op, const X86Subtarget &Subtarget,
28448 SelectionDAG &DAG) {
28449 MVT VT = Op.getSimpleValueType();
28450 assert((VT.is512BitVector() || VT.is256BitVector() || VT.is128BitVector()) &&
28451 "Unknown CTPOP type to handle");
28452 SDLoc DL(Op.getNode());
28453 SDValue Op0 = Op.getOperand(0);
28455 // TRUNC(CTPOP(ZEXT(X))) to make use of vXi32/vXi64 VPOPCNT instructions.
28456 if (Subtarget.hasVPOPCNTDQ()) {
28457 unsigned NumElems = VT.getVectorNumElements();
28458 assert((VT.getVectorElementType() == MVT::i8 ||
28459 VT.getVectorElementType() == MVT::i16) && "Unexpected type");
28460 if (NumElems < 16 || (NumElems == 16 && Subtarget.canExtendTo512DQ())) {
28461 MVT NewVT = MVT::getVectorVT(MVT::i32, NumElems);
28462 Op = DAG.getNode(ISD::ZERO_EXTEND, DL, NewVT, Op0);
28463 Op = DAG.getNode(ISD::CTPOP, DL, NewVT, Op);
28464 return DAG.getNode(ISD::TRUNCATE, DL, VT, Op);
28465 }
28466 }
28468 // Decompose 256-bit ops into smaller 128-bit ops.
28469 if (VT.is256BitVector() && !Subtarget.hasInt256())
28470 return splitVectorIntUnary(Op, DAG);
28472 // Decompose 512-bit ops into smaller 256-bit ops.
28473 if (VT.is512BitVector() && !Subtarget.hasBWI())
28474 return splitVectorIntUnary(Op, DAG);
28476 // For element types greater than i8, do vXi8 pop counts and a bytesum.
28477 if (VT.getScalarType() != MVT::i8) {
28478 MVT ByteVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
28479 SDValue ByteOp = DAG.getBitcast(ByteVT, Op0);
28480 SDValue PopCnt8 = DAG.getNode(ISD::CTPOP, DL, ByteVT, ByteOp);
28481 return LowerHorizontalByteSum(PopCnt8, VT, Subtarget, DAG);
28484 // We can't use the fast LUT approach, so fall back on LegalizeDAG.
28485 if (!Subtarget.hasSSSE3())
28486 return SDValue();
28488 return LowerVectorCTPOPInRegLUT(Op0, DL, Subtarget, DAG);
28489 }
28491 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget &Subtarget,
28492 SelectionDAG &DAG) {
28493 assert(Op.getSimpleValueType().isVector() &&
28494 "We only do custom lowering for vector population count.");
28495 return LowerVectorCTPOP(Op, Subtarget, DAG);
28496 }
28498 static SDValue LowerBITREVERSE_XOP(SDValue Op, SelectionDAG &DAG) {
28499 MVT VT = Op.getSimpleValueType();
28500 SDValue In = Op.getOperand(0);
28501 SDLoc DL(Op);
28503 // For scalars, it's still beneficial to transfer to/from the SIMD unit to
28504 // perform the BITREVERSE.
28505 if (!VT.isVector()) {
28506 MVT VecVT = MVT::getVectorVT(VT, 128 / VT.getSizeInBits());
28507 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, In);
28508 Res = DAG.getNode(ISD::BITREVERSE, DL, VecVT, Res);
28509 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Res,
28510 DAG.getIntPtrConstant(0, DL));
28511 }
28513 int NumElts = VT.getVectorNumElements();
28514 int ScalarSizeInBytes = VT.getScalarSizeInBits() / 8;
28516 // Decompose 256-bit ops into smaller 128-bit ops.
28517 if (VT.is256BitVector())
28518 return splitVectorIntUnary(Op, DAG);
28520 assert(VT.is128BitVector() &&
28521 "Only 128-bit vector bitreverse lowering supported.");
28523 // VPPERM reverses the bits of a byte with the permute Op (2 << 5), and we
28524 // perform the BSWAP in the shuffle.
28525 // It's best to shuffle using the second operand as this will implicitly allow
28526 // memory folding for multiple vectors.
28527 SmallVector<SDValue, 16> MaskElts;
28528 for (int i = 0; i != NumElts; ++i) {
28529 for (int j = ScalarSizeInBytes - 1; j >= 0; --j) {
28530 int SourceByte = 16 + (i * ScalarSizeInBytes) + j;
28531 int PermuteByte = SourceByte | (2 << 5);
28532 MaskElts.push_back(DAG.getConstant(PermuteByte, DL, MVT::i8));
28533 }
28534 }
28536 SDValue Mask = DAG.getBuildVector(MVT::v16i8, DL, MaskElts);
28537 SDValue Res = DAG.getBitcast(MVT::v16i8, In);
28538 Res = DAG.getNode(X86ISD::VPPERM, DL, MVT::v16i8, DAG.getUNDEF(MVT::v16i8),
28539 Res, Mask);
28540 return DAG.getBitcast(VT, Res);
28541 }
28543 static SDValue LowerBITREVERSE(SDValue Op, const X86Subtarget &Subtarget,
28544 SelectionDAG &DAG) {
28545 MVT VT = Op.getSimpleValueType();
28547 if (Subtarget.hasXOP() && !VT.is512BitVector())
28548 return LowerBITREVERSE_XOP(Op, DAG);
28550 assert(Subtarget.hasSSSE3() && "SSSE3 required for BITREVERSE");
28552 SDValue In = Op.getOperand(0);
28553 SDLoc DL(Op);
28555 // Split v64i8 without BWI so that we can still use the PSHUFB lowering.
28556 if (VT == MVT::v64i8 && !Subtarget.hasBWI())
28557 return splitVectorIntUnary(Op, DAG);
28559 unsigned NumElts = VT.getVectorNumElements();
28560 assert(VT.getScalarType() == MVT::i8 &&
28561 "Only byte vector BITREVERSE supported");
28563 // Decompose 256-bit ops into smaller 128-bit ops on pre-AVX2.
28564 if (VT.is256BitVector() && !Subtarget.hasInt256())
28565 return splitVectorIntUnary(Op, DAG);
28567 // Perform BITREVERSE using PSHUFB lookups. Each byte is split into
28568 // two nibbles and a PSHUFB lookup to find the bitreverse of each
28569 // 0-15 value (moved to the other nibble).
28570 SDValue NibbleMask = DAG.getConstant(0xF, DL, VT);
28571 SDValue Lo = DAG.getNode(ISD::AND, DL, VT, In, NibbleMask);
28572 SDValue Hi = DAG.getNode(ISD::SRL, DL, VT, In, DAG.getConstant(4, DL, VT));
28574 const int LoLUT[16] = {
28575 /* 0 */ 0x00, /* 1 */ 0x80, /* 2 */ 0x40, /* 3 */ 0xC0,
28576 /* 4 */ 0x20, /* 5 */ 0xA0, /* 6 */ 0x60, /* 7 */ 0xE0,
28577 /* 8 */ 0x10, /* 9 */ 0x90, /* a */ 0x50, /* b */ 0xD0,
28578 /* c */ 0x30, /* d */ 0xB0, /* e */ 0x70, /* f */ 0xF0};
28579 const int HiLUT[16] = {
28580 /* 0 */ 0x00, /* 1 */ 0x08, /* 2 */ 0x04, /* 3 */ 0x0C,
28581 /* 4 */ 0x02, /* 5 */ 0x0A, /* 6 */ 0x06, /* 7 */ 0x0E,
28582 /* 8 */ 0x01, /* 9 */ 0x09, /* a */ 0x05, /* b */ 0x0D,
28583 /* c */ 0x03, /* d */ 0x0B, /* e */ 0x07, /* f */ 0x0F};
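// e.g. bitreverse(0x2C): Lo = 0xC, Hi = 0x2, and
// LoLUT[0xc] | HiLUT[0x2] = 0x30 | 0x04 = 0x34, i.e. 0b00101100 reversed.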
28585 SmallVector<SDValue, 16> LoMaskElts, HiMaskElts;
28586 for (unsigned i = 0; i < NumElts; ++i) {
28587 LoMaskElts.push_back(DAG.getConstant(LoLUT[i % 16], DL, MVT::i8));
28588 HiMaskElts.push_back(DAG.getConstant(HiLUT[i % 16], DL, MVT::i8));
28591 SDValue LoMask = DAG.getBuildVector(VT, DL, LoMaskElts);
28592 SDValue HiMask = DAG.getBuildVector(VT, DL, HiMaskElts);
28593 Lo = DAG.getNode(X86ISD::PSHUFB, DL, VT, LoMask, Lo);
28594 Hi = DAG.getNode(X86ISD::PSHUFB, DL, VT, HiMask, Hi);
28595 return DAG.getNode(ISD::OR, DL, VT, Lo, Hi);
28596 }
28598 static SDValue lowerAtomicArithWithLOCK(SDValue N, SelectionDAG &DAG,
28599 const X86Subtarget &Subtarget) {
28600 unsigned NewOpc = 0;
28601 switch (N->getOpcode()) {
28602 case ISD::ATOMIC_LOAD_ADD:
28603 NewOpc = X86ISD::LADD;
28604 break;
28605 case ISD::ATOMIC_LOAD_SUB:
28606 NewOpc = X86ISD::LSUB;
28607 break;
28608 case ISD::ATOMIC_LOAD_OR:
28609 NewOpc = X86ISD::LOR;
28610 break;
28611 case ISD::ATOMIC_LOAD_XOR:
28612 NewOpc = X86ISD::LXOR;
28613 break;
28614 case ISD::ATOMIC_LOAD_AND:
28615 NewOpc = X86ISD::LAND;
28616 break;
28617 default:
28618 llvm_unreachable("Unknown ATOMIC_LOAD_ opcode");
28619 }
28621 MachineMemOperand *MMO = cast<MemSDNode>(N)->getMemOperand();
28623 return DAG.getMemIntrinsicNode(
28624 NewOpc, SDLoc(N), DAG.getVTList(MVT::i32, MVT::Other),
28625 {N->getOperand(0), N->getOperand(1), N->getOperand(2)},
28626 /*MemVT=*/N->getSimpleValueType(0), MMO);
28627 }
28629 /// Lower atomic_load_ops into LOCK-prefixed operations.
28630 static SDValue lowerAtomicArith(SDValue N, SelectionDAG &DAG,
28631 const X86Subtarget &Subtarget) {
28632 AtomicSDNode *AN = cast<AtomicSDNode>(N.getNode());
28633 SDValue Chain = N->getOperand(0);
28634 SDValue LHS = N->getOperand(1);
28635 SDValue RHS = N->getOperand(2);
28636 unsigned Opc = N->getOpcode();
28637 MVT VT = N->getSimpleValueType(0);
28638 SDLoc DL(N);
28640 // We can lower atomic_load_add into LXADD. However, any other atomicrmw op
28641 // can only be lowered when the result is unused. They should have already
28642 // been transformed into a cmpxchg loop in AtomicExpand.
28643 if (N->hasAnyUseOfValue(0)) {
28644 // Handle (atomic_load_sub p, v) as (atomic_load_add p, -v), to be able to
28645 // select LXADD if LOCK_SUB can't be selected.
28646 if (Opc == ISD::ATOMIC_LOAD_SUB) {
28647 RHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), RHS);
28648 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, DL, VT, Chain, LHS,
28649 RHS, AN->getMemOperand());
28651 assert(Opc == ISD::ATOMIC_LOAD_ADD &&
28652 "Used AtomicRMW ops other than Add should have been expanded!");
28656 // Specialized lowering for the canonical form of an idempotent atomicrmw.
28657 // The core idea here is that since the memory location isn't actually
28658 // changing, all we need is a lowering for the *ordering* impacts of the
28659 // atomicrmw. As such, we can choose a different operation and memory
28660 // location to minimize impact on other code.
28661 if (Opc == ISD::ATOMIC_LOAD_OR && isNullConstant(RHS)) {
28662 // On X86, the only ordering which actually requires an instruction is
28663 // seq_cst which isn't SingleThread, everything just needs to be preserved
28664 // during codegen and then dropped. Note that we expect (but don't assume),
28665 // that orderings other than seq_cst and acq_rel have been canonicalized to
28666 // a store or load.
28667 if (AN->getOrdering() == AtomicOrdering::SequentiallyConsistent &&
28668 AN->getSyncScopeID() == SyncScope::System) {
28669 // Prefer a locked operation against a stack location to minimize cache
28670 // traffic. This assumes that stack locations are very likely to be
28671 // accessed only by the owning thread.
28672 SDValue NewChain = emitLockedStackOp(DAG, Subtarget, Chain, DL);
28673 assert(!N->hasAnyUseOfValue(0));
28674 // NOTE: The getUNDEF is needed to give something for the unused result 0.
28675 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
28676 DAG.getUNDEF(VT), NewChain);
28677 }
28678 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
28679 SDValue NewChain = DAG.getNode(X86ISD::MEMBARRIER, DL, MVT::Other, Chain);
28680 assert(!N->hasAnyUseOfValue(0));
28681 // NOTE: The getUNDEF is needed to give something for the unused result 0.
28682 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
28683 DAG.getUNDEF(VT), NewChain);
28684 }
28686 SDValue LockOp = lowerAtomicArithWithLOCK(N, DAG, Subtarget);
28687 // RAUW the chain, but don't worry about the result, as it's unused.
28688 assert(!N->hasAnyUseOfValue(0));
28689 // NOTE: The getUNDEF is needed to give something for the unused result 0.
28690 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(),
28691 DAG.getUNDEF(VT), LockOp.getValue(1));
28692 }
28694 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG,
28695 const X86Subtarget &Subtarget) {
28696 auto *Node = cast<AtomicSDNode>(Op.getNode());
28697 SDLoc dl(Node);
28698 EVT VT = Node->getMemoryVT();
28700 bool IsSeqCst = Node->getOrdering() == AtomicOrdering::SequentiallyConsistent;
28701 bool IsTypeLegal = DAG.getTargetLoweringInfo().isTypeLegal(VT);
28703 // If this store is not sequentially consistent and the type is legal
28704 // we can just keep it.
28705 if (!IsSeqCst && IsTypeLegal)
28706 return Op;
28708 if (VT == MVT::i64 && !IsTypeLegal) {
28709 // For illegal i64 atomic_stores, we can try to use MOVQ or MOVLPS if SSE
28710 // is enabled.
28711 bool NoImplicitFloatOps =
28712 DAG.getMachineFunction().getFunction().hasFnAttribute(
28713 Attribute::NoImplicitFloat);
28714 if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
28715 SDValue Chain;
28716 if (Subtarget.hasSSE1()) {
28717 SDValue SclToVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
28718 Node->getOperand(2));
28719 MVT StVT = Subtarget.hasSSE2() ? MVT::v2i64 : MVT::v4f32;
28720 SclToVec = DAG.getBitcast(StVT, SclToVec);
28721 SDVTList Tys = DAG.getVTList(MVT::Other);
28722 SDValue Ops[] = {Node->getChain(), SclToVec, Node->getBasePtr()};
28723 Chain = DAG.getMemIntrinsicNode(X86ISD::VEXTRACT_STORE, dl, Tys, Ops,
28724 MVT::i64, Node->getMemOperand());
28725 } else if (Subtarget.hasX87()) {
28726 // First load this into an 80-bit X87 register using a stack temporary.
28727 // This will put the whole integer into the significand.
28728 SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
28729 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
28730 MachinePointerInfo MPI =
28731 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
28732 Chain =
28733 DAG.getStore(Node->getChain(), dl, Node->getOperand(2), StackPtr,
28734 MPI, /*Align*/ 0, MachineMemOperand::MOStore);
28735 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
28736 SDValue LdOps[] = {Chain, StackPtr};
28737 SDValue Value =
28738 DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, LdOps, MVT::i64, MPI,
28739 /*Align*/ None, MachineMemOperand::MOLoad);
28740 Chain = Value.getValue(1);
28742 // Now use an FIST to do the atomic store.
28743 SDValue StoreOps[] = {Chain, Value, Node->getBasePtr()};
28744 Chain =
28745 DAG.getMemIntrinsicNode(X86ISD::FIST, dl, DAG.getVTList(MVT::Other),
28746 StoreOps, MVT::i64, Node->getMemOperand());
28747 }
28749 if (Chain) {
28750 // If this is a sequentially consistent store, also emit an appropriate
28751 // cross-thread fence.
28752 if (IsSeqCst)
28753 Chain = emitLockedStackOp(DAG, Subtarget, Chain, dl);
28755 return Chain;
28756 }
28757 }
28758 }
28760 // Convert seq_cst store -> xchg
28761 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
28762 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
28763 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
28764 Node->getMemoryVT(),
28765 Node->getOperand(0),
28766 Node->getOperand(1), Node->getOperand(2),
28767 Node->getMemOperand());
28768 return Swap.getValue(1);
28769 }
28771 static SDValue LowerADDSUBCARRY(SDValue Op, SelectionDAG &DAG) {
28772 SDNode *N = Op.getNode();
28773 MVT VT = N->getSimpleValueType(0);
28775 // Let legalize expand this if it isn't a legal type yet.
28776 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
28777 return SDValue();
28779 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
28780 SDLoc DL(N);
28782 // Set the carry flag.
28783 SDValue Carry = Op.getOperand(2);
28784 EVT CarryVT = Carry.getValueType();
28785 Carry = DAG.getNode(X86ISD::ADD, DL, DAG.getVTList(CarryVT, MVT::i32),
28786 Carry, DAG.getAllOnesConstant(DL, CarryVT));
28788 unsigned Opc = Op.getOpcode() == ISD::ADDCARRY ? X86ISD::ADC : X86ISD::SBB;
28789 SDValue Sum = DAG.getNode(Opc, DL, VTs, Op.getOperand(0),
28790 Op.getOperand(1), Carry.getValue(1));
28792 SDValue SetCC = getSETCC(X86::COND_B, Sum.getValue(1), DL, DAG);
28793 if (N->getValueType(1) == MVT::i1)
28794 SetCC = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, SetCC);
28796 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
28797 }
28799 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget &Subtarget,
28800 SelectionDAG &DAG) {
28801 assert(Subtarget.isTargetDarwin() && Subtarget.is64Bit());
28803 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
28804 // which returns the values as { float, float } (in XMM0) or
28805 // { double, double } (which is returned in XMM0, XMM1).
28806 SDLoc dl(Op);
28807 SDValue Arg = Op.getOperand(0);
28808 EVT ArgVT = Arg.getValueType();
28809 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
28811 TargetLowering::ArgListTy Args;
28812 TargetLowering::ArgListEntry Entry;
28814 Entry.Node = Arg;
28815 Entry.Ty = ArgTy;
28816 Entry.IsSExt = false;
28817 Entry.IsZExt = false;
28818 Args.push_back(Entry);
28820 bool isF64 = ArgVT == MVT::f64;
28821 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
28822 // the small struct {f32, f32} is returned in (eax, edx). For f64,
28823 // the results are returned via SRet in memory.
28824 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28825 RTLIB::Libcall LC = isF64 ? RTLIB::SINCOS_STRET_F64 : RTLIB::SINCOS_STRET_F32;
28826 const char *LibcallName = TLI.getLibcallName(LC);
28827 SDValue Callee =
28828 DAG.getExternalSymbol(LibcallName, TLI.getPointerTy(DAG.getDataLayout()));
28830 Type *RetTy = isF64 ? (Type *)StructType::get(ArgTy, ArgTy)
28831 : (Type *)FixedVectorType::get(ArgTy, 4);
28833 TargetLowering::CallLoweringInfo CLI(DAG);
28834 CLI.setDebugLoc(dl)
28835 .setChain(DAG.getEntryNode())
28836 .setLibCallee(CallingConv::C, RetTy, Callee, std::move(Args));
28838 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
28840 if (isF64)
28841 // Returned in xmm0 and xmm1.
28842 return CallResult.first;
28844 // Returned in bits 0:31 and 32:64 xmm0.
28845 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
28846 CallResult.first, DAG.getIntPtrConstant(0, dl));
28847 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
28848 CallResult.first, DAG.getIntPtrConstant(1, dl));
28849 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
28850 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
28851 }
28853 /// Widen a vector input to a vector of NVT. The
28854 /// input vector must have the same element type as NVT.
28855 static SDValue ExtendToType(SDValue InOp, MVT NVT, SelectionDAG &DAG,
28856 bool FillWithZeroes = false) {
28857 // Check if InOp already has the right width.
28858 MVT InVT = InOp.getSimpleValueType();
28859 if (InVT == NVT)
28860 return InOp;
28862 if (InOp.isUndef())
28863 return DAG.getUNDEF(NVT);
28865 assert(InVT.getVectorElementType() == NVT.getVectorElementType() &&
28866 "input and widen element type must match");
28868 unsigned InNumElts = InVT.getVectorNumElements();
28869 unsigned WidenNumElts = NVT.getVectorNumElements();
28870 assert(WidenNumElts > InNumElts && WidenNumElts % InNumElts == 0 &&
28871 "Unexpected request for vector widening");
28873 SDLoc dl(InOp);
28874 if (InOp.getOpcode() == ISD::CONCAT_VECTORS &&
28875 InOp.getNumOperands() == 2) {
28876 SDValue N1 = InOp.getOperand(1);
28877 if ((ISD::isBuildVectorAllZeros(N1.getNode()) && FillWithZeroes) ||
28878 N1.isUndef()) {
28879 InOp = InOp.getOperand(0);
28880 InVT = InOp.getSimpleValueType();
28881 InNumElts = InVT.getVectorNumElements();
28882 }
28883 }
28884 if (ISD::isBuildVectorOfConstantSDNodes(InOp.getNode()) ||
28885 ISD::isBuildVectorOfConstantFPSDNodes(InOp.getNode())) {
28886 SmallVector<SDValue, 16> Ops;
28887 for (unsigned i = 0; i < InNumElts; ++i)
28888 Ops.push_back(InOp.getOperand(i));
28890 EVT EltVT = InOp.getOperand(0).getValueType();
28892 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, EltVT) :
28893 DAG.getUNDEF(EltVT);
28894 for (unsigned i = 0; i < WidenNumElts - InNumElts; ++i)
28895 Ops.push_back(FillVal);
28896 return DAG.getBuildVector(NVT, dl, Ops);
28897 }
28898 SDValue FillVal = FillWithZeroes ? DAG.getConstant(0, dl, NVT) :
28899 DAG.getUNDEF(NVT);
28900 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, NVT, FillVal,
28901 InOp, DAG.getIntPtrConstant(0, dl));
28902 }
28904 static SDValue LowerMSCATTER(SDValue Op, const X86Subtarget &Subtarget,
28905 SelectionDAG &DAG) {
28906 assert(Subtarget.hasAVX512() &&
28907 "MGATHER/MSCATTER are supported on AVX-512 arch only");
28909 MaskedScatterSDNode *N = cast<MaskedScatterSDNode>(Op.getNode());
28910 SDValue Src = N->getValue();
28911 MVT VT = Src.getSimpleValueType();
28912 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported scatter op");
28914 SDLoc dl(Op);
28915 SDValue Scale = N->getScale();
28916 SDValue Index = N->getIndex();
28917 SDValue Mask = N->getMask();
28918 SDValue Chain = N->getChain();
28919 SDValue BasePtr = N->getBasePtr();
28921 if (VT == MVT::v2f32 || VT == MVT::v2i32) {
28922 assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
28923 // If the index is v2i64 and we have VLX we can use xmm for data and index.
28924 if (Index.getValueType() == MVT::v2i64 && Subtarget.hasVLX()) {
28925 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
28926 EVT WideVT = TLI.getTypeToTransformTo(*DAG.getContext(), VT);
28927 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT, Src, DAG.getUNDEF(VT));
28928 SDVTList VTs = DAG.getVTList(MVT::Other);
28929 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
28930 return DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
28931 N->getMemoryVT(), N->getMemOperand());
28932 }
28933 return SDValue();
28934 }
28936 MVT IndexVT = Index.getSimpleValueType();
28938 // If the index is v2i32, we're being called by type legalization and we
28939 // should just let the default handling take care of it.
28940 if (IndexVT == MVT::v2i32)
28941 return SDValue();
28943 // If we don't have VLX and neither the passthru or index is 512-bits, we
28944 // need to widen until one is.
28945 if (!Subtarget.hasVLX() && !VT.is512BitVector() &&
28946 !Index.getSimpleValueType().is512BitVector()) {
28947 // Determine how much we need to widen by to get a 512-bit type.
28948 unsigned Factor = std::min(512/VT.getSizeInBits(),
28949 512/IndexVT.getSizeInBits());
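// e.g. v4i32 data with a v4i64 index: Factor = min(512/128, 512/256) = 2,
// so both are doubled, giving v8i32 data and a 512-bit v8i64 index.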
28950 unsigned NumElts = VT.getVectorNumElements() * Factor;
28952 VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
28953 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
28954 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
28956 Src = ExtendToType(Src, VT, DAG);
28957 Index = ExtendToType(Index, IndexVT, DAG);
28958 Mask = ExtendToType(Mask, MaskVT, DAG, true);
28959 }
28961 SDVTList VTs = DAG.getVTList(MVT::Other);
28962 SDValue Ops[] = {Chain, Src, Mask, BasePtr, Index, Scale};
28963 return DAG.getMemIntrinsicNode(X86ISD::MSCATTER, dl, VTs, Ops,
28964 N->getMemoryVT(), N->getMemOperand());
28965 }
28967 static SDValue LowerMLOAD(SDValue Op, const X86Subtarget &Subtarget,
28968 SelectionDAG &DAG) {
28970 MaskedLoadSDNode *N = cast<MaskedLoadSDNode>(Op.getNode());
28971 MVT VT = Op.getSimpleValueType();
28972 MVT ScalarVT = VT.getScalarType();
28973 SDValue Mask = N->getMask();
28974 MVT MaskVT = Mask.getSimpleValueType();
28975 SDValue PassThru = N->getPassThru();
28977 SDLoc dl(Op);
28978 // Handle AVX masked loads which don't support passthru other than 0.
28979 if (MaskVT.getVectorElementType() != MVT::i1) {
28980 // We also allow undef in the isel pattern.
28981 if (PassThru.isUndef() || ISD::isBuildVectorAllZeros(PassThru.getNode()))
28982 return Op;
28984 SDValue NewLoad = DAG.getMaskedLoad(
28985 VT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
28986 getZeroVector(VT, Subtarget, DAG, dl), N->getMemoryVT(),
28987 N->getMemOperand(), N->getAddressingMode(), N->getExtensionType(),
28988 N->isExpandingLoad());
28990 SDValue Select = DAG.getNode(ISD::VSELECT, dl, VT, Mask, NewLoad, PassThru);
28991 return DAG.getMergeValues({ Select, NewLoad.getValue(1) }, dl);
28992 }
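// IR-level sketch of the transform above (assuming an AVX target without
// AVX-512, where the hardware passthru must be zero):
//   %l = masked.load(ptr, mask, zeroinitializer)
//   %r = select mask, %l, PassThru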
28994 assert((!N->isExpandingLoad() || Subtarget.hasAVX512()) &&
28995 "Expanding masked load is supported on AVX-512 target only!");
28997 assert((!N->isExpandingLoad() || ScalarVT.getSizeInBits() >= 32) &&
28998 "Expanding masked load is supported for 32 and 64-bit types only!");
29000 assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
29001 "Cannot lower masked load op.");
29003 assert((ScalarVT.getSizeInBits() >= 32 ||
29004 (Subtarget.hasBWI() &&
29005 (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
29006 "Unsupported masked load op.");
29008 // This operation is legal for targets with VLX, but without
29009 // VLX the vector should be widened to 512 bits.
29010 unsigned NumEltsInWideVec = 512 / VT.getScalarSizeInBits();
29011 MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
29012 PassThru = ExtendToType(PassThru, WideDataVT, DAG);
29014 // Mask element has to be i1.
29015 assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
29016 "Unexpected mask type");
29018 MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
29020 Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
29021 SDValue NewLoad = DAG.getMaskedLoad(
29022 WideDataVT, dl, N->getChain(), N->getBasePtr(), N->getOffset(), Mask,
29023 PassThru, N->getMemoryVT(), N->getMemOperand(), N->getAddressingMode(),
29024 N->getExtensionType(), N->isExpandingLoad());
29026 SDValue Extract =
29027 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, NewLoad.getValue(0),
29028 DAG.getIntPtrConstant(0, dl));
29029 SDValue RetOps[] = {Extract, NewLoad.getValue(1)};
29030 return DAG.getMergeValues(RetOps, dl);
29031 }
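// e.g. a v8i32 masked load on AVX-512 without VLX executes as a v16i32
// masked load whose widened mask is padded with zeros (so no extra memory
// is touched), and the original v8i32 result is extracted from element 0.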
29033 static SDValue LowerMSTORE(SDValue Op, const X86Subtarget &Subtarget,
29034 SelectionDAG &DAG) {
29035 MaskedStoreSDNode *N = cast<MaskedStoreSDNode>(Op.getNode());
29036 SDValue DataToStore = N->getValue();
29037 MVT VT = DataToStore.getSimpleValueType();
29038 MVT ScalarVT = VT.getScalarType();
29039 SDValue Mask = N->getMask();
29040 SDLoc dl(Op);
29042 assert((!N->isCompressingStore() || Subtarget.hasAVX512()) &&
29043 "Compressing masked store is supported on AVX-512 target only!");
29045 assert((!N->isCompressingStore() || ScalarVT.getSizeInBits() >= 32) &&
29046 "Compressing masked store is supported for 32 and 64-bit types only!");
29048 assert(Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
29049 "Cannot lower masked store op.");
29051 assert((ScalarVT.getSizeInBits() >= 32 ||
29052 (Subtarget.hasBWI() &&
29053 (ScalarVT == MVT::i8 || ScalarVT == MVT::i16))) &&
29054 "Unsupported masked store op.");
29056 // This operation is legal for targets with VLX, but without
29057 // VLX the vector should be widened to 512 bits.
29058 unsigned NumEltsInWideVec = 512/VT.getScalarSizeInBits();
29059 MVT WideDataVT = MVT::getVectorVT(ScalarVT, NumEltsInWideVec);
29061 // Mask element has to be i1.
29062 assert(Mask.getSimpleValueType().getScalarType() == MVT::i1 &&
29063 "Unexpected mask type");
29065 MVT WideMaskVT = MVT::getVectorVT(MVT::i1, NumEltsInWideVec);
29067 DataToStore = ExtendToType(DataToStore, WideDataVT, DAG);
29068 Mask = ExtendToType(Mask, WideMaskVT, DAG, true);
29069 return DAG.getMaskedStore(N->getChain(), dl, DataToStore, N->getBasePtr(),
29070 N->getOffset(), Mask, N->getMemoryVT(),
29071 N->getMemOperand(), N->getAddressingMode(),
29072 N->isTruncatingStore(), N->isCompressingStore());
29073 }
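// The store-side mirror of the masked-load widening above: e.g. a v8i32
// masked store becomes a v16i32 masked store whose upper eight mask bits
// are zero, so the extra lanes are never written to memory.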
29075 static SDValue LowerMGATHER(SDValue Op, const X86Subtarget &Subtarget,
29076 SelectionDAG &DAG) {
29077 assert(Subtarget.hasAVX2() &&
29078 "MGATHER/MSCATTER are supported on AVX-512/AVX-2 arch only");
29080 MaskedGatherSDNode *N = cast<MaskedGatherSDNode>(Op.getNode());
29081 SDLoc dl(Op);
29082 MVT VT = Op.getSimpleValueType();
29083 SDValue Index = N->getIndex();
29084 SDValue Mask = N->getMask();
29085 SDValue PassThru = N->getPassThru();
29086 MVT IndexVT = Index.getSimpleValueType();
29088 assert(VT.getScalarSizeInBits() >= 32 && "Unsupported gather op");
29090 // If the index is v2i32, we're being called by type legalization.
29091 if (IndexVT == MVT::v2i32)
29092 return SDValue();
29094 // If we don't have VLX and neither the passthru nor the index is 512
29095 // bits, we need to widen until one is.
29096 MVT OrigVT = VT;
29097 if (Subtarget.hasAVX512() && !Subtarget.hasVLX() && !VT.is512BitVector() &&
29098 !IndexVT.is512BitVector()) {
29099 // Determine how much we need to widen by to get a 512-bit type.
29100 unsigned Factor = std::min(512/VT.getSizeInBits(),
29101 512/IndexVT.getSizeInBits());
29103 unsigned NumElts = VT.getVectorNumElements() * Factor;
29105 VT = MVT::getVectorVT(VT.getVectorElementType(), NumElts);
29106 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(), NumElts);
29107 MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
29109 PassThru = ExtendToType(PassThru, VT, DAG);
29110 Index = ExtendToType(Index, IndexVT, DAG);
29111 Mask = ExtendToType(Mask, MaskVT, DAG, true);
29112 }
29114 SDValue Ops[] = { N->getChain(), PassThru, Mask, N->getBasePtr(), Index,
29115 N->getScale() };
29116 SDValue NewGather = DAG.getMemIntrinsicNode(
29117 X86ISD::MGATHER, dl, DAG.getVTList(VT, MVT::Other), Ops, N->getMemoryVT(),
29118 N->getMemOperand());
29119 SDValue Extract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, OrigVT,
29120 NewGather, DAG.getIntPtrConstant(0, dl));
29121 return DAG.getMergeValues({Extract, NewGather.getValue(1)}, dl);
29122 }
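// Unlike scatter, gather must hand a value back in the original narrow
// type: the widened gather result is extracted back to OrigVT so the lanes
// added for the 512-bit operation never escape.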
29124 static SDValue LowerADDRSPACECAST(SDValue Op, SelectionDAG &DAG) {
29125 SDLoc dl(Op);
29126 SDValue Src = Op.getOperand(0);
29127 MVT DstVT = Op.getSimpleValueType();
29129 AddrSpaceCastSDNode *N = cast<AddrSpaceCastSDNode>(Op.getNode());
29130 unsigned SrcAS = N->getSrcAddressSpace();
29132 assert(SrcAS != N->getDestAddressSpace() &&
29133 "addrspacecast must be between different address spaces");
29135 if (SrcAS == X86AS::PTR32_UPTR && DstVT == MVT::i64) {
29136 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Src);
29137 } else if (DstVT == MVT::i64) {
29138 Op = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Src);
29139 } else if (DstVT == MVT::i32) {
29140 Op = DAG.getNode(ISD::TRUNCATE, dl, DstVT, Src);
29141 } else {
29142 report_fatal_error("Bad address space in addrspacecast");
29143 }
29145 return Op;
29146 }
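// e.g. casting a 32-bit PTR32_UPTR pointer into a 64-bit address space
// zero-extends the value, any other 32->64 cast sign-extends, and a
// 64->32 cast simply truncates.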
29147 SDValue X86TargetLowering::LowerGC_TRANSITION(SDValue Op,
29148 SelectionDAG &DAG) const {
29149 // TODO: Eventually, the lowering of these nodes should be informed by or
29150 // deferred to the GC strategy for the function in which they appear. For
29151 // now, however, they must be lowered to something. Since they are logically
29152 // no-ops in the case of a null GC strategy (or a GC strategy which does not
29153 // require special handling for these nodes), lower them as literal NOOPs
29154 // for the time being.
29155 SmallVector<SDValue, 2> Ops;
29157 Ops.push_back(Op.getOperand(0));
29158 if (Op->getGluedNode())
29159 Ops.push_back(Op->getOperand(Op->getNumOperands() - 1));
29162 SDVTList VTs = DAG.getVTList(MVT::Other, MVT::Glue);
29163 SDValue NOOP(DAG.getMachineNode(X86::NOOP, SDLoc(Op), VTs, Ops), 0);
29164 return NOOP;
29165 }
29168 SDValue X86TargetLowering::LowerF128Call(SDValue Op, SelectionDAG &DAG,
29169 RTLIB::Libcall Call) const {
29171 bool IsStrict = Op->isStrictFPOpcode();
29172 unsigned Offset = IsStrict ? 1 : 0;
29173 SmallVector<SDValue, 2> Ops(Op->op_begin() + Offset, Op->op_end());
29175 SDLoc dl(Op);
29176 SDValue Chain = IsStrict ? Op.getOperand(0) : SDValue();
29177 MakeLibCallOptions CallOptions;
29178 std::pair<SDValue, SDValue> Tmp = makeLibCall(DAG, Call, MVT::f128, Ops,
29179 CallOptions, dl, Chain);
29181 if (IsStrict)
29182 return DAG.getMergeValues({ Tmp.first, Tmp.second }, dl);
29184 return Tmp.first;
29185 }
29187 // Custom split CVTPS2PH with wide types.
29188 static SDValue LowerCVTPS2PH(SDValue Op, SelectionDAG &DAG) {
29189 SDLoc dl(Op);
29190 EVT VT = Op.getValueType();
29191 SDValue Lo, Hi;
29192 std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
29193 EVT LoVT, HiVT;
29194 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
29195 SDValue RC = Op.getOperand(1);
29196 Lo = DAG.getNode(X86ISD::CVTPS2PH, dl, LoVT, Lo, RC);
29197 Hi = DAG.getNode(X86ISD::CVTPS2PH, dl, HiVT, Hi, RC);
29198 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
29199 }
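// e.g. a CVTPS2PH whose source is wider than the target supports is split
// into two halves, each converted with the same rounding-control operand
// RC, and the two results are concatenated back to the full result type.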
29201 /// Provide custom lowering hooks for some operations.
29202 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
29203 switch (Op.getOpcode()) {
29204 default: llvm_unreachable("Should not custom lower this!");
29205 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
29206 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
29207 return LowerCMP_SWAP(Op, Subtarget, DAG);
29208 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
29209 case ISD::ATOMIC_LOAD_ADD:
29210 case ISD::ATOMIC_LOAD_SUB:
29211 case ISD::ATOMIC_LOAD_OR:
29212 case ISD::ATOMIC_LOAD_XOR:
29213 case ISD::ATOMIC_LOAD_AND: return lowerAtomicArith(Op, DAG, Subtarget);
29214 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG, Subtarget);
29215 case ISD::BITREVERSE: return LowerBITREVERSE(Op, Subtarget, DAG);
29216 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
29217 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, Subtarget, DAG);
29218 case ISD::VECTOR_SHUFFLE: return lowerVECTOR_SHUFFLE(Op, Subtarget, DAG);
29219 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
29220 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
29221 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
29222 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
29223 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
29224 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, Subtarget,DAG);
29225 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
29226 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
29227 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
29228 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
29229 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
29230 case ISD::SHL_PARTS:
29231 case ISD::SRA_PARTS:
29232 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
29233 case ISD::FSHL:
29234 case ISD::FSHR: return LowerFunnelShift(Op, Subtarget, DAG);
29235 case ISD::STRICT_SINT_TO_FP:
29236 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
29237 case ISD::STRICT_UINT_TO_FP:
29238 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
29239 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
29240 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
29241 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
29242 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
29243 case ISD::ZERO_EXTEND_VECTOR_INREG:
29244 case ISD::SIGN_EXTEND_VECTOR_INREG:
29245 return LowerEXTEND_VECTOR_INREG(Op, Subtarget, DAG);
29246 case ISD::FP_TO_SINT:
29247 case ISD::STRICT_FP_TO_SINT:
29248 case ISD::FP_TO_UINT:
29249 case ISD::STRICT_FP_TO_UINT: return LowerFP_TO_INT(Op, DAG);
29250 case ISD::FP_EXTEND:
29251 case ISD::STRICT_FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
29252 case ISD::FP_ROUND:
29253 case ISD::STRICT_FP_ROUND: return LowerFP_ROUND(Op, DAG);
29254 case ISD::FP16_TO_FP:
29255 case ISD::STRICT_FP16_TO_FP: return LowerFP16_TO_FP(Op, DAG);
29256 case ISD::FP_TO_FP16:
29257 case ISD::STRICT_FP_TO_FP16: return LowerFP_TO_FP16(Op, DAG);
29258 case ISD::LOAD: return LowerLoad(Op, Subtarget, DAG);
29259 case ISD::STORE: return LowerStore(Op, Subtarget, DAG);
29260 case ISD::FADD:
29261 case ISD::FSUB: return lowerFaddFsub(Op, DAG);
29262 case ISD::FROUND: return LowerFROUND(Op, DAG);
29263 case ISD::FABS:
29264 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
29265 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
29266 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
29267 case ISD::LRINT:
29268 case ISD::LLRINT: return LowerLRINT_LLRINT(Op, DAG);
29269 case ISD::SETCC:
29270 case ISD::STRICT_FSETCC:
29271 case ISD::STRICT_FSETCCS: return LowerSETCC(Op, DAG);
29272 case ISD::SETCCCARRY: return LowerSETCCCARRY(Op, DAG);
29273 case ISD::SELECT: return LowerSELECT(Op, DAG);
29274 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
29275 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
29276 case ISD::VASTART: return LowerVASTART(Op, DAG);
29277 case ISD::VAARG: return LowerVAARG(Op, DAG);
29278 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
29279 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
29280 case ISD::INTRINSIC_VOID:
29281 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
29282 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
29283 case ISD::ADDROFRETURNADDR: return LowerADDROFRETURNADDR(Op, DAG);
29284 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
29285 case ISD::FRAME_TO_ARGS_OFFSET:
29286 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
29287 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
29288 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
29289 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
29290 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
29291 case ISD::EH_SJLJ_SETUP_DISPATCH:
29292 return lowerEH_SJLJ_SETUP_DISPATCH(Op, DAG);
29293 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
29294 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
29295 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
29296 case ISD::CTLZ:
29297 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ(Op, Subtarget, DAG);
29298 case ISD::CTTZ:
29299 case ISD::CTTZ_ZERO_UNDEF: return LowerCTTZ(Op, Subtarget, DAG);
29300 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
29301 case ISD::MULHS:
29302 case ISD::MULHU: return LowerMULH(Op, Subtarget, DAG);
29303 case ISD::ROTL:
29304 case ISD::ROTR: return LowerRotate(Op, Subtarget, DAG);
29305 case ISD::SRA:
29306 case ISD::SRL:
29307 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
29308 case ISD::SADDO:
29309 case ISD::UADDO:
29310 case ISD::SSUBO:
29311 case ISD::USUBO:
29312 case ISD::SMULO:
29313 case ISD::UMULO: return LowerXALUO(Op, DAG);
29314 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
29315 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
29316 case ISD::ADDCARRY:
29317 case ISD::SUBCARRY: return LowerADDSUBCARRY(Op, DAG);
29318 case ISD::ADD:
29319 case ISD::SUB: return lowerAddSub(Op, DAG, Subtarget);
29320 case ISD::UADDSAT:
29321 case ISD::SADDSAT:
29322 case ISD::USUBSAT:
29323 case ISD::SSUBSAT: return LowerADDSAT_SUBSAT(Op, DAG, Subtarget);
29324 case ISD::SMAX:
29325 case ISD::SMIN:
29326 case ISD::UMAX:
29327 case ISD::UMIN: return LowerMINMAX(Op, DAG);
29328 case ISD::ABS: return LowerABS(Op, Subtarget, DAG);
29329 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
29330 case ISD::MLOAD: return LowerMLOAD(Op, Subtarget, DAG);
29331 case ISD::MSTORE: return LowerMSTORE(Op, Subtarget, DAG);
29332 case ISD::MGATHER: return LowerMGATHER(Op, Subtarget, DAG);
29333 case ISD::MSCATTER: return LowerMSCATTER(Op, Subtarget, DAG);
29334 case ISD::GC_TRANSITION_START:
29335 case ISD::GC_TRANSITION_END: return LowerGC_TRANSITION(Op, DAG);
29336 case ISD::ADDRSPACECAST: return LowerADDRSPACECAST(Op, DAG);
29337 case X86ISD::CVTPS2PH: return LowerCVTPS2PH(Op, DAG);
29338 }
29339 }
29341 /// Places new result values for the node in Results (their number
29342 /// and types must exactly match those of the original return values of
29343 /// the node), or leaves Results empty, which indicates that the node is not
29344 /// to be custom lowered after all.
29345 void X86TargetLowering::LowerOperationWrapper(SDNode *N,
29346 SmallVectorImpl<SDValue> &Results,
29347 SelectionDAG &DAG) const {
29348 SDValue Res = LowerOperation(SDValue(N, 0), DAG);
29350 if (!Res.getNode())
29351 return;
29353 // If the original node has one result, take the return value from
29354 // LowerOperation as is. It might not be result number 0.
29355 if (N->getNumValues() == 1) {
29356 Results.push_back(Res);
29357 return;
29358 }
29360 // If the original node has multiple results, then the return node should
29361 // have the same number of results.
29362 assert((N->getNumValues() == Res->getNumValues()) &&
29363 "Lowering returned the wrong number of results!");
29365 // Place new result values based on the result number of N.
29366 for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
29367 Results.push_back(Res.getValue(I));
29368 }
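// e.g. a node with (i32, Other) results must come back from LowerOperation
// with exactly two values; the loop above forwards value I of the new node
// as result I of the original, keeping chain and value results aligned.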
29370 /// Replace a node with an illegal result type with a new node built out of
29371 /// custom code.
29372 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
29373 SmallVectorImpl<SDValue>&Results,
29374 SelectionDAG &DAG) const {
29375 SDLoc dl(N);
29376 switch (N->getOpcode()) {
29377 default:
29378 #ifndef NDEBUG
29379 dbgs() << "ReplaceNodeResults: ";
29380 N->dump(&DAG);
29381 #endif
29382 llvm_unreachable("Do not know how to custom type legalize this operation!");
29383 case X86ISD::CVTPH2PS: {
29384 EVT VT = N->getValueType(0);
29385 SDValue Lo, Hi;
29386 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
29387 EVT LoVT, HiVT;
29388 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
29389 Lo = DAG.getNode(X86ISD::CVTPH2PS, dl, LoVT, Lo);
29390 Hi = DAG.getNode(X86ISD::CVTPH2PS, dl, HiVT, Hi);
29391 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
29392 Results.push_back(Res);
29393 return;
29394 }
29395 case X86ISD::STRICT_CVTPH2PS: {
29396 EVT VT = N->getValueType(0);
29397 SDValue Lo, Hi;
29398 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 1);
29399 EVT LoVT, HiVT;
29400 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
29401 Lo = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {LoVT, MVT::Other},
29402 {N->getOperand(0), Lo});
29403 Hi = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {HiVT, MVT::Other},
29404 {N->getOperand(0), Hi});
29405 SDValue Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
29406 Lo.getValue(1), Hi.getValue(1));
29407 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
29408 Results.push_back(Res);
29409 Results.push_back(Chain);
29410 return;
29411 }
29412 case ISD::CTPOP: {
29413 assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
29414 // Use a v2i64 if possible.
29415 bool NoImplicitFloatOps =
29416 DAG.getMachineFunction().getFunction().hasFnAttribute(
29417 Attribute::NoImplicitFloat);
29418 if (isTypeLegal(MVT::v2i64) && !NoImplicitFloatOps) {
29419 SDValue Wide =
29420 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, N->getOperand(0));
29421 Wide = DAG.getNode(ISD::CTPOP, dl, MVT::v2i64, Wide);
29422 // Bit count should fit in 32-bits, extract it as that and then zero
29423 // extend to i64. Otherwise we end up extracting bits 63:32 separately.
29424 Wide = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Wide);
29425 Wide = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32, Wide,
29426 DAG.getIntPtrConstant(0, dl));
29427 Wide = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Wide);
29428 Results.push_back(Wide);
29429 }
29430 return;
29431 }
29432 case ISD::MUL: {
29433 EVT VT = N->getValueType(0);
29434 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
29435 VT.getVectorElementType() == MVT::i8 && "Unexpected VT!");
29436 // Pre-promote these to vXi16 to avoid op legalization thinking all 16
29437 // elements are needed.
29438 MVT MulVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());
29439 SDValue Op0 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(0));
29440 SDValue Op1 = DAG.getNode(ISD::ANY_EXTEND, dl, MulVT, N->getOperand(1));
29441 SDValue Res = DAG.getNode(ISD::MUL, dl, MulVT, Op0, Op1);
29442 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
29443 unsigned NumConcats = 16 / VT.getVectorNumElements();
29444 SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
29445 ConcatOps[0] = Res;
29446 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i8, ConcatOps);
29447 Results.push_back(Res);
29448 return;
29449 }
29450 case X86ISD::VPMADDWD:
29451 case X86ISD::AVG: {
29452 // Legalize types for X86ISD::AVG/VPMADDWD by widening.
29453 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
29455 EVT VT = N->getValueType(0);
29456 EVT InVT = N->getOperand(0).getValueType();
29457 assert(VT.getSizeInBits() < 128 && 128 % VT.getSizeInBits() == 0 &&
29458 "Expected a VT that divides into 128 bits.");
29459 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
29460 "Unexpected type action!");
29461 unsigned NumConcat = 128 / InVT.getSizeInBits();
29463 EVT InWideVT = EVT::getVectorVT(*DAG.getContext(),
29464 InVT.getVectorElementType(),
29465 NumConcat * InVT.getVectorNumElements());
29466 EVT WideVT = EVT::getVectorVT(*DAG.getContext(),
29467 VT.getVectorElementType(),
29468 NumConcat * VT.getVectorNumElements());
29470 SmallVector<SDValue, 16> Ops(NumConcat, DAG.getUNDEF(InVT));
29471 Ops[0] = N->getOperand(0);
29472 SDValue InVec0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
29473 Ops[0] = N->getOperand(1);
29474 SDValue InVec1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, InWideVT, Ops);
29476 SDValue Res = DAG.getNode(N->getOpcode(), dl, WideVT, InVec0, InVec1);
29477 Results.push_back(Res);
29478 return;
29479 }
29480 case ISD::ABS: {
29481 assert(N->getValueType(0) == MVT::i64 &&
29482 "Unexpected type (!= i64) on ABS.");
29483 MVT HalfT = MVT::i32;
29484 SDValue Lo, Hi, Tmp;
29485 SDVTList VTList = DAG.getVTList(HalfT, MVT::i1);
29487 Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
29488 DAG.getConstant(0, dl, HalfT));
29489 Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(0),
29490 DAG.getConstant(1, dl, HalfT));
29491 Tmp = DAG.getNode(
29492 ISD::SRA, dl, HalfT, Hi,
29493 DAG.getShiftAmountConstant(HalfT.getSizeInBits() - 1, HalfT, dl));
29494 Lo = DAG.getNode(ISD::UADDO, dl, VTList, Tmp, Lo);
29495 Hi = DAG.getNode(ISD::ADDCARRY, dl, VTList, Tmp, Hi,
29496 SDValue(Lo.getNode(), 1));
29497 Hi = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Hi);
29498 Lo = DAG.getNode(ISD::XOR, dl, HalfT, Tmp, Lo);
29499 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi));
29500 return;
29501 }
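// This is the classic two's-complement abs trick: Tmp = Hi >> 31 replicates
// the sign across both halves, and (x + Tmp) ^ Tmp yields |x|. E.g. for
// x = -5: Tmp is all-ones, so (x + (-1)) ^ (-1) = (-6) ^ (-1) = 5.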
29502 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
29503 case X86ISD::FMINC:
29504 case X86ISD::FMIN:
29505 case X86ISD::FMAXC:
29506 case X86ISD::FMAX: {
29507 EVT VT = N->getValueType(0);
29508 assert(VT == MVT::v2f32 && "Unexpected type (!= v2f32) on FMIN/FMAX.");
29509 SDValue UNDEF = DAG.getUNDEF(VT);
29510 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
29511 N->getOperand(0), UNDEF);
29512 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
29513 N->getOperand(1), UNDEF);
29514 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
29515 return;
29516 }
29517 case ISD::SDIV:
29518 case ISD::UDIV:
29519 case ISD::SREM:
29520 case ISD::UREM: {
29521 EVT VT = N->getValueType(0);
29522 if (VT.isVector()) {
29523 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
29524 "Unexpected type action!");
29525 // If this RHS is a constant splat vector we can widen this and let
29526 // division/remainder by constant optimize it.
29527 // TODO: Can we do something for non-splat?
29528 APInt SplatVal;
29529 if (ISD::isConstantSplatVector(N->getOperand(1).getNode(), SplatVal)) {
29530 unsigned NumConcats = 128 / VT.getSizeInBits();
29531 SmallVector<SDValue, 8> Ops0(NumConcats, DAG.getUNDEF(VT));
29532 Ops0[0] = N->getOperand(0);
29533 EVT ResVT = getTypeToTransformTo(*DAG.getContext(), VT);
29534 SDValue N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, ResVT, Ops0);
29535 SDValue N1 = DAG.getConstant(SplatVal, dl, ResVT);
29536 SDValue Res = DAG.getNode(N->getOpcode(), dl, ResVT, N0, N1);
29537 Results.push_back(Res);
29538 }
29539 return;
29540 }
29541 LLVM_FALLTHROUGH;
29542 }
29544 case ISD::SDIVREM:
29545 case ISD::UDIVREM: {
29546 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
29547 Results.push_back(V);
29548 return;
29549 }
29550 case ISD::TRUNCATE: {
29551 MVT VT = N->getSimpleValueType(0);
29552 if (getTypeAction(*DAG.getContext(), VT) != TypeWidenVector)
29553 return;
29555 // The generic legalizer will try to widen the input type to the same
29556 // number of elements as the widened result type. But this isn't always
29557 // the best thing so do some custom legalization to avoid some cases.
29558 MVT WidenVT = getTypeToTransformTo(*DAG.getContext(), VT).getSimpleVT();
29559 SDValue In = N->getOperand(0);
29560 EVT InVT = In.getValueType();
29562 unsigned InBits = InVT.getSizeInBits();
29563 if (128 % InBits == 0) {
29564 // 128 bit and smaller inputs should avoid truncate altogether and
29565 // just use a build_vector that will become a shuffle.
29566 // TODO: Widen and use a shuffle directly?
29567 MVT InEltVT = InVT.getSimpleVT().getVectorElementType();
29568 EVT EltVT = VT.getVectorElementType();
29569 unsigned WidenNumElts = WidenVT.getVectorNumElements();
29570 SmallVector<SDValue, 16> Ops(WidenNumElts, DAG.getUNDEF(EltVT));
29571 // Use the original element count so we don't do more scalar opts than
29572 // necessary.
29573 unsigned MinElts = VT.getVectorNumElements();
29574 for (unsigned i=0; i < MinElts; ++i) {
29575 SDValue Val = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, InEltVT, In,
29576 DAG.getIntPtrConstant(i, dl));
29577 Ops[i] = DAG.getNode(ISD::TRUNCATE, dl, EltVT, Val);
29578 }
29579 Results.push_back(DAG.getBuildVector(WidenVT, dl, Ops));
29580 return;
29582 // With AVX512 there are some cases that can use a target specific
29583 // truncate node to go from 256/512 to less than 128 with zeros in the
29584 // upper elements of the 128 bit result.
29585 if (Subtarget.hasAVX512() && isTypeLegal(InVT)) {
29586 // We can use VTRUNC directly for 256-bit inputs with VLX or for any 512.
29587 if ((InBits == 256 && Subtarget.hasVLX()) || InBits == 512) {
29588 Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
29589 return;
29590 }
29591 // There's one case we can widen to 512 bits and use VTRUNC.
29592 if (InVT == MVT::v4i64 && VT == MVT::v4i8 && isTypeLegal(MVT::v8i64)) {
29593 In = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i64, In,
29594 DAG.getUNDEF(MVT::v4i64));
29595 Results.push_back(DAG.getNode(X86ISD::VTRUNC, dl, WidenVT, In));
29596 return;
29597 }
29598 }
29599 if (Subtarget.hasVLX() && InVT == MVT::v8i64 && VT == MVT::v8i8 &&
29600 getTypeAction(*DAG.getContext(), InVT) == TypeSplitVector &&
29601 isTypeLegal(MVT::v4i64)) {
29602 // Input needs to be split and output needs to widened. Let's use two
29603 // VTRUNCs, and shuffle their results together into the wider type.
29605 std::tie(Lo, Hi) = DAG.SplitVector(In, dl);
29607 Lo = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Lo);
29608 Hi = DAG.getNode(X86ISD::VTRUNC, dl, MVT::v16i8, Hi);
29609 SDValue Res = DAG.getVectorShuffle(MVT::v16i8, dl, Lo, Hi,
29610 { 0, 1, 2, 3, 16, 17, 18, 19,
29611 -1, -1, -1, -1, -1, -1, -1, -1 });
29612 Results.push_back(Res);
29613 return;
29614 }
29616 return;
29617 }
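// Shuffle sketch: each v4i64 half truncates into the low four bytes of a
// v16i8, and mask {0,1,2,3, 16,17,18,19, -1...} glues Lo's bytes 0-3 to
// Hi's bytes 0-3 (second-operand lanes are numbered from 16), leaving the
// upper eight lanes undefined.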
29618 case ISD::ANY_EXTEND:
29619 // Right now, only MVT::v8i8 has Custom action for an illegal type.
29620 // It's intended to custom handle the input type.
29621 assert(N->getValueType(0) == MVT::v8i8 &&
29622 "Do not know how to legalize this Node");
29624 case ISD::SIGN_EXTEND:
29625 case ISD::ZERO_EXTEND: {
29626 EVT VT = N->getValueType(0);
29627 SDValue In = N->getOperand(0);
29628 EVT InVT = In.getValueType();
29629 if (!Subtarget.hasSSE41() && VT == MVT::v4i64 &&
29630 (InVT == MVT::v4i16 || InVT == MVT::v4i8)){
29631 assert(getTypeAction(*DAG.getContext(), InVT) == TypeWidenVector &&
29632 "Unexpected type action!");
29633 assert(N->getOpcode() == ISD::SIGN_EXTEND && "Unexpected opcode");
29634 // Custom split this so we can extend i8/i16->i32 invec. This is better
29635 // since sign_extend_inreg i8/i16->i64 requires an extend to i32 using
29636 // sra. Then extending from i32 to i64 using pcmpgt. By custom splitting
29637 // we allow the sra from the extend to i32 to be shared by the split.
29638 In = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, In);
29640 // Fill a vector with sign bits for each element.
29641 SDValue Zero = DAG.getConstant(0, dl, MVT::v4i32);
29642 SDValue SignBits = DAG.getSetCC(dl, MVT::v4i32, Zero, In, ISD::SETGT);
29644 // Create an unpackl and unpackh to interleave the sign bits then bitcast
29645 // to v2i64.
29646 SDValue Lo = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
29647 {0, 4, 1, 5});
29648 Lo = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Lo);
29649 SDValue Hi = DAG.getVectorShuffle(MVT::v4i32, dl, In, SignBits,
29650 {2, 6, 3, 7});
29651 Hi = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Hi);
29653 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
29654 Results.push_back(Res);
29655 return;
29656 }
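// e.g. with In = <a,b,c,d> and SignBits = <sa,sb,sc,sd>, mask {0,4,1,5}
// produces <a,sa,b,sb> and {2,6,3,7} produces <c,sc,d,sd>; bitcast to
// v2i64 (little endian) these are exactly the sign-extended lo/hi halves.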
29658 if (VT == MVT::v16i32 || VT == MVT::v8i64) {
29659 if (!InVT.is128BitVector()) {
29660 // Not a 128 bit vector, but maybe type legalization will promote
29661 // it to 128 bits.
29662 if (getTypeAction(*DAG.getContext(), InVT) != TypePromoteInteger)
29663 return;
29664 InVT = getTypeToTransformTo(*DAG.getContext(), InVT);
29665 if (!InVT.is128BitVector())
29666 return;
29668 // Promote the input to 128 bits. Type legalization will turn this into
29669 // zext_inreg/sext_inreg.
29670 In = DAG.getNode(N->getOpcode(), dl, InVT, In);
29671 }
29673 // Perform custom splitting instead of the two stage extend we would get
29674 // by default.
29675 EVT LoVT, HiVT;
29676 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0));
29677 assert(isTypeLegal(LoVT) && "Split VT not legal?");
29679 SDValue Lo = getExtendInVec(N->getOpcode(), dl, LoVT, In, DAG);
29681 // We need to shift the input over by half the number of elements.
29682 unsigned NumElts = InVT.getVectorNumElements();
29683 unsigned HalfNumElts = NumElts / 2;
29684 SmallVector<int, 16> ShufMask(NumElts, SM_SentinelUndef);
29685 for (unsigned i = 0; i != HalfNumElts; ++i)
29686 ShufMask[i] = i + HalfNumElts;
29688 SDValue Hi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
29689 Hi = getExtendInVec(N->getOpcode(), dl, HiVT, Hi, DAG);
29691 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Lo, Hi);
29692 Results.push_back(Res);
29693 return;
29694 }
29696 case ISD::FP_TO_SINT:
29697 case ISD::STRICT_FP_TO_SINT:
29698 case ISD::FP_TO_UINT:
29699 case ISD::STRICT_FP_TO_UINT: {
29700 bool IsStrict = N->isStrictFPOpcode();
29701 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
29702 N->getOpcode() == ISD::STRICT_FP_TO_SINT;
29703 EVT VT = N->getValueType(0);
29704 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
29705 EVT SrcVT = Src.getValueType();
29707 if (VT.isVector() && VT.getScalarSizeInBits() < 32) {
29708 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
29709 "Unexpected type action!");
29711 // Try to create a 128 bit vector, but don't exceed a 32 bit element.
29712 unsigned NewEltWidth = std::min(128 / VT.getVectorNumElements(), 32U);
29713 MVT PromoteVT = MVT::getVectorVT(MVT::getIntegerVT(NewEltWidth),
29714 VT.getVectorNumElements());
29715 SDValue Res;
29716 SDValue Chain;
29717 if (IsStrict) {
29718 Res = DAG.getNode(ISD::STRICT_FP_TO_SINT, dl, {PromoteVT, MVT::Other},
29719 {N->getOperand(0), Src});
29720 Chain = Res.getValue(1);
29721 } else
29722 Res = DAG.getNode(ISD::FP_TO_SINT, dl, PromoteVT, Src);
29724 // Preserve what we know about the size of the original result. Except
29725 // when the result is v2i32 since we can't widen the assert.
29726 if (PromoteVT != MVT::v2i32)
29727 Res = DAG.getNode(!IsSigned ? ISD::AssertZext : ISD::AssertSext,
29728 dl, PromoteVT, Res,
29729 DAG.getValueType(VT.getVectorElementType()));
29731 // Truncate back to the original width.
29732 Res = DAG.getNode(ISD::TRUNCATE, dl, VT, Res);
29734 // Now widen to 128 bits.
29735 unsigned NumConcats = 128 / VT.getSizeInBits();
29736 MVT ConcatVT = MVT::getVectorVT(VT.getSimpleVT().getVectorElementType(),
29737 VT.getVectorNumElements() * NumConcats);
29738 SmallVector<SDValue, 8> ConcatOps(NumConcats, DAG.getUNDEF(VT));
29739 ConcatOps[0] = Res;
29740 Res = DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatVT, ConcatOps);
29741 Results.push_back(Res);
29742 if (IsStrict)
29743 Results.push_back(Chain);
29744 return;
29745 }
29748 if (VT == MVT::v2i32) {
29749 assert((IsSigned || Subtarget.hasAVX512()) &&
29750 "Can only handle signed conversion without AVX512");
29751 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
29752 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
29753 "Unexpected type action!");
29754 if (Src.getValueType() == MVT::v2f64) {
29755 unsigned Opc;
29756 if (IsStrict)
29757 Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
29758 else
29759 Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
29761 // If we have VLX we can emit a target specific FP_TO_UINT node.
29762 if (!IsSigned && !Subtarget.hasVLX()) {
29763 // Otherwise we can defer to the generic legalizer which will widen
29764 // the input as well. This will be further widened during op
29765 // legalization to v8i32<-v8f64.
29766 // For strict nodes we'll need to widen ourselves.
29767 // FIXME: Fix the type legalizer to safely widen strict nodes?
29768 if (!IsStrict)
29769 return;
29770 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f64, Src,
29771 DAG.getConstantFP(0.0, dl, MVT::v2f64));
29772 Opc = N->getOpcode();
29773 }
29774 SDValue Res;
29775 SDValue Chain;
29776 if (IsStrict) {
29777 Res = DAG.getNode(Opc, dl, {MVT::v4i32, MVT::Other},
29778 {N->getOperand(0), Src});
29779 Chain = Res.getValue(1);
29780 } else
29781 Res = DAG.getNode(Opc, dl, MVT::v4i32, Src);
29783 Results.push_back(Res);
29784 if (IsStrict)
29785 Results.push_back(Chain);
29786 return;
29787 }
29789 // Custom widen strict v2f32->v2i32 by padding with zeros.
29790 // FIXME: Should generic type legalizer do this?
29791 if (Src.getValueType() == MVT::v2f32 && IsStrict) {
29792 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
29793 DAG.getConstantFP(0.0, dl, MVT::v2f32));
29794 SDValue Res = DAG.getNode(N->getOpcode(), dl, {MVT::v4i32, MVT::Other},
29795 {N->getOperand(0), Src});
29796 Results.push_back(Res);
29797 Results.push_back(Res.getValue(1));
29798 return;
29799 }
29801 // The FP_TO_INTHelper below only handles f32/f64/f80 scalar inputs,
29802 // so early out here.
29803 return;
29804 }
29806 assert(!VT.isVector() && "Vectors should have been handled above!");
29808 if (Subtarget.hasDQI() && VT == MVT::i64 &&
29809 (SrcVT == MVT::f32 || SrcVT == MVT::f64)) {
29810 assert(!Subtarget.is64Bit() && "i64 should be legal");
29811 unsigned NumElts = Subtarget.hasVLX() ? 2 : 8;
29812 // If we use a 128-bit result we might need to use a target specific node.
29813 unsigned SrcElts =
29814 std::max(NumElts, 128U / (unsigned)SrcVT.getSizeInBits());
29815 MVT VecVT = MVT::getVectorVT(MVT::i64, NumElts);
29816 MVT VecInVT = MVT::getVectorVT(SrcVT.getSimpleVT(), SrcElts);
29817 unsigned Opc = N->getOpcode();
29818 if (NumElts != SrcElts) {
29819 if (IsStrict)
29820 Opc = IsSigned ? X86ISD::STRICT_CVTTP2SI : X86ISD::STRICT_CVTTP2UI;
29821 else
29822 Opc = IsSigned ? X86ISD::CVTTP2SI : X86ISD::CVTTP2UI;
29823 }
29825 SDValue ZeroIdx = DAG.getIntPtrConstant(0, dl);
29826 SDValue Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VecInVT,
29827 DAG.getConstantFP(0.0, dl, VecInVT), Src,
29828 ZeroIdx);
29829 SDValue Chain;
29830 if (IsStrict) {
29831 SDVTList Tys = DAG.getVTList(VecVT, MVT::Other);
29832 Res = DAG.getNode(Opc, SDLoc(N), Tys, N->getOperand(0), Res);
29833 Chain = Res.getValue(1);
29834 } else
29835 Res = DAG.getNode(Opc, SDLoc(N), VecVT, Res);
29836 Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Res, ZeroIdx);
29837 Results.push_back(Res);
29838 if (IsStrict)
29839 Results.push_back(Chain);
29840 return;
29841 }
29843 SDValue Chain;
29844 if (SDValue V = FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, Chain)) {
29845 Results.push_back(V);
29846 if (IsStrict)
29847 Results.push_back(Chain);
29848 }
29849 return;
29850 }
29851 case ISD::LRINT:
29852 case ISD::LLRINT: {
29853 if (SDValue V = LRINT_LLRINTHelper(N, DAG))
29854 Results.push_back(V);
29855 return;
29856 }
29858 case ISD::SINT_TO_FP:
29859 case ISD::STRICT_SINT_TO_FP:
29860 case ISD::UINT_TO_FP:
29861 case ISD::STRICT_UINT_TO_FP: {
29862 bool IsStrict = N->isStrictFPOpcode();
29863 bool IsSigned = N->getOpcode() == ISD::SINT_TO_FP ||
29864 N->getOpcode() == ISD::STRICT_SINT_TO_FP;
29865 EVT VT = N->getValueType(0);
29866 if (VT != MVT::v2f32)
29867 return;
29868 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
29869 EVT SrcVT = Src.getValueType();
29870 if (Subtarget.hasDQI() && Subtarget.hasVLX() && SrcVT == MVT::v2i64) {
29871 if (IsStrict) {
29872 unsigned Opc = IsSigned ? X86ISD::STRICT_CVTSI2P
29873 : X86ISD::STRICT_CVTUI2P;
29874 SDValue Res = DAG.getNode(Opc, dl, {MVT::v4f32, MVT::Other},
29875 {N->getOperand(0), Src});
29876 Results.push_back(Res);
29877 Results.push_back(Res.getValue(1));
29878 } else {
29879 unsigned Opc = IsSigned ? X86ISD::CVTSI2P : X86ISD::CVTUI2P;
29880 Results.push_back(DAG.getNode(Opc, dl, MVT::v4f32, Src));
29881 }
29882 return;
29883 }
29884 if (SrcVT == MVT::v2i64 && !IsSigned && Subtarget.is64Bit() &&
29885 Subtarget.hasSSE41() && !Subtarget.hasAVX512()) {
29886 SDValue Zero = DAG.getConstant(0, dl, SrcVT);
29887 SDValue One = DAG.getConstant(1, dl, SrcVT);
29888 SDValue Sign = DAG.getNode(ISD::OR, dl, SrcVT,
29889 DAG.getNode(ISD::SRL, dl, SrcVT, Src, One),
29890 DAG.getNode(ISD::AND, dl, SrcVT, Src, One));
29891 SDValue IsNeg = DAG.getSetCC(dl, MVT::v2i64, Src, Zero, ISD::SETLT);
29892 SDValue SignSrc = DAG.getSelect(dl, SrcVT, IsNeg, Sign, Src);
29893 SmallVector<SDValue, 4> SignCvts(4, DAG.getConstantFP(0.0, dl, MVT::f32));
29894 for (int i = 0; i != 2; ++i) {
29895 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64,
29896 SignSrc, DAG.getIntPtrConstant(i, dl));
29897 if (IsStrict)
29898 SignCvts[i] =
29899 DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {MVT::f32, MVT::Other},
29900 {N->getOperand(0), Elt});
29901 else
29902 SignCvts[i] = DAG.getNode(ISD::SINT_TO_FP, dl, MVT::f32, Elt);
29903 }
29904 SDValue SignCvt = DAG.getBuildVector(MVT::v4f32, dl, SignCvts);
29905 SDValue Slow, Chain;
29906 if (IsStrict) {
29907 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
29908 SignCvts[0].getValue(1), SignCvts[1].getValue(1));
29909 Slow = DAG.getNode(ISD::STRICT_FADD, dl, {MVT::v4f32, MVT::Other},
29910 {Chain, SignCvt, SignCvt});
29911 Chain = Slow.getValue(1);
29912 } else
29913 Slow = DAG.getNode(ISD::FADD, dl, MVT::v4f32, SignCvt, SignCvt);
29915 IsNeg = DAG.getBitcast(MVT::v4i32, IsNeg);
29916 IsNeg =
29917 DAG.getVectorShuffle(MVT::v4i32, dl, IsNeg, IsNeg, {1, 3, -1, -1});
29918 SDValue Cvt = DAG.getSelect(dl, MVT::v4f32, IsNeg, Slow, SignCvt);
29919 Results.push_back(Cvt);
29920 if (IsStrict)
29921 Results.push_back(Chain);
29922 return;
29923 }
29925 if (SrcVT != MVT::v2i32)
29926 return;
29928 if (IsSigned || Subtarget.hasAVX512()) {
29929 if (!IsStrict)
29930 return;
29932 // Custom widen strict v2i32->v2f32 to avoid scalarization.
29933 // FIXME: Should generic type legalizer do this?
29934 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i32, Src,
29935 DAG.getConstant(0, dl, MVT::v2i32));
29936 SDValue Res = DAG.getNode(N->getOpcode(), dl, {MVT::v4f32, MVT::Other},
29937 {N->getOperand(0), Src});
29938 Results.push_back(Res);
29939 Results.push_back(Res.getValue(1));
29940 return;
29941 }
29943 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
29944 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64, Src);
29945 SDValue VBias =
29946 DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL), dl, MVT::v2f64);
29947 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
29948 DAG.getBitcast(MVT::v2i64, VBias));
29949 Or = DAG.getBitcast(MVT::v2f64, Or);
29950 if (IsStrict) {
29951 SDValue Sub = DAG.getNode(ISD::STRICT_FSUB, dl, {MVT::v2f64, MVT::Other},
29952 {N->getOperand(0), Or, VBias});
29953 SDValue Res = DAG.getNode(X86ISD::STRICT_VFPROUND, dl,
29954 {MVT::v4f32, MVT::Other},
29955 {Sub.getValue(1), Sub});
29956 Results.push_back(Res);
29957 Results.push_back(Res.getValue(1));
29958 } else {
29959 // TODO: Are there any fast-math-flags to propagate here?
29960 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
29961 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
29962 }
29963 return;
29964 }
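// The 0x4330000000000000 bit pattern is 2^52 as a double: OR'ing a
// zero-extended 32-bit integer into the mantissa of 2^52 produces the
// exact double value 2^52 + x, so subtracting the bias recovers x.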
29965 case ISD::STRICT_FP_ROUND:
29966 case ISD::FP_ROUND: {
29967 bool IsStrict = N->isStrictFPOpcode();
29968 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
29969 if (!isTypeLegal(Src.getValueType()))
29970 return;
29971 SDValue V;
29972 if (IsStrict)
29973 V = DAG.getNode(X86ISD::STRICT_VFPROUND, dl, {MVT::v4f32, MVT::Other},
29974 {N->getOperand(0), N->getOperand(1)});
29975 else
29976 V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
29977 Results.push_back(V);
29978 if (IsStrict)
29979 Results.push_back(V.getValue(1));
29980 return;
29981 }
29982 case ISD::FP_EXTEND:
29983 case ISD::STRICT_FP_EXTEND: {
29984 // Right now, only MVT::v2f32 has OperationAction for FP_EXTEND.
29985 // No other ValueType for FP_EXTEND should reach this point.
29986 assert(N->getValueType(0) == MVT::v2f32 &&
29987 "Do not know how to legalize this Node");
29990 case ISD::INTRINSIC_W_CHAIN: {
29991 unsigned IntNo = N->getConstantOperandVal(1);
29992 switch (IntNo) {
29993 default : llvm_unreachable("Do not know how to custom type "
29994 "legalize this intrinsic operation!");
29995 case Intrinsic::x86_rdtsc:
29996 return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget,
29997 Results);
29998 case Intrinsic::x86_rdtscp:
29999 return getReadTimeStampCounter(N, dl, X86::RDTSCP, DAG, Subtarget,
30000 Results);
30001 case Intrinsic::x86_rdpmc:
30002 expandIntrinsicWChainHelper(N, dl, DAG, X86::RDPMC, X86::ECX, Subtarget,
30003 Results);
30004 return;
30005 case Intrinsic::x86_xgetbv:
30006 expandIntrinsicWChainHelper(N, dl, DAG, X86::XGETBV, X86::ECX, Subtarget,
30007 Results);
30008 return;
30009 }
30010 }
30011 case ISD::READCYCLECOUNTER: {
30012 return getReadTimeStampCounter(N, dl, X86::RDTSC, DAG, Subtarget, Results);
30013 }
30014 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
30015 EVT T = N->getValueType(0);
30016 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
30017 bool Regs64bit = T == MVT::i128;
30018 assert((!Regs64bit || Subtarget.hasCmpxchg16b()) &&
30019 "64-bit ATOMIC_CMP_SWAP_WITH_SUCCESS requires CMPXCHG16B");
30020 MVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
30021 SDValue cpInL, cpInH;
30022 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
30023 DAG.getConstant(0, dl, HalfT));
30024 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
30025 DAG.getConstant(1, dl, HalfT));
30026 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
30027 Regs64bit ? X86::RAX : X86::EAX,
30028 cpInL, SDValue());
30029 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
30030 Regs64bit ? X86::RDX : X86::EDX,
30031 cpInH, cpInL.getValue(1));
30032 SDValue swapInL, swapInH;
30033 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
30034 DAG.getConstant(0, dl, HalfT));
30035 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
30036 DAG.getConstant(1, dl, HalfT));
30037 swapInH =
30038 DAG.getCopyToReg(cpInH.getValue(0), dl, Regs64bit ? X86::RCX : X86::ECX,
30039 swapInH, cpInH.getValue(1));
30040 // If the current function needs the base pointer, RBX,
30041 // we shouldn't use cmpxchg directly: lowering that instruction
30042 // clobbers RBX, and since RBX will then be a reserved register,
30043 // the register allocator will not make sure its value is
30044 // properly saved and restored around this live-range.
30046 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
30047 SDValue Result;
30048 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
30049 Register BasePtr = TRI->getBaseRegister();
30050 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
30051 if (TRI->hasBasePointer(DAG.getMachineFunction()) &&
30052 (BasePtr == X86::RBX || BasePtr == X86::EBX)) {
30053 // ISel prefers the LCMPXCHG64 variant.
30054 // If that assert breaks, that means it is not the case anymore,
30055 // and we need to teach LCMPXCHG8_SAVE_EBX_DAG how to save RBX,
30056 // not just EBX. This is a matter of accepting i64 input for that
30057 // pseudo, and restoring into the register of the right wide
30058 // in expand pseudo. Everything else should just work.
30059 assert(((Regs64bit == (BasePtr == X86::RBX)) || BasePtr == X86::EBX) &&
30060 "Saving only half of the RBX");
30061 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_SAVE_RBX_DAG
30062 : X86ISD::LCMPXCHG8_SAVE_EBX_DAG;
30063 SDValue RBXSave = DAG.getCopyFromReg(swapInH.getValue(0), dl,
30064 Regs64bit ? X86::RBX : X86::EBX,
30065 HalfT, swapInH.getValue(1));
30066 SDValue Ops[] = {/*Chain*/ RBXSave.getValue(1), N->getOperand(1), swapInL,
30067 swapInH,
30068 /*Glue*/ RBXSave.getValue(2)};
30069 Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
30070 } else {
30071 unsigned Opcode =
30072 Regs64bit ? X86ISD::LCMPXCHG16_DAG : X86ISD::LCMPXCHG8_DAG;
30073 swapInL = DAG.getCopyToReg(swapInH.getValue(0), dl,
30074 Regs64bit ? X86::RBX : X86::EBX, swapInL,
30075 swapInH.getValue(1));
30076 SDValue Ops[] = {swapInL.getValue(0), N->getOperand(1),
30077 swapInL.getValue(1)};
30078 Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
30079 }
30080 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
30081 Regs64bit ? X86::RAX : X86::EAX,
30082 HalfT, Result.getValue(1));
30083 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
30084 Regs64bit ? X86::RDX : X86::EDX,
30085 HalfT, cpOutL.getValue(2));
30086 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
30088 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
30089 MVT::i32, cpOutH.getValue(2));
30090 SDValue Success = getSETCC(X86::COND_E, EFLAGS, dl, DAG);
30091 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
30093 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
30094 Results.push_back(Success);
30095 Results.push_back(EFLAGS.getValue(1));
30096 return;
30097 }
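// Register-convention sketch: CMPXCHG8B/CMPXCHG16B compares EDX:EAX
// (RDX:RAX) with the memory operand and, on match, stores ECX:EBX
// (RCX:RBX); ZF reports success, which getSETCC materializes above.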
30098 case ISD::ATOMIC_LOAD: {
30099 assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
30100 bool NoImplicitFloatOps =
30101 DAG.getMachineFunction().getFunction().hasFnAttribute(
30102 Attribute::NoImplicitFloat);
30103 if (!Subtarget.useSoftFloat() && !NoImplicitFloatOps) {
30104 auto *Node = cast<AtomicSDNode>(N);
30105 if (Subtarget.hasSSE1()) {
30106 // Use a VZEXT_LOAD which will be selected as MOVQ or XORPS+MOVLPS.
30107 // Then extract the lower 64-bits.
30108 MVT LdVT = Subtarget.hasSSE2() ? MVT::v2i64 : MVT::v4f32;
30109 SDVTList Tys = DAG.getVTList(LdVT, MVT::Other);
30110 SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
30111 SDValue Ld = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
30112 MVT::i64, Node->getMemOperand());
30113 if (Subtarget.hasSSE2()) {
30114 SDValue Res = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Ld,
30115 DAG.getIntPtrConstant(0, dl));
30116 Results.push_back(Res);
30117 Results.push_back(Ld.getValue(1));
30118 return;
30119 }
30120 // We use an alternative sequence for SSE1 that extracts as v2f32 and
30121 // then casts to i64. This avoids a 128-bit stack temporary being
30122 // created by type legalization if we were to cast v4f32->v2i64.
30123 SDValue Res = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2f32, Ld,
30124 DAG.getIntPtrConstant(0, dl));
30125 Res = DAG.getBitcast(MVT::i64, Res);
30126 Results.push_back(Res);
30127 Results.push_back(Ld.getValue(1));
30128 return;
30129 }
30130 if (Subtarget.hasX87()) {
30131 // First load this into an 80-bit X87 register. This will put the whole
30132 // integer into the significand.
30133 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
30134 SDValue Ops[] = { Node->getChain(), Node->getBasePtr() };
30135 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::FILD,
30136 dl, Tys, Ops, MVT::i64,
30137 Node->getMemOperand());
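// (The f80 significand is 64 bits wide, so the i64 round-trips exactly;
// only this FILD needs to be atomic, the spill and reload below do not.)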
30138 SDValue Chain = Result.getValue(1);
30140 // Now store the X87 register to a stack temporary and convert to i64.
30141 // This store is not atomic and doesn't need to be.
30142 // FIXME: We don't need a stack temporary if the result of the load
30143 // is already being stored. We could just directly store there.
30144 SDValue StackPtr = DAG.CreateStackTemporary(MVT::i64);
30145 int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();
30146 MachinePointerInfo MPI =
30147 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), SPFI);
30148 SDValue StoreOps[] = { Chain, Result, StackPtr };
30149 Chain = DAG.getMemIntrinsicNode(
30150 X86ISD::FIST, dl, DAG.getVTList(MVT::Other), StoreOps, MVT::i64,
30151 MPI, None /*Align*/, MachineMemOperand::MOStore);
30153 // Finally load the value back from the stack temporary and return it.
30154 // This load is not atomic and doesn't need to be.
30155 // This load will be further type legalized.
30156 Result = DAG.getLoad(MVT::i64, dl, Chain, StackPtr, MPI);
30157 Results.push_back(Result);
30158 Results.push_back(Result.getValue(1));
30159 return;
30160 }
30161 }
30162 // TODO: Use MOVLPS when SSE1 is available?
30163 // Delegate to generic TypeLegalization. Situations we can really handle
30164 // should have already been dealt with by AtomicExpandPass.cpp.
30165 break;
30166 }
30167 case ISD::ATOMIC_SWAP:
30168 case ISD::ATOMIC_LOAD_ADD:
30169 case ISD::ATOMIC_LOAD_SUB:
30170 case ISD::ATOMIC_LOAD_AND:
30171 case ISD::ATOMIC_LOAD_OR:
30172 case ISD::ATOMIC_LOAD_XOR:
30173 case ISD::ATOMIC_LOAD_NAND:
30174 case ISD::ATOMIC_LOAD_MIN:
30175 case ISD::ATOMIC_LOAD_MAX:
30176 case ISD::ATOMIC_LOAD_UMIN:
30177 case ISD::ATOMIC_LOAD_UMAX:
30178 // Delegate to generic TypeLegalization. Situations we can really handle
30179 // should have already been dealt with by AtomicExpandPass.cpp.
30180 break;
30182 case ISD::BITCAST: {
30183 assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
30184 EVT DstVT = N->getValueType(0);
30185 EVT SrcVT = N->getOperand(0).getValueType();
30187 // If this is a bitcast from a v64i1 k-register to a i64 on a 32-bit target
30188 // we can split using the k-register rather than memory.
30189 if (SrcVT == MVT::v64i1 && DstVT == MVT::i64 && Subtarget.hasBWI()) {
30190 assert(!Subtarget.is64Bit() && "Expected 32-bit mode");
30192 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0);
30193 Lo = DAG.getBitcast(MVT::i32, Lo);
30194 Hi = DAG.getBitcast(MVT::i32, Hi);
30195 SDValue Res = DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
30196 Results.push_back(Res);
30197 return;
30198 }
30200 if (DstVT.isVector() && SrcVT == MVT::x86mmx) {
30201 // FIXME: Use v4f32 for SSE1?
30202 assert(Subtarget.hasSSE2() && "Requires SSE2");
30203 assert(getTypeAction(*DAG.getContext(), DstVT) == TypeWidenVector &&
30204 "Unexpected type action!");
30205 EVT WideVT = getTypeToTransformTo(*DAG.getContext(), DstVT);
30206 SDValue Res = DAG.getNode(X86ISD::MOVQ2DQ, dl, MVT::v2i64,
30207 N->getOperand(0));
30208 Res = DAG.getBitcast(WideVT, Res);
30209 Results.push_back(Res);
30210 return;
30211 }
30213 return;
30214 }
30215 case ISD::MGATHER: {
30216 EVT VT = N->getValueType(0);
30217 if ((VT == MVT::v2f32 || VT == MVT::v2i32) &&
30218 (Subtarget.hasVLX() || !Subtarget.hasAVX512())) {
30219 auto *Gather = cast<MaskedGatherSDNode>(N);
30220 SDValue Index = Gather->getIndex();
30221 if (Index.getValueType() != MVT::v2i64)
30222 return;
30223 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
30224 "Unexpected type action!");
30225 EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
30226 SDValue Mask = Gather->getMask();
30227 assert(Mask.getValueType() == MVT::v2i1 && "Unexpected mask type");
30228 SDValue PassThru = DAG.getNode(ISD::CONCAT_VECTORS, dl, WideVT,
30229 Gather->getPassThru(),
30230 DAG.getUNDEF(VT));
30231 if (!Subtarget.hasVLX()) {
30232 // We need to widen the mask, but the instruction will only use 2
30233 // of its elements. So we can use undef.
30234 Mask = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4i1, Mask,
30235 DAG.getUNDEF(MVT::v2i1));
30236 Mask = DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i32, Mask);
30237 }
30238 SDValue Ops[] = { Gather->getChain(), PassThru, Mask,
30239 Gather->getBasePtr(), Index, Gather->getScale() };
30240 SDValue Res = DAG.getMemIntrinsicNode(
30241 X86ISD::MGATHER, dl, DAG.getVTList(WideVT, MVT::Other), Ops,
30242 Gather->getMemoryVT(), Gather->getMemOperand());
30243 Results.push_back(Res);
30244 Results.push_back(Res.getValue(1));
30245 return;
30246 }
30247 return;
30248 }
30249 case ISD::LOAD: {
30250 // Use an f64/i64 load and a scalar_to_vector for v2f32/v2i32 loads. This
30251 // avoids scalarizing in 32-bit mode. In 64-bit mode this avoids a int->fp
30252 // cast since type legalization will try to use an i64 load.
30253 MVT VT = N->getSimpleValueType(0);
30254 assert(VT.isVector() && VT.getSizeInBits() == 64 && "Unexpected VT");
30255 assert(getTypeAction(*DAG.getContext(), VT) == TypeWidenVector &&
30256 "Unexpected type action!");
30257 if (!ISD::isNON_EXTLoad(N))
30258 return;
30259 auto *Ld = cast<LoadSDNode>(N);
30260 if (Subtarget.hasSSE2()) {
30261 MVT LdVT = Subtarget.is64Bit() && VT.isInteger() ? MVT::i64 : MVT::f64;
30262 SDValue Res = DAG.getLoad(LdVT, dl, Ld->getChain(), Ld->getBasePtr(),
30263 Ld->getPointerInfo(), Ld->getOriginalAlign(),
30264 Ld->getMemOperand()->getFlags());
30265 SDValue Chain = Res.getValue(1);
30266 MVT VecVT = MVT::getVectorVT(LdVT, 2);
30267 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Res);
30268 EVT WideVT = getTypeToTransformTo(*DAG.getContext(), VT);
30269 Res = DAG.getBitcast(WideVT, Res);
30270 Results.push_back(Res);
30271 Results.push_back(Chain);
30272 return;
30273 }
30274 assert(Subtarget.hasSSE1() && "Expected SSE");
30275 SDVTList Tys = DAG.getVTList(MVT::v4f32, MVT::Other);
30276 SDValue Ops[] = {Ld->getChain(), Ld->getBasePtr()};
30277 SDValue Res = DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
30278 MVT::i64, Ld->getMemOperand());
30279 Results.push_back(Res);
30280 Results.push_back(Res.getValue(1));
30281 return;
30282 }
30283 case ISD::ADDRSPACECAST: {
30284 SDValue V = LowerADDRSPACECAST(SDValue(N,0), DAG);
30285 Results.push_back(V);
30286 return;
30287 }
30288 case ISD::BITREVERSE:
30289 assert(N->getValueType(0) == MVT::i64 && "Unexpected VT!");
30290 assert(Subtarget.hasXOP() && "Expected XOP");
30291 // We can use VPPERM by copying to a vector register and back. We'll need
30292 // to move the scalar in two i32 pieces.
30293 Results.push_back(LowerBITREVERSE(SDValue(N, 0), Subtarget, DAG));
30294 return;
30295 }
30296 }
30298 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
30299 switch ((X86ISD::NodeType)Opcode) {
30300 case X86ISD::FIRST_NUMBER: break;
30301 #define NODE_NAME_CASE(NODE) case X86ISD::NODE: return "X86ISD::" #NODE;
30302 NODE_NAME_CASE(BSF)
30303 NODE_NAME_CASE(BSR)
30304 NODE_NAME_CASE(FSHL)
30305 NODE_NAME_CASE(FSHR)
30306 NODE_NAME_CASE(FAND)
30307 NODE_NAME_CASE(FANDN)
30308 NODE_NAME_CASE(FOR)
30309 NODE_NAME_CASE(FXOR)
30310 NODE_NAME_CASE(FILD)
30311 NODE_NAME_CASE(FIST)
30312 NODE_NAME_CASE(FP_TO_INT_IN_MEM)
30313 NODE_NAME_CASE(FLD)
30314 NODE_NAME_CASE(FST)
30315 NODE_NAME_CASE(CALL)
30316 NODE_NAME_CASE(BT)
30317 NODE_NAME_CASE(CMP)
30318 NODE_NAME_CASE(FCMP)
30319 NODE_NAME_CASE(STRICT_FCMP)
30320 NODE_NAME_CASE(STRICT_FCMPS)
30321 NODE_NAME_CASE(COMI)
30322 NODE_NAME_CASE(UCOMI)
30323 NODE_NAME_CASE(CMPM)
30324 NODE_NAME_CASE(STRICT_CMPM)
30325 NODE_NAME_CASE(CMPM_SAE)
30326 NODE_NAME_CASE(SETCC)
30327 NODE_NAME_CASE(SETCC_CARRY)
30328 NODE_NAME_CASE(FSETCC)
30329 NODE_NAME_CASE(FSETCCM)
30330 NODE_NAME_CASE(FSETCCM_SAE)
30331 NODE_NAME_CASE(CMOV)
30332 NODE_NAME_CASE(BRCOND)
30333 NODE_NAME_CASE(RET_FLAG)
30334 NODE_NAME_CASE(IRET)
30335 NODE_NAME_CASE(REP_STOS)
30336 NODE_NAME_CASE(REP_MOVS)
30337 NODE_NAME_CASE(GlobalBaseReg)
30338 NODE_NAME_CASE(Wrapper)
30339 NODE_NAME_CASE(WrapperRIP)
30340 NODE_NAME_CASE(MOVQ2DQ)
30341 NODE_NAME_CASE(MOVDQ2Q)
30342 NODE_NAME_CASE(MMX_MOVD2W)
30343 NODE_NAME_CASE(MMX_MOVW2D)
30344 NODE_NAME_CASE(PEXTRB)
30345 NODE_NAME_CASE(PEXTRW)
30346 NODE_NAME_CASE(INSERTPS)
30347 NODE_NAME_CASE(PINSRB)
30348 NODE_NAME_CASE(PINSRW)
30349 NODE_NAME_CASE(PSHUFB)
30350 NODE_NAME_CASE(ANDNP)
30351 NODE_NAME_CASE(BLENDI)
30352 NODE_NAME_CASE(BLENDV)
30353 NODE_NAME_CASE(HADD)
30354 NODE_NAME_CASE(HSUB)
30355 NODE_NAME_CASE(FHADD)
30356 NODE_NAME_CASE(FHSUB)
30357 NODE_NAME_CASE(CONFLICT)
30358 NODE_NAME_CASE(FMAX)
30359 NODE_NAME_CASE(FMAXS)
30360 NODE_NAME_CASE(FMAX_SAE)
30361 NODE_NAME_CASE(FMAXS_SAE)
30362 NODE_NAME_CASE(FMIN)
30363 NODE_NAME_CASE(FMINS)
30364 NODE_NAME_CASE(FMIN_SAE)
30365 NODE_NAME_CASE(FMINS_SAE)
30366 NODE_NAME_CASE(FMAXC)
30367 NODE_NAME_CASE(FMINC)
30368 NODE_NAME_CASE(FRSQRT)
30369 NODE_NAME_CASE(FRCP)
30370 NODE_NAME_CASE(EXTRQI)
30371 NODE_NAME_CASE(INSERTQI)
30372 NODE_NAME_CASE(TLSADDR)
30373 NODE_NAME_CASE(TLSBASEADDR)
30374 NODE_NAME_CASE(TLSCALL)
30375 NODE_NAME_CASE(EH_SJLJ_SETJMP)
30376 NODE_NAME_CASE(EH_SJLJ_LONGJMP)
30377 NODE_NAME_CASE(EH_SJLJ_SETUP_DISPATCH)
30378 NODE_NAME_CASE(EH_RETURN)
30379 NODE_NAME_CASE(TC_RETURN)
30380 NODE_NAME_CASE(FNSTCW16m)
30381 NODE_NAME_CASE(LCMPXCHG_DAG)
30382 NODE_NAME_CASE(LCMPXCHG8_DAG)
30383 NODE_NAME_CASE(LCMPXCHG16_DAG)
30384 NODE_NAME_CASE(LCMPXCHG8_SAVE_EBX_DAG)
30385 NODE_NAME_CASE(LCMPXCHG16_SAVE_RBX_DAG)
30386 NODE_NAME_CASE(LADD)
30387 NODE_NAME_CASE(LSUB)
30388 NODE_NAME_CASE(LOR)
30389 NODE_NAME_CASE(LXOR)
30390 NODE_NAME_CASE(LAND)
30391 NODE_NAME_CASE(VZEXT_MOVL)
30392 NODE_NAME_CASE(VZEXT_LOAD)
30393 NODE_NAME_CASE(VEXTRACT_STORE)
30394 NODE_NAME_CASE(VTRUNC)
30395 NODE_NAME_CASE(VTRUNCS)
30396 NODE_NAME_CASE(VTRUNCUS)
30397 NODE_NAME_CASE(VMTRUNC)
30398 NODE_NAME_CASE(VMTRUNCS)
30399 NODE_NAME_CASE(VMTRUNCUS)
30400 NODE_NAME_CASE(VTRUNCSTORES)
30401 NODE_NAME_CASE(VTRUNCSTOREUS)
30402 NODE_NAME_CASE(VMTRUNCSTORES)
30403 NODE_NAME_CASE(VMTRUNCSTOREUS)
30404 NODE_NAME_CASE(VFPEXT)
30405 NODE_NAME_CASE(STRICT_VFPEXT)
30406 NODE_NAME_CASE(VFPEXT_SAE)
30407 NODE_NAME_CASE(VFPEXTS)
30408 NODE_NAME_CASE(VFPEXTS_SAE)
30409 NODE_NAME_CASE(VFPROUND)
30410 NODE_NAME_CASE(STRICT_VFPROUND)
30411 NODE_NAME_CASE(VMFPROUND)
30412 NODE_NAME_CASE(VFPROUND_RND)
30413 NODE_NAME_CASE(VFPROUNDS)
30414 NODE_NAME_CASE(VFPROUNDS_RND)
30415 NODE_NAME_CASE(VSHLDQ)
30416 NODE_NAME_CASE(VSRLDQ)
30417 NODE_NAME_CASE(VSHL)
30418 NODE_NAME_CASE(VSRL)
30419 NODE_NAME_CASE(VSRA)
30420 NODE_NAME_CASE(VSHLI)
30421 NODE_NAME_CASE(VSRLI)
30422 NODE_NAME_CASE(VSRAI)
30423 NODE_NAME_CASE(VSHLV)
30424 NODE_NAME_CASE(VSRLV)
30425 NODE_NAME_CASE(VSRAV)
30426 NODE_NAME_CASE(VROTLI)
30427 NODE_NAME_CASE(VROTRI)
30428 NODE_NAME_CASE(VPPERM)
30429 NODE_NAME_CASE(CMPP)
30430 NODE_NAME_CASE(STRICT_CMPP)
30431 NODE_NAME_CASE(PCMPEQ)
30432 NODE_NAME_CASE(PCMPGT)
30433 NODE_NAME_CASE(PHMINPOS)
30434 NODE_NAME_CASE(ADD)
30435 NODE_NAME_CASE(SUB)
30436 NODE_NAME_CASE(ADC)
30437 NODE_NAME_CASE(SBB)
30438 NODE_NAME_CASE(SMUL)
30439 NODE_NAME_CASE(UMUL)
30440 NODE_NAME_CASE(OR)
  NODE_NAME_CASE(OR)
  NODE_NAME_CASE(XOR)
  NODE_NAME_CASE(AND)
  NODE_NAME_CASE(BEXTR)
  NODE_NAME_CASE(BZHI)
  NODE_NAME_CASE(PDEP)
  NODE_NAME_CASE(PEXT)
  NODE_NAME_CASE(MUL_IMM)
  NODE_NAME_CASE(MOVMSK)
  NODE_NAME_CASE(PTEST)
  NODE_NAME_CASE(TESTP)
  NODE_NAME_CASE(KORTEST)
  NODE_NAME_CASE(KTEST)
  NODE_NAME_CASE(KADD)
  NODE_NAME_CASE(KSHIFTL)
  NODE_NAME_CASE(KSHIFTR)
  NODE_NAME_CASE(PACKSS)
  NODE_NAME_CASE(PACKUS)
  NODE_NAME_CASE(PALIGNR)
  NODE_NAME_CASE(VALIGN)
  NODE_NAME_CASE(VSHLD)
  NODE_NAME_CASE(VSHRD)
  NODE_NAME_CASE(VSHLDV)
  NODE_NAME_CASE(VSHRDV)
  NODE_NAME_CASE(PSHUFD)
  NODE_NAME_CASE(PSHUFHW)
  NODE_NAME_CASE(PSHUFLW)
  NODE_NAME_CASE(SHUFP)
  NODE_NAME_CASE(SHUF128)
  NODE_NAME_CASE(MOVLHPS)
  NODE_NAME_CASE(MOVHLPS)
  NODE_NAME_CASE(MOVDDUP)
  NODE_NAME_CASE(MOVSHDUP)
  NODE_NAME_CASE(MOVSLDUP)
  NODE_NAME_CASE(MOVSD)
  NODE_NAME_CASE(MOVSS)
  NODE_NAME_CASE(UNPCKL)
  NODE_NAME_CASE(UNPCKH)
  NODE_NAME_CASE(VBROADCAST)
  NODE_NAME_CASE(VBROADCAST_LOAD)
  NODE_NAME_CASE(VBROADCASTM)
  NODE_NAME_CASE(SUBV_BROADCAST)
  NODE_NAME_CASE(VPERMILPV)
  NODE_NAME_CASE(VPERMILPI)
  NODE_NAME_CASE(VPERM2X128)
  NODE_NAME_CASE(VPERMV)
  NODE_NAME_CASE(VPERMV3)
  NODE_NAME_CASE(VPERMI)
  NODE_NAME_CASE(VPTERNLOG)
  NODE_NAME_CASE(VFIXUPIMM)
  NODE_NAME_CASE(VFIXUPIMM_SAE)
  NODE_NAME_CASE(VFIXUPIMMS)
  NODE_NAME_CASE(VFIXUPIMMS_SAE)
  NODE_NAME_CASE(VRANGE)
  NODE_NAME_CASE(VRANGE_SAE)
  NODE_NAME_CASE(VRANGES)
  NODE_NAME_CASE(VRANGES_SAE)
  NODE_NAME_CASE(PMULUDQ)
  NODE_NAME_CASE(PMULDQ)
  NODE_NAME_CASE(PSADBW)
  NODE_NAME_CASE(DBPSADBW)
  NODE_NAME_CASE(VASTART_SAVE_XMM_REGS)
  NODE_NAME_CASE(VAARG_64)
  NODE_NAME_CASE(WIN_ALLOCA)
  NODE_NAME_CASE(MEMBARRIER)
  NODE_NAME_CASE(MFENCE)
  NODE_NAME_CASE(SEG_ALLOCA)
  NODE_NAME_CASE(PROBED_ALLOCA)
  NODE_NAME_CASE(RDRAND)
  NODE_NAME_CASE(RDSEED)
  NODE_NAME_CASE(RDPKRU)
  NODE_NAME_CASE(WRPKRU)
  NODE_NAME_CASE(VPMADDUBSW)
  NODE_NAME_CASE(VPMADDWD)
  NODE_NAME_CASE(VPSHA)
  NODE_NAME_CASE(VPSHL)
  NODE_NAME_CASE(VPCOM)
  NODE_NAME_CASE(VPCOMU)
  NODE_NAME_CASE(VPERMIL2)
  NODE_NAME_CASE(FMSUB)
  NODE_NAME_CASE(STRICT_FMSUB)
  NODE_NAME_CASE(FNMADD)
  NODE_NAME_CASE(STRICT_FNMADD)
  NODE_NAME_CASE(FNMSUB)
  NODE_NAME_CASE(STRICT_FNMSUB)
  NODE_NAME_CASE(FMADDSUB)
  NODE_NAME_CASE(FMSUBADD)
  NODE_NAME_CASE(FMADD_RND)
  NODE_NAME_CASE(FNMADD_RND)
  NODE_NAME_CASE(FMSUB_RND)
  NODE_NAME_CASE(FNMSUB_RND)
  NODE_NAME_CASE(FMADDSUB_RND)
  NODE_NAME_CASE(FMSUBADD_RND)
  NODE_NAME_CASE(VPMADD52H)
  NODE_NAME_CASE(VPMADD52L)
  NODE_NAME_CASE(VRNDSCALE)
  NODE_NAME_CASE(STRICT_VRNDSCALE)
  NODE_NAME_CASE(VRNDSCALE_SAE)
  NODE_NAME_CASE(VRNDSCALES)
  NODE_NAME_CASE(VRNDSCALES_SAE)
  NODE_NAME_CASE(VREDUCE)
  NODE_NAME_CASE(VREDUCE_SAE)
  NODE_NAME_CASE(VREDUCES)
  NODE_NAME_CASE(VREDUCES_SAE)
  NODE_NAME_CASE(VGETMANT)
  NODE_NAME_CASE(VGETMANT_SAE)
  NODE_NAME_CASE(VGETMANTS)
  NODE_NAME_CASE(VGETMANTS_SAE)
  NODE_NAME_CASE(PCMPESTR)
  NODE_NAME_CASE(PCMPISTR)
  NODE_NAME_CASE(XTEST)
  NODE_NAME_CASE(COMPRESS)
  NODE_NAME_CASE(EXPAND)
  NODE_NAME_CASE(SELECTS)
  NODE_NAME_CASE(ADDSUB)
  NODE_NAME_CASE(RCP14)
  NODE_NAME_CASE(RCP14S)
  NODE_NAME_CASE(RCP28)
  NODE_NAME_CASE(RCP28_SAE)
  NODE_NAME_CASE(RCP28S)
  NODE_NAME_CASE(RCP28S_SAE)
  NODE_NAME_CASE(EXP2)
  NODE_NAME_CASE(EXP2_SAE)
  NODE_NAME_CASE(RSQRT14)
  NODE_NAME_CASE(RSQRT14S)
  NODE_NAME_CASE(RSQRT28)
  NODE_NAME_CASE(RSQRT28_SAE)
  NODE_NAME_CASE(RSQRT28S)
  NODE_NAME_CASE(RSQRT28S_SAE)
  NODE_NAME_CASE(FADD_RND)
  NODE_NAME_CASE(FADDS)
  NODE_NAME_CASE(FADDS_RND)
  NODE_NAME_CASE(FSUB_RND)
  NODE_NAME_CASE(FSUBS)
  NODE_NAME_CASE(FSUBS_RND)
  NODE_NAME_CASE(FMUL_RND)
  NODE_NAME_CASE(FMULS)
  NODE_NAME_CASE(FMULS_RND)
  NODE_NAME_CASE(FDIV_RND)
  NODE_NAME_CASE(FDIVS)
  NODE_NAME_CASE(FDIVS_RND)
  NODE_NAME_CASE(FSQRT_RND)
  NODE_NAME_CASE(FSQRTS)
  NODE_NAME_CASE(FSQRTS_RND)
  NODE_NAME_CASE(FGETEXP)
  NODE_NAME_CASE(FGETEXP_SAE)
  NODE_NAME_CASE(FGETEXPS)
  NODE_NAME_CASE(FGETEXPS_SAE)
  NODE_NAME_CASE(SCALEF)
  NODE_NAME_CASE(SCALEF_RND)
  NODE_NAME_CASE(SCALEFS)
  NODE_NAME_CASE(SCALEFS_RND)
  NODE_NAME_CASE(AVG)
  NODE_NAME_CASE(MULHRS)
  NODE_NAME_CASE(SINT_TO_FP_RND)
  NODE_NAME_CASE(UINT_TO_FP_RND)
  NODE_NAME_CASE(CVTTP2SI)
  NODE_NAME_CASE(CVTTP2UI)
  NODE_NAME_CASE(STRICT_CVTTP2SI)
  NODE_NAME_CASE(STRICT_CVTTP2UI)
  NODE_NAME_CASE(MCVTTP2SI)
  NODE_NAME_CASE(MCVTTP2UI)
  NODE_NAME_CASE(CVTTP2SI_SAE)
  NODE_NAME_CASE(CVTTP2UI_SAE)
  NODE_NAME_CASE(CVTTS2SI)
  NODE_NAME_CASE(CVTTS2UI)
  NODE_NAME_CASE(CVTTS2SI_SAE)
  NODE_NAME_CASE(CVTTS2UI_SAE)
  NODE_NAME_CASE(CVTSI2P)
  NODE_NAME_CASE(CVTUI2P)
  NODE_NAME_CASE(STRICT_CVTSI2P)
  NODE_NAME_CASE(STRICT_CVTUI2P)
  NODE_NAME_CASE(MCVTSI2P)
  NODE_NAME_CASE(MCVTUI2P)
  NODE_NAME_CASE(VFPCLASS)
  NODE_NAME_CASE(VFPCLASSS)
  NODE_NAME_CASE(MULTISHIFT)
  NODE_NAME_CASE(SCALAR_SINT_TO_FP)
  NODE_NAME_CASE(SCALAR_SINT_TO_FP_RND)
  NODE_NAME_CASE(SCALAR_UINT_TO_FP)
  NODE_NAME_CASE(SCALAR_UINT_TO_FP_RND)
  NODE_NAME_CASE(CVTPS2PH)
  NODE_NAME_CASE(STRICT_CVTPS2PH)
  NODE_NAME_CASE(MCVTPS2PH)
  NODE_NAME_CASE(CVTPH2PS)
  NODE_NAME_CASE(STRICT_CVTPH2PS)
  NODE_NAME_CASE(CVTPH2PS_SAE)
  NODE_NAME_CASE(CVTP2SI)
  NODE_NAME_CASE(CVTP2UI)
  NODE_NAME_CASE(MCVTP2SI)
  NODE_NAME_CASE(MCVTP2UI)
  NODE_NAME_CASE(CVTP2SI_RND)
  NODE_NAME_CASE(CVTP2UI_RND)
  NODE_NAME_CASE(CVTS2SI)
  NODE_NAME_CASE(CVTS2UI)
  NODE_NAME_CASE(CVTS2SI_RND)
  NODE_NAME_CASE(CVTS2UI_RND)
  NODE_NAME_CASE(CVTNE2PS2BF16)
  NODE_NAME_CASE(CVTNEPS2BF16)
  NODE_NAME_CASE(MCVTNEPS2BF16)
  NODE_NAME_CASE(DPBF16PS)
  NODE_NAME_CASE(LWPINS)
  NODE_NAME_CASE(MGATHER)
  NODE_NAME_CASE(MSCATTER)
  NODE_NAME_CASE(VPDPBUSD)
  NODE_NAME_CASE(VPDPBUSDS)
  NODE_NAME_CASE(VPDPWSSD)
  NODE_NAME_CASE(VPDPWSSDS)
  NODE_NAME_CASE(VPSHUFBITQMB)
  NODE_NAME_CASE(GF2P8MULB)
  NODE_NAME_CASE(GF2P8AFFINEQB)
  NODE_NAME_CASE(GF2P8AFFINEINVQB)
  NODE_NAME_CASE(NT_CALL)
  NODE_NAME_CASE(NT_BRIND)
  NODE_NAME_CASE(UMWAIT)
  NODE_NAME_CASE(TPAUSE)
  NODE_NAME_CASE(ENQCMD)
  NODE_NAME_CASE(ENQCMDS)
  NODE_NAME_CASE(VP2INTERSECT)
  }
  return nullptr;
#undef NODE_NAME_CASE
}

/// Return true if the addressing mode represented by AM is legal for this
/// target, for a load/store of the specified type.
bool X86TargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                              const AddrMode &AM, Type *Ty,
                                              unsigned AS,
                                              Instruction *I) const {
  // X86 supports extremely general addressing modes.
  CodeModel::Model M = getTargetMachine().getCodeModel();

  // X86 allows a sign-extended 32-bit immediate field as a displacement.
  if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
    return false;

  if (AM.BaseGV) {
    unsigned GVFlags = Subtarget.classifyGlobalReference(AM.BaseGV);

    // If a reference to this global requires an extra load, we can't fold it.
    if (isGlobalStubReference(GVFlags))
      return false;

    // If BaseGV requires a register for the PIC base, we cannot also have a
    // BaseReg specified.
    if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
      return false;

    // If lower 4G is not available, then we must use rip-relative addressing.
    if ((M != CodeModel::Small || isPositionIndependent()) &&
        Subtarget.is64Bit() && (AM.BaseOffs || AM.Scale > 1))
      return false;
  }

  switch (AM.Scale) {
  case 0:
  case 1:
  case 2:
  case 4:
  case 8:
    // These scales always work.
    break;
  case 3:
  case 5:
  case 9:
    // These scales are formed with basereg+scalereg. Only accept if there is
    // no basereg yet.
    if (AM.HasBaseReg)
      return false;
    break;
  default: // Other stuff never works.
    return false;
  }

  return true;
}

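// A note on the scales accepted above: the hardware addressing mode is
// [BaseReg + Scale*IndexReg + Disp32] with Scale in {1,2,4,8}. Scales of
// 3, 5 and 9 are only reachable by reusing the index register as the base
// (e.g. 'lea (%rcx,%rcx,2), %rax' computes 3*x), which is why they are
// rejected whenever a base register is already in use. The example is
// illustrative, not exhaustive.
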
bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
  unsigned Bits = Ty->getScalarSizeInBits();

  // 8-bit shifts are always expensive, but versions with a scalar amount
  // aren't particularly cheaper than those without.
  if (Bits == 8)
    return false;

  // XOP has v16i8/v8i16/v4i32/v2i64 variable vector shifts.
  // Splitting for v32i8/v16i16 on XOP+AVX2 targets is still preferred.
  if (Subtarget.hasXOP() &&
      (Bits == 8 || Bits == 16 || Bits == 32 || Bits == 64))
    return false;

  // AVX2 has vpsllv[dq] instructions (and other shifts) that make variable
  // shifts just as cheap as scalar ones.
  if (Subtarget.hasAVX2() && (Bits == 32 || Bits == 64))
    return false;

  // AVX512BW has shifts such as vpsllvw.
  if (Subtarget.hasBWI() && Bits == 16)
    return false;

  // Otherwise, it's significantly cheaper to shift by a scalar amount than by
  // a fully general vector.
  return true;
}

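// Concretely: on plain SSE2 a uniform 'shl <8 x i16>' can use a single psllw
// with the scalar amount in an XMM register, whereas a fully variable
// per-element shift has no direct instruction and must be scalarized, so the
// uniform form is the one worth exposing to SDAG (see shouldSinkOperands
// below).
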
bool X86TargetLowering::isBinOp(unsigned Opcode) const {
  switch (Opcode) {
  // These are non-commutative binops.
  // TODO: Add more X86ISD opcodes once we have test coverage.
  case X86ISD::ANDNP:
  case X86ISD::PCMPGT:
  case X86ISD::FMAX:
  case X86ISD::FMIN:
  case X86ISD::FANDN:
    return true;
  }

  return TargetLoweringBase::isBinOp(Opcode);
}

bool X86TargetLowering::isCommutativeBinOp(unsigned Opcode) const {
  switch (Opcode) {
  // TODO: Add more X86ISD opcodes once we have test coverage.
  case X86ISD::PCMPEQ:
  case X86ISD::PMULDQ:
  case X86ISD::PMULUDQ:
  case X86ISD::FMAXC:
  case X86ISD::FMINC:
  case X86ISD::FAND:
  case X86ISD::FOR:
  case X86ISD::FXOR:
    return true;
  }

  return TargetLoweringBase::isCommutativeBinOp(Opcode);
}

bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 > NumBits2;
}

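// For example, truncating i64 to i32 is free here: the low 32 bits of a
// 64-bit register are directly addressable (%eax aliases the low half of
// %rax), so no instruction is needed for the truncate itself.
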
bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;

  if (!isTypeLegal(EVT::getEVT(Ty1)))
    return false;

  assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");

  // Assuming the caller doesn't have a zeroext or signext return parameter,
  // truncation all the way down to i1 is valid.
  return true;
}

bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<32>(Imm);
}

bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // Can also use sub to handle negated immediates.
  return isInt<32>(Imm);
}

bool X86TargetLowering::isLegalStoreImmediate(int64_t Imm) const {
  return isInt<32>(Imm);
}

bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isScalarInteger() || !VT2.isScalarInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 > NumBits2;
}

bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
  // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
  return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget.is64Bit();
}

bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
  // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
  return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget.is64Bit();
}

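// For example, 'movl %edi, %eax' already zeroes bits 63:32 of %rax, so a
// (zext i32 -> i64) of a freshly produced 32-bit value costs nothing.
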
bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  EVT VT1 = Val.getValueType();
  if (isZExtFree(VT1, VT2))
    return true;

  if (Val.getOpcode() != ISD::LOAD)
    return false;

  if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;

  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    // X86 has 8, 16, and 32-bit zero-extending loads.
    return true;
  }

  return false;
}

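// e.g. (zext (load i8)) becomes a single movzbl; the extension is folded
// into the load, so it is free on top of the load itself.
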
bool X86TargetLowering::shouldSinkOperands(Instruction *I,
                                           SmallVectorImpl<Use *> &Ops) const {
  // A uniform shift amount in a vector shift or funnel shift may be much
  // cheaper than a generic variable vector shift, so make that pattern visible
  // to SDAG by sinking the shuffle instruction next to the shift.
  int ShiftAmountOpNum = -1;
  if (I->isShift())
    ShiftAmountOpNum = 1;
  else if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    if (II->getIntrinsicID() == Intrinsic::fshl ||
        II->getIntrinsicID() == Intrinsic::fshr)
      ShiftAmountOpNum = 2;
  }

  if (ShiftAmountOpNum == -1)
    return false;

  auto *Shuf = dyn_cast<ShuffleVectorInst>(I->getOperand(ShiftAmountOpNum));
  if (Shuf && getSplatIndex(Shuf->getShuffleMask()) >= 0 &&
      isVectorShiftByScalarCheap(I->getType())) {
    Ops.push_back(&I->getOperandUse(ShiftAmountOpNum));
    return true;
  }

  return false;
}

bool X86TargetLowering::shouldConvertPhiType(Type *From, Type *To) const {
  if (!Subtarget.is64Bit())
    return false;
  return TargetLowering::shouldConvertPhiType(From, To);
}

bool X86TargetLowering::isVectorLoadExtDesirable(SDValue ExtVal) const {
  if (isa<MaskedLoadSDNode>(ExtVal.getOperand(0)))
    return false;

  EVT SrcVT = ExtVal.getOperand(0).getValueType();

  // There is no extending load for vXi1.
  if (SrcVT.getScalarType() == MVT::i1)
    return false;

  return true;
}

bool X86TargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                   EVT VT) const {
  if (!Subtarget.hasAnyFMA())
    return false;

  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  default:
    break;
  }

  return false;
}

bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
  // i16 instructions are longer (0x66 prefix) and potentially slower.
  return !(VT1 == MVT::i32 && VT2 == MVT::i16);
}

/// Targets can use this to indicate that they only support *some*
/// VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool X86TargetLowering::isShuffleMaskLegal(ArrayRef<int> Mask, EVT VT) const {
  if (!VT.isSimple())
    return false;

  // Not for i1 vectors
  if (VT.getSimpleVT().getScalarType() == MVT::i1)
    return false;

  // Very little shuffling can be done for 64-bit vectors right now.
  if (VT.getSimpleVT().getSizeInBits() == 64)
    return false;

  // We only care that the types being shuffled are legal. The lowering can
  // handle any possible shuffle mask that results.
  return isTypeLegal(VT.getSimpleVT());
}

bool X86TargetLowering::isVectorClearMaskLegal(ArrayRef<int> Mask,
                                               EVT VT) const {
  // Don't convert an 'and' into a shuffle that we don't directly support.
  // vpblendw and vpshufb for 256-bit vectors are not available on AVX1.
  if (!Subtarget.hasAVX2())
    if (VT == MVT::v32i8 || VT == MVT::v16i16)
      return false;

  // Just delegate to the generic legality, clear masks aren't special.
  return isShuffleMaskLegal(Mask, VT);
}

bool X86TargetLowering::areJTsAllowed(const Function *Fn) const {
  // If the subtarget is using thunks, we need to not generate jump tables.
  if (Subtarget.useIndirectThunkBranches())
    return false;

  // Otherwise, fall back on the generic logic.
  return TargetLowering::areJTsAllowed(Fn);
}

//===----------------------------------------------------------------------===//
//                           X86 Scheduler Hooks
//===----------------------------------------------------------------------===//

// Returns true if EFLAGS is consumed after this iterator in the rest of the
// basic block or any successors of the basic block.
static bool isEFLAGSLiveAfter(MachineBasicBlock::iterator Itr,
                              MachineBasicBlock *BB) {
  // Scan forward through BB for a use/def of EFLAGS.
  for (MachineBasicBlock::iterator miI = std::next(Itr), miE = BB->end();
       miI != miE; ++miI) {
    const MachineInstr &mi = *miI;
    if (mi.readsRegister(X86::EFLAGS))
      return true;
    // If we found a def, we can stop searching.
    if (mi.definesRegister(X86::EFLAGS))
      return false;
  }

  // If we hit the end of the block, check whether EFLAGS is live into a
  // successor.
  for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
                                        sEnd = BB->succ_end();
       sItr != sEnd; ++sItr) {
    MachineBasicBlock *succ = *sItr;
    if (succ->isLiveIn(X86::EFLAGS))
      return true;
  }

  return false;
}

/// Utility function to emit xbegin specifying the start of an RTM region.
static MachineBasicBlock *emitXBegin(MachineInstr &MI, MachineBasicBlock *MBB,
                                     const TargetInstrInfo *TII) {
  DebugLoc DL = MI.getDebugLoc();

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = ++MBB->getIterator();

  // For the v = xbegin(), we generate
  //
  // thisMBB:
  //  xbegin sinkMBB
  //
  // mainMBB:
  //  s0 = -1
  //
  // fallBB:
  //  eax = # XABORT_DEF
  //  s1 = eax
  //
  // sinkMBB:
  //  v = phi(s0/mainBB, s1/fallBB)

  MachineBasicBlock *thisMBB = MBB;
  MachineFunction *MF = MBB->getParent();
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, mainMBB);
  MF->insert(I, fallMBB);
  MF->insert(I, sinkMBB);

  if (isEFLAGSLiveAfter(MI, MBB)) {
    mainMBB->addLiveIn(X86::EFLAGS);
    fallMBB->addLiveIn(X86::EFLAGS);
    sinkMBB->addLiveIn(X86::EFLAGS);
  }

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  MachineRegisterInfo &MRI = MF->getRegInfo();
  Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
  Register mainDstReg = MRI.createVirtualRegister(RC);
  Register fallDstReg = MRI.createVirtualRegister(RC);

  // thisMBB:
  //  xbegin fallMBB
  //  # fallthrough to mainMBB
  //  # abortion to fallMBB
  BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(fallMBB);
  thisMBB->addSuccessor(mainMBB);
  thisMBB->addSuccessor(fallMBB);

  // mainMBB:
  //  mainDstReg := -1
  BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), mainDstReg).addImm(-1);
  BuildMI(mainMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
  mainMBB->addSuccessor(sinkMBB);

  // fallMBB:
  //  ; pseudo instruction to model hardware's definition from XABORT
  //  EAX := XABORT_DEF
  //  fallDstReg := EAX
  BuildMI(fallMBB, DL, TII->get(X86::XABORT_DEF));
  BuildMI(fallMBB, DL, TII->get(TargetOpcode::COPY), fallDstReg)
      .addReg(X86::EAX);
  fallMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  //  DstReg := phi(mainDstReg/mainBB, fallDstReg/fallBB)
  BuildMI(*sinkMBB, sinkMBB->begin(), DL, TII->get(X86::PHI), DstReg)
      .addReg(mainDstReg).addMBB(mainMBB)
      .addReg(fallDstReg).addMBB(fallMBB);

  MI.eraseFromParent();
  return sinkMBB;
}

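// As a rough sketch, the machine code produced for 'v = xbegin()' is:
//
//     xbegin  .Lfall        # starts the transaction, falls through on entry
//     movl    $-1, %vreg0   # mainMBB: success value
//     jmp     .Lsink
//   .Lfall:                 # abort path; EAX holds the abort status
//     movl    %eax, %vreg1
//   .Lsink:
//     # v = phi(%vreg0, %vreg1)
//
// (register names are illustrative; allocation happens later.)
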
MachineBasicBlock *
X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr &MI,
                                                 MachineBasicBlock *MBB) const {
  // Emit va_arg instruction on X86-64.

  // Operands to this pseudo-instruction:
  // 0  ) Output        : destination address (reg)
  // 1-5) Input         : va_list address (addr, i64mem)
  // 6  ) ArgSize       : Size (in bytes) of vararg type
  // 7  ) ArgMode       : 0=overflow only, 1=use gp_offset, 2=use fp_offset
  // 8  ) Align         : Alignment of type
  // 9  ) EFLAGS (implicit-def)

  assert(MI.getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
  static_assert(X86::AddrNumOperands == 5,
                "VAARG_64 assumes 5 address operands");

  Register DestReg = MI.getOperand(0).getReg();
  MachineOperand &Base = MI.getOperand(1);
  MachineOperand &Scale = MI.getOperand(2);
  MachineOperand &Index = MI.getOperand(3);
  MachineOperand &Disp = MI.getOperand(4);
  MachineOperand &Segment = MI.getOperand(5);
  unsigned ArgSize = MI.getOperand(6).getImm();
  unsigned ArgMode = MI.getOperand(7).getImm();
  Align Alignment = Align(MI.getOperand(8).getImm());

  MachineFunction *MF = MBB->getParent();

  // Memory Reference
  assert(MI.hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");

  MachineMemOperand *OldMMO = MI.memoperands().front();

  // Clone the MMO into two separate MMOs for loading and storing
  MachineMemOperand *LoadOnlyMMO = MF->getMachineMemOperand(
      OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOStore);
  MachineMemOperand *StoreOnlyMMO = MF->getMachineMemOperand(
      OldMMO, OldMMO->getFlags() & ~MachineMemOperand::MOLoad);

  // Machine Information
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
  const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
  DebugLoc DL = MI.getDebugLoc();

  // struct va_list {
  //   i32   gp_offset
  //   i32   fp_offset
  //   i64   overflow_area (address)
  //   i64   reg_save_area (address)
  // }
  // sizeof(va_list) = 24
  // alignment(va_list) = 8

  unsigned TotalNumIntRegs = 6;
  unsigned TotalNumXMMRegs = 8;
  bool UseGPOffset = (ArgMode == 1);
  bool UseFPOffset = (ArgMode == 2);
  unsigned MaxOffset = TotalNumIntRegs * 8 +
                       (UseFPOffset ? TotalNumXMMRegs * 16 : 0);

  /* Align ArgSize to a multiple of 8 */
  unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
  bool NeedsAlign = (Alignment > 8);

  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *overflowMBB;
  MachineBasicBlock *offsetMBB;
  MachineBasicBlock *endMBB;

  unsigned OffsetDestReg = 0;   // Argument address computed by offsetMBB
  unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
  unsigned OffsetReg = 0;

  if (!UseGPOffset && !UseFPOffset) {
    // If we only pull from the overflow region, we don't create a branch.
    // We don't need to alter control flow.
    OffsetDestReg = 0; // unused
    OverflowDestReg = DestReg;

    offsetMBB = nullptr;
    overflowMBB = thisMBB;
    endMBB = thisMBB;
  } else {
    // First emit code to check if gp_offset (or fp_offset) is below the bound.
    // If so, pull the argument from reg_save_area. (branch to offsetMBB)
    // If not, pull from overflow_area. (branch to overflowMBB)
    //
    //       thisMBB
    //         |     .
    //         |        .
    //     offsetMBB   overflowMBB
    //         |        .
    //         |     .
    //        endMBB

    // Registers for the PHI in endMBB
    OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
    OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);

    const BasicBlock *LLVM_BB = MBB->getBasicBlock();
    overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
    offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
    endMBB = MF->CreateMachineBasicBlock(LLVM_BB);

    MachineFunction::iterator MBBIter = ++MBB->getIterator();

    // Insert the new basic blocks
    MF->insert(MBBIter, offsetMBB);
    MF->insert(MBBIter, overflowMBB);
    MF->insert(MBBIter, endMBB);

    // Transfer the remainder of MBB and its successor edges to endMBB.
    endMBB->splice(endMBB->begin(), thisMBB,
                   std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
    endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);

    // Make offsetMBB and overflowMBB successors of thisMBB
    thisMBB->addSuccessor(offsetMBB);
    thisMBB->addSuccessor(overflowMBB);

    // endMBB is a successor of both offsetMBB and overflowMBB
    offsetMBB->addSuccessor(endMBB);
    overflowMBB->addSuccessor(endMBB);

    // Load the offset value into a register
    OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
    BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
        .add(Base)
        .add(Scale)
        .add(Index)
        .addDisp(Disp, UseFPOffset ? 4 : 0)
        .add(Segment)
        .setMemRefs(LoadOnlyMMO);

    // Check if there is enough room left to pull this argument.
    BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
        .addReg(OffsetReg)
        .addImm(MaxOffset + 8 - ArgSizeA8);

    // Branch to "overflowMBB" if offset >= max
    // Fall through to "offsetMBB" otherwise
    BuildMI(thisMBB, DL, TII->get(X86::JCC_1))
        .addMBB(overflowMBB).addImm(X86::COND_AE);
  }

  // In offsetMBB, emit code to use the reg_save_area.
  if (offsetMBB) {
    assert(OffsetReg != 0);

    // Read the reg_save_area address.
    Register RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
        .add(Base)
        .add(Scale)
        .add(Index)
        .addDisp(Disp, 16)
        .add(Segment)
        .setMemRefs(LoadOnlyMMO);

    // Zero-extend the offset
    Register OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
        .addImm(0)
        .addReg(OffsetReg)
        .addImm(X86::sub_32bit);

    // Add the offset to the reg_save_area to get the final address.
    BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
        .addReg(OffsetReg64)
        .addReg(RegSaveReg);

    // Compute the offset for the next argument
    Register NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
        .addReg(OffsetReg)
        .addImm(UseFPOffset ? 16 : 8);

    // Store it back into the va_list.
    BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
        .add(Base)
        .add(Scale)
        .add(Index)
        .addDisp(Disp, UseFPOffset ? 4 : 0)
        .add(Segment)
        .addReg(NextOffsetReg)
        .setMemRefs(StoreOnlyMMO);

    // Jump to endMBB
    BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
        .addMBB(endMBB);
  }

  //
  // Emit code to use overflow area
  //

  // Load the overflow_area address into a register.
  Register OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
  BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
      .add(Base)
      .add(Scale)
      .add(Index)
      .addDisp(Disp, 8)
      .add(Segment)
      .setMemRefs(LoadOnlyMMO);

  // If we need to align it, do so. Otherwise, just copy the address
  // to OverflowDestReg.
  if (NeedsAlign) {
    // Align the overflow address
    Register TmpReg = MRI.createVirtualRegister(AddrRegClass);

    // aligned_addr = (addr + (align-1)) & ~(align-1)
    BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
        .addReg(OverflowAddrReg)
        .addImm(Alignment.value() - 1);

    BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
        .addReg(TmpReg)
        .addImm(~(uint64_t)(Alignment.value() - 1));
  } else {
    BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
        .addReg(OverflowAddrReg);
  }

  // Compute the next overflow address after this argument.
  // (the overflow address should be kept 8-byte aligned)
  Register NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
  BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
      .addReg(OverflowDestReg)
      .addImm(ArgSizeA8);

  // Store the new overflow address.
  BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
      .add(Base)
      .add(Scale)
      .add(Index)
      .addDisp(Disp, 8)
      .add(Segment)
      .addReg(NextAddrReg)
      .setMemRefs(StoreOnlyMMO);

  // If we branched, emit the PHI to the front of endMBB.
  if (offsetMBB) {
    BuildMI(*endMBB, endMBB->begin(), DL,
            TII->get(X86::PHI), DestReg)
        .addReg(OffsetDestReg).addMBB(offsetMBB)
        .addReg(OverflowDestReg).addMBB(overflowMBB);
  }

  // Erase the pseudo instruction
  MI.eraseFromParent();

  return endMBB;
}

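// For reference, with ArgMode == 1 (gp_offset) the emitted code behaves
// roughly like the following pseudo-assembly, where %list is the va_list
// pointer (register choices and symbolic field names are illustrative):
//
//     movl  gp_offset(%list), %off
//     cmpl  $48, %off                 # 6 GP regs * 8 bytes
//     jae   .Loverflow
//     movq  reg_save_area(%list), %base
//     addq  %off, %base               # address of the argument
//     addl  $8, gp_offset(%list)      # bump for the next va_arg
//     jmp   .Lend
//   .Loverflow:
//     movq  overflow_area(%list), %base
//     addq  $8, overflow_area(%list)  # keep the area 8-byte aligned
//   .Lend:
//     # result = phi of the two addresses
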
MachineBasicBlock *X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *MBB) const {
  // Emit code to save XMM registers to the stack. The ABI says that the
  // number of registers to save is given in %al, so it's theoretically
  // possible to do an indirect jump trick to avoid saving all of them,
  // however this code takes a simpler approach and just executes all
  // of the stores if %al is non-zero. It's less code, and it's probably
  // easier on the hardware branch predictor, and stores aren't all that
  // expensive anyway.

  // Create the new basic blocks. One block contains all the XMM stores,
  // and one block is the final destination regardless of whether any
  // stores were performed.
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction *F = MBB->getParent();
  MachineFunction::iterator MBBIter = ++MBB->getIterator();
  MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(MBBIter, XMMSaveMBB);
  F->insert(MBBIter, EndMBB);

  // Transfer the remainder of MBB and its successor edges to EndMBB.
  EndMBB->splice(EndMBB->begin(), MBB,
                 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  EndMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // The original block will now fall through to the XMM save block.
  MBB->addSuccessor(XMMSaveMBB);
  // The XMMSaveMBB will fall through to the end block.
  XMMSaveMBB->addSuccessor(EndMBB);

  // Now add the instructions.
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  Register CountReg = MI.getOperand(0).getReg();
  int64_t RegSaveFrameIndex = MI.getOperand(1).getImm();
  int64_t VarArgsFPOffset = MI.getOperand(2).getImm();

  if (!Subtarget.isCallingConvWin64(F->getFunction().getCallingConv())) {
    // If %al is 0, branch around the XMM save block.
    BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
    BuildMI(MBB, DL, TII->get(X86::JCC_1)).addMBB(EndMBB).addImm(X86::COND_E);
    MBB->addSuccessor(EndMBB);
  }

  // Make sure the last operand is EFLAGS, which gets clobbered by the branch
  // that was just emitted, but clearly shouldn't be "saved".
  assert((MI.getNumOperands() <= 3 ||
          !MI.getOperand(MI.getNumOperands() - 1).isReg() ||
          MI.getOperand(MI.getNumOperands() - 1).getReg() == X86::EFLAGS) &&
         "Expected last argument to be EFLAGS");
  unsigned MOVOpc = Subtarget.hasAVX() ? X86::VMOVAPSmr : X86::MOVAPSmr;
  // In the XMM save block, save all the XMM argument registers.
  for (int i = 3, e = MI.getNumOperands() - 1; i != e; ++i) {
    int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
    MachineMemOperand *MMO = F->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*F, RegSaveFrameIndex, Offset),
        MachineMemOperand::MOStore,
        /*Size=*/16, Align(16));
    BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
        .addFrameIndex(RegSaveFrameIndex)
        .addImm(/*Scale=*/1)
        .addReg(/*IndexReg=*/0)
        .addImm(/*Disp=*/Offset)
        .addReg(/*Segment=*/0)
        .addReg(MI.getOperand(i).getReg())
        .addMemOperand(MMO);
  }

  MI.eraseFromParent(); // The pseudo instruction is gone now.

  return EndMBB;
}

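// On a non-Win64 target the net effect is essentially:
//
//     testb %al, %al                  # %al = number of XMM args used
//     je    .Lend
//     movaps %xmm0, fi#N+0            # 16-byte aligned spill slots
//     movaps %xmm1, fi#N+16
//     ...                             # one store per XMM argument register
//   .Lend:
//
// where fi#N is the register save area frame index (illustrative syntax).
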
// The EFLAGS operand of SelectItr might be missing a kill marker
// because there were multiple uses of EFLAGS, and ISel didn't know
// which to mark. Figure out whether SelectItr should have had a
// kill marker, and set it if it should. Returns the correct kill
// marker value.
static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
                                     MachineBasicBlock *BB,
                                     const TargetRegisterInfo *TRI) {
  if (isEFLAGSLiveAfter(SelectItr, BB))
    return false;

  // We found a def, or hit the end of the basic block and EFLAGS wasn't live
  // out. SelectMI should have a kill flag on EFLAGS.
  SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
  return true;
}

// Return true if it is OK for this CMOV pseudo-opcode to be cascaded
// together with other CMOV pseudo-opcodes into a single basic-block with
// conditional jump around it.
static bool isCMOVPseudo(MachineInstr &MI) {
  switch (MI.getOpcode()) {
  case X86::CMOV_FR32:
  case X86::CMOV_FR32X:
  case X86::CMOV_FR64:
  case X86::CMOV_FR64X:
  case X86::CMOV_GR8:
  case X86::CMOV_GR16:
  case X86::CMOV_GR32:
  case X86::CMOV_RFP32:
  case X86::CMOV_RFP64:
  case X86::CMOV_RFP80:
  case X86::CMOV_VR64:
  case X86::CMOV_VR128:
  case X86::CMOV_VR128X:
  case X86::CMOV_VR256:
  case X86::CMOV_VR256X:
  case X86::CMOV_VR512:
  case X86::CMOV_VK1:
  case X86::CMOV_VK2:
  case X86::CMOV_VK4:
  case X86::CMOV_VK8:
  case X86::CMOV_VK16:
  case X86::CMOV_VK32:
  case X86::CMOV_VK64:
    return true;

  default:
    return false;
  }
}

// Helper function, which inserts PHI functions into SinkMBB:
//   %Result(i) = phi [ %FalseValue(i), FalseMBB ], [ %TrueValue(i), TrueMBB ],
// where %FalseValue(i) and %TrueValue(i) are taken from the consequent CMOVs
// in [MIItBegin, MIItEnd) range. It returns the last MachineInstrBuilder for
// the last PHI function inserted.
static MachineInstrBuilder createPHIsForCMOVsInSinkBB(
    MachineBasicBlock::iterator MIItBegin, MachineBasicBlock::iterator MIItEnd,
    MachineBasicBlock *TrueMBB, MachineBasicBlock *FalseMBB,
    MachineBasicBlock *SinkMBB) {
  MachineFunction *MF = TrueMBB->getParent();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  DebugLoc DL = MIItBegin->getDebugLoc();

  X86::CondCode CC = X86::CondCode(MIItBegin->getOperand(3).getImm());
  X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);

  MachineBasicBlock::iterator SinkInsertionPoint = SinkMBB->begin();

  // As we are creating the PHIs, we have to be careful if there is more than
  // one. Later CMOVs may reference the results of earlier CMOVs, but later
  // PHIs have to reference the individual true/false inputs from earlier PHIs.
  // That also means that PHI construction must work forward from earlier to
  // later, and that the code must maintain a mapping from earlier PHI's
  // destination registers, and the registers that went into the PHI.
  DenseMap<unsigned, std::pair<unsigned, unsigned>> RegRewriteTable;
  MachineInstrBuilder MIB;

  for (MachineBasicBlock::iterator MIIt = MIItBegin; MIIt != MIItEnd; ++MIIt) {
    Register DestReg = MIIt->getOperand(0).getReg();
    Register Op1Reg = MIIt->getOperand(1).getReg();
    Register Op2Reg = MIIt->getOperand(2).getReg();

    // If this CMOV we are generating is the opposite condition from
    // the jump we generated, then we have to swap the operands for the
    // PHI that is going to be generated.
    if (MIIt->getOperand(3).getImm() == OppCC)
      std::swap(Op1Reg, Op2Reg);

    if (RegRewriteTable.find(Op1Reg) != RegRewriteTable.end())
      Op1Reg = RegRewriteTable[Op1Reg].first;

    if (RegRewriteTable.find(Op2Reg) != RegRewriteTable.end())
      Op2Reg = RegRewriteTable[Op2Reg].second;

    MIB = BuildMI(*SinkMBB, SinkInsertionPoint, DL, TII->get(X86::PHI), DestReg)
              .addReg(Op1Reg)
              .addMBB(FalseMBB)
              .addReg(Op2Reg)
              .addMBB(TrueMBB);

    // Add this PHI to the rewrite table.
    RegRewriteTable[DestReg] = std::make_pair(Op1Reg, Op2Reg);
  }

  return MIB;
}

// Lower cascaded selects in form of (SecondCmov (FirstCMOV F, T, cc1), T, cc2).
MachineBasicBlock *
X86TargetLowering::EmitLoweredCascadedSelect(MachineInstr &FirstCMOV,
                                             MachineInstr &SecondCascadedCMOV,
                                             MachineBasicBlock *ThisMBB) const {
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = FirstCMOV.getDebugLoc();

  // We lower cascaded CMOVs such as
  //
  //   (SecondCascadedCMOV (FirstCMOV F, T, cc1), T, cc2)
  //
  // to two successive branches.
  //
  // Without this, we would add a PHI between the two jumps, which ends up
  // creating a few copies all around. For instance, for
  //
  //    (sitofp (zext (fcmp une)))
  //
  // we would generate:
  //
  //         ucomiss %xmm1, %xmm0
  //         movss  <1.0f>, %xmm0
  //         movaps  %xmm0, %xmm1
  //         jne     .LBB5_2
  //         xorps   %xmm1, %xmm1
  //       .LBB5_2:
  //         jp      .LBB5_4
  //         movaps  %xmm1, %xmm0
  //       .LBB5_4:
  //         retq
  //
  // because this custom-inserter would have generated:
  //
  //   A
  //   | \
  //   |  B
  //   | /
  //   C
  //   | \
  //   |  D
  //   | /
  //   E
  //
  // A: X = ...; Y = ...
  // B: empty
  // C: Z = PHI [X, A], [Y, B]
  // D: empty
  // E: PHI [X, C], [Z, D]
  //
  // If we lower both CMOVs in a single step, we can instead generate:
  //
  //   A
  //   | \
  //   |  C
  //   | /|
  //   |/ |
  //   |  |
  //   |  D
  //   | /
  //   E
  //
  // A: X = ...; Y = ...
  // D: empty
  // E: PHI [X, A], [X, C], [Y, D]
  //
  // Which, in our sitofp/fcmp example, gives us something like:
  //
  //         ucomiss %xmm1, %xmm0
  //         movss  <1.0f>, %xmm0
  //         jne     .LBB5_4
  //         jp      .LBB5_4
  //         xorps   %xmm0, %xmm0
  //       .LBB5_4:
  //         retq
  //
  // We lower cascaded CMOV into two successive branches to the same block.
  // EFLAGS is used by both, so mark it as live in the second.
  const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
  MachineFunction *F = ThisMBB->getParent();
  MachineBasicBlock *FirstInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *SecondInsertedMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);

  MachineFunction::iterator It = ++ThisMBB->getIterator();
  F->insert(It, FirstInsertedMBB);
  F->insert(It, SecondInsertedMBB);
  F->insert(It, SinkMBB);

  // For a cascaded CMOV, we lower it to two successive branches to
  // the same block (SinkMBB). EFLAGS is used by both, so mark it as live in
  // the FirstInsertedMBB.
  FirstInsertedMBB->addLiveIn(X86::EFLAGS);

  // If the EFLAGS register isn't dead in the terminator, then claim that it's
  // live into the sink and copy blocks.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  if (!SecondCascadedCMOV.killsRegister(X86::EFLAGS) &&
      !checkAndUpdateEFLAGSKill(SecondCascadedCMOV, ThisMBB, TRI)) {
    SecondInsertedMBB->addLiveIn(X86::EFLAGS);
    SinkMBB->addLiveIn(X86::EFLAGS);
  }

  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->begin(), ThisMBB,
                  std::next(MachineBasicBlock::iterator(FirstCMOV)),
                  ThisMBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);

  // Fallthrough block for ThisMBB.
  ThisMBB->addSuccessor(FirstInsertedMBB);
  // The true block target of the first branch is always SinkMBB.
  ThisMBB->addSuccessor(SinkMBB);
  // Fallthrough block for FirstInsertedMBB.
  FirstInsertedMBB->addSuccessor(SecondInsertedMBB);
  // The true block for the branch of FirstInsertedMBB.
  FirstInsertedMBB->addSuccessor(SinkMBB);
  // This is fallthrough.
  SecondInsertedMBB->addSuccessor(SinkMBB);

  // Create the conditional branch instructions.
  X86::CondCode FirstCC = X86::CondCode(FirstCMOV.getOperand(3).getImm());
  BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(FirstCC);

  X86::CondCode SecondCC =
      X86::CondCode(SecondCascadedCMOV.getOperand(3).getImm());
  BuildMI(FirstInsertedMBB, DL, TII->get(X86::JCC_1))
      .addMBB(SinkMBB)
      .addImm(SecondCC);

  // SinkMBB:
  //   %Result = phi [ %FalseValue, SecondInsertedMBB ], [ %TrueValue, ThisMBB ]
  Register DestReg = FirstCMOV.getOperand(0).getReg();
  Register Op1Reg = FirstCMOV.getOperand(1).getReg();
  Register Op2Reg = FirstCMOV.getOperand(2).getReg();
  MachineInstrBuilder MIB =
      BuildMI(*SinkMBB, SinkMBB->begin(), DL, TII->get(X86::PHI), DestReg)
          .addReg(Op1Reg)
          .addMBB(SecondInsertedMBB)
          .addReg(Op2Reg)
          .addMBB(ThisMBB);

  // The second SecondInsertedMBB provides the same incoming value as the
  // FirstInsertedMBB (the True operand of the SELECT_CC/CMOV nodes).
  MIB.addReg(FirstCMOV.getOperand(2).getReg()).addMBB(FirstInsertedMBB);
  // Copy the PHI result to the register defined by the second CMOV.
  BuildMI(*SinkMBB, std::next(MachineBasicBlock::iterator(MIB.getInstr())), DL,
          TII->get(TargetOpcode::COPY),
          SecondCascadedCMOV.getOperand(0).getReg())
      .addReg(FirstCMOV.getOperand(0).getReg());

  // Now remove the CMOVs.
  FirstCMOV.eraseFromParent();
  SecondCascadedCMOV.eraseFromParent();

  return SinkMBB;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredSelect(MachineInstr &MI,
                                     MachineBasicBlock *ThisMBB) const {
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between and a branch opcode to use.
  //
  //  ThisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC copy1MBB
  //   fallthrough --> FalseMBB
  //
  // This code lowers all pseudo-CMOV instructions. Generally it lowers these
  // as described above, by inserting a BB, and then making a PHI at the join
  // point to select the true and false operands of the CMOV in the PHI.
  //
  // The code also handles two different cases of multiple CMOV opcodes
  // one after another.
  //
  // Case 1:
  // In this case, there are multiple CMOVs in a row, all which are based on
  // the same condition setting (or the exact opposite condition setting).
  // In this case we can lower all the CMOVs using a single inserted BB, and
  // then make a number of PHIs at the join point to model the CMOVs. The only
  // trickiness here, is that in a case like:
  //
  //   t2 = CMOV cond1 t1, f1
  //   t3 = CMOV cond1 t2, f2
  //
  // when rewriting this into PHIs, we have to perform some renaming on the
  // temps since you cannot have a PHI operand refer to a PHI result earlier
  // in the same block. The "simple" but wrong lowering would be:
  //
  //   t2 = PHI t1(BB1), f1(BB2)
  //   t3 = PHI t2(BB1), f2(BB2)
  //
  // but clearly t2 is not defined in BB1, so that is incorrect. The proper
  // renaming is to note that on the path through BB1, t2 is really just a
  // copy of t1, and do that renaming, properly generating:
  //
  //   t2 = PHI t1(BB1), f1(BB2)
  //   t3 = PHI t1(BB1), f2(BB2)
  //
  // Case 2:
  // CMOV ((CMOV F, T, cc1), T, cc2) is checked here and handled by a separate
  // function - EmitLoweredCascadedSelect.

  X86::CondCode CC = X86::CondCode(MI.getOperand(3).getImm());
  X86::CondCode OppCC = X86::GetOppositeBranchCondition(CC);
  MachineInstr *LastCMOV = &MI;
  MachineBasicBlock::iterator NextMIIt = MachineBasicBlock::iterator(MI);

  // Check for case 1, where there are multiple CMOVs with the same condition
  // first. Of the two cases of multiple CMOV lowerings, case 1 reduces the
  // number of jumps the most.

  if (isCMOVPseudo(MI)) {
    // See if we have a string of CMOVS with the same condition. Skip over
    // intervening debug insts.
    while (NextMIIt != ThisMBB->end() && isCMOVPseudo(*NextMIIt) &&
           (NextMIIt->getOperand(3).getImm() == CC ||
            NextMIIt->getOperand(3).getImm() == OppCC)) {
      LastCMOV = &*NextMIIt;
      NextMIIt = next_nodbg(NextMIIt, ThisMBB->end());
    }
  }

  // This checks for case 2, but only do this if we didn't already find
  // case 1, as indicated by LastCMOV == MI.
  if (LastCMOV == &MI && NextMIIt != ThisMBB->end() &&
      NextMIIt->getOpcode() == MI.getOpcode() &&
      NextMIIt->getOperand(2).getReg() == MI.getOperand(2).getReg() &&
      NextMIIt->getOperand(1).getReg() == MI.getOperand(0).getReg() &&
      NextMIIt->getOperand(1).isKill()) {
    return EmitLoweredCascadedSelect(MI, *NextMIIt, ThisMBB);
  }

  const BasicBlock *LLVM_BB = ThisMBB->getBasicBlock();
  MachineFunction *F = ThisMBB->getParent();
  MachineBasicBlock *FalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);

  MachineFunction::iterator It = ++ThisMBB->getIterator();
  F->insert(It, FalseMBB);
  F->insert(It, SinkMBB);

  // If the EFLAGS register isn't dead in the terminator, then claim that it's
  // live into the sink and copy blocks.
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
  if (!LastCMOV->killsRegister(X86::EFLAGS) &&
      !checkAndUpdateEFLAGSKill(LastCMOV, ThisMBB, TRI)) {
    FalseMBB->addLiveIn(X86::EFLAGS);
    SinkMBB->addLiveIn(X86::EFLAGS);
  }

  // Transfer any debug instructions inside the CMOV sequence to the sunk block.
  auto DbgEnd = MachineBasicBlock::iterator(LastCMOV);
  auto DbgIt = MachineBasicBlock::iterator(MI);
  while (DbgIt != DbgEnd) {
    auto Next = std::next(DbgIt);
    if (DbgIt->isDebugInstr())
      SinkMBB->push_back(DbgIt->removeFromParent());
    DbgIt = Next;
  }

  // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
  SinkMBB->splice(SinkMBB->end(), ThisMBB,
                  std::next(MachineBasicBlock::iterator(LastCMOV)),
                  ThisMBB->end());
  SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);

  // Fallthrough block for ThisMBB.
  ThisMBB->addSuccessor(FalseMBB);
  // The true block target of the first (or only) branch is always a SinkMBB.
  ThisMBB->addSuccessor(SinkMBB);
  // Fallthrough block for FalseMBB.
  FalseMBB->addSuccessor(SinkMBB);

  // Create the conditional branch instruction.
  BuildMI(ThisMBB, DL, TII->get(X86::JCC_1)).addMBB(SinkMBB).addImm(CC);

  //  SinkMBB:
  //   %Result = phi [ %FalseValue, FalseMBB ], [ %TrueValue, ThisMBB ]
  //  ...
  MachineBasicBlock::iterator MIItBegin = MachineBasicBlock::iterator(MI);
  MachineBasicBlock::iterator MIItEnd =
      std::next(MachineBasicBlock::iterator(LastCMOV));
  createPHIsForCMOVsInSinkBB(MIItBegin, MIItEnd, ThisMBB, FalseMBB, SinkMBB);

  // Now remove the CMOV(s).
  ThisMBB->erase(MIItBegin, MIItEnd);

  return SinkMBB;
}

static unsigned getSUBriOpcode(bool IsLP64, int64_t Imm) {
  if (IsLP64) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

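// e.g. getSUBriOpcode(/*IsLP64=*/true, 64) picks SUB64ri8 because 64 fits in
// a sign-extended 8-bit immediate, while an Imm of 4096 picks SUB64ri32.
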
MachineBasicBlock *
X86TargetLowering::EmitLoweredProbedAlloca(MachineInstr &MI,
                                           MachineBasicBlock *MBB) const {
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  const X86FrameLowering &TFI = *Subtarget.getFrameLowering();
  DebugLoc DL = MI.getDebugLoc();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();

  const unsigned ProbeSize = getStackProbeSize(*MF);

  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineBasicBlock *testMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *tailMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *blockMBB = MF->CreateMachineBasicBlock(LLVM_BB);

  MachineFunction::iterator MBBIter = ++MBB->getIterator();
  MF->insert(MBBIter, testMBB);
  MF->insert(MBBIter, blockMBB);
  MF->insert(MBBIter, tailMBB);

  Register sizeVReg = MI.getOperand(1).getReg();

  Register physSPReg = TFI.Uses64BitFramePtr ? X86::RSP : X86::ESP;

  Register TmpStackPtr = MRI.createVirtualRegister(
      TFI.Uses64BitFramePtr ? &X86::GR64RegClass : &X86::GR32RegClass);
  Register FinalStackPtr = MRI.createVirtualRegister(
      TFI.Uses64BitFramePtr ? &X86::GR64RegClass : &X86::GR32RegClass);

  BuildMI(*MBB, {MI}, DL, TII->get(TargetOpcode::COPY), TmpStackPtr)
      .addReg(physSPReg);
  {
    const unsigned Opc = TFI.Uses64BitFramePtr ? X86::SUB64rr : X86::SUB32rr;
    BuildMI(*MBB, {MI}, DL, TII->get(Opc), FinalStackPtr)
        .addReg(TmpStackPtr)
        .addReg(sizeVReg);
  }

  // test rsp size
  BuildMI(testMBB, DL,
          TII->get(TFI.Uses64BitFramePtr ? X86::CMP64rr : X86::CMP32rr))
      .addReg(FinalStackPtr)
      .addReg(physSPReg);

  BuildMI(testMBB, DL, TII->get(X86::JCC_1))
      .addMBB(tailMBB)
      .addImm(X86::COND_GE);
  testMBB->addSuccessor(blockMBB);
  testMBB->addSuccessor(tailMBB);

  // Touch the block then extend it. This is done on the opposite side of
  // static probe where we allocate then touch, to avoid the need of probing
  // the tail of the static alloca. Possible scenarios are:
  //
  //       + ---- <- ------------ <- ------------- <- ------------ +
  //       |                                                       |
  // [free probe] -> [page alloc] -> [alloc probe] -> [tail alloc] + -> [dyn probe] -> [page alloc] -> [dyn probe] -> [tail alloc] +
  //                                                               |                                                               |
  //                                                               + <- ----------- <- ------------ <- ----------- <- ------------ +
  //
  // The property we want to enforce is to never have more than [page alloc]
  // between two probes.

  const unsigned XORMIOpc =
      TFI.Uses64BitFramePtr ? X86::XOR64mi8 : X86::XOR32mi8;
  addRegOffset(BuildMI(blockMBB, DL, TII->get(XORMIOpc)), physSPReg, false, 0)
      .addImm(0);

  BuildMI(blockMBB, DL,
          TII->get(getSUBriOpcode(TFI.Uses64BitFramePtr, ProbeSize)),
          physSPReg)
      .addReg(physSPReg)
      .addImm(ProbeSize);

  BuildMI(blockMBB, DL, TII->get(X86::JMP_1)).addMBB(testMBB);
  blockMBB->addSuccessor(testMBB);

  // Replace original instruction by the expected stack ptr
  BuildMI(tailMBB, DL, TII->get(TargetOpcode::COPY),
          MI.getOperand(0).getReg())
      .addReg(FinalStackPtr);

  tailMBB->splice(tailMBB->end(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  tailMBB->transferSuccessorsAndUpdatePHIs(MBB);
  MBB->addSuccessor(testMBB);

  // Delete the original pseudo instruction.
  MI.eraseFromParent();

  // And we're done.
  return tailMBB;
}

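// A sketch of the probe loop this emits for a dynamic alloca of %size bytes,
// assuming the default 4096-byte probe interval (register names are
// illustrative):
//
//     movq  %rsp, %final
//     subq  %size, %final          # FinalStackPtr
//   .Ltest:
//     cmpq  %rsp, %final
//     jge   .Ltail                 # stop once RSP has moved past the target
//     xorq  $0, (%rsp)             # touch the current page
//     subq  $4096, %rsp            # extend by one probe interval
//     jmp   .Ltest
//   .Ltail:
//     # the alloca result is %final
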
MachineBasicBlock *
X86TargetLowering::EmitLoweredSegAlloca(MachineInstr &MI,
                                        MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();

  assert(MF->shouldSplitStack());

  const bool Is64Bit = Subtarget.is64Bit();
  const bool IsLP64 = Subtarget.isTarget64BitLP64();

  const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
  const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;

  // BB:
  //  ... [Till the alloca]
  // If stacklet is not large enough, jump to mallocMBB
  //
  // bumpMBB:
  //  Allocate by subtracting from RSP
  //  Jump to continueMBB
  //
  // mallocMBB:
  //  Allocate by call to runtime
  //
  // continueMBB:
  //  ...
  //  [rest of original BB]
  //

  MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);

  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetRegisterClass *AddrRegClass =
      getRegClassFor(getPointerTy(MF->getDataLayout()));

  unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
           bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
           tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
           SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
           sizeVReg = MI.getOperand(1).getReg(),
           physSPReg =
               IsLP64 || Subtarget.isTargetNaCl64() ? X86::RSP : X86::ESP;

  MachineFunction::iterator MBBIter = ++BB->getIterator();

  MF->insert(MBBIter, bumpMBB);
  MF->insert(MBBIter, mallocMBB);
  MF->insert(MBBIter, continueMBB);

  continueMBB->splice(continueMBB->begin(), BB,
                      std::next(MachineBasicBlock::iterator(MI)), BB->end());
  continueMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Add code to the main basic block to check if the stack limit has been hit,
  // and if so, jump to mallocMBB otherwise to bumpMBB.
  BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
  BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr : X86::SUB32rr), SPLimitVReg)
      .addReg(tmpSPVReg).addReg(sizeVReg);
  BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr : X86::CMP32mr))
      .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
      .addReg(SPLimitVReg);
  BuildMI(BB, DL, TII->get(X86::JCC_1)).addMBB(mallocMBB).addImm(X86::COND_G);

  // bumpMBB simply decreases the stack pointer, since we know the current
  // stacklet has enough space.
  BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
      .addReg(SPLimitVReg);
  BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
      .addReg(SPLimitVReg);
  BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);

  // Calls into a routine in libgcc to allocate more space from the heap.
  const uint32_t *RegMask =
      Subtarget.getRegisterInfo()->getCallPreservedMask(*MF, CallingConv::C);
  if (IsLP64) {
    BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
        .addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
        .addExternalSymbol("__morestack_allocate_stack_space")
        .addRegMask(RegMask)
        .addReg(X86::RDI, RegState::Implicit)
        .addReg(X86::RAX, RegState::ImplicitDefine);
  } else if (Is64Bit) {
    BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
        .addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
        .addExternalSymbol("__morestack_allocate_stack_space")
        .addRegMask(RegMask)
        .addReg(X86::EDI, RegState::Implicit)
        .addReg(X86::EAX, RegState::ImplicitDefine);
  } else {
    BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
        .addImm(16);
    BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
        .addExternalSymbol("__morestack_allocate_stack_space")
        .addRegMask(RegMask)
        .addReg(X86::EAX, RegState::ImplicitDefine);
  }

  if (!Is64Bit)
    BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
        .addImm(16);

  BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
      .addReg(IsLP64 ? X86::RAX : X86::EAX);
  BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);

  // Set up the CFG correctly.
  BB->addSuccessor(bumpMBB);
  BB->addSuccessor(mallocMBB);
  mallocMBB->addSuccessor(continueMBB);
  bumpMBB->addSuccessor(continueMBB);

  // Take care of the PHI nodes.
  BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
          MI.getOperand(0).getReg())
      .addReg(mallocPtrVReg)
      .addMBB(mallocMBB)
      .addReg(bumpSPPtrVReg)
      .addMBB(bumpMBB);

  // Delete the original pseudo instruction.
  MI.eraseFromParent();

  // And we're done.
  return continueMBB;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredCatchRet(MachineInstr &MI,
                                       MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  MachineBasicBlock *TargetMBB = MI.getOperand(0).getMBB();
  DebugLoc DL = MI.getDebugLoc();

  assert(!isAsynchronousEHPersonality(
             classifyEHPersonality(MF->getFunction().getPersonalityFn())) &&
         "SEH does not use catchret!");

  // Only 32-bit EH needs to worry about manually restoring stack pointers.
  if (!Subtarget.is32Bit())
    return BB;

  // C++ EH creates a new target block to hold the restore code, and wires up
  // the new block to the return destination with a normal JMP_4.
  MachineBasicBlock *RestoreMBB =
      MF->CreateMachineBasicBlock(BB->getBasicBlock());
  assert(BB->succ_size() == 1);
  MF->insert(std::next(BB->getIterator()), RestoreMBB);
  RestoreMBB->transferSuccessorsAndUpdatePHIs(BB);
  BB->addSuccessor(RestoreMBB);
  MI.getOperand(0).setMBB(RestoreMBB);

  // Marking this as an EH pad but not a funclet entry block causes PEI to
  // restore stack pointers in the block.
  RestoreMBB->setIsEHPad(true);

  auto RestoreMBBI = RestoreMBB->begin();
  BuildMI(*RestoreMBB, RestoreMBBI, DL, TII.get(X86::JMP_4)).addMBB(TargetMBB);
  return BB;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredTLSAddr(MachineInstr &MI,
                                      MachineBasicBlock *BB) const {
  // So, here we replace TLSADDR with the sequence:
  // adjust_stackdown -> TLSADDR -> adjust_stackup.
  // We need this because TLSADDR is lowered into calls
  // inside MC, therefore without the two markers shrink-wrapping
  // may push the prologue/epilogue past them.
  const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction &MF = *BB->getParent();

  // Emit CALLSEQ_START right before the instruction.
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  MachineInstrBuilder CallseqStart =
      BuildMI(MF, DL, TII.get(AdjStackDown)).addImm(0).addImm(0).addImm(0);
  BB->insert(MachineBasicBlock::iterator(MI), CallseqStart);

  // Emit CALLSEQ_END right after the instruction.
  // We don't call erase from parent because we want to keep the
  // original instruction around.
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  MachineInstrBuilder CallseqEnd =
      BuildMI(MF, DL, TII.get(AdjStackUp)).addImm(0).addImm(0);
  BB->insertAfter(MachineBasicBlock::iterator(MI), CallseqEnd);

  return BB;
}

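// For reference, on x86-64 ELF the TLSADDR pseudo later expands in MC to the
// General Dynamic TLS call sequence, approximately:
//
//     leaq  sym@TLSGD(%rip), %rdi
//     callq __tls_get_addr@PLT
//
// which is why it must be bracketed by the CALLSEQ markers above, like any
// other call.
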
32130 MachineBasicBlock *
32131 X86TargetLowering::EmitLoweredTLSCall(MachineInstr &MI,
32132 MachineBasicBlock *BB) const {
32133 // This is pretty easy. We're taking the value that we received from
32134 // our load from the relocation, sticking it in either RDI (x86-64)
32135 // or EAX and doing an indirect call. The return value will then
32136 // be in the normal return register.
32137 MachineFunction *F = BB->getParent();
32138 const X86InstrInfo *TII = Subtarget.getInstrInfo();
32139 DebugLoc DL = MI.getDebugLoc();
32141 assert(Subtarget.isTargetDarwin() && "Darwin only instr emitted?");
32142 assert(MI.getOperand(3).isGlobal() && "This should be a global");
32144 // Get a register mask for the lowered call.
32145 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
32146 // proper register mask.
32147 const uint32_t *RegMask =
32148 Subtarget.is64Bit() ?
32149 Subtarget.getRegisterInfo()->getDarwinTLSCallPreservedMask() :
32150 Subtarget.getRegisterInfo()->getCallPreservedMask(*F, CallingConv::C);
32151 if (Subtarget.is64Bit()) {
32152 MachineInstrBuilder MIB =
32153 BuildMI(*BB, MI, DL, TII->get(X86::MOV64rm), X86::RDI)
32157 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
32158 MI.getOperand(3).getTargetFlags())
32160 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
32161 addDirectMem(MIB, X86::RDI);
32162 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
32163 } else if (!isPositionIndependent()) {
32164 MachineInstrBuilder MIB =
32165 BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
32169 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
32170 MI.getOperand(3).getTargetFlags())
32172 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
32173 addDirectMem(MIB, X86::EAX);
32174 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
32176 MachineInstrBuilder MIB =
32177 BuildMI(*BB, MI, DL, TII->get(X86::MOV32rm), X86::EAX)
32178 .addReg(TII->getGlobalBaseReg(F))
32181 .addGlobalAddress(MI.getOperand(3).getGlobal(), 0,
32182 MI.getOperand(3).getTargetFlags())
32184 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
32185 addDirectMem(MIB, X86::EAX);
32186 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
32187 }
32189 MI.eraseFromParent(); // The pseudo instruction is gone now.
32190 return BB;
32191 }
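// Editor's note (illustrative, not original source): on Darwin the sequence
// built above corresponds to assembly of the form
//
//   movq _var@TLVP(%rip), %rdi
//   callq *(%rdi)            ; result pointer returned in %rax
//
// i.e. the first word of the TLV descriptor is a function pointer that is
// called with the descriptor's own address in the argument register.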
32193 static unsigned getOpcodeForIndirectThunk(unsigned RPOpc) {
32194 switch (RPOpc) {
32195 case X86::INDIRECT_THUNK_CALL32:
32196 return X86::CALLpcrel32;
32197 case X86::INDIRECT_THUNK_CALL64:
32198 return X86::CALL64pcrel32;
32199 case X86::INDIRECT_THUNK_TCRETURN32:
32200 return X86::TCRETURNdi;
32201 case X86::INDIRECT_THUNK_TCRETURN64:
32202 return X86::TCRETURNdi64;
32203 }
32204 llvm_unreachable("not indirect thunk opcode");
32205 }
32207 static const char *getIndirectThunkSymbol(const X86Subtarget &Subtarget,
32208 unsigned Reg) {
32209 if (Subtarget.useRetpolineExternalThunk()) {
32210 // When using an external thunk for retpolines, we pick names that match the
32211 // names GCC happens to use as well. This helps simplify the implementation
32212 // of the thunks for kernels where they have no easy ability to create
32213 // aliases and are doing non-trivial configuration of the thunk's body. For
32214 // example, the Linux kernel will do boot-time hot patching of the thunk
32215 // bodies and cannot easily export aliases of these to loaded modules.
32217 // Note that at any point in the future, we may need to change the semantics
32218 // of how we implement retpolines and at that time will likely change the
32219 // name of the called thunk. Essentially, there is no hard guarantee that
32220 // LLVM will generate calls to specific thunks, we merely make a best-effort
32221 // attempt to help out kernels and other systems where duplicating the
32222 // thunks is costly.
32223 switch (Reg) {
32224 case X86::EAX:
32225 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
32226 return "__x86_indirect_thunk_eax";
32227 case X86::ECX:
32228 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
32229 return "__x86_indirect_thunk_ecx";
32230 case X86::EDX:
32231 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
32232 return "__x86_indirect_thunk_edx";
32233 case X86::EDI:
32234 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
32235 return "__x86_indirect_thunk_edi";
32236 case X86::R11:
32237 assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
32238 return "__x86_indirect_thunk_r11";
32239 }
32240 llvm_unreachable("unexpected reg for external indirect thunk");
32241 }
32243 if (Subtarget.useRetpolineIndirectCalls() ||
32244 Subtarget.useRetpolineIndirectBranches()) {
32245 // When targeting an internal COMDAT thunk, use an LLVM-specific name.
32246 switch (Reg) {
32247 case X86::EAX:
32248 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
32249 return "__llvm_retpoline_eax";
32250 case X86::ECX:
32251 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
32252 return "__llvm_retpoline_ecx";
32253 case X86::EDX:
32254 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
32255 return "__llvm_retpoline_edx";
32256 case X86::EDI:
32257 assert(!Subtarget.is64Bit() && "Should not be using a 32-bit thunk!");
32258 return "__llvm_retpoline_edi";
32259 case X86::R11:
32260 assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
32261 return "__llvm_retpoline_r11";
32262 }
32263 llvm_unreachable("unexpected reg for retpoline");
32264 }
32266 if (Subtarget.useLVIControlFlowIntegrity()) {
32267 assert(Subtarget.is64Bit() && "Should not be using a 64-bit thunk!");
32268 return "__llvm_lvi_thunk_r11";
32269 }
32270 llvm_unreachable("getIndirectThunkSymbol() invoked without thunk feature");
32271 }
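// Editor's note (illustrative sketch; assumes the standard retpoline pattern,
// since the thunk bodies themselves are emitted elsewhere, by the compiler's
// thunk inserter or by the kernel): a 64-bit retpoline thunk typically reads
//
//   __llvm_retpoline_r11:
//     callq .Lplt
//   .Lspec:                ; speculation trap
//     pause
//     lfence
//     jmp .Lspec
//   .Lplt:
//     movq %r11, (%rsp)    ; overwrite return address with the real target
//     retq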
32273 MachineBasicBlock *
32274 X86TargetLowering::EmitLoweredIndirectThunk(MachineInstr &MI,
32275 MachineBasicBlock *BB) const {
32276 // Copy the virtual register into the R11 physical register and
32277 // call the retpoline thunk.
32278 DebugLoc DL = MI.getDebugLoc();
32279 const X86InstrInfo *TII = Subtarget.getInstrInfo();
32280 Register CalleeVReg = MI.getOperand(0).getReg();
32281 unsigned Opc = getOpcodeForIndirectThunk(MI.getOpcode());
32283 // Find an available scratch register to hold the callee. On 64-bit, we can
32284 // just use R11, but we scan for uses anyway to ensure we don't generate
32285 // incorrect code. On 32-bit, we use one of EAX, ECX, or EDX that isn't
32286 // already a register use operand to the call to hold the callee. If none
32287 // are available, use EDI instead. EDI is chosen because EBX is the PIC base
32288 // register and ESI is the base pointer to realigned stack frames with VLAs.
32289 SmallVector<unsigned, 3> AvailableRegs;
32290 if (Subtarget.is64Bit())
32291 AvailableRegs.push_back(X86::R11);
32292 else
32293 AvailableRegs.append({X86::EAX, X86::ECX, X86::EDX, X86::EDI});
32295 // Zero out any registers that are already used.
32296 for (const auto &MO : MI.operands()) {
32297 if (MO.isReg() && MO.isUse())
32298 for (unsigned &Reg : AvailableRegs)
32299 if (Reg == MO.getReg())
32300 Reg = 0;
32301 }
32303 // Choose the first remaining non-zero available register.
32304 unsigned AvailableReg = 0;
32305 for (unsigned MaybeReg : AvailableRegs) {
32306 if (MaybeReg) {
32307 AvailableReg = MaybeReg;
32308 break;
32309 }
32310 }
32311 if (!AvailableReg)
32312 report_fatal_error("calling convention incompatible with retpoline, no "
32313 "available registers");
32315 const char *Symbol = getIndirectThunkSymbol(Subtarget, AvailableReg);
32317 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), AvailableReg)
32318 .addReg(CalleeVReg);
32319 MI.getOperand(0).ChangeToES(Symbol);
32320 MI.setDesc(TII->get(Opc));
32321 MachineInstrBuilder(*BB->getParent(), &MI)
32322 .addReg(AvailableReg, RegState::Implicit | RegState::Kill);
32324 return BB;
32325 }
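// Editor's note (illustrative): after this rewrite, e.g. an
// INDIRECT_THUNK_CALL64 of a virtual register becomes
//
//   $r11 = COPY %callee
//   CALL64pcrel32 @__llvm_retpoline_r11, implicit killed $r11
//
// (the exact register and symbol depend on the subtarget checks above).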
32326 /// SetJmp implies future control flow change upon calling the corresponding
32327 /// function.
32328 /// Instead of using the 'return' instruction, the long jump fixes the stack and
32329 /// performs an indirect branch. To do so it uses the registers that were stored
32330 /// in the jump buffer (when calling SetJmp).
32331 /// In case the shadow stack is enabled we need to fix it as well, because some
32332 /// return addresses will be skipped.
32333 /// The function will save the SSP for future fixing in the function
32334 /// emitLongJmpShadowStackFix.
32335 /// \sa emitLongJmpShadowStackFix
32336 /// \param [in] MI The temporary Machine Instruction for the builtin.
32337 /// \param [in] MBB The Machine Basic Block that will be modified.
32338 void X86TargetLowering::emitSetJmpShadowStackFix(MachineInstr &MI,
32339 MachineBasicBlock *MBB) const {
32340 DebugLoc DL = MI.getDebugLoc();
32341 MachineFunction *MF = MBB->getParent();
32342 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
32343 MachineRegisterInfo &MRI = MF->getRegInfo();
32344 MachineInstrBuilder MIB;
32346 // Memory Reference.
32347 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
32348 MI.memoperands_end());
32350 // Initialize a register with zero.
32351 MVT PVT = getPointerTy(MF->getDataLayout());
32352 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
32353 Register ZReg = MRI.createVirtualRegister(PtrRC);
32354 unsigned XorRROpc = (PVT == MVT::i64) ? X86::XOR64rr : X86::XOR32rr;
32355 BuildMI(*MBB, MI, DL, TII->get(XorRROpc))
32356 .addDef(ZReg)
32357 .addReg(ZReg, RegState::Undef)
32358 .addReg(ZReg, RegState::Undef);
32360 // Read the current SSP Register value to the zeroed register.
32361 Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
32362 unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
32363 BuildMI(*MBB, MI, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
32365 // Write the SSP register value to offset 3 in input memory buffer.
32366 unsigned PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
32367 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrStoreOpc));
32368 const int64_t SSPOffset = 3 * PVT.getStoreSize();
32369 const unsigned MemOpndSlot = 1;
32370 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
32371 if (i == X86::AddrDisp)
32372 MIB.addDisp(MI.getOperand(MemOpndSlot + i), SSPOffset);
32373 else
32374 MIB.add(MI.getOperand(MemOpndSlot + i));
32375 }
32376 MIB.addReg(SSPCopyReg);
32377 MIB.setMemRefs(MMOs);
32378 }
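// Editor's note: as used above and in emitEHSjLjLongJmp below, the jump
// buffer is addressed in pointer-sized slots:
//   buf[0] = frame pointer, buf[1] = resume label (IP),
//   buf[2] = stack pointer, buf[3] = shadow stack pointer
// (the last slot is written only under "cf-protection-return").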
32380 MachineBasicBlock *
32381 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
32382 MachineBasicBlock *MBB) const {
32383 DebugLoc DL = MI.getDebugLoc();
32384 MachineFunction *MF = MBB->getParent();
32385 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
32386 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
32387 MachineRegisterInfo &MRI = MF->getRegInfo();
32389 const BasicBlock *BB = MBB->getBasicBlock();
32390 MachineFunction::iterator I = ++MBB->getIterator();
32392 // Memory Reference
32393 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
32394 MI.memoperands_end());
32396 unsigned DstReg;
32397 unsigned MemOpndSlot = 0;
32399 unsigned CurOp = 0;
32401 DstReg = MI.getOperand(CurOp++).getReg();
32402 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
32403 assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
32405 Register mainDstReg = MRI.createVirtualRegister(RC);
32406 Register restoreDstReg = MRI.createVirtualRegister(RC);
32408 MemOpndSlot = CurOp;
32410 MVT PVT = getPointerTy(MF->getDataLayout());
32411 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
32412 "Invalid Pointer Size!");
32414 // For v = setjmp(buf), we generate
32415 //
32416 // thisMBB:
32417 // buf[LabelOffset] = restoreMBB <-- takes address of restoreMBB
32418 // SjLjSetup restoreMBB
32419 //
32420 // mainMBB:
32421 // v_main = 0
32422 //
32423 // sinkMBB:
32424 // v = phi(main, restore)
32425 //
32426 // restoreMBB:
32427 // if base pointer being used, load it from frame
32428 // v_restore = 1
32430 MachineBasicBlock *thisMBB = MBB;
32431 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
32432 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
32433 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
32434 MF->insert(I, mainMBB);
32435 MF->insert(I, sinkMBB);
32436 MF->push_back(restoreMBB);
32437 restoreMBB->setHasAddressTaken();
32439 MachineInstrBuilder MIB;
32441 // Transfer the remainder of BB and its successor edges to sinkMBB.
32442 sinkMBB->splice(sinkMBB->begin(), MBB,
32443 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
32444 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
32447 unsigned PtrStoreOpc = 0;
32448 unsigned LabelReg = 0;
32449 const int64_t LabelOffset = 1 * PVT.getStoreSize();
32450 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
32451 !isPositionIndependent();
32453 // Prepare IP either in reg or imm.
32454 if (!UseImmLabel) {
32455 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
32456 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
32457 LabelReg = MRI.createVirtualRegister(PtrRC);
32458 if (Subtarget.is64Bit()) {
32459 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
32460 .addReg(X86::RIP)
32461 .addImm(0)
32462 .addReg(0)
32463 .addMBB(restoreMBB)
32464 .addReg(0);
32465 } else {
32466 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
32467 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
32468 .addReg(XII->getGlobalBaseReg(MF))
32469 .addImm(0)
32470 .addReg(0)
32471 .addMBB(restoreMBB, Subtarget.classifyBlockAddressReference())
32472 .addReg(0);
32473 }
32474 } else
32475 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
32477 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
32478 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
32479 if (i == X86::AddrDisp)
32480 MIB.addDisp(MI.getOperand(MemOpndSlot + i), LabelOffset);
32481 else
32482 MIB.add(MI.getOperand(MemOpndSlot + i));
32483 }
32484 if (!UseImmLabel)
32485 MIB.addReg(LabelReg);
32486 else
32487 MIB.addMBB(restoreMBB);
32488 MIB.setMemRefs(MMOs);
32490 if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
32491 emitSetJmpShadowStackFix(MI, thisMBB);
32492 }
32495 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
32496 .addMBB(restoreMBB);
32498 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
32499 MIB.addRegMask(RegInfo->getNoPreservedMask());
32500 thisMBB->addSuccessor(mainMBB);
32501 thisMBB->addSuccessor(restoreMBB);
32505 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
32506 mainMBB->addSuccessor(sinkMBB);
32509 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
32510 TII->get(X86::PHI), DstReg)
32511 .addReg(mainDstReg).addMBB(mainMBB)
32512 .addReg(restoreDstReg).addMBB(restoreMBB);
32515 if (RegInfo->hasBasePointer(*MF)) {
32516 const bool Uses64BitFramePtr =
32517 Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
32518 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
32519 X86FI->setRestoreBasePointer(MF);
32520 Register FramePtr = RegInfo->getFrameRegister(*MF);
32521 Register BasePtr = RegInfo->getBaseRegister();
32522 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
32523 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
32524 FramePtr, true, X86FI->getRestoreBasePointerOffset())
32525 .setMIFlag(MachineInstr::FrameSetup);
32526 }
32527 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
32528 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
32529 restoreMBB->addSuccessor(sinkMBB);
32531 MI.eraseFromParent();
32532 return sinkMBB;
32533 }
32535 /// Fix the shadow stack using the previously saved SSP pointer.
32536 /// \sa emitSetJmpShadowStackFix
32537 /// \param [in] MI The temporary Machine Instruction for the builtin.
32538 /// \param [in] MBB The Machine Basic Block that will be modified.
32539 /// \return The sink MBB that will perform the future indirect branch.
32540 MachineBasicBlock *
32541 X86TargetLowering::emitLongJmpShadowStackFix(MachineInstr &MI,
32542 MachineBasicBlock *MBB) const {
32543 DebugLoc DL = MI.getDebugLoc();
32544 MachineFunction *MF = MBB->getParent();
32545 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
32546 MachineRegisterInfo &MRI = MF->getRegInfo();
32548 // Memory Reference
32549 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
32550 MI.memoperands_end());
32552 MVT PVT = getPointerTy(MF->getDataLayout());
32553 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
32555 // checkSspMBB:
32556 // xor vreg1, vreg1
32557 // rdssp vreg1
32558 // test vreg1, vreg1
32559 // je sinkMBB # Jump if Shadow Stack is not supported
32560 // fallMBB:
32561 // mov buf+24/12(%rip), vreg2
32562 // sub vreg1, vreg2
32563 // jbe sinkMBB # No need to fix the Shadow Stack
32564 // fixShadowMBB:
32565 // shr 3/2, vreg2
32566 // incssp vreg2 # fix the SSP according to the lower 8 bits
32567 // shr 8, vreg2
32568 // je sinkMBB
32569 // fixShadowLoopPrepareMBB:
32570 // shl vreg2
32571 // mov 128, vreg3
32572 // fixShadowLoopMBB:
32573 // incssp vreg3
32574 // dec vreg2
32575 // jne fixShadowLoopMBB # Iterate until you finish fixing
32576 // # the Shadow Stack
32577 // sinkMBB:
32579 MachineFunction::iterator I = ++MBB->getIterator();
32580 const BasicBlock *BB = MBB->getBasicBlock();
32582 MachineBasicBlock *checkSspMBB = MF->CreateMachineBasicBlock(BB);
32583 MachineBasicBlock *fallMBB = MF->CreateMachineBasicBlock(BB);
32584 MachineBasicBlock *fixShadowMBB = MF->CreateMachineBasicBlock(BB);
32585 MachineBasicBlock *fixShadowLoopPrepareMBB = MF->CreateMachineBasicBlock(BB);
32586 MachineBasicBlock *fixShadowLoopMBB = MF->CreateMachineBasicBlock(BB);
32587 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
32588 MF->insert(I, checkSspMBB);
32589 MF->insert(I, fallMBB);
32590 MF->insert(I, fixShadowMBB);
32591 MF->insert(I, fixShadowLoopPrepareMBB);
32592 MF->insert(I, fixShadowLoopMBB);
32593 MF->insert(I, sinkMBB);
32595 // Transfer the remainder of BB and its successor edges to sinkMBB.
32596 sinkMBB->splice(sinkMBB->begin(), MBB, MachineBasicBlock::iterator(MI),
32598 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
32600 MBB->addSuccessor(checkSspMBB);
32602 // Initialize a register with zero.
32603 Register ZReg = MRI.createVirtualRegister(&X86::GR32RegClass);
32604 BuildMI(checkSspMBB, DL, TII->get(X86::MOV32r0), ZReg);
32606 if (PVT == MVT::i64) {
32607 Register TmpZReg = MRI.createVirtualRegister(PtrRC);
32608 BuildMI(checkSspMBB, DL, TII->get(X86::SUBREG_TO_REG), TmpZReg)
32609 .addImm(0)
32610 .addReg(ZReg)
32611 .addImm(X86::sub_32bit);
32612 ZReg = TmpZReg;
32613 }
32615 // Read the current SSP Register value to the zeroed register.
32616 Register SSPCopyReg = MRI.createVirtualRegister(PtrRC);
32617 unsigned RdsspOpc = (PVT == MVT::i64) ? X86::RDSSPQ : X86::RDSSPD;
32618 BuildMI(checkSspMBB, DL, TII->get(RdsspOpc), SSPCopyReg).addReg(ZReg);
32620 // Check whether the result of the SSP register is zero and jump directly
32621 // to the sink.
32622 unsigned TestRROpc = (PVT == MVT::i64) ? X86::TEST64rr : X86::TEST32rr;
32623 BuildMI(checkSspMBB, DL, TII->get(TestRROpc))
32624 .addReg(SSPCopyReg)
32625 .addReg(SSPCopyReg);
32626 BuildMI(checkSspMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
32627 checkSspMBB->addSuccessor(sinkMBB);
32628 checkSspMBB->addSuccessor(fallMBB);
32630 // Reload the previously saved SSP register value.
32631 Register PrevSSPReg = MRI.createVirtualRegister(PtrRC);
32632 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
32633 const int64_t SPPOffset = 3 * PVT.getStoreSize();
32634 MachineInstrBuilder MIB =
32635 BuildMI(fallMBB, DL, TII->get(PtrLoadOpc), PrevSSPReg);
32636 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
32637 const MachineOperand &MO = MI.getOperand(i);
32638 if (i == X86::AddrDisp)
32639 MIB.addDisp(MO, SPPOffset);
32640 else if (MO.isReg()) // Don't add the whole operand, we don't want to
32641 // preserve kill flags.
32642 MIB.addReg(MO.getReg());
32643 else
32644 MIB.add(MO);
32645 }
32646 MIB.setMemRefs(MMOs);
32648 // Subtract the current SSP from the previous SSP.
32649 Register SspSubReg = MRI.createVirtualRegister(PtrRC);
32650 unsigned SubRROpc = (PVT == MVT::i64) ? X86::SUB64rr : X86::SUB32rr;
32651 BuildMI(fallMBB, DL, TII->get(SubRROpc), SspSubReg)
32652 .addReg(PrevSSPReg)
32653 .addReg(SSPCopyReg);
32655 // Jump to sink in case PrevSSPReg <= SSPCopyReg.
32656 BuildMI(fallMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_BE);
32657 fallMBB->addSuccessor(sinkMBB);
32658 fallMBB->addSuccessor(fixShadowMBB);
32660 // Shift right by 2/3 for 32/64 because incssp multiplies the argument by 4/8.
32661 unsigned ShrRIOpc = (PVT == MVT::i64) ? X86::SHR64ri : X86::SHR32ri;
32662 unsigned Offset = (PVT == MVT::i64) ? 3 : 2;
32663 Register SspFirstShrReg = MRI.createVirtualRegister(PtrRC);
32664 BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspFirstShrReg)
32665 .addReg(SspSubReg)
32666 .addImm(Offset);
32668 // Increase SSP when looking only on the lower 8 bits of the delta.
32669 unsigned IncsspOpc = (PVT == MVT::i64) ? X86::INCSSPQ : X86::INCSSPD;
32670 BuildMI(fixShadowMBB, DL, TII->get(IncsspOpc)).addReg(SspFirstShrReg);
32672 // Reset the lower 8 bits.
32673 Register SspSecondShrReg = MRI.createVirtualRegister(PtrRC);
32674 BuildMI(fixShadowMBB, DL, TII->get(ShrRIOpc), SspSecondShrReg)
32675 .addReg(SspFirstShrReg)
32676 .addImm(8);
32678 // Jump if the result of the shift is zero.
32679 BuildMI(fixShadowMBB, DL, TII->get(X86::JCC_1)).addMBB(sinkMBB).addImm(X86::COND_E);
32680 fixShadowMBB->addSuccessor(sinkMBB);
32681 fixShadowMBB->addSuccessor(fixShadowLoopPrepareMBB);
32683 // Do a single shift left.
32684 unsigned ShlR1Opc = (PVT == MVT::i64) ? X86::SHL64r1 : X86::SHL32r1;
32685 Register SspAfterShlReg = MRI.createVirtualRegister(PtrRC);
32686 BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(ShlR1Opc), SspAfterShlReg)
32687 .addReg(SspSecondShrReg);
32689 // Save the value 128 to a register (will be used next with incssp).
32690 Register Value128InReg = MRI.createVirtualRegister(PtrRC);
32691 unsigned MovRIOpc = (PVT == MVT::i64) ? X86::MOV64ri32 : X86::MOV32ri;
32692 BuildMI(fixShadowLoopPrepareMBB, DL, TII->get(MovRIOpc), Value128InReg)
32693 .addImm(128);
32694 fixShadowLoopPrepareMBB->addSuccessor(fixShadowLoopMBB);
32696 // Since incssp only looks at the lower 8 bits, we might need to do several
32697 // iterations of incssp until we finish fixing the shadow stack.
32698 Register DecReg = MRI.createVirtualRegister(PtrRC);
32699 Register CounterReg = MRI.createVirtualRegister(PtrRC);
32700 BuildMI(fixShadowLoopMBB, DL, TII->get(X86::PHI), CounterReg)
32701 .addReg(SspAfterShlReg)
32702 .addMBB(fixShadowLoopPrepareMBB)
32703 .addReg(DecReg)
32704 .addMBB(fixShadowLoopMBB);
32706 // Every iteration we increase the SSP by 128.
32707 BuildMI(fixShadowLoopMBB, DL, TII->get(IncsspOpc)).addReg(Value128InReg);
32709 // Every iteration we decrement the counter by 1.
32710 unsigned DecROpc = (PVT == MVT::i64) ? X86::DEC64r : X86::DEC32r;
32711 BuildMI(fixShadowLoopMBB, DL, TII->get(DecROpc), DecReg).addReg(CounterReg);
32713 // Jump if the counter is not zero yet.
32714 BuildMI(fixShadowLoopMBB, DL, TII->get(X86::JCC_1)).addMBB(fixShadowLoopMBB).addImm(X86::COND_NE);
32715 fixShadowLoopMBB->addSuccessor(sinkMBB);
32716 fixShadowLoopMBB->addSuccessor(fixShadowLoopMBB);
32718 return sinkMBB;
32719 }
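// Editor's note (worked example of the arithmetic above): on x86-64, if the
// longjmp skips 300 return addresses, the SSP delta is 300*8 bytes, so after
// the first shift vreg2 = delta >> 3 = 300 slots. INCSSPQ only honours the
// low 8 bits of its operand, so the first incssp advances 300 & 255 = 44
// slots; the remaining 300 >> 8 = 1 block of 256 slots is handled by the
// loop, which runs (300 >> 8) << 1 = 2 iterations of "incssp 128":
// 44 + 2*128 = 300.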
32721 MachineBasicBlock *
32722 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
32723 MachineBasicBlock *MBB) const {
32724 DebugLoc DL = MI.getDebugLoc();
32725 MachineFunction *MF = MBB->getParent();
32726 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
32727 MachineRegisterInfo &MRI = MF->getRegInfo();
32729 // Memory Reference
32730 SmallVector<MachineMemOperand *, 2> MMOs(MI.memoperands_begin(),
32731 MI.memoperands_end());
32733 MVT PVT = getPointerTy(MF->getDataLayout());
32734 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
32735 "Invalid Pointer Size!");
32737 const TargetRegisterClass *RC =
32738 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
32739 Register Tmp = MRI.createVirtualRegister(RC);
32740 // Since FP is only updated here but NOT referenced, it's treated as GPR.
32741 const X86RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
32742 Register FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
32743 Register SP = RegInfo->getStackRegister();
32745 MachineInstrBuilder MIB;
32747 const int64_t LabelOffset = 1 * PVT.getStoreSize();
32748 const int64_t SPOffset = 2 * PVT.getStoreSize();
32750 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
32751 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
32753 MachineBasicBlock *thisMBB = MBB;
32755 // When CET and shadow stack is enabled, we need to fix the Shadow Stack.
32756 if (MF->getMMI().getModule()->getModuleFlag("cf-protection-return")) {
32757 thisMBB = emitLongJmpShadowStackFix(MI, thisMBB);
32758 }
32760 // Reload FP
32761 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), FP);
32762 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
32763 const MachineOperand &MO = MI.getOperand(i);
32764 if (MO.isReg()) // Don't add the whole operand, we don't want to
32765 // preserve kill flags.
32766 MIB.addReg(MO.getReg());
32767 else
32768 MIB.add(MO);
32769 }
32770 MIB.setMemRefs(MMOs);
32772 // Reload IP
32773 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
32774 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
32775 const MachineOperand &MO = MI.getOperand(i);
32776 if (i == X86::AddrDisp)
32777 MIB.addDisp(MO, LabelOffset);
32778 else if (MO.isReg()) // Don't add the whole operand, we don't want to
32779 // preserve kill flags.
32780 MIB.addReg(MO.getReg());
32781 else
32782 MIB.add(MO);
32783 }
32784 MIB.setMemRefs(MMOs);
32786 // Reload SP
32787 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrLoadOpc), SP);
32788 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
32789 if (i == X86::AddrDisp)
32790 MIB.addDisp(MI.getOperand(i), SPOffset);
32791 else
32792 MIB.add(MI.getOperand(i)); // We can preserve the kill flags here, it's
32793 // the last instruction of the expansion.
32794 }
32795 MIB.setMemRefs(MMOs);
32797 // Jump
32798 BuildMI(*thisMBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
32800 MI.eraseFromParent();
32801 return thisMBB;
32802 }
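// Editor's note: kill flags are deliberately dropped on the FP and IP
// reloads above because the same address operands are reused by the later
// loads; only the final SP reload may keep them, as its inline comment
// notes, since it is the last instruction of the expansion.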
32804 void X86TargetLowering::SetupEntryBlockForSjLj(MachineInstr &MI,
32805 MachineBasicBlock *MBB,
32806 MachineBasicBlock *DispatchBB,
32807 int FI) const {
32808 DebugLoc DL = MI.getDebugLoc();
32809 MachineFunction *MF = MBB->getParent();
32810 MachineRegisterInfo *MRI = &MF->getRegInfo();
32811 const X86InstrInfo *TII = Subtarget.getInstrInfo();
32813 MVT PVT = getPointerTy(MF->getDataLayout());
32814 assert((PVT == MVT::i64 || PVT == MVT::i32) && "Invalid Pointer Size!");
32817 unsigned Op = 0;
32818 unsigned VR = 0;
32819 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
32820 !isPositionIndependent();
32821 if (UseImmLabel) {
32823 Op = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
32824 } else {
32825 const TargetRegisterClass *TRC =
32826 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
32827 VR = MRI->createVirtualRegister(TRC);
32828 Op = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
32830 if (Subtarget.is64Bit())
32831 BuildMI(*MBB, MI, DL, TII->get(X86::LEA64r), VR)
32832 .addReg(X86::RIP)
32833 .addImm(1)
32834 .addReg(0)
32835 .addMBB(DispatchBB)
32836 .addReg(0);
32837 else
32838 BuildMI(*MBB, MI, DL, TII->get(X86::LEA32r), VR)
32839 .addReg(0) /* TII->getGlobalBaseReg(MF) */
32840 .addImm(1)
32841 .addReg(0)
32842 .addMBB(DispatchBB, Subtarget.classifyBlockAddressReference())
32843 .addReg(0);
32844 }
32846 MachineInstrBuilder MIB = BuildMI(*MBB, MI, DL, TII->get(Op));
32847 addFrameReference(MIB, FI, Subtarget.is64Bit() ? 56 : 36);
32848 if (UseImmLabel)
32849 MIB.addMBB(DispatchBB);
32850 else
32851 MIB.addReg(VR);
32852 }
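// Editor's note (based on the SjLj function-context layout created by
// SjLjEHPrepare: { __prev, call_site, __data[4], __personality, __lsda,
// __jbuf[5] }): the 56/36 above is the byte offset of __jbuf[1] inside that
// context (64-bit: __jbuf at 48, plus one 8-byte slot; 32-bit: __jbuf at 32,
// plus one 4-byte slot). Likewise the 8/4 offset read in
// EmitSjLjDispatchBlock below is the call_site field.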
32854 MachineBasicBlock *
32855 X86TargetLowering::EmitSjLjDispatchBlock(MachineInstr &MI,
32856 MachineBasicBlock *BB) const {
32857 DebugLoc DL = MI.getDebugLoc();
32858 MachineFunction *MF = BB->getParent();
32859 MachineRegisterInfo *MRI = &MF->getRegInfo();
32860 const X86InstrInfo *TII = Subtarget.getInstrInfo();
32861 int FI = MF->getFrameInfo().getFunctionContextIndex();
32863 // Get a mapping of the call site numbers to all of the landing pads they're
32864 // associated with.
32865 DenseMap<unsigned, SmallVector<MachineBasicBlock *, 2>> CallSiteNumToLPad;
32866 unsigned MaxCSNum = 0;
32867 for (auto &MBB : *MF) {
32868 if (!MBB.isEHPad())
32869 continue;
32871 MCSymbol *Sym = nullptr;
32872 for (const auto &MI : MBB) {
32873 if (MI.isDebugInstr())
32874 continue;
32876 assert(MI.isEHLabel() && "expected EH_LABEL");
32877 Sym = MI.getOperand(0).getMCSymbol();
32878 break;
32879 }
32881 if (!MF->hasCallSiteLandingPad(Sym))
32882 continue;
32884 for (unsigned CSI : MF->getCallSiteLandingPad(Sym)) {
32885 CallSiteNumToLPad[CSI].push_back(&MBB);
32886 MaxCSNum = std::max(MaxCSNum, CSI);
32887 }
32888 }
32890 // Get an ordered list of the machine basic blocks for the jump table.
32891 std::vector<MachineBasicBlock *> LPadList;
32892 SmallPtrSet<MachineBasicBlock *, 32> InvokeBBs;
32893 LPadList.reserve(CallSiteNumToLPad.size());
32895 for (unsigned CSI = 1; CSI <= MaxCSNum; ++CSI) {
32896 for (auto &LP : CallSiteNumToLPad[CSI]) {
32897 LPadList.push_back(LP);
32898 InvokeBBs.insert(LP->pred_begin(), LP->pred_end());
32899 }
32900 }
32902 assert(!LPadList.empty() &&
32903 "No landing pad destinations for the dispatch jump table!");
32905 // Create the MBBs for the dispatch code.
32907 // Shove the dispatch's address into the return slot in the function context.
32908 MachineBasicBlock *DispatchBB = MF->CreateMachineBasicBlock();
32909 DispatchBB->setIsEHPad(true);
32911 MachineBasicBlock *TrapBB = MF->CreateMachineBasicBlock();
32912 BuildMI(TrapBB, DL, TII->get(X86::TRAP));
32913 DispatchBB->addSuccessor(TrapBB);
32915 MachineBasicBlock *DispContBB = MF->CreateMachineBasicBlock();
32916 DispatchBB->addSuccessor(DispContBB);
32919 MF->push_back(DispatchBB);
32920 MF->push_back(DispContBB);
32921 MF->push_back(TrapBB);
32923 // Insert code into the entry block that creates and registers the function
32924 // context.
32925 SetupEntryBlockForSjLj(MI, BB, DispatchBB, FI);
32927 // Create the jump table and associated information
32928 unsigned JTE = getJumpTableEncoding();
32929 MachineJumpTableInfo *JTI = MF->getOrCreateJumpTableInfo(JTE);
32930 unsigned MJTI = JTI->createJumpTableIndex(LPadList);
32932 const X86RegisterInfo &RI = TII->getRegisterInfo();
32933 // Add a register mask with no preserved registers. This results in all
32934 // registers being marked as clobbered.
32935 if (RI.hasBasePointer(*MF)) {
32936 const bool FPIs64Bit =
32937 Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
32938 X86MachineFunctionInfo *MFI = MF->getInfo<X86MachineFunctionInfo>();
32939 MFI->setRestoreBasePointer(MF);
32941 Register FP = RI.getFrameRegister(*MF);
32942 Register BP = RI.getBaseRegister();
32943 unsigned Op = FPIs64Bit ? X86::MOV64rm : X86::MOV32rm;
32944 addRegOffset(BuildMI(DispatchBB, DL, TII->get(Op), BP), FP, true,
32945 MFI->getRestoreBasePointerOffset())
32946 .addRegMask(RI.getNoPreservedMask());
32947 } else {
32948 BuildMI(DispatchBB, DL, TII->get(X86::NOOP))
32949 .addRegMask(RI.getNoPreservedMask());
32950 }
32952 // IReg is used as an index in a memory operand and therefore can't be SP
32953 Register IReg = MRI->createVirtualRegister(&X86::GR32_NOSPRegClass);
32954 addFrameReference(BuildMI(DispatchBB, DL, TII->get(X86::MOV32rm), IReg), FI,
32955 Subtarget.is64Bit() ? 8 : 4);
32956 BuildMI(DispatchBB, DL, TII->get(X86::CMP32ri))
32957 .addReg(IReg)
32958 .addImm(LPadList.size());
32959 BuildMI(DispatchBB, DL, TII->get(X86::JCC_1)).addMBB(TrapBB).addImm(X86::COND_AE);
32961 if (Subtarget.is64Bit()) {
32962 Register BReg = MRI->createVirtualRegister(&X86::GR64RegClass);
32963 Register IReg64 = MRI->createVirtualRegister(&X86::GR64_NOSPRegClass);
32965 // leaq .LJTI0_0(%rip), BReg
32966 BuildMI(DispContBB, DL, TII->get(X86::LEA64r), BReg)
32967 .addReg(X86::RIP)
32968 .addImm(1)
32969 .addReg(0)
32970 .addJumpTableIndex(MJTI)
32971 .addReg(0);
32972 // movzx IReg64, IReg
32973 BuildMI(DispContBB, DL, TII->get(TargetOpcode::SUBREG_TO_REG), IReg64)
32974 .addImm(0)
32975 .addReg(IReg)
32976 .addImm(X86::sub_32bit);
32978 switch (JTE) {
32979 case MachineJumpTableInfo::EK_BlockAddress:
32980 // jmpq *(BReg,IReg64,8)
32981 BuildMI(DispContBB, DL, TII->get(X86::JMP64m))
32982 .addReg(BReg)
32983 .addImm(8)
32984 .addReg(IReg64)
32985 .addImm(0)
32986 .addReg(0);
32987 break;
32988 case MachineJumpTableInfo::EK_LabelDifference32: {
32989 Register OReg = MRI->createVirtualRegister(&X86::GR32RegClass);
32990 Register OReg64 = MRI->createVirtualRegister(&X86::GR64RegClass);
32991 Register TReg = MRI->createVirtualRegister(&X86::GR64RegClass);
32993 // movl (BReg,IReg64,4), OReg
32994 BuildMI(DispContBB, DL, TII->get(X86::MOV32rm), OReg)
32995 .addReg(BReg)
32996 .addImm(4)
32997 .addReg(IReg64)
32998 .addImm(0)
32999 .addReg(0);
33000 // movsx OReg64, OReg
33001 BuildMI(DispContBB, DL, TII->get(X86::MOVSX64rr32), OReg64).addReg(OReg);
33002 // addq BReg, OReg64, TReg
33003 BuildMI(DispContBB, DL, TII->get(X86::ADD64rr), TReg)
33004 .addReg(OReg64)
33005 .addReg(BReg);
33006 // jmpq *TReg
33007 BuildMI(DispContBB, DL, TII->get(X86::JMP64r)).addReg(TReg);
33008 break;
33009 }
33010 default:
33011 llvm_unreachable("Unexpected jump table encoding");
33012 }
33013 } else {
33014 // jmpl *.LJTI0_0(,IReg,4)
33015 BuildMI(DispContBB, DL, TII->get(X86::JMP32m))
33016 .addReg(0)
33017 .addImm(4)
33018 .addReg(IReg)
33019 .addJumpTableIndex(MJTI)
33020 .addReg(0);
33021 }
33023 // Add the jump table entries as successors to the MBB.
33024 SmallPtrSet<MachineBasicBlock *, 8> SeenMBBs;
33025 for (auto &LP : LPadList)
33026 if (SeenMBBs.insert(LP).second)
33027 DispContBB->addSuccessor(LP);
33029 // N.B. the order the invoke BBs are processed in doesn't matter here.
33030 SmallVector<MachineBasicBlock *, 64> MBBLPads;
33031 const MCPhysReg *SavedRegs = MF->getRegInfo().getCalleeSavedRegs();
33032 for (MachineBasicBlock *MBB : InvokeBBs) {
33033 // Remove the landing pad successor from the invoke block and replace it
33034 // with the new dispatch block.
33035 // Keep a copy of Successors since it's modified inside the loop.
33036 SmallVector<MachineBasicBlock *, 8> Successors(MBB->succ_rbegin(),
33037 MBB->succ_rend());
33038 // FIXME: Avoid quadratic complexity.
33039 for (auto MBBS : Successors) {
33040 if (MBBS->isEHPad()) {
33041 MBB->removeSuccessor(MBBS);
33042 MBBLPads.push_back(MBBS);
33043 }
33044 }
33046 MBB->addSuccessor(DispatchBB);
33048 // Find the invoke call and mark all of the callee-saved registers as
33049 // 'implicit defined' so that they're spilled. This prevents code from
33050 // moving instructions to before the EH block, where they will never be
33051 // executed.
33052 for (auto &II : reverse(*MBB)) {
33053 if (!II.isCall())
33054 continue;
33056 DenseMap<unsigned, bool> DefRegs;
33057 for (auto &MOp : II.operands())
33058 if (MOp.isReg())
33059 DefRegs[MOp.getReg()] = true;
33061 MachineInstrBuilder MIB(*MF, &II);
33062 for (unsigned RegIdx = 0; SavedRegs[RegIdx]; ++RegIdx) {
33063 unsigned Reg = SavedRegs[RegIdx];
33064 if (!DefRegs[Reg])
33065 MIB.addReg(Reg, RegState::ImplicitDefine | RegState::Dead);
33066 }
33068 break;
33069 }
33070 }
33072 // Mark all former landing pads as non-landing pads. The dispatch is the only
33073 // landing pad now.
33074 for (auto &LP : MBBLPads)
33075 LP->setIsEHPad(false);
33077 // The instruction is gone now.
33078 MI.eraseFromParent();
33079 return BB;
33080 }
33082 MachineBasicBlock *
33083 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
33084 MachineBasicBlock *BB) const {
33085 MachineFunction *MF = BB->getParent();
33086 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
33087 DebugLoc DL = MI.getDebugLoc();
33089 auto TMMImmToTMMReg = [](unsigned Imm) {
33090 assert (Imm < 8 && "Illegal tmm index");
33091 return X86::TMM0 + Imm;
33092 };
33093 switch (MI.getOpcode()) {
33094 default: llvm_unreachable("Unexpected instr type to insert");
33095 case X86::TLS_addr32:
33096 case X86::TLS_addr64:
33097 case X86::TLS_base_addr32:
33098 case X86::TLS_base_addr64:
33099 return EmitLoweredTLSAddr(MI, BB);
33100 case X86::INDIRECT_THUNK_CALL32:
33101 case X86::INDIRECT_THUNK_CALL64:
33102 case X86::INDIRECT_THUNK_TCRETURN32:
33103 case X86::INDIRECT_THUNK_TCRETURN64:
33104 return EmitLoweredIndirectThunk(MI, BB);
33105 case X86::CATCHRET:
33106 return EmitLoweredCatchRet(MI, BB);
33107 case X86::SEG_ALLOCA_32:
33108 case X86::SEG_ALLOCA_64:
33109 return EmitLoweredSegAlloca(MI, BB);
33110 case X86::PROBED_ALLOCA_32:
33111 case X86::PROBED_ALLOCA_64:
33112 return EmitLoweredProbedAlloca(MI, BB);
33113 case X86::TLSCall_32:
33114 case X86::TLSCall_64:
33115 return EmitLoweredTLSCall(MI, BB);
33116 case X86::CMOV_FR32:
33117 case X86::CMOV_FR32X:
33118 case X86::CMOV_FR64:
33119 case X86::CMOV_FR64X:
33120 case X86::CMOV_GR8:
33121 case X86::CMOV_GR16:
33122 case X86::CMOV_GR32:
33123 case X86::CMOV_RFP32:
33124 case X86::CMOV_RFP64:
33125 case X86::CMOV_RFP80:
33126 case X86::CMOV_VR64:
33127 case X86::CMOV_VR128:
33128 case X86::CMOV_VR128X:
33129 case X86::CMOV_VR256:
33130 case X86::CMOV_VR256X:
33131 case X86::CMOV_VR512:
33132 case X86::CMOV_VK1:
33133 case X86::CMOV_VK2:
33134 case X86::CMOV_VK4:
33135 case X86::CMOV_VK8:
33136 case X86::CMOV_VK16:
33137 case X86::CMOV_VK32:
33138 case X86::CMOV_VK64:
33139 return EmitLoweredSelect(MI, BB);
33141 case X86::RDFLAGS32:
33142 case X86::RDFLAGS64: {
33143 unsigned PushF =
33144 MI.getOpcode() == X86::RDFLAGS32 ? X86::PUSHF32 : X86::PUSHF64;
33145 unsigned Pop = MI.getOpcode() == X86::RDFLAGS32 ? X86::POP32r : X86::POP64r;
33146 MachineInstr *Push = BuildMI(*BB, MI, DL, TII->get(PushF));
33147 // Permit reads of the EFLAGS and DF registers without them being defined.
33148 // This intrinsic exists to read external processor state in flags, such as
33149 // the trap flag, interrupt flag, and direction flag, none of which are
33150 // modeled by the backend.
33151 assert(Push->getOperand(2).getReg() == X86::EFLAGS &&
33152 "Unexpected register in operand!");
33153 Push->getOperand(2).setIsUndef();
33154 assert(Push->getOperand(3).getReg() == X86::DF &&
33155 "Unexpected register in operand!");
33156 Push->getOperand(3).setIsUndef();
33157 BuildMI(*BB, MI, DL, TII->get(Pop), MI.getOperand(0).getReg());
33159 MI.eraseFromParent(); // The pseudo is gone now.
33160 return BB;
33161 }
33163 case X86::WRFLAGS32:
33164 case X86::WRFLAGS64: {
33165 unsigned Push =
33166 MI.getOpcode() == X86::WRFLAGS32 ? X86::PUSH32r : X86::PUSH64r;
33167 unsigned PopF =
33168 MI.getOpcode() == X86::WRFLAGS32 ? X86::POPF32 : X86::POPF64;
33169 BuildMI(*BB, MI, DL, TII->get(Push)).addReg(MI.getOperand(0).getReg());
33170 BuildMI(*BB, MI, DL, TII->get(PopF));
33172 MI.eraseFromParent(); // The pseudo is gone now.
33173 return BB;
33174 }
33176 case X86::FP32_TO_INT16_IN_MEM:
33177 case X86::FP32_TO_INT32_IN_MEM:
33178 case X86::FP32_TO_INT64_IN_MEM:
33179 case X86::FP64_TO_INT16_IN_MEM:
33180 case X86::FP64_TO_INT32_IN_MEM:
33181 case X86::FP64_TO_INT64_IN_MEM:
33182 case X86::FP80_TO_INT16_IN_MEM:
33183 case X86::FP80_TO_INT32_IN_MEM:
33184 case X86::FP80_TO_INT64_IN_MEM: {
33185 // Change the floating point control register to use "round towards zero"
33186 // mode when truncating to an integer value.
33187 int OrigCWFrameIdx =
33188 MF->getFrameInfo().CreateStackObject(2, Align(2), false);
33189 addFrameReference(BuildMI(*BB, MI, DL,
33190 TII->get(X86::FNSTCW16m)), OrigCWFrameIdx);
33192 // Load the old value of the control word...
33193 Register OldCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
33194 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOVZX32rm16), OldCW),
33195 OrigCWFrameIdx);
33197 // OR 0b11 into bit 10 and 11. 0b11 is the encoding for round toward zero.
33198 Register NewCW = MF->getRegInfo().createVirtualRegister(&X86::GR32RegClass);
33199 BuildMI(*BB, MI, DL, TII->get(X86::OR32ri), NewCW)
33200 .addReg(OldCW, RegState::Kill).addImm(0xC00);
33202 // Extract to 16 bits.
33203 Register NewCW16 =
33204 MF->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
33205 BuildMI(*BB, MI, DL, TII->get(TargetOpcode::COPY), NewCW16)
33206 .addReg(NewCW, RegState::Kill, X86::sub_16bit);
33208 // Prepare memory for FLDCW.
33209 int NewCWFrameIdx =
33210 MF->getFrameInfo().CreateStackObject(2, Align(2), false);
33211 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)),
33212 NewCWFrameIdx)
33213 .addReg(NewCW16, RegState::Kill);
33215 // Reload the modified control word now...
33216 addFrameReference(BuildMI(*BB, MI, DL,
33217 TII->get(X86::FLDCW16m)), NewCWFrameIdx);
33219 // Get the X86 opcode to use.
33220 unsigned Opc;
33221 switch (MI.getOpcode()) {
33222 default: llvm_unreachable("illegal opcode!");
33223 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
33224 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
33225 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
33226 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
33227 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
33228 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
33229 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
33230 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
33231 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
33232 }
33234 X86AddressMode AM = getAddressFromInstr(&MI, 0);
33235 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
33236 .addReg(MI.getOperand(X86::AddrNumOperands).getReg());
33238 // Reload the original control word now.
33239 addFrameReference(BuildMI(*BB, MI, DL,
33240 TII->get(X86::FLDCW16m)), OrigCWFrameIdx);
33242 MI.eraseFromParent(); // The pseudo instruction is gone now.
33243 return BB;
33244 }
33247 case X86::XBEGIN:
33248 return emitXBegin(MI, BB, Subtarget.getInstrInfo());
33250 case X86::VASTART_SAVE_XMM_REGS:
33251 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
33253 case X86::VAARG_64:
33254 return EmitVAARG64WithCustomInserter(MI, BB);
33256 case X86::EH_SjLj_SetJmp32:
33257 case X86::EH_SjLj_SetJmp64:
33258 return emitEHSjLjSetJmp(MI, BB);
33260 case X86::EH_SjLj_LongJmp32:
33261 case X86::EH_SjLj_LongJmp64:
33262 return emitEHSjLjLongJmp(MI, BB);
33264 case X86::Int_eh_sjlj_setup_dispatch:
33265 return EmitSjLjDispatchBlock(MI, BB);
33267 case TargetOpcode::STATEPOINT:
33268 // As an implementation detail, STATEPOINT shares the STACKMAP format at
33269 // this point in the process. We diverge later.
33270 return emitPatchPoint(MI, BB);
33272 case TargetOpcode::STACKMAP:
33273 case TargetOpcode::PATCHPOINT:
33274 return emitPatchPoint(MI, BB);
33276 case TargetOpcode::PATCHABLE_EVENT_CALL:
33277 return emitXRayCustomEvent(MI, BB);
33279 case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
33280 return emitXRayTypedEvent(MI, BB);
33282 case X86::LCMPXCHG8B: {
33283 const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
33284 // In addition to the four E[ABCD] registers implied by the encoding, CMPXCHG8B
33285 // requires a memory operand. If the current architecture happens to be i686
33286 // and the current function needs a base pointer
33287 // - which is ESI for i686 - the register allocator would not be able to
33288 // allocate registers for an address in the form X(%reg, %reg, Y):
33289 // there would never be enough unreserved registers during regalloc
33290 // (without the need for a base ptr the only option would be X(%edi, %esi, Y)).
33291 // We give the register allocator a hand by precomputing the address in
33292 // a new vreg using LEA.
33294 // If it is not i686 or there is no base pointer - nothing to do here.
33295 if (!Subtarget.is32Bit() || !TRI->hasBasePointer(*MF))
33296 return BB;
33298 // Even though this code does not necessarily need the base pointer to
33299 // be ESI, we check for that. The reason: if this assert fails, some
33300 // changes have happened in the compiler's base pointer handling, which most
33301 // probably have to be addressed somehow here.
33302 assert(TRI->getBaseRegister() == X86::ESI &&
33303 "LCMPXCHG8B custom insertion for i686 is written with X86::ESI as a "
33304 "base pointer in mind");
33306 MachineRegisterInfo &MRI = MF->getRegInfo();
33307 MVT SPTy = getPointerTy(MF->getDataLayout());
33308 const TargetRegisterClass *AddrRegClass = getRegClassFor(SPTy);
33309 Register computedAddrVReg = MRI.createVirtualRegister(AddrRegClass);
33311 X86AddressMode AM = getAddressFromInstr(&MI, 0);
33312 // Regalloc does not need any help when the memory operand of CMPXCHG8B
33313 // does not use index register.
33314 if (AM.IndexReg == X86::NoRegister)
33315 return BB;
33317 // After X86TargetLowering::ReplaceNodeResults CMPXCHG8B is glued to its
33318 // four operand definitions that are E[ABCD] registers. We skip them and
33319 // then insert the LEA.
33320 MachineBasicBlock::reverse_iterator RMBBI(MI.getReverseIterator());
33321 while (RMBBI != BB->rend() && (RMBBI->definesRegister(X86::EAX) ||
33322 RMBBI->definesRegister(X86::EBX) ||
33323 RMBBI->definesRegister(X86::ECX) ||
33324 RMBBI->definesRegister(X86::EDX))) {
33325 ++RMBBI;
33326 }
33327 MachineBasicBlock::iterator MBBI(RMBBI);
33328 addFullAddress(
33329 BuildMI(*BB, *MBBI, DL, TII->get(X86::LEA32r), computedAddrVReg), AM);
33331 setDirectAddressInInstr(&MI, 0, computedAddrVReg);
33333 return BB;
33334 }
33335 case X86::LCMPXCHG16B:
33336 return BB;
33337 case X86::LCMPXCHG8B_SAVE_EBX:
33338 case X86::LCMPXCHG16B_SAVE_RBX: {
33339 Register BasePtr =
33340 MI.getOpcode() == X86::LCMPXCHG8B_SAVE_EBX ? X86::EBX : X86::RBX;
33341 if (!BB->isLiveIn(BasePtr))
33342 BB->addLiveIn(BasePtr);
33343 return BB;
33344 }
33345 case TargetOpcode::PREALLOCATED_SETUP: {
33346 assert(Subtarget.is32Bit() && "preallocated only used in 32-bit");
33347 auto MFI = MF->getInfo<X86MachineFunctionInfo>();
33348 MFI->setHasPreallocatedCall(true);
33349 int64_t PreallocatedId = MI.getOperand(0).getImm();
33350 size_t StackAdjustment = MFI->getPreallocatedStackSize(PreallocatedId);
33351 assert(StackAdjustment != 0 && "0 stack adjustment");
33352 LLVM_DEBUG(dbgs() << "PREALLOCATED_SETUP stack adjustment "
33353 << StackAdjustment << "\n");
33354 BuildMI(*BB, MI, DL, TII->get(X86::SUB32ri), X86::ESP)
33355 .addReg(X86::ESP)
33356 .addImm(StackAdjustment);
33357 MI.eraseFromParent();
33358 return BB;
33359 }
33360 case TargetOpcode::PREALLOCATED_ARG: {
33361 assert(Subtarget.is32Bit() && "preallocated calls only used in 32-bit");
33362 int64_t PreallocatedId = MI.getOperand(1).getImm();
33363 int64_t ArgIdx = MI.getOperand(2).getImm();
33364 auto MFI = MF->getInfo<X86MachineFunctionInfo>();
33365 size_t ArgOffset = MFI->getPreallocatedArgOffsets(PreallocatedId)[ArgIdx];
33366 LLVM_DEBUG(dbgs() << "PREALLOCATED_ARG arg index " << ArgIdx
33367 << ", arg offset " << ArgOffset << "\n");
33368 // stack pointer + offset
33369 addRegOffset(
33370 BuildMI(*BB, MI, DL, TII->get(X86::LEA32r), MI.getOperand(0).getReg()),
33371 X86::ESP, false, ArgOffset);
33372 MI.eraseFromParent();
33373 return BB;
33374 }
33375 case X86::PTDPBSSD:
33376 case X86::PTDPBSUD:
33377 case X86::PTDPBUSD:
33378 case X86::PTDPBUUD:
33379 case X86::PTDPBF16PS: {
33380 const DebugLoc &DL = MI.getDebugLoc();
33381 unsigned Opc;
33382 switch (MI.getOpcode()) {
33383 case X86::PTDPBSSD: Opc = X86::TDPBSSD; break;
33384 case X86::PTDPBSUD: Opc = X86::TDPBSUD; break;
33385 case X86::PTDPBUSD: Opc = X86::TDPBUSD; break;
33386 case X86::PTDPBUUD: Opc = X86::TDPBUUD; break;
33387 case X86::PTDPBF16PS: Opc = X86::TDPBF16PS; break;
33388 default: llvm_unreachable("illegal opcode!");
33389 }
33390 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, TII->get(Opc));
33391 MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Define);
33392 MIB.addReg(TMMImmToTMMReg(MI.getOperand(0).getImm()), RegState::Undef);
33393 MIB.addReg(TMMImmToTMMReg(MI.getOperand(1).getImm()), RegState::Undef);
33394 MIB.addReg(TMMImmToTMMReg(MI.getOperand(2).getImm()), RegState::Undef);
33396 MI.eraseFromParent(); // The pseudo is gone now.
33397 return BB;
33398 }
33399 case X86::PTILEZERO: {
33400 const DebugLoc &DL = MI.getDebugLoc();
33401 unsigned Imm = MI.getOperand(0).getImm();
33402 BuildMI(*BB, MI, DL, TII->get(X86::TILEZERO), TMMImmToTMMReg(Imm));
33403 MI.eraseFromParent(); // The pseudo is gone now.
33404 return BB;
33405 }
33406 case X86::PTILELOADD:
33407 case X86::PTILELOADDT1:
33408 case X86::PTILESTORED: {
33409 const DebugLoc &DL = MI.getDebugLoc();
33410 unsigned Opc;
33411 switch (MI.getOpcode()) {
33412 case X86::PTILELOADD: Opc = X86::TILELOADD; break;
33413 case X86::PTILELOADDT1: Opc = X86::TILELOADDT1; break;
33414 case X86::PTILESTORED: Opc = X86::TILESTORED; break;
33415 }
33417 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, TII->get(Opc));
33418 unsigned CurOp = 0;
33419 if (Opc != X86::TILESTORED)
33420 MIB.addReg(TMMImmToTMMReg(MI.getOperand(CurOp++).getImm()),
33421 RegState::Define);
33423 MIB.add(MI.getOperand(CurOp++)); // base
33424 MIB.add(MI.getOperand(CurOp++)); // scale
33425 MIB.add(MI.getOperand(CurOp++)); // index -- stride
33426 MIB.add(MI.getOperand(CurOp++)); // displacement
33427 MIB.add(MI.getOperand(CurOp++)); // segment
33429 if (Opc == X86::TILESTORED)
33430 MIB.addReg(TMMImmToTMMReg(MI.getOperand(CurOp++).getImm()),
33431 RegState::Undef);
33433 MI.eraseFromParent(); // The pseudo is gone now.
33434 return BB;
33435 }
33436 }
33437 }
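// Editor's note (illustrative): the AMX pseudos above carry tile numbers as
// immediates until this point, and TMMImmToTMMReg turns them into physical
// tile registers. For example a "PTDPBSSD 0, 1, 2" pseudo becomes roughly
//
//   TDPBSSD $tmm0 (def), $tmm0 (undef accumulator input), $tmm1, $tmm2
//
// in the emitted machine code.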
33439 //===----------------------------------------------------------------------===//
33440 // X86 Optimization Hooks
33441 //===----------------------------------------------------------------------===//
33443 bool
33444 X86TargetLowering::targetShrinkDemandedConstant(SDValue Op,
33445 const APInt &DemandedBits,
33446 const APInt &DemandedElts,
33447 TargetLoweringOpt &TLO) const {
33448 EVT VT = Op.getValueType();
33449 unsigned Opcode = Op.getOpcode();
33450 unsigned EltSize = VT.getScalarSizeInBits();
33452 if (VT.isVector()) {
33453 // If the constant is only all signbits in the active bits, then we should
33454 // extend it to the entire constant to allow it to act as a boolean constant
33455 // vector.
33456 auto NeedsSignExtension = [&](SDValue V, unsigned ActiveBits) {
33457 if (!ISD::isBuildVectorOfConstantSDNodes(V.getNode()))
33458 return false;
33459 for (unsigned i = 0, e = V.getNumOperands(); i != e; ++i) {
33460 if (!DemandedElts[i] || V.getOperand(i).isUndef())
33461 continue;
33462 const APInt &Val = V.getConstantOperandAPInt(i);
33463 if (Val.getBitWidth() > Val.getNumSignBits() &&
33464 Val.trunc(ActiveBits).getNumSignBits() == ActiveBits)
33465 return true;
33466 }
33467 return false;
33468 };
33469 // For vectors - if we have a constant, then try to sign extend.
33470 // TODO: Handle AND/ANDN cases.
33471 unsigned ActiveBits = DemandedBits.getActiveBits();
33472 if (EltSize > ActiveBits && EltSize > 1 && isTypeLegal(VT) &&
33473 (Opcode == ISD::OR || Opcode == ISD::XOR) &&
33474 NeedsSignExtension(Op.getOperand(1), ActiveBits)) {
33475 EVT ExtSVT = EVT::getIntegerVT(*TLO.DAG.getContext(), ActiveBits);
33476 EVT ExtVT = EVT::getVectorVT(*TLO.DAG.getContext(), ExtSVT,
33477 VT.getVectorNumElements());
33478 SDValue NewC =
33479 TLO.DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(Op), VT,
33480 Op.getOperand(1), TLO.DAG.getValueType(ExtVT));
33481 SDValue NewOp =
33482 TLO.DAG.getNode(Opcode, SDLoc(Op), VT, Op.getOperand(0), NewC);
33483 return TLO.CombineTo(Op, NewOp);
33484 }
33485 return false;
33486 }
33488 // Only optimize Ands to prevent shrinking a constant that could be
33489 // matched by movzx.
33490 if (Opcode != ISD::AND)
33491 return false;
33493 // Make sure the RHS really is a constant.
33494 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
33495 if (!C)
33496 return false;
33498 const APInt &Mask = C->getAPIntValue();
33500 // Clear all non-demanded bits initially.
33501 APInt ShrunkMask = Mask & DemandedBits;
33503 // Find the width of the shrunk mask.
33504 unsigned Width = ShrunkMask.getActiveBits();
33506 // If the mask is all 0s there's nothing to do here.
33507 if (Width == 0)
33508 return false;
33510 // Find the next power of 2 width, rounding up to a byte.
33511 Width = PowerOf2Ceil(std::max(Width, 8U));
33512 // Truncate the width to size to handle illegal types.
33513 Width = std::min(Width, EltSize);
33515 // Calculate a possible zero extend mask for this constant.
33516 APInt ZeroExtendMask = APInt::getLowBitsSet(EltSize, Width);
33518 // If we aren't changing the mask, just return true to keep it and prevent
33519 // the caller from optimizing.
33520 if (ZeroExtendMask == Mask)
33521 return true;
33523 // Make sure the new mask can be represented by a combination of mask bits
33524 // and non-demanded bits.
33525 if (!ZeroExtendMask.isSubsetOf(Mask | ~DemandedBits))
33526 return false;
33528 // Replace the constant with the zero extend mask.
33529 SDLoc DL(Op);
33530 SDValue NewC = TLO.DAG.getConstant(ZeroExtendMask, DL, VT);
33531 SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
33532 return TLO.CombineTo(Op, NewOp);
33533 }
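// Editor's note (worked example of the scalar path above): for
// (and X, 0x0000FE00) where only bits 9..15 are demanded, ShrunkMask =
// 0xFE00 and Width = 16, so ZeroExtendMask = 0xFFFF. Since 0xFFFF only adds
// non-demanded bits (it is a subset of Mask | ~DemandedBits), the constant
// is widened to 0xFFFF and the AND can later be matched as a movzwl instead
// of needing a full 32-bit immediate.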
33535 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
33536 KnownBits &Known,
33537 const APInt &DemandedElts,
33538 const SelectionDAG &DAG,
33539 unsigned Depth) const {
33540 unsigned BitWidth = Known.getBitWidth();
33541 unsigned NumElts = DemandedElts.getBitWidth();
33542 unsigned Opc = Op.getOpcode();
33543 EVT VT = Op.getValueType();
33544 assert((Opc >= ISD::BUILTIN_OP_END ||
33545 Opc == ISD::INTRINSIC_WO_CHAIN ||
33546 Opc == ISD::INTRINSIC_W_CHAIN ||
33547 Opc == ISD::INTRINSIC_VOID) &&
33548 "Should use MaskedValueIsZero if you don't know whether Op"
33549 " is a target node!");
33554 case X86ISD::SETCC:
33555 Known.Zero.setBitsFrom(1);
33557 case X86ISD::MOVMSK: {
33558 unsigned NumLoBits = Op.getOperand(0).getValueType().getVectorNumElements();
33559 Known.Zero.setBitsFrom(NumLoBits);
33560 break;
33561 }
33562 case X86ISD::PEXTRB:
33563 case X86ISD::PEXTRW: {
33564 SDValue Src = Op.getOperand(0);
33565 EVT SrcVT = Src.getValueType();
33566 APInt DemandedElt = APInt::getOneBitSet(SrcVT.getVectorNumElements(),
33567 Op.getConstantOperandVal(1));
33568 Known = DAG.computeKnownBits(Src, DemandedElt, Depth + 1);
33569 Known = Known.anyextOrTrunc(BitWidth);
33570 Known.Zero.setBitsFrom(SrcVT.getScalarSizeInBits());
33571 break;
33572 }
33573 case X86ISD::VSRAI:
33574 case X86ISD::VSHLI:
33575 case X86ISD::VSRLI: {
33576 unsigned ShAmt = Op.getConstantOperandVal(1);
33577 if (ShAmt >= VT.getScalarSizeInBits()) {
33578 Known.setAllZero();
33579 break;
33580 }
33582 Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
33583 if (Opc == X86ISD::VSHLI) {
33584 Known.Zero <<= ShAmt;
33585 Known.One <<= ShAmt;
33586 // Low bits are known zero.
33587 Known.Zero.setLowBits(ShAmt);
33588 } else if (Opc == X86ISD::VSRLI) {
33589 Known.Zero.lshrInPlace(ShAmt);
33590 Known.One.lshrInPlace(ShAmt);
33591 // High bits are known zero.
33592 Known.Zero.setHighBits(ShAmt);
33593 } else {
33594 Known.Zero.ashrInPlace(ShAmt);
33595 Known.One.ashrInPlace(ShAmt);
33596 }
33597 break;
33598 }
33599 case X86ISD::PACKUS: {
33600 // PACKUS is just a truncation if the upper half is zero.
33601 APInt DemandedLHS, DemandedRHS;
33602 getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);
33604 Known.One = APInt::getAllOnesValue(BitWidth * 2);
33605 Known.Zero = APInt::getAllOnesValue(BitWidth * 2);
33607 KnownBits Known2;
33608 if (!!DemandedLHS) {
33609 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedLHS, Depth + 1);
33610 Known.One &= Known2.One;
33611 Known.Zero &= Known2.Zero;
33612 }
33613 if (!!DemandedRHS) {
33614 Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedRHS, Depth + 1);
33615 Known.One &= Known2.One;
33616 Known.Zero &= Known2.Zero;
33617 }
33619 if (Known.countMinLeadingZeros() < BitWidth)
33620 Known.resetAll();
33621 Known = Known.trunc(BitWidth);
33622 break;
33623 }
33624 case X86ISD::ANDNP: {
33625 KnownBits Known2;
33626 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
33627 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
33629 // ANDNP = (~X & Y);
33630 Known.One &= Known2.Zero;
33631 Known.Zero |= Known2.One;
33632 break;
33633 }
33634 case X86ISD::FOR: {
33635 KnownBits Known2;
33636 Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
33637 Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
33639 Known |= Known2;
33640 break;
33641 }
33642 case X86ISD::PSADBW: {
33643 assert(VT.getScalarType() == MVT::i64 &&
33644 Op.getOperand(0).getValueType().getScalarType() == MVT::i8 &&
33645 "Unexpected PSADBW types");
33647 // PSADBW - fills low 16 bits and zeros upper 48 bits of each i64 result.
33648 Known.Zero.setBitsFrom(16);
33649 break;
33650 }
33651 case X86ISD::CMOV: {
33652 Known = DAG.computeKnownBits(Op.getOperand(1), Depth + 1);
33653 // If we don't know any bits, early out.
33654 if (Known.isUnknown())
33655 break;
33656 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
33658 // Only known if known in both the LHS and RHS.
33659 Known.One &= Known2.One;
33660 Known.Zero &= Known2.Zero;
33661 break;
33662 }
33663 case X86ISD::BEXTR: {
33664 SDValue Op0 = Op.getOperand(0);
33665 SDValue Op1 = Op.getOperand(1);
33667 if (auto* Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
33668 unsigned Shift = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 0);
33669 unsigned Length = Cst1->getAPIntValue().extractBitsAsZExtValue(8, 8);
33671 // If the length is 0, the result is 0.
33672 if (Length == 0) {
33673 Known.setAllZero();
33674 break;
33675 }
33677 if ((Shift + Length) <= BitWidth) {
33678 Known = DAG.computeKnownBits(Op0, Depth + 1);
33679 Known = Known.extractBits(Length, Shift);
33680 Known = Known.zextOrTrunc(BitWidth);
33681 }
33682 }
33683 break;
33684 }
33685 case X86ISD::CVTSI2P:
33686 case X86ISD::CVTUI2P:
33687 case X86ISD::CVTP2SI:
33688 case X86ISD::CVTP2UI:
33689 case X86ISD::MCVTP2SI:
33690 case X86ISD::MCVTP2UI:
33691 case X86ISD::CVTTP2SI:
33692 case X86ISD::CVTTP2UI:
33693 case X86ISD::MCVTTP2SI:
33694 case X86ISD::MCVTTP2UI:
33695 case X86ISD::MCVTSI2P:
33696 case X86ISD::MCVTUI2P:
33697 case X86ISD::VFPROUND:
33698 case X86ISD::VMFPROUND:
33699 case X86ISD::CVTPS2PH:
33700 case X86ISD::MCVTPS2PH: {
33701 // Conversions - upper elements are known zero.
33702 EVT SrcVT = Op.getOperand(0).getValueType();
33703 if (SrcVT.isVector()) {
33704 unsigned NumSrcElts = SrcVT.getVectorNumElements();
33705 if (NumElts > NumSrcElts &&
33706 DemandedElts.countTrailingZeros() >= NumSrcElts)
33707 Known.setAllZero();
33708 }
33709 break;
33710 }
33711 case X86ISD::STRICT_CVTTP2SI:
33712 case X86ISD::STRICT_CVTTP2UI:
33713 case X86ISD::STRICT_CVTSI2P:
33714 case X86ISD::STRICT_CVTUI2P:
33715 case X86ISD::STRICT_VFPROUND:
33716 case X86ISD::STRICT_CVTPS2PH: {
33717 // Strict Conversions - upper elements are known zero.
33718 EVT SrcVT = Op.getOperand(1).getValueType();
33719 if (SrcVT.isVector()) {
33720 unsigned NumSrcElts = SrcVT.getVectorNumElements();
33721 if (NumElts > NumSrcElts &&
33722 DemandedElts.countTrailingZeros() >= NumSrcElts)
33723 Known.setAllZero();
33724 }
33725 break;
33726 }
33727 case X86ISD::MOVQ2DQ: {
33728 // Move from MMX to XMM. Upper half of XMM should be 0.
33729 if (DemandedElts.countTrailingZeros() >= (NumElts / 2))
33730 Known.setAllZero();
33731 break;
33732 }
33733 }
33735 // Handle target shuffles.
33736 // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
33737 if (isTargetShuffle(Opc)) {
33738 bool IsUnary;
33739 SmallVector<int, 64> Mask;
33740 SmallVector<SDValue, 2> Ops;
33741 if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask,
33742 IsUnary)) {
33743 unsigned NumOps = Ops.size();
33744 unsigned NumElts = VT.getVectorNumElements();
33745 if (Mask.size() == NumElts) {
33746 SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
33747 Known.Zero.setAllBits(); Known.One.setAllBits();
33748 for (unsigned i = 0; i != NumElts; ++i) {
33749 if (!DemandedElts[i])
33750 continue;
33751 int M = Mask[i];
33752 if (M == SM_SentinelUndef) {
33753 // For UNDEF elements, we don't know anything about the common state
33754 // of the shuffle result.
33755 Known.resetAll();
33756 break;
33757 } else if (M == SM_SentinelZero) {
33758 Known.One.clearAllBits();
33759 continue;
33760 }
33761 assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
33762 "Shuffle index out of range");
33764 unsigned OpIdx = (unsigned)M / NumElts;
33765 unsigned EltIdx = (unsigned)M % NumElts;
33766 if (Ops[OpIdx].getValueType() != VT) {
33767 // TODO - handle target shuffle ops with different value types.
33768 Known.resetAll();
33769 break;
33770 }
33771 DemandedOps[OpIdx].setBit(EltIdx);
33772 }
33773 // Known bits are the values that are shared by every demanded element.
33774 for (unsigned i = 0; i != NumOps && !Known.isUnknown(); ++i) {
33775 if (!DemandedOps[i])
33776 continue;
33777 KnownBits Known2 =
33778 DAG.computeKnownBits(Ops[i], DemandedOps[i], Depth + 1);
33779 Known.One &= Known2.One;
33780 Known.Zero &= Known2.Zero;
33781 }
33782 }
33783 }
33784 }
33785 }
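// Editor's note (example of the MOVMSK case above): for a v4f32 input,
// MOVMSK produces a 4-bit sign mask in a 32-bit GPR, so bits 4..31 of the
// result are reported as known zero, which lets later combines drop masks
// such as (and (movmsk x), 0xf).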
33787 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
33788 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
33789 unsigned Depth) const {
33790 EVT VT = Op.getValueType();
33791 unsigned VTBits = VT.getScalarSizeInBits();
33792 unsigned Opcode = Op.getOpcode();
33793 switch (Opcode) {
33794 case X86ISD::SETCC_CARRY:
33795 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
33796 return VTBits;
33798 case X86ISD::VTRUNC: {
33799 SDValue Src = Op.getOperand(0);
33800 MVT SrcVT = Src.getSimpleValueType();
33801 unsigned NumSrcBits = SrcVT.getScalarSizeInBits();
33802 assert(VTBits < NumSrcBits && "Illegal truncation input type");
33803 APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
33804 unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedSrc, Depth + 1);
33805 if (Tmp > (NumSrcBits - VTBits))
33806 return Tmp - (NumSrcBits - VTBits);
33807 return 1;
33808 }
33810 case X86ISD::PACKSS: {
33811 // PACKSS is just a truncation if the sign bits extend to the packed size.
33812 APInt DemandedLHS, DemandedRHS;
33813 getPackDemandedElts(Op.getValueType(), DemandedElts, DemandedLHS,
33814 DemandedRHS);
33816 unsigned SrcBits = Op.getOperand(0).getScalarValueSizeInBits();
33817 unsigned Tmp0 = SrcBits, Tmp1 = SrcBits;
33818 if (!!DemandedLHS)
33819 Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), DemandedLHS, Depth + 1);
33820 if (!!DemandedRHS)
33821 Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), DemandedRHS, Depth + 1);
33822 unsigned Tmp = std::min(Tmp0, Tmp1);
33823 if (Tmp > (SrcBits - VTBits))
33824 return Tmp - (SrcBits - VTBits);
33825 return 1;
33826 }
33828 case X86ISD::VSHLI: {
33829 SDValue Src = Op.getOperand(0);
33830 const APInt &ShiftVal = Op.getConstantOperandAPInt(1);
33831 if (ShiftVal.uge(VTBits))
33832 return VTBits; // Shifted all bits out --> zero.
33833 unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
33834 if (ShiftVal.uge(Tmp))
33835 return 1; // Shifted all sign bits out --> unknown.
33836 return Tmp - ShiftVal.getZExtValue();
33837 }
33839 case X86ISD::VSRAI: {
33840 SDValue Src = Op.getOperand(0);
33841 APInt ShiftVal = Op.getConstantOperandAPInt(1);
33842 if (ShiftVal.uge(VTBits - 1))
33843 return VTBits; // Sign splat.
33844 unsigned Tmp = DAG.ComputeNumSignBits(Src, DemandedElts, Depth + 1);
33845 ShiftVal += Tmp;
33846 return ShiftVal.uge(VTBits) ? VTBits : ShiftVal.getZExtValue();
33847 }
33849 case X86ISD::PCMPGT:
33850 case X86ISD::PCMPEQ:
33852 case X86ISD::VPCOM:
33853 case X86ISD::VPCOMU:
33854 // Vector compares return zero/all-bits result values.
33857 case X86ISD::ANDNP: {
33859 DAG.ComputeNumSignBits(Op.getOperand(0), DemandedElts, Depth + 1);
33860 if (Tmp0 == 1) return 1; // Early out.
33862 DAG.ComputeNumSignBits(Op.getOperand(1), DemandedElts, Depth + 1);
33863 return std::min(Tmp0, Tmp1);
33866 case X86ISD::CMOV: {
33867 unsigned Tmp0 = DAG.ComputeNumSignBits(Op.getOperand(0), Depth+1);
33868 if (Tmp0 == 1) return 1; // Early out.
33869 unsigned Tmp1 = DAG.ComputeNumSignBits(Op.getOperand(1), Depth+1);
33870 return std::min(Tmp0, Tmp1);
  // Handle target shuffles.
  // TODO - use resolveTargetShuffleInputs once we can limit recursive depth.
  if (isTargetShuffle(Opcode)) {
    bool IsUnary;
    SmallVector<int, 64> Mask;
    SmallVector<SDValue, 2> Ops;
    if (getTargetShuffleMask(Op.getNode(), VT.getSimpleVT(), true, Ops, Mask,
                             IsUnary)) {
      unsigned NumOps = Ops.size();
      unsigned NumElts = VT.getVectorNumElements();
      if (Mask.size() == NumElts) {
        SmallVector<APInt, 2> DemandedOps(NumOps, APInt(NumElts, 0));
        for (unsigned i = 0; i != NumElts; ++i) {
          if (!DemandedElts[i])
            continue;
          int M = Mask[i];
          if (M == SM_SentinelUndef) {
            // For UNDEF elements, we don't know anything about the common state
            // of the shuffle result.
            return 1;
          } else if (M == SM_SentinelZero) {
            // Zero = all sign bits.
            continue;
          }
          assert(0 <= M && (unsigned)M < (NumOps * NumElts) &&
                 "Shuffle index out of range");

          unsigned OpIdx = (unsigned)M / NumElts;
          unsigned EltIdx = (unsigned)M % NumElts;
          if (Ops[OpIdx].getValueType() != VT) {
            // TODO - handle target shuffle ops with different value types.
            return 1;
          }
          DemandedOps[OpIdx].setBit(EltIdx);
        }
        unsigned Tmp0 = VTBits;
        for (unsigned i = 0; i != NumOps && Tmp0 > 1; ++i) {
          if (!DemandedOps[i])
            continue;
          unsigned Tmp1 =
              DAG.ComputeNumSignBits(Ops[i], DemandedOps[i], Depth + 1);
          Tmp0 = std::min(Tmp0, Tmp1);
        }
        return Tmp0;
      }
    }
  }

  // Fallback case.
  return 1;
}

SDValue X86TargetLowering::unwrapAddress(SDValue N) const {
  if (N->getOpcode() == X86ISD::Wrapper || N->getOpcode() == X86ISD::WrapperRIP)
    return N->getOperand(0);
  return N;
}

// Helper to look for a normal load that can be narrowed into a vzload with the
// specified VT and memory VT. Returns SDValue() on failure.
static SDValue narrowLoadToVZLoad(LoadSDNode *LN, MVT MemVT, MVT VT,
                                  SelectionDAG &DAG) {
  // Can't if the load is volatile or atomic.
  if (!LN->isSimple())
    return SDValue();

  SDVTList Tys = DAG.getVTList(VT, MVT::Other);
  SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
  return DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, SDLoc(LN), Tys, Ops, MemVT,
                                 LN->getPointerInfo(), LN->getOriginalAlign(),
                                 LN->getMemOperand()->getFlags());
}

// Attempt to match a combined shuffle mask against supported unary shuffle
// instructions.
// TODO: Investigate sharing more of this with shuffle lowering.
static bool matchUnaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
                              bool AllowFloatDomain, bool AllowIntDomain,
                              SDValue &V1, const SDLoc &DL, SelectionDAG &DAG,
                              const X86Subtarget &Subtarget, unsigned &Shuffle,
                              MVT &SrcVT, MVT &DstVT) {
  unsigned NumMaskElts = Mask.size();
  unsigned MaskEltSize = MaskVT.getScalarSizeInBits();

  // Match against a VZEXT_MOVL vXi32 zero-extending instruction.
  if (MaskEltSize == 32 && isUndefOrEqual(Mask[0], 0) &&
      isUndefOrZero(Mask[1]) && isUndefInRange(Mask, 2, NumMaskElts - 2)) {
    Shuffle = X86ISD::VZEXT_MOVL;
    SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
    return true;
  }

  // Match against an ANY/ZERO_EXTEND_VECTOR_INREG instruction.
  // TODO: Add 512-bit vector support (split AVX512F and AVX512BW).
  if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSE41()) ||
                         (MaskVT.is256BitVector() && Subtarget.hasInt256()))) {
    unsigned MaxScale = 64 / MaskEltSize;
    for (unsigned Scale = 2; Scale <= MaxScale; Scale *= 2) {
      bool MatchAny = true;
      bool MatchZero = true;
      unsigned NumDstElts = NumMaskElts / Scale;
      for (unsigned i = 0; i != NumDstElts && (MatchAny || MatchZero); ++i) {
        if (!isUndefOrEqual(Mask[i * Scale], (int)i)) {
          MatchAny = MatchZero = false;
          break;
        }
        MatchAny &= isUndefInRange(Mask, (i * Scale) + 1, Scale - 1);
        MatchZero &= isUndefOrZeroInRange(Mask, (i * Scale) + 1, Scale - 1);
      }
      if (MatchAny || MatchZero) {
        assert(MatchZero && "Failed to match zext but matched aext?");
        unsigned SrcSize = std::max(128u, NumDstElts * MaskEltSize);
        MVT ScalarTy = MaskVT.isInteger() ? MaskVT.getScalarType()
                                          : MVT::getIntegerVT(MaskEltSize);
        SrcVT = MVT::getVectorVT(ScalarTy, SrcSize / MaskEltSize);

        if (SrcVT.getSizeInBits() != MaskVT.getSizeInBits())
          V1 = extractSubVector(V1, 0, DAG, DL, SrcSize);

        Shuffle = unsigned(MatchAny ? ISD::ANY_EXTEND : ISD::ZERO_EXTEND);
        if (SrcVT.getVectorNumElements() != NumDstElts)
          Shuffle = getOpcode_EXTEND_VECTOR_INREG(Shuffle);

        DstVT = MVT::getIntegerVT(Scale * MaskEltSize);
        DstVT = MVT::getVectorVT(DstVT, NumDstElts);
        return true;
      }
    }
  }

  // Match against a VZEXT_MOVL instruction, SSE1 only supports 32-bits (MOVSS).
  if (((MaskEltSize == 32) || (MaskEltSize == 64 && Subtarget.hasSSE2())) &&
      isUndefOrEqual(Mask[0], 0) &&
      isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1)) {
    Shuffle = X86ISD::VZEXT_MOVL;
    SrcVT = DstVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
    return true;
  }

  // Check if we have SSE3 which will let us use MOVDDUP etc. The
  // instructions are no slower than UNPCKLPD but have the option to
  // fold the input operand into even an unaligned memory load.
  if (MaskVT.is128BitVector() && Subtarget.hasSSE3() && AllowFloatDomain) {
    if (isTargetShuffleEquivalent(Mask, {0, 0})) {
      Shuffle = X86ISD::MOVDDUP;
      SrcVT = DstVT = MVT::v2f64;
      return true;
    }
    if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
      Shuffle = X86ISD::MOVSLDUP;
      SrcVT = DstVT = MVT::v4f32;
      return true;
    }
    if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3})) {
      Shuffle = X86ISD::MOVSHDUP;
      SrcVT = DstVT = MVT::v4f32;
      return true;
    }
  }

  if (MaskVT.is256BitVector() && AllowFloatDomain) {
    assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles");
    if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2})) {
      Shuffle = X86ISD::MOVDDUP;
      SrcVT = DstVT = MVT::v4f64;
      return true;
    }
    if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
      Shuffle = X86ISD::MOVSLDUP;
      SrcVT = DstVT = MVT::v8f32;
      return true;
    }
    if (isTargetShuffleEquivalent(Mask, {1, 1, 3, 3, 5, 5, 7, 7})) {
      Shuffle = X86ISD::MOVSHDUP;
      SrcVT = DstVT = MVT::v8f32;
      return true;
    }
  }

  if (MaskVT.is512BitVector() && AllowFloatDomain) {
    assert(Subtarget.hasAVX512() &&
           "AVX512 required for 512-bit vector shuffles");
    if (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2, 4, 4, 6, 6})) {
      Shuffle = X86ISD::MOVDDUP;
      SrcVT = DstVT = MVT::v8f64;
      return true;
    }
    if (isTargetShuffleEquivalent(
            Mask, {0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14})) {
      Shuffle = X86ISD::MOVSLDUP;
      SrcVT = DstVT = MVT::v16f32;
      return true;
    }
    if (isTargetShuffleEquivalent(
            Mask, {1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15})) {
      Shuffle = X86ISD::MOVSHDUP;
      SrcVT = DstVT = MVT::v16f32;
      return true;
    }
  }

  return false;
}

// Attempt to match a combined shuffle mask against supported unary immediate
// permute instructions.
// TODO: Investigate sharing more of this with shuffle lowering.
static bool matchUnaryPermuteShuffle(MVT MaskVT, ArrayRef<int> Mask,
                                     const APInt &Zeroable,
                                     bool AllowFloatDomain, bool AllowIntDomain,
                                     const X86Subtarget &Subtarget,
                                     unsigned &Shuffle, MVT &ShuffleVT,
                                     unsigned &PermuteImm) {
  unsigned NumMaskElts = Mask.size();
  unsigned InputSizeInBits = MaskVT.getSizeInBits();
  unsigned MaskScalarSizeInBits = InputSizeInBits / NumMaskElts;
  MVT MaskEltVT = MVT::getIntegerVT(MaskScalarSizeInBits);
  bool ContainsZeros = isAnyZero(Mask);

  // Handle VPERMI/VPERMILPD vXi64/vXf64 patterns.
  if (!ContainsZeros && MaskScalarSizeInBits == 64) {
    // Check for lane crossing permutes.
    if (is128BitLaneCrossingShuffleMask(MaskEltVT, Mask)) {
      // PERMPD/PERMQ permutes within a 256-bit vector (AVX2+).
      if (Subtarget.hasAVX2() && MaskVT.is256BitVector()) {
        Shuffle = X86ISD::VPERMI;
        ShuffleVT = (AllowFloatDomain ? MVT::v4f64 : MVT::v4i64);
        PermuteImm = getV4X86ShuffleImm(Mask);
        return true;
      }
      if (Subtarget.hasAVX512() && MaskVT.is512BitVector()) {
        SmallVector<int, 4> RepeatedMask;
        if (is256BitLaneRepeatedShuffleMask(MVT::v8f64, Mask, RepeatedMask)) {
          Shuffle = X86ISD::VPERMI;
          ShuffleVT = (AllowFloatDomain ? MVT::v8f64 : MVT::v8i64);
          PermuteImm = getV4X86ShuffleImm(RepeatedMask);
          return true;
        }
      }
    } else if (AllowFloatDomain && Subtarget.hasAVX()) {
      // VPERMILPD can permute with a non-repeating shuffle.
      Shuffle = X86ISD::VPERMILPI;
      ShuffleVT = MVT::getVectorVT(MVT::f64, Mask.size());
      PermuteImm = 0;
      for (int i = 0, e = Mask.size(); i != e; ++i) {
        int M = Mask[i];
        if (M == SM_SentinelUndef)
          continue;
        assert(((M / 2) == (i / 2)) && "Out of range shuffle mask index");
        PermuteImm |= (M & 1) << i;
      }
      return true;
    }
  }

  // Handle PSHUFD/VPERMILPI vXi32/vXf32 repeated patterns.
  // AVX introduced the VPERMILPD/VPERMILPS float permutes, before then we
  // had to use 2-input SHUFPD/SHUFPS shuffles (not handled here).
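  // Illustrative encoding (not from the original comments): a repeated v4i32
  // mask {1, 0, 3, 2} becomes the PSHUFD immediate 0b10110001 (0xB1), two
  // bits selecting the source element for each destination lane.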
  if ((MaskScalarSizeInBits == 64 || MaskScalarSizeInBits == 32) &&
      !ContainsZeros && (AllowIntDomain || Subtarget.hasAVX())) {
    SmallVector<int, 4> RepeatedMask;
    if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
      // Narrow the repeated mask to create 32-bit element permutes.
      SmallVector<int, 4> WordMask = RepeatedMask;
      if (MaskScalarSizeInBits == 64)
        narrowShuffleMaskElts(2, RepeatedMask, WordMask);

      Shuffle = (AllowIntDomain ? X86ISD::PSHUFD : X86ISD::VPERMILPI);
      ShuffleVT = (AllowIntDomain ? MVT::i32 : MVT::f32);
      ShuffleVT = MVT::getVectorVT(ShuffleVT, InputSizeInBits / 32);
      PermuteImm = getV4X86ShuffleImm(WordMask);
      return true;
    }
  }

  // Handle PSHUFLW/PSHUFHW vXi16 repeated patterns.
  if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits == 16) {
    SmallVector<int, 4> RepeatedMask;
    if (is128BitLaneRepeatedShuffleMask(MaskEltVT, Mask, RepeatedMask)) {
      ArrayRef<int> LoMask(RepeatedMask.data() + 0, 4);
      ArrayRef<int> HiMask(RepeatedMask.data() + 4, 4);

      // PSHUFLW: permute lower 4 elements only.
      if (isUndefOrInRange(LoMask, 0, 4) &&
          isSequentialOrUndefInRange(HiMask, 0, 4, 4)) {
        Shuffle = X86ISD::PSHUFLW;
        ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
        PermuteImm = getV4X86ShuffleImm(LoMask);
        return true;
      }

      // PSHUFHW: permute upper 4 elements only.
      if (isUndefOrInRange(HiMask, 4, 8) &&
          isSequentialOrUndefInRange(LoMask, 0, 4, 0)) {
        // Offset the HiMask so that we can create the shuffle immediate.
        int OffsetHiMask[4];
        for (int i = 0; i != 4; ++i)
          OffsetHiMask[i] = (HiMask[i] < 0 ? HiMask[i] : HiMask[i] - 4);

        Shuffle = X86ISD::PSHUFHW;
        ShuffleVT = MVT::getVectorVT(MVT::i16, InputSizeInBits / 16);
        PermuteImm = getV4X86ShuffleImm(OffsetHiMask);
        return true;
      }
    }
  }

  // Attempt to match against byte/bit shifts.
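  // Illustrative match (hypothetical mask): {Z, 0, 1, 2} on v4i32, with Z a
  // zeroable element, is a whole-vector byte shift left (PSLLDQ by 4 bytes).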
  if (AllowIntDomain &&
      ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
       (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
       (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
    int ShiftAmt = matchShuffleAsShift(ShuffleVT, Shuffle, MaskScalarSizeInBits,
                                       Mask, 0, Zeroable, Subtarget);
    if (0 < ShiftAmt && (!ShuffleVT.is512BitVector() || Subtarget.hasBWI() ||
                         32 <= ShuffleVT.getScalarSizeInBits())) {
      PermuteImm = (unsigned)ShiftAmt;
      return true;
    }
  }

  // Attempt to match against bit rotates.
  if (!ContainsZeros && AllowIntDomain && MaskScalarSizeInBits < 64 &&
      ((MaskVT.is128BitVector() && Subtarget.hasXOP()) ||
       Subtarget.hasAVX512())) {
    int RotateAmt = matchShuffleAsBitRotate(ShuffleVT, MaskScalarSizeInBits,
                                            Subtarget, Mask);
    if (0 < RotateAmt) {
      Shuffle = X86ISD::VROTLI;
      PermuteImm = (unsigned)RotateAmt;
      return true;
    }
  }

  return false;
}

// Attempt to match a combined shuffle mask against supported binary shuffle
// instructions.
// TODO: Investigate sharing more of this with shuffle lowering.
static bool matchBinaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
                               bool AllowFloatDomain, bool AllowIntDomain,
                               SDValue &V1, SDValue &V2, const SDLoc &DL,
                               SelectionDAG &DAG, const X86Subtarget &Subtarget,
                               unsigned &Shuffle, MVT &SrcVT, MVT &DstVT,
                               bool IsUnary) {
  unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();

  if (MaskVT.is128BitVector()) {
    if (isTargetShuffleEquivalent(Mask, {0, 0}) && AllowFloatDomain) {
      V2 = V1;
      V1 = (SM_SentinelUndef == Mask[0] ? DAG.getUNDEF(MVT::v4f32) : V1);
      Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKL : X86ISD::MOVLHPS;
      SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
      return true;
    }
    if (isTargetShuffleEquivalent(Mask, {1, 1}) && AllowFloatDomain) {
      V2 = V1;
      Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKH : X86ISD::MOVHLPS;
      SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
      return true;
    }
    if (isTargetShuffleEquivalent(Mask, {0, 3}) && Subtarget.hasSSE2() &&
        (AllowFloatDomain || !Subtarget.hasSSE41())) {
      std::swap(V1, V2);
      Shuffle = X86ISD::MOVSD;
      SrcVT = DstVT = MVT::v2f64;
      return true;
    }
    if (isTargetShuffleEquivalent(Mask, {4, 1, 2, 3}) &&
        (AllowFloatDomain || !Subtarget.hasSSE41())) {
      Shuffle = X86ISD::MOVSS;
      SrcVT = DstVT = MVT::v4f32;
      return true;
    }
  }

  // Attempt to match against either a unary or binary PACKSS/PACKUS shuffle.
  if (((MaskVT == MVT::v8i16 || MaskVT == MVT::v16i8) && Subtarget.hasSSE2()) ||
      ((MaskVT == MVT::v16i16 || MaskVT == MVT::v32i8) && Subtarget.hasInt256()) ||
      ((MaskVT == MVT::v32i16 || MaskVT == MVT::v64i8) && Subtarget.hasBWI())) {
    if (matchShuffleWithPACK(MaskVT, SrcVT, V1, V2, Shuffle, Mask, DAG,
                             Subtarget)) {
      DstVT = MaskVT;
      return true;
    }
  }

  // Attempt to match against either a unary or binary UNPCKL/UNPCKH shuffle.
  if ((MaskVT == MVT::v4f32 && Subtarget.hasSSE1()) ||
      (MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
      (MaskVT.is256BitVector() && 32 <= EltSizeInBits && Subtarget.hasAVX()) ||
      (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
      (MaskVT.is512BitVector() && Subtarget.hasAVX512())) {
    if (matchShuffleWithUNPCK(MaskVT, V1, V2, Shuffle, IsUnary, Mask, DL, DAG,
                              Subtarget)) {
      SrcVT = DstVT = MaskVT;
      if (MaskVT.is256BitVector() && !Subtarget.hasAVX2())
        SrcVT = DstVT = (32 == EltSizeInBits ? MVT::v8f32 : MVT::v4f64);
      return true;
    }
  }

  return false;
}

static bool matchBinaryPermuteShuffle(
    MVT MaskVT, ArrayRef<int> Mask, const APInt &Zeroable,
    bool AllowFloatDomain, bool AllowIntDomain, SDValue &V1, SDValue &V2,
    const SDLoc &DL, SelectionDAG &DAG, const X86Subtarget &Subtarget,
    unsigned &Shuffle, MVT &ShuffleVT, unsigned &PermuteImm) {
  unsigned NumMaskElts = Mask.size();
  unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();

  // Attempt to match against VALIGND/VALIGNQ rotate.
  if (AllowIntDomain && (EltSizeInBits == 64 || EltSizeInBits == 32) &&
      ((MaskVT.is128BitVector() && Subtarget.hasVLX()) ||
       (MaskVT.is256BitVector() && Subtarget.hasVLX()) ||
       (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
    if (!isAnyZero(Mask)) {
      int Rotation = matchShuffleAsElementRotate(V1, V2, Mask);
      if (0 < Rotation) {
        Shuffle = X86ISD::VALIGN;
        if (EltSizeInBits == 64)
          ShuffleVT = MVT::getVectorVT(MVT::i64, MaskVT.getSizeInBits() / 64);
        else
          ShuffleVT = MVT::getVectorVT(MVT::i32, MaskVT.getSizeInBits() / 32);
        PermuteImm = Rotation;
        return true;
      }
    }
  }

  // Attempt to match against PALIGNR byte rotate.
  if (AllowIntDomain && ((MaskVT.is128BitVector() && Subtarget.hasSSSE3()) ||
                         (MaskVT.is256BitVector() && Subtarget.hasAVX2()) ||
                         (MaskVT.is512BitVector() && Subtarget.hasBWI()))) {
    int ByteRotation = matchShuffleAsByteRotate(MaskVT, V1, V2, Mask);
    if (0 < ByteRotation) {
      Shuffle = X86ISD::PALIGNR;
      ShuffleVT = MVT::getVectorVT(MVT::i8, MaskVT.getSizeInBits() / 8);
      PermuteImm = ByteRotation;
      return true;
    }
  }

  // Attempt to combine to X86ISD::BLENDI.
  if ((NumMaskElts <= 8 && ((Subtarget.hasSSE41() && MaskVT.is128BitVector()) ||
                            (Subtarget.hasAVX() && MaskVT.is256BitVector()))) ||
      (MaskVT == MVT::v16i16 && Subtarget.hasAVX2())) {
    uint64_t BlendMask = 0;
    bool ForceV1Zero = false, ForceV2Zero = false;
    SmallVector<int, 8> TargetMask(Mask.begin(), Mask.end());
    if (matchShuffleAsBlend(V1, V2, TargetMask, Zeroable, ForceV1Zero,
                            ForceV2Zero, BlendMask)) {
      if (MaskVT == MVT::v16i16) {
        // We can only use v16i16 PBLENDW if the lanes are repeated.
        SmallVector<int, 8> RepeatedMask;
        if (isRepeatedTargetShuffleMask(128, MaskVT, TargetMask,
                                        RepeatedMask)) {
          assert(RepeatedMask.size() == 8 &&
                 "Repeated mask size doesn't match!");
          PermuteImm = 0;
          for (int i = 0; i < 8; ++i)
            if (RepeatedMask[i] >= 8)
              PermuteImm |= 1 << i;
          V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
          V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
          Shuffle = X86ISD::BLENDI;
          ShuffleVT = MaskVT;
          return true;
        }
      } else {
        V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
        V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
        PermuteImm = (unsigned)BlendMask;
        Shuffle = X86ISD::BLENDI;
        ShuffleVT = MaskVT;
        return true;
      }
    }
  }

  // Attempt to combine to INSERTPS, but only if it has elements that need to
  // be set to zero.
  if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
      MaskVT.is128BitVector() && isAnyZero(Mask) &&
      matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
    Shuffle = X86ISD::INSERTPS;
    ShuffleVT = MVT::v4f32;
    return true;
  }

  // Attempt to combine to SHUFPD.
  if (AllowFloatDomain && EltSizeInBits == 64 &&
      ((MaskVT.is128BitVector() && Subtarget.hasSSE2()) ||
       (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
       (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
    bool ForceV1Zero = false, ForceV2Zero = false;
    if (matchShuffleWithSHUFPD(MaskVT, V1, V2, ForceV1Zero, ForceV2Zero,
                               PermuteImm, Mask, Zeroable)) {
      V1 = ForceV1Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V1;
      V2 = ForceV2Zero ? getZeroVector(MaskVT, Subtarget, DAG, DL) : V2;
      Shuffle = X86ISD::SHUFP;
      ShuffleVT = MVT::getVectorVT(MVT::f64, MaskVT.getSizeInBits() / 64);
      return true;
    }
  }

  // Attempt to combine to SHUFPS.
  if (AllowFloatDomain && EltSizeInBits == 32 &&
      ((MaskVT.is128BitVector() && Subtarget.hasSSE1()) ||
       (MaskVT.is256BitVector() && Subtarget.hasAVX()) ||
       (MaskVT.is512BitVector() && Subtarget.hasAVX512()))) {
    SmallVector<int, 4> RepeatedMask;
    if (isRepeatedTargetShuffleMask(128, MaskVT, Mask, RepeatedMask)) {
      // Match each half of the repeated mask, to determine if it's just
      // referencing one of the vectors, is zeroable or entirely undef.
      auto MatchHalf = [&](unsigned Offset, int &S0, int &S1) {
        int M0 = RepeatedMask[Offset];
        int M1 = RepeatedMask[Offset + 1];

        if (isUndefInRange(RepeatedMask, Offset, 2)) {
          return DAG.getUNDEF(MaskVT);
        } else if (isUndefOrZeroInRange(RepeatedMask, Offset, 2)) {
          S0 = (SM_SentinelUndef == M0 ? -1 : 0);
          S1 = (SM_SentinelUndef == M1 ? -1 : 1);
          return getZeroVector(MaskVT, Subtarget, DAG, DL);
        } else if (isUndefOrInRange(M0, 0, 4) && isUndefOrInRange(M1, 0, 4)) {
          S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
          S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
          return V1;
        } else if (isUndefOrInRange(M0, 4, 8) && isUndefOrInRange(M1, 4, 8)) {
          S0 = (SM_SentinelUndef == M0 ? -1 : M0 & 3);
          S1 = (SM_SentinelUndef == M1 ? -1 : M1 & 3);
          return V2;
        }

        return SDValue();
      };

      int ShufMask[4] = {-1, -1, -1, -1};
      SDValue Lo = MatchHalf(0, ShufMask[0], ShufMask[1]);
      SDValue Hi = MatchHalf(2, ShufMask[2], ShufMask[3]);

      if (Lo && Hi) {
        V1 = Lo;
        V2 = Hi;
        Shuffle = X86ISD::SHUFP;
        ShuffleVT = MVT::getVectorVT(MVT::f32, MaskVT.getSizeInBits() / 32);
        PermuteImm = getV4X86ShuffleImm(ShufMask);
        return true;
      }
    }
  }

  // Attempt to combine to INSERTPS more generally if X86ISD::SHUFP failed.
  if (AllowFloatDomain && EltSizeInBits == 32 && Subtarget.hasSSE41() &&
      MaskVT.is128BitVector() &&
      matchShuffleAsInsertPS(V1, V2, PermuteImm, Zeroable, Mask, DAG)) {
    Shuffle = X86ISD::INSERTPS;
    ShuffleVT = MVT::v4f32;
    return true;
  }

  return false;
}

static SDValue combineX86ShuffleChainWithExtract(
    ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
    bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
    const X86Subtarget &Subtarget);

/// Combine an arbitrary chain of shuffles into a single instruction if
/// possible.
///
/// This is the leaf of the recursive combine below. When we have found some
/// chain of single-use x86 shuffle instructions and accumulated the combined
/// shuffle mask represented by them, this will try to pattern match that mask
/// into either a single instruction if there is a special purpose instruction
/// for this operation, or into a PSHUFB instruction which is a fully general
/// instruction but should only be used to replace chains over a certain depth.
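///
/// Illustrative example (not exhaustive): a chain whose accumulated mask is
/// {0, 0, 2, 2} on v4f32 can be matched directly to a single MOVSLDUP; only
/// deeper or more irregular chains fall through to the variable-mask paths.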
static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
                                      ArrayRef<int> BaseMask, int Depth,
                                      bool HasVariableMask,
                                      bool AllowVariableMask, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  assert(!BaseMask.empty() && "Cannot combine an empty shuffle mask!");
  assert((Inputs.size() == 1 || Inputs.size() == 2) &&
         "Unexpected number of shuffle inputs!");

  // Find the inputs that enter the chain. Note that multiple uses are OK
  // here, we're not going to remove the operands we find.
  bool UnaryShuffle = (Inputs.size() == 1);
  SDValue V1 = peekThroughBitcasts(Inputs[0]);
  SDValue V2 = (UnaryShuffle ? DAG.getUNDEF(V1.getValueType())
                             : peekThroughBitcasts(Inputs[1]));

  MVT VT1 = V1.getSimpleValueType();
  MVT VT2 = V2.getSimpleValueType();
  MVT RootVT = Root.getSimpleValueType();
  assert(VT1.getSizeInBits() == RootVT.getSizeInBits() &&
         VT2.getSizeInBits() == RootVT.getSizeInBits() &&
         "Vector size mismatch");

  SDLoc DL(Root);
  SDValue Res;

  unsigned NumBaseMaskElts = BaseMask.size();
  if (NumBaseMaskElts == 1) {
    assert(BaseMask[0] == 0 && "Invalid shuffle index found!");
    return DAG.getBitcast(RootVT, V1);
  }

  bool OptForSize = DAG.shouldOptForSize();
  unsigned RootSizeInBits = RootVT.getSizeInBits();
  unsigned NumRootElts = RootVT.getVectorNumElements();
  unsigned BaseMaskEltSizeInBits = RootSizeInBits / NumBaseMaskElts;
  bool FloatDomain = VT1.isFloatingPoint() || VT2.isFloatingPoint() ||
                     (RootVT.isFloatingPoint() && Depth >= 1) ||
                     (RootVT.is256BitVector() && !Subtarget.hasAVX2());

  // Don't combine if we are an AVX512/EVEX target and the mask element size
  // is different from the root element size - this would prevent writemasks
  // from being reused.
  bool IsMaskedShuffle = false;
  if (RootSizeInBits == 512 || (Subtarget.hasVLX() && RootSizeInBits >= 128)) {
    if (Root.hasOneUse() && Root->use_begin()->getOpcode() == ISD::VSELECT &&
        Root->use_begin()->getOperand(0).getScalarValueSizeInBits() == 1) {
      IsMaskedShuffle = true;
    }
  }

  // If we are shuffling a broadcast (and not introducing zeros) then
  // we can just use the broadcast directly. This works for smaller broadcast
  // elements as well as they already repeat across each mask element.
  if (UnaryShuffle && isTargetShuffleSplat(V1) && !isAnyZero(BaseMask) &&
      (BaseMaskEltSizeInBits % V1.getScalarValueSizeInBits()) == 0) {
    return DAG.getBitcast(RootVT, V1);
  }

  // Attempt to match a subvector broadcast.
  // shuffle(insert_subvector(undef, sub, 0), undef, 0, 0, 0, 0)
  if (UnaryShuffle &&
      (BaseMaskEltSizeInBits == 128 || BaseMaskEltSizeInBits == 256)) {
    SmallVector<int, 64> BroadcastMask(NumBaseMaskElts, 0);
    if (isTargetShuffleEquivalent(BaseMask, BroadcastMask)) {
      SDValue Src = Inputs[0];
      if (Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
          Src.getOperand(0).isUndef() &&
          Src.getOperand(1).getValueSizeInBits() == BaseMaskEltSizeInBits &&
          MayFoldLoad(Src.getOperand(1)) && isNullConstant(Src.getOperand(2))) {
        return DAG.getBitcast(RootVT, DAG.getNode(X86ISD::SUBV_BROADCAST, DL,
                                                  Src.getValueType(),
                                                  Src.getOperand(1)));
      }
    }
  }

  // Handle 128/256-bit lane shuffles of 512-bit vectors.
  if (RootVT.is512BitVector() &&
      (NumBaseMaskElts == 2 || NumBaseMaskElts == 4)) {
    MVT ShuffleVT = (FloatDomain ? MVT::v8f64 : MVT::v8i64);

    // If the upper subvectors are zeroable, then an extract+insert is more
    // optimal than using X86ISD::SHUF128. The insertion is free, even if it has
    // to zero the upper subvectors.
    if (isUndefOrZeroInRange(BaseMask, 1, NumBaseMaskElts - 1)) {
      if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
        return SDValue(); // Nothing to do!
      assert(isInRange(BaseMask[0], 0, NumBaseMaskElts) &&
             "Unexpected lane shuffle");
      Res = DAG.getBitcast(ShuffleVT, V1);
      unsigned SubIdx = BaseMask[0] * (8 / NumBaseMaskElts);
      bool UseZero = isAnyZero(BaseMask);
      Res = extractSubVector(Res, SubIdx, DAG, DL, BaseMaskEltSizeInBits);
      Res = widenSubVector(Res, UseZero, Subtarget, DAG, DL, RootSizeInBits);
      return DAG.getBitcast(RootVT, Res);
    }

    // Narrow shuffle mask to v4x128.
    SmallVector<int, 4> Mask;
    assert((BaseMaskEltSizeInBits % 128) == 0 && "Illegal mask size");
    narrowShuffleMaskElts(BaseMaskEltSizeInBits / 128, BaseMask, Mask);

    // Try to lower to vshuf64x2/vshuf32x4.
    auto MatchSHUF128 = [](MVT ShuffleVT, const SDLoc &DL, ArrayRef<int> Mask,
                           SDValue V1, SDValue V2, SelectionDAG &DAG) {
      unsigned PermMask = 0;
      // Ensure elements came from the same Op.
      SDValue Ops[2] = {DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT)};
      for (int i = 0; i < 4; ++i) {
        assert(Mask[i] >= -1 && "Illegal shuffle sentinel value");
        if (Mask[i] < 0)
          continue;

        SDValue Op = Mask[i] >= 4 ? V2 : V1;
        unsigned OpIndex = i / 2;
        if (Ops[OpIndex].isUndef())
          Ops[OpIndex] = Op;
        else if (Ops[OpIndex] != Op)
          return SDValue();

        // Convert the 128-bit shuffle mask selection values into 128-bit
        // selection bits defined by a vshuf64x2 instruction's immediate control
        // byte.
        PermMask |= (Mask[i] % 4) << (i * 2);
      }

      return DAG.getNode(X86ISD::SHUF128, DL, ShuffleVT,
                         DAG.getBitcast(ShuffleVT, Ops[0]),
                         DAG.getBitcast(ShuffleVT, Ops[1]),
                         DAG.getTargetConstant(PermMask, DL, MVT::i8));
    };

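    // Illustrative immediate for MatchSHUF128 above (hypothetical mask):
    // {0, 1, 4, 5} takes the low 256 bits of V1 and the low 256 bits of V2,
    // encoding as (0 << 0) | (1 << 2) | (0 << 4) | (1 << 6) = 0x44.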
    // FIXME: Is there a better way to do this? is256BitLaneRepeatedShuffleMask
    // doesn't work because our mask is for 128 bits and we don't have an MVT
    // to match that.
    bool PreferPERMQ =
        UnaryShuffle && isUndefOrInRange(Mask[0], 0, 2) &&
        isUndefOrInRange(Mask[1], 0, 2) && isUndefOrInRange(Mask[2], 2, 4) &&
        isUndefOrInRange(Mask[3], 2, 4) &&
        (Mask[0] < 0 || Mask[2] < 0 || Mask[0] == (Mask[2] % 2)) &&
        (Mask[1] < 0 || Mask[3] < 0 || Mask[1] == (Mask[3] % 2));

    if (!isAnyZero(Mask) && !PreferPERMQ) {
      if (SDValue V = MatchSHUF128(ShuffleVT, DL, Mask, V1, V2, DAG))
        return DAG.getBitcast(RootVT, V);
    }
  }

  // Handle 128-bit lane shuffles of 256-bit vectors.
  if (RootVT.is256BitVector() && NumBaseMaskElts == 2) {
    MVT ShuffleVT = (FloatDomain ? MVT::v4f64 : MVT::v4i64);

    // If the upper half is zeroable, then an extract+insert is more optimal
    // than using X86ISD::VPERM2X128. The insertion is free, even if it has to
    // zero the upper half.
    if (isUndefOrZero(BaseMask[1])) {
      if (Depth == 0 && Root.getOpcode() == ISD::INSERT_SUBVECTOR)
        return SDValue(); // Nothing to do!
      assert(isInRange(BaseMask[0], 0, 2) && "Unexpected lane shuffle");
      Res = DAG.getBitcast(ShuffleVT, V1);
      Res = extract128BitVector(Res, BaseMask[0] * 2, DAG, DL);
      Res = widenSubVector(Res, BaseMask[1] == SM_SentinelZero, Subtarget, DAG,
                           DL, 256);
      return DAG.getBitcast(RootVT, Res);
    }

    if (Depth == 0 && Root.getOpcode() == X86ISD::VPERM2X128)
      return SDValue(); // Nothing to do!

    // If we have AVX2, prefer to use VPERMQ/VPERMPD for unary shuffles unless
    // we need to use the zeroing feature.
    // Prefer blends for sequential shuffles unless we are optimizing for size.
    if (UnaryShuffle &&
        !(Subtarget.hasAVX2() && isUndefOrInRange(BaseMask, 0, 2)) &&
        (OptForSize || !isSequentialOrUndefOrZeroInRange(BaseMask, 0, 2, 0))) {
      unsigned PermMask = 0;
      PermMask |= ((BaseMask[0] < 0 ? 0x8 : (BaseMask[0] & 1)) << 0);
      PermMask |= ((BaseMask[1] < 0 ? 0x8 : (BaseMask[1] & 1)) << 4);

      Res = DAG.getBitcast(ShuffleVT, V1);
      Res = DAG.getNode(X86ISD::VPERM2X128, DL, ShuffleVT, Res,
                        DAG.getUNDEF(ShuffleVT),
                        DAG.getTargetConstant(PermMask, DL, MVT::i8));
      return DAG.getBitcast(RootVT, Res);
    }

    if (Depth == 0 && Root.getOpcode() == X86ISD::SHUF128)
      return SDValue(); // Nothing to do!

    // TODO - handle AVX512VL cases with X86ISD::SHUF128.
    if (!UnaryShuffle && !IsMaskedShuffle) {
      assert(llvm::all_of(BaseMask, [](int M) { return 0 <= M && M < 4; }) &&
             "Unexpected shuffle sentinel value");
      // Prefer blends to X86ISD::VPERM2X128.
      if (!((BaseMask[0] == 0 && BaseMask[1] == 3) ||
            (BaseMask[0] == 2 && BaseMask[1] == 1))) {
        unsigned PermMask = 0;
        PermMask |= ((BaseMask[0] & 3) << 0);
        PermMask |= ((BaseMask[1] & 3) << 4);

        Res = DAG.getNode(
            X86ISD::VPERM2X128, DL, ShuffleVT,
            DAG.getBitcast(ShuffleVT, isInRange(BaseMask[0], 0, 2) ? V1 : V2),
            DAG.getBitcast(ShuffleVT, isInRange(BaseMask[1], 0, 2) ? V1 : V2),
            DAG.getTargetConstant(PermMask, DL, MVT::i8));
        return DAG.getBitcast(RootVT, Res);
      }
    }
  }

  // For masks that have been widened to 128-bit elements or more,
  // narrow back down to 64-bit elements.
  SmallVector<int, 64> Mask;
  if (BaseMaskEltSizeInBits > 64) {
    assert((BaseMaskEltSizeInBits % 64) == 0 && "Illegal mask size");
    int MaskScale = BaseMaskEltSizeInBits / 64;
    narrowShuffleMaskElts(MaskScale, BaseMask, Mask);
  } else {
    Mask.assign(BaseMask.begin(), BaseMask.end());
  }

  // For masked shuffles, we're trying to match the root width for better
  // writemask folding, attempt to scale the mask.
  // TODO - variable shuffles might need this to be widened again.
  if (IsMaskedShuffle && NumRootElts > Mask.size()) {
    assert((NumRootElts % Mask.size()) == 0 && "Illegal mask size");
    int MaskScale = NumRootElts / Mask.size();
    SmallVector<int, 64> ScaledMask;
    narrowShuffleMaskElts(MaskScale, Mask, ScaledMask);
    Mask = std::move(ScaledMask);
  }

  unsigned NumMaskElts = Mask.size();
  unsigned MaskEltSizeInBits = RootSizeInBits / NumMaskElts;

  // Determine the effective mask value type.
  FloatDomain &= (32 <= MaskEltSizeInBits);
  MVT MaskVT = FloatDomain ? MVT::getFloatingPointVT(MaskEltSizeInBits)
                           : MVT::getIntegerVT(MaskEltSizeInBits);
  MaskVT = MVT::getVectorVT(MaskVT, NumMaskElts);

  // Only allow legal mask types.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
    return SDValue();

  // Attempt to match the mask against known shuffle patterns.
  MVT ShuffleSrcVT, ShuffleVT;
  unsigned Shuffle, PermuteImm;

  // Which shuffle domains are permitted?
  // Permit domain crossing at higher combine depths.
  // TODO: Should we indicate which domain is preferred if both are allowed?
  bool AllowFloatDomain = FloatDomain || (Depth >= 3);
  bool AllowIntDomain = (!FloatDomain || (Depth >= 3)) && Subtarget.hasSSE2() &&
                        (!MaskVT.is256BitVector() || Subtarget.hasAVX2());

  // Determine zeroable mask elements.
  APInt KnownUndef, KnownZero;
  resolveZeroablesFromTargetShuffle(Mask, KnownUndef, KnownZero);
  APInt Zeroable = KnownUndef | KnownZero;

  if (UnaryShuffle) {
    // Attempt to match against broadcast-from-vector.
    // Limit AVX1 to cases where we're loading+broadcasting a scalar element.
    if ((Subtarget.hasAVX2() ||
         (Subtarget.hasAVX() && 32 <= MaskEltSizeInBits)) &&
        (!IsMaskedShuffle || NumRootElts == NumMaskElts)) {
      SmallVector<int, 64> BroadcastMask(NumMaskElts, 0);
      if (isTargetShuffleEquivalent(Mask, BroadcastMask)) {
        if (V1.getValueType() == MaskVT &&
            V1.getOpcode() == ISD::SCALAR_TO_VECTOR &&
            MayFoldLoad(V1.getOperand(0))) {
          if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
            return SDValue(); // Nothing to do!
          Res = V1.getOperand(0);
          Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
          return DAG.getBitcast(RootVT, Res);
        }
        if (Subtarget.hasAVX2()) {
          if (Depth == 0 && Root.getOpcode() == X86ISD::VBROADCAST)
            return SDValue(); // Nothing to do!
          Res = DAG.getBitcast(MaskVT, V1);
          Res = DAG.getNode(X86ISD::VBROADCAST, DL, MaskVT, Res);
          return DAG.getBitcast(RootVT, Res);
        }
      }
    }

    SDValue NewV1 = V1; // Save operand in case early exit happens.
    if (matchUnaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
                          DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
                          ShuffleVT) &&
        (!IsMaskedShuffle ||
         (NumRootElts == ShuffleVT.getVectorNumElements()))) {
      if (Depth == 0 && Root.getOpcode() == Shuffle)
        return SDValue(); // Nothing to do!
      Res = DAG.getBitcast(ShuffleSrcVT, NewV1);
      Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res);
      return DAG.getBitcast(RootVT, Res);
    }

    if (matchUnaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
                                 AllowIntDomain, Subtarget, Shuffle, ShuffleVT,
                                 PermuteImm) &&
        (!IsMaskedShuffle ||
         (NumRootElts == ShuffleVT.getVectorNumElements()))) {
      if (Depth == 0 && Root.getOpcode() == Shuffle)
        return SDValue(); // Nothing to do!
      Res = DAG.getBitcast(ShuffleVT, V1);
      Res = DAG.getNode(Shuffle, DL, ShuffleVT, Res,
                        DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
      return DAG.getBitcast(RootVT, Res);
    }
  }

  // Attempt to combine to INSERTPS, but only if the inserted element has come
  // from a scalar.
  // TODO: Handle other insertions here as well?
  if (!UnaryShuffle && AllowFloatDomain && RootSizeInBits == 128 &&
      MaskEltSizeInBits == 32 && Subtarget.hasSSE41() &&
      !isTargetShuffleEquivalent(Mask, {4, 1, 2, 3})) {
    SDValue SrcV1 = V1, SrcV2 = V2;
    if (matchShuffleAsInsertPS(SrcV1, SrcV2, PermuteImm, Zeroable, Mask, DAG) &&
        SrcV2.getOpcode() == ISD::SCALAR_TO_VECTOR) {
      if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTPS)
        return SDValue(); // Nothing to do!
      Res = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32,
                        DAG.getBitcast(MVT::v4f32, SrcV1),
                        DAG.getBitcast(MVT::v4f32, SrcV2),
                        DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
      return DAG.getBitcast(RootVT, Res);
    }
  }

  SDValue NewV1 = V1; // Save operands in case early exit happens.
  SDValue NewV2 = V2;
  if (matchBinaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, NewV1,
                         NewV2, DL, DAG, Subtarget, Shuffle, ShuffleSrcVT,
                         ShuffleVT, UnaryShuffle) &&
      (!IsMaskedShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
    if (Depth == 0 && Root.getOpcode() == Shuffle)
      return SDValue(); // Nothing to do!
    NewV1 = DAG.getBitcast(ShuffleSrcVT, NewV1);
    NewV2 = DAG.getBitcast(ShuffleSrcVT, NewV2);
    Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2);
    return DAG.getBitcast(RootVT, Res);
  }

  NewV1 = V1; // Save operands in case early exit happens.
  NewV2 = V2;
  if (matchBinaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
                                AllowIntDomain, NewV1, NewV2, DL, DAG,
                                Subtarget, Shuffle, ShuffleVT, PermuteImm) &&
      (!IsMaskedShuffle || (NumRootElts == ShuffleVT.getVectorNumElements()))) {
    if (Depth == 0 && Root.getOpcode() == Shuffle)
      return SDValue(); // Nothing to do!
    NewV1 = DAG.getBitcast(ShuffleVT, NewV1);
    NewV2 = DAG.getBitcast(ShuffleVT, NewV2);
    Res = DAG.getNode(Shuffle, DL, ShuffleVT, NewV1, NewV2,
                      DAG.getTargetConstant(PermuteImm, DL, MVT::i8));
    return DAG.getBitcast(RootVT, Res);
  }

  // Typically from here on, we need an integer version of MaskVT.
  MVT IntMaskVT = MVT::getIntegerVT(MaskEltSizeInBits);
  IntMaskVT = MVT::getVectorVT(IntMaskVT, NumMaskElts);

  // Annoyingly, SSE4A instructions don't map into the above match helpers.
  if (Subtarget.hasSSE4A() && AllowIntDomain && RootSizeInBits == 128) {
    uint64_t BitLen, BitIdx;
    if (matchShuffleAsEXTRQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx,
                            Zeroable)) {
      if (Depth == 0 && Root.getOpcode() == X86ISD::EXTRQI)
        return SDValue(); // Nothing to do!
      V1 = DAG.getBitcast(IntMaskVT, V1);
      Res = DAG.getNode(X86ISD::EXTRQI, DL, IntMaskVT, V1,
                        DAG.getTargetConstant(BitLen, DL, MVT::i8),
                        DAG.getTargetConstant(BitIdx, DL, MVT::i8));
      return DAG.getBitcast(RootVT, Res);
    }

    if (matchShuffleAsINSERTQ(IntMaskVT, V1, V2, Mask, BitLen, BitIdx)) {
      if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTQI)
        return SDValue(); // Nothing to do!
      V1 = DAG.getBitcast(IntMaskVT, V1);
      V2 = DAG.getBitcast(IntMaskVT, V2);
      Res = DAG.getNode(X86ISD::INSERTQI, DL, IntMaskVT, V1, V2,
                        DAG.getTargetConstant(BitLen, DL, MVT::i8),
                        DAG.getTargetConstant(BitIdx, DL, MVT::i8));
      return DAG.getBitcast(RootVT, Res);
    }
  }

  // Match shuffle against TRUNCATE patterns.
  if (AllowIntDomain && MaskEltSizeInBits < 64 && Subtarget.hasAVX512()) {
    // Match against a VTRUNC instruction, accounting for src/dst sizes.
    if (matchShuffleAsVTRUNC(ShuffleSrcVT, ShuffleVT, IntMaskVT, Mask, Zeroable,
                             Subtarget)) {
      bool IsTRUNCATE = ShuffleVT.getVectorNumElements() ==
                        ShuffleSrcVT.getVectorNumElements();
      unsigned Opc =
          IsTRUNCATE ? (unsigned)ISD::TRUNCATE : (unsigned)X86ISD::VTRUNC;
      if (Depth == 0 && Root.getOpcode() == Opc)
        return SDValue(); // Nothing to do!
      V1 = DAG.getBitcast(ShuffleSrcVT, V1);
      Res = DAG.getNode(Opc, DL, ShuffleVT, V1);
      if (ShuffleVT.getSizeInBits() < RootSizeInBits)
        Res = widenSubVector(Res, true, Subtarget, DAG, DL, RootSizeInBits);
      return DAG.getBitcast(RootVT, Res);
    }

    // Do we need a more general binary truncation pattern?
    if (RootSizeInBits < 512 &&
        ((RootVT.is256BitVector() && Subtarget.useAVX512Regs()) ||
         (RootVT.is128BitVector() && Subtarget.hasVLX())) &&
        (MaskEltSizeInBits > 8 || Subtarget.hasBWI()) &&
        isSequentialOrUndefInRange(Mask, 0, NumMaskElts, 0, 2)) {
      if (Depth == 0 && Root.getOpcode() == ISD::TRUNCATE)
        return SDValue(); // Nothing to do!
      ShuffleSrcVT = MVT::getIntegerVT(MaskEltSizeInBits * 2);
      ShuffleSrcVT = MVT::getVectorVT(ShuffleSrcVT, NumMaskElts / 2);
      V1 = DAG.getBitcast(ShuffleSrcVT, V1);
      V2 = DAG.getBitcast(ShuffleSrcVT, V2);
      ShuffleSrcVT = MVT::getIntegerVT(MaskEltSizeInBits * 2);
      ShuffleSrcVT = MVT::getVectorVT(ShuffleSrcVT, NumMaskElts);
      Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, ShuffleSrcVT, V1, V2);
      Res = DAG.getNode(ISD::TRUNCATE, DL, IntMaskVT, Res);
      return DAG.getBitcast(RootVT, Res);
    }
  }

  // Don't try to re-form single instruction chains under any circumstances now
  // that we've done encoding canonicalization for them.
  if (Depth < 1)
    return SDValue();

  // Depth threshold above which we can efficiently use variable mask shuffles.
  int VariableShuffleDepth = Subtarget.hasFastVariableShuffle() ? 1 : 2;
  AllowVariableMask &= (Depth >= VariableShuffleDepth) || HasVariableMask;

  bool MaskContainsZeros = isAnyZero(Mask);

  if (is128BitLaneCrossingShuffleMask(MaskVT, Mask)) {
    // If we have a single input lane-crossing shuffle then lower to VPERMV.
    if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
        ((Subtarget.hasAVX2() &&
          (MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
         (Subtarget.hasAVX512() &&
          (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
           MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
         (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
         (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
         (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
         (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
      SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
      Res = DAG.getBitcast(MaskVT, V1);
      Res = DAG.getNode(X86ISD::VPERMV, DL, MaskVT, VPermMask, Res);
      return DAG.getBitcast(RootVT, Res);
    }

    // Lower a unary+zero lane-crossing shuffle as VPERMV3 with a zero
    // vector as the second source.
    if (UnaryShuffle && AllowVariableMask &&
        ((Subtarget.hasAVX512() &&
          (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
           MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
         (Subtarget.hasVLX() &&
          (MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
           MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
         (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
         (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
         (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
         (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
      // Adjust shuffle mask - replace SM_SentinelZero with second source index.
      for (unsigned i = 0; i != NumMaskElts; ++i)
        if (Mask[i] == SM_SentinelZero)
          Mask[i] = NumMaskElts + i;

      SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
      Res = DAG.getBitcast(MaskVT, V1);
      SDValue Zero = getZeroVector(MaskVT, Subtarget, DAG, DL);
      Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, Res, VPermMask, Zero);
      return DAG.getBitcast(RootVT, Res);
    }

    // If that failed and either input is extracted then try to combine as a
    // shuffle with the larger type.
    if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
            Inputs, Root, BaseMask, Depth, HasVariableMask, AllowVariableMask,
            DAG, Subtarget))
      return WideShuffle;

    // If we have a dual input lane-crossing shuffle then lower to VPERMV3.
    if (AllowVariableMask && !MaskContainsZeros &&
        ((Subtarget.hasAVX512() &&
          (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
           MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
         (Subtarget.hasVLX() &&
          (MaskVT == MVT::v4f64 || MaskVT == MVT::v4i64 ||
           MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
         (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
         (Subtarget.hasBWI() && Subtarget.hasVLX() && MaskVT == MVT::v16i16) ||
         (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
         (Subtarget.hasVBMI() && Subtarget.hasVLX() && MaskVT == MVT::v32i8))) {
      SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
      V1 = DAG.getBitcast(MaskVT, V1);
      V2 = DAG.getBitcast(MaskVT, V2);
      Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, V1, VPermMask, V2);
      return DAG.getBitcast(RootVT, Res);
    }
    return SDValue();
  }

  // See if we can combine a single input shuffle with zeros to a bit-mask,
  // which is much simpler than any shuffle.
  if (UnaryShuffle && MaskContainsZeros && AllowVariableMask &&
      isSequentialOrUndefOrZeroInRange(Mask, 0, NumMaskElts, 0) &&
      DAG.getTargetLoweringInfo().isTypeLegal(MaskVT)) {
    APInt Zero = APInt::getNullValue(MaskEltSizeInBits);
    APInt AllOnes = APInt::getAllOnesValue(MaskEltSizeInBits);
    APInt UndefElts(NumMaskElts, 0);
    SmallVector<APInt, 64> EltBits(NumMaskElts, Zero);
    for (unsigned i = 0; i != NumMaskElts; ++i) {
      int M = Mask[i];
      if (M == SM_SentinelUndef) {
        UndefElts.setBit(i);
        continue;
      }
      if (M == SM_SentinelZero)
        continue;
      EltBits[i] = AllOnes;
    }
    SDValue BitMask = getConstVector(EltBits, UndefElts, MaskVT, DAG, DL);
    Res = DAG.getBitcast(MaskVT, V1);
    unsigned AndOpcode =
        MaskVT.isFloatingPoint() ? unsigned(X86ISD::FAND) : unsigned(ISD::AND);
    Res = DAG.getNode(AndOpcode, DL, MaskVT, Res, BitMask);
    return DAG.getBitcast(RootVT, Res);
  }

  // If we have a single input shuffle with different shuffle patterns in the
  // 128-bit lanes use the variable mask to VPERMILPS.
  // TODO Combine other mask types at higher depths.
  if (UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
      ((MaskVT == MVT::v8f32 && Subtarget.hasAVX()) ||
       (MaskVT == MVT::v16f32 && Subtarget.hasAVX512()))) {
    SmallVector<SDValue, 16> VPermIdx;
    for (int M : Mask) {
      SDValue Idx =
          M < 0 ? DAG.getUNDEF(MVT::i32) : DAG.getConstant(M % 4, DL, MVT::i32);
      VPermIdx.push_back(Idx);
    }
    SDValue VPermMask = DAG.getBuildVector(IntMaskVT, DL, VPermIdx);
    Res = DAG.getBitcast(MaskVT, V1);
    Res = DAG.getNode(X86ISD::VPERMILPV, DL, MaskVT, Res, VPermMask);
    return DAG.getBitcast(RootVT, Res);
  }

  // With XOP, binary shuffles of 128/256-bit floating point vectors can combine
  // to VPERMIL2PD/VPERMIL2PS.
  if (AllowVariableMask && Subtarget.hasXOP() &&
      (MaskVT == MVT::v2f64 || MaskVT == MVT::v4f64 || MaskVT == MVT::v4f32 ||
       MaskVT == MVT::v8f32)) {
    // VPERMIL2 Operation.
    // Bits[3] - Match Bit.
    // Bits[2:1] - (Per Lane) PD Shuffle Mask.
    // Bits[2:0] - (Per Lane) PS Shuffle Mask.
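    // Illustrative encoding (hypothetical element): for v4f32, selecting
    // element 2 of the second source encodes as index 2 + NumEltsPerLane = 6;
    // a zeroable element pushes 8 (the match bit) and sets M2ZImm below.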
    unsigned NumLanes = MaskVT.getSizeInBits() / 128;
    unsigned NumEltsPerLane = NumMaskElts / NumLanes;
    SmallVector<int, 8> VPerm2Idx;
    unsigned M2ZImm = 0;
    for (int M : Mask) {
      if (M == SM_SentinelUndef) {
        VPerm2Idx.push_back(-1);
        continue;
      }
      if (M == SM_SentinelZero) {
        M2ZImm = 8;
        VPerm2Idx.push_back(8);
        continue;
      }
      int Index = (M % NumEltsPerLane) + ((M / NumMaskElts) * NumEltsPerLane);
      Index = (MaskVT.getScalarSizeInBits() == 64 ? Index << 1 : Index);
      VPerm2Idx.push_back(Index);
    }
    V1 = DAG.getBitcast(MaskVT, V1);
    V2 = DAG.getBitcast(MaskVT, V2);
    SDValue VPerm2MaskOp = getConstVector(VPerm2Idx, IntMaskVT, DAG, DL, true);
    Res = DAG.getNode(X86ISD::VPERMIL2, DL, MaskVT, V1, V2, VPerm2MaskOp,
                      DAG.getTargetConstant(M2ZImm, DL, MVT::i8));
    return DAG.getBitcast(RootVT, Res);
  }

  // If we have 3 or more shuffle instructions or a chain involving a variable
  // mask, we can replace them with a single PSHUFB instruction profitably.
  // Intel's manuals suggest only using PSHUFB if doing so replaces 5
  // instructions, but in practice PSHUFB tends to be *very* fast so we're
  // more aggressive.
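  // Illustrative control bytes (not from the original comments): 0x02 copies
  // byte 2 of the source lane, while any value with the high bit set (0x80)
  // zeroes the destination byte; that is how SM_SentinelZero is emitted below.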
  if (UnaryShuffle && AllowVariableMask &&
      ((RootVT.is128BitVector() && Subtarget.hasSSSE3()) ||
       (RootVT.is256BitVector() && Subtarget.hasAVX2()) ||
       (RootVT.is512BitVector() && Subtarget.hasBWI()))) {
    SmallVector<SDValue, 16> PSHUFBMask;
    int NumBytes = RootVT.getSizeInBits() / 8;
    int Ratio = NumBytes / NumMaskElts;
    for (int i = 0; i < NumBytes; ++i) {
      int M = Mask[i / Ratio];
      if (M == SM_SentinelUndef) {
        PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
        continue;
      }
      if (M == SM_SentinelZero) {
        PSHUFBMask.push_back(DAG.getConstant(0x80, DL, MVT::i8));
        continue;
      }
      M = Ratio * M + i % Ratio;
      assert((M / 16) == (i / 16) && "Lane crossing detected");
      PSHUFBMask.push_back(DAG.getConstant(M, DL, MVT::i8));
    }
    MVT ByteVT = MVT::getVectorVT(MVT::i8, NumBytes);
    Res = DAG.getBitcast(ByteVT, V1);
    SDValue PSHUFBMaskOp = DAG.getBuildVector(ByteVT, DL, PSHUFBMask);
    Res = DAG.getNode(X86ISD::PSHUFB, DL, ByteVT, Res, PSHUFBMaskOp);
    return DAG.getBitcast(RootVT, Res);
  }

  // With XOP, if we have a 128-bit binary input shuffle we can always combine
  // to VPPERM. We match the depth requirement of PSHUFB - VPPERM is never
  // slower than PSHUFB on targets that support both.
  if (AllowVariableMask && RootVT.is128BitVector() && Subtarget.hasXOP()) {
    // VPPERM Mask Operation
    // Bits[4:0] - Byte Index (0 - 31)
    // Bits[7:5] - Permute Operation (0 - Source byte, 4 - ZERO)
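    // Illustrative control bytes (hypothetical): 0x12 selects byte 18, i.e.
    // byte 2 of the second source, while 0x80 (permute op 4) zeroes the
    // result byte, mirroring the PSHUFB lowering above.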
    SmallVector<SDValue, 16> VPPERMMask;
    int NumBytes = 16;
    int Ratio = NumBytes / NumMaskElts;
    for (int i = 0; i < NumBytes; ++i) {
      int M = Mask[i / Ratio];
      if (M == SM_SentinelUndef) {
        VPPERMMask.push_back(DAG.getUNDEF(MVT::i8));
        continue;
      }
      if (M == SM_SentinelZero) {
        VPPERMMask.push_back(DAG.getConstant(0x80, DL, MVT::i8));
        continue;
      }
      M = Ratio * M + i % Ratio;
      VPPERMMask.push_back(DAG.getConstant(M, DL, MVT::i8));
    }
    MVT ByteVT = MVT::v16i8;
    V1 = DAG.getBitcast(ByteVT, V1);
    V2 = DAG.getBitcast(ByteVT, V2);
    SDValue VPPERMMaskOp = DAG.getBuildVector(ByteVT, DL, VPPERMMask);
    Res = DAG.getNode(X86ISD::VPPERM, DL, ByteVT, V1, V2, VPPERMMaskOp);
    return DAG.getBitcast(RootVT, Res);
  }

  // If that failed and either input is extracted then try to combine as a
  // shuffle with the larger type.
  if (SDValue WideShuffle = combineX86ShuffleChainWithExtract(
          Inputs, Root, BaseMask, Depth, HasVariableMask, AllowVariableMask,
          DAG, Subtarget))
    return WideShuffle;

  // If we have a dual input shuffle then lower to VPERMV3.
  if (!UnaryShuffle && AllowVariableMask && !MaskContainsZeros &&
      ((Subtarget.hasAVX512() &&
        (MaskVT == MVT::v8f64 || MaskVT == MVT::v8i64 ||
         MaskVT == MVT::v16f32 || MaskVT == MVT::v16i32)) ||
       (Subtarget.hasVLX() &&
        (MaskVT == MVT::v2f64 || MaskVT == MVT::v2i64 || MaskVT == MVT::v4f64 ||
         MaskVT == MVT::v4i64 || MaskVT == MVT::v4f32 || MaskVT == MVT::v4i32 ||
         MaskVT == MVT::v8f32 || MaskVT == MVT::v8i32)) ||
       (Subtarget.hasBWI() && MaskVT == MVT::v32i16) ||
       (Subtarget.hasBWI() && Subtarget.hasVLX() &&
        (MaskVT == MVT::v8i16 || MaskVT == MVT::v16i16)) ||
       (Subtarget.hasVBMI() && MaskVT == MVT::v64i8) ||
       (Subtarget.hasVBMI() && Subtarget.hasVLX() &&
        (MaskVT == MVT::v16i8 || MaskVT == MVT::v32i8)))) {
    SDValue VPermMask = getConstVector(Mask, IntMaskVT, DAG, DL, true);
    V1 = DAG.getBitcast(MaskVT, V1);
    V2 = DAG.getBitcast(MaskVT, V2);
    Res = DAG.getNode(X86ISD::VPERMV3, DL, MaskVT, V1, VPermMask, V2);
    return DAG.getBitcast(RootVT, Res);
  }

  // Failed to find any combines.
  return SDValue();
}

// Combine an arbitrary chain of shuffles + extract_subvectors into a single
// instruction if possible.
//
// Wrapper for combineX86ShuffleChain that extends the shuffle mask to a larger
// type size to attempt to combine:
// shuffle(extract_subvector(x,c1),extract_subvector(y,c2),m1)
// -->
// extract_subvector(shuffle(x,y,m2),0)
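//
// Illustrative case (hypothetical types): a v4i32 shuffle of two subvectors
// extracted from the upper halves of v8i32 sources can be rewritten as one
// v8i32 shuffle of the original vectors followed by a free low extraction.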
static SDValue combineX86ShuffleChainWithExtract(
    ArrayRef<SDValue> Inputs, SDValue Root, ArrayRef<int> BaseMask, int Depth,
    bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
    const X86Subtarget &Subtarget) {
  unsigned NumMaskElts = BaseMask.size();
  unsigned NumInputs = Inputs.size();
  if (NumInputs == 0)
    return SDValue();

  SmallVector<SDValue, 4> WideInputs(Inputs.begin(), Inputs.end());
  SmallVector<unsigned, 4> Offsets(NumInputs, 0);

  // Peek through subvectors.
  // TODO: Support inter-mixed EXTRACT_SUBVECTORs + BITCASTs?
  unsigned WideSizeInBits = WideInputs[0].getValueSizeInBits();
  for (unsigned i = 0; i != NumInputs; ++i) {
    SDValue &Src = WideInputs[i];
    unsigned &Offset = Offsets[i];
    Src = peekThroughBitcasts(Src);
    EVT BaseVT = Src.getValueType();
    while (Src.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
      Offset += Src.getConstantOperandVal(1);
      Src = Src.getOperand(0);
    }
    WideSizeInBits = std::max(WideSizeInBits,
                              (unsigned)Src.getValueSizeInBits());
    assert((Offset % BaseVT.getVectorNumElements()) == 0 &&
           "Unexpected subvector extraction");
    Offset /= BaseVT.getVectorNumElements();
    Offset *= NumMaskElts;
  }

  // Bail if we're always extracting from the lowest subvectors,
  // combineX86ShuffleChain should match this for the current width.
  if (llvm::all_of(Offsets, [](unsigned Offset) { return Offset == 0; }))
    return SDValue();

  EVT RootVT = Root.getValueType();
  unsigned RootSizeInBits = RootVT.getSizeInBits();
  unsigned Scale = WideSizeInBits / RootSizeInBits;
  assert((WideSizeInBits % RootSizeInBits) == 0 &&
         "Unexpected subvector extraction");

  // If the src vector types aren't the same, see if we can extend
  // them to match each other.
  // TODO: Support different scalar types?
  EVT WideSVT = WideInputs[0].getValueType().getScalarType();
  if (llvm::any_of(WideInputs, [&WideSVT, &DAG](SDValue Op) {
        return !DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()) ||
               Op.getValueType().getScalarType() != WideSVT;
      }))
    return SDValue();

  for (SDValue &NewInput : WideInputs) {
    assert((WideSizeInBits % NewInput.getValueSizeInBits()) == 0 &&
           "Shuffle vector size mismatch");
    if (WideSizeInBits > NewInput.getValueSizeInBits())
      NewInput = widenSubVector(NewInput, false, Subtarget, DAG,
                                SDLoc(NewInput), WideSizeInBits);
    assert(WideSizeInBits == NewInput.getValueSizeInBits() &&
           "Unexpected subvector extraction");
  }

  // Create new mask for larger type.
  for (unsigned i = 1; i != NumInputs; ++i)
    Offsets[i] += i * Scale * NumMaskElts;

  SmallVector<int, 64> WideMask(BaseMask.begin(), BaseMask.end());
  for (int &M : WideMask) {
    if (M < 0)
      continue;
    M = (M % NumMaskElts) + Offsets[M / NumMaskElts];
  }
  WideMask.append((Scale - 1) * NumMaskElts, SM_SentinelUndef);

  // Remove unused/repeated shuffle source ops.
  resolveTargetShuffleInputsAndMask(WideInputs, WideMask);
  assert(!WideInputs.empty() && "Shuffle with no inputs detected");

  if (WideInputs.size() > 2)
    return SDValue();

  // Increase depth for every upper subvector we've peeked through.
  Depth += count_if(Offsets, [](unsigned Offset) { return Offset > 0; });

  // Attempt to combine wider chain.
  // TODO: Can we use a better Root?
  SDValue WideRoot = WideInputs[0];
  if (SDValue WideShuffle = combineX86ShuffleChain(
          WideInputs, WideRoot, WideMask, Depth, HasVariableMask,
          AllowVariableMask, DAG, Subtarget)) {
    WideShuffle =
        extractSubVector(WideShuffle, 0, DAG, SDLoc(Root), RootSizeInBits);
    return DAG.getBitcast(RootVT, WideShuffle);
  }
  return SDValue();
}

// Attempt to constant fold all of the constant source ops.
// Returns the folded constant if the entire shuffle folds to a constant.
// TODO: Extend this to merge multiple constant Ops and update the mask.
static SDValue combineX86ShufflesConstants(ArrayRef<SDValue> Ops,
                                           ArrayRef<int> Mask, SDValue Root,
                                           bool HasVariableMask,
                                           SelectionDAG &DAG,
                                           const X86Subtarget &Subtarget) {
  MVT VT = Root.getSimpleValueType();

  unsigned SizeInBits = VT.getSizeInBits();
  unsigned NumMaskElts = Mask.size();
  unsigned MaskSizeInBits = SizeInBits / NumMaskElts;
  unsigned NumOps = Ops.size();

  // Extract constant bits from each source op.
  bool OneUseConstantOp = false;
  SmallVector<APInt, 16> UndefEltsOps(NumOps);
  SmallVector<SmallVector<APInt, 16>, 16> RawBitsOps(NumOps);
  for (unsigned i = 0; i != NumOps; ++i) {
    SDValue SrcOp = Ops[i];
    OneUseConstantOp |= SrcOp.hasOneUse();
    if (!getTargetConstantBitsFromNode(SrcOp, MaskSizeInBits, UndefEltsOps[i],
                                       RawBitsOps[i]))
      return SDValue();
  }

  // Only fold if at least one of the constants is only used once or
  // the combined shuffle has included a variable mask shuffle, this
  // is to avoid constant pool bloat.
  if (!OneUseConstantOp && !HasVariableMask)
    return SDValue();

  // Shuffle the constant bits according to the mask.
  SDLoc DL(Root);
  APInt UndefElts(NumMaskElts, 0);
  APInt ZeroElts(NumMaskElts, 0);
  APInt ConstantElts(NumMaskElts, 0);
  SmallVector<APInt, 8> ConstantBitData(NumMaskElts,
                                        APInt::getNullValue(MaskSizeInBits));
  for (unsigned i = 0; i != NumMaskElts; ++i) {
    int M = Mask[i];
    if (M == SM_SentinelUndef) {
      UndefElts.setBit(i);
      continue;
    } else if (M == SM_SentinelZero) {
      ZeroElts.setBit(i);
      continue;
    }
    assert(0 <= M && M < (int)(NumMaskElts * NumOps));

    unsigned SrcOpIdx = (unsigned)M / NumMaskElts;
    unsigned SrcMaskIdx = (unsigned)M % NumMaskElts;

    auto &SrcUndefElts = UndefEltsOps[SrcOpIdx];
    if (SrcUndefElts[SrcMaskIdx]) {
      UndefElts.setBit(i);
      continue;
    }

    auto &SrcEltBits = RawBitsOps[SrcOpIdx];
    APInt &Bits = SrcEltBits[SrcMaskIdx];
    if (!Bits) {
      ZeroElts.setBit(i);
      continue;
    }

    ConstantElts.setBit(i);
    ConstantBitData[i] = Bits;
  }
  assert((UndefElts | ZeroElts | ConstantElts).isAllOnesValue());

  // Attempt to create a zero vector.
  if ((UndefElts | ZeroElts).isAllOnesValue())
    return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG, DL);

  // Create the constant data.
  MVT MaskSVT;
  if (VT.isFloatingPoint() && (MaskSizeInBits == 32 || MaskSizeInBits == 64))
    MaskSVT = MVT::getFloatingPointVT(MaskSizeInBits);
  else
    MaskSVT = MVT::getIntegerVT(MaskSizeInBits);

  MVT MaskVT = MVT::getVectorVT(MaskSVT, NumMaskElts);
  if (!DAG.getTargetLoweringInfo().isTypeLegal(MaskVT))
    return SDValue();

  SDValue CstOp = getConstVector(ConstantBitData, UndefElts, MaskVT, DAG, DL);
  return DAG.getBitcast(VT, CstOp);
}

/// Fully generic combining of x86 shuffle instructions.
///
/// This should be the last combine run over the x86 shuffle instructions. Once
/// they have been fully optimized, this will recursively consider all chains
/// of single-use shuffle instructions, build a generic model of the cumulative
/// shuffle operation, and check for simpler instructions which implement this
/// operation. We use this primarily for two purposes:
///
/// 1) Collapse generic shuffles to specialized single instructions when
///    equivalent. In most cases, this is just an encoding size win, but
///    sometimes we will collapse multiple generic shuffles into a single
///    special-purpose shuffle.
/// 2) Look for sequences of shuffle instructions with 3 or more total
///    instructions, and replace them with the slightly more expensive SSSE3
///    PSHUFB instruction if available. We do this as the last combining step
///    to ensure we avoid using PSHUFB if we can implement the shuffle with
///    a suitable short sequence of other instructions. The PSHUFB will either
///    use a register or have to read from memory and so is slightly (but only
///    slightly) more expensive than the other shuffle instructions.
///
/// Because this is inherently a quadratic operation (for each shuffle in
/// a chain, we recurse up the chain), the depth is limited to 8 instructions.
/// This should never be an issue in practice as the shuffle lowering doesn't
/// produce sequences of more than 8 instructions.
///
/// FIXME: We will currently miss some cases where the redundant shuffling
/// would simplify under the threshold for PSHUFB formation because of
/// combine-ordering. To fix this, we should do the redundant instruction
/// combining in this recursive walk.
static SDValue combineX86ShufflesRecursively(
    ArrayRef<SDValue> SrcOps, int SrcOpIndex, SDValue Root,
    ArrayRef<int> RootMask, ArrayRef<const SDNode *> SrcNodes, unsigned Depth,
    bool HasVariableMask, bool AllowVariableMask, SelectionDAG &DAG,
    const X86Subtarget &Subtarget) {
  assert(RootMask.size() > 0 &&
         (RootMask.size() > 1 || (RootMask[0] == 0 && SrcOpIndex == 0)) &&
         "Illegal shuffle root mask");

  // Bound the depth of our recursive combine because this is ultimately
  // quadratic in nature.
  const unsigned MaxRecursionDepth = 8;
  if (Depth >= MaxRecursionDepth)
    return SDValue();

  // Directly rip through bitcasts to find the underlying operand.
  SDValue Op = SrcOps[SrcOpIndex];
  Op = peekThroughOneUseBitcasts(Op);

  MVT VT = Op.getSimpleValueType();
  if (!VT.isVector())
    return SDValue(); // Bail if we hit a non-vector.

  assert(Root.getSimpleValueType().isVector() &&
         "Shuffles operate on vector types!");
  unsigned RootSizeInBits = Root.getSimpleValueType().getSizeInBits();
  assert(VT.getSizeInBits() == RootSizeInBits &&
         "Can only combine shuffles of the same vector register size.");

  // Extract target shuffle mask and resolve sentinels and inputs.
  // TODO - determine Op's demanded elts from RootMask.
  SmallVector<int, 64> OpMask;
  SmallVector<SDValue, 2> OpInputs;
  APInt OpUndef, OpZero;
  APInt OpDemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
  bool IsOpVariableMask = isTargetShuffleVariableMask(Op.getOpcode());
  if (!getTargetShuffleInputs(Op, OpDemandedElts, OpInputs, OpMask, OpUndef,
                              OpZero, DAG, Depth, false))
    return SDValue();

  // Shuffle inputs must be the same size as the result, bail on any larger
  // inputs and widen any smaller inputs.
  if (llvm::any_of(OpInputs, [RootSizeInBits](SDValue Op) {
        return Op.getValueSizeInBits() > RootSizeInBits;
      }))
    return SDValue();

  for (SDValue &Op : OpInputs)
    if (Op.getValueSizeInBits() < RootSizeInBits)
      Op = widenSubVector(peekThroughOneUseBitcasts(Op), false, Subtarget, DAG,
                          SDLoc(Op), RootSizeInBits);

  SmallVector<int, 64> Mask;
  SmallVector<SDValue, 16> Ops;

  // We don't need to merge masks if the root is empty.
  bool EmptyRoot = (Depth == 0) && (RootMask.size() == 1);
  if (EmptyRoot) {
    // Only resolve zeros if it will remove an input, otherwise we might end
    // up in an infinite loop.
    bool ResolveKnownZeros = true;
    if (!OpZero.isNullValue()) {
      APInt UsedInputs = APInt::getNullValue(OpInputs.size());
      for (int i = 0, e = OpMask.size(); i != e; ++i) {
        int M = OpMask[i];
        if (OpUndef[i] || OpZero[i] || isUndefOrZero(M))
          continue;
        UsedInputs.setBit(M / OpMask.size());
        if (UsedInputs.isAllOnesValue()) {
          ResolveKnownZeros = false;
          break;
        }
      }
    }
    resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero,
                                      ResolveKnownZeros);

    Mask = OpMask;
    Ops.append(OpInputs.begin(), OpInputs.end());
  } else {
    resolveTargetShuffleFromZeroables(OpMask, OpUndef, OpZero);

    // Add the inputs to the Ops list, avoiding duplicates.
    Ops.append(SrcOps.begin(), SrcOps.end());

    auto AddOp = [&Ops](SDValue Input, int InsertionPoint) -> int {
      // Attempt to find an existing match.
      SDValue InputBC = peekThroughBitcasts(Input);
      for (int i = 0, e = Ops.size(); i < e; ++i)
        if (InputBC == peekThroughBitcasts(Ops[i]))
          return i;
      // Match failed - should we replace an existing Op?
      if (InsertionPoint >= 0) {
        Ops[InsertionPoint] = Input;
        return InsertionPoint;
      }
      // Add to the end of the Ops list.
      Ops.push_back(Input);
      return Ops.size() - 1;
    };

    SmallVector<int, 2> OpInputIdx;
    for (SDValue OpInput : OpInputs)
      OpInputIdx.push_back(
          AddOp(OpInput, OpInputIdx.empty() ? SrcOpIndex : -1));

    assert(((RootMask.size() > OpMask.size() &&
             RootMask.size() % OpMask.size() == 0) ||
            (OpMask.size() > RootMask.size() &&
             OpMask.size() % RootMask.size() == 0) ||
            OpMask.size() == RootMask.size()) &&
           "The smaller number of elements must divide the larger.");

    // This function can be performance-critical, so we rely on the power-of-2
    // knowledge that we have about the mask sizes to replace div/rem ops with
    // bit-masks and shifts.
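    // i.e. for power-of-2 N, x / N == x >> Log2(N) and x % N == x & (N - 1).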
    assert(isPowerOf2_32(RootMask.size()) &&
           "Non-power-of-2 shuffle mask sizes");
    assert(isPowerOf2_32(OpMask.size()) && "Non-power-of-2 shuffle mask sizes");
    unsigned RootMaskSizeLog2 = countTrailingZeros(RootMask.size());
    unsigned OpMaskSizeLog2 = countTrailingZeros(OpMask.size());

    unsigned MaskWidth = std::max<unsigned>(OpMask.size(), RootMask.size());
    unsigned RootRatio =
        std::max<unsigned>(1, OpMask.size() >> RootMaskSizeLog2);
    unsigned OpRatio = std::max<unsigned>(1, RootMask.size() >> OpMaskSizeLog2);
    assert((RootRatio == 1 || OpRatio == 1) &&
           "Must not have a ratio for both incoming and op masks!");

    assert(isPowerOf2_32(MaskWidth) && "Non-power-of-2 shuffle mask sizes");
    assert(isPowerOf2_32(RootRatio) && "Non-power-of-2 shuffle mask sizes");
    assert(isPowerOf2_32(OpRatio) && "Non-power-of-2 shuffle mask sizes");
    unsigned RootRatioLog2 = countTrailingZeros(RootRatio);
    unsigned OpRatioLog2 = countTrailingZeros(OpRatio);

    Mask.resize(MaskWidth, SM_SentinelUndef);

    // Merge this shuffle operation's mask into our accumulated mask. Note that
    // this shuffle's mask will be the first applied to the input, followed by
    // the root mask to get us all the way to the root value arrangement. The
    // reason for this order is that we are recursing up the operation chain.
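    // e.g. merging a v4 RootMask <2,3,0,1> over an op with v8 OpMask
    // <0,0,2,2,4,4,6,6> gives RootRatio == 2, OpRatio == 1 and a merged v8
    // Mask of <4,4,6,6,0,0,2,2>.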
    for (unsigned i = 0; i < MaskWidth; ++i) {
      unsigned RootIdx = i >> RootRatioLog2;
      if (RootMask[RootIdx] < 0) {
        // This is a zero or undef lane, we're done.
        Mask[i] = RootMask[RootIdx];
        continue;
      }

      unsigned RootMaskedIdx =
          RootRatio == 1
              ? RootMask[RootIdx]
              : (RootMask[RootIdx] << RootRatioLog2) + (i & (RootRatio - 1));

      // Just insert the scaled root mask value if it references an input other
      // than the SrcOp we're currently inserting.
      if ((RootMaskedIdx < (SrcOpIndex * MaskWidth)) ||
          (((SrcOpIndex + 1) * MaskWidth) <= RootMaskedIdx)) {
        Mask[i] = RootMaskedIdx;
        continue;
      }

      RootMaskedIdx = RootMaskedIdx & (MaskWidth - 1);
      unsigned OpIdx = RootMaskedIdx >> OpRatioLog2;
      if (OpMask[OpIdx] < 0) {
        // The incoming lanes are zero or undef, it doesn't matter which ones
        // we are actually using.
        Mask[i] = OpMask[OpIdx];
        continue;
      }

      // Ok, we have non-zero lanes, map them through to one of the Op's inputs.
      unsigned OpMaskedIdx = OpRatio == 1 ? OpMask[OpIdx]
                                          : (OpMask[OpIdx] << OpRatioLog2) +
                                                (RootMaskedIdx & (OpRatio - 1));

      OpMaskedIdx = OpMaskedIdx & (MaskWidth - 1);
      int InputIdx = OpMask[OpIdx] / (int)OpMask.size();
      assert(0 <= OpInputIdx[InputIdx] && "Unknown target shuffle input");
      OpMaskedIdx += OpInputIdx[InputIdx] * MaskWidth;

      Mask[i] = OpMaskedIdx;
    }
  }

  // Remove unused/repeated shuffle source ops.
  resolveTargetShuffleInputsAndMask(Ops, Mask);

  // Handle the all undef/zero cases early.
  if (all_of(Mask, [](int Idx) { return Idx == SM_SentinelUndef; }))
    return DAG.getUNDEF(Root.getValueType());

  // TODO - should we handle the mixed zero/undef case as well? Just returning
  // a zero mask will lose information on undef elements possibly reducing
  // future combine possibilities.
  if (all_of(Mask, [](int Idx) { return Idx < 0; }))
    return getZeroVector(Root.getSimpleValueType(), Subtarget, DAG,
                         SDLoc(Root));

  assert(!Ops.empty() && "Shuffle with no inputs detected");
  HasVariableMask |= IsOpVariableMask;

  // Update the list of shuffle nodes that have been combined so far.
  SmallVector<const SDNode *, 16> CombinedNodes(SrcNodes.begin(),
                                                SrcNodes.end());
  CombinedNodes.push_back(Op.getNode());

  // See if we can recurse into each shuffle source op (if it's a target
  // shuffle). The source op should only be generally combined if it either has
  // a single use (i.e. current Op) or all its users have already been combined,
  // if not then we can still combine but should prevent generation of variable
  // shuffles to avoid constant pool bloat.
  // Don't recurse if we already have more source ops than we can combine in
  // the remaining recursion depth.
  if (Ops.size() < (MaxRecursionDepth - Depth)) {
    for (int i = 0, e = Ops.size(); i < e; ++i) {
      // For empty roots, we need to resolve zeroable elements before combining
      // them with other shuffles.
      SmallVector<int, 64> ResolvedMask = Mask;
      if (EmptyRoot)
        resolveTargetShuffleFromZeroables(ResolvedMask, OpUndef, OpZero);
      bool AllowVar = false;
      if (Ops[i].getNode()->hasOneUse() ||
          SDNode::areOnlyUsersOf(CombinedNodes, Ops[i].getNode()))
        AllowVar = AllowVariableMask;
      if (SDValue Res = combineX86ShufflesRecursively(
              Ops, i, Root, ResolvedMask, CombinedNodes, Depth + 1,
              HasVariableMask, AllowVar, DAG, Subtarget))
        return Res;
    }
  }

  // Attempt to constant fold all of the constant source ops.
  if (SDValue Cst = combineX86ShufflesConstants(
          Ops, Mask, Root, HasVariableMask, DAG, Subtarget))
    return Cst;

  // We can only combine unary and binary shuffle mask cases.
  if (Ops.size() <= 2) {
    // Minor canonicalization of the accumulated shuffle mask to make it easier
    // to match below. All this does is detect masks with sequential pairs of
    // elements, and shrink them to the half-width mask. It does this in a loop
    // so it will reduce the size of the mask to the minimal width mask which
    // performs an equivalent shuffle.
    SmallVector<int, 64> WidenedMask;
    while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
      Mask = std::move(WidenedMask);
      WidenedMask.clear();
    }

    // Canonicalization of binary shuffle masks to improve pattern matching by
    // commuting the inputs.
    if (Ops.size() == 2 && canonicalizeShuffleMaskWithCommute(Mask)) {
      ShuffleVectorSDNode::commuteMask(Mask);
      std::swap(Ops[0], Ops[1]);
    }

    // Finally, try to combine into a single shuffle instruction.
    return combineX86ShuffleChain(Ops, Root, Mask, Depth, HasVariableMask,
                                  AllowVariableMask, DAG, Subtarget);
  }

  // If that failed and any input is extracted then try to combine as a
  // shuffle with the larger type.
  return combineX86ShuffleChainWithExtract(Ops, Root, Mask, Depth,
                                           HasVariableMask, AllowVariableMask,
                                           DAG, Subtarget);
}

/// Helper entry wrapper to combineX86ShufflesRecursively.
static SDValue combineX86ShufflesRecursively(SDValue Op, SelectionDAG &DAG,
                                             const X86Subtarget &Subtarget) {
  return combineX86ShufflesRecursively({Op}, 0, Op, {0}, {}, /*Depth*/ 0,
                                       /*HasVarMask*/ false,
                                       /*AllowVarMask*/ true, DAG, Subtarget);
}
/// Get the PSHUF-style mask from PSHUF node.
///
/// This is a very minor wrapper around getTargetShuffleMask to easily form v4
/// PSHUF-style masks that can be reused with such instructions.
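/// e.g. PSHUFD with imm8 0x1B yields the mask <3,2,1,0>; PSHUFHW masks are
/// rebased into the range [0,4) before being returned.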
static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
  MVT VT = N.getSimpleValueType();
  SmallVector<int, 4> Mask;
  SmallVector<SDValue, 2> Ops;
  bool IsUnary;
  bool HaveMask =
      getTargetShuffleMask(N.getNode(), VT, false, Ops, Mask, IsUnary);
  (void)HaveMask;
  assert(HaveMask);

  // If we have more than 128-bits, only the low 128-bits of shuffle mask
  // matter. Check that the upper masks are repeats and remove them.
  if (VT.getSizeInBits() > 128) {
    int LaneElts = 128 / VT.getScalarSizeInBits();
#ifndef NDEBUG
    for (int i = 1, NumLanes = VT.getSizeInBits() / 128; i < NumLanes; ++i)
      for (int j = 0; j < LaneElts; ++j)
        assert(Mask[j] == Mask[i * LaneElts + j] - (LaneElts * i) &&
               "Mask doesn't repeat in high 128-bit lanes!");
#endif
    Mask.resize(LaneElts);
  }

  switch (N.getOpcode()) {
  case X86ISD::PSHUFD:
    return Mask;
  case X86ISD::PSHUFLW:
    Mask.resize(4);
    return Mask;
  case X86ISD::PSHUFHW:
    Mask.erase(Mask.begin(), Mask.begin() + 4);
    for (int &M : Mask)
      M -= 4;
    return Mask;
  default:
    llvm_unreachable("No valid shuffle instruction found!");
  }
}
/// Search for a combinable shuffle across a chain ending in pshufd.
///
/// We walk up the chain and look for a combinable shuffle, skipping over
/// shuffles that we could hoist this shuffle's transformation past without
/// altering anything.
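/// e.g. the dword mask of PSHUFD(PSHUFLW(PSHUFD(x))) can be merged into the
/// inner PSHUFD when the outer mask leaves the shuffled low words in place.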
static SDValue
combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
                             SelectionDAG &DAG) {
  assert(N.getOpcode() == X86ISD::PSHUFD &&
         "Called with something other than an x86 128-bit half shuffle!");
  SDLoc DL(N);

  // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
  // of the shuffles in the chain so that we can form a fresh chain to replace
  // this one.
  SmallVector<SDValue, 8> Chain;
  SDValue V = N.getOperand(0);
  for (; V.hasOneUse(); V = V.getOperand(0)) {
    switch (V.getOpcode()) {
    default:
      return SDValue(); // Nothing combined!

    case ISD::BITCAST:
      // Skip bitcasts as we always know the type for the target specific
      // instructions.
      continue;

    case X86ISD::PSHUFD:
      // Found another dword shuffle.
      break;

    case X86ISD::PSHUFLW:
      // Check that the low words (being shuffled) are the identity in the
      // dword shuffle, and the high words are self-contained.
      if (Mask[0] != 0 || Mask[1] != 1 ||
          !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
        return SDValue();

      Chain.push_back(V);
      continue;

    case X86ISD::PSHUFHW:
      // Check that the high words (being shuffled) are the identity in the
      // dword shuffle, and the low words are self-contained.
      if (Mask[2] != 2 || Mask[3] != 3 ||
          !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
        return SDValue();

      Chain.push_back(V);
      continue;

    case X86ISD::UNPCKL:
    case X86ISD::UNPCKH:
      // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
      // shuffle into a preceding word shuffle.
      if (V.getSimpleValueType().getVectorElementType() != MVT::i8 &&
          V.getSimpleValueType().getVectorElementType() != MVT::i16)
        return SDValue();

      // Search for a half-shuffle which we can combine with.
      unsigned CombineOp =
          V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
      if (V.getOperand(0) != V.getOperand(1) ||
          !V->isOnlyUserOf(V.getOperand(0).getNode()))
        return SDValue();
      Chain.push_back(V);
      V = V.getOperand(0);
      do {
        switch (V.getOpcode()) {
        default:
          return SDValue(); // Nothing to combine.

        case X86ISD::PSHUFLW:
        case X86ISD::PSHUFHW:
          if (V.getOpcode() == CombineOp)
            break;

          Chain.push_back(V);

          LLVM_FALLTHROUGH;
        case ISD::BITCAST:
          V = V.getOperand(0);
          continue;
        }
        break;
      } while (V.hasOneUse());
      break;
    }
    // Break out of the loop if we break out of the switch.
    break;
  }

  if (!V.hasOneUse())
    // We fell out of the loop without finding a viable combining instruction.
    return SDValue();

  // Merge this node's mask and our incoming mask.
  SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
  for (int &M : Mask)
    M = VMask[M];
  V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
                  getV4X86ShuffleImm8ForMask(Mask, DL, DAG));

  // Rebuild the chain around this new shuffle.
  while (!Chain.empty()) {
    SDValue W = Chain.pop_back_val();

    if (V.getValueType() != W.getOperand(0).getValueType())
      V = DAG.getBitcast(W.getOperand(0).getValueType(), V);

    switch (W.getOpcode()) {
    default:
      llvm_unreachable("Only PSHUF and UNPCK instructions get here!");

    case X86ISD::UNPCKL:
    case X86ISD::UNPCKH:
      V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
      break;

    case X86ISD::PSHUFD:
    case X86ISD::PSHUFLW:
    case X86ISD::PSHUFHW:
      V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
      break;
    }
  }
  if (V.getValueType() != N.getValueType())
    V = DAG.getBitcast(N.getValueType(), V);

  // Return the new chain to replace N.
  return V;
}
// Attempt to commute shufps LHS loads:
// permilps(shufps(load(),x)) --> permilps(shufps(x,load()))
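// Commuting moves the foldable load into the memory-operand position. Since
// SHUFP takes its low result half from the first source and its high half
// from the second, the commuted node swaps the imm8 nibbles and the callers
// below adjust their own immediates to compensate for the swapped halves.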
static SDValue combineCommutableSHUFP(SDValue N, MVT VT, const SDLoc &DL,
                                      SelectionDAG &DAG) {
  // TODO: Add vXf64 support.
  if (VT != MVT::v4f32 && VT != MVT::v8f32 && VT != MVT::v16f32)
    return SDValue();

  // SHUFP(LHS, RHS) -> SHUFP(RHS, LHS) iff LHS is foldable + RHS is not.
  auto commuteSHUFP = [&VT, &DL, &DAG](SDValue Parent, SDValue V) {
    if (V.getOpcode() != X86ISD::SHUFP || !Parent->isOnlyUserOf(V.getNode()))
      return SDValue();
    SDValue N0 = V.getOperand(0);
    SDValue N1 = V.getOperand(1);
    unsigned Imm = V.getConstantOperandVal(2);
    if (!MayFoldLoad(peekThroughOneUseBitcasts(N0)) ||
        MayFoldLoad(peekThroughOneUseBitcasts(N1)))
      return SDValue();
    Imm = ((Imm & 0x0F) << 4) | ((Imm & 0xF0) >> 4);
    return DAG.getNode(X86ISD::SHUFP, DL, VT, N1, N0,
                       DAG.getTargetConstant(Imm, DL, MVT::i8));
  };

  switch (N.getOpcode()) {
  case X86ISD::VPERMILPI:
    if (SDValue NewSHUFP = commuteSHUFP(N, N.getOperand(0))) {
      unsigned Imm = N.getConstantOperandVal(1);
      return DAG.getNode(X86ISD::VPERMILPI, DL, VT, NewSHUFP,
                         DAG.getTargetConstant(Imm ^ 0xAA, DL, MVT::i8));
    }
    break;
  case X86ISD::SHUFP: {
    SDValue N0 = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    unsigned Imm = N.getConstantOperandVal(2);
    if (N0 == N1) {
      if (SDValue NewSHUFP = commuteSHUFP(N, N0))
        return DAG.getNode(X86ISD::SHUFP, DL, VT, NewSHUFP, NewSHUFP,
                           DAG.getTargetConstant(Imm ^ 0xAA, DL, MVT::i8));
    } else if (SDValue NewSHUFP = commuteSHUFP(N, N0)) {
      return DAG.getNode(X86ISD::SHUFP, DL, VT, NewSHUFP, N1,
                         DAG.getTargetConstant(Imm ^ 0x0A, DL, MVT::i8));
    } else if (SDValue NewSHUFP = commuteSHUFP(N, N1)) {
      return DAG.getNode(X86ISD::SHUFP, DL, VT, N0, NewSHUFP,
                         DAG.getTargetConstant(Imm ^ 0xA0, DL, MVT::i8));
    }
    break;
  }
  }

  return SDValue();
}
/// Try to combine x86 target specific shuffles.
static SDValue combineTargetShuffle(SDValue N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const X86Subtarget &Subtarget) {
  SDLoc DL(N);
  MVT VT = N.getSimpleValueType();
  SmallVector<int, 4> Mask;
  unsigned Opcode = N.getOpcode();

  bool IsUnary;
  SmallVector<int, 64> TargetMask;
  SmallVector<SDValue, 2> TargetOps;
  if (isTargetShuffle(Opcode))
    getTargetShuffleMask(N.getNode(), VT, true, TargetOps, TargetMask, IsUnary);

  // Combine binary shuffle of 2 similar 'Horizontal' instructions into a
  // single instruction. Attempt to match a v2X64 repeating shuffle pattern that
  // represents the LHS/RHS inputs for the lower/upper halves.
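  // e.g. shuffle(HADD(A,B), HADD(C,D)) with a mask taking the low v2X64 half
  // of each operand can instead be emitted as the single node HADD(A,C).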
  SmallVector<int, 16> TargetMask128;
  if (!TargetMask.empty() && 0 < TargetOps.size() && TargetOps.size() <= 2 &&
      isRepeatedTargetShuffleMask(128, VT, TargetMask, TargetMask128)) {
    SmallVector<int, 16> WidenedMask128 = TargetMask128;
    while (WidenedMask128.size() > 2) {
      SmallVector<int, 16> WidenedMask;
      if (!canWidenShuffleElements(WidenedMask128, WidenedMask))
        break;
      WidenedMask128 = std::move(WidenedMask);
    }
    if (WidenedMask128.size() == 2) {
      assert(isUndefOrZeroOrInRange(WidenedMask128, 0, 4) && "Illegal shuffle");
      SDValue BC0 = peekThroughBitcasts(TargetOps.front());
      SDValue BC1 = peekThroughBitcasts(TargetOps.back());
      EVT VT0 = BC0.getValueType();
      EVT VT1 = BC1.getValueType();
      unsigned Opcode0 = BC0.getOpcode();
      unsigned Opcode1 = BC1.getOpcode();
      bool isHoriz = (Opcode0 == X86ISD::FHADD || Opcode0 == X86ISD::HADD ||
                      Opcode0 == X86ISD::FHSUB || Opcode0 == X86ISD::HSUB);
      if (Opcode0 == Opcode1 && VT0 == VT1 &&
          (isHoriz || Opcode0 == X86ISD::PACKSS || Opcode0 == X86ISD::PACKUS)) {
        bool SingleOp = (TargetOps.size() == 1);
        if (!isHoriz || shouldUseHorizontalOp(SingleOp, DAG, Subtarget)) {
          SDValue Lo = isInRange(WidenedMask128[0], 0, 2) ? BC0 : BC1;
          SDValue Hi = isInRange(WidenedMask128[1], 0, 2) ? BC0 : BC1;
          Lo = Lo.getOperand(WidenedMask128[0] & 1);
          Hi = Hi.getOperand(WidenedMask128[1] & 1);
          if (SingleOp) {
            MVT SrcVT = BC0.getOperand(0).getSimpleValueType();
            SDValue Undef = DAG.getUNDEF(SrcVT);
            SDValue Zero = getZeroVector(SrcVT, Subtarget, DAG, DL);
            Lo = (WidenedMask128[0] == SM_SentinelZero ? Zero : Lo);
            Hi = (WidenedMask128[1] == SM_SentinelZero ? Zero : Hi);
            Lo = (WidenedMask128[0] == SM_SentinelUndef ? Undef : Lo);
            Hi = (WidenedMask128[1] == SM_SentinelUndef ? Undef : Hi);
          }
          SDValue Horiz = DAG.getNode(Opcode0, DL, VT0, Lo, Hi);
          return DAG.getBitcast(VT, Horiz);
        }
      }
    }
  }

  if (SDValue R = combineCommutableSHUFP(N, VT, DL, DAG))
    return R;

  // Canonicalize UNARYSHUFFLE(XOR(X,-1)) -> XOR(UNARYSHUFFLE(X),-1) to
  // help expose the 'NOT' pattern further up the DAG.
  // TODO: This might be beneficial for any binop with a 'splattable' operand.
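  // This is sound because an all-ones (-1) operand is invariant under any
  // unary shuffle, so the XOR can migrate to the other side of the shuffle.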
  switch (Opcode) {
  case X86ISD::MOVDDUP:
  case X86ISD::PSHUFD: {
    SDValue Src = N.getOperand(0);
    if (Src.hasOneUse() && Src.getValueType() == VT) {
      if (SDValue Not = IsNOT(Src, DAG, /*OneUse*/ true)) {
        Not = DAG.getBitcast(VT, Not);
        Not = Opcode == X86ISD::MOVDDUP
                  ? DAG.getNode(Opcode, DL, VT, Not)
                  : DAG.getNode(Opcode, DL, VT, Not, N.getOperand(1));
        EVT IntVT = Not.getValueType().changeTypeToInteger();
        SDValue AllOnes = DAG.getConstant(-1, DL, IntVT);
        Not = DAG.getBitcast(IntVT, Not);
        Not = DAG.getNode(ISD::XOR, DL, IntVT, Not, AllOnes);
        return DAG.getBitcast(VT, Not);
      }
    }
    break;
  }
  }

  // Handle specific target shuffles.
  switch (Opcode) {
  case X86ISD::MOVDDUP: {
    SDValue Src = N.getOperand(0);
    // Turn a 128-bit MOVDDUP of a full vector load into movddup+vzload.
    if (VT == MVT::v2f64 && Src.hasOneUse() &&
        ISD::isNormalLoad(Src.getNode())) {
      LoadSDNode *LN = cast<LoadSDNode>(Src);
      if (SDValue VZLoad = narrowLoadToVZLoad(LN, MVT::f64, MVT::v2f64, DAG)) {
        SDValue Movddup = DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, VZLoad);
        DCI.CombineTo(N.getNode(), Movddup);
        DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
        DCI.recursivelyDeleteUnusedNodes(LN);
        return N; // Return N so it doesn't get rechecked!
      }
    }

    return SDValue();
  }
  case X86ISD::VBROADCAST: {
    SDValue Src = N.getOperand(0);
    SDValue BC = peekThroughBitcasts(Src);
    EVT SrcVT = Src.getValueType();
    EVT BCVT = BC.getValueType();

    // If broadcasting from another shuffle, attempt to simplify it.
    // TODO - we really need a general SimplifyDemandedVectorElts mechanism.
    if (isTargetShuffle(BC.getOpcode()) &&
        VT.getScalarSizeInBits() % BCVT.getScalarSizeInBits() == 0) {
      unsigned Scale = VT.getScalarSizeInBits() / BCVT.getScalarSizeInBits();
      SmallVector<int, 16> DemandedMask(BCVT.getVectorNumElements(),
                                        SM_SentinelUndef);
      for (unsigned i = 0; i != Scale; ++i)
        DemandedMask[i] = i;
      if (SDValue Res = combineX86ShufflesRecursively(
              {BC}, 0, BC, DemandedMask, {}, /*Depth*/ 0,
              /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
        return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
                           DAG.getBitcast(SrcVT, Res));
    }

    // broadcast(bitcast(src)) -> bitcast(broadcast(src))
    // 32-bit targets have to bitcast i64 to f64, so better to bitcast upward.
    if (Src.getOpcode() == ISD::BITCAST &&
        SrcVT.getScalarSizeInBits() == BCVT.getScalarSizeInBits() &&
        DAG.getTargetLoweringInfo().isTypeLegal(BCVT)) {
      EVT NewVT = EVT::getVectorVT(*DAG.getContext(), BCVT.getScalarType(),
                                   VT.getVectorNumElements());
      return DAG.getBitcast(VT, DAG.getNode(X86ISD::VBROADCAST, DL, NewVT, BC));
    }

    // Reduce broadcast source vector to lowest 128-bits.
    if (SrcVT.getSizeInBits() > 128)
      return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
                         extract128BitVector(Src, 0, DAG, DL));

    // broadcast(scalar_to_vector(x)) -> broadcast(x).
    if (Src.getOpcode() == ISD::SCALAR_TO_VECTOR)
      return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Src.getOperand(0));

    // Share broadcast with the longest vector and extract low subvector (free).
    // Ensure the same SDValue from the SDNode use is being used.
    for (SDNode *User : Src->uses())
      if (User != N.getNode() && User->getOpcode() == X86ISD::VBROADCAST &&
          Src == User->getOperand(0) &&
          User->getValueSizeInBits(0) > VT.getSizeInBits()) {
        return extractSubVector(SDValue(User, 0), 0, DAG, DL,
                                VT.getSizeInBits());
      }

    // vbroadcast(scalarload X) -> vbroadcast_load X
    // For float loads, extract other uses of the scalar from the broadcast.
    if (!SrcVT.isVector() && (Src.hasOneUse() || VT.isFloatingPoint()) &&
        ISD::isNormalLoad(Src.getNode())) {
      LoadSDNode *LN = cast<LoadSDNode>(Src);
      SDVTList Tys = DAG.getVTList(VT, MVT::Other);
      SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
      SDValue BcastLd =
          DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
                                  LN->getMemoryVT(), LN->getMemOperand());
      // If the load value is used only by N, replace it via CombineTo N.
      bool NoReplaceExtract = Src.hasOneUse();
      DCI.CombineTo(N.getNode(), BcastLd);
      if (NoReplaceExtract) {
        DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
        DCI.recursivelyDeleteUnusedNodes(LN);
      } else {
        SDValue Scl = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SrcVT, BcastLd,
                                  DAG.getIntPtrConstant(0, DL));
        DCI.CombineTo(LN, Scl, BcastLd.getValue(1));
      }
      return N; // Return N so it doesn't get rechecked!
    }

    // Due to isTypeDesirableForOp, we won't always shrink a load truncated to
    // i16. So shrink it ourselves if we can make a broadcast_load.
    if (SrcVT == MVT::i16 && Src.getOpcode() == ISD::TRUNCATE &&
        Src.hasOneUse() && Src.getOperand(0).hasOneUse()) {
      assert(Subtarget.hasAVX2() && "Expected AVX2");
      SDValue TruncIn = Src.getOperand(0);

      // If this is a truncate of a non-extending load we can just narrow it to
      // use a broadcast_load.
      if (ISD::isNormalLoad(TruncIn.getNode())) {
        LoadSDNode *LN = cast<LoadSDNode>(TruncIn);
        // Unless it's volatile or atomic.
        if (LN->isSimple()) {
          SDVTList Tys = DAG.getVTList(VT, MVT::Other);
          SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
          SDValue BcastLd = DAG.getMemIntrinsicNode(
              X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::i16,
              LN->getPointerInfo(), LN->getOriginalAlign(),
              LN->getMemOperand()->getFlags());
          DCI.CombineTo(N.getNode(), BcastLd);
          DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
          DCI.recursivelyDeleteUnusedNodes(Src.getNode());
          return N; // Return N so it doesn't get rechecked!
        }
      }

      // If this is a truncate of an i16 extload, we can directly replace it.
      if (ISD::isUNINDEXEDLoad(Src.getOperand(0).getNode()) &&
          ISD::isEXTLoad(Src.getOperand(0).getNode())) {
        LoadSDNode *LN = cast<LoadSDNode>(Src.getOperand(0));
        if (LN->getMemoryVT().getSizeInBits() == 16) {
          SDVTList Tys = DAG.getVTList(VT, MVT::Other);
          SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
          SDValue BcastLd =
              DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
                                      LN->getMemoryVT(), LN->getMemOperand());
          DCI.CombineTo(N.getNode(), BcastLd);
          DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
          DCI.recursivelyDeleteUnusedNodes(Src.getNode());
          return N; // Return N so it doesn't get rechecked!
        }
      }

      // If this is a truncate of load that has been shifted right, we can
      // offset the pointer and use a narrower load.
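      // e.g. broadcasting (i16 (trunc (srl (i32 load X), 16))) can instead
      // broadcast_load the i16 at address X+2 (x86 is little-endian).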
      if (TruncIn.getOpcode() == ISD::SRL &&
          TruncIn.getOperand(0).hasOneUse() &&
          isa<ConstantSDNode>(TruncIn.getOperand(1)) &&
          ISD::isNormalLoad(TruncIn.getOperand(0).getNode())) {
        LoadSDNode *LN = cast<LoadSDNode>(TruncIn.getOperand(0));
        unsigned ShiftAmt = TruncIn.getConstantOperandVal(1);
        // Make sure the shift amount and the load size are divisible by 16.
        // Don't do this if the load is volatile or atomic.
        if (ShiftAmt % 16 == 0 && TruncIn.getValueSizeInBits() % 16 == 0 &&
            LN->isSimple()) {
          unsigned Offset = ShiftAmt / 8;
          SDVTList Tys = DAG.getVTList(VT, MVT::Other);
          SDValue Ptr = DAG.getMemBasePlusOffset(LN->getBasePtr(), Offset, DL);
          SDValue Ops[] = { LN->getChain(), Ptr };
          SDValue BcastLd = DAG.getMemIntrinsicNode(
              X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::i16,
              LN->getPointerInfo().getWithOffset(Offset),
              LN->getOriginalAlign(),
              LN->getMemOperand()->getFlags());
          DCI.CombineTo(N.getNode(), BcastLd);
          DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
          DCI.recursivelyDeleteUnusedNodes(Src.getNode());
          return N; // Return N so it doesn't get rechecked!
        }
      }
    }

    // vbroadcast(vzload X) -> vbroadcast_load X
    if (Src.getOpcode() == X86ISD::VZEXT_LOAD && Src.hasOneUse()) {
      MemSDNode *LN = cast<MemIntrinsicSDNode>(Src);
      if (LN->getMemoryVT().getSizeInBits() == VT.getScalarSizeInBits()) {
        SDVTList Tys = DAG.getVTList(VT, MVT::Other);
        SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
        SDValue BcastLd =
            DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, DL, Tys, Ops,
                                    LN->getMemoryVT(), LN->getMemOperand());
        DCI.CombineTo(N.getNode(), BcastLd);
        DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
        DCI.recursivelyDeleteUnusedNodes(LN);
        return N; // Return N so it doesn't get rechecked!
      }
    }

    // vbroadcast(vector load X) -> vbroadcast_load
    if (SrcVT == MVT::v2f64 && Src.hasOneUse() &&
        ISD::isNormalLoad(Src.getNode())) {
      LoadSDNode *LN = cast<LoadSDNode>(Src);
      // Unless the load is volatile or atomic.
      if (LN->isSimple()) {
        SDVTList Tys = DAG.getVTList(VT, MVT::Other);
        SDValue Ops[] = { LN->getChain(), LN->getBasePtr() };
        SDValue BcastLd = DAG.getMemIntrinsicNode(
            X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MVT::f64,
            LN->getPointerInfo(), LN->getOriginalAlign(),
            LN->getMemOperand()->getFlags());
        DCI.CombineTo(N.getNode(), BcastLd);
        DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), BcastLd.getValue(1));
        DCI.recursivelyDeleteUnusedNodes(LN);
        return N; // Return N so it doesn't get rechecked!
      }
    }

    return SDValue();
  }
  case X86ISD::VZEXT_MOVL: {
    SDValue N0 = N.getOperand(0);

    // If this a vzmovl of a full vector load, replace it with a vzload, unless
    // the load is volatile.
    if (N0.hasOneUse() && ISD::isNormalLoad(N0.getNode())) {
      auto *LN = cast<LoadSDNode>(N0);
      if (SDValue VZLoad =
              narrowLoadToVZLoad(LN, VT.getVectorElementType(), VT, DAG)) {
        DCI.CombineTo(N.getNode(), VZLoad);
        DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
        DCI.recursivelyDeleteUnusedNodes(LN);
        return N;
      }
    }

    // If this a VZEXT_MOVL of a VBROADCAST_LOAD, we don't need the broadcast
    // and can just use a VZEXT_LOAD.
    // FIXME: Is there some way to do this with SimplifyDemandedVectorElts?
    if (N0.hasOneUse() && N0.getOpcode() == X86ISD::VBROADCAST_LOAD) {
      auto *LN = cast<MemSDNode>(N0);
      if (VT.getScalarSizeInBits() == LN->getMemoryVT().getSizeInBits()) {
        SDVTList Tys = DAG.getVTList(VT, MVT::Other);
        SDValue Ops[] = {LN->getChain(), LN->getBasePtr()};
        SDValue VZLoad =
            DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops,
                                    LN->getMemoryVT(), LN->getMemOperand());
        DCI.CombineTo(N.getNode(), VZLoad);
        DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
        DCI.recursivelyDeleteUnusedNodes(LN);
        return N;
      }
    }

    // Turn (v2i64 (vzext_movl (scalar_to_vector (i64 X)))) into
    // (v2i64 (bitcast (v4i32 (vzext_movl (scalar_to_vector (i32 (trunc X)))))))
    // if the upper bits of the i64 are zero.
    if (N0.hasOneUse() && N0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
        N0.getOperand(0).hasOneUse() &&
        N0.getOperand(0).getValueType() == MVT::i64) {
      SDValue In = N0.getOperand(0);
      APInt Mask = APInt::getHighBitsSet(64, 32);
      if (DAG.MaskedValueIsZero(In, Mask)) {
        SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, In);
        MVT VecVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() * 2);
        SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VecVT, Trunc);
        SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, DL, VecVT, SclVec);
        return DAG.getBitcast(VT, Movl);
      }
    }

    // Load a scalar integer constant directly to XMM instead of transferring an
    // immediate value from GPR.
    // vzext_movl (scalar_to_vector C) --> load [C,0...]
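    // e.g. (v4i32 vzext_movl (scalar_to_vector (i32 42))) becomes a single
    // 16-byte constant-pool load of <42, 0, 0, 0>.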
    if (N0.getOpcode() == ISD::SCALAR_TO_VECTOR) {
      if (auto *C = dyn_cast<ConstantSDNode>(N0.getOperand(0))) {
        // Create a vector constant - scalar constant followed by zeros.
        EVT ScalarVT = N0.getOperand(0).getValueType();
        Type *ScalarTy = ScalarVT.getTypeForEVT(*DAG.getContext());
        unsigned NumElts = VT.getVectorNumElements();
        Constant *Zero = ConstantInt::getNullValue(ScalarTy);
        SmallVector<Constant *, 32> ConstantVec(NumElts, Zero);
        ConstantVec[0] = const_cast<ConstantInt *>(C->getConstantIntValue());

        // Load the vector constant from constant pool.
        MVT PVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
        SDValue CP = DAG.getConstantPool(ConstantVector::get(ConstantVec), PVT);
        MachinePointerInfo MPI =
            MachinePointerInfo::getConstantPool(DAG.getMachineFunction());
        Align Alignment = cast<ConstantPoolSDNode>(CP)->getAlign();
        return DAG.getLoad(VT, DL, DAG.getEntryNode(), CP, MPI, Alignment,
                           MachineMemOperand::MOLoad);
      }
    }

    return SDValue();
  }
  case X86ISD::BLENDI: {
    SDValue N0 = N.getOperand(0);
    SDValue N1 = N.getOperand(1);

    // blend(bitcast(x),bitcast(y)) -> bitcast(blend(x,y)) to narrower types.
    // TODO: Handle MVT::v16i16 repeated blend mask.
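    // e.g. a v4i64 blend with imm8 0b0011 becomes a v8i32 blend with imm8
    // 0b00001111, selecting the same bytes with narrower elements.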
    if (N0.getOpcode() == ISD::BITCAST && N1.getOpcode() == ISD::BITCAST &&
        N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()) {
      MVT SrcVT = N0.getOperand(0).getSimpleValueType();
      if ((VT.getScalarSizeInBits() % SrcVT.getScalarSizeInBits()) == 0 &&
          SrcVT.getScalarSizeInBits() >= 32) {
        unsigned BlendMask = N.getConstantOperandVal(2);
        unsigned Size = VT.getVectorNumElements();
        unsigned Scale = VT.getScalarSizeInBits() / SrcVT.getScalarSizeInBits();
        BlendMask = scaleVectorShuffleBlendMask(BlendMask, Size, Scale);
        return DAG.getBitcast(
            VT, DAG.getNode(X86ISD::BLENDI, DL, SrcVT, N0.getOperand(0),
                            N1.getOperand(0),
                            DAG.getTargetConstant(BlendMask, DL, MVT::i8)));
      }
    }
    return SDValue();
  }
  case X86ISD::VPERMI: {
    // vpermi(bitcast(x)) -> bitcast(vpermi(x)) for same number of elements.
    // TODO: Remove when we have preferred domains in combineX86ShuffleChain.
    SDValue N0 = N.getOperand(0);
    SDValue N1 = N.getOperand(1);
    unsigned EltSizeInBits = VT.getScalarSizeInBits();
    if (N0.getOpcode() == ISD::BITCAST &&
        N0.getOperand(0).getScalarValueSizeInBits() == EltSizeInBits) {
      SDValue Src = N0.getOperand(0);
      EVT SrcVT = Src.getValueType();
      SDValue Res = DAG.getNode(X86ISD::VPERMI, DL, SrcVT, Src, N1);
      return DAG.getBitcast(VT, Res);
    }
    return SDValue();
  }
  case X86ISD::VPERM2X128: {
    // If both 128-bit values were inserted into high halves of 256-bit values,
    // the shuffle can be reduced to a concatenation of subvectors:
    // vperm2x128 (ins ?, X, C1), (ins ?, Y, C2), 0x31 --> concat X, Y
    // Note: We are only looking for the exact high/high shuffle mask because we
    // expect to fold other similar patterns before creating this opcode.
    SDValue Ins0 = peekThroughBitcasts(N.getOperand(0));
    SDValue Ins1 = peekThroughBitcasts(N.getOperand(1));
    unsigned Imm = N.getConstantOperandVal(2);
    if (!(Imm == 0x31 &&
          Ins0.getOpcode() == ISD::INSERT_SUBVECTOR &&
          Ins1.getOpcode() == ISD::INSERT_SUBVECTOR &&
          Ins0.getValueType() == Ins1.getValueType()))
      return SDValue();

    SDValue X = Ins0.getOperand(1);
    SDValue Y = Ins1.getOperand(1);
    unsigned C1 = Ins0.getConstantOperandVal(2);
    unsigned C2 = Ins1.getConstantOperandVal(2);
    MVT SrcVT = X.getSimpleValueType();
    unsigned SrcElts = SrcVT.getVectorNumElements();
    if (SrcVT != Y.getSimpleValueType() || SrcVT.getSizeInBits() != 128 ||
        C1 != SrcElts || C2 != SrcElts)
      return SDValue();

    return DAG.getBitcast(VT, DAG.getNode(ISD::CONCAT_VECTORS, DL,
                                          Ins1.getValueType(), X, Y));
  }
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFLW:
  case X86ISD::PSHUFHW:
    Mask = getPSHUFShuffleMask(N);
    assert(Mask.size() == 4);
    break;
  case X86ISD::MOVSD:
  case X86ISD::MOVSS: {
    SDValue N0 = N.getOperand(0);
    SDValue N1 = N.getOperand(1);

    // Canonicalize scalar FPOps:
    // MOVS*(N0, OP(N0, N1)) --> MOVS*(N0, SCALAR_TO_VECTOR(OP(N0[0], N1[0])))
    // If commutable, allow OP(N1[0], N0[0]).
    unsigned Opcode1 = N1.getOpcode();
    if (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL || Opcode1 == ISD::FSUB ||
        Opcode1 == ISD::FDIV) {
      SDValue N10 = N1.getOperand(0);
      SDValue N11 = N1.getOperand(1);
      if (N10 == N0 ||
          (N11 == N0 && (Opcode1 == ISD::FADD || Opcode1 == ISD::FMUL))) {
        if (N10 != N0)
          std::swap(N10, N11);
        MVT SVT = VT.getVectorElementType();
        SDValue ZeroIdx = DAG.getIntPtrConstant(0, DL);
        N10 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N10, ZeroIdx);
        N11 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, SVT, N11, ZeroIdx);
        SDValue Scl = DAG.getNode(Opcode1, DL, SVT, N10, N11);
        SDValue SclVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
        return DAG.getNode(Opcode, DL, VT, N0, SclVec);
      }
    }

    return SDValue();
  }
  case X86ISD::INSERTPS: {
    assert(VT == MVT::v4f32 && "INSERTPS ValueType must be MVT::v4f32");
    SDValue Op0 = N.getOperand(0);
    SDValue Op1 = N.getOperand(1);
    unsigned InsertPSMask = N.getConstantOperandVal(2);
    unsigned SrcIdx = (InsertPSMask >> 6) & 0x3;
    unsigned DstIdx = (InsertPSMask >> 4) & 0x3;
    unsigned ZeroMask = InsertPSMask & 0xF;
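    // The INSERTPS imm8 layout is [srcidx:2][dstidx:2][zeromask:4]: bits [7:6]
    // pick the source element, bits [5:4] pick the destination lane, and each
    // set bit in [3:0] zeroes the corresponding destination element.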

    // If we zero out all elements from Op0 then we don't need to reference it.
    if (((ZeroMask | (1u << DstIdx)) == 0xF) && !Op0.isUndef())
      return DAG.getNode(X86ISD::INSERTPS, DL, VT, DAG.getUNDEF(VT), Op1,
                         DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));

    // If we zero out the element from Op1 then we don't need to reference it.
    if ((ZeroMask & (1u << DstIdx)) && !Op1.isUndef())
      return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
                         DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));

    // Attempt to merge insertps Op1 with an inner target shuffle node.
    SmallVector<int, 8> TargetMask1;
    SmallVector<SDValue, 2> Ops1;
    APInt KnownUndef1, KnownZero1;
    if (getTargetShuffleAndZeroables(Op1, TargetMask1, Ops1, KnownUndef1,
                                     KnownZero1)) {
      if (KnownUndef1[SrcIdx] || KnownZero1[SrcIdx]) {
        // Zero/UNDEF insertion - zero out element and remove dependency.
        InsertPSMask |= (1u << DstIdx);
        return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, DAG.getUNDEF(VT),
                           DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
      }
      // Update insertps mask srcidx and reference the source input directly.
      int M = TargetMask1[SrcIdx];
      assert(0 <= M && M < 8 && "Shuffle index out of range");
      InsertPSMask = (InsertPSMask & 0x3f) | ((M & 0x3) << 6);
      Op1 = Ops1[M < 4 ? 0 : 1];
      return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
                         DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
    }

    // Attempt to merge insertps Op0 with an inner target shuffle node.
    SmallVector<int, 8> TargetMask0;
    SmallVector<SDValue, 2> Ops0;
    APInt KnownUndef0, KnownZero0;
    if (getTargetShuffleAndZeroables(Op0, TargetMask0, Ops0, KnownUndef0,
                                     KnownZero0)) {
      bool Updated = false;
      bool UseInput00 = false;
      bool UseInput01 = false;
      for (int i = 0; i != 4; ++i) {
        if ((InsertPSMask & (1u << i)) || (i == (int)DstIdx)) {
          // No change if element is already zero or the inserted element.
          continue;
        } else if (KnownUndef0[i] || KnownZero0[i]) {
          // If the target mask is undef/zero then we must zero the element.
          InsertPSMask |= (1u << i);
          Updated = true;
          continue;
        }

        // The input vector element must be inline.
        int M = TargetMask0[i];
        if (M != i && M != (i + 4))
          break;

        // Determine which inputs of the target shuffle we're using.
        UseInput00 |= (0 <= M && M < 4);
        UseInput01 |= (4 <= M);
      }

      // If we're not using both inputs of the target shuffle then use the
      // referenced input directly.
      if (UseInput00 && !UseInput01) {
        Updated = true;
        Op0 = Ops0[0];
      } else if (!UseInput00 && UseInput01) {
        Updated = true;
        Op0 = Ops0[1];
      }

      if (Updated)
        return DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0, Op1,
                           DAG.getTargetConstant(InsertPSMask, DL, MVT::i8));
    }

    // If we're inserting an element from a vbroadcast load, fold the
    // load into the X86insertps instruction. We need to convert the scalar
    // load to a vector and clear the source lane of the INSERTPS control.
    if (Op1.getOpcode() == X86ISD::VBROADCAST_LOAD && Op1.hasOneUse()) {
      auto *MemIntr = cast<MemIntrinsicSDNode>(Op1);
      if (MemIntr->getMemoryVT().getScalarSizeInBits() == 32) {
        SDValue Load = DAG.getLoad(MVT::f32, DL, MemIntr->getChain(),
                                   MemIntr->getBasePtr(),
                                   MemIntr->getMemOperand());
        SDValue Insert = DAG.getNode(X86ISD::INSERTPS, DL, VT, Op0,
                                     DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT,
                                                 Load),
                                     DAG.getTargetConstant(InsertPSMask & 0x3f,
                                                           DL, MVT::i8));
        DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
        return Insert;
      }
    }

    return SDValue();
  }
  default:
    return SDValue();
  }

  // Nuke no-op shuffles that show up after combining.
  if (isNoopShuffleMask(Mask))
    return N.getOperand(0);

  // Look for simplifications involving one or two shuffle instructions.
  SDValue V = N.getOperand(0);
  switch (N.getOpcode()) {
  default:
    break;
  case X86ISD::PSHUFLW:
  case X86ISD::PSHUFHW:
    assert(VT.getVectorElementType() == MVT::i16 && "Bad word shuffle type!");

    // See if this reduces to a PSHUFD which is no more expensive and can
    // combine with more operations. Note that it has to at least flip the
    // dwords as otherwise it would have been removed as a no-op.
    if (makeArrayRef(Mask).equals({2, 3, 0, 1})) {
      int DMask[] = {0, 1, 2, 3};
      int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
      DMask[DOffset + 0] = DOffset + 1;
      DMask[DOffset + 1] = DOffset + 0;
      MVT DVT = MVT::getVectorVT(MVT::i32, VT.getVectorNumElements() / 2);
      V = DAG.getBitcast(DVT, V);
      V = DAG.getNode(X86ISD::PSHUFD, DL, DVT, V,
                      getV4X86ShuffleImm8ForMask(DMask, DL, DAG));
      return DAG.getBitcast(VT, V);
    }

    // Look for shuffle patterns which can be implemented as a single unpack.
    // FIXME: This doesn't handle the location of the PSHUFD generically, and
    // only works when we have a PSHUFD followed by two half-shuffles.
    if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
        (V.getOpcode() == X86ISD::PSHUFLW ||
         V.getOpcode() == X86ISD::PSHUFHW) &&
        V.getOpcode() != N.getOpcode() &&
        V.hasOneUse() && V.getOperand(0).hasOneUse()) {
      SDValue D = peekThroughOneUseBitcasts(V.getOperand(0));
      if (D.getOpcode() == X86ISD::PSHUFD) {
        SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
        SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
        int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
        int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
        int WordMask[8];
        for (int i = 0; i < 4; ++i) {
          WordMask[i + NOffset] = Mask[i] + NOffset;
          WordMask[i + VOffset] = VMask[i] + VOffset;
        }
        // Map the word mask through the DWord mask.
        int MappedMask[8];
        for (int i = 0; i < 8; ++i)
          MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
        if (makeArrayRef(MappedMask).equals({0, 0, 1, 1, 2, 2, 3, 3}) ||
            makeArrayRef(MappedMask).equals({4, 4, 5, 5, 6, 6, 7, 7})) {
          // We can replace all three shuffles with an unpack.
          V = DAG.getBitcast(VT, D.getOperand(0));
          return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
                                                : X86ISD::UNPCKH,
                             DL, VT, V, V);
        }
      }
    }

    break;

  case X86ISD::PSHUFD:
    if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG))
      return NewN;

    break;
  }

  return SDValue();
}
/// Checks if the shuffle mask takes subsequent elements
/// alternately from two vectors.
/// For example <0, 5, 2, 7> or <8, 1, 10, 3, 12, 5, 14, 7> are both correct.
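/// i.e. every even result element comes from one source and every odd element
/// from the other, each taken at its own index.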
static bool isAddSubOrSubAddMask(ArrayRef<int> Mask, bool &Op0Even) {
  int ParitySrc[2] = {-1, -1};
  unsigned Size = Mask.size();
  for (unsigned i = 0; i != Size; ++i) {
    int M = Mask[i];
    if (M < 0)
      continue;

    // Make sure we are using the matching element from the input.
    if ((M % Size) != i)
      return false;

    // Make sure we use the same input for all elements of the same parity.
    int Src = M / Size;
    if (ParitySrc[i % 2] >= 0 && ParitySrc[i % 2] != Src)
      return false;
    ParitySrc[i % 2] = Src;
  }

  // Make sure each input is used.
  if (ParitySrc[0] < 0 || ParitySrc[1] < 0 || ParitySrc[0] == ParitySrc[1])
    return false;

  Op0Even = ParitySrc[0] == 0;
  return true;
}
/// Returns true iff the shuffle node \p N can be replaced with ADDSUB(SUBADD)
/// operation. If true is returned then the operands of ADDSUB(SUBADD) operation
/// are written to the parameters \p Opnd0 and \p Opnd1.
///
/// We combine shuffle to ADDSUB(SUBADD) directly on the abstract vector
/// shuffle nodes so it is easier to generically match. We also insert dummy
/// vector shuffle nodes for the operands which explicitly discard the lanes
/// which are unused by this operation to try to flow through the rest of the
/// combiner the fact that they're unused.
static bool isAddSubOrSubAdd(SDNode *N, const X86Subtarget &Subtarget,
                             SelectionDAG &DAG, SDValue &Opnd0, SDValue &Opnd1,
                             bool &IsSubAdd) {

  EVT VT = N->getValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!Subtarget.hasSSE3() || !TLI.isTypeLegal(VT) ||
      !VT.getSimpleVT().isFloatingPoint())
    return false;

  // We only handle target-independent shuffles.
  // FIXME: It would be easy and harmless to use the target shuffle mask
  // extraction tool to support more.
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return false;

  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);

  // Make sure we have an FADD and an FSUB.
  if ((V1.getOpcode() != ISD::FADD && V1.getOpcode() != ISD::FSUB) ||
      (V2.getOpcode() != ISD::FADD && V2.getOpcode() != ISD::FSUB) ||
      V1.getOpcode() == V2.getOpcode())
    return false;

  // If there are other uses of these operations we can't fold them.
  if (!V1->hasOneUse() || !V2->hasOneUse())
    return false;

  // Ensure that both operations have the same operands. Note that we can
  // commute the FADD operands.
  SDValue LHS, RHS;
  if (V1.getOpcode() == ISD::FSUB) {
    LHS = V1->getOperand(0); RHS = V1->getOperand(1);
    if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
        (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
      return false;
  } else {
    assert(V2.getOpcode() == ISD::FSUB && "Unexpected opcode");
    LHS = V2->getOperand(0); RHS = V2->getOperand(1);
    if ((V1->getOperand(0) != LHS || V1->getOperand(1) != RHS) &&
        (V1->getOperand(0) != RHS || V1->getOperand(1) != LHS))
      return false;
  }

  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
  bool Op0Even;
  if (!isAddSubOrSubAddMask(Mask, Op0Even))
    return false;

  // It's a subadd if the vector in the even parity is an FADD.
  IsSubAdd = Op0Even ? V1->getOpcode() == ISD::FADD
                     : V2->getOpcode() == ISD::FADD;

  Opnd0 = LHS;
  Opnd1 = RHS;
  return true;
}
/// Combine shuffle of two fma nodes into FMAddSub or FMSubAdd.
static SDValue combineShuffleToFMAddSub(SDNode *N,
                                        const X86Subtarget &Subtarget,
                                        SelectionDAG &DAG) {
  // We only handle target-independent shuffles.
  // FIXME: It would be easy and harmless to use the target shuffle mask
  // extraction tool to support more.
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return SDValue();

  MVT VT = N->getSimpleValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!Subtarget.hasAnyFMA() || !TLI.isTypeLegal(VT))
    return SDValue();

  // We're trying to match (shuffle fma(a, b, c), X86Fmsub(a, b, c)).
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  SDValue FMAdd = Op0, FMSub = Op1;
  if (FMSub.getOpcode() != X86ISD::FMSUB)
    std::swap(FMAdd, FMSub);

  if (FMAdd.getOpcode() != ISD::FMA || FMSub.getOpcode() != X86ISD::FMSUB ||
      FMAdd.getOperand(0) != FMSub.getOperand(0) || !FMAdd.hasOneUse() ||
      FMAdd.getOperand(1) != FMSub.getOperand(1) || !FMSub.hasOneUse() ||
      FMAdd.getOperand(2) != FMSub.getOperand(2))
    return SDValue();

  // Check for correct shuffle mask.
  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
  bool Op0Even;
  if (!isAddSubOrSubAddMask(Mask, Op0Even))
    return SDValue();

  // FMAddSub takes zeroth operand from FMSub node.
  SDLoc DL(N);
  bool IsSubAdd = Op0Even ? Op0 == FMAdd : Op1 == FMAdd;
  unsigned Opcode = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
  return DAG.getNode(Opcode, DL, VT, FMAdd.getOperand(0), FMAdd.getOperand(1),
                     FMAdd.getOperand(2));
}
/// Try to combine a shuffle into a target-specific add-sub or
/// mul-add-sub node.
static SDValue combineShuffleToAddSubOrFMAddSub(SDNode *N,
                                                const X86Subtarget &Subtarget,
                                                SelectionDAG &DAG) {
  if (SDValue V = combineShuffleToFMAddSub(N, Subtarget, DAG))
    return V;

  SDValue Opnd0, Opnd1;
  bool IsSubAdd;
  if (!isAddSubOrSubAdd(N, Subtarget, DAG, Opnd0, Opnd1, IsSubAdd))
    return SDValue();

  MVT VT = N->getSimpleValueType(0);
  SDLoc DL(N);

  // Try to generate X86ISD::FMADDSUB node here.
  SDValue Opnd2;
  if (isFMAddSubOrFMSubAdd(Subtarget, DAG, Opnd0, Opnd1, Opnd2, 2)) {
    unsigned Opc = IsSubAdd ? X86ISD::FMSUBADD : X86ISD::FMADDSUB;
    return DAG.getNode(Opc, DL, VT, Opnd0, Opnd1, Opnd2);
  }

  if (IsSubAdd)
    return SDValue();

  // Do not generate X86ISD::ADDSUB node for 512-bit types even though
  // the ADDSUB idiom has been successfully recognized. There are no known
  // X86 targets with 512-bit ADDSUB instructions!
  if (VT.is512BitVector())
    return SDValue();

  return DAG.getNode(X86ISD::ADDSUB, DL, VT, Opnd0, Opnd1);
}
// We are looking for a shuffle where both sources are concatenated with undef
// and have a width that is half of the output's width. AVX2 has VPERMD/Q, so
// if we can express this as a single-source shuffle, that's preferable.
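// e.g. with v4i32 output and v2i32 sources, shuffle(concat(x,undef),
// concat(y,undef), <0,1,4,5>) becomes shuffle(concat(x,y), undef, <0,1,2,3>).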
static SDValue combineShuffleOfConcatUndef(SDNode *N, SelectionDAG &DAG,
                                           const X86Subtarget &Subtarget) {
  if (!Subtarget.hasAVX2() || !isa<ShuffleVectorSDNode>(N))
    return SDValue();

  EVT VT = N->getValueType(0);

  // We only care about shuffles of 128/256-bit vectors of 32/64-bit values.
  if (!VT.is128BitVector() && !VT.is256BitVector())
    return SDValue();

  if (VT.getVectorElementType() != MVT::i32 &&
      VT.getVectorElementType() != MVT::i64 &&
      VT.getVectorElementType() != MVT::f32 &&
      VT.getVectorElementType() != MVT::f64)
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Check that both sources are concats with undef.
  if (N0.getOpcode() != ISD::CONCAT_VECTORS ||
      N1.getOpcode() != ISD::CONCAT_VECTORS || N0.getNumOperands() != 2 ||
      N1.getNumOperands() != 2 || !N0.getOperand(1).isUndef() ||
      !N1.getOperand(1).isUndef())
    return SDValue();

  // Construct the new shuffle mask. Elements from the first source retain their
  // index, but elements from the second source no longer need to skip an undef.
  SmallVector<int, 8> Mask;
  int NumElts = VT.getVectorNumElements();

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  for (int Elt : SVOp->getMask())
    Mask.push_back(Elt < NumElts ? Elt : (Elt - NumElts / 2));

  SDLoc DL(N);
  SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, N0.getOperand(0),
                               N1.getOperand(0));
  return DAG.getVectorShuffle(VT, DL, Concat, DAG.getUNDEF(VT), Mask);
}
36752 /// Eliminate a redundant shuffle of a horizontal math op.
36753 static SDValue foldShuffleOfHorizOp(SDNode *N, SelectionDAG &DAG) {
36754 unsigned Opcode = N->getOpcode();
36755 if (Opcode != X86ISD::MOVDDUP && Opcode != X86ISD::VBROADCAST)
36756 if (Opcode != ISD::VECTOR_SHUFFLE || !N->getOperand(1).isUndef())
36759 // For a broadcast, peek through an extract element of index 0 to find the
36760 // horizontal op: broadcast (ext_vec_elt HOp, 0)
36761 EVT VT = N->getValueType(0);
36762 if (Opcode == X86ISD::VBROADCAST) {
36763 SDValue SrcOp = N->getOperand(0);
36764 if (SrcOp.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
36765 SrcOp.getValueType() == MVT::f64 &&
36766 SrcOp.getOperand(0).getValueType() == VT &&
36767 isNullConstant(SrcOp.getOperand(1)))
36768 N = SrcOp.getNode();
36771 SDValue HOp = N->getOperand(0);
36772 if (HOp.getOpcode() != X86ISD::HADD && HOp.getOpcode() != X86ISD::FHADD &&
36773 HOp.getOpcode() != X86ISD::HSUB && HOp.getOpcode() != X86ISD::FHSUB)
36776 // 128-bit horizontal math instructions are defined to operate on adjacent
36777 // lanes of each operand as:
36778 // v4X32: A[0] + A[1] , A[2] + A[3] , B[0] + B[1] , B[2] + B[3]
36779 // ...similarly for v2f64 and v8i16.
36780 if (!HOp.getOperand(0).isUndef() && !HOp.getOperand(1).isUndef() &&
36781 HOp.getOperand(0) != HOp.getOperand(1))
36784 // The shuffle that we are eliminating may have allowed the horizontal op to
36785 // have an undemanded (undefined) operand. Duplicate the other (defined)
36786 // operand to ensure that the results are defined across all lanes without the
36788 auto updateHOp = [](SDValue HorizOp, SelectionDAG &DAG) {
36790 if (HorizOp.getOperand(0).isUndef()) {
36791 assert(!HorizOp.getOperand(1).isUndef() && "Not expecting foldable h-op");
36792 X = HorizOp.getOperand(1);
36793 } else if (HorizOp.getOperand(1).isUndef()) {
36794 assert(!HorizOp.getOperand(0).isUndef() && "Not expecting foldable h-op");
36795 X = HorizOp.getOperand(0);
36799 return DAG.getNode(HorizOp.getOpcode(), SDLoc(HorizOp),
36800 HorizOp.getValueType(), X, X);
  // When the operands of a horizontal math op are identical, the low half of
  // the result is the same as the high half. If a target shuffle is also
  // replicating low and high halves (and without changing the type/length of
  // the vector), we don't need the shuffle.
  if (Opcode == X86ISD::MOVDDUP || Opcode == X86ISD::VBROADCAST) {
    if (HOp.getScalarValueSizeInBits() == 64 && HOp.getValueType() == VT) {
      // movddup (hadd X, X) --> hadd X, X
      // broadcast (extract_vec_elt (hadd X, X), 0) --> hadd X, X
      assert((HOp.getValueType() == MVT::v2f64 ||
              HOp.getValueType() == MVT::v4f64) && "Unexpected type for h-op");
      return updateHOp(HOp, DAG);
    }
    return SDValue();
  }
  // shuffle (hadd X, X), undef, [low half...high half] --> hadd X, X
  ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(N)->getMask();
  // TODO: Other mask possibilities like {1,1} and {1,0} could be added here,
  // but this should be tied to whatever horizontal op matching and shuffle
  // canonicalization are producing.
  if (HOp.getValueSizeInBits() == 128 &&
      (isTargetShuffleEquivalent(Mask, {0, 0}) ||
       isTargetShuffleEquivalent(Mask, {0, 1, 0, 1}) ||
       isTargetShuffleEquivalent(Mask, {0, 1, 2, 3, 0, 1, 2, 3})))
    return updateHOp(HOp, DAG);
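  // 256-bit horizontal ops repeat per 128-bit lane, e.g. for v8f32
  // (hadd X, X) produces <X0+X1, X2+X3, X0+X1, X2+X3,
  //                       X4+X5, X6+X7, X4+X5, X6+X7>,
  // so per-lane replication masks like {0,1,0,1,4,5,4,5} are already no-ops.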
  if (HOp.getValueSizeInBits() == 256 &&
      (isTargetShuffleEquivalent(Mask, {0, 0, 2, 2}) ||
       isTargetShuffleEquivalent(Mask, {0, 1, 0, 1, 4, 5, 4, 5}) ||
       isTargetShuffleEquivalent(
           Mask, {0, 1, 2, 3, 0, 1, 2, 3, 8, 9, 10, 11, 8, 9, 10, 11})))
    return updateHOp(HOp, DAG);

  return SDValue();
}
/// If we have a shuffle of AVX/AVX512 (256/512 bit) vectors that only uses the
/// low half of each source vector and does not set any high half elements in
/// the destination vector, narrow the shuffle to half its original size.
static SDValue narrowShuffle(ShuffleVectorSDNode *Shuf, SelectionDAG &DAG) {
  if (!Shuf->getValueType(0).isSimple())
    return SDValue();
  MVT VT = Shuf->getSimpleValueType(0);
  if (!VT.is256BitVector() && !VT.is512BitVector())
    return SDValue();

  // See if we can ignore all of the high elements of the shuffle.
  ArrayRef<int> Mask = Shuf->getMask();
  if (!isUndefUpperHalf(Mask))
    return SDValue();

  // Check if the shuffle mask accesses only the low half of each input vector
  // (half-index output is 0 or 2).
  int HalfIdx1, HalfIdx2;
  SmallVector<int, 8> HalfMask(Mask.size() / 2);
  if (!getHalfShuffleMask(Mask, HalfMask, HalfIdx1, HalfIdx2) ||
      (HalfIdx1 % 2 == 1) || (HalfIdx2 % 2 == 1))
    return SDValue();
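  // e.g. a v8f32 shuffle <0,9,1,8,u,u,u,u> reads only the low v4f32 halves of
  // both inputs, so it can be done as the v4f32 shuffle <0,5,1,4> followed by
  // a free subregister insertion.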
  // Create a half-width shuffle to replace the unnecessarily wide shuffle.
  // The trick is knowing that all of the insert/extract are actually free
  // subregister (zmm<->ymm or ymm<->xmm) ops. That leaves us with a shuffle
  // of narrow inputs into a narrow output, and that is always cheaper than
  // the wide shuffle that we started with.
  return getShuffleHalfVectors(SDLoc(Shuf), Shuf->getOperand(0),
                               Shuf->getOperand(1), HalfMask, HalfIdx1,
                               HalfIdx2, false, DAG, /*UseConcat*/ true);
}
static SDValue combineShuffle(SDNode *N, SelectionDAG &DAG,
                              TargetLowering::DAGCombinerInfo &DCI,
                              const X86Subtarget &Subtarget) {
  if (auto *Shuf = dyn_cast<ShuffleVectorSDNode>(N))
    if (SDValue V = narrowShuffle(Shuf, DAG))
      return V;

  // If we have legalized the vector types, look for blends of FADD and FSUB
  // nodes that we can fuse into an ADDSUB, FMADDSUB, or FMSUBADD node.
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.isTypeLegal(VT)) {
    if (SDValue AddSub = combineShuffleToAddSubOrFMAddSub(N, Subtarget, DAG))
      return AddSub;

    if (SDValue HAddSub = foldShuffleOfHorizOp(N, DAG))
      return HAddSub;
  }

  // Attempt to combine into a vector load/broadcast.
  if (SDValue LD = combineToConsecutiveLoads(VT, SDValue(N, 0), dl, DAG,
                                             Subtarget, true))
    return LD;

  // For AVX2, we sometimes want to combine
  // (vector_shuffle <mask> (concat_vectors t1, undef)
  //                        (concat_vectors t2, undef))
  // Into:
  // (vector_shuffle <mask> (concat_vectors t1, t2), undef)
  // Since the latter can be efficiently lowered with VPERMD/VPERMQ
  if (SDValue ShufConcat = combineShuffleOfConcatUndef(N, DAG, Subtarget))
    return ShufConcat;
  if (isTargetShuffle(N->getOpcode())) {
    SDValue Op(N, 0);
    if (SDValue Shuffle = combineTargetShuffle(Op, DAG, DCI, Subtarget))
      return Shuffle;

    // Try recursively combining arbitrary sequences of x86 shuffle
    // instructions into higher-order shuffles. We do this after combining
    // specific PSHUF instruction sequences into their minimal form so that we
    // can evaluate how many specialized shuffle instructions are involved in
    // a particular chain.
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;

    // Simplify source operands based on shuffle mask.
    // TODO - merge this into combineX86ShufflesRecursively.
    APInt KnownUndef, KnownZero;
    APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
    if (TLI.SimplifyDemandedVectorElts(Op, DemandedElts, KnownUndef, KnownZero,
                                       DCI))
      return SDValue(N, 0);
  }
  // Pull subvector inserts into undef through VZEXT_MOVL by making it an
  // insert into a zero vector. This helps get VZEXT_MOVL closer to
  // scalar_to_vectors where 256/512 are canonicalized to an insert and a
  // 128-bit scalar_to_vector. This reduces the number of isel patterns.
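  // e.g. (vzext_movl (insert_subvector undef, X, 0))
  //        --> (insert_subvector (zero vector), (vzext_movl X), 0)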
  if (N->getOpcode() == X86ISD::VZEXT_MOVL && !DCI.isBeforeLegalizeOps() &&
      N->getOperand(0).hasOneUse()) {
    SDValue V = peekThroughOneUseBitcasts(N->getOperand(0));

    if (V.getOpcode() == ISD::INSERT_SUBVECTOR &&
        V.getOperand(0).isUndef() && isNullConstant(V.getOperand(2))) {
      SDValue In = V.getOperand(1);
      MVT SubVT =
          MVT::getVectorVT(VT.getSimpleVT().getVectorElementType(),
                           In.getValueSizeInBits() / VT.getScalarSizeInBits());
      In = DAG.getBitcast(SubVT, In);
      SDValue Movl = DAG.getNode(X86ISD::VZEXT_MOVL, dl, SubVT, In);
      return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, VT,
                         getZeroVector(VT.getSimpleVT(), Subtarget, DAG, dl),
                         Movl, V.getOperand(2));
    }
  }

  return SDValue();
}
// Simplify variable target shuffle masks based on the demanded elements.
// TODO: Handle DemandedBits in mask indices as well?
bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetShuffle(
    SDValue Op, const APInt &DemandedElts, unsigned MaskIndex,
    TargetLowering::TargetLoweringOpt &TLO, unsigned Depth) const {
  // If we're demanding all elements don't bother trying to simplify the mask.
  unsigned NumElts = DemandedElts.getBitWidth();
  if (DemandedElts.isAllOnesValue())
    return false;

  SDValue Mask = Op.getOperand(MaskIndex);
  if (!Mask.hasOneUse())
    return false;

  // Attempt to generically simplify the variable shuffle mask.
  APInt MaskUndef, MaskZero;
  if (SimplifyDemandedVectorElts(Mask, DemandedElts, MaskUndef, MaskZero, TLO,
                                 Depth + 1))
    return true;

  // Attempt to extract+simplify a (constant pool load) shuffle mask.
  // TODO: Support other types from getTargetShuffleMaskIndices?
  SDValue BC = peekThroughOneUseBitcasts(Mask);
  EVT BCVT = BC.getValueType();
  auto *Load = dyn_cast<LoadSDNode>(BC);
  if (!Load)
    return false;

  const Constant *C = getTargetConstantFromNode(Load);
  if (!C)
    return false;

  Type *CTy = C->getType();
  if (!CTy->isVectorTy() ||
      CTy->getPrimitiveSizeInBits() != Mask.getValueSizeInBits())
    return false;

  // Handle scaling for i64 elements on 32-bit targets.
  unsigned NumCstElts = cast<FixedVectorType>(CTy)->getNumElements();
  if (NumCstElts != NumElts && NumCstElts != (NumElts * 2))
    return false;
  unsigned Scale = NumCstElts / NumElts;
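  // e.g. a v2i64 shuffle whose mask constant is stored as v4i32 has
  // Scale == 2, so both i32 halves of a demanded i64 element are kept.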
  // Simplify mask if we have an undemanded element that is not undef.
  bool Simplified = false;
  SmallVector<Constant *, 32> ConstVecOps;
  for (unsigned i = 0; i != NumCstElts; ++i) {
    Constant *Elt = C->getAggregateElement(i);
    if (!DemandedElts[i / Scale] && !isa<UndefValue>(Elt)) {
      ConstVecOps.push_back(UndefValue::get(Elt->getType()));
      Simplified = true;
      continue;
    }
    ConstVecOps.push_back(Elt);
  }
  if (!Simplified)
    return false;

  // Generate new constant pool entry + legalize immediately for the load.
  SDLoc DL(Op);
  SDValue CV = TLO.DAG.getConstantPool(ConstantVector::get(ConstVecOps), BCVT);
  SDValue LegalCV = LowerConstantPool(CV, TLO.DAG);
  SDValue NewMask = TLO.DAG.getLoad(
      BCVT, DL, TLO.DAG.getEntryNode(), LegalCV,
      MachinePointerInfo::getConstantPool(TLO.DAG.getMachineFunction()),
      Load->getAlign());
  return TLO.CombineTo(Mask, TLO.DAG.getBitcast(Mask.getValueType(), NewMask));
}
bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
    SDValue Op, const APInt &DemandedElts, APInt &KnownUndef, APInt &KnownZero,
    TargetLoweringOpt &TLO, unsigned Depth) const {
  int NumElts = DemandedElts.getBitWidth();
  unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();

  // Handle special case opcodes.
  switch (Opc) {
  case X86ISD::PMULDQ:
  case X86ISD::PMULUDQ: {
    APInt LHSUndef, LHSZero;
    APInt RHSUndef, RHSZero;
    SDValue LHS = Op.getOperand(0);
    SDValue RHS = Op.getOperand(1);
    if (SimplifyDemandedVectorElts(LHS, DemandedElts, LHSUndef, LHSZero, TLO,
                                   Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(RHS, DemandedElts, RHSUndef, RHSZero, TLO,
                                   Depth + 1))
      return true;
    // Multiply by zero.
    KnownZero = LHSZero | RHSZero;
    break;
  }
  case X86ISD::VSHL:
  case X86ISD::VSRL:
  case X86ISD::VSRA: {
    // We only need the bottom 64-bits of the (128-bit) shift amount.
    SDValue Amt = Op.getOperand(1);
    MVT AmtVT = Amt.getSimpleValueType();
    assert(AmtVT.is128BitVector() && "Unexpected value type");

    // If we reuse the shift amount just for sse shift amounts then we know
    // that only the bottom 64-bits are ever used.
    bool AssumeSingleUse = llvm::all_of(Amt->uses(), [&Amt](SDNode *Use) {
      unsigned UseOpc = Use->getOpcode();
      return (UseOpc == X86ISD::VSHL || UseOpc == X86ISD::VSRL ||
              UseOpc == X86ISD::VSRA) &&
             Use->getOperand(0) != Amt;
    });

    APInt AmtUndef, AmtZero;
    unsigned NumAmtElts = AmtVT.getVectorNumElements();
    APInt AmtElts = APInt::getLowBitsSet(NumAmtElts, NumAmtElts / 2);
    if (SimplifyDemandedVectorElts(Amt, AmtElts, AmtUndef, AmtZero, TLO,
                                   Depth + 1, AssumeSingleUse))
      return true;
    LLVM_FALLTHROUGH;
  }
  case X86ISD::VSHLI:
  case X86ISD::VSRLI:
  case X86ISD::VSRAI: {
    SDValue Src = Op.getOperand(0);
    APInt SrcUndef;
    if (SimplifyDemandedVectorElts(Src, DemandedElts, SrcUndef, KnownZero, TLO,
                                   Depth + 1))
      return true;
    // TODO convert SrcUndef to KnownUndef.
    break;
  }
  case X86ISD::KSHIFTL: {
    SDValue Src = Op.getOperand(0);
    auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
    assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
    unsigned ShiftAmt = Amt->getZExtValue();

    if (ShiftAmt == 0)
      return TLO.CombineTo(Op, Src);

    // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
    // single shift. We can do this if the bottom bits (which are shifted
    // out) are never demanded.
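    // e.g. (kshiftl (kshiftr X, 2), 3) with the low 3 elements undemanded
    // simplifies to (kshiftl X, 1).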
    if (Src.getOpcode() == X86ISD::KSHIFTR) {
      if (!DemandedElts.intersects(APInt::getLowBitsSet(NumElts, ShiftAmt))) {
        unsigned C1 = Src.getConstantOperandVal(1);
        unsigned NewOpc = X86ISD::KSHIFTL;
        int Diff = ShiftAmt - C1;
        if (Diff < 0) {
          Diff = -Diff;
          NewOpc = X86ISD::KSHIFTR;
        }

        SDLoc dl(Op);
        SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
        return TLO.CombineTo(
            Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
      }
    }

    APInt DemandedSrc = DemandedElts.lshr(ShiftAmt);
    if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
                                   Depth + 1))
      return true;

    KnownUndef <<= ShiftAmt;
    KnownZero <<= ShiftAmt;
    KnownZero.setLowBits(ShiftAmt);
    break;
  }
  case X86ISD::KSHIFTR: {
    SDValue Src = Op.getOperand(0);
    auto *Amt = cast<ConstantSDNode>(Op.getOperand(1));
    assert(Amt->getAPIntValue().ult(NumElts) && "Out of range shift amount");
    unsigned ShiftAmt = Amt->getZExtValue();

    if (ShiftAmt == 0)
      return TLO.CombineTo(Op, Src);

    // If this is ((X << C1) >>u ShAmt), see if we can simplify this into a
    // single shift. We can do this if the top bits (which are shifted
    // out) are never demanded.
    if (Src.getOpcode() == X86ISD::KSHIFTL) {
      if (!DemandedElts.intersects(APInt::getHighBitsSet(NumElts, ShiftAmt))) {
        unsigned C1 = Src.getConstantOperandVal(1);
        unsigned NewOpc = X86ISD::KSHIFTR;
        int Diff = ShiftAmt - C1;
        if (Diff < 0) {
          Diff = -Diff;
          NewOpc = X86ISD::KSHIFTL;
        }

        SDLoc dl(Op);
        SDValue NewSA = TLO.DAG.getTargetConstant(Diff, dl, MVT::i8);
        return TLO.CombineTo(
            Op, TLO.DAG.getNode(NewOpc, dl, VT, Src.getOperand(0), NewSA));
      }
    }

    APInt DemandedSrc = DemandedElts.shl(ShiftAmt);
    if (SimplifyDemandedVectorElts(Src, DemandedSrc, KnownUndef, KnownZero, TLO,
                                   Depth + 1))
      return true;

    KnownUndef.lshrInPlace(ShiftAmt);
    KnownZero.lshrInPlace(ShiftAmt);
    KnownZero.setHighBits(ShiftAmt);
    break;
  }
  case X86ISD::CVTSI2P:
  case X86ISD::CVTUI2P: {
    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();
    APInt SrcUndef, SrcZero;
    APInt SrcElts = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
    if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
                                   Depth + 1))
      return true;
    break;
  }
  case X86ISD::PACKSS:
  case X86ISD::PACKUS: {
    SDValue N0 = Op.getOperand(0);
    SDValue N1 = Op.getOperand(1);

    APInt DemandedLHS, DemandedRHS;
    getPackDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);

    APInt SrcUndef, SrcZero;
    if (SimplifyDemandedVectorElts(N0, DemandedLHS, SrcUndef, SrcZero, TLO,
                                   Depth + 1))
      return true;
    if (SimplifyDemandedVectorElts(N1, DemandedRHS, SrcUndef, SrcZero, TLO,
                                   Depth + 1))
      return true;

    // Aggressively peek through ops to get at the demanded elts.
    // TODO - we should do this for all target/faux shuffles ops.
    if (!DemandedElts.isAllOnesValue()) {
      SDValue NewN0 = SimplifyMultipleUseDemandedVectorElts(N0, DemandedLHS,
                                                            TLO.DAG, Depth + 1);
      SDValue NewN1 = SimplifyMultipleUseDemandedVectorElts(N1, DemandedRHS,
                                                            TLO.DAG, Depth + 1);
      if (NewN0 || NewN1) {
        NewN0 = NewN0 ? NewN0 : N0;
        NewN1 = NewN1 ? NewN1 : N1;
        return TLO.CombineTo(Op,
                             TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewN0, NewN1));
      }
    }
    break;
  }
  case X86ISD::HADD:
  case X86ISD::HSUB:
  case X86ISD::FHADD:
  case X86ISD::FHSUB: {
    APInt DemandedLHS, DemandedRHS;
    getHorizDemandedElts(VT, DemandedElts, DemandedLHS, DemandedRHS);

    APInt LHSUndef, LHSZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedLHS, LHSUndef,
                                   LHSZero, TLO, Depth + 1))
      return true;
    APInt RHSUndef, RHSZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedRHS, RHSUndef,
                                   RHSZero, TLO, Depth + 1))
      return true;
    break;
  }
  case X86ISD::VTRUNC:
  case X86ISD::VTRUNCS:
  case X86ISD::VTRUNCUS: {
    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();
    APInt DemandedSrc = DemandedElts.zextOrTrunc(SrcVT.getVectorNumElements());
    APInt SrcUndef, SrcZero;
    if (SimplifyDemandedVectorElts(Src, DemandedSrc, SrcUndef, SrcZero, TLO,
                                   Depth + 1))
      return true;
    KnownZero = SrcZero.zextOrTrunc(NumElts);
    KnownUndef = SrcUndef.zextOrTrunc(NumElts);
    break;
  }
  case X86ISD::BLENDV: {
    APInt SelUndef, SelZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(0), DemandedElts, SelUndef,
                                   SelZero, TLO, Depth + 1))
      return true;

    // TODO: Use SelZero to adjust LHS/RHS DemandedElts.
    APInt LHSUndef, LHSZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(1), DemandedElts, LHSUndef,
                                   LHSZero, TLO, Depth + 1))
      return true;

    APInt RHSUndef, RHSZero;
    if (SimplifyDemandedVectorElts(Op.getOperand(2), DemandedElts, RHSUndef,
                                   RHSZero, TLO, Depth + 1))
      return true;

    KnownZero = LHSZero & RHSZero;
    KnownUndef = LHSUndef & RHSUndef;
    break;
  }
  case X86ISD::VZEXT_MOVL: {
    // If upper demanded elements are already zero then we have nothing to do.
    SDValue Src = Op.getOperand(0);
    APInt DemandedUpperElts = DemandedElts;
    DemandedUpperElts.clearLowBits(1);
    if (TLO.DAG.computeKnownBits(Src, DemandedUpperElts, Depth + 1).isZero())
      return TLO.CombineTo(Op, Src);
    break;
  }
  case X86ISD::VBROADCAST: {
    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();
    if (!SrcVT.isVector())
      break;
    // Don't bother broadcasting if we just need the 0'th element.
    if (DemandedElts == 1) {
      if (Src.getValueType() != VT)
        Src = widenSubVector(VT.getSimpleVT(), Src, false, Subtarget, TLO.DAG,
                             SDLoc(Op));
      return TLO.CombineTo(Op, Src);
    }
    APInt SrcUndef, SrcZero;
    APInt SrcElts = APInt::getOneBitSet(SrcVT.getVectorNumElements(), 0);
    if (SimplifyDemandedVectorElts(Src, SrcElts, SrcUndef, SrcZero, TLO,
                                   Depth + 1))
      return true;
    // Aggressively peek through src to get at the demanded elt.
    // TODO - we should do this for all target/faux shuffles ops.
    if (SDValue NewSrc = SimplifyMultipleUseDemandedVectorElts(
            Src, SrcElts, TLO.DAG, Depth + 1))
      return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
    break;
  }
  case X86ISD::VPERMV:
    if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 0, TLO,
                                                   Depth))
      return true;
    break;
  case X86ISD::PSHUFB:
  case X86ISD::VPERMV3:
  case X86ISD::VPERMILPV:
    if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 1, TLO,
                                                   Depth))
      return true;
    break;
  case X86ISD::VPPERM:
  case X86ISD::VPERMIL2:
    if (SimplifyDemandedVectorEltsForTargetShuffle(Op, DemandedElts, 2, TLO,
                                                   Depth))
      return true;
    break;
  }

  // For 256/512-bit ops that are 128/256-bit ops glued together, if we do not
  // demand any of the high elements, then narrow the op to 128/256-bits: e.g.
  // (op ymm0, ymm1) --> insert undef, (op xmm0, xmm1), 0
  if ((VT.is256BitVector() || VT.is512BitVector()) &&
      DemandedElts.lshr(NumElts / 2) == 0) {
    unsigned SizeInBits = VT.getSizeInBits();
    unsigned ExtSizeInBits = SizeInBits / 2;

    // See if 512-bit ops only use the bottom 128-bits.
    if (VT.is512BitVector() && DemandedElts.lshr(NumElts / 4) == 0)
      ExtSizeInBits = SizeInBits / 4;

    switch (Opc) {
    // Subvector broadcast.
    case X86ISD::SUBV_BROADCAST: {
      SDLoc DL(Op);
      SDValue Src = Op.getOperand(0);
      if (Src.getValueSizeInBits() > ExtSizeInBits)
        Src = extractSubVector(Src, 0, TLO.DAG, DL, ExtSizeInBits);
      else if (Src.getValueSizeInBits() < ExtSizeInBits) {
        MVT SrcSVT = Src.getSimpleValueType().getScalarType();
        MVT SrcVT =
            MVT::getVectorVT(SrcSVT, ExtSizeInBits / SrcSVT.getSizeInBits());
        Src = TLO.DAG.getNode(X86ISD::SUBV_BROADCAST, DL, SrcVT, Src);
      }
      return TLO.CombineTo(Op, insertSubVector(TLO.DAG.getUNDEF(VT), Src, 0,
                                               TLO.DAG, DL, ExtSizeInBits));
    }
    // Byte shifts by immediate.
    case X86ISD::VSHLDQ:
    case X86ISD::VSRLDQ:
    // Shift by uniform.
    case X86ISD::VSHL:
    case X86ISD::VSRL:
    case X86ISD::VSRA:
    // Shift by immediate.
    case X86ISD::VSHLI:
    case X86ISD::VSRLI:
    case X86ISD::VSRAI: {
      SDLoc DL(Op);
      SDValue Ext0 =
          extractSubVector(Op.getOperand(0), 0, TLO.DAG, DL, ExtSizeInBits);
      SDValue ExtOp =
          TLO.DAG.getNode(Opc, DL, Ext0.getValueType(), Ext0, Op.getOperand(1));
      SDValue UndefVec = TLO.DAG.getUNDEF(VT);
      SDValue Insert =
          insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
      return TLO.CombineTo(Op, Insert);
    }
    case X86ISD::VPERMI: {
      // Simplify PERMPD/PERMQ to extract_subvector.
      // TODO: This should be done in shuffle combining.
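      // e.g. a vpermq selecting <2,3,u,u> reads only the upper 128 bits of
      // its source, so it becomes a 128-bit extract + insert.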
      if (VT == MVT::v4f64 || VT == MVT::v4i64) {
        SmallVector<int, 4> Mask;
        DecodeVPERMMask(NumElts, Op.getConstantOperandVal(1), Mask);
        if (isUndefOrEqual(Mask[0], 2) && isUndefOrEqual(Mask[1], 3)) {
          SDLoc DL(Op);
          SDValue Ext = extractSubVector(Op.getOperand(0), 2, TLO.DAG, DL, 128);
          SDValue UndefVec = TLO.DAG.getUNDEF(VT);
          SDValue Insert = insertSubVector(UndefVec, Ext, 0, TLO.DAG, DL, 128);
          return TLO.CombineTo(Op, Insert);
        }
      }
      break;
    }
    // Zero upper elements.
    case X86ISD::VZEXT_MOVL:
    // Target unary shuffles by immediate:
    case X86ISD::PSHUFD:
    case X86ISD::PSHUFLW:
    case X86ISD::PSHUFHW:
    case X86ISD::VPERMILPI:
    // (Non-Lane Crossing) Target Shuffles.
    case X86ISD::VPERMILPV:
    case X86ISD::VPERMIL2:
    case X86ISD::PSHUFB:
    case X86ISD::UNPCKL:
    case X86ISD::UNPCKH:
    case X86ISD::BLENDI:
    // Saturated Packs.
    case X86ISD::PACKSS:
    case X86ISD::PACKUS:
    // Horizontal Ops.
    case X86ISD::HADD:
    case X86ISD::HSUB:
    case X86ISD::FHADD:
    case X86ISD::FHSUB: {
      SDLoc DL(Op);
      SmallVector<SDValue, 4> Ops;
      for (unsigned i = 0, e = Op.getNumOperands(); i != e; ++i) {
        SDValue SrcOp = Op.getOperand(i);
        EVT SrcVT = SrcOp.getValueType();
        assert((!SrcVT.isVector() || SrcVT.getSizeInBits() == SizeInBits) &&
               "Unsupported vector size");
        Ops.push_back(SrcVT.isVector() ? extractSubVector(SrcOp, 0, TLO.DAG, DL,
                                                          ExtSizeInBits)
                                       : SrcOp);
      }

      MVT ExtVT = VT.getSimpleVT();
      ExtVT = MVT::getVectorVT(ExtVT.getScalarType(),
                               ExtSizeInBits / ExtVT.getScalarSizeInBits());
      SDValue ExtOp = TLO.DAG.getNode(Opc, DL, ExtVT, Ops);
      SDValue UndefVec = TLO.DAG.getUNDEF(VT);
      SDValue Insert =
          insertSubVector(UndefVec, ExtOp, 0, TLO.DAG, DL, ExtSizeInBits);
      return TLO.CombineTo(Op, Insert);
    }
    }
  }
  // Get target/faux shuffle mask.
  APInt OpUndef, OpZero;
  SmallVector<int, 64> OpMask;
  SmallVector<SDValue, 2> OpInputs;
  if (!getTargetShuffleInputs(Op, DemandedElts, OpInputs, OpMask, OpUndef,
                              OpZero, TLO.DAG, Depth, false))
    return false;

  // Shuffle inputs must be the same size as the result.
  if (OpMask.size() != (unsigned)NumElts ||
      llvm::any_of(OpInputs, [VT](SDValue V) {
        return VT.getSizeInBits() != V.getValueSizeInBits() ||
               !V.getValueType().isVector();
      }))
    return false;

  KnownZero = OpZero;
  KnownUndef = OpUndef;

  // Check if shuffle mask can be simplified to undef/zero/identity.
  int NumSrcs = OpInputs.size();
  for (int i = 0; i != NumElts; ++i)
    if (!DemandedElts[i])
      OpMask[i] = SM_SentinelUndef;

  if (isUndefInRange(OpMask, 0, NumElts)) {
    KnownUndef.setAllBits();
    return TLO.CombineTo(Op, TLO.DAG.getUNDEF(VT));
  }
  if (isUndefOrZeroInRange(OpMask, 0, NumElts)) {
    KnownZero.setAllBits();
    return TLO.CombineTo(
        Op, getZeroVector(VT.getSimpleVT(), Subtarget, TLO.DAG, SDLoc(Op)));
  }
  for (int Src = 0; Src != NumSrcs; ++Src)
    if (isSequentialOrUndefInRange(OpMask, 0, NumElts, Src * NumElts))
      return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, OpInputs[Src]));

  // Attempt to simplify inputs.
  for (int Src = 0; Src != NumSrcs; ++Src) {
    // TODO: Support inputs of different types.
    if (OpInputs[Src].getValueType() != VT)
      continue;

    int Lo = Src * NumElts;
    APInt SrcElts = APInt::getNullValue(NumElts);
    for (int i = 0; i != NumElts; ++i)
      if (DemandedElts[i]) {
        int M = OpMask[i] - Lo;
        if (0 <= M && M < NumElts)
          SrcElts.setBit(M);
      }

    // TODO - Propagate input undef/zero elts.
    APInt SrcUndef, SrcZero;
    if (SimplifyDemandedVectorElts(OpInputs[Src], SrcElts, SrcUndef, SrcZero,
                                   TLO, Depth + 1))
      return true;
  }

  // If we don't demand all elements, then attempt to combine to a simpler
  // shuffle.
  // TODO: Handle other depths, but first we need to handle the fact that
  // it might combine to the same shuffle.
  if (!DemandedElts.isAllOnesValue() && Depth == 0) {
    SmallVector<int, 64> DemandedMask(NumElts, SM_SentinelUndef);
    for (int i = 0; i != NumElts; ++i)
      if (DemandedElts[i])
        DemandedMask[i] = i;

    SDValue NewShuffle = combineX86ShufflesRecursively(
        {Op}, 0, Op, DemandedMask, {}, Depth, /*HasVarMask*/ false,
        /*AllowVarMask*/ true, TLO.DAG, Subtarget);
    if (NewShuffle)
      return TLO.CombineTo(Op, NewShuffle);
  }

  return false;
}
bool X86TargetLowering::SimplifyDemandedBitsForTargetNode(
    SDValue Op, const APInt &OriginalDemandedBits,
    const APInt &OriginalDemandedElts, KnownBits &Known, TargetLoweringOpt &TLO,
    unsigned Depth) const {
  EVT VT = Op.getValueType();
  unsigned BitWidth = OriginalDemandedBits.getBitWidth();
  unsigned Opc = Op.getOpcode();
  switch (Opc) {
  case X86ISD::VTRUNC: {
    KnownBits KnownOp;
    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();

    // Simplify the input, using demanded bit information.
    APInt TruncMask = OriginalDemandedBits.zext(SrcVT.getScalarSizeInBits());
    APInt DemandedElts =
        OriginalDemandedElts.trunc(SrcVT.getVectorNumElements());
    if (SimplifyDemandedBits(Src, TruncMask, DemandedElts, KnownOp, TLO,
                             Depth + 1))
      return true;
    break;
  }
  case X86ISD::PMULDQ:
  case X86ISD::PMULUDQ: {
    // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
    KnownBits KnownOp;
    SDValue LHS = Op.getOperand(0);
    SDValue RHS = Op.getOperand(1);
    // FIXME: Can we bound this better?
    APInt DemandedMask = APInt::getLowBitsSet(64, 32);
    if (SimplifyDemandedBits(LHS, DemandedMask, OriginalDemandedElts, KnownOp,
                             TLO, Depth + 1))
      return true;
    if (SimplifyDemandedBits(RHS, DemandedMask, OriginalDemandedElts, KnownOp,
                             TLO, Depth + 1))
      return true;

    // Aggressively peek through ops to get at the demanded low bits.
    SDValue DemandedLHS = SimplifyMultipleUseDemandedBits(
        LHS, DemandedMask, OriginalDemandedElts, TLO.DAG, Depth + 1);
    SDValue DemandedRHS = SimplifyMultipleUseDemandedBits(
        RHS, DemandedMask, OriginalDemandedElts, TLO.DAG, Depth + 1);
    if (DemandedLHS || DemandedRHS) {
      DemandedLHS = DemandedLHS ? DemandedLHS : LHS;
      DemandedRHS = DemandedRHS ? DemandedRHS : RHS;
      return TLO.CombineTo(
          Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, DemandedLHS, DemandedRHS));
    }
    break;
  }
  case X86ISD::VSHLI: {
    SDValue Op0 = Op.getOperand(0);

    unsigned ShAmt = Op.getConstantOperandVal(1);
    if (ShAmt >= BitWidth)
      break;

    APInt DemandedMask = OriginalDemandedBits.lshr(ShAmt);

    // If this is ((X >>u C1) << ShAmt), see if we can simplify this into a
    // single shift. We can do this if the bottom bits (which are shifted
    // out) are never demanded.
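    // e.g. (vshli (vsrli X, 2), 5) when the low 5 bits are not demanded
    // becomes (vshli X, 3).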
    if (Op0.getOpcode() == X86ISD::VSRLI &&
        OriginalDemandedBits.countTrailingZeros() >= ShAmt) {
      unsigned Shift2Amt = Op0.getConstantOperandVal(1);
      if (Shift2Amt < BitWidth) {
        int Diff = ShAmt - Shift2Amt;
        if (Diff == 0)
          return TLO.CombineTo(Op, Op0.getOperand(0));

        unsigned NewOpc = Diff < 0 ? X86ISD::VSRLI : X86ISD::VSHLI;
        SDValue NewShift = TLO.DAG.getNode(
            NewOpc, SDLoc(Op), VT, Op0.getOperand(0),
            TLO.DAG.getTargetConstant(std::abs(Diff), SDLoc(Op), MVT::i8));
        return TLO.CombineTo(Op, NewShift);
      }
    }

    // If we are only demanding sign bits then we can use the shift source
    // directly.
    unsigned NumSignBits =
        TLO.DAG.ComputeNumSignBits(Op0, OriginalDemandedElts, Depth + 1);
    unsigned UpperDemandedBits =
        BitWidth - OriginalDemandedBits.countTrailingZeros();
    if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= UpperDemandedBits)
      return TLO.CombineTo(Op, Op0);

    if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
                             TLO, Depth + 1))
      return true;

    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    Known.Zero <<= ShAmt;
    Known.One <<= ShAmt;

    // Low bits known zero.
    Known.Zero.setLowBits(ShAmt);
    break;
  }
  case X86ISD::VSRLI: {
    unsigned ShAmt = Op.getConstantOperandVal(1);
    if (ShAmt >= BitWidth)
      break;

    APInt DemandedMask = OriginalDemandedBits << ShAmt;

    if (SimplifyDemandedBits(Op.getOperand(0), DemandedMask,
                             OriginalDemandedElts, Known, TLO, Depth + 1))
      return true;

    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    Known.Zero.lshrInPlace(ShAmt);
    Known.One.lshrInPlace(ShAmt);

    // High bits known zero.
    Known.Zero.setHighBits(ShAmt);
    break;
  }
  case X86ISD::VSRAI: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    unsigned ShAmt = cast<ConstantSDNode>(Op1)->getZExtValue();
    if (ShAmt >= BitWidth)
      break;

    APInt DemandedMask = OriginalDemandedBits << ShAmt;

    // If we just want the sign bit then we don't need to shift it.
    if (OriginalDemandedBits.isSignMask())
      return TLO.CombineTo(Op, Op0);
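    // (MOVMSK, for instance, demands only the sign bit of each element, so a
    // VSRAI feeding it is dropped here.)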
    // fold (VSRAI (VSHLI X, C1), C1) --> X iff NumSignBits(X) > C1
    if (Op0.getOpcode() == X86ISD::VSHLI &&
        Op.getOperand(1) == Op0.getOperand(1)) {
      SDValue Op00 = Op0.getOperand(0);
      unsigned NumSignBits =
          TLO.DAG.ComputeNumSignBits(Op00, OriginalDemandedElts);
      if (ShAmt < NumSignBits)
        return TLO.CombineTo(Op, Op00);
    }

    // If any of the demanded bits are produced by the sign extension, we also
    // demand the input sign bit.
    if (OriginalDemandedBits.countLeadingZeros() < ShAmt)
      DemandedMask.setSignBit();

    if (SimplifyDemandedBits(Op0, DemandedMask, OriginalDemandedElts, Known,
                             TLO, Depth + 1))
      return true;

    assert(!Known.hasConflict() && "Bits known to be one AND zero?");
    Known.Zero.lshrInPlace(ShAmt);
    Known.One.lshrInPlace(ShAmt);

    // If the input sign bit is known to be zero, or if none of the top bits
    // are demanded, turn this into an unsigned shift right.
    if (Known.Zero[BitWidth - ShAmt - 1] ||
        OriginalDemandedBits.countLeadingZeros() >= ShAmt)
      return TLO.CombineTo(
          Op, TLO.DAG.getNode(X86ISD::VSRLI, SDLoc(Op), VT, Op0, Op1));

    // High bits are known one.
    if (Known.One[BitWidth - ShAmt - 1])
      Known.One.setHighBits(ShAmt);
    break;
  }
  case X86ISD::PEXTRB:
  case X86ISD::PEXTRW: {
    SDValue Vec = Op.getOperand(0);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(1));
    MVT VecVT = Vec.getSimpleValueType();
    unsigned NumVecElts = VecVT.getVectorNumElements();

    if (CIdx && CIdx->getAPIntValue().ult(NumVecElts)) {
      unsigned Idx = CIdx->getZExtValue();
      unsigned VecBitWidth = VecVT.getScalarSizeInBits();

      // If we demand no bits from the vector then we must have demanded
      // bits from the implicit zext - simplify to zero.
      APInt DemandedVecBits = OriginalDemandedBits.trunc(VecBitWidth);
      if (DemandedVecBits == 0)
        return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));

      APInt KnownUndef, KnownZero;
      APInt DemandedVecElts = APInt::getOneBitSet(NumVecElts, Idx);
      if (SimplifyDemandedVectorElts(Vec, DemandedVecElts, KnownUndef,
                                     KnownZero, TLO, Depth + 1))
        return true;

      KnownBits KnownVec;
      if (SimplifyDemandedBits(Vec, DemandedVecBits, DemandedVecElts,
                               KnownVec, TLO, Depth + 1))
        return true;

      if (SDValue V = SimplifyMultipleUseDemandedBits(
              Vec, DemandedVecBits, DemandedVecElts, TLO.DAG, Depth + 1))
        return TLO.CombineTo(
            Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, V, Op.getOperand(1)));

      Known = KnownVec.zext(BitWidth);
      return false;
    }
    break;
  }
  case X86ISD::PINSRB:
  case X86ISD::PINSRW: {
    SDValue Vec = Op.getOperand(0);
    SDValue Scl = Op.getOperand(1);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    MVT VecVT = Vec.getSimpleValueType();

    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements())) {
      unsigned Idx = CIdx->getZExtValue();
      if (!OriginalDemandedElts[Idx])
        return TLO.CombineTo(Op, Vec);

      KnownBits KnownVec;
      APInt DemandedVecElts(OriginalDemandedElts);
      DemandedVecElts.clearBit(Idx);
      if (SimplifyDemandedBits(Vec, OriginalDemandedBits, DemandedVecElts,
                               KnownVec, TLO, Depth + 1))
        return true;

      KnownBits KnownScl;
      unsigned NumSclBits = Scl.getScalarValueSizeInBits();
      APInt DemandedSclBits = OriginalDemandedBits.zext(NumSclBits);
      if (SimplifyDemandedBits(Scl, DemandedSclBits, KnownScl, TLO, Depth + 1))
        return true;

      KnownScl = KnownScl.trunc(VecVT.getScalarSizeInBits());
      Known.One = KnownVec.One & KnownScl.One;
      Known.Zero = KnownVec.Zero & KnownScl.Zero;
      return false;
    }
    break;
  }
  case X86ISD::PACKSS:
    // PACKSS saturates to MIN/MAX integer values. So if we just want the
    // sign bit then we can just ask for the source operands' sign bit.
    // TODO - add known bits handling.
    if (OriginalDemandedBits.isSignMask()) {
      APInt DemandedLHS, DemandedRHS;
      getPackDemandedElts(VT, OriginalDemandedElts, DemandedLHS, DemandedRHS);

      KnownBits KnownLHS, KnownRHS;
      APInt SignMask = APInt::getSignMask(BitWidth * 2);
      if (SimplifyDemandedBits(Op.getOperand(0), SignMask, DemandedLHS,
                               KnownLHS, TLO, Depth + 1))
        return true;
      if (SimplifyDemandedBits(Op.getOperand(1), SignMask, DemandedRHS,
                               KnownRHS, TLO, Depth + 1))
        return true;

      // Attempt to avoid multi-use ops if we don't need anything from them.
      SDValue DemandedOp0 = SimplifyMultipleUseDemandedBits(
          Op.getOperand(0), SignMask, DemandedLHS, TLO.DAG, Depth + 1);
      SDValue DemandedOp1 = SimplifyMultipleUseDemandedBits(
          Op.getOperand(1), SignMask, DemandedRHS, TLO.DAG, Depth + 1);
      if (DemandedOp0 || DemandedOp1) {
        SDValue Op0 = DemandedOp0 ? DemandedOp0 : Op.getOperand(0);
        SDValue Op1 = DemandedOp1 ? DemandedOp1 : Op.getOperand(1);
        return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, Op0, Op1));
      }
    }
    // TODO - add general PACKSS/PACKUS SimplifyDemandedBits support.
    break;
  case X86ISD::PCMPGT:
    // icmp sgt(0, R) == ashr(R, BitWidth-1).
    // iff we only need the sign bit then we can use R directly.
    if (OriginalDemandedBits.isSignMask() &&
        ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
      return TLO.CombineTo(Op, Op.getOperand(1));
    break;
  case X86ISD::MOVMSK: {
    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();
    unsigned SrcBits = SrcVT.getScalarSizeInBits();
    unsigned NumElts = SrcVT.getVectorNumElements();

    // If we don't need the sign bits at all just return zero.
    if (OriginalDemandedBits.countTrailingZeros() >= NumElts)
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));

    // Only demand the vector elements of the sign bits we need.
    APInt KnownUndef, KnownZero;
    APInt DemandedElts = OriginalDemandedBits.zextOrTrunc(NumElts);
    if (SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, KnownZero,
                                   TLO, Depth + 1))
      return true;

    Known.Zero = KnownZero.zextOrSelf(BitWidth);
    Known.Zero.setHighBits(BitWidth - NumElts);
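    // (movmsk only produces NumElts result bits, e.g. 4 for v4f32, so all
    // higher bits of the scalar result are always zero.)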
    // MOVMSK only uses the MSB from each vector element.
    KnownBits KnownSrc;
    APInt DemandedSrcBits = APInt::getSignMask(SrcBits);
    if (SimplifyDemandedBits(Src, DemandedSrcBits, DemandedElts, KnownSrc, TLO,
                             Depth + 1))
      return true;

    if (KnownSrc.One[SrcBits - 1])
      Known.One.setLowBits(NumElts);
    else if (KnownSrc.Zero[SrcBits - 1])
      Known.Zero.setLowBits(NumElts);

    // Attempt to avoid multi-use ops if we don't need anything from them.
    if (SDValue NewSrc = SimplifyMultipleUseDemandedBits(
            Src, DemandedSrcBits, DemandedElts, TLO.DAG, Depth + 1))
      return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, SDLoc(Op), VT, NewSrc));
    return false;
  }
  case X86ISD::BEXTR: {
    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    // Only bottom 16-bits of the control bits are required.
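    // (The BEXTR control word holds the start index in bits 7:0 and the
    // extract length in bits 15:8; higher control bits are ignored.)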
    if (auto *Cst1 = dyn_cast<ConstantSDNode>(Op1)) {
      // NOTE: SimplifyDemandedBits won't do this for constants.
      const APInt &Val1 = Cst1->getAPIntValue();
      APInt MaskedVal1 = Val1 & 0xFFFF;
      if (MaskedVal1 != Val1) {
        SDLoc DL(Op);
        return TLO.CombineTo(
            Op, TLO.DAG.getNode(X86ISD::BEXTR, DL, VT, Op0,
                                TLO.DAG.getConstant(MaskedVal1, DL, VT)));
      }
    }

    KnownBits Known1;
    APInt DemandedMask(APInt::getLowBitsSet(BitWidth, 16));
    if (SimplifyDemandedBits(Op1, DemandedMask, Known1, TLO, Depth + 1))
      return true;

    // If the length is 0, replace with 0.
    KnownBits LengthBits = Known1.extractBits(8, 8);
    if (LengthBits.isZero())
      return TLO.CombineTo(Op, TLO.DAG.getConstant(0, SDLoc(Op), VT));

    break;
  }
  }

  return TargetLowering::SimplifyDemandedBitsForTargetNode(
      Op, OriginalDemandedBits, OriginalDemandedElts, Known, TLO, Depth);
}
SDValue X86TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
    SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
    SelectionDAG &DAG, unsigned Depth) const {
  int NumElts = DemandedElts.getBitWidth();
  unsigned Opc = Op.getOpcode();
  EVT VT = Op.getValueType();

  switch (Opc) {
  case X86ISD::PINSRB:
  case X86ISD::PINSRW: {
    // If we don't demand the inserted element, return the base vector.
    SDValue Vec = Op.getOperand(0);
    auto *CIdx = dyn_cast<ConstantSDNode>(Op.getOperand(2));
    MVT VecVT = Vec.getSimpleValueType();
    if (CIdx && CIdx->getAPIntValue().ult(VecVT.getVectorNumElements()) &&
        !DemandedElts[CIdx->getZExtValue()])
      return Vec;
    break;
  }
  case X86ISD::VSHLI: {
    // If we are only demanding sign bits then we can use the shift source
    // directly.
    SDValue Op0 = Op.getOperand(0);
    unsigned ShAmt = Op.getConstantOperandVal(1);
    unsigned BitWidth = DemandedBits.getBitWidth();
    unsigned NumSignBits = DAG.ComputeNumSignBits(Op0, DemandedElts, Depth + 1);
    unsigned UpperDemandedBits = BitWidth - DemandedBits.countTrailingZeros();
    if (NumSignBits > ShAmt && (NumSignBits - ShAmt) >= UpperDemandedBits)
      return Op0;
    break;
  }
  case X86ISD::VSRAI:
    // iff we only need the sign bit then we can use the source directly.
    // TODO: generalize where we only demand extended signbits.
    if (DemandedBits.isSignMask())
      return Op.getOperand(0);
    break;
  case X86ISD::PCMPGT:
    // icmp sgt(0, R) == ashr(R, BitWidth-1).
    // iff we only need the sign bit then we can use R directly.
    if (DemandedBits.isSignMask() &&
        ISD::isBuildVectorAllZeros(Op.getOperand(0).getNode()))
      return Op.getOperand(1);
    break;
  }

  APInt ShuffleUndef, ShuffleZero;
  SmallVector<int, 16> ShuffleMask;
  SmallVector<SDValue, 2> ShuffleOps;
  if (getTargetShuffleInputs(Op, DemandedElts, ShuffleOps, ShuffleMask,
                             ShuffleUndef, ShuffleZero, DAG, Depth, false)) {
    // If all the demanded elts are from one operand and are inline,
    // then we can use the operand directly.
    int NumOps = ShuffleOps.size();
    if (ShuffleMask.size() == (unsigned)NumElts &&
        llvm::all_of(ShuffleOps, [VT](SDValue V) {
          return VT.getSizeInBits() == V.getValueSizeInBits();
        })) {

      if (DemandedElts.isSubsetOf(ShuffleUndef))
        return DAG.getUNDEF(VT);
      if (DemandedElts.isSubsetOf(ShuffleUndef | ShuffleZero))
        return getZeroVector(VT.getSimpleVT(), Subtarget, DAG, SDLoc(Op));

      // Bitmask that indicates which ops have only been accessed 'inline'.
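      // e.g. a mask <0,1,2,3> over two v4i32 inputs accesses only op0, each
      // element from its own lane, so op0 itself can be returned.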
      APInt IdentityOp = APInt::getAllOnesValue(NumOps);
      for (int i = 0; i != NumElts; ++i) {
        int M = ShuffleMask[i];
        if (!DemandedElts[i] || ShuffleUndef[i])
          continue;
        int OpIdx = M / NumElts;
        int EltIdx = M % NumElts;
        if (M < 0 || EltIdx != i) {
          IdentityOp.clearAllBits();
          break;
        }
        IdentityOp &= APInt::getOneBitSet(NumOps, OpIdx);
        if (IdentityOp == 0)
          break;
      }
      assert((IdentityOp == 0 || IdentityOp.countPopulation() == 1) &&
             "Multiple identity shuffles detected");

      if (IdentityOp != 0)
        return DAG.getBitcast(VT, ShuffleOps[IdentityOp.countTrailingZeros()]);
    }
  }

  return TargetLowering::SimplifyMultipleUseDemandedBitsForTargetNode(
      Op, DemandedBits, DemandedElts, DAG, Depth);
}
// Helper to peek through bitops/setcc to determine size of source vector.
// Allows combineBitcastvxi1 to determine what size vector generated a <X x i1>.
static bool checkBitcastSrcVectorSize(SDValue Src, unsigned Size) {
  switch (Src.getOpcode()) {
  case ISD::SETCC:
    return Src.getOperand(0).getValueSizeInBits() == Size;
  case ISD::AND:
  case ISD::XOR:
  case ISD::OR:
    return checkBitcastSrcVectorSize(Src.getOperand(0), Size) &&
           checkBitcastSrcVectorSize(Src.getOperand(1), Size);
  }
  return false;
}

// Helper to flip between AND/OR/XOR opcodes and their X86ISD FP equivalents.
static unsigned getAltBitOpcode(unsigned Opcode) {
  switch (Opcode) {
  case ISD::AND: return X86ISD::FAND;
  case ISD::OR: return X86ISD::FOR;
  case ISD::XOR: return X86ISD::FXOR;
  case X86ISD::ANDNP: return X86ISD::FANDN;
  }
  llvm_unreachable("Unknown bitwise opcode");
}
// Helper to adjust v4i32 MOVMSK expansion to work with SSE1-only targets.
static SDValue adjustBitcastSrcVectorSSE1(SelectionDAG &DAG, SDValue Src,
                                          const SDLoc &DL) {
  EVT SrcVT = Src.getValueType();
  if (SrcVT != MVT::v4i1)
    return SDValue();
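  // e.g. (v4i1 setcc (v4i32 bitcast (v4f32 X)), zero, setlt) tests the sign
  // bits of X, which MOVMSKPS can read directly without SSE2.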
  switch (Src.getOpcode()) {
  case ISD::SETCC:
    if (Src.getOperand(0).getValueType() == MVT::v4i32 &&
        ISD::isBuildVectorAllZeros(Src.getOperand(1).getNode()) &&
        cast<CondCodeSDNode>(Src.getOperand(2))->get() == ISD::SETLT) {
      SDValue Op0 = Src.getOperand(0);
      if (ISD::isNormalLoad(Op0.getNode()))
        return DAG.getBitcast(MVT::v4f32, Op0);
      if (Op0.getOpcode() == ISD::BITCAST &&
          Op0.getOperand(0).getValueType() == MVT::v4f32)
        return Op0.getOperand(0);
    }
    break;
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR: {
    SDValue Op0 = adjustBitcastSrcVectorSSE1(DAG, Src.getOperand(0), DL);
    SDValue Op1 = adjustBitcastSrcVectorSSE1(DAG, Src.getOperand(1), DL);
    if (Op0 && Op1)
      return DAG.getNode(getAltBitOpcode(Src.getOpcode()), DL, MVT::v4f32, Op0,
                         Op1);
    break;
  }
  }
  return SDValue();
}

// Helper to push sign extension of vXi1 SETCC result through bitops.
static SDValue signExtendBitcastSrcVector(SelectionDAG &DAG, EVT SExtVT,
                                          SDValue Src, const SDLoc &DL) {
  switch (Src.getOpcode()) {
  case ISD::SETCC:
    return DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    return DAG.getNode(
        Src.getOpcode(), DL, SExtVT,
        signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(0), DL),
        signExtendBitcastSrcVector(DAG, SExtVT, Src.getOperand(1), DL));
  }
  llvm_unreachable("Unexpected node type for vXi1 sign extension");
}
// Try to match patterns such as
// (i16 bitcast (v16i1 x))
// ->
// (i16 movmsk (16i8 sext (v16i1 x)))
// before the illegal vector is scalarized on subtargets that don't have legal
// vxi1 types.
static SDValue combineBitcastvxi1(SelectionDAG &DAG, EVT VT, SDValue Src,
                                  const SDLoc &DL,
                                  const X86Subtarget &Subtarget) {
  EVT SrcVT = Src.getValueType();
  if (!SrcVT.isSimple() || SrcVT.getScalarType() != MVT::i1)
    return SDValue();

  // Recognize the IR pattern for the movmsk intrinsic under SSE1 before type
  // legalization destroys the v4i32 type.
  if (Subtarget.hasSSE1() && !Subtarget.hasSSE2()) {
    if (SDValue V = adjustBitcastSrcVectorSSE1(DAG, Src, DL)) {
      V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32,
                      DAG.getBitcast(MVT::v4f32, V));
      return DAG.getZExtOrTrunc(V, DL, VT);
    }
    return SDValue();
  }

  // If the input is a truncate from v16i8 or v32i8 go ahead and use a
  // movmskb even with avx512. This will be better than truncating to vXi1 and
  // using a kmov. This can especially help KNL if the input is a v16i8/v32i8
  // vpcmpeqb/vpcmpgtb.
  bool PreferMovMsk = Src.getOpcode() == ISD::TRUNCATE && Src.hasOneUse() &&
                      (Src.getOperand(0).getValueType() == MVT::v16i8 ||
                       Src.getOperand(0).getValueType() == MVT::v32i8 ||
                       Src.getOperand(0).getValueType() == MVT::v64i8);

  // Prefer movmsk for AVX512 for (bitcast (setlt X, 0)) which can be handled
  // directly with vpmovmskb/vmovmskps/vmovmskpd.
  if (Src.getOpcode() == ISD::SETCC && Src.hasOneUse() &&
      cast<CondCodeSDNode>(Src.getOperand(2))->get() == ISD::SETLT &&
      ISD::isBuildVectorAllZeros(Src.getOperand(1).getNode())) {
    EVT CmpVT = Src.getOperand(0).getValueType();
    EVT EltVT = CmpVT.getVectorElementType();
    if (CmpVT.getSizeInBits() <= 256 &&
        (EltVT == MVT::i8 || EltVT == MVT::i32 || EltVT == MVT::i64))
      PreferMovMsk = true;
  }

  // With AVX512 vxi1 types are legal and we prefer using k-regs.
  // MOVMSK is supported in SSE2 or later.
  if (!Subtarget.hasSSE2() || (Subtarget.hasAVX512() && !PreferMovMsk))
    return SDValue();

  // There are MOVMSK flavors for types v16i8, v32i8, v4f32, v8f32, v4f64 and
  // v8f64. So all legal 128-bit and 256-bit vectors are covered except for
  // v8i16 and v16i16.
  // For these two cases, we can shuffle the upper element bytes to a
  // consecutive sequence at the start of the vector and treat the results as
  // v16i8 or v32i8, and for v16i8 this is the preferable solution. However,
  // for v16i16 this is not the case, because the shuffle is expensive, so we
  // avoid sign-extending to this type entirely.
  // For example, t0 := (v8i16 sext(v8i1 x)) needs to be shuffled as:
  // (v16i8 shuffle <0,2,4,6,8,10,12,14,u,u,...,u> (v16i8 bitcast t0), undef)
  MVT SExtVT;
  bool PropagateSExt = false;
  switch (SrcVT.getSimpleVT().SimpleTy) {
  default:
    return SDValue();
  case MVT::v2i1:
    SExtVT = MVT::v2i64;
    break;
  case MVT::v4i1:
    SExtVT = MVT::v4i32;
    // For cases such as (i4 bitcast (v4i1 setcc v4i64 v1, v2))
    // sign-extend to a 256-bit operation to avoid truncation.
    if (Subtarget.hasAVX() && checkBitcastSrcVectorSize(Src, 256)) {
      SExtVT = MVT::v4i64;
      PropagateSExt = true;
    }
    break;
  case MVT::v8i1:
    SExtVT = MVT::v8i16;
    // For cases such as (i8 bitcast (v8i1 setcc v8i32 v1, v2)),
    // sign-extend to a 256-bit operation to match the compare.
    // If the setcc operand is 128-bit, prefer sign-extending to 128-bit over
    // 256-bit because the shuffle is cheaper than sign extending the result
    // of the compare.
    if (Subtarget.hasAVX() && (checkBitcastSrcVectorSize(Src, 256) ||
                               checkBitcastSrcVectorSize(Src, 512))) {
      SExtVT = MVT::v8i32;
      PropagateSExt = true;
    }
    break;
  case MVT::v16i1:
    SExtVT = MVT::v16i8;
    // For the case (i16 bitcast (v16i1 setcc v16i16 v1, v2)),
    // it is not profitable to sign-extend to 256-bit because this will
    // require an extra cross-lane shuffle which is more expensive than
    // truncating the result of the compare to 128-bits.
    break;
  case MVT::v32i1:
    SExtVT = MVT::v32i8;
    break;
  case MVT::v64i1:
    // If we have AVX512F, but not AVX512BW, and the input is truncated from
    // v64i8 (checked earlier), then split the input and make two pmovmskbs.
    if (Subtarget.hasAVX512()) {
      if (Subtarget.hasBWI())
        return SDValue();
      SExtVT = MVT::v64i8;
      break;
    }
    // Split if this is a <64 x i8> comparison result.
    if (checkBitcastSrcVectorSize(Src, 512)) {
      SExtVT = MVT::v64i8;
      break;
    }
    return SDValue();
  }

  SDValue V = PropagateSExt ? signExtendBitcastSrcVector(DAG, SExtVT, Src, DL)
                            : DAG.getNode(ISD::SIGN_EXTEND, DL, SExtVT, Src);

  if (SExtVT == MVT::v16i8 || SExtVT == MVT::v32i8 || SExtVT == MVT::v64i8) {
    V = getPMOVMSKB(DL, V, DAG, Subtarget);
  } else {
    if (SExtVT == MVT::v8i16)
      V = DAG.getNode(X86ISD::PACKSS, DL, MVT::v16i8, V,
                      DAG.getUNDEF(MVT::v8i16));
    V = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, V);
  }

  EVT IntVT =
      EVT::getIntegerVT(*DAG.getContext(), SrcVT.getVectorNumElements());
  V = DAG.getZExtOrTrunc(V, DL, IntVT);
  return DAG.getBitcast(VT, V);
}
// Convert a vXi1 constant build vector to the same width scalar integer.
static SDValue combinevXi1ConstantToInteger(SDValue Op, SelectionDAG &DAG) {
  EVT SrcVT = Op.getValueType();
  assert(SrcVT.getVectorElementType() == MVT::i1 &&
         "Expected a vXi1 vector");
  assert(ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
         "Expected a constant build vector");
  APInt Imm(SrcVT.getVectorNumElements(), 0);
  for (unsigned Idx = 0, e = Op.getNumOperands(); Idx < e; ++Idx) {
    SDValue In = Op.getOperand(Idx);
    if (!In.isUndef() && (cast<ConstantSDNode>(In)->getZExtValue() & 0x1))
      Imm.setBit(Idx);
  }
  EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), Imm.getBitWidth());
  return DAG.getConstant(Imm, SDLoc(Op), IntVT);
}
static SDValue combineCastedMaskArithmetic(SDNode *N, SelectionDAG &DAG,
                                           TargetLowering::DAGCombinerInfo &DCI,
                                           const X86Subtarget &Subtarget) {
  assert(N->getOpcode() == ISD::BITCAST && "Expected a bitcast");

  if (!DCI.isBeforeLegalizeOps())
    return SDValue();

  // Only do this if we have k-registers.
  if (!Subtarget.hasAVX512())
    return SDValue();

  EVT DstVT = N->getValueType(0);
  SDValue Op = N->getOperand(0);
  EVT SrcVT = Op.getValueType();

  if (!Op.hasOneUse())
    return SDValue();

  // Look for logic ops.
  if (Op.getOpcode() != ISD::AND &&
      Op.getOpcode() != ISD::OR &&
      Op.getOpcode() != ISD::XOR)
    return SDValue();

  // Make sure we have a bitcast between mask registers and a scalar type.
  if (!(SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
        DstVT.isScalarInteger()) &&
      !(DstVT.isVector() && DstVT.getVectorElementType() == MVT::i1 &&
        SrcVT.isScalarInteger()))
    return SDValue();

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
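  // e.g. (i16 bitcast (and (v16i1 bitcast (i16 X)), Y))
  //        --> (i16 and X, (i16 bitcast Y))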
  if (LHS.hasOneUse() && LHS.getOpcode() == ISD::BITCAST &&
      LHS.getOperand(0).getValueType() == DstVT)
    return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT, LHS.getOperand(0),
                       DAG.getBitcast(DstVT, RHS));

  if (RHS.hasOneUse() && RHS.getOpcode() == ISD::BITCAST &&
      RHS.getOperand(0).getValueType() == DstVT)
    return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
                       DAG.getBitcast(DstVT, LHS), RHS.getOperand(0));

  // If the RHS is a vXi1 build vector, this is a good reason to flip too.
  // Most of these have to move a constant from the scalar domain anyway.
  if (ISD::isBuildVectorOfConstantSDNodes(RHS.getNode())) {
    RHS = combinevXi1ConstantToInteger(RHS, DAG);
    return DAG.getNode(Op.getOpcode(), SDLoc(N), DstVT,
                       DAG.getBitcast(DstVT, LHS), RHS);
  }

  return SDValue();
}
static SDValue createMMXBuildVector(BuildVectorSDNode *BV, SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {
  SDLoc DL(BV);
  unsigned NumElts = BV->getNumOperands();
  SDValue Splat = BV->getSplatValue();

  // Build MMX element from integer GPR or SSE float values.
  auto CreateMMXElement = [&](SDValue V) {
    if (V.isUndef())
      return DAG.getUNDEF(MVT::x86mmx);
    if (V.getValueType().isFloatingPoint()) {
      if (Subtarget.hasSSE1() && !isa<ConstantFPSDNode>(V)) {
        V = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4f32, V);
        V = DAG.getBitcast(MVT::v2i64, V);
        return DAG.getNode(X86ISD::MOVDQ2Q, DL, MVT::x86mmx, V);
      }
      V = DAG.getBitcast(MVT::i32, V);
    } else {
      V = DAG.getAnyExtOrTrunc(V, DL, MVT::i32);
    }
    return DAG.getNode(X86ISD::MMX_MOVW2D, DL, MVT::x86mmx, V);
  };

  // Convert build vector ops to MMX data in the bottom elements.
  SmallVector<SDValue, 8> Ops;

  // Broadcast - use (PUNPCKL+)PSHUFW to broadcast single element.
  if (Splat) {
    if (Splat.isUndef())
      return DAG.getUNDEF(MVT::x86mmx);

    Splat = CreateMMXElement(Splat);

    if (Subtarget.hasSSE1()) {
      // Unpack v8i8 to splat i8 elements to lowest 16-bits.
      if (NumElts == 8)
        Splat = DAG.getNode(
            ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
            DAG.getConstant(Intrinsic::x86_mmx_punpcklbw, DL, MVT::i32), Splat,
            Splat);

      // Use PSHUFW to repeat 16-bit elements.
      unsigned ShufMask = (NumElts > 2 ? 0 : 0x44);
      return DAG.getNode(
          ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx,
          DAG.getTargetConstant(Intrinsic::x86_sse_pshuf_w, DL, MVT::i32),
          Splat, DAG.getTargetConstant(ShufMask, DL, MVT::i8));
    }
    Ops.append(NumElts, Splat);
  } else {
    for (unsigned i = 0; i != NumElts; ++i)
      Ops.push_back(CreateMMXElement(BV->getOperand(i)));
  }

  // Use tree of PUNPCKLs to build up general MMX vector.
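  // e.g. 8 x i8 elements combine pairwise with punpcklbw into 4 values, then
  // with punpcklwd into 2, and finally with punpckldq into the full vector.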
  while (Ops.size() > 1) {
    unsigned NumOps = Ops.size();
    unsigned IntrinOp =
        (NumOps == 2 ? Intrinsic::x86_mmx_punpckldq
                     : (NumOps == 4 ? Intrinsic::x86_mmx_punpcklwd
                                    : Intrinsic::x86_mmx_punpcklbw));
    SDValue Intrin = DAG.getConstant(IntrinOp, DL, MVT::i32);
    for (unsigned i = 0; i != NumOps; i += 2)
      Ops[i / 2] = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, MVT::x86mmx, Intrin,
                               Ops[i], Ops[i + 1]);
    Ops.resize(NumOps / 2);
  }

  return Ops[0];
}
// Recursive function that attempts to find if a bool vector node was
// originally a vector/float/double that got truncated/extended/bitcast
// to/from a scalar integer. If so, replace the scalar ops with bool vector
// equivalents back down the chain.
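// e.g. an i8 value that was produced by (i8 bitcast (v8i1 X)) and then
// shifted left by 2 can be rebuilt as (kshiftl X, 2), removing the round
// trip through the scalar domain.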
static SDValue combineBitcastToBoolVector(EVT VT, SDValue V, SDLoc DL,
                                          SelectionDAG &DAG,
                                          const X86Subtarget &Subtarget) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned Opc = V.getOpcode();
  switch (Opc) {
  case ISD::BITCAST: {
    // Bitcast from a vector/float/double, we can cheaply bitcast to VT.
    SDValue Src = V.getOperand(0);
    EVT SrcVT = Src.getValueType();
    if (SrcVT.isVector() || SrcVT.isFloatingPoint())
      return DAG.getBitcast(VT, Src);
    break;
  }
  case ISD::TRUNCATE: {
    // If we find a suitable source, a truncated scalar becomes a subvector.
    SDValue Src = V.getOperand(0);
    EVT NewSrcVT =
        EVT::getVectorVT(*DAG.getContext(), MVT::i1, Src.getValueSizeInBits());
    if (TLI.isTypeLegal(NewSrcVT))
      if (SDValue N0 =
              combineBitcastToBoolVector(NewSrcVT, Src, DL, DAG, Subtarget))
        return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, N0,
                           DAG.getIntPtrConstant(0, DL));
    break;
  }
  case ISD::ANY_EXTEND:
  case ISD::ZERO_EXTEND: {
    // If we find a suitable source, an extended scalar becomes a subvector.
    SDValue Src = V.getOperand(0);
    EVT NewSrcVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                    Src.getScalarValueSizeInBits());
    if (TLI.isTypeLegal(NewSrcVT))
      if (SDValue N0 =
              combineBitcastToBoolVector(NewSrcVT, Src, DL, DAG, Subtarget))
        return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                           Opc == ISD::ANY_EXTEND ? DAG.getUNDEF(VT)
                                                  : DAG.getConstant(0, DL, VT),
                           N0, DAG.getIntPtrConstant(0, DL));
    break;
  }
  case ISD::OR: {
    // If we find suitable sources, we can just move an OR to the vector
    // domain.
    SDValue Src0 = V.getOperand(0);
    SDValue Src1 = V.getOperand(1);
    if (SDValue N0 = combineBitcastToBoolVector(VT, Src0, DL, DAG, Subtarget))
      if (SDValue N1 = combineBitcastToBoolVector(VT, Src1, DL, DAG, Subtarget))
        return DAG.getNode(Opc, DL, VT, N0, N1);
    break;
  }
  case ISD::SHL: {
    // If we find a suitable source, a SHL becomes a KSHIFTL.
    SDValue Src0 = V.getOperand(0);
    if (auto *Amt = dyn_cast<ConstantSDNode>(V.getOperand(1)))
      if (SDValue N0 = combineBitcastToBoolVector(VT, Src0, DL, DAG, Subtarget))
        return DAG.getNode(
            X86ISD::KSHIFTL, DL, VT, N0,
            DAG.getTargetConstant(Amt->getZExtValue(), DL, MVT::i8));
    break;
  }
  }
  return SDValue();
}
static SDValue combineBitcast(SDNode *N, SelectionDAG &DAG,
                              TargetLowering::DAGCombinerInfo &DCI,
                              const X86Subtarget &Subtarget) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);
  EVT SrcVT = N0.getValueType();

  // Try to match patterns such as
  // (i16 bitcast (v16i1 x))
  // ->
  // (i16 movmsk (16i8 sext (v16i1 x)))
  // before the setcc result is scalarized on subtargets that don't have legal
  // vxi1 types.
  if (DCI.isBeforeLegalize()) {
    SDLoc dl(N);
    if (SDValue V = combineBitcastvxi1(DAG, VT, N0, dl, Subtarget))
      return V;

    // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
    // type, widen both sides to avoid a trip through memory.
    if ((VT == MVT::v4i1 || VT == MVT::v2i1) && SrcVT.isScalarInteger() &&
        Subtarget.hasAVX512()) {
      N0 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i8, N0);
      N0 = DAG.getBitcast(MVT::v8i1, N0);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, N0,
                         DAG.getIntPtrConstant(0, dl));
    }

    // If this is a bitcast between a MVT::v4i1/v2i1 and an illegal integer
    // type, widen both sides to avoid a trip through memory.
    if ((SrcVT == MVT::v4i1 || SrcVT == MVT::v2i1) && VT.isScalarInteger() &&
        Subtarget.hasAVX512()) {
      // Use zeros for the widening if we already have some zeroes. This can
      // allow SimplifyDemandedBits to remove scalar ANDs that may be down
      // stream.
      // FIXME: It might make sense to detect a concat_vectors with a mix of
      // zeroes and undef and turn it into insert_subvector for i1 vectors as
      // a separate combine. What we can't do is canonicalize the operands of
      // such a concat or we'll get into a loop with SimplifyDemandedBits.
      if (N0.getOpcode() == ISD::CONCAT_VECTORS) {
        SDValue LastOp = N0.getOperand(N0.getNumOperands() - 1);
        if (ISD::isBuildVectorAllZeros(LastOp.getNode())) {
          SrcVT = LastOp.getValueType();
          unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
          SmallVector<SDValue, 4> Ops(N0->op_begin(), N0->op_end());
          Ops.resize(NumConcats, DAG.getConstant(0, dl, SrcVT));
          N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
          N0 = DAG.getBitcast(MVT::i8, N0);
          return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
        }
      }

      unsigned NumConcats = 8 / SrcVT.getVectorNumElements();
      SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(SrcVT));
      Ops[0] = N0;
      N0 = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
      N0 = DAG.getBitcast(MVT::i8, N0);
      return DAG.getNode(ISD::TRUNCATE, dl, VT, N0);
    }

    // If we're bitcasting from iX to vXi1, see if the integer originally
    // began as a vXi1 and whether we can remove the bitcast entirely.
    if (VT.isVector() && VT.getScalarType() == MVT::i1 &&
        SrcVT.isScalarInteger() &&
        DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
      if (SDValue V =
              combineBitcastToBoolVector(VT, N0, SDLoc(N), DAG, Subtarget))
        return V;
    }
  }

  // Look for (i8 (bitcast (v8i1 (extract_subvector (v16i1 X), 0)))) and
  // replace with (i8 (trunc (i16 (bitcast (v16i1 X))))). This can occur
  // due to insert_subvector legalization on KNL. By promoting the copy to i16
  // we can help with known bits propagation from the vXi1 domain to the
  // scalar domain.
  if (VT == MVT::i8 && SrcVT == MVT::v8i1 && Subtarget.hasAVX512() &&
      !Subtarget.hasDQI() && N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
      N0.getOperand(0).getValueType() == MVT::v16i1 &&
      isNullConstant(N0.getOperand(1)))
    return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT,
                       DAG.getBitcast(MVT::i16, N0.getOperand(0)));

  // Canonicalize (bitcast (vbroadcast_load)) so that the output of the bitcast
  // and the vbroadcast_load are both integer or both fp. In some cases this
  // will remove the bitcast entirely.
  if (N0.getOpcode() == X86ISD::VBROADCAST_LOAD && N0.hasOneUse() &&
      VT.isFloatingPoint() != SrcVT.isFloatingPoint() && VT.isVector()) {
    auto *BCast = cast<MemIntrinsicSDNode>(N0);
    unsigned SrcVTSize = SrcVT.getScalarSizeInBits();
    unsigned MemSize = BCast->getMemoryVT().getScalarSizeInBits();
    // Don't swap i8/i16 since we don't have fp types of that size.
    if (MemSize >= 32) {
      MVT MemVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(MemSize)
                                       : MVT::getIntegerVT(MemSize);
      MVT LoadVT = VT.isFloatingPoint() ? MVT::getFloatingPointVT(SrcVTSize)
                                        : MVT::getIntegerVT(SrcVTSize);
      LoadVT = MVT::getVectorVT(LoadVT, SrcVT.getVectorNumElements());

      SDVTList Tys = DAG.getVTList(LoadVT, MVT::Other);
      SDValue Ops[] = { BCast->getChain(), BCast->getBasePtr() };
      SDValue ResNode =
          DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, SDLoc(N), Tys, Ops,
                                  MemVT, BCast->getMemOperand());
      DAG.ReplaceAllUsesOfValueWith(SDValue(BCast, 1), ResNode.getValue(1));
      return DAG.getBitcast(VT, ResNode);
    }
  }

  // Since MMX types are special and don't usually play with other vector
  // types, it's better to handle them early to be sure we emit efficient code
  // by avoiding store-load conversions.
  if (VT == MVT::x86mmx) {
    // Detect MMX constant vectors.
    APInt UndefElts;
    SmallVector<APInt, 1> EltBits;
    if (getTargetConstantBitsFromNode(N0, 64, UndefElts, EltBits)) {
      SDLoc DL(N0);
      // Handle zero-extension of i32 with MOVD.
      if (EltBits[0].countLeadingZeros() >= 32)
        return DAG.getNode(X86ISD::MMX_MOVW2D, DL, VT,
                           DAG.getConstant(EltBits[0].trunc(32), DL, MVT::i32));
      // Else, bitcast to a double.
      // TODO - investigate supporting sext 32-bit immediates on x86_64.
      APFloat F64(APFloat::IEEEdouble(), EltBits[0]);
      return DAG.getBitcast(VT, DAG.getConstantFP(F64, DL, MVT::f64));
    }

    // Detect bitcasts to x86mmx low word.
    if (N0.getOpcode() == ISD::BUILD_VECTOR &&
        (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) &&
        N0.getOperand(0).getValueType() == SrcVT.getScalarType()) {
      bool LowUndef = true, AllUndefOrZero = true;
      for (unsigned i = 1, e = SrcVT.getVectorNumElements(); i != e; ++i) {
        SDValue Op = N0.getOperand(i);
        LowUndef &= Op.isUndef() || (i >= e/2);
        AllUndefOrZero &= (Op.isUndef() || isNullConstant(Op));
      }
      if (AllUndefOrZero) {
        SDValue N00 = N0.getOperand(0);
        SDLoc dl(N00);
        N00 = LowUndef ? DAG.getAnyExtOrTrunc(N00, dl, MVT::i32)
                       : DAG.getZExtOrTrunc(N00, dl, MVT::i32);
        return DAG.getNode(X86ISD::MMX_MOVW2D, dl, VT, N00);
      }
    }

    // Detect bitcasts of 64-bit build vectors and convert to a
    // MMX UNPCK/PSHUFW which takes MMX type inputs with the value in the
    // lowest element.
    if (N0.getOpcode() == ISD::BUILD_VECTOR &&
        (SrcVT == MVT::v2f32 || SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 ||
         SrcVT == MVT::v8i8))
      return createMMXBuildVector(cast<BuildVectorSDNode>(N0), DAG, Subtarget);

    // Detect bitcasts between element or subvector extraction to x86mmx.
    if ((N0.getOpcode() == ISD::EXTRACT_VECTOR_ELT ||
         N0.getOpcode() == ISD::EXTRACT_SUBVECTOR) &&
        isNullConstant(N0.getOperand(1))) {
      SDValue N00 = N0.getOperand(0);
      if (N00.getValueType().is128BitVector())
        return DAG.getNode(X86ISD::MOVDQ2Q, SDLoc(N00), VT,
                           DAG.getBitcast(MVT::v2i64, N00));
    }

    // Detect bitcasts from FP_TO_SINT to x86mmx.
    if (SrcVT == MVT::v2i32 && N0.getOpcode() == ISD::FP_TO_SINT) {
      SDLoc DL(N0);
      SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4i32, N0,
                                DAG.getUNDEF(MVT::v2i32));
      return DAG.getNode(X86ISD::MOVDQ2Q, DL, VT,
                         DAG.getBitcast(MVT::v2i64, Res));
    }
  }

  // Try to remove a bitcast of constant vXi1 vector. We have to legalize
  // most of these to scalar anyway.
  if (Subtarget.hasAVX512() && VT.isScalarInteger() &&
      SrcVT.isVector() && SrcVT.getVectorElementType() == MVT::i1 &&
      ISD::isBuildVectorOfConstantSDNodes(N0.getNode())) {
    return combinevXi1ConstantToInteger(N0, DAG);
  }

  if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
      VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
      isa<ConstantSDNode>(N0)) {
    auto *C = cast<ConstantSDNode>(N0);
    if (C->isAllOnesValue())
      return DAG.getConstant(1, SDLoc(N0), VT);
    if (C->isNullValue())
      return DAG.getConstant(0, SDLoc(N0), VT);
  }

  // Look for MOVMSK that is maybe truncated and then bitcasted to vXi1.
  // Turn it into a sign bit compare that produces a k-register. This avoids
  // a trip through a GPR.
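  // For example (an illustrative sketch):
  //   (v4i1 (bitcast (i4 (trunc (movmsk (v4f32 X))))))
  // becomes
  //   (v4i1 (setcc (v4i32 (bitcast X)), 0, setlt))
  // i.e. a direct sign-bit test whose result lives in a mask register.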
  if (Subtarget.hasAVX512() && SrcVT.isScalarInteger() &&
      VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
      isPowerOf2_32(VT.getVectorNumElements())) {
    unsigned NumElts = VT.getVectorNumElements();
    SDValue Src = N0;
    SDLoc dl(N);

    // Peek through truncate.
    if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse())
      Src = N0.getOperand(0);

    if (Src.getOpcode() == X86ISD::MOVMSK && Src.hasOneUse()) {
      SDValue MovmskIn = Src.getOperand(0);
      MVT MovmskVT = MovmskIn.getSimpleValueType();
      unsigned MovMskElts = MovmskVT.getVectorNumElements();

      // We allow extra bits of the movmsk to be used since they are known
      // zero. We can't convert a VPMOVMSKB without avx512bw.
      if (MovMskElts <= NumElts &&
          (Subtarget.hasBWI() || MovmskVT.getVectorElementType() != MVT::i8)) {
        EVT IntVT = EVT(MovmskVT).changeVectorElementTypeToInteger();
        MovmskIn = DAG.getBitcast(IntVT, MovmskIn);

        MVT CmpVT = MVT::getVectorVT(MVT::i1, MovMskElts);
        SDValue Cmp = DAG.getSetCC(dl, CmpVT, MovmskIn,
                                   DAG.getConstant(0, dl, IntVT), ISD::SETLT);
        if (EVT(CmpVT) == VT)
          return Cmp;

        // Pad with zeroes up to original VT to replace the zeroes that were
        // being used from the MOVMSK.
        unsigned NumConcats = NumElts / MovMskElts;
        SmallVector<SDValue, 4> Ops(NumConcats, DAG.getConstant(0, dl, CmpVT));
        Ops[0] = Cmp;
        return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Ops);
      }
    }
  }

  // Try to remove bitcasts from input and output of mask arithmetic to
  // remove GPR<->K-register crossings.
  if (SDValue V = combineCastedMaskArithmetic(N, DAG, DCI, Subtarget))
    return V;

  // Convert a bitcasted integer logic operation that has one bitcasted
  // floating-point operand into a floating-point logic operation. This may
  // create a load of a constant, but that is cheaper than materializing the
  // constant in an integer register and transferring it to an SSE register or
  // transferring the SSE operand to integer register and back.
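  // For example (illustrative), with SSE2 and f64:
  //   (f64 (bitcast (and (i64 (bitcast (f64 X))), Y)))
  // becomes
  //   (X86ISD::FAND X, (f64 (bitcast Y)))
  // keeping the value in an SSE register instead of bouncing through a GPR.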
  unsigned FPOpcode;
  switch (N0.getOpcode()) {
  case ISD::AND: FPOpcode = X86ISD::FAND; break;
  case ISD::OR: FPOpcode = X86ISD::FOR; break;
  case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
  default: return SDValue();
  }

  if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
        (Subtarget.hasSSE2() && VT == MVT::f64)))
    return SDValue();

  SDValue LogicOp0 = N0.getOperand(0);
  SDValue LogicOp1 = N0.getOperand(1);
  SDLoc DL0(N0);

  // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
  if (N0.hasOneUse() && LogicOp0.getOpcode() == ISD::BITCAST &&
      LogicOp0.hasOneUse() && LogicOp0.getOperand(0).getValueType() == VT &&
      !isa<ConstantSDNode>(LogicOp0.getOperand(0))) {
    SDValue CastedOp1 = DAG.getBitcast(VT, LogicOp1);
    return DAG.getNode(FPOpcode, DL0, VT, LogicOp0.getOperand(0), CastedOp1);
  }
  // bitcast(logic(X, bitcast(Y))) --> logic'(bitcast(X), Y)
  if (N0.hasOneUse() && LogicOp1.getOpcode() == ISD::BITCAST &&
      LogicOp1.hasOneUse() && LogicOp1.getOperand(0).getValueType() == VT &&
      !isa<ConstantSDNode>(LogicOp1.getOperand(0))) {
    SDValue CastedOp0 = DAG.getBitcast(VT, LogicOp0);
    return DAG.getNode(FPOpcode, DL0, VT, LogicOp1.getOperand(0), CastedOp0);
  }

  return SDValue();
}

// Given an ABS node, detect the following pattern:
// (ABS (SUB (ZERO_EXTEND a), (ZERO_EXTEND b))).
// This is useful as it is the input into a SAD pattern.
static bool detectZextAbsDiff(const SDValue &Abs, SDValue &Op0, SDValue &Op1) {
  SDValue AbsOp1 = Abs->getOperand(0);
  if (AbsOp1.getOpcode() != ISD::SUB)
    return false;

  Op0 = AbsOp1.getOperand(0);
  Op1 = AbsOp1.getOperand(1);

  // Check if the operands of the sub are zero-extended from vectors of i8.
  if (Op0.getOpcode() != ISD::ZERO_EXTEND ||
      Op0.getOperand(0).getValueType().getVectorElementType() != MVT::i8 ||
      Op1.getOpcode() != ISD::ZERO_EXTEND ||
      Op1.getOperand(0).getValueType().getVectorElementType() != MVT::i8)
    return false;

  return true;
}

// Given two zexts of <k x i8> to <k x i32>, create a PSADBW of the inputs
// to these zexts.
static SDValue createPSADBW(SelectionDAG &DAG, const SDValue &Zext0,
                            const SDValue &Zext1, const SDLoc &DL,
                            const X86Subtarget &Subtarget) {
  // Find the appropriate width for the PSADBW.
  EVT InVT = Zext0.getOperand(0).getValueType();
  unsigned RegSize = std::max(128u, (unsigned)InVT.getSizeInBits());

  // "Zero-extend" the i8 vectors. This is not a per-element zext, rather we
  // fill in the missing vector elements with 0.
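  // For example (illustrative): a v4i8 input lands in the low 4 bytes of a
  // v16i8 register (NumConcat = 128 / 32 = 4) and the other 12 bytes are 0;
  // zero bytes contribute nothing to the sum of absolute differences.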
  unsigned NumConcat = RegSize / InVT.getSizeInBits();
  SmallVector<SDValue, 16> Ops(NumConcat, DAG.getConstant(0, DL, InVT));
  Ops[0] = Zext0.getOperand(0);
  MVT ExtendedVT = MVT::getVectorVT(MVT::i8, RegSize / 8);
  SDValue SadOp0 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);
  Ops[0] = Zext1.getOperand(0);
  SDValue SadOp1 = DAG.getNode(ISD::CONCAT_VECTORS, DL, ExtendedVT, Ops);

  // Actually build the SAD, split as 128/256/512 bits for SSE/AVX2/AVX512BW.
  auto PSADBWBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                          ArrayRef<SDValue> Ops) {
    MVT VT = MVT::getVectorVT(MVT::i64, Ops[0].getValueSizeInBits() / 64);
    return DAG.getNode(X86ISD::PSADBW, DL, VT, Ops);
  };
  MVT SadVT = MVT::getVectorVT(MVT::i64, RegSize / 64);
  return SplitOpsAndApply(DAG, Subtarget, DL, SadVT, { SadOp0, SadOp1 },
                          PSADBWBuilder);
}

// Attempt to replace a min/max v8i16/v16i8 horizontal reduction with
// PHMINPOSUW.
static SDValue combineHorizontalMinMaxResult(SDNode *Extract, SelectionDAG &DAG,
                                             const X86Subtarget &Subtarget) {
  // Bail without SSE41.
  if (!Subtarget.hasSSE41())
    return SDValue();

  EVT ExtractVT = Extract->getValueType(0);
  if (ExtractVT != MVT::i16 && ExtractVT != MVT::i8)
    return SDValue();

  // Check for SMAX/SMIN/UMAX/UMIN horizontal reduction patterns.
  ISD::NodeType BinOp;
  SDValue Src = DAG.matchBinOpReduction(
      Extract, BinOp, {ISD::SMAX, ISD::SMIN, ISD::UMAX, ISD::UMIN}, true);
  if (!Src)
    return SDValue();

  EVT SrcVT = Src.getValueType();
  EVT SrcSVT = SrcVT.getScalarType();
  if (SrcSVT != ExtractVT || (SrcVT.getSizeInBits() % 128) != 0)
    return SDValue();

  SDLoc DL(Extract);
  SDValue MinPos = Src;

  // First, reduce the source down to 128-bit, applying BinOp to lo/hi.
  while (SrcVT.getSizeInBits() > 128) {
    SDValue Lo, Hi;
    std::tie(Lo, Hi) = splitVector(MinPos, DAG, DL);
    SrcVT = Lo.getValueType();
    MinPos = DAG.getNode(BinOp, DL, SrcVT, Lo, Hi);
  }
  assert(((SrcVT == MVT::v8i16 && ExtractVT == MVT::i16) ||
          (SrcVT == MVT::v16i8 && ExtractVT == MVT::i8)) &&
         "Unexpected value type");

  // PHMINPOSUW applies to UMIN(v8i16), for SMIN/SMAX/UMAX we must apply a mask
  // to flip the value accordingly.
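  // For example (illustrative, i16 elements): for SMAX the mask is 0x7fff and
  // x ^ 0x7fff maps larger signed values to smaller unsigned ones, e.g.
  //   SMAX(2, -3): 2 ^ 0x7fff = 0x7ffd, 0xfffd ^ 0x7fff = 0x8002,
  //   UMIN picks 0x7ffd, and 0x7ffd ^ 0x7fff = 2 recovers the answer.
  // UMAX uses all-ones (a bitwise NOT) and SMIN uses 0x8000 (a sign-bit flip
  // into biased order) in the same way.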
  SDValue Mask;
  unsigned MaskEltsBits = ExtractVT.getSizeInBits();
  if (BinOp == ISD::SMAX)
    Mask = DAG.getConstant(APInt::getSignedMaxValue(MaskEltsBits), DL, SrcVT);
  else if (BinOp == ISD::SMIN)
    Mask = DAG.getConstant(APInt::getSignedMinValue(MaskEltsBits), DL, SrcVT);
  else if (BinOp == ISD::UMAX)
    Mask = DAG.getConstant(APInt::getAllOnesValue(MaskEltsBits), DL, SrcVT);

  if (Mask)
    MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);

  // For v16i8 cases we need to perform UMIN on pairs of byte elements,
  // shuffling each upper element down and inserting zeros. This means that
  // the v16i8 UMIN will leave the upper element as zero, performing
  // zero-extension ready for the PHMINPOS.
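  // For example (illustrative): bytes {b0,b1,b2,b3,...} are reduced pairwise
  // to words {umin(b0,b1), umin(b2,b3), ...} with zero in each high byte, so
  // the v8i16 PHMINPOSUW below sees correctly zero-extended values.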
  if (ExtractVT == MVT::i8) {
    SDValue Upper = DAG.getVectorShuffle(
        SrcVT, DL, MinPos, DAG.getConstant(0, DL, MVT::v16i8),
        {1, 16, 3, 16, 5, 16, 7, 16, 9, 16, 11, 16, 13, 16, 15, 16});
    MinPos = DAG.getNode(ISD::UMIN, DL, SrcVT, MinPos, Upper);
  }

  // Perform the PHMINPOS on a v8i16 vector.
  MinPos = DAG.getBitcast(MVT::v8i16, MinPos);
  MinPos = DAG.getNode(X86ISD::PHMINPOS, DL, MVT::v8i16, MinPos);
  MinPos = DAG.getBitcast(SrcVT, MinPos);

  if (Mask)
    MinPos = DAG.getNode(ISD::XOR, DL, SrcVT, Mask, MinPos);

  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, MinPos,
                     DAG.getIntPtrConstant(0, DL));
}

// Attempt to replace an all_of/any_of/parity style horizontal reduction with
// a MOVMSK.
static SDValue combineHorizontalPredicateResult(SDNode *Extract,
                                                SelectionDAG &DAG,
                                                const X86Subtarget &Subtarget) {
  // Bail without SSE2.
  if (!Subtarget.hasSSE2())
    return SDValue();

  EVT ExtractVT = Extract->getValueType(0);
  unsigned BitWidth = ExtractVT.getSizeInBits();
  if (ExtractVT != MVT::i64 && ExtractVT != MVT::i32 && ExtractVT != MVT::i16 &&
      ExtractVT != MVT::i8 && ExtractVT != MVT::i1)
    return SDValue();

  // Check for OR(any_of)/AND(all_of)/XOR(parity) horizontal reduction patterns.
  ISD::NodeType BinOp;
  SDValue Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::OR, ISD::AND});
  if (!Match && ExtractVT == MVT::i1)
    Match = DAG.matchBinOpReduction(Extract, BinOp, {ISD::XOR});
  if (!Match)
    return SDValue();

  // EXTRACT_VECTOR_ELT can require implicit extension of the vector element
  // which we can't support here for now.
  if (Match.getScalarValueSizeInBits() != BitWidth)
    return SDValue();

  SDValue Movmsk;
  SDLoc DL(Extract);
  EVT MatchVT = Match.getValueType();
  unsigned NumElts = MatchVT.getVectorNumElements();
  unsigned MaxElts = Subtarget.hasInt256() ? 32 : 16;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (ExtractVT == MVT::i1) {
    // Special case for (pre-legalization) vXi1 reductions.
    if (NumElts > 64 || !isPowerOf2_32(NumElts))
      return SDValue();
    if (TLI.isTypeLegal(MatchVT)) {
      // If this is a legal AVX512 predicate type then we can just bitcast.
      EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
      Movmsk = DAG.getBitcast(MovmskVT, Match);
    } else {
      // For all_of(setcc(vec,0,eq)) - avoid vXi64 comparisons if we don't have
      // PCMPEQQ (SSE41+), use PCMPEQD instead.
      if (BinOp == ISD::AND && !Subtarget.hasSSE41() &&
          Match.getOpcode() == ISD::SETCC &&
          ISD::isBuildVectorAllZeros(Match.getOperand(1).getNode()) &&
          cast<CondCodeSDNode>(Match.getOperand(2))->get() ==
              ISD::CondCode::SETEQ) {
        SDValue Vec = Match.getOperand(0);
        if (Vec.getValueType().getScalarType() == MVT::i64 &&
            (2 * NumElts) <= MaxElts) {
          NumElts *= 2;
          EVT CmpVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
          MatchVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
          Match = DAG.getSetCC(
              DL, MatchVT, DAG.getBitcast(CmpVT, Match.getOperand(0)),
              DAG.getBitcast(CmpVT, Match.getOperand(1)), ISD::CondCode::SETEQ);
        }
      }

      // Use combineBitcastvxi1 to create the MOVMSK.
      while (NumElts > MaxElts) {
        SDValue Lo, Hi;
        std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
        Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
        NumElts /= 2;
      }
      EVT MovmskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
      Movmsk = combineBitcastvxi1(DAG, MovmskVT, Match, DL, Subtarget);
    }
    if (!Movmsk)
      return SDValue();
    Movmsk = DAG.getZExtOrTrunc(Movmsk, DL, NumElts > 32 ? MVT::i64 : MVT::i32);
  } else {
    // FIXME: Better handling of k-registers or 512-bit vectors?
    unsigned MatchSizeInBits = Match.getValueSizeInBits();
    if (!(MatchSizeInBits == 128 ||
          (MatchSizeInBits == 256 && Subtarget.hasAVX())))
      return SDValue();

    // Make sure this isn't a vector of 1 element. The perf win from using
    // MOVMSK diminishes with fewer elements in the reduction, but it is
    // generally better to get the comparison over to the GPRs as soon as
    // possible to reduce the number of vector ops.
    if (Match.getValueType().getVectorNumElements() < 2)
      return SDValue();

    // Check that we are extracting a reduction of all sign bits.
    if (DAG.ComputeNumSignBits(Match) != BitWidth)
      return SDValue();

    if (MatchSizeInBits == 256 && BitWidth < 32 && !Subtarget.hasInt256()) {
      SDValue Lo, Hi;
      std::tie(Lo, Hi) = DAG.SplitVector(Match, DL);
      Match = DAG.getNode(BinOp, DL, Lo.getValueType(), Lo, Hi);
      MatchSizeInBits = Match.getValueSizeInBits();
    }

    // For 32/64 bit comparisons use MOVMSKPS/MOVMSKPD, else PMOVMSKB.
    MVT MaskSrcVT;
    if (64 == BitWidth || 32 == BitWidth)
      MaskSrcVT = MVT::getVectorVT(MVT::getFloatingPointVT(BitWidth),
                                   MatchSizeInBits / BitWidth);
    else
      MaskSrcVT = MVT::getVectorVT(MVT::i8, MatchSizeInBits / 8);

    SDValue BitcastLogicOp = DAG.getBitcast(MaskSrcVT, Match);
    Movmsk = getPMOVMSKB(DL, BitcastLogicOp, DAG, Subtarget);
    NumElts = MaskSrcVT.getVectorNumElements();
  }
  assert((NumElts <= 32 || NumElts == 64) &&
         "Not expecting more than 64 elements");

  MVT CmpVT = NumElts == 64 ? MVT::i64 : MVT::i32;
  if (BinOp == ISD::XOR) {
    // parity -> (AND (CTPOP(MOVMSK X)), 1)
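    // For example (illustrative): movmsk = 0b1011 -> ctpop = 3 -> parity = 1.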
    SDValue Mask = DAG.getConstant(1, DL, CmpVT);
    SDValue Result = DAG.getNode(ISD::CTPOP, DL, CmpVT, Movmsk);
    Result = DAG.getNode(ISD::AND, DL, CmpVT, Result, Mask);
    return DAG.getZExtOrTrunc(Result, DL, ExtractVT);
  }

  SDValue CmpC;
  ISD::CondCode CondCode;
  if (BinOp == ISD::OR) {
    // any_of -> MOVMSK != 0
    CmpC = DAG.getConstant(0, DL, CmpVT);
    CondCode = ISD::CondCode::SETNE;
  } else {
    // all_of -> MOVMSK == ((1 << NumElts) - 1)
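    // For example (illustrative): an all_of over v4f32 sign bits uses
    // MOVMSKPS, which produces 4 mask bits, and compares against 0xf.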
    CmpC = DAG.getConstant(APInt::getLowBitsSet(CmpVT.getSizeInBits(), NumElts),
                           DL, CmpVT);
    CondCode = ISD::CondCode::SETEQ;
  }

  // The setcc produces an i8 of 0/1, so extend that to the result width and
  // negate to get the final 0/-1 mask value.
  EVT SetccVT =
      TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), CmpVT);
  SDValue Setcc = DAG.getSetCC(DL, SetccVT, Movmsk, CmpC, CondCode);
  SDValue Zext = DAG.getZExtOrTrunc(Setcc, DL, ExtractVT);
  SDValue Zero = DAG.getConstant(0, DL, ExtractVT);
  return DAG.getNode(ISD::SUB, DL, ExtractVT, Zero, Zext);
}

static SDValue combineBasicSADPattern(SDNode *Extract, SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  // PSADBW is only supported on SSE2 and up.
  if (!Subtarget.hasSSE2())
    return SDValue();

  EVT ExtractVT = Extract->getValueType(0);
  // Verify the type we're extracting is either i32 or i64.
  // FIXME: Could support other types, but this is what we have coverage for.
  if (ExtractVT != MVT::i32 && ExtractVT != MVT::i64)
    return SDValue();

  EVT VT = Extract->getOperand(0).getValueType();
  if (!isPowerOf2_32(VT.getVectorNumElements()))
    return SDValue();

  // Match shuffle + add pyramid.
  ISD::NodeType BinOp;
  SDValue Root = DAG.matchBinOpReduction(Extract, BinOp, {ISD::ADD});

  // The operand is expected to be zero extended from i8
  // (verified in detectZextAbsDiff).
  // In order to convert to i64 and above, an additional any/zero/sign
  // extend is expected.
  // The zero extend from 32 bit has no mathematical effect on the result.
  // Also the sign extend is basically a zero extend
  // (it extends the sign bit, which is zero).
  // So it is correct to skip the sign/zero extend instruction.
  if (Root && (Root.getOpcode() == ISD::SIGN_EXTEND ||
               Root.getOpcode() == ISD::ZERO_EXTEND ||
               Root.getOpcode() == ISD::ANY_EXTEND))
    Root = Root.getOperand(0);

  // If there was a match, we want Root to be the root of an abs-diff pattern,
  // i.e. an ABS node.
  if (!Root || Root.getOpcode() != ISD::ABS)
    return SDValue();

  // Check whether we have an abs-diff pattern feeding into the ABS.
  SDValue Zext0, Zext1;
  if (!detectZextAbsDiff(Root, Zext0, Zext1))
    return SDValue();

  // Create the SAD instruction.
  SDLoc DL(Extract);
  SDValue SAD = createPSADBW(DAG, Zext0, Zext1, DL, Subtarget);

  // If the original vector was wider than 8 elements, sum over the results
  // in the SAD vector.
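  // For example (illustrative): a v32i8 input gives Stages = 5 and a v4i64
  // SAD vector, so the loop below runs twice: first adding elements {2,3}
  // into {0,1}, then adding element {1} into {0}, leaving the total in
  // element 0.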
  unsigned Stages = Log2_32(VT.getVectorNumElements());
  EVT SadVT = SAD.getValueType();
  if (Stages > 3) {
    unsigned SadElems = SadVT.getVectorNumElements();

    for (unsigned i = Stages - 3; i > 0; --i) {
      SmallVector<int, 16> Mask(SadElems, -1);
      for (unsigned j = 0, MaskEnd = 1 << (i - 1); j < MaskEnd; ++j)
        Mask[j] = MaskEnd + j;

      SDValue Shuffle =
          DAG.getVectorShuffle(SadVT, DL, SAD, DAG.getUNDEF(SadVT), Mask);
      SAD = DAG.getNode(ISD::ADD, DL, SadVT, SAD, Shuffle);
    }
  }

  unsigned ExtractSizeInBits = ExtractVT.getSizeInBits();
  // Return the lowest ExtractSizeInBits bits.
  EVT ResVT = EVT::getVectorVT(*DAG.getContext(), ExtractVT,
                               SadVT.getSizeInBits() / ExtractSizeInBits);
  SAD = DAG.getBitcast(ResVT, SAD);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtractVT, SAD,
                     Extract->getOperand(1));
}

// Attempt to peek through a target shuffle and extract the scalar from the
// source.
static SDValue combineExtractWithShuffle(SDNode *N, SelectionDAG &DAG,
                                         TargetLowering::DAGCombinerInfo &DCI,
                                         const X86Subtarget &Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDLoc dl(N);
  SDValue Src = N->getOperand(0);
  SDValue Idx = N->getOperand(1);

  EVT VT = N->getValueType(0);
  EVT SrcVT = Src.getValueType();
  EVT SrcSVT = SrcVT.getVectorElementType();
  unsigned SrcEltBits = SrcSVT.getSizeInBits();
  unsigned NumSrcElts = SrcVT.getVectorNumElements();

  // Don't attempt this for boolean mask vectors or unknown extraction indices.
  if (SrcSVT == MVT::i1 || !isa<ConstantSDNode>(Idx))
    return SDValue();

  const APInt &IdxC = N->getConstantOperandAPInt(1);
  if (IdxC.uge(NumSrcElts))
    return SDValue();

  SDValue SrcBC = peekThroughBitcasts(Src);

  // Handle extract(bitcast(broadcast(scalar_value))).
  if (X86ISD::VBROADCAST == SrcBC.getOpcode()) {
    SDValue SrcOp = SrcBC.getOperand(0);
    EVT SrcOpVT = SrcOp.getValueType();
    if (SrcOpVT.isScalarInteger() && VT.isInteger() &&
        (SrcOpVT.getSizeInBits() % SrcEltBits) == 0) {
      unsigned Scale = SrcOpVT.getSizeInBits() / SrcEltBits;
      unsigned Offset = IdxC.urem(Scale) * SrcEltBits;
      // TODO support non-zero offsets.
      if (Offset == 0) {
        SrcOp = DAG.getZExtOrTrunc(SrcOp, dl, SrcVT.getScalarType());
        SrcOp = DAG.getZExtOrTrunc(SrcOp, dl, VT);
        return SrcOp;
      }
    }
  }

  // If we're extracting a single element from a broadcast load and there are
  // no other users, just create a single load.
  if (SrcBC.getOpcode() == X86ISD::VBROADCAST_LOAD && SrcBC.hasOneUse()) {
    auto *MemIntr = cast<MemIntrinsicSDNode>(SrcBC);
    unsigned SrcBCWidth = SrcBC.getScalarValueSizeInBits();
    if (MemIntr->getMemoryVT().getSizeInBits() == SrcBCWidth &&
        VT.getSizeInBits() == SrcBCWidth && SrcEltBits == SrcBCWidth) {
      SDValue Load = DAG.getLoad(VT, dl, MemIntr->getChain(),
                                 MemIntr->getBasePtr(),
                                 MemIntr->getPointerInfo(),
                                 MemIntr->getOriginalAlign(),
                                 MemIntr->getMemOperand()->getFlags());
      DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), Load.getValue(1));
      return Load;
    }
  }

  // Handle extract(bitcast(scalar_to_vector(scalar_value))) for integers.
  // TODO: Move to DAGCombine?
  if (SrcBC.getOpcode() == ISD::SCALAR_TO_VECTOR && VT.isInteger() &&
      SrcBC.getValueType().isInteger() &&
      (SrcBC.getScalarValueSizeInBits() % SrcEltBits) == 0 &&
      SrcBC.getScalarValueSizeInBits() ==
          SrcBC.getOperand(0).getValueSizeInBits()) {
    unsigned Scale = SrcBC.getScalarValueSizeInBits() / SrcEltBits;
    if (IdxC.ult(Scale)) {
      unsigned Offset = IdxC.getZExtValue() * SrcVT.getScalarSizeInBits();
      SDValue Scl = SrcBC.getOperand(0);
      EVT SclVT = Scl.getValueType();
      if (Offset)
        Scl = DAG.getNode(ISD::SRL, dl, SclVT, Scl,
                          DAG.getShiftAmountConstant(Offset, SclVT, dl));

      Scl = DAG.getZExtOrTrunc(Scl, dl, SrcVT.getScalarType());
      Scl = DAG.getZExtOrTrunc(Scl, dl, VT);
      return Scl;
    }
  }

  // Handle extract(truncate(x)) for 0'th index.
  // TODO: Treat this as a faux shuffle?
  // TODO: When can we use this for general indices?
  if (ISD::TRUNCATE == Src.getOpcode() && SrcVT.is128BitVector() && IdxC == 0) {
    Src = extract128BitVector(Src.getOperand(0), 0, DAG, dl);
    Src = DAG.getBitcast(SrcVT, Src);
    return DAG.getNode(N->getOpcode(), dl, VT, Src, Idx);
  }

  // Resolve the target shuffle inputs and mask.
  SmallVector<int, 16> Mask;
  SmallVector<SDValue, 2> Ops;
  if (!getTargetShuffleInputs(SrcBC, Ops, Mask, DAG))
    return SDValue();

  // Shuffle inputs must be the same size as the result.
  if (llvm::any_of(Ops, [SrcVT](SDValue Op) {
        return SrcVT.getSizeInBits() != Op.getValueSizeInBits();
      }))
    return SDValue();

  // Attempt to narrow/widen the shuffle mask to the correct size.
  if (Mask.size() != NumSrcElts) {
    if ((NumSrcElts % Mask.size()) == 0) {
      SmallVector<int, 16> ScaledMask;
      int Scale = NumSrcElts / Mask.size();
      narrowShuffleMaskElts(Scale, Mask, ScaledMask);
      Mask = std::move(ScaledMask);
    } else if ((Mask.size() % NumSrcElts) == 0) {
      // Simplify Mask based on demanded element.
      int ExtractIdx = (int)N->getConstantOperandVal(1);
      int Scale = Mask.size() / NumSrcElts;
      int Lo = Scale * ExtractIdx;
      int Hi = Scale * (ExtractIdx + 1);
      for (int i = 0, e = (int)Mask.size(); i != e; ++i)
        if (i < Lo || Hi <= i)
          Mask[i] = SM_SentinelUndef;

      SmallVector<int, 16> WidenedMask;
      while (Mask.size() > NumSrcElts &&
             canWidenShuffleElements(Mask, WidenedMask))
        Mask = std::move(WidenedMask);
      // TODO - investigate support for wider shuffle masks with known upper
      // undef/zero elements for implicit zero-extension.
    }
  }

  // Check if narrowing/widening failed.
  if (Mask.size() != NumSrcElts)
    return SDValue();

  int SrcIdx = Mask[IdxC.getZExtValue()];

  // If the shuffle source element is undef/zero then we can just accept it.
  if (SrcIdx == SM_SentinelUndef)
    return DAG.getUNDEF(VT);

  if (SrcIdx == SM_SentinelZero)
    return VT.isFloatingPoint() ? DAG.getConstantFP(0.0, dl, VT)
                                : DAG.getConstant(0, dl, VT);

  SDValue SrcOp = Ops[SrcIdx / Mask.size()];
  SrcIdx = SrcIdx % Mask.size();

  // We can only extract other elements from 128-bit vectors and in certain
  // circumstances, depending on SSE-level.
  // TODO: Investigate using extract_subvector for larger vectors.
  // TODO: Investigate float/double extraction if it will be just stored.
  if ((SrcVT == MVT::v4i32 || SrcVT == MVT::v2i64) &&
      ((SrcIdx == 0 && Subtarget.hasSSE2()) || Subtarget.hasSSE41())) {
    assert(SrcSVT == VT && "Unexpected extraction type");
    SrcOp = DAG.getBitcast(SrcVT, SrcOp);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SrcSVT, SrcOp,
                       DAG.getIntPtrConstant(SrcIdx, dl));
  }

  if ((SrcVT == MVT::v8i16 && Subtarget.hasSSE2()) ||
      (SrcVT == MVT::v16i8 && Subtarget.hasSSE41())) {
    assert(VT.getSizeInBits() >= SrcEltBits && "Unexpected extraction type");
    unsigned OpCode = (SrcVT == MVT::v8i16 ? X86ISD::PEXTRW : X86ISD::PEXTRB);
    SrcOp = DAG.getBitcast(SrcVT, SrcOp);
    SDValue ExtOp = DAG.getNode(OpCode, dl, MVT::i32, SrcOp,
                                DAG.getIntPtrConstant(SrcIdx, dl));
    return DAG.getZExtOrTrunc(ExtOp, dl, VT);
  }

  return SDValue();
}

/// Extracting a scalar FP value from vector element 0 is free, so extract each
/// operand first, then perform the math as a scalar op.
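/// For example (illustrative):
///   extractelt (fadd X, Y), 0 --> fadd (extractelt X, 0), (extractelt Y, 0)
/// since the scalar op can use the value already sitting in element 0 of an
/// XMM register.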
static SDValue scalarizeExtEltFP(SDNode *ExtElt, SelectionDAG &DAG) {
  assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Expected extract");
  SDValue Vec = ExtElt->getOperand(0);
  SDValue Index = ExtElt->getOperand(1);
  EVT VT = ExtElt->getValueType(0);
  EVT VecVT = Vec.getValueType();

  // TODO: If this is a unary/expensive/expand op, allow extraction from a
  // non-zero element because the shuffle+scalar op will be cheaper?
  if (!Vec.hasOneUse() || !isNullConstant(Index) || VecVT.getScalarType() != VT)
    return SDValue();

  // Vector FP compares don't fit the pattern of FP math ops (propagate, not
  // extract, the condition code), so deal with those as a special-case.
  if (Vec.getOpcode() == ISD::SETCC && VT == MVT::i1) {
    EVT OpVT = Vec.getOperand(0).getValueType().getScalarType();
    if (OpVT != MVT::f32 && OpVT != MVT::f64)
      return SDValue();

    // extract (setcc X, Y, CC), 0 --> setcc (extract X, 0), (extract Y, 0), CC
    SDLoc DL(ExtElt);
    SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
                               Vec.getOperand(0), Index);
    SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, OpVT,
                               Vec.getOperand(1), Index);
    return DAG.getNode(Vec.getOpcode(), DL, VT, Ext0, Ext1, Vec.getOperand(2));
  }

  if (VT != MVT::f32 && VT != MVT::f64)
    return SDValue();

  // Vector FP selects don't fit the pattern of FP math ops (because the
  // condition has a different type and we have to change the opcode), so deal
  // with those here.
  // FIXME: This is restricted to pre type legalization by ensuring the setcc
  // has i1 elements. If we loosen this we need to convert vector bool to a
  // scalar bool.
  if (Vec.getOpcode() == ISD::VSELECT &&
      Vec.getOperand(0).getOpcode() == ISD::SETCC &&
      Vec.getOperand(0).getValueType().getScalarType() == MVT::i1 &&
      Vec.getOperand(0).getOperand(0).getValueType() == VecVT) {
    // ext (sel Cond, X, Y), 0 --> sel (ext Cond, 0), (ext X, 0), (ext Y, 0)
    SDLoc DL(ExtElt);
    SDValue Ext0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL,
                               Vec.getOperand(0).getValueType().getScalarType(),
                               Vec.getOperand(0), Index);
    SDValue Ext1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
                               Vec.getOperand(1), Index);
    SDValue Ext2 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT,
                               Vec.getOperand(2), Index);
    return DAG.getNode(ISD::SELECT, DL, VT, Ext0, Ext1, Ext2);
  }

  // TODO: This switch could include FNEG and the x86-specific FP logic ops
  // (FAND, FANDN, FOR, FXOR). But that may require enhancements to avoid
  // missed load folding and fma+fneg combining.
  switch (Vec.getOpcode()) {
  case ISD::FMA: // Begin 3 operands
  case ISD::FMAD:
  case ISD::FADD: // Begin 2 operands
  case ISD::FSUB:
  case ISD::FMUL:
  case ISD::FDIV:
  case ISD::FREM:
  case ISD::FCOPYSIGN:
  case ISD::FMINNUM:
  case ISD::FMAXNUM:
  case ISD::FMINNUM_IEEE:
  case ISD::FMAXNUM_IEEE:
  case ISD::FMAXIMUM:
  case ISD::FMINIMUM:
  case X86ISD::FMAX:
  case X86ISD::FMIN:
  case ISD::FABS: // Begin 1 operand
  case ISD::FSQRT:
  case ISD::FRINT:
  case ISD::FCEIL:
  case ISD::FTRUNC:
  case ISD::FNEARBYINT:
  case ISD::FROUND:
  case ISD::FFLOOR:
  case X86ISD::FRCP:
  case X86ISD::FRSQRT: {
    // extract (fp X, Y, ...), 0 --> fp (extract X, 0), (extract Y, 0), ...
    SDLoc DL(ExtElt);
    SmallVector<SDValue, 4> ExtOps;
    for (SDValue Op : Vec->ops())
      ExtOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Op, Index));
    return DAG.getNode(Vec.getOpcode(), DL, VT, ExtOps);
  }
  default:
    return SDValue();
  }
  llvm_unreachable("All opcodes should return within switch");
}

/// Try to convert a vector reduction sequence composed of binops and shuffles
/// into horizontal ops.
static SDValue combineReductionToHorizontal(SDNode *ExtElt, SelectionDAG &DAG,
                                            const X86Subtarget &Subtarget) {
  assert(ExtElt->getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unexpected caller");

  // We need at least SSE2 to do anything here.
  if (!Subtarget.hasSSE2())
    return SDValue();

  ISD::NodeType Opc;
  SDValue Rdx =
      DAG.matchBinOpReduction(ExtElt, Opc, {ISD::ADD, ISD::FADD}, true);
  if (!Rdx)
    return SDValue();

  SDValue Index = ExtElt->getOperand(1);
  assert(isNullConstant(Index) &&
         "Reduction doesn't end in an extract from index 0");

  EVT VT = ExtElt->getValueType(0);
  EVT VecVT = Rdx.getValueType();
  if (VecVT.getScalarType() != VT)
    return SDValue();

  SDLoc DL(ExtElt);

  // vXi8 reduction - sub 128-bit vector.
  if (VecVT == MVT::v4i8 || VecVT == MVT::v8i8) {
    if (VecVT == MVT::v4i8) {
      // Pad with zero.
      if (Subtarget.hasSSE41()) {
        Rdx = DAG.getBitcast(MVT::i32, Rdx);
        Rdx = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, MVT::v4i32,
                          DAG.getConstant(0, DL, MVT::v4i32), Rdx,
                          DAG.getIntPtrConstant(0, DL));
        Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
      } else {
        Rdx = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i8, Rdx,
                          DAG.getConstant(0, DL, VecVT));
      }
    }
    if (Rdx.getValueType() == MVT::v8i8) {
      // Pad with undef.
      Rdx = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, Rdx,
                        DAG.getUNDEF(MVT::v8i8));
    }
    Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
                      DAG.getConstant(0, DL, MVT::v16i8));
    Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
  }

  // Must be a >=128-bit vector with pow2 elements.
  if ((VecVT.getSizeInBits() % 128) != 0 ||
      !isPowerOf2_32(VecVT.getVectorNumElements()))
    return SDValue();

  // vXi8 reduction - sum lo/hi halves then use PSADBW.
  if (VT == MVT::i8) {
    while (Rdx.getValueSizeInBits() > 128) {
      SDValue Lo, Hi;
      std::tie(Lo, Hi) = splitVector(Rdx, DAG, DL);
      VecVT = Lo.getValueType();
      Rdx = DAG.getNode(ISD::ADD, DL, VecVT, Lo, Hi);
    }
    assert(VecVT == MVT::v16i8 && "v16i8 reduction expected");

    SDValue Hi = DAG.getVectorShuffle(
        MVT::v16i8, DL, Rdx, Rdx,
        {8, 9, 10, 11, 12, 13, 14, 15, -1, -1, -1, -1, -1, -1, -1, -1});
    Rdx = DAG.getNode(ISD::ADD, DL, MVT::v16i8, Rdx, Hi);
    Rdx = DAG.getNode(X86ISD::PSADBW, DL, MVT::v2i64, Rdx,
                      getZeroVector(MVT::v16i8, Subtarget, DAG, DL));
    Rdx = DAG.getBitcast(MVT::v16i8, Rdx);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
  }

  // Only use (F)HADD opcodes if they aren't microcoded or when it minimizes
  // codesize.
  if (!shouldUseHorizontalOp(true, DAG, Subtarget))
    return SDValue();

  unsigned HorizOpcode = Opc == ISD::ADD ? X86ISD::HADD : X86ISD::FHADD;

  // 256-bit horizontal instructions operate on 128-bit chunks rather than
  // across the whole vector, so we need an extract + hop preliminary stage.
  // This is the only step where the operands of the hop are not the same value.
  // TODO: We could extend this to handle 512-bit or even longer vectors.
  if (((VecVT == MVT::v16i16 || VecVT == MVT::v8i32) && Subtarget.hasSSSE3()) ||
      ((VecVT == MVT::v8f32 || VecVT == MVT::v4f64) && Subtarget.hasSSE3())) {
    unsigned NumElts = VecVT.getVectorNumElements();
    SDValue Hi = extract128BitVector(Rdx, NumElts / 2, DAG, DL);
    SDValue Lo = extract128BitVector(Rdx, 0, DAG, DL);
    Rdx = DAG.getNode(HorizOpcode, DL, Lo.getValueType(), Hi, Lo);
    VecVT = Rdx.getValueType();
  }
  if (!((VecVT == MVT::v8i16 || VecVT == MVT::v4i32) && Subtarget.hasSSSE3()) &&
      !((VecVT == MVT::v4f32 || VecVT == MVT::v2f64) && Subtarget.hasSSE3()))
    return SDValue();

  // extract (add (shuf X), X), 0 --> extract (hadd X, X), 0
  unsigned ReductionSteps = Log2_32(VecVT.getVectorNumElements());
  for (unsigned i = 0; i != ReductionSteps; ++i)
    Rdx = DAG.getNode(HorizOpcode, DL, VecVT, Rdx, Rdx);

  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, Rdx, Index);
}

/// Detect vector gather/scatter index generation and convert it from being a
/// bunch of shuffles and extracts into a somewhat faster sequence.
/// For i686, the best sequence is apparently storing the value and loading
/// scalars back, while for x64 we should use 64-bit extracts and shifts.
static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
                                       TargetLowering::DAGCombinerInfo &DCI,
                                       const X86Subtarget &Subtarget) {
  if (SDValue NewOp = combineExtractWithShuffle(N, DAG, DCI, Subtarget))
    return NewOp;

  SDValue InputVector = N->getOperand(0);
  SDValue EltIdx = N->getOperand(1);
  auto *CIdx = dyn_cast<ConstantSDNode>(EltIdx);

  EVT SrcVT = InputVector.getValueType();
  EVT VT = N->getValueType(0);
  SDLoc dl(InputVector);
  bool IsPextr = N->getOpcode() != ISD::EXTRACT_VECTOR_ELT;
  unsigned NumSrcElts = SrcVT.getVectorNumElements();

  if (CIdx && CIdx->getAPIntValue().uge(NumSrcElts))
    return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);

  // Integer Constant Folding.
  if (CIdx && VT.isInteger()) {
    APInt UndefVecElts;
    SmallVector<APInt, 16> EltBits;
    unsigned VecEltBitWidth = SrcVT.getScalarSizeInBits();
    if (getTargetConstantBitsFromNode(InputVector, VecEltBitWidth, UndefVecElts,
                                      EltBits, true, false)) {
      uint64_t Idx = CIdx->getZExtValue();
      if (UndefVecElts[Idx])
        return IsPextr ? DAG.getConstant(0, dl, VT) : DAG.getUNDEF(VT);
      return DAG.getConstant(EltBits[Idx].zextOrSelf(VT.getScalarSizeInBits()),
                             dl, VT);
    }
  }

  if (IsPextr) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (TLI.SimplifyDemandedBits(
            SDValue(N, 0), APInt::getAllOnesValue(VT.getSizeInBits()), DCI))
      return SDValue(N, 0);

    // PEXTR*(PINSR*(v, s, c), c) -> s (with implicit zext handling).
    if ((InputVector.getOpcode() == X86ISD::PINSRB ||
         InputVector.getOpcode() == X86ISD::PINSRW) &&
        InputVector.getOperand(2) == EltIdx) {
      assert(SrcVT == InputVector.getOperand(0).getValueType() &&
             "Vector type mismatch");
      SDValue Scl = InputVector.getOperand(1);
      Scl = DAG.getNode(ISD::TRUNCATE, dl, SrcVT.getScalarType(), Scl);
      return DAG.getZExtOrTrunc(Scl, dl, VT);
    }

    // TODO - Remove this once we can handle the implicit zero-extension of
    // X86ISD::PEXTRW/X86ISD::PEXTRB in combineHorizontalPredicateResult and
    // combineBasicSADPattern.
    return SDValue();
  }

  // Detect mmx extraction of all bits as an i64. It works better as a bitcast.
  if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
      VT == MVT::i64 && SrcVT == MVT::v1i64 && isNullConstant(EltIdx)) {
    SDValue MMXSrc = InputVector.getOperand(0);

    // The bitcast source is a direct mmx result.
    if (MMXSrc.getValueType() == MVT::x86mmx)
      return DAG.getBitcast(VT, InputVector);
  }

  // Detect mmx to i32 conversion through a v2i32 elt extract.
  if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
      VT == MVT::i32 && SrcVT == MVT::v2i32 && isNullConstant(EltIdx)) {
    SDValue MMXSrc = InputVector.getOperand(0);

    // The bitcast source is a direct mmx result.
    if (MMXSrc.getValueType() == MVT::x86mmx)
      return DAG.getNode(X86ISD::MMX_MOVD2W, dl, MVT::i32, MMXSrc);
  }

  // Check whether this extract is the root of a sum of absolute differences
  // pattern. This has to be done here because we really want it to happen
  // pre-legalization.
  if (SDValue SAD = combineBasicSADPattern(N, DAG, Subtarget))
    return SAD;

  // Attempt to replace an all_of/any_of horizontal reduction with a MOVMSK.
  if (SDValue Cmp = combineHorizontalPredicateResult(N, DAG, Subtarget))
    return Cmp;

  // Attempt to replace min/max v8i16/v16i8 reductions with PHMINPOSUW.
  if (SDValue MinMax = combineHorizontalMinMaxResult(N, DAG, Subtarget))
    return MinMax;

  if (SDValue V = combineReductionToHorizontal(N, DAG, Subtarget))
    return V;

  if (SDValue V = scalarizeExtEltFP(N, DAG))
    return V;

  // Attempt to extract an i1 element by using MOVMSK to extract the signbits
  // and then testing the relevant element.
  //
  // Note that we only combine extracts on the *same* result number, i.e.
  //   t0 = merge_values a0, a1, a2, a3
  //   i1 = extract_vector_elt t0, Constant:i64<2>
  //   i1 = extract_vector_elt t0, Constant:i64<3>
  // but not
  //   i1 = extract_vector_elt t0:1, Constant:i64<2>
  // since the latter would need its own MOVMSK.
  if (CIdx && SrcVT.getScalarType() == MVT::i1) {
    SmallVector<SDNode *, 16> BoolExtracts;
    unsigned ResNo = InputVector.getResNo();
    auto IsBoolExtract = [&BoolExtracts, &ResNo](SDNode *Use) {
      if (Use->getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
          isa<ConstantSDNode>(Use->getOperand(1)) &&
          Use->getOperand(0).getResNo() == ResNo &&
          Use->getValueType(0) == MVT::i1) {
        BoolExtracts.push_back(Use);
        return true;
      }
      return false;
    };
    if (all_of(InputVector->uses(), IsBoolExtract) &&
        BoolExtracts.size() > 1) {
      EVT BCVT = EVT::getIntegerVT(*DAG.getContext(), NumSrcElts);
      if (SDValue BC =
              combineBitcastvxi1(DAG, BCVT, InputVector, dl, Subtarget)) {
        for (SDNode *Use : BoolExtracts) {
          // extractelement vXi1 X, MaskIdx --> ((movmsk X) & Mask) == Mask
          unsigned MaskIdx = Use->getConstantOperandVal(1);
          APInt MaskBit = APInt::getOneBitSet(NumSrcElts, MaskIdx);
          SDValue Mask = DAG.getConstant(MaskBit, dl, BCVT);
          SDValue Res = DAG.getNode(ISD::AND, dl, BCVT, BC, Mask);
          Res = DAG.getSetCC(dl, MVT::i1, Res, Mask, ISD::SETEQ);
          DCI.CombineTo(Use, Res);
        }
        return SDValue(N, 0);
      }
    }
  }

  return SDValue();
}

/// If a vector select has an operand that is -1 or 0, try to simplify the
/// select to a bitwise logic operation.
/// TODO: Move to DAGCombiner, possibly using TargetLowering::hasAndNot()?
static SDValue
combineVSelectWithAllOnesOrZeros(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const X86Subtarget &Subtarget) {
  SDValue Cond = N->getOperand(0);
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);
  EVT VT = LHS.getValueType();
  EVT CondVT = Cond.getValueType();
  SDLoc DL(N);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (N->getOpcode() != ISD::VSELECT)
    return SDValue();

  assert(CondVT.isVector() && "Vector select expects a vector selector!");

  // TODO: Use isNullOrNullSplat() to distinguish constants with undefs?
  // TODO: Can we assert that both operands are not zeros (because that should
  //       get simplified at node creation time)?
  bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
  bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());

  // If both inputs are 0/undef, create a complete zero vector.
  // FIXME: As noted above this should be handled by DAGCombiner/getNode.
  if (TValIsAllZeros && FValIsAllZeros) {
    if (VT.isFloatingPoint())
      return DAG.getConstantFP(0.0, DL, VT);
    return DAG.getConstant(0, DL, VT);
  }

  // To use the condition operand as a bitwise mask, it must have elements that
  // are the same size as the select elements. I.e., the condition operand must
  // have already been promoted from the IR select condition type <N x i1>.
  // Don't check if the types themselves are equal because that excludes
  // vector floating-point selects.
  if (CondVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
    return SDValue();

  // Try to invert the condition if true value is not all 1s and false value is
  // not all 0s. Only do this if the condition has one use.
  bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
  if (!TValIsAllOnes && !FValIsAllZeros && Cond.hasOneUse() &&
      // Check if the selector will be produced by CMPP*/PCMP*.
      Cond.getOpcode() == ISD::SETCC &&
      // Check if SETCC has already been promoted.
      TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(), VT) ==
          CondVT) {
    bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());

    if (TValIsAllZeros || FValIsAllOnes) {
      SDValue CC = Cond.getOperand(2);
      ISD::CondCode NewCC = ISD::getSetCCInverse(
          cast<CondCodeSDNode>(CC)->get(), Cond.getOperand(0).getValueType());
      Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
                          NewCC);
      std::swap(LHS, RHS);
      TValIsAllOnes = FValIsAllOnes;
      FValIsAllZeros = TValIsAllZeros;
    }
  }

  // Cond value must be 'sign splat' to be converted to a logical op.
  if (DAG.ComputeNumSignBits(Cond) != CondVT.getScalarSizeInBits())
    return SDValue();

  // vselect Cond, 111..., 000... -> Cond
  if (TValIsAllOnes && FValIsAllZeros)
    return DAG.getBitcast(VT, Cond);

  if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(CondVT))
    return SDValue();

  // vselect Cond, 111..., X -> or Cond, X
  if (TValIsAllOnes) {
    SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
    SDValue Or = DAG.getNode(ISD::OR, DL, CondVT, Cond, CastRHS);
    return DAG.getBitcast(VT, Or);
  }

  // vselect Cond, X, 000... -> and Cond, X
  if (FValIsAllZeros) {
    SDValue CastLHS = DAG.getBitcast(CondVT, LHS);
    SDValue And = DAG.getNode(ISD::AND, DL, CondVT, Cond, CastLHS);
    return DAG.getBitcast(VT, And);
  }

  // vselect Cond, 000..., X -> andn Cond, X
  if (TValIsAllZeros) {
    SDValue CastRHS = DAG.getBitcast(CondVT, RHS);
    SDValue AndN;
    // The canonical form differs for i1 vectors - X86ISD::ANDNP is not used.
    if (CondVT.getScalarType() == MVT::i1)
      AndN = DAG.getNode(ISD::AND, DL, CondVT, DAG.getNOT(DL, Cond, CondVT),
                         CastRHS);
    else
      AndN = DAG.getNode(X86ISD::ANDNP, DL, CondVT, Cond, CastRHS);
    return DAG.getBitcast(VT, AndN);
  }

  return SDValue();
}

/// If both arms of a vector select are concatenated vectors, split the select,
/// and concatenate the result to eliminate a wide (256-bit) vector instruction:
/// vselect Cond, (concat T0, T1), (concat F0, F1) -->
/// concat (vselect (split Cond), T0, F0), (vselect (split Cond), T1, F1)
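/// For example (illustrative): a v8i32 vselect whose arms are both
/// concat_vectors can be split into two v4i32 selects, each of which maps to
/// a single 128-bit blend.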
static SDValue narrowVectorSelect(SDNode *N, SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {
  unsigned Opcode = N->getOpcode();
  if (Opcode != X86ISD::BLENDV && Opcode != ISD::VSELECT)
    return SDValue();

  // TODO: Split 512-bit vectors too?
  EVT VT = N->getValueType(0);
  if (!VT.is256BitVector())
    return SDValue();

  // TODO: Split as long as any 2 of the 3 operands are concatenated?
  SDValue Cond = N->getOperand(0);
  SDValue TVal = N->getOperand(1);
  SDValue FVal = N->getOperand(2);
  SmallVector<SDValue, 4> CatOpsT, CatOpsF;
  if (!TVal.hasOneUse() || !FVal.hasOneUse() ||
      !collectConcatOps(TVal.getNode(), CatOpsT) ||
      !collectConcatOps(FVal.getNode(), CatOpsF))
    return SDValue();

  auto makeBlend = [Opcode](SelectionDAG &DAG, const SDLoc &DL,
                            ArrayRef<SDValue> Ops) {
    return DAG.getNode(Opcode, DL, Ops[1].getValueType(), Ops);
  };
  return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { Cond, TVal, FVal },
                          makeBlend, /*CheckBWI*/ false);
}

static SDValue combineSelectOfTwoConstants(SDNode *N, SelectionDAG &DAG) {
  SDValue Cond = N->getOperand(0);
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);

  auto *TrueC = dyn_cast<ConstantSDNode>(LHS);
  auto *FalseC = dyn_cast<ConstantSDNode>(RHS);
  if (!TrueC || !FalseC)
    return SDValue();

  // Don't do this for crazy integer types.
  EVT VT = N->getValueType(0);
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  // We're going to use the condition bit in math or logic ops. We could allow
  // this with a wider condition value (post-legalization it becomes an i8),
  // but if nothing is creating selects that late, it doesn't matter.
  if (Cond.getValueType() != MVT::i1)
    return SDValue();

  // A power-of-2 multiply is just a shift. LEA also cheaply handles multiply by
  // 3, 5, or 9 with i32/i64, so those get transformed too.
  // TODO: For constants that overflow or do not differ by power-of-2 or small
  // multiplier, convert to 'and' + 'add'.
  const APInt &TrueVal = TrueC->getAPIntValue();
  const APInt &FalseVal = FalseC->getAPIntValue();
  bool OV;
  APInt Diff = TrueVal.ssub_ov(FalseVal, OV);
  if (OV)
    return SDValue();

  APInt AbsDiff = Diff.abs();
  if (AbsDiff.isPowerOf2() ||
      ((VT == MVT::i32 || VT == MVT::i64) &&
       (AbsDiff == 3 || AbsDiff == 5 || AbsDiff == 9))) {
    SDLoc DL(N);

    // We need a positive multiplier constant for shift/LEA codegen. The 'not'
    // of the condition can usually be folded into a compare predicate, but even
    // without that, the sequence should be cheaper than a CMOV alternative.
    if (TrueVal.slt(FalseVal)) {
      Cond = DAG.getNOT(DL, Cond, MVT::i1);
      std::swap(TrueC, FalseC);
    }

    // select Cond, TC, FC --> (zext(Cond) * (TC - FC)) + FC
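    // For example (illustrative): select Cond, 7, 3 becomes
    //   (zext(Cond) * 4) + 3
    // i.e. a shift-by-2 and an add, with no branch or CMOV.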
    SDValue R = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Cond);

    // Multiply condition by the difference if non-one.
    if (!AbsDiff.isOneValue())
      R = DAG.getNode(ISD::MUL, DL, VT, R, DAG.getConstant(AbsDiff, DL, VT));

    // Add the base if non-zero.
    if (!FalseC->isNullValue())
      R = DAG.getNode(ISD::ADD, DL, VT, R, SDValue(FalseC, 0));

    return R;
  }

  return SDValue();
}

/// If this is a *dynamic* select (non-constant condition) and we can match
/// this node with one of the variable blend instructions, restructure the
/// condition so that blends can use the high (sign) bit of each element.
/// This function will also call SimplifyDemandedBits on already created
/// BLENDV to perform additional simplifications.
static SDValue combineVSelectToBLENDV(SDNode *N, SelectionDAG &DAG,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const X86Subtarget &Subtarget) {
  SDValue Cond = N->getOperand(0);
  if ((N->getOpcode() != ISD::VSELECT &&
       N->getOpcode() != X86ISD::BLENDV) ||
      ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
    return SDValue();

  // Don't optimize before the condition has been transformed to a legal type
  // and don't ever optimize vector selects that map to AVX512 mask-registers.
  unsigned BitWidth = Cond.getScalarValueSizeInBits();
  if (BitWidth < 8 || BitWidth > 64)
    return SDValue();

  // We can only handle the cases where VSELECT is directly legal on the
  // subtarget. We custom lower VSELECT nodes with constant conditions and
  // this makes it hard to see whether a dynamic VSELECT will correctly
  // lower, so we both check the operation's status and explicitly handle the
  // cases where a *dynamic* blend will fail even though a constant-condition
  // blend could be custom lowered.
  // FIXME: We should find a better way to handle this class of problems.
  // Potentially, we should combine constant-condition vselect nodes
  // pre-legalization into shuffles and not mark as many types as custom
  // lowered.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT VT = N->getValueType(0);
  if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
    return SDValue();
  // FIXME: We don't support i16-element blends currently. We could and
  // should support them by making *all* the bits in the condition be set
  // rather than just the high bit and using an i8-element blend.
  if (VT.getVectorElementType() == MVT::i16)
    return SDValue();
  // Dynamic blending was only available from SSE4.1 onward.
  if (VT.is128BitVector() && !Subtarget.hasSSE41())
    return SDValue();
  // Byte blends are only available in AVX2.
  if (VT == MVT::v32i8 && !Subtarget.hasAVX2())
    return SDValue();
  // There are no 512-bit blend instructions that use sign bits.
  if (VT.is512BitVector())
    return SDValue();

  auto OnlyUsedAsSelectCond = [](SDValue Cond) {
    for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end();
         UI != UE; ++UI)
      if ((UI->getOpcode() != ISD::VSELECT &&
           UI->getOpcode() != X86ISD::BLENDV) ||
          UI.getOperandNo() != 0)
        return false;

    return true;
  };

  APInt DemandedBits(APInt::getSignMask(BitWidth));

  if (OnlyUsedAsSelectCond(Cond)) {
    KnownBits Known;
    TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                          !DCI.isBeforeLegalizeOps());
    if (!TLI.SimplifyDemandedBits(Cond, DemandedBits, Known, TLO, 0, true))
      return SDValue();

    // If we changed the computation somewhere in the DAG, this change will
    // affect all users of Cond. Update all the nodes so that we do not use
    // the generic VSELECT anymore. Otherwise, we may perform wrong
    // optimizations as we messed with the actual expectation for the vector
    // boolean values.
    for (SDNode *U : Cond->uses()) {
      if (U->getOpcode() == X86ISD::BLENDV)
        continue;

      SDValue SB = DAG.getNode(X86ISD::BLENDV, SDLoc(U), U->getValueType(0),
                               Cond, U->getOperand(1), U->getOperand(2));
      DAG.ReplaceAllUsesOfValueWith(SDValue(U, 0), SB);
      DCI.AddToWorklist(U);
    }
    DCI.CommitTargetLoweringOpt(TLO);
    return SDValue(N, 0);
  }

  // Otherwise we can still at least try to simplify multiple use bits.
  if (SDValue V = TLI.SimplifyMultipleUseDemandedBits(Cond, DemandedBits, DAG))
    return DAG.getNode(X86ISD::BLENDV, SDLoc(N), N->getValueType(0), V,
                       N->getOperand(1), N->getOperand(2));

  return SDValue();
}

// Try to match:
//   (or (and (M, (sub 0, X)), (pandn M, X)))
// which is a special case of:
//   (select M, (sub 0, X), X)
// Per:
// http://graphics.stanford.edu/~seander/bithacks.html#ConditionalNegate
// We know that, if fNegate is 0 or 1:
//   (fNegate ? -v : v) == ((v ^ -fNegate) + fNegate)
//
// Here, we have a mask, M (all 1s or 0), and, similarly, we know that:
//   ((M & 1) ? -X : X) == ((X ^ -(M & 1)) + (M & 1))
//   (    M  ? -X : X) == ((X ^ M       ) + (M & 1))
// This lets us transform our vselect to:
//   (add (xor X, M), (and M, 1))
// And further to:
//   (sub (xor X, M), M)
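// Worked example (illustrative): with X = 5 and M = -1 (all 1s),
//   (5 ^ -1) - (-1) = -6 + 1 = -5
// and with M = 0,
//   (5 ^ 0) - 0 = 5
// so the sub form computes the conditional negation in both cases.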
static SDValue combineLogicBlendIntoConditionalNegate(
    EVT VT, SDValue Mask, SDValue X, SDValue Y, const SDLoc &DL,
    SelectionDAG &DAG, const X86Subtarget &Subtarget) {
  EVT MaskVT = Mask.getValueType();
  assert(MaskVT.isInteger() &&
         DAG.ComputeNumSignBits(Mask) == MaskVT.getScalarSizeInBits() &&
         "Mask must be zero/all-bits");

  if (X.getValueType() != MaskVT || Y.getValueType() != MaskVT)
    return SDValue();
  if (!DAG.getTargetLoweringInfo().isOperationLegal(ISD::SUB, MaskVT))
    return SDValue();

  auto IsNegV = [](SDNode *N, SDValue V) {
    return N->getOpcode() == ISD::SUB && N->getOperand(1) == V &&
           ISD::isBuildVectorAllZeros(N->getOperand(0).getNode());
  };

  SDValue V;
  if (IsNegV(Y.getNode(), X))
    V = X;
  else if (IsNegV(X.getNode(), Y))
    V = Y;
  else
    return SDValue();

  SDValue SubOp1 = DAG.getNode(ISD::XOR, DL, MaskVT, V, Mask);
  SDValue SubOp2 = Mask;

  // If the negate was on the false side of the select, then
  // the operands of the SUB need to be swapped. PR 27251.
  // This is because the pattern being matched above is
  // (vselect M, (sub 0, X), X) -> (sub (xor X, M), M)
  // but if the pattern matched was
  // (vselect M, X, (sub 0, X)), that is really negation of the pattern
  // above, -(vselect M, (sub 0, X), X), and therefore the replacement
  // pattern also needs to be a negation of the replacement pattern above.
  // And -(sub X, Y) is just sub (Y, X), so swapping the operands of the
  // sub accomplishes the negation of the replacement pattern.
  if (V == Y)
    std::swap(SubOp1, SubOp2);

  SDValue Res = DAG.getNode(ISD::SUB, DL, MaskVT, SubOp1, SubOp2);
  return DAG.getBitcast(VT, Res);
}
39867 /// Do target-specific dag combines on SELECT and VSELECT nodes.
39868 static SDValue combineSelect(SDNode *N, SelectionDAG &DAG,
39869 TargetLowering::DAGCombinerInfo &DCI,
39870 const X86Subtarget &Subtarget) {
39872 SDValue Cond = N->getOperand(0);
39873 SDValue LHS = N->getOperand(1);
39874 SDValue RHS = N->getOperand(2);
39876 // Try simplification again because we use this function to optimize
39877 // BLENDV nodes that are not handled by the generic combiner.
39878 if (SDValue V = DAG.simplifySelect(Cond, LHS, RHS))
39881 EVT VT = LHS.getValueType();
39882 EVT CondVT = Cond.getValueType();
39883 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
39884 bool CondConstantVector = ISD::isBuildVectorOfConstantSDNodes(Cond.getNode());
39886 // Attempt to combine (select M, (sub 0, X), X) -> (sub (xor X, M), M).
39887 // Limit this to cases of non-constant masks that createShuffleMaskFromVSELECT
39888 // can't catch, plus vXi8 cases where we'd likely end up with BLENDV.
39889 if (CondVT.isVector() && CondVT.isInteger() &&
39890 CondVT.getScalarSizeInBits() == VT.getScalarSizeInBits() &&
39891 (!CondConstantVector || CondVT.getScalarType() == MVT::i8) &&
39892 DAG.ComputeNumSignBits(Cond) == CondVT.getScalarSizeInBits())
39893 if (SDValue V = combineLogicBlendIntoConditionalNegate(VT, Cond, RHS, LHS,
39894 DL, DAG, Subtarget))
39897 // Convert vselects with constant condition into shuffles.
39898 if (CondConstantVector && DCI.isBeforeLegalizeOps()) {
39899 SmallVector<int, 64> Mask;
39900 if (createShuffleMaskFromVSELECT(Mask, Cond))
39901 return DAG.getVectorShuffle(VT, DL, LHS, RHS, Mask);
39904 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
39905 // instructions match the semantics of the common C idiom x<y?x:y but not
39906 // x<=y?x:y, because of how they handle negative zero (which can be
39907 // ignored in unsafe-math mode).
39908 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
  if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
      VT != MVT::f80 && VT != MVT::f128 &&
      (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
      (Subtarget.hasSSE2() ||
       (Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

    unsigned Opcode = 0;
    // Check for x CC y ? x : y.
    if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
        DAG.isEqualTo(RHS, Cond.getOperand(1))) {
      switch (CC) {
      default: break;
      case ISD::SETULT:
        // Converting this to a min would handle NaNs incorrectly, and swapping
        // the operands would cause it to handle comparisons between positive
        // and negative zero incorrectly.
        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
          if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
              !(DAG.isKnownNeverZeroFloat(LHS) ||
                DAG.isKnownNeverZeroFloat(RHS)))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETOLE:
        // Converting this to a min would handle comparisons between positive
        // and negative zero incorrectly.
        if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
            !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
          break;
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETULE:
        // Converting this to a min would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
        std::swap(LHS, RHS);
        LLVM_FALLTHROUGH;
      case ISD::SETOLT:
      case ISD::SETLT:
      case ISD::SETLE:
        Opcode = X86ISD::FMIN;
        break;

      case ISD::SETOGE:
        // Converting this to a max would handle comparisons between positive
        // and negative zero incorrectly.
        if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
            !DAG.isKnownNeverZeroFloat(LHS) && !DAG.isKnownNeverZeroFloat(RHS))
          break;
        Opcode = X86ISD::FMAX;
        break;
      case ISD::SETUGT:
        // Converting this to a max would handle NaNs incorrectly, and swapping
        // the operands would cause it to handle comparisons between positive
        // and negative zero incorrectly.
        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
          if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
              !(DAG.isKnownNeverZeroFloat(LHS) ||
                DAG.isKnownNeverZeroFloat(RHS)))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMAX;
        break;
      case ISD::SETUGE:
        // Converting this to a max would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
        std::swap(LHS, RHS);
        LLVM_FALLTHROUGH;
      case ISD::SETOGT:
      case ISD::SETGT:
      case ISD::SETGE:
        Opcode = X86ISD::FMAX;
        break;
      }
    // Check for x CC y ? y : x -- a min/max with reversed arms.
    } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
               DAG.isEqualTo(RHS, Cond.getOperand(0))) {
      switch (CC) {
      default: break;
      case ISD::SETOGE:
        // Converting this to a min would handle comparisons between positive
        // and negative zero incorrectly, and swapping the operands would
        // cause it to handle NaNs incorrectly.
        if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
            !(DAG.isKnownNeverZeroFloat(LHS) ||
              DAG.isKnownNeverZeroFloat(RHS))) {
          if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETUGT:
        // Converting this to a min would handle NaNs incorrectly.
        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
          break;
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETUGE:
        // Converting this to a min would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
        std::swap(LHS, RHS);
        LLVM_FALLTHROUGH;
      case ISD::SETOGT:
      case ISD::SETGT:
      case ISD::SETGE:
        Opcode = X86ISD::FMIN;
        break;

      case ISD::SETULT:
        // Converting this to a max would handle NaNs incorrectly.
        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
          break;
        Opcode = X86ISD::FMAX;
        break;
      case ISD::SETOLE:
        // Converting this to a max would handle comparisons between positive
        // and negative zero incorrectly, and swapping the operands would
        // cause it to handle NaNs incorrectly.
        if (!DAG.getTarget().Options.NoSignedZerosFPMath &&
            !DAG.isKnownNeverZeroFloat(LHS) &&
            !DAG.isKnownNeverZeroFloat(RHS)) {
          if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMAX;
        break;
      case ISD::SETULE:
        // Converting this to a max would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
        std::swap(LHS, RHS);
        LLVM_FALLTHROUGH;
      case ISD::SETOLT:
      case ISD::SETLT:
      case ISD::SETLE:
        Opcode = X86ISD::FMAX;
        break;
      }
    }

    if (Opcode)
      return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
  }
  // Some mask scalar intrinsics rely on checking if only one bit is set
  // and implement it in C code like this:
  // A[0] = (U & 1) ? A[0] : W[0];
  // This creates some redundant instructions that break pattern matching.
  // fold (select (setcc (and (X, 1), 0, seteq), Y, Z)) -> select(and(X,1),Z,Y)
  if (Subtarget.hasAVX512() && N->getOpcode() == ISD::SELECT &&
      Cond.getOpcode() == ISD::SETCC && (VT == MVT::f32 || VT == MVT::f64)) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
    SDValue AndNode = Cond.getOperand(0);
    if (AndNode.getOpcode() == ISD::AND && CC == ISD::SETEQ &&
        isNullConstant(Cond.getOperand(1)) &&
        isOneConstant(AndNode.getOperand(1))) {
      // LHS and RHS swapped due to
      // setcc outputting 1 when AND resulted in 0 and vice versa.
      AndNode = DAG.getZExtOrTrunc(AndNode, DL, MVT::i8);
      return DAG.getNode(ISD::SELECT, DL, VT, AndNode, RHS, LHS);
    }
  }
  // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
  // lowering on KNL. In this case we convert it to
  // v16i8 (select v16i8, v16i8, v16i8) and use AVX instruction.
  // The same situation holds for all vectors of i8 and i16 without BWI.
  // Make sure we extend these even before type legalization gets a chance to
  // split wide vectors.
  // Since SKX these selects have a proper lowering.
  if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && CondVT.isVector() &&
      CondVT.getVectorElementType() == MVT::i1 &&
      (VT.getVectorElementType() == MVT::i8 ||
       VT.getVectorElementType() == MVT::i16)) {
    Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Cond);
    return DAG.getNode(N->getOpcode(), DL, VT, Cond, LHS, RHS);
  }
  // AVX512 - Extend select with zero to merge with target shuffle.
  // select(mask, extract_subvector(shuffle(x)), zero) -->
  // extract_subvector(select(insert_subvector(mask), shuffle(x), zero))
  // TODO - support non target shuffles as well.
  if (Subtarget.hasAVX512() && CondVT.isVector() &&
      CondVT.getVectorElementType() == MVT::i1) {
    auto SelectableOp = [&TLI](SDValue Op) {
      return Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
             isTargetShuffle(Op.getOperand(0).getOpcode()) &&
             isNullConstant(Op.getOperand(1)) &&
             TLI.isTypeLegal(Op.getOperand(0).getValueType()) &&
             Op.hasOneUse() && Op.getOperand(0).hasOneUse();
    };

    bool SelectableLHS = SelectableOp(LHS);
    bool SelectableRHS = SelectableOp(RHS);
    bool ZeroLHS = ISD::isBuildVectorAllZeros(LHS.getNode());
    bool ZeroRHS = ISD::isBuildVectorAllZeros(RHS.getNode());

    if ((SelectableLHS && ZeroRHS) || (SelectableRHS && ZeroLHS)) {
      EVT SrcVT = SelectableLHS ? LHS.getOperand(0).getValueType()
                                : RHS.getOperand(0).getValueType();
      unsigned NumSrcElts = SrcVT.getVectorNumElements();
      EVT SrcCondVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumSrcElts);
      LHS = insertSubVector(DAG.getUNDEF(SrcVT), LHS, 0, DAG, DL,
                            VT.getSizeInBits());
      RHS = insertSubVector(DAG.getUNDEF(SrcVT), RHS, 0, DAG, DL,
                            VT.getSizeInBits());
      Cond = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, SrcCondVT,
                         DAG.getUNDEF(SrcCondVT), Cond,
                         DAG.getIntPtrConstant(0, DL));
      SDValue Res = DAG.getSelect(DL, SrcVT, Cond, LHS, RHS);
      return extractSubVector(Res, 0, DAG, DL, VT.getSizeInBits());
    }
  }
  if (SDValue V = combineSelectOfTwoConstants(N, DAG))
    return V;
  // Canonicalize max and min:
  // (x > y) ? x : y -> (x >= y) ? x : y
  // (x < y) ? x : y -> (x <= y) ? x : y
  // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
  // the need for an extra compare
  // against zero. e.g.
  // (x - y) > 0 : (x - y) ? 0 -> (x - y) >= 0 : (x - y) ? 0
  //
  // testl  %edi, %edi
  // movl   $0, %eax
  // cmovgl %edi, %eax
  // =>
  // xorl   %eax, %eax
  // testl  %edi, %edi
  // cmovsl %eax, %edi
  if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
      Cond.hasOneUse() &&
      DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
      DAG.isEqualTo(RHS, Cond.getOperand(1))) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
    switch (CC) {
    default: break;
    case ISD::SETLT:
    case ISD::SETGT: {
      ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
      Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
                          Cond.getOperand(0), Cond.getOperand(1), NewCC);
      return DAG.getSelect(DL, VT, Cond, LHS, RHS);
    }
    }
  }
  // Match VSELECTs into subs with unsigned saturation.
  if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
      // psubus is available in SSE2 for i8 and i16 vectors.
      Subtarget.hasSSE2() && VT.getVectorNumElements() >= 2 &&
      isPowerOf2_32(VT.getVectorNumElements()) &&
      (VT.getVectorElementType() == MVT::i8 ||
       VT.getVectorElementType() == MVT::i16)) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

    // Check if one of the arms of the VSELECT is a zero vector. If it's on the
    // left side invert the predicate to simplify logic below.
    SDValue Other;
    if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
      Other = RHS;
      CC = ISD::getSetCCInverse(CC, VT.getVectorElementType());
    } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
      Other = LHS;
    }

    if (Other.getNode() && Other->getNumOperands() == 2 &&
        Other->getOperand(0) == Cond.getOperand(0)) {
      SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
      SDValue CondRHS = Cond->getOperand(1);

      // Look for a general sub with unsigned saturation first.
      // x >= y ? x-y : 0 --> subus x, y
      // x >  y ? x-y : 0 --> subus x, y
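      // e.g. (illustrative) for v8i16: (vselect (setugt x, y), (sub x, y), 0)
      // becomes a single PSUBUSW, since psubus already clamps x-y at zero.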
      if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
          Other->getOpcode() == ISD::SUB && OpRHS == CondRHS)
        return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);

      if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS)) {
        if (isa<BuildVectorSDNode>(CondRHS)) {
          // If the RHS is a constant we have to reverse the const
          // canonicalization.
          // x > C-1 ? x+-C : 0 --> subus x, C
          auto MatchUSUBSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
            return (!Op && !Cond) ||
                   (Op && Cond &&
                    Cond->getAPIntValue() == (-Op->getAPIntValue() - 1));
          };
          if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
              ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUSUBSAT,
                                        /*AllowUndefs*/ true)) {
            OpRHS = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
                                OpRHS);
            return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
          }

          // Another special case: If C was a sign bit, the sub has been
          // canonicalized into a xor.
          // FIXME: Would it be better to use computeKnownBits to determine
          //        whether it's safe to decanonicalize the xor?
          // x s< 0 ? x^C : 0 --> subus x, C
          if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
            if (CC == ISD::SETLT && Other.getOpcode() == ISD::XOR &&
                ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
                OpRHSConst->getAPIntValue().isSignMask()) {
              // Note that we have to rebuild the RHS constant here to ensure
              // we don't rely on particular values of undef lanes.
              OpRHS = DAG.getConstant(OpRHSConst->getAPIntValue(), DL, VT);
              return DAG.getNode(ISD::USUBSAT, DL, VT, OpLHS, OpRHS);
            }
          }
        }
      }
    }
  }
  // Match VSELECTs into add with unsigned saturation.
  if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
      // paddus is available in SSE2 for i8 and i16 vectors.
      Subtarget.hasSSE2() && VT.getVectorNumElements() >= 2 &&
      isPowerOf2_32(VT.getVectorNumElements()) &&
      (VT.getVectorElementType() == MVT::i8 ||
       VT.getVectorElementType() == MVT::i16)) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

    SDValue CondLHS = Cond->getOperand(0);
    SDValue CondRHS = Cond->getOperand(1);

    // Check if one of the arms of the VSELECT is a vector with all bits set.
    // If it's on the left side invert the predicate to simplify logic below.
    SDValue Other;
    if (ISD::isBuildVectorAllOnes(LHS.getNode())) {
      Other = RHS;
      CC = ISD::getSetCCInverse(CC, VT.getVectorElementType());
    } else if (ISD::isBuildVectorAllOnes(RHS.getNode())) {
      Other = LHS;
    }

    if (Other.getNode() && Other.getOpcode() == ISD::ADD) {
      SDValue OpLHS = Other.getOperand(0), OpRHS = Other.getOperand(1);

      // Canonicalize condition operands.
      if (CC == ISD::SETUGE) {
        std::swap(CondLHS, CondRHS);
        CC = ISD::SETULE;
      }

      // We can test against either of the addition operands.
      // x <= x+y ? x+y : ~0 --> addus x, y
      // x+y >= x ? x+y : ~0 --> addus x, y
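      // e.g. (illustrative) for v16i8: (vselect (setule x, (add x, y)),
      // (add x, y), -1) becomes a single PADDUSB, since paddus clamps the
      // sum at 255, exactly the all-ones arm that unsigned overflow selects.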
      if (CC == ISD::SETULE && Other == CondRHS &&
          (OpLHS == CondLHS || OpRHS == CondLHS))
        return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);

      if (isa<BuildVectorSDNode>(OpRHS) && isa<BuildVectorSDNode>(CondRHS) &&
          CondLHS == OpLHS) {
        // If the RHS is a constant we have to reverse the const
        // canonicalization.
        // x > ~C ? x+C : ~0 --> addus x, C
        auto MatchUADDSAT = [](ConstantSDNode *Op, ConstantSDNode *Cond) {
          return Cond->getAPIntValue() == ~Op->getAPIntValue();
        };
        if (CC == ISD::SETULE &&
            ISD::matchBinaryPredicate(OpRHS, CondRHS, MatchUADDSAT))
          return DAG.getNode(ISD::UADDSAT, DL, VT, OpLHS, OpRHS);
      }
    }
  }
  // Check if the first operand is all zeros and Cond type is vXi1.
  // If this is an avx512 target we can improve the use of zero masking by
  // swapping the operands and inverting the condition.
  if (N->getOpcode() == ISD::VSELECT && Cond.hasOneUse() &&
      Subtarget.hasAVX512() && CondVT.getVectorElementType() == MVT::i1 &&
      ISD::isBuildVectorAllZeros(LHS.getNode()) &&
      !ISD::isBuildVectorAllZeros(RHS.getNode())) {
    // Invert the cond to not(cond) : xor(op,allones)=not(op)
    SDValue CondNew = DAG.getNOT(DL, Cond, CondVT);
    // Vselect cond, op1, op2 = Vselect not(cond), op2, op1
    return DAG.getSelect(DL, VT, CondNew, RHS, LHS);
  }
  // Early exit check
  if (!TLI.isTypeLegal(VT))
    return SDValue();

  if (SDValue V = combineVSelectWithAllOnesOrZeros(N, DAG, DCI, Subtarget))
    return V;

  if (SDValue V = combineVSelectToBLENDV(N, DAG, DCI, Subtarget))
    return V;

  if (SDValue V = narrowVectorSelect(N, DAG, Subtarget))
    return V;

  // select(~Cond, X, Y) -> select(Cond, Y, X)
  if (CondVT.getScalarType() != MVT::i1)
    if (SDValue CondNot = IsNOT(Cond, DAG))
      return DAG.getNode(N->getOpcode(), DL, VT,
                         DAG.getBitcast(CondVT, CondNot), RHS, LHS);
  // Try to optimize vXi1 selects if both operands are either all constants or
  // bitcasts from scalar integer type. In that case we can convert the operands
  // to integer and use an integer select which will be converted to a CMOV.
  // We need to take a little bit of care to avoid creating an i64 type after
  // type legalization.
  if (N->getOpcode() == ISD::SELECT && VT.isVector() &&
      VT.getVectorElementType() == MVT::i1 &&
      (DCI.isBeforeLegalize() || (VT != MVT::v64i1 || Subtarget.is64Bit()))) {
    EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
    bool LHSIsConst = ISD::isBuildVectorOfConstantSDNodes(LHS.getNode());
    bool RHSIsConst = ISD::isBuildVectorOfConstantSDNodes(RHS.getNode());

    if ((LHSIsConst ||
         (LHS.getOpcode() == ISD::BITCAST &&
          LHS.getOperand(0).getValueType() == IntVT)) &&
        (RHSIsConst ||
         (RHS.getOpcode() == ISD::BITCAST &&
          RHS.getOperand(0).getValueType() == IntVT))) {
      if (LHSIsConst)
        LHS = combinevXi1ConstantToInteger(LHS, DAG);
      else
        LHS = LHS.getOperand(0);

      if (RHSIsConst)
        RHS = combinevXi1ConstantToInteger(RHS, DAG);
      else
        RHS = RHS.getOperand(0);

      SDValue Select = DAG.getSelect(DL, IntVT, Cond, LHS, RHS);
      return DAG.getBitcast(VT, Select);
    }
  }
  // If this is "((X & C) == 0) ? Y : Z" and C is a constant mask vector of
  // single bits, then invert the predicate and swap the select operands.
  // This can lower using a vector shift bit-hack rather than mask and compare.
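  // e.g. (illustrative) for v4i32 with C = 8 (bit 3): shifting left by 28
  // moves bit 3 into the sign bit, so 'setlt (shl X, 28), 0' tests X & 8 != 0
  // without materializing the mask-and-compare sequence.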
  if (DCI.isBeforeLegalize() && !Subtarget.hasAVX512() &&
      N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
      Cond.hasOneUse() && CondVT.getVectorElementType() == MVT::i1 &&
      Cond.getOperand(0).getOpcode() == ISD::AND &&
      isNullOrNullSplat(Cond.getOperand(1)) &&
      cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
      Cond.getOperand(0).getValueType() == VT) {
    // The 'and' mask must be composed of power-of-2 constants.
    SDValue And = Cond.getOperand(0);
    auto *C = isConstOrConstSplat(And.getOperand(1));
    if (C && C->getAPIntValue().isPowerOf2()) {
      // vselect (X & C == 0), LHS, RHS --> vselect (X & C != 0), RHS, LHS
      SDValue NotCond =
          DAG.getSetCC(DL, CondVT, And, Cond.getOperand(1), ISD::SETNE);
      return DAG.getSelect(DL, VT, NotCond, RHS, LHS);
    }

    // If we have a non-splat but still powers-of-2 mask, AVX1 can use pmulld
    // and AVX2 can use vpsllv{dq}. 8-bit lacks a proper shift or multiply.
    // 16-bit lacks a proper blendv.
    unsigned EltBitWidth = VT.getScalarSizeInBits();
    bool CanShiftBlend =
        TLI.isTypeLegal(VT) && ((Subtarget.hasAVX() && EltBitWidth == 32) ||
                                (Subtarget.hasAVX2() && EltBitWidth == 64) ||
                                (Subtarget.hasXOP()));
    if (CanShiftBlend &&
        ISD::matchUnaryPredicate(And.getOperand(1), [](ConstantSDNode *C) {
          return C->getAPIntValue().isPowerOf2();
        })) {
      // Create a left-shift constant to get the mask bits over to the sign-bit.
      SDValue Mask = And.getOperand(1);
      SmallVector<int, 32> ShlVals;
      for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) {
        auto *MaskVal = cast<ConstantSDNode>(Mask.getOperand(i));
        ShlVals.push_back(EltBitWidth - 1 -
                          MaskVal->getAPIntValue().exactLogBase2());
      }
      // vsel ((X & C) == 0), LHS, RHS --> vsel ((shl X, C') < 0), RHS, LHS
      SDValue ShlAmt = getConstVector(ShlVals, VT.getSimpleVT(), DAG, DL);
      SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And.getOperand(0), ShlAmt);
      SDValue NewCond =
          DAG.getSetCC(DL, CondVT, Shl, Cond.getOperand(1), ISD::SETLT);
      return DAG.getSelect(DL, VT, NewCond, RHS, LHS);
    }
  }

  return SDValue();
}
/// Combine:
///   (brcond/cmov/setcc .., (cmp (atomic_load_add x, 1), 0), COND_S)
/// to:
///   (brcond/cmov/setcc .., (LADD x, 1), COND_LE)
/// i.e., reusing the EFLAGS produced by the LOCKed instruction.
/// Note that this is only legal for some op/cc combinations.
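/// For example (illustrative): 'if (atomic_fetch_add(&v, 1) < 0)' tests the
/// old value with COND_S; the same predicate holds as 'new value <= 0', so we
/// can emit 'lock addl $1, (v)' and branch on COND_LE, reusing its EFLAGS.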
static SDValue combineSetCCAtomicArith(SDValue Cmp, X86::CondCode &CC,
                                       SelectionDAG &DAG,
                                       const X86Subtarget &Subtarget) {
  // This combine only operates on CMP-like nodes.
  if (!(Cmp.getOpcode() == X86ISD::CMP ||
        (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
    return SDValue();

  // Can't replace the cmp if it has more uses than the one we're looking at.
  // FIXME: We would like to be able to handle this, but would need to make sure
  // all uses were updated.
  if (!Cmp.hasOneUse())
    return SDValue();

  // This only applies to variations of the common case:
  //   (icmp slt x, 0) -> (icmp sle (add x, 1), 0)
  //   (icmp sge x, 0) -> (icmp sgt (add x, 1), 0)
  //   (icmp sle x, 0) -> (icmp slt (sub x, 1), 0)
  //   (icmp sgt x, 0) -> (icmp sge (sub x, 1), 0)
  // Using the proper condcodes (see below), overflow is checked for.
  //
  // FIXME: We can generalize both constraints:
  // - XOR/OR/AND (if they were made to survive AtomicExpand)
  // - LHS != 1
  // if the result is compared.

  SDValue CmpLHS = Cmp.getOperand(0);
  SDValue CmpRHS = Cmp.getOperand(1);

  if (!CmpLHS.hasOneUse())
    return SDValue();

  unsigned Opc = CmpLHS.getOpcode();
  if (Opc != ISD::ATOMIC_LOAD_ADD && Opc != ISD::ATOMIC_LOAD_SUB)
    return SDValue();

  SDValue OpRHS = CmpLHS.getOperand(2);
  auto *OpRHSC = dyn_cast<ConstantSDNode>(OpRHS);
  if (!OpRHSC)
    return SDValue();

  APInt Addend = OpRHSC->getAPIntValue();
  if (Opc == ISD::ATOMIC_LOAD_SUB)
    Addend = -Addend;

  auto *CmpRHSC = dyn_cast<ConstantSDNode>(CmpRHS);
  if (!CmpRHSC)
    return SDValue();

  APInt Comparison = CmpRHSC->getAPIntValue();

  // If the addend is the negation of the comparison value, then we can do
  // a full comparison by emitting the atomic arithmetic as a locked sub.
  if (Comparison == -Addend) {
    // The CC is fine, but we need to rewrite the LHS of the comparison as an
    // atomic sub.
    auto *AN = cast<AtomicSDNode>(CmpLHS.getNode());
    auto AtomicSub = DAG.getAtomic(
        ISD::ATOMIC_LOAD_SUB, SDLoc(CmpLHS), CmpLHS.getValueType(),
        /*Chain*/ CmpLHS.getOperand(0), /*LHS*/ CmpLHS.getOperand(1),
        /*RHS*/ DAG.getConstant(-Addend, SDLoc(CmpRHS), CmpRHS.getValueType()),
        AN->getMemOperand());
    auto LockOp = lowerAtomicArithWithLOCK(AtomicSub, DAG, Subtarget);
    DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
                                  DAG.getUNDEF(CmpLHS.getValueType()));
    DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
    return LockOp;
  }

  // We can handle comparisons with zero in a number of cases by manipulating
  // the CC used.
  if (!Comparison.isNullValue())
    return SDValue();

  if (CC == X86::COND_S && Addend == 1)
    CC = X86::COND_LE;
  else if (CC == X86::COND_NS && Addend == 1)
    CC = X86::COND_G;
  else if (CC == X86::COND_G && Addend == -1)
    CC = X86::COND_GE;
  else if (CC == X86::COND_LE && Addend == -1)
    CC = X86::COND_L;
  else
    return SDValue();

  SDValue LockOp = lowerAtomicArithWithLOCK(CmpLHS, DAG, Subtarget);
  DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(0),
                                DAG.getUNDEF(CmpLHS.getValueType()));
  DAG.ReplaceAllUsesOfValueWith(CmpLHS.getValue(1), LockOp.getValue(1));
  return LockOp;
}
// Check whether a boolean test is testing a boolean value generated by
// X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
// code.
//
// Simplify the following patterns:
// (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
// (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
// to (Op EFLAGS Cond)
//
// (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
// (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
// to (Op EFLAGS !Cond)
//
// where Op could be BRCOND or CMOV.
//
static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
  // This combine only operates on CMP-like nodes.
  if (!(Cmp.getOpcode() == X86ISD::CMP ||
        (Cmp.getOpcode() == X86ISD::SUB && !Cmp->hasAnyUseOfValue(0))))
    return SDValue();

  // Quit if not used as a boolean value.
  if (CC != X86::COND_E && CC != X86::COND_NE)
    return SDValue();

  // Check CMP operands. One of them should be 0 or 1 and the other should be
  // an SetCC or extended from it.
  SDValue Op1 = Cmp.getOperand(0);
  SDValue Op2 = Cmp.getOperand(1);

  SDValue SetCC;
  const ConstantSDNode* C = nullptr;
  bool needOppositeCond = (CC == X86::COND_E);
  bool checkAgainstTrue = false; // Is it a comparison against 1?

  if ((C = dyn_cast<ConstantSDNode>(Op1)))
    SetCC = Op2;
  else if ((C = dyn_cast<ConstantSDNode>(Op2)))
    SetCC = Op1;
  else // Quit if all operands are not constants.
    return SDValue();

  if (C->getZExtValue() == 1) {
    needOppositeCond = !needOppositeCond;
    checkAgainstTrue = true;
  } else if (C->getZExtValue() != 0)
    // Quit if the constant is neither 0 nor 1.
    return SDValue();

  bool truncatedToBoolWithAnd = false;
  // Skip (zext $x), (trunc $x), or (and $x, 1) node.
  while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
         SetCC.getOpcode() == ISD::TRUNCATE ||
         SetCC.getOpcode() == ISD::AND) {
    if (SetCC.getOpcode() == ISD::AND) {
      int OpIdx = -1;
      if (isOneConstant(SetCC.getOperand(0)))
        OpIdx = 1;
      if (isOneConstant(SetCC.getOperand(1)))
        OpIdx = 0;
      if (OpIdx < 0)
        break;
      SetCC = SetCC.getOperand(OpIdx);
      truncatedToBoolWithAnd = true;
    } else
      SetCC = SetCC.getOperand(0);
  }

  switch (SetCC.getOpcode()) {
  case X86ISD::SETCC_CARRY:
    // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
    // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
    // i.e. it's a comparison against true but the result of SETCC_CARRY is not
    // truncated to i1 using 'and'.
    if (checkAgainstTrue && !truncatedToBoolWithAnd)
      break;
    assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
           "Invalid use of SETCC_CARRY!");
    LLVM_FALLTHROUGH;
  case X86ISD::SETCC:
    // Set the condition code or opposite one if necessary.
    CC = X86::CondCode(SetCC.getConstantOperandVal(0));
    if (needOppositeCond)
      CC = X86::GetOppositeBranchCondition(CC);
    return SetCC.getOperand(1);
  case X86ISD::CMOV: {
    // Check whether false/true value has canonical one, i.e. 0 or 1.
    ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
    ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
    // Quit if true value is not a constant.
    if (!TVal)
      return SDValue();
    // Quit if false value is not a constant.
    if (!FVal) {
      SDValue Op = SetCC.getOperand(0);
      // Skip 'zext' or 'trunc' node.
      if (Op.getOpcode() == ISD::ZERO_EXTEND ||
          Op.getOpcode() == ISD::TRUNCATE)
        Op = Op.getOperand(0);
      // A special case for rdrand/rdseed, where 0 is set if false cond is
      // found.
      if ((Op.getOpcode() != X86ISD::RDRAND &&
           Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
        return SDValue();
    }
    // Quit if false value is not the constant 0 or 1.
    bool FValIsFalse = true;
    if (FVal && FVal->getZExtValue() != 0) {
      if (FVal->getZExtValue() != 1)
        return SDValue();
      // If FVal is 1, opposite cond is needed.
      needOppositeCond = !needOppositeCond;
      FValIsFalse = false;
    }
    // Quit if TVal is not the constant opposite of FVal.
    if (FValIsFalse && TVal->getZExtValue() != 1)
      return SDValue();
    if (!FValIsFalse && TVal->getZExtValue() != 0)
      return SDValue();
    CC = X86::CondCode(SetCC.getConstantOperandVal(2));
    if (needOppositeCond)
      CC = X86::GetOppositeBranchCondition(CC);
    return SetCC.getOperand(3);
  }
  }

  return SDValue();
}
/// Check whether Cond is an AND/OR of SETCCs off of the same EFLAGS.
/// Match:
///   (X86or (X86setcc) (X86setcc))
///   (X86cmp (and (X86setcc) (X86setcc)), 0)
static bool checkBoolTestAndOrSetCCCombine(SDValue Cond, X86::CondCode &CC0,
                                           X86::CondCode &CC1, SDValue &Flags,
                                           bool &isAnd) {
  if (Cond->getOpcode() == X86ISD::CMP) {
    if (!isNullConstant(Cond->getOperand(1)))
      return false;

    Cond = Cond->getOperand(0);
  }

  isAnd = false;

  SDValue SetCC0, SetCC1;
  switch (Cond->getOpcode()) {
  default: return false;
  case ISD::AND:
  case X86ISD::AND:
    isAnd = true;
    LLVM_FALLTHROUGH;
  case ISD::OR:
  case X86ISD::OR:
    SetCC0 = Cond->getOperand(0);
    SetCC1 = Cond->getOperand(1);
    break;
  }

  // Make sure we have SETCC nodes, using the same flags value.
  if (SetCC0.getOpcode() != X86ISD::SETCC ||
      SetCC1.getOpcode() != X86ISD::SETCC ||
      SetCC0->getOperand(1) != SetCC1->getOperand(1))
    return false;

  CC0 = (X86::CondCode)SetCC0->getConstantOperandVal(0);
  CC1 = (X86::CondCode)SetCC1->getConstantOperandVal(0);
  Flags = SetCC0->getOperand(1);
  return true;
}
// When legalizing carry, we create carries via add X, -1
// If that comes from an actual carry, via setcc, we use the
// carry directly.
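// e.g. (illustrative): adding -1 to a 0/1 carry value sets CF exactly when
// the value is 1, so when that value came from a setcc we can hand consumers
// the original EFLAGS instead of re-deriving the carry through the add.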
static SDValue combineCarryThroughADD(SDValue EFLAGS, SelectionDAG &DAG) {
  if (EFLAGS.getOpcode() == X86ISD::ADD) {
    if (isAllOnesConstant(EFLAGS.getOperand(1))) {
      SDValue Carry = EFLAGS.getOperand(0);
      while (Carry.getOpcode() == ISD::TRUNCATE ||
             Carry.getOpcode() == ISD::ZERO_EXTEND ||
             Carry.getOpcode() == ISD::SIGN_EXTEND ||
             Carry.getOpcode() == ISD::ANY_EXTEND ||
             (Carry.getOpcode() == ISD::AND &&
              isOneConstant(Carry.getOperand(1))))
        Carry = Carry.getOperand(0);
      if (Carry.getOpcode() == X86ISD::SETCC ||
          Carry.getOpcode() == X86ISD::SETCC_CARRY) {
        // TODO: Merge this code with equivalent in combineAddOrSubToADCOrSBB?
        uint64_t CarryCC = Carry.getConstantOperandVal(0);
        SDValue CarryOp1 = Carry.getOperand(1);
        if (CarryCC == X86::COND_B)
          return CarryOp1;
        if (CarryCC == X86::COND_A) {
          // Try to convert COND_A into COND_B in an attempt to facilitate
          // materializing "setb reg".
          //
          // Do not flip "e > c", where "c" is a constant, because Cmp
          // instruction cannot take an immediate as its first operand.
          //
          if (CarryOp1.getOpcode() == X86ISD::SUB &&
              CarryOp1.getNode()->hasOneUse() &&
              CarryOp1.getValueType().isInteger() &&
              !isa<ConstantSDNode>(CarryOp1.getOperand(1))) {
            SDValue SubCommute =
                DAG.getNode(X86ISD::SUB, SDLoc(CarryOp1), CarryOp1->getVTList(),
                            CarryOp1.getOperand(1), CarryOp1.getOperand(0));
            return SDValue(SubCommute.getNode(), CarryOp1.getResNo());
          }
        }
        // If this is a check of the z flag of an add with 1, switch to the
        // C flag.
        if (CarryCC == X86::COND_E &&
            CarryOp1.getOpcode() == X86ISD::ADD &&
            isOneConstant(CarryOp1.getOperand(1)))
          return CarryOp1;
      }
    }
  }

  return SDValue();
}
/// If we are inverting a PTEST/TESTP operand, attempt to adjust the CC
/// to avoid the inversion.
static SDValue combinePTESTCC(SDValue EFLAGS, X86::CondCode &CC,
                              SelectionDAG &DAG,
                              const X86Subtarget &Subtarget) {
  // TODO: Handle X86ISD::KTEST/X86ISD::KORTEST.
  if (EFLAGS.getOpcode() != X86ISD::PTEST &&
      EFLAGS.getOpcode() != X86ISD::TESTP)
    return SDValue();

  // PTEST/TESTP sets EFLAGS as:
  // TESTZ: ZF = (Op0 & Op1) == 0
  // TESTC: CF = (~Op0 & Op1) == 0
  // TESTNZC: ZF == 0 && CF == 0
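  // e.g. (illustrative): TESTZ on (~X, Y) asks whether (~X & Y) == 0, which
  // is exactly what CF reports for PTEST(X, Y), so an inverted operand can
  // often be absorbed by trading ZF-based CCs for CF-based ones below.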
  EVT VT = EFLAGS.getValueType();
  SDValue Op0 = EFLAGS.getOperand(0);
  SDValue Op1 = EFLAGS.getOperand(1);
  EVT OpVT = Op0.getValueType();

  // TEST*(~X,Y) == TEST*(X,Y)
  if (SDValue NotOp0 = IsNOT(Op0, DAG)) {
    X86::CondCode InvCC;
    switch (CC) {
    case X86::COND_B:
      // testc -> testz.
      InvCC = X86::COND_E;
      break;
    case X86::COND_AE:
      // !testc -> !testz.
      InvCC = X86::COND_NE;
      break;
    case X86::COND_E:
      // testz -> testc.
      InvCC = X86::COND_B;
      break;
    case X86::COND_NE:
      // !testz -> !testc.
      InvCC = X86::COND_AE;
      break;
    case X86::COND_A:
    case X86::COND_BE:
      // testnzc -> testnzc (no change).
      InvCC = CC;
      break;
    default:
      InvCC = X86::COND_INVALID;
      break;
    }

    if (InvCC != X86::COND_INVALID) {
      CC = InvCC;
      return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
                         DAG.getBitcast(OpVT, NotOp0), Op1);
    }
  }

  if (CC == X86::COND_E || CC == X86::COND_NE) {
    // TESTZ(X,~Y) == TESTC(Y,X)
    if (SDValue NotOp1 = IsNOT(Op1, DAG)) {
      CC = (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
      return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
                         DAG.getBitcast(OpVT, NotOp1), Op0);
    }

    if (Op0 == Op1) {
      SDValue BC = peekThroughBitcasts(Op0);
      EVT BCVT = BC.getValueType();
      assert(BCVT.isVector() && DAG.getTargetLoweringInfo().isTypeLegal(BCVT) &&
             "Unexpected vector type");

      // TESTZ(AND(X,Y),AND(X,Y)) == TESTZ(X,Y)
      if (BC.getOpcode() == ISD::AND || BC.getOpcode() == X86ISD::FAND) {
        return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
                           DAG.getBitcast(OpVT, BC.getOperand(0)),
                           DAG.getBitcast(OpVT, BC.getOperand(1)));
      }

      // TESTZ(AND(~X,Y),AND(~X,Y)) == TESTC(X,Y)
      if (BC.getOpcode() == X86ISD::ANDNP || BC.getOpcode() == X86ISD::FANDN) {
        CC = (CC == X86::COND_E ? X86::COND_B : X86::COND_AE);
        return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT,
                           DAG.getBitcast(OpVT, BC.getOperand(0)),
                           DAG.getBitcast(OpVT, BC.getOperand(1)));
      }

      // If every element is an all-sign value, see if we can use MOVMSK to
      // more efficiently extract the sign bits and compare that.
      // TODO: Handle TESTC with comparison inversion.
      // TODO: Can we remove SimplifyMultipleUseDemandedBits and rely on
      // MOVMSK combines to make sure its never worse than PTEST?
      unsigned EltBits = BCVT.getScalarSizeInBits();
      if (DAG.ComputeNumSignBits(BC) == EltBits) {
        assert(VT == MVT::i32 && "Expected i32 EFLAGS comparison result");
        APInt SignMask = APInt::getSignMask(EltBits);
        const TargetLowering &TLI = DAG.getTargetLoweringInfo();
        if (SDValue Res =
                TLI.SimplifyMultipleUseDemandedBits(BC, SignMask, DAG)) {
          // For vXi16 cases we need to use pmovmskb and extract every other
          // sign bit.
          SDLoc DL(EFLAGS);
          if (EltBits == 16) {
            MVT MovmskVT = BCVT.is128BitVector() ? MVT::v16i8 : MVT::v32i8;
            Res = DAG.getBitcast(MovmskVT, Res);
            Res = getPMOVMSKB(DL, Res, DAG, Subtarget);
            Res = DAG.getNode(ISD::AND, DL, MVT::i32, Res,
                              DAG.getConstant(0xAAAAAAAA, DL, MVT::i32));
          } else {
            Res = getPMOVMSKB(DL, Res, DAG, Subtarget);
          }
          return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Res,
                             DAG.getConstant(0, DL, MVT::i32));
        }
      }
    }

    // TESTZ(-1,X) == TESTZ(X,X)
    if (ISD::isBuildVectorAllOnes(Op0.getNode()))
      return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT, Op1, Op1);

    // TESTZ(X,-1) == TESTZ(X,X)
    if (ISD::isBuildVectorAllOnes(Op1.getNode()))
      return DAG.getNode(EFLAGS.getOpcode(), SDLoc(EFLAGS), VT, Op0, Op0);
  }

  return SDValue();
}
// Attempt to simplify the MOVMSK input based on the comparison type.
static SDValue combineSetCCMOVMSK(SDValue EFLAGS, X86::CondCode &CC,
                                  SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {
  // Handle eq/ne against zero (any_of).
  // Handle eq/ne against -1 (all_of).
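  // e.g. (illustrative): for v4i32, MOVMSK == 0 means no element has its sign
  // bit set (any_of is false), while MOVMSK == 0xF means all four do (all_of).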
  if (!(CC == X86::COND_E || CC == X86::COND_NE))
    return SDValue();
  if (EFLAGS.getValueType() != MVT::i32)
    return SDValue();
  unsigned CmpOpcode = EFLAGS.getOpcode();
  if (CmpOpcode != X86ISD::CMP && CmpOpcode != X86ISD::SUB)
    return SDValue();
  auto *CmpConstant = dyn_cast<ConstantSDNode>(EFLAGS.getOperand(1));
  if (!CmpConstant)
    return SDValue();
  const APInt &CmpVal = CmpConstant->getAPIntValue();

  SDValue CmpOp = EFLAGS.getOperand(0);
  unsigned CmpBits = CmpOp.getValueSizeInBits();
  assert(CmpBits == CmpVal.getBitWidth() && "Value size mismatch");

  // Peek through any truncate.
  if (CmpOp.getOpcode() == ISD::TRUNCATE)
    CmpOp = CmpOp.getOperand(0);

  // Bail if we don't find a MOVMSK.
  if (CmpOp.getOpcode() != X86ISD::MOVMSK)
    return SDValue();

  SDValue Vec = CmpOp.getOperand(0);
  MVT VecVT = Vec.getSimpleValueType();
  assert((VecVT.is128BitVector() || VecVT.is256BitVector()) &&
         "Unexpected MOVMSK operand");
  unsigned NumElts = VecVT.getVectorNumElements();
  unsigned NumEltBits = VecVT.getScalarSizeInBits();

  bool IsAnyOf = CmpOpcode == X86ISD::CMP && CmpVal.isNullValue();
  bool IsAllOf = CmpOpcode == X86ISD::SUB && NumElts <= CmpBits &&
                 CmpVal.isMask(NumElts);
  if (!IsAnyOf && !IsAllOf)
    return SDValue();

  // See if we can peek through to a vector with a wider element type, if the
  // signbits extend down to all the sub-elements as well.
  // Calling MOVMSK with the wider type, avoiding the bitcast, helps expose
  // potential SimplifyDemandedBits/Elts cases.
  if (Vec.getOpcode() == ISD::BITCAST) {
    SDValue BC = peekThroughBitcasts(Vec);
    MVT BCVT = BC.getSimpleValueType();
    unsigned BCNumElts = BCVT.getVectorNumElements();
    unsigned BCNumEltBits = BCVT.getScalarSizeInBits();
    if ((BCNumEltBits == 32 || BCNumEltBits == 64) &&
        BCNumEltBits > NumEltBits &&
        DAG.ComputeNumSignBits(BC) > (BCNumEltBits - NumEltBits)) {
      SDLoc DL(EFLAGS);
      unsigned CmpMask = IsAnyOf ? 0 : ((1 << BCNumElts) - 1);
      return DAG.getNode(X86ISD::CMP, DL, MVT::i32,
                         DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, BC),
                         DAG.getConstant(CmpMask, DL, MVT::i32));
    }
  }

  // MOVMSK(PCMPEQ(X,0)) == -1 -> PTESTZ(X,X).
  // MOVMSK(PCMPEQ(X,0)) != -1 -> !PTESTZ(X,X).
  if (IsAllOf && Subtarget.hasSSE41()) {
    SDValue BC = peekThroughBitcasts(Vec);
    if (BC.getOpcode() == X86ISD::PCMPEQ &&
        ISD::isBuildVectorAllZeros(BC.getOperand(1).getNode())) {
      MVT TestVT = VecVT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
      SDValue V = DAG.getBitcast(TestVT, BC.getOperand(0));
      return DAG.getNode(X86ISD::PTEST, SDLoc(EFLAGS), MVT::i32, V, V);
    }
  }

  // See if we can avoid a PACKSS by calling MOVMSK on the sources.
  // For vXi16 cases we can use a v2Xi8 PMOVMSKB. We must mask out
  // sign bits prior to the comparison with zero unless we know that
  // the vXi16 splats the sign bit down to the lower i8 half.
  // TODO: Handle all_of patterns.
  if (Vec.getOpcode() == X86ISD::PACKSS && VecVT == MVT::v16i8) {
    SDValue VecOp0 = Vec.getOperand(0);
    SDValue VecOp1 = Vec.getOperand(1);
    bool SignExt0 = DAG.ComputeNumSignBits(VecOp0) > 8;
    bool SignExt1 = DAG.ComputeNumSignBits(VecOp1) > 8;
    // PMOVMSKB(PACKSSBW(X, undef)) -> PMOVMSKB(BITCAST_v16i8(X)) & 0xAAAA.
    if (IsAnyOf && CmpBits == 8 && VecOp1.isUndef()) {
      SDLoc DL(EFLAGS);
      SDValue Result = DAG.getBitcast(MVT::v16i8, VecOp0);
      Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
      Result = DAG.getZExtOrTrunc(Result, DL, MVT::i16);
      if (!SignExt0) {
        Result = DAG.getNode(ISD::AND, DL, MVT::i16, Result,
                             DAG.getConstant(0xAAAA, DL, MVT::i16));
      }
      return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
                         DAG.getConstant(0, DL, MVT::i16));
    }
    // PMOVMSKB(PACKSSBW(LO(X), HI(X)))
    // -> PMOVMSKB(BITCAST_v32i8(X)) & 0xAAAAAAAA.
    if (CmpBits == 16 && Subtarget.hasInt256() &&
        VecOp0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
        VecOp1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
        VecOp0.getOperand(0) == VecOp1.getOperand(0) &&
        VecOp0.getConstantOperandAPInt(1) == 0 &&
        VecOp1.getConstantOperandAPInt(1) == 8 &&
        (IsAnyOf || (SignExt0 && SignExt1))) {
      SDLoc DL(EFLAGS);
      SDValue Result = DAG.getBitcast(MVT::v32i8, VecOp0.getOperand(0));
      Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
      unsigned CmpMask = IsAnyOf ? 0 : 0xFFFFFFFF;
      if (!SignExt0 || !SignExt1) {
        assert(IsAnyOf && "Only perform v16i16 signmasks for any_of patterns");
        Result = DAG.getNode(ISD::AND, DL, MVT::i32, Result,
                             DAG.getConstant(0xAAAAAAAA, DL, MVT::i32));
      }
      return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
                         DAG.getConstant(CmpMask, DL, MVT::i32));
    }
  }

  // MOVMSK(SHUFFLE(X,u)) -> MOVMSK(X) iff every element is referenced.
  SmallVector<int, 32> ShuffleMask;
  SmallVector<SDValue, 2> ShuffleInputs;
  if (NumElts == CmpBits &&
      getTargetShuffleInputs(peekThroughBitcasts(Vec), ShuffleInputs,
                             ShuffleMask, DAG) &&
      ShuffleInputs.size() == 1 && !isAnyZeroOrUndef(ShuffleMask) &&
      ShuffleInputs[0].getValueSizeInBits() == VecVT.getSizeInBits()) {
    unsigned NumShuffleElts = ShuffleMask.size();
    APInt DemandedElts = APInt::getNullValue(NumShuffleElts);
    for (int M : ShuffleMask) {
      assert(0 <= M && M < (int)NumShuffleElts && "Bad unary shuffle index");
      DemandedElts.setBit(M);
    }
    if (DemandedElts.isAllOnesValue()) {
      SDLoc DL(EFLAGS);
      SDValue Result = DAG.getBitcast(VecVT, ShuffleInputs[0]);
      Result = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
      Result =
          DAG.getZExtOrTrunc(Result, DL, EFLAGS.getOperand(0).getValueType());
      return DAG.getNode(X86ISD::CMP, DL, MVT::i32, Result,
                         EFLAGS.getOperand(1));
    }
  }

  return SDValue();
}
/// Optimize an EFLAGS definition used according to the condition code \p CC
/// into a simpler EFLAGS value, potentially returning a new \p CC and replacing
/// uses of chain values.
static SDValue combineSetCCEFLAGS(SDValue EFLAGS, X86::CondCode &CC,
                                  SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {
  if (CC == X86::COND_B)
    if (SDValue Flags = combineCarryThroughADD(EFLAGS, DAG))
      return Flags;

  if (SDValue R = checkBoolTestSetCCCombine(EFLAGS, CC))
    return R;

  if (SDValue R = combinePTESTCC(EFLAGS, CC, DAG, Subtarget))
    return R;

  if (SDValue R = combineSetCCMOVMSK(EFLAGS, CC, DAG, Subtarget))
    return R;

  return combineSetCCAtomicArith(EFLAGS, CC, DAG, Subtarget);
}
/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
                           TargetLowering::DAGCombinerInfo &DCI,
                           const X86Subtarget &Subtarget) {
  SDLoc DL(N);

  SDValue FalseOp = N->getOperand(0);
  SDValue TrueOp = N->getOperand(1);
  X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
  SDValue Cond = N->getOperand(3);

  // cmov X, X, ?, ? --> X
  if (TrueOp == FalseOp)
    return TrueOp;

  // Try to simplify the EFLAGS and condition code operands.
  // We can't always do this as FCMOV only supports a subset of X86 cond.
  if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG, Subtarget)) {
    if (!(FalseOp.getValueType() == MVT::f80 ||
          (FalseOp.getValueType() == MVT::f64 && !Subtarget.hasSSE2()) ||
          (FalseOp.getValueType() == MVT::f32 && !Subtarget.hasSSE1())) ||
        !Subtarget.hasCMov() || hasFPCMov(CC)) {
      SDValue Ops[] = {FalseOp, TrueOp, DAG.getTargetConstant(CC, DL, MVT::i8),
                       Flags};
      return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
    }
  }

  // If this is a select between two integer constants, try to do some
  // optimizations. Note that the operands are ordered the opposite of SELECT
  // operands.
  if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
    if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
      // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
      // larger than FalseC (the false value).
      if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
        CC = X86::GetOppositeBranchCondition(CC);
        std::swap(TrueC, FalseC);
        std::swap(TrueOp, FalseOp);
      }

      // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
      // This is efficient for any integer data type (including i8/i16) and
      // shift amount.
      if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
        Cond = getSETCC(CC, Cond, DL, DAG);

        // Zero extend the condition if needed.
        Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);

        unsigned ShAmt = TrueC->getAPIntValue().logBase2();
        Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
                           DAG.getConstant(ShAmt, DL, MVT::i8));
        return Cond;
      }

      // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst. This is efficient
      // for any integer data type, including i8/i16.
      if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
        Cond = getSETCC(CC, Cond, DL, DAG);

        // Zero extend the condition if needed.
        Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
                           FalseC->getValueType(0), Cond);
        Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                           SDValue(FalseC, 0));
        return Cond;
      }

      // Optimize cases that will turn into an LEA instruction. This requires
      // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
      if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
        APInt Diff = TrueC->getAPIntValue() - FalseC->getAPIntValue();
        assert(Diff.getBitWidth() == N->getValueType(0).getSizeInBits() &&
               "Implicit constant truncation");

        bool isFastMultiplier = false;
        if (Diff.ult(10)) {
          switch (Diff.getZExtValue()) {
          default: break;
          case 1:  // result = add base, cond
          case 2:  // result = lea base(    , cond*2)
          case 3:  // result = lea base(cond, cond*2)
          case 4:  // result = lea base(    , cond*4)
          case 5:  // result = lea base(cond, cond*4)
          case 8:  // result = lea base(    , cond*8)
          case 9:  // result = lea base(cond, cond*8)
            isFastMultiplier = true;
            break;
          }
        }

        if (isFastMultiplier) {
          Cond = getSETCC(CC, Cond, DL, DAG);
          // Zero extend the condition if needed.
          Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
                             Cond);
          // Scale the condition by the difference.
          if (Diff != 1)
            Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
                               DAG.getConstant(Diff, DL, Cond.getValueType()));

          // Add the base if non-zero.
          if (FalseC->getAPIntValue() != 0)
            Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                               SDValue(FalseC, 0));
          return Cond;
        }
      }
    }
  }

  // Handle these cases:
  //   (select (x != c), e, c) -> select (x != c), e, x),
  //   (select (x == c), c, e) -> select (x == c), x, e)
  // where the c is an integer constant, and the "select" is the combination
  // of CMOV and CMP.
  //
  // The rationale for this change is that the conditional-move from a constant
  // needs two instructions, however, conditional-move from a register needs
  // only one instruction.
  //
  // CAVEAT: By replacing a constant with a symbolic value, it may obscure
  //  some instruction-combining opportunities. This opt needs to be
  //  postponed as late as possible.
  //
  if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
    // the DCI.xxxx conditions are provided to postpone the optimization as
    // late as possible.

    ConstantSDNode *CmpAgainst = nullptr;
    if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
        (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
        !isa<ConstantSDNode>(Cond.getOperand(0))) {

      if (CC == X86::COND_NE &&
          CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
        CC = X86::GetOppositeBranchCondition(CC);
        std::swap(TrueOp, FalseOp);
      }

      if (CC == X86::COND_E &&
          CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
        SDValue Ops[] = {FalseOp, Cond.getOperand(0),
                         DAG.getTargetConstant(CC, DL, MVT::i8), Cond};
        return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
      }
    }
  }

  // Fold and/or of setcc's to double CMOV:
  //   (CMOV F, T, ((cc1 | cc2) != 0)) -> (CMOV (CMOV F, T, cc1), T, cc2)
  //   (CMOV F, T, ((cc1 & cc2) != 0)) -> (CMOV (CMOV T, F, !cc1), F, !cc2)
  //
  // This combine lets us generate:
  //   cmovcc1 (jcc1 if we don't have CMOV)
  //   cmovcc2 (same)
  // instead of:
  //   setcc1
  //   setcc2
  //   and/or
  //   cmovne (jne if we don't have CMOV)
  // When we can't use the CMOV instruction, it might increase branch
  // mispredicts.
  // When we can use CMOV, or when there is no mispredict, this improves
  // throughput and reduces register pressure.
  //
  if (CC == X86::COND_NE) {
    SDValue Flags;
    X86::CondCode CC0, CC1;
    bool isAndSetCC;
    if (checkBoolTestAndOrSetCCCombine(Cond, CC0, CC1, Flags, isAndSetCC)) {
      if (isAndSetCC) {
        std::swap(FalseOp, TrueOp);
        CC0 = X86::GetOppositeBranchCondition(CC0);
        CC1 = X86::GetOppositeBranchCondition(CC1);
      }

      SDValue LOps[] = {FalseOp, TrueOp,
                        DAG.getTargetConstant(CC0, DL, MVT::i8), Flags};
      SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), LOps);
      SDValue Ops[] = {LCMOV, TrueOp, DAG.getTargetConstant(CC1, DL, MVT::i8),
                       Flags};
      SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
      return CMOV;
    }
  }

  // Fold (CMOV C1, (ADD (CTTZ X), C2), (X != 0)) ->
  //      (ADD (CMOV C1-C2, (CTTZ X), (X != 0)), C2)
  // Or (CMOV (ADD (CTTZ X), C2), C1, (X == 0)) ->
  //    (ADD (CMOV (CTTZ X), C1-C2, (X == 0)), C2)
  if ((CC == X86::COND_NE || CC == X86::COND_E) &&
      Cond.getOpcode() == X86ISD::CMP && isNullConstant(Cond.getOperand(1))) {
    SDValue Add = TrueOp;
    SDValue Const = FalseOp;
    // Canonicalize the condition code for easier matching and output.
    if (CC == X86::COND_E)
      std::swap(Add, Const);

    // We might have replaced the constant in the cmov with the LHS of the
    // compare. If so change it to the RHS of the compare.
    if (Const == Cond.getOperand(0))
      Const = Cond.getOperand(1);

    // Ok, now make sure that Add is (add (cttz X), C2) and Const is a constant.
    if (isa<ConstantSDNode>(Const) && Add.getOpcode() == ISD::ADD &&
        Add.hasOneUse() && isa<ConstantSDNode>(Add.getOperand(1)) &&
        (Add.getOperand(0).getOpcode() == ISD::CTTZ_ZERO_UNDEF ||
         Add.getOperand(0).getOpcode() == ISD::CTTZ) &&
        Add.getOperand(0).getOperand(0) == Cond.getOperand(0)) {
      EVT VT = N->getValueType(0);
      // This should constant fold.
      SDValue Diff = DAG.getNode(ISD::SUB, DL, VT, Const, Add.getOperand(1));
      SDValue CMov =
          DAG.getNode(X86ISD::CMOV, DL, VT, Diff, Add.getOperand(0),
                      DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8), Cond);
      return DAG.getNode(ISD::ADD, DL, VT, CMov, Add.getOperand(1));
    }
  }

  return SDValue();
}
/// Different mul shrinking modes.
enum class ShrinkMode { MULS8, MULU8, MULS16, MULU16 };
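
// Worked example (illustrative): a 32-bit element with >= 25 known sign bits
// has at most 7 value bits plus the sign, so it lies in [-128, 127] and
// survives truncation to i8; >= 17 sign bits likewise means it fits in i16.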
static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
  EVT VT = N->getOperand(0).getValueType();
  if (VT.getScalarSizeInBits() != 32)
    return false;

  assert(N->getNumOperands() == 2 && "NumOperands of Mul are 2");
  unsigned SignBits[2] = {1, 1};
  bool IsPositive[2] = {false, false};
  for (unsigned i = 0; i < 2; i++) {
    SDValue Opd = N->getOperand(i);

    SignBits[i] = DAG.ComputeNumSignBits(Opd);
    IsPositive[i] = DAG.SignBitIsZero(Opd);
  }

  bool AllPositive = IsPositive[0] && IsPositive[1];
  unsigned MinSignBits = std::min(SignBits[0], SignBits[1]);
  // When ranges are from -128 ~ 127, use MULS8 mode.
  if (MinSignBits >= 25)
    Mode = ShrinkMode::MULS8;
  // When ranges are from 0 ~ 255, use MULU8 mode.
  else if (AllPositive && MinSignBits >= 24)
    Mode = ShrinkMode::MULU8;
  // When ranges are from -32768 ~ 32767, use MULS16 mode.
  else if (MinSignBits >= 17)
    Mode = ShrinkMode::MULS16;
  // When ranges are from 0 ~ 65535, use MULU16 mode.
  else if (AllPositive && MinSignBits >= 16)
    Mode = ShrinkMode::MULU16;
  else
    return false;
  return true;
}
/// When the operands of vector mul are extended from smaller size values,
/// like i8 and i16, the type of mul may be shrunk to generate more
/// efficient code. Two typical patterns are handled:
/// Pattern1:
///     %2 = sext/zext <N x i8> %1 to <N x i32>
///     %4 = sext/zext <N x i8> %3 to <N x i32>
///  or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
///     %5 = mul <N x i32> %2, %4
///
/// Pattern2:
///     %2 = zext/sext <N x i16> %1 to <N x i32>
///     %4 = zext/sext <N x i16> %3 to <N x i32>
///  or %4 = build_vector <N x i32> %C1, ..., %CN (%C1..%CN are constants)
///     %5 = mul <N x i32> %2, %4
///
/// There are four mul shrinking modes:
/// If %2 == sext32(trunc8(%2)), i.e., the scalar value range of %2 is
/// -128 to 127, and the scalar value range of %4 is also -128 to 127,
/// generate pmullw+sext32 for it (MULS8 mode).
/// If %2 == zext32(trunc8(%2)), i.e., the scalar value range of %2 is
/// 0 to 255, and the scalar value range of %4 is also 0 to 255,
/// generate pmullw+zext32 for it (MULU8 mode).
/// If %2 == sext32(trunc16(%2)), i.e., the scalar value range of %2 is
/// -32768 to 32767, and the scalar value range of %4 is also -32768 to 32767,
/// generate pmullw+pmulhw for it (MULS16 mode).
/// If %2 == zext32(trunc16(%2)), i.e., the scalar value range of %2 is
/// 0 to 65535, and the scalar value range of %4 is also 0 to 65535,
/// generate pmullw+pmulhuw for it (MULU16 mode).
41309 const X86Subtarget &Subtarget) {
41310 // Check for legality
41311 // pmullw/pmulhw are not supported by SSE.
41312 if (!Subtarget.hasSSE2())
41315 // Check for profitability
41316 // pmulld is supported since SSE41. It is better to use pmulld
41317 // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
41319 bool OptForMinSize = DAG.getMachineFunction().getFunction().hasMinSize();
41320 if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
41324 if (!canReduceVMulWidth(N, DAG, Mode))
41328 SDValue N0 = N->getOperand(0);
41329 SDValue N1 = N->getOperand(1);
41330 EVT VT = N->getOperand(0).getValueType();
41331 unsigned NumElts = VT.getVectorNumElements();
41332 if ((NumElts % 2) != 0)
41335 EVT ReducedVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, NumElts);
41337 // Shrink the operands of mul.
41338 SDValue NewN0 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N0);
41339 SDValue NewN1 = DAG.getNode(ISD::TRUNCATE, DL, ReducedVT, N1);
41341 // Generate the lower part of mul: pmullw. For MULU8/MULS8, only the
41342 // lower part is needed.
41343 SDValue MulLo = DAG.getNode(ISD::MUL, DL, ReducedVT, NewN0, NewN1);
41344 if (Mode == ShrinkMode::MULU8 || Mode == ShrinkMode::MULS8)
41345 return DAG.getNode((Mode == ShrinkMode::MULU8) ? ISD::ZERO_EXTEND
41346 : ISD::SIGN_EXTEND,
41349 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts / 2);
41350 // Generate the higher part of mul: pmulhw/pmulhuw. For MULU16/MULS16,
41351 // the higher part is also needed.
41353 DAG.getNode(Mode == ShrinkMode::MULS16 ? ISD::MULHS : ISD::MULHU, DL,
41354 ReducedVT, NewN0, NewN1);
41356 // Repack the lower part and higher part result of mul into a wider
41358 // Generate shuffle functioning as punpcklwd.
41359 SmallVector<int, 16> ShuffleMask(NumElts);
41360 for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
41361 ShuffleMask[2 * i] = i;
41362 ShuffleMask[2 * i + 1] = i + NumElts;
41365 DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
41366 ResLo = DAG.getBitcast(ResVT, ResLo);
41367 // Generate shuffle functioning as punpckhwd.
41368 for (unsigned i = 0, e = NumElts / 2; i < e; i++) {
41369 ShuffleMask[2 * i] = i + NumElts / 2;
41370 ShuffleMask[2 * i + 1] = i + NumElts * 3 / 2;
41373 DAG.getVectorShuffle(ReducedVT, DL, MulLo, MulHi, ShuffleMask);
41374 ResHi = DAG.getBitcast(ResVT, ResHi);
41375 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, ResLo, ResHi);
static SDValue combineMulSpecial(uint64_t MulAmt, SDNode *N, SelectionDAG &DAG,
                                 EVT VT, const SDLoc &DL) {

  auto combineMulShlAddOrSub = [&](int Mult, int Shift, bool isAdd) {
    SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
                                 DAG.getConstant(Mult, DL, VT));
    Result = DAG.getNode(ISD::SHL, DL, VT, Result,
                         DAG.getConstant(Shift, DL, MVT::i8));
    Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
                         N->getOperand(0));
    return Result;
  };

  auto combineMulMulAddOrSub = [&](int Mul1, int Mul2, bool isAdd) {
    SDValue Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
                                 DAG.getConstant(Mul1, DL, VT));
    Result = DAG.getNode(X86ISD::MUL_IMM, DL, VT, Result,
                         DAG.getConstant(Mul2, DL, VT));
    Result = DAG.getNode(isAdd ? ISD::ADD : ISD::SUB, DL, VT, Result,
                         N->getOperand(0));
    return Result;
  };

  switch (MulAmt) {
  default:
    break;
  case 11:
    // mul x, 11 => add ((shl (mul x, 5), 1), x)
    return combineMulShlAddOrSub(5, 1, /*isAdd*/ true);
  case 21:
    // mul x, 21 => add ((shl (mul x, 5), 2), x)
    return combineMulShlAddOrSub(5, 2, /*isAdd*/ true);
  case 41:
    // mul x, 41 => add ((shl (mul x, 5), 3), x)
    return combineMulShlAddOrSub(5, 3, /*isAdd*/ true);
  case 22:
    // mul x, 22 => add (add ((shl (mul x, 5), 2), x), x)
    return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
                       combineMulShlAddOrSub(5, 2, /*isAdd*/ true));
  case 19:
    // mul x, 19 => add ((shl (mul x, 9), 1), x)
    return combineMulShlAddOrSub(9, 1, /*isAdd*/ true);
  case 37:
    // mul x, 37 => add ((shl (mul x, 9), 2), x)
    return combineMulShlAddOrSub(9, 2, /*isAdd*/ true);
  case 73:
    // mul x, 73 => add ((shl (mul x, 9), 3), x)
    return combineMulShlAddOrSub(9, 3, /*isAdd*/ true);
  case 13:
    // mul x, 13 => add ((shl (mul x, 3), 2), x)
    return combineMulShlAddOrSub(3, 2, /*isAdd*/ true);
  case 23:
    // mul x, 23 => sub ((shl (mul x, 3), 3), x)
    return combineMulShlAddOrSub(3, 3, /*isAdd*/ false);
  case 26:
    // mul x, 26 => add ((mul (mul x, 5), 5), x)
    return combineMulMulAddOrSub(5, 5, /*isAdd*/ true);
  case 28:
    // mul x, 28 => add ((mul (mul x, 9), 3), x)
    return combineMulMulAddOrSub(9, 3, /*isAdd*/ true);
  case 29:
    // mul x, 29 => add (add ((mul (mul x, 9), 3), x), x)
    return DAG.getNode(ISD::ADD, DL, VT, N->getOperand(0),
                       combineMulMulAddOrSub(9, 3, /*isAdd*/ true));
  }
  // Another trick. If this is a power of 2 plus 2/4/8, we can use a shift
  // followed by a single LEA.
  // First check if this is a sum of two powers of 2 because that's easy. Then
  // count how many zeros are up to the first bit.
  // TODO: We can do this even without LEA at a cost of two shifts and an add.
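  // e.g. (illustrative): MulAmt == 20 = 16 + 4 gives ShiftAmt = 4 and
  // ScaleShift = 2, so we emit (X << 4) + (X << 2); the scaled add then
  // folds into a single LEA with scale 4.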
  if (isPowerOf2_64(MulAmt & (MulAmt - 1))) {
    unsigned ScaleShift = countTrailingZeros(MulAmt);
    if (ScaleShift >= 1 && ScaleShift < 4) {
      unsigned ShiftAmt = Log2_64((MulAmt & (MulAmt - 1)));
      SDValue Shift1 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                                   DAG.getConstant(ShiftAmt, DL, MVT::i8));
      SDValue Shift2 = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                                   DAG.getConstant(ScaleShift, DL, MVT::i8));
      return DAG.getNode(ISD::ADD, DL, VT, Shift1, Shift2);
    }
  }

  return SDValue();
}
// If the upper 17 bits of each element are zero then we can use PMADDWD,
// which is always at least as quick as PMULLD, except on KNL.
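// e.g. (illustrative): with the upper 17 bits zero, each 32-bit element fits
// in 15 bits, so the signed 16-bit lane products cannot overflow and each
// horizontal pair PMADDWD sums is just lo*lo + 0*0, i.e. the i32 product.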
static SDValue combineMulToPMADDWD(SDNode *N, SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {
  if (!Subtarget.hasSSE2())
    return SDValue();

  if (Subtarget.isPMADDWDSlow())
    return SDValue();

  EVT VT = N->getValueType(0);

  // Only support vXi32 vectors.
  if (!VT.isVector() || VT.getVectorElementType() != MVT::i32)
    return SDValue();

  // Make sure the type is legal or will be widened to a legal type.
  if (VT != MVT::v2i32 && !DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  MVT WVT = MVT::getVectorVT(MVT::i16, 2 * VT.getVectorNumElements());

  // Without BWI, we would need to split v32i16.
  if (WVT == MVT::v32i16 && !Subtarget.hasBWI())
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // If we are zero extending two steps without SSE4.1, it's better to reduce
  // the vmul width instead.
  if (!Subtarget.hasSSE41() &&
      (N0.getOpcode() == ISD::ZERO_EXTEND &&
       N0.getOperand(0).getScalarValueSizeInBits() <= 8) &&
      (N1.getOpcode() == ISD::ZERO_EXTEND &&
       N1.getOperand(0).getScalarValueSizeInBits() <= 8))
    return SDValue();

  APInt Mask17 = APInt::getHighBitsSet(32, 17);
  if (!DAG.MaskedValueIsZero(N1, Mask17) ||
      !DAG.MaskedValueIsZero(N0, Mask17))
    return SDValue();

  // Use SplitOpsAndApply to handle AVX splitting.
  auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                           ArrayRef<SDValue> Ops) {
    MVT OpVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
    return DAG.getNode(X86ISD::VPMADDWD, DL, OpVT, Ops);
  };
  return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
                          { DAG.getBitcast(WVT, N0), DAG.getBitcast(WVT, N1) },
                          PMADDWDBuilder);
}
static SDValue combineMulToPMULDQ(SDNode *N, SelectionDAG &DAG,
                                  const X86Subtarget &Subtarget) {
  if (!Subtarget.hasSSE2())
    return SDValue();

  EVT VT = N->getValueType(0);

  // Only support vXi64 vectors.
  if (!VT.isVector() || VT.getVectorElementType() != MVT::i64 ||
      VT.getVectorNumElements() < 2 ||
      !isPowerOf2_32(VT.getVectorNumElements()))
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // PMULDQ returns the 64-bit result of the signed multiplication of the
  // lower 32-bits. We can lower with this if the sign bits stretch that far.
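  // More than 32 sign bits in a 64-bit element means the value is entirely
  // determined by its low 32 bits (e.g. anything produced by a sign extend
  // from i32), so the 32x32->64 signed multiply is exact.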
  if (Subtarget.hasSSE41() && DAG.ComputeNumSignBits(N0) > 32 &&
      DAG.ComputeNumSignBits(N1) > 32) {
    auto PMULDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                            ArrayRef<SDValue> Ops) {
      return DAG.getNode(X86ISD::PMULDQ, DL, Ops[0].getValueType(), Ops);
    };
    return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
                            PMULDQBuilder, /*CheckBWI*/false);
  }

  // If the upper bits are zero we can use a single pmuludq.
  APInt Mask = APInt::getHighBitsSet(64, 32);
  if (DAG.MaskedValueIsZero(N0, Mask) && DAG.MaskedValueIsZero(N1, Mask)) {
    auto PMULUDQBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                             ArrayRef<SDValue> Ops) {
      return DAG.getNode(X86ISD::PMULUDQ, DL, Ops[0].getValueType(), Ops);
    };
    return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, { N0, N1 },
                            PMULUDQBuilder, /*CheckBWI*/false);
  }

  return SDValue();
}
/// Optimize a single multiply with constant into two operations in order to
/// implement it with two cheaper instructions, e.g. LEA + SHL, LEA + LEA.
static SDValue combineMul(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);

  if (SDValue V = combineMulToPMADDWD(N, DAG, Subtarget))
    return V;

  if (SDValue V = combineMulToPMULDQ(N, DAG, Subtarget))
    return V;

  if (DCI.isBeforeLegalize() && VT.isVector())
    return reduceVMULWidth(N, DAG, Subtarget);

  if (!MulConstantOptimization)
    return SDValue();

  // An imul is usually smaller than the alternative sequence.
  if (DAG.getMachineFunction().getFunction().hasMinSize())
    return SDValue();

  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  if (VT != MVT::i64 && VT != MVT::i32)
    return SDValue();

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C)
    return SDValue();
  if (isPowerOf2_64(C->getZExtValue()))
    return SDValue();

  int64_t SignMulAmt = C->getSExtValue();
  assert(SignMulAmt != INT64_MIN && "Int min should have been handled!");
  uint64_t AbsMulAmt = SignMulAmt < 0 ? -SignMulAmt : SignMulAmt;

  SDLoc DL(N);
  if (AbsMulAmt == 3 || AbsMulAmt == 5 || AbsMulAmt == 9) {
    SDValue NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
                                 DAG.getConstant(AbsMulAmt, DL, VT));
    if (SignMulAmt < 0)
      NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
                           NewMul);

    return NewMul;
  }

  uint64_t MulAmt1 = 0;
  uint64_t MulAmt2 = 0;
  if ((AbsMulAmt % 9) == 0) {
    MulAmt1 = 9;
    MulAmt2 = AbsMulAmt / 9;
  } else if ((AbsMulAmt % 5) == 0) {
    MulAmt1 = 5;
    MulAmt2 = AbsMulAmt / 5;
  } else if ((AbsMulAmt % 3) == 0) {
    MulAmt1 = 3;
    MulAmt2 = AbsMulAmt / 3;
  }
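  // For example, AbsMulAmt == 45 factors as 9 * 5 and can be lowered as two
  // LEAs, while AbsMulAmt == 40 factors as 5 * 8 and becomes an LEA and a
  // shift by 3 (in either order).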
  SDValue NewMul;
  // For negative multiply amounts, only allow MulAmt2 to be a power of 2.
  if (MulAmt2 &&
      (isPowerOf2_64(MulAmt2) ||
       (SignMulAmt >= 0 && (MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)))) {

    if (isPowerOf2_64(MulAmt2) &&
        !(SignMulAmt >= 0 && N->hasOneUse() &&
          N->use_begin()->getOpcode() == ISD::ADD))
      // If the second multiplier is a power of 2, issue it first. We want the
      // multiply by 3, 5, or 9 to be folded into the addressing mode unless
      // the lone use is an add. Only do this for positive multiply amounts
      // since the negate would prevent it from being used as an address mode
      // anyway.
      std::swap(MulAmt1, MulAmt2);

    if (isPowerOf2_64(MulAmt1))
      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                           DAG.getConstant(Log2_64(MulAmt1), DL, MVT::i8));
    else
      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
                           DAG.getConstant(MulAmt1, DL, VT));

    if (isPowerOf2_64(MulAmt2))
      NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
                           DAG.getConstant(Log2_64(MulAmt2), DL, MVT::i8));
    else
      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
                           DAG.getConstant(MulAmt2, DL, VT));

    // Negate the result.
    if (SignMulAmt < 0)
      NewMul = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
                           NewMul);
  } else if (!Subtarget.slowLEA())
    NewMul = combineMulSpecial(C->getZExtValue(), N, DAG, VT, DL);
  if (!NewMul) {
    assert(C->getZExtValue() != 0 &&
           C->getZExtValue() != (VT == MVT::i64 ? UINT64_MAX : UINT32_MAX) &&
           "Both cases that could cause potential overflows should have "
           "already been handled.");
    if (isPowerOf2_64(AbsMulAmt - 1)) {
      // (mul x, 2^N + 1) => (add (shl x, N), x)
      NewMul = DAG.getNode(
          ISD::ADD, DL, VT, N->getOperand(0),
          DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                      DAG.getConstant(Log2_64(AbsMulAmt - 1), DL,
                                      MVT::i8)));
      // To negate, subtract the number from zero.
      if (SignMulAmt < 0)
        NewMul = DAG.getNode(ISD::SUB, DL, VT,
                             DAG.getConstant(0, DL, VT), NewMul);
    } else if (isPowerOf2_64(AbsMulAmt + 1)) {
      // (mul x, 2^N - 1) => (sub (shl x, N), x)
      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                           DAG.getConstant(Log2_64(AbsMulAmt + 1),
                                           DL, MVT::i8));
      // To negate, reverse the operands of the subtract.
      if (SignMulAmt < 0)
        NewMul = DAG.getNode(ISD::SUB, DL, VT, N->getOperand(0), NewMul);
      else
        NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
    } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt - 2)) {
      // (mul x, 2^N + 2) => (add (add (shl x, N), x), x)
      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                           DAG.getConstant(Log2_64(AbsMulAmt - 2),
                                           DL, MVT::i8));
      NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
      NewMul = DAG.getNode(ISD::ADD, DL, VT, NewMul, N->getOperand(0));
    } else if (SignMulAmt >= 0 && isPowerOf2_64(AbsMulAmt + 2)) {
      // (mul x, 2^N - 2) => (sub (sub (shl x, N), x), x)
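      // For example, mul x, 30 becomes (((x << 5) - x) - x).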
      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                           DAG.getConstant(Log2_64(AbsMulAmt + 2),
                                           DL, MVT::i8));
      NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
      NewMul = DAG.getNode(ISD::SUB, DL, VT, NewMul, N->getOperand(0));
    }
  }

  return NewMul;
}
// Try to form a MULHU or MULHS node by looking for
// (srl (mul ext, ext), 16)
// TODO: This is X86 specific because we want to be able to handle wide types
// before type legalization. But we can only do it if the vector will be
// legalized via widening/splitting. Type legalization can't handle promotion
// of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
// combiner.
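// The fold is sound because (ext(a) * ext(b)) >> 16, with a and b of a vXi16
// type, is exactly the high 16-bit half of the widened multiply, which is
// what MULHS/MULHU compute directly.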
static SDValue combineShiftToPMULH(SDNode *N, SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {
  assert((N->getOpcode() == ISD::SRL || N->getOpcode() == ISD::SRA) &&
           "SRL or SRA node is required here!");
  SDLoc DL(N);

  // Only do this with SSE4.1. On earlier targets reduceVMULWidth will expand
  // the multiply.
  if (!Subtarget.hasSSE41())
    return SDValue();

  // The operation feeding into the shift must be a multiply.
  SDValue ShiftOperand = N->getOperand(0);
  if (ShiftOperand.getOpcode() != ISD::MUL || !ShiftOperand.hasOneUse())
    return SDValue();

  // Input type should be at least vXi32.
  EVT VT = N->getValueType(0);
  if (!VT.isVector() || VT.getVectorElementType().getSizeInBits() < 32)
    return SDValue();

  // Need a shift by 16.
  APInt ShiftAmt;
  if (!ISD::isConstantSplatVector(N->getOperand(1).getNode(), ShiftAmt) ||
      ShiftAmt != 16)
    return SDValue();

  SDValue LHS = ShiftOperand.getOperand(0);
  SDValue RHS = ShiftOperand.getOperand(1);

  unsigned ExtOpc = LHS.getOpcode();
  if ((ExtOpc != ISD::SIGN_EXTEND && ExtOpc != ISD::ZERO_EXTEND) ||
      RHS.getOpcode() != ExtOpc)
    return SDValue();

  // Peek through the extends.
  LHS = LHS.getOperand(0);
  RHS = RHS.getOperand(0);

  // Ensure the input types match.
  EVT MulVT = LHS.getValueType();
  if (MulVT.getVectorElementType() != MVT::i16 || RHS.getValueType() != MulVT)
    return SDValue();

  unsigned Opc = ExtOpc == ISD::SIGN_EXTEND ? ISD::MULHS : ISD::MULHU;
  SDValue Mulh = DAG.getNode(Opc, DL, MulVT, LHS, RHS);

  ExtOpc = N->getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
  return DAG.getNode(ExtOpc, DL, VT, Mulh);
}
static SDValue combineShiftLeft(SDNode *N, SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();

  // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
  // since the result of setcc_c is all zeros or all ones.
  if (VT.isInteger() && !VT.isVector() &&
      N1C && N0.getOpcode() == ISD::AND &&
      N0.getOperand(1).getOpcode() == ISD::Constant) {
    SDValue N00 = N0.getOperand(0);
    APInt Mask = N0.getConstantOperandAPInt(1);
    Mask <<= N1C->getAPIntValue();
    bool MaskOK = false;
    // We can handle cases concerning bit-widening nodes containing setcc_c if
    // we carefully interrogate the mask to make sure we are semantics
    // preserving.
    // The transform is not safe if the result of C1 << C2 exceeds the bitwidth
    // of the underlying setcc_c operation if the setcc_c was zero extended.
    // Consider the following example:
    //   zext(setcc_c)                 -> i32 0x0000FFFF
    //   c1                            -> i32 0x0000FFFF
    //   c2                            -> i32 0x00000001
    //   (shl (and (setcc_c), c1), c2) -> i32 0x0001FFFE
    //   (and setcc_c, (c1 << c2))     -> i32 0x0000FFFE
    if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
      MaskOK = true;
    } else if (N00.getOpcode() == ISD::SIGN_EXTEND &&
               N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
      MaskOK = true;
    } else if ((N00.getOpcode() == ISD::ZERO_EXTEND ||
                N00.getOpcode() == ISD::ANY_EXTEND) &&
               N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
      MaskOK = Mask.isIntN(N00.getOperand(0).getValueSizeInBits());
    }
    if (MaskOK && Mask != 0) {
      SDLoc DL(N);
      return DAG.getNode(ISD::AND, DL, VT, N00, DAG.getConstant(Mask, DL, VT));
    }
  }

  // Hardware support for vector shifts is sparse which makes us scalarize the
  // vector operations in many cases. Also, on Sandy Bridge ADD is faster than
  // shl.
  // (shl V, 1) -> add V,V
  if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
    if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
      assert(N0.getValueType().isVector() && "Invalid vector shift type");
      // We shift all of the values by one. In many cases we do not have
      // hardware support for this operation. This is better expressed as an
      // ADD of two values.
      if (N1SplatC->isOne())
        return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
    }

  return SDValue();
}
static SDValue combineShiftRightArithmetic(SDNode *N, SelectionDAG &DAG,
                                           const X86Subtarget &Subtarget) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();
  unsigned Size = VT.getSizeInBits();

  if (SDValue V = combineShiftToPMULH(N, DAG, Subtarget))
    return V;

  // fold (ashr (shl, a, [56,48,32,24,16]), SarConst)
  // into (shl, (sext (a), [56,48,32,24,16] - SarConst)) or
  // into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
  // depending on sign of (SarConst - [56,48,32,24,16])

  // sexts in X86 are MOVs. The MOVs have the same code size
  // as above SHIFTs (only a SHIFT by 1 has lower code size).
  // However the MOVs have 2 advantages to a SHIFT:
  // 1. MOVs can write to a register that differs from source
  // 2. MOVs accept memory operands
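  // For example, with VT == i32 and a shift-left by 24, the inner value is an
  // i8 held in the top byte: (sra (shl x, 24), 25) becomes
  // (sra (sext_inreg x, i8), 1), and (sra (shl x, 24), 23) becomes
  // (shl (sext_inreg x, i8), 1).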
  if (VT.isVector() || N1.getOpcode() != ISD::Constant ||
      N0.getOpcode() != ISD::SHL || !N0.hasOneUse() ||
      N0.getOperand(1).getOpcode() != ISD::Constant)
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  SDValue N01 = N0.getOperand(1);
  APInt ShlConst = (cast<ConstantSDNode>(N01))->getAPIntValue();
  APInt SarConst = (cast<ConstantSDNode>(N1))->getAPIntValue();
  EVT CVT = N1.getValueType();

  if (SarConst.isNegative())
    return SDValue();

  for (MVT SVT : { MVT::i8, MVT::i16, MVT::i32 }) {
    unsigned ShiftSize = SVT.getSizeInBits();
    // Skip types without a corresponding sext/zext and ShlConst values that
    // are not one of [56,48,32,24,16].
    if (ShiftSize >= Size || ShlConst != Size - ShiftSize)
      continue;
    SDLoc DL(N);
    SDValue NN =
        DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT, N00, DAG.getValueType(SVT));
    SarConst = SarConst - (Size - ShiftSize);
    if (SarConst == 0)
      return NN;
    else if (SarConst.isNegative())
      return DAG.getNode(ISD::SHL, DL, VT, NN,
                         DAG.getConstant(-SarConst, DL, CVT));
    else
      return DAG.getNode(ISD::SRA, DL, VT, NN,
                         DAG.getConstant(SarConst, DL, CVT));
  }
  return SDValue();
}
static SDValue combineShiftRightLogical(SDNode *N, SelectionDAG &DAG,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        const X86Subtarget &Subtarget) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N0.getValueType();

  if (SDValue V = combineShiftToPMULH(N, DAG, Subtarget))
    return V;

  // Only do this on the last DAG combine as it can interfere with other
  // combines.
  if (!DCI.isAfterLegalizeDAG())
    return SDValue();

  // Try to improve a sequence of srl (and X, C1), C2 by inverting the order.
  // TODO: This is a generic DAG combine that became an x86-only combine to
  // avoid shortcomings in other folds such as bswap, bit-test ('bt'), and
  // and-not ('andn').
  if (N0.getOpcode() != ISD::AND || !N0.hasOneUse())
    return SDValue();

  auto *ShiftC = dyn_cast<ConstantSDNode>(N1);
  auto *AndC = dyn_cast<ConstantSDNode>(N0.getOperand(1));
  if (!ShiftC || !AndC)
    return SDValue();

  // If we can shrink the constant mask below 8-bits or 32-bits, then this
  // transform should reduce code size. It may also enable secondary transforms
  // from improved known-bits analysis or instruction selection.
  APInt MaskVal = AndC->getAPIntValue();

  // If this can be matched by a zero extend, don't optimize.
  if (MaskVal.isMask()) {
    unsigned TO = MaskVal.countTrailingOnes();
    if (TO >= 8 && isPowerOf2_32(TO))
      return SDValue();
  }

  APInt NewMaskVal = MaskVal.lshr(ShiftC->getAPIntValue());
  unsigned OldMaskSize = MaskVal.getMinSignedBits();
  unsigned NewMaskSize = NewMaskVal.getMinSignedBits();
  if ((OldMaskSize > 8 && NewMaskSize <= 8) ||
      (OldMaskSize > 32 && NewMaskSize <= 32)) {
    // srl (and X, AndC), ShiftC --> and (srl X, ShiftC), (AndC >> ShiftC)
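    // For example, "srl (and X, 0x7E0), 5" becomes "and (srl X, 5), 0x3F";
    // the new mask fits in a sign-extended 8-bit immediate, giving a smaller
    // encoding.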
    SDLoc DL(N);
    SDValue NewMask = DAG.getConstant(NewMaskVal, DL, VT);
    SDValue NewShift = DAG.getNode(ISD::SRL, DL, VT, N0.getOperand(0), N1);
    return DAG.getNode(ISD::AND, DL, VT, NewShift, NewMask);
  }
  return SDValue();
}
static SDValue combineVectorPackWithShuffle(SDNode *N, SelectionDAG &DAG) {
  unsigned Opcode = N->getOpcode();
  assert((X86ISD::PACKSS == Opcode || X86ISD::PACKUS == Opcode) &&
         "Unexpected pack opcode");

  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  unsigned NumDstElts = VT.getVectorNumElements();

  // Attempt to fold PACK(LOSUBVECTOR(SHUFFLE(X)),HISUBVECTOR(SHUFFLE(X)))
  // to SHUFFLE(PACK(LOSUBVECTOR(X),HISUBVECTOR(X))), this is mainly for
  // truncation trees that help us avoid lane crossing shuffles.
  // TODO: There's a lot more we can do for PACK/HADD style shuffle combines.
  if (N0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
      N1.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
      N0.getConstantOperandAPInt(1) == 0 &&
      N1.getConstantOperandAPInt(1) == (NumDstElts / 2) &&
      N0.getOperand(0) == N1.getOperand(0) && VT.is128BitVector() &&
      N0.getOperand(0).getValueType().is256BitVector()) {
    // TODO - support target/faux shuffles.
    SDValue Vec = peekThroughBitcasts(N0.getOperand(0));
    if (auto *SVN = dyn_cast<ShuffleVectorSDNode>(Vec)) {
      // To keep the PACK LHS/RHS coherency, we must be able to scale the unary
      // shuffle to a vXi64 width - we can probably relax this in the future.
      SmallVector<int, 4> ShuffleMask;
      if (SVN->getOperand(1).isUndef() &&
          scaleShuffleElements(SVN->getMask(), 4, ShuffleMask)) {
        SDLoc DL(N);
        SDValue Lo, Hi;
        std::tie(Lo, Hi) = DAG.SplitVector(SVN->getOperand(0), DL);
        Lo = DAG.getBitcast(N0.getValueType(), Lo);
        Hi = DAG.getBitcast(N1.getValueType(), Hi);
        SDValue Res = DAG.getNode(Opcode, DL, VT, Lo, Hi);
        Res = DAG.getBitcast(MVT::v4i32, Res);
        Res = DAG.getVectorShuffle(MVT::v4i32, DL, Res, Res, ShuffleMask);
        return DAG.getBitcast(VT, Res);
      }
    }
  }

  // Attempt to fold PACK(SHUFFLE(X,Y),SHUFFLE(X,Y)) -> SHUFFLE(PACK(X,Y)).
  // TODO: Relax shuffle scaling to support sub-128-bit subvector shuffles.
  if (VT.is256BitVector()) {
    if (auto *SVN0 = dyn_cast<ShuffleVectorSDNode>(N0)) {
      if (auto *SVN1 = dyn_cast<ShuffleVectorSDNode>(N1)) {
        SmallVector<int, 2> ShuffleMask0, ShuffleMask1;
        if (scaleShuffleElements(SVN0->getMask(), 2, ShuffleMask0) &&
            scaleShuffleElements(SVN1->getMask(), 2, ShuffleMask1)) {
          SDValue Op00 = SVN0->getOperand(0);
          SDValue Op01 = SVN0->getOperand(1);
          SDValue Op10 = SVN1->getOperand(0);
          SDValue Op11 = SVN1->getOperand(1);
          if ((Op00 == Op11) && (Op01 == Op10)) {
            std::swap(Op10, Op11);
            ShuffleVectorSDNode::commuteMask(ShuffleMask1);
          }
          if ((Op00 == Op10) && (Op01 == Op11)) {
            SmallVector<int, 4> ShuffleMask;
            ShuffleMask.append(ShuffleMask0.begin(), ShuffleMask0.end());
            ShuffleMask.append(ShuffleMask1.begin(), ShuffleMask1.end());
            SDLoc DL(N);
            SDValue Res = DAG.getNode(Opcode, DL, VT, Op00, Op01);
            Res = DAG.getBitcast(MVT::v4i64, Res);
            Res = DAG.getVectorShuffle(MVT::v4i64, DL, Res, Res, ShuffleMask);
            return DAG.getBitcast(VT, Res);
          }
        }
      }
    }
  }

  return SDValue();
}
static SDValue combineVectorPack(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const X86Subtarget &Subtarget) {
  unsigned Opcode = N->getOpcode();
  assert((X86ISD::PACKSS == Opcode || X86ISD::PACKUS == Opcode) &&
         "Unexpected pack opcode");

  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  unsigned NumDstElts = VT.getVectorNumElements();
  unsigned DstBitsPerElt = VT.getScalarSizeInBits();
  unsigned SrcBitsPerElt = 2 * DstBitsPerElt;
  assert(N0.getScalarValueSizeInBits() == SrcBitsPerElt &&
         N1.getScalarValueSizeInBits() == SrcBitsPerElt &&
         "Unexpected PACKSS/PACKUS input type");

  bool IsSigned = (X86ISD::PACKSS == Opcode);

  // Constant Folding.
  APInt UndefElts0, UndefElts1;
  SmallVector<APInt, 32> EltBits0, EltBits1;
  if ((N0.isUndef() || N->isOnlyUserOf(N0.getNode())) &&
      (N1.isUndef() || N->isOnlyUserOf(N1.getNode())) &&
      getTargetConstantBitsFromNode(N0, SrcBitsPerElt, UndefElts0, EltBits0) &&
      getTargetConstantBitsFromNode(N1, SrcBitsPerElt, UndefElts1, EltBits1)) {
    unsigned NumLanes = VT.getSizeInBits() / 128;
    unsigned NumSrcElts = NumDstElts / 2;
    unsigned NumDstEltsPerLane = NumDstElts / NumLanes;
    unsigned NumSrcEltsPerLane = NumSrcElts / NumLanes;

    APInt Undefs(NumDstElts, 0);
    SmallVector<APInt, 32> Bits(NumDstElts, APInt::getNullValue(DstBitsPerElt));
    for (unsigned Lane = 0; Lane != NumLanes; ++Lane) {
      for (unsigned Elt = 0; Elt != NumDstEltsPerLane; ++Elt) {
        unsigned SrcIdx = Lane * NumSrcEltsPerLane + Elt % NumSrcEltsPerLane;
        auto &UndefElts = (Elt >= NumSrcEltsPerLane ? UndefElts1 : UndefElts0);
        auto &EltBits = (Elt >= NumSrcEltsPerLane ? EltBits1 : EltBits0);

        if (UndefElts[SrcIdx]) {
          Undefs.setBit(Lane * NumDstEltsPerLane + Elt);
          continue;
        }

        APInt &Val = EltBits[SrcIdx];
        if (IsSigned) {
          // PACKSS: Truncate signed value with signed saturation.
          // Source values less than dst minint are saturated to minint.
          // Source values greater than dst maxint are saturated to maxint.
          if (Val.isSignedIntN(DstBitsPerElt))
            Val = Val.trunc(DstBitsPerElt);
          else if (Val.isNegative())
            Val = APInt::getSignedMinValue(DstBitsPerElt);
          else
            Val = APInt::getSignedMaxValue(DstBitsPerElt);
        } else {
          // PACKUS: Truncate signed value with unsigned saturation.
          // Source values less than zero are saturated to zero.
          // Source values greater than dst maxuint are saturated to maxuint.
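          // For example, packing i16 -> i8: PACKSS turns 300 into 127 and
          // -200 into -128, while PACKUS turns 300 into 255 and -200 into 0.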
          if (Val.isIntN(DstBitsPerElt))
            Val = Val.trunc(DstBitsPerElt);
          else if (Val.isNegative())
            Val = APInt::getNullValue(DstBitsPerElt);
          else
            Val = APInt::getAllOnesValue(DstBitsPerElt);
        }
        Bits[Lane * NumDstEltsPerLane + Elt] = Val;
      }
    }

    return getConstVector(Bits, Undefs, VT.getSimpleVT(), DAG, SDLoc(N));
  }

  // Try to fold PACK(SHUFFLE(),SHUFFLE()) -> SHUFFLE(PACK()).
  if (SDValue V = combineVectorPackWithShuffle(N, DAG))
    return V;

  // Try to combine a PACKUSWB/PACKSSWB implemented truncate with a regular
  // truncate to create a larger truncate.
  if (Subtarget.hasAVX512() &&
      N0.getOpcode() == ISD::TRUNCATE && N1.isUndef() && VT == MVT::v16i8 &&
      N0.getOperand(0).getValueType() == MVT::v8i32) {
    if ((IsSigned && DAG.ComputeNumSignBits(N0) > 8) ||
        (!IsSigned &&
         DAG.MaskedValueIsZero(N0, APInt::getHighBitsSet(16, 8)))) {
      if (Subtarget.hasVLX())
        return DAG.getNode(X86ISD::VTRUNC, SDLoc(N), VT, N0.getOperand(0));

      // Widen input to v16i32 so we can truncate that.
      SDLoc dl(N);
      SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v16i32,
                                   N0.getOperand(0), DAG.getUNDEF(MVT::v8i32));
      return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Concat);
    }
  }

  // Attempt to combine as shuffle.
  SDValue Op(N, 0);
  if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
    return Res;

  return SDValue();
}
static SDValue combineVectorShiftVar(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const X86Subtarget &Subtarget) {
  assert((X86ISD::VSHL == N->getOpcode() || X86ISD::VSRA == N->getOpcode() ||
          X86ISD::VSRL == N->getOpcode()) &&
         "Unexpected shift opcode");
  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Shift zero -> zero.
  if (ISD::isBuildVectorAllZeros(N0.getNode()))
    return DAG.getConstant(0, SDLoc(N), VT);

  // Detect constant shift amounts.
  APInt UndefElts;
  SmallVector<APInt, 32> EltBits;
  if (getTargetConstantBitsFromNode(N1, 64, UndefElts, EltBits, true, false)) {
    unsigned X86Opc = getTargetVShiftUniformOpcode(N->getOpcode(), false);
    return getTargetVShiftByConstNode(X86Opc, SDLoc(N), VT.getSimpleVT(), N0,
                                      EltBits[0].getZExtValue(), DAG);
  }

  APInt KnownUndef, KnownZero;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
  if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
                                     KnownZero, DCI))
    return SDValue(N, 0);

  return SDValue();
}
static SDValue combineVectorShiftImm(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const X86Subtarget &Subtarget) {
  unsigned Opcode = N->getOpcode();
  assert((X86ISD::VSHLI == Opcode || X86ISD::VSRAI == Opcode ||
          X86ISD::VSRLI == Opcode) &&
         "Unexpected shift opcode");
  bool LogicalShift = X86ISD::VSHLI == Opcode || X86ISD::VSRLI == Opcode;
  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  unsigned NumBitsPerElt = VT.getScalarSizeInBits();
  assert(VT == N0.getValueType() && (NumBitsPerElt % 8) == 0 &&
         "Unexpected value type");
  assert(N->getOperand(1).getValueType() == MVT::i8 &&
         "Unexpected shift amount type");

  // Out of range logical bit shifts are guaranteed to be zero.
  // Out of range arithmetic bit shifts splat the sign bit.
  unsigned ShiftVal = N->getConstantOperandVal(1);
  if (ShiftVal >= NumBitsPerElt) {
    if (LogicalShift)
      return DAG.getConstant(0, SDLoc(N), VT);
    ShiftVal = NumBitsPerElt - 1;
  }

  // (shift X, 0) -> X
  if (!ShiftVal)
    return N0;

  // (shift 0, C) -> 0
  if (ISD::isBuildVectorAllZeros(N0.getNode()))
    // N0 is all zeros or undef. We guarantee that the bits shifted into the
    // result are all zeros, not undef.
    return DAG.getConstant(0, SDLoc(N), VT);

  // (VSRAI -1, C) -> -1
  if (!LogicalShift && ISD::isBuildVectorAllOnes(N0.getNode()))
    // N0 is all ones or undef. We guarantee that the bits shifted into the
    // result are all ones, not undef.
    return DAG.getConstant(-1, SDLoc(N), VT);

  // (shift (shift X, C2), C1) -> (shift X, (C1 + C2))
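  // For example, two successive VSRLI by 12 on v8i16 combine to a shift by
  // 24, which is >= 16, so the whole expression folds to zero; the same pair
  // of VSRAI shifts is instead clamped to a shift by 15 that splats the sign
  // bit.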
  if (Opcode == N0.getOpcode()) {
    unsigned ShiftVal2 = cast<ConstantSDNode>(N0.getOperand(1))->getZExtValue();
    unsigned NewShiftVal = ShiftVal + ShiftVal2;
    if (NewShiftVal >= NumBitsPerElt) {
      // Out of range logical bit shifts are guaranteed to be zero.
      // Out of range arithmetic bit shifts splat the sign bit.
      if (LogicalShift)
        return DAG.getConstant(0, SDLoc(N), VT);
      NewShiftVal = NumBitsPerElt - 1;
    }
    return DAG.getNode(Opcode, SDLoc(N), VT, N0.getOperand(0),
                       DAG.getTargetConstant(NewShiftVal, SDLoc(N), MVT::i8));
  }

  // We can decode 'whole byte' logical bit shifts as shuffles.
  if (LogicalShift && (ShiftVal % 8) == 0) {
    SDValue Op(N, 0);
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;
  }

  // Constant Folding.
  APInt UndefElts;
  SmallVector<APInt, 32> EltBits;
  if (N->isOnlyUserOf(N0.getNode()) &&
      getTargetConstantBitsFromNode(N0, NumBitsPerElt, UndefElts, EltBits)) {
    assert(EltBits.size() == VT.getVectorNumElements() &&
           "Unexpected shift value type");
    // Undef elements need to fold to 0. It's possible SimplifyDemandedBits
    // created an undef input due to no input bits being demanded, but user
    // still expects 0 in other bits.
    for (unsigned i = 0, e = EltBits.size(); i != e; ++i) {
      APInt &Elt = EltBits[i];
      if (UndefElts[i])
        Elt = 0;
      else if (X86ISD::VSHLI == Opcode)
        Elt <<= ShiftVal;
      else if (X86ISD::VSRAI == Opcode)
        Elt.ashrInPlace(ShiftVal);
      else
        Elt.lshrInPlace(ShiftVal);
    }
    // Reset undef elements since they were zeroed above.
    UndefElts = 0;
    return getConstVector(EltBits, UndefElts, VT.getSimpleVT(), DAG, SDLoc(N));
  }

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.SimplifyDemandedBits(SDValue(N, 0),
                               APInt::getAllOnesValue(NumBitsPerElt), DCI))
    return SDValue(N, 0);

  return SDValue();
}
static SDValue combineVectorInsert(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  assert(((N->getOpcode() == X86ISD::PINSRB && VT == MVT::v16i8) ||
          (N->getOpcode() == X86ISD::PINSRW && VT == MVT::v8i16) ||
          N->getOpcode() == ISD::INSERT_VECTOR_ELT) &&
         "Unexpected vector insertion");

  if (N->getOpcode() == X86ISD::PINSRB || N->getOpcode() == X86ISD::PINSRW) {
    unsigned NumBitsPerElt = VT.getScalarSizeInBits();
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (TLI.SimplifyDemandedBits(SDValue(N, 0),
                                 APInt::getAllOnesValue(NumBitsPerElt), DCI))
      return SDValue(N, 0);
  }

  // Attempt to combine insertion patterns to a shuffle.
  if (VT.isSimple() && DCI.isAfterLegalizeDAG()) {
    SDValue Op(N, 0);
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;
  }

  return SDValue();
}
/// Recognize the distinctive (AND (setcc ...) (setcc ..)) where both setccs
/// reference the same FP CMP, and rewrite for CMPEQSS and friends. Likewise for
/// OR -> CMPNEQSS.
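/// The combined (E && NP) condition is exactly "ordered and equal": x86
/// EFLAGS-based FP compares set PF for unordered (NaN) operands, so the pair
/// of setcc nodes together form a single CMPEQSS-style predicate.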
static SDValue combineCompareEqual(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget &Subtarget) {
  unsigned opcode;

  // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
  // we're requiring SSE2 for both.
  if (Subtarget.hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue CMP0 = N0.getOperand(1);
    SDValue CMP1 = N1.getOperand(1);
    SDLoc DL(N);

    // The SETCCs should both refer to the same CMP.
    if (CMP0.getOpcode() != X86ISD::FCMP || CMP0 != CMP1)
      return SDValue();

    SDValue CMP00 = CMP0->getOperand(0);
    SDValue CMP01 = CMP0->getOperand(1);
    EVT VT = CMP00.getValueType();

    if (VT == MVT::f32 || VT == MVT::f64) {
      bool ExpectingFlags = false;
      // Check for any users that want flags:
      for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
           !ExpectingFlags && UI != UE; ++UI)
        switch (UI->getOpcode()) {
        default:
        case ISD::BR_CC:
        case ISD::BRCOND:
        case ISD::SELECT:
          ExpectingFlags = true;
          break;
        case ISD::CopyToReg:
        case ISD::SIGN_EXTEND:
        case ISD::ZERO_EXTEND:
        case ISD::ANY_EXTEND:
          break;
        }

      if (!ExpectingFlags) {
        enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
        enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);

        if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
          X86::CondCode tmp = cc0;
          cc0 = cc1;
          cc1 = tmp;
        }

        if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
            (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
          // FIXME: need symbolic constants for these magic numbers.
          // See X86ATTInstPrinter.cpp:printSSECC().
          unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
          if (Subtarget.hasAVX512()) {
            SDValue FSetCC =
                DAG.getNode(X86ISD::FSETCCM, DL, MVT::v1i1, CMP00, CMP01,
                            DAG.getTargetConstant(x86cc, DL, MVT::i8));
            // Need to fill with zeros to ensure the bitcast will produce zeroes
            // for the upper bits. An EXTRACT_ELEMENT here wouldn't guarantee that.
            SDValue Ins = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, MVT::v16i1,
                                      DAG.getConstant(0, DL, MVT::v16i1),
                                      FSetCC, DAG.getIntPtrConstant(0, DL));
            return DAG.getZExtOrTrunc(DAG.getBitcast(MVT::i16, Ins), DL,
                                      N->getSimpleValueType(0));
          }
          SDValue OnesOrZeroesF =
              DAG.getNode(X86ISD::FSETCC, DL, CMP00.getValueType(), CMP00,
                          CMP01, DAG.getTargetConstant(x86cc, DL, MVT::i8));

          bool is64BitFP = (CMP00.getValueType() == MVT::f64);
          MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;

          if (is64BitFP && !Subtarget.is64Bit()) {
            // On a 32-bit target, we cannot bitcast the 64-bit float to a
            // 64-bit integer, since that's not a legal type. Since
            // OnesOrZeroesF is all ones or all zeroes, we don't need all the
            // bits, but can do this little dance to extract the lowest 32 bits
            // and work with those going forward.
            SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
                                           OnesOrZeroesF);
            SDValue Vector32 = DAG.getBitcast(MVT::v4f32, Vector64);
            OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
                                        Vector32, DAG.getIntPtrConstant(0, DL));
            IntVT = MVT::i32;
          }

          SDValue OnesOrZeroesI = DAG.getBitcast(IntVT, OnesOrZeroesF);
          SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
                                      DAG.getConstant(1, DL, IntVT));
          SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
                                              ANDed);
          return OneBitOfTruth;
        }
      }
    }
  }
  return SDValue();
}
/// Try to fold: (and (xor X, -1), Y) -> (andnp X, Y).
static SDValue combineANDXORWithAllOnesIntoANDNP(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == ISD::AND);

  MVT VT = N->getSimpleValueType(0);
  if (!VT.is128BitVector() && !VT.is256BitVector() && !VT.is512BitVector())
    return SDValue();

  SDValue X, Y;
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  auto GetNot = [&VT, &DAG](SDValue V) {
    // Basic X = NOT(Y) detection.
    if (SDValue Not = IsNOT(V, DAG))
      return Not;
    // Fold BROADCAST(NOT(Y)) -> BROADCAST(Y).
    if (V.getOpcode() == X86ISD::VBROADCAST) {
      SDValue Src = V.getOperand(0);
      EVT SrcVT = Src.getValueType();
      if (!SrcVT.isVector())
        return SDValue();
      if (SDValue Not = IsNOT(Src, DAG))
        return DAG.getNode(X86ISD::VBROADCAST, SDLoc(V), VT,
                           DAG.getBitcast(SrcVT, Not));
    }
    return SDValue();
  };

  if (SDValue Not = GetNot(N0)) {
    X = Not;
    Y = N1;
  } else if (SDValue Not = GetNot(N1)) {
    X = Not;
    Y = N0;
  } else
    return SDValue();

  X = DAG.getBitcast(VT, X);
  Y = DAG.getBitcast(VT, Y);
  return DAG.getNode(X86ISD::ANDNP, SDLoc(N), VT, X, Y);
}
// Try to widen AND, OR and XOR nodes to VT in order to remove casts around
// logical operations, like in the example below.
// or (and (truncate x, truncate y)),
//    (xor (truncate z, build_vector (constants)))
// Given a target type \p VT, we generate
// or (and x, y), (xor z, zext(build_vector (constants)))
// given that x, y and z are of type \p VT. We can do so if operands are
// either truncates from VT types, or the second operand is a vector of
// constants or can be recursively promoted.
static SDValue PromoteMaskArithmetic(SDNode *N, EVT VT, SelectionDAG &DAG,
                                     unsigned Depth) {
  // Limit recursion to avoid excessive compile times.
  if (Depth >= SelectionDAG::MaxRecursionDepth)
    return SDValue();

  if (N->getOpcode() != ISD::XOR && N->getOpcode() != ISD::AND &&
      N->getOpcode() != ISD::OR)
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDLoc DL(N);

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.isOperationLegalOrPromote(N->getOpcode(), VT))
    return SDValue();

  if (SDValue NN0 = PromoteMaskArithmetic(N0.getNode(), VT, DAG, Depth + 1))
    N0 = NN0;
  else {
    // The left side has to be a trunc.
    if (N0.getOpcode() != ISD::TRUNCATE)
      return SDValue();

    // The type of the truncated inputs.
    if (N0.getOperand(0).getValueType() != VT)
      return SDValue();

    N0 = N0.getOperand(0);
  }

  if (SDValue NN1 = PromoteMaskArithmetic(N1.getNode(), VT, DAG, Depth + 1))
    N1 = NN1;
  else {
    // The right side has to be a 'trunc' or a constant vector.
    bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE &&
                    N1.getOperand(0).getValueType() == VT;
    if (!RHSTrunc && !ISD::isBuildVectorOfConstantSDNodes(N1.getNode()))
      return SDValue();

    if (RHSTrunc)
      N1 = N1.getOperand(0);
    else
      N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N1);
  }

  return DAG.getNode(N->getOpcode(), DL, VT, N0, N1);
}
// On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
// register. In most cases we actually compare or select YMM-sized registers
// and mixing the two types creates horrible code. This method optimizes
// some of the transition sequences.
// Even with AVX-512 this is still useful for removing casts around logical
// operations on vXi1 mask types.
static SDValue PromoteMaskArithmetic(SDNode *N, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  assert(VT.isVector() && "Expected vector type");

  SDLoc DL(N);
  assert((N->getOpcode() == ISD::ANY_EXTEND ||
          N->getOpcode() == ISD::ZERO_EXTEND ||
          N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");

  SDValue Narrow = N->getOperand(0);
  EVT NarrowVT = Narrow.getValueType();

  // Generate the wide operation.
  SDValue Op = PromoteMaskArithmetic(Narrow.getNode(), VT, DAG, 0);
  if (!Op)
    return SDValue();
  switch (N->getOpcode()) {
  default: llvm_unreachable("Unexpected opcode");
  case ISD::ANY_EXTEND:
    return Op;
  case ISD::ZERO_EXTEND:
    return DAG.getZeroExtendInReg(Op, DL, NarrowVT);
  case ISD::SIGN_EXTEND:
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
                       Op, DAG.getValueType(NarrowVT));
  }
}
static unsigned convertIntLogicToFPLogicOpcode(unsigned Opcode) {
  unsigned FPOpcode;
  switch (Opcode) {
  default: llvm_unreachable("Unexpected input node for FP logic conversion");
  case ISD::AND: FPOpcode = X86ISD::FAND; break;
  case ISD::OR:  FPOpcode = X86ISD::FOR;  break;
  case ISD::XOR: FPOpcode = X86ISD::FXOR; break;
  }
  return FPOpcode;
}
/// If both input operands of a logic op are being cast from floating point
/// types, try to convert this into a floating point logic node to avoid
/// unnecessary moves from SSE to integer registers.
static SDValue convertIntLogicToFPLogic(SDNode *N, SelectionDAG &DAG,
                                        const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDLoc DL(N);

  if (N0.getOpcode() != ISD::BITCAST || N1.getOpcode() != ISD::BITCAST)
    return SDValue();

  SDValue N00 = N0.getOperand(0);
  SDValue N10 = N1.getOperand(0);
  EVT N00Type = N00.getValueType();
  EVT N10Type = N10.getValueType();

  // Ensure that both types are the same and are legal scalar fp types.
  if (N00Type != N10Type ||
      !((Subtarget.hasSSE1() && N00Type == MVT::f32) ||
        (Subtarget.hasSSE2() && N00Type == MVT::f64)))
    return SDValue();

  unsigned FPOpcode = convertIntLogicToFPLogicOpcode(N->getOpcode());
  SDValue FPLogic = DAG.getNode(FPOpcode, DL, N00Type, N00, N10);
  return DAG.getBitcast(VT, FPLogic);
}
// Attempt to fold BITOP(MOVMSK(X),MOVMSK(Y)) -> MOVMSK(BITOP(X,Y))
// to reduce XMM->GPR traffic.
static SDValue combineBitOpWithMOVMSK(SDNode *N, SelectionDAG &DAG) {
  unsigned Opc = N->getOpcode();
  assert((Opc == ISD::OR || Opc == ISD::AND || Opc == ISD::XOR) &&
         "Unexpected bit opcode");

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Both operands must be single use MOVMSK.
  if (N0.getOpcode() != X86ISD::MOVMSK || !N0.hasOneUse() ||
      N1.getOpcode() != X86ISD::MOVMSK || !N1.hasOneUse())
    return SDValue();

  SDValue Vec0 = N0.getOperand(0);
  SDValue Vec1 = N1.getOperand(0);
  EVT VecVT0 = Vec0.getValueType();
  EVT VecVT1 = Vec1.getValueType();

  // Both MOVMSK operands must be from vectors of the same size and same
  // element size, but it's OK for them to differ in fp/int type.
  if (VecVT0.getSizeInBits() != VecVT1.getSizeInBits() ||
      VecVT0.getScalarSizeInBits() != VecVT1.getScalarSizeInBits())
    return SDValue();

  SDLoc DL(N);
  unsigned VecOpc =
      VecVT0.isFloatingPoint() ? convertIntLogicToFPLogicOpcode(Opc) : Opc;
  SDValue Result =
      DAG.getNode(VecOpc, DL, VecVT0, Vec0, DAG.getBitcast(VecVT0, Vec1));
  return DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Result);
}
/// If this is a zero/all-bits result that is bitwise-anded with a low bits
/// mask (Mask == 1 for the x86 lowering of a SETCC + ZEXT), replace the 'and'
/// with a shift-right to eliminate loading the vector constant mask value.
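/// For example, AND'ing a v4i32 all-ones/all-zeros value (such as a setcc
/// result) with a splat of 1 is rewritten as a logical right shift by 31.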
static SDValue combineAndMaskToShift(SDNode *N, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  SDValue Op0 = peekThroughBitcasts(N->getOperand(0));
  SDValue Op1 = peekThroughBitcasts(N->getOperand(1));
  EVT VT0 = Op0.getValueType();
  EVT VT1 = Op1.getValueType();

  if (VT0 != VT1 || !VT0.isSimple() || !VT0.isInteger())
    return SDValue();

  APInt SplatVal;
  if (!ISD::isConstantSplatVector(Op1.getNode(), SplatVal) ||
      !SplatVal.isMask())
    return SDValue();

  // Don't prevent creation of ANDN.
  if (isBitwiseNot(Op0))
    return SDValue();

  if (!SupportedVectorShiftWithImm(VT0.getSimpleVT(), Subtarget, ISD::SRL))
    return SDValue();

  unsigned EltBitWidth = VT0.getScalarSizeInBits();
  if (EltBitWidth != DAG.ComputeNumSignBits(Op0))
    return SDValue();

  SDLoc DL(N);
  unsigned ShiftVal = SplatVal.countTrailingOnes();
  SDValue ShAmt = DAG.getTargetConstant(EltBitWidth - ShiftVal, DL, MVT::i8);
  SDValue Shift = DAG.getNode(X86ISD::VSRLI, DL, VT0, Op0, ShAmt);
  return DAG.getBitcast(N->getValueType(0), Shift);
}
// Get the index node from the lowered DAG of a GEP IR instruction with one
// indexing dimension.
static SDValue getIndexFromUnindexedLoad(LoadSDNode *Ld) {
  if (Ld->isIndexed())
    return SDValue();

  SDValue Base = Ld->getBasePtr();

  if (Base.getOpcode() != ISD::ADD)
    return SDValue();

  SDValue ShiftedIndex = Base.getOperand(0);

  if (ShiftedIndex.getOpcode() != ISD::SHL)
    return SDValue();

  return ShiftedIndex.getOperand(0);
}
static bool hasBZHI(const X86Subtarget &Subtarget, MVT VT) {
  if (Subtarget.hasBMI2() && VT.isScalarInteger()) {
    switch (VT.getSizeInBits()) {
    default: return false;
    case 64: return Subtarget.is64Bit();
    case 32: return true;
    }
  }
  return false;
}
// This function recognizes cases where an X86 BZHI instruction can replace an
// 'and-load' sequence: loading an integer value from an array of constants
// defined as
//
//   int array[SIZE] = {0x0, 0x1, 0x3, 0x7, 0xF ..., 2^(SIZE-1) - 1}
//
// and then applying a bitwise AND of the result with another input is
// equivalent to performing BZHI (zero high bits) on the input, with the
// same index as the load.
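// For example, with a 32-bit element type:
//   x & array[idx]  ==  x & ((1u << idx) - 1)  ==  bzhi(x, idx)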
static SDValue combineAndLoadToBZHI(SDNode *Node, SelectionDAG &DAG,
                                    const X86Subtarget &Subtarget) {
  MVT VT = Node->getSimpleValueType(0);
  SDLoc dl(Node);

  // Check if subtarget has BZHI instruction for the node's type.
  if (!hasBZHI(Subtarget, VT))
    return SDValue();

  // Try matching the pattern for both operands.
  for (unsigned i = 0; i < 2; i++) {
    SDValue N = Node->getOperand(i);
    LoadSDNode *Ld = dyn_cast<LoadSDNode>(N.getNode());

    // Bail out if the operand is not a load instruction.
    if (!Ld)
      return SDValue();

    const Value *MemOp = Ld->getMemOperand()->getValue();

    if (!MemOp)
      return SDValue();

    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(MemOp)) {
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0))) {
        if (GV->isConstant() && GV->hasDefinitiveInitializer()) {

          Constant *Init = GV->getInitializer();
          Type *Ty = Init->getType();
          if (!isa<ConstantDataArray>(Init) ||
              !Ty->getArrayElementType()->isIntegerTy() ||
              Ty->getArrayElementType()->getScalarSizeInBits() !=
                  VT.getSizeInBits() ||
              Ty->getArrayNumElements() >
                  Ty->getArrayElementType()->getScalarSizeInBits())
            continue;

          // Check if the array's constant elements are suitable to our case.
          uint64_t ArrayElementCount = Init->getType()->getArrayNumElements();
          bool ConstantsMatch = true;
          for (uint64_t j = 0; j < ArrayElementCount; j++) {
            ConstantInt *Elem =
                dyn_cast<ConstantInt>(Init->getAggregateElement(j));
            if (Elem->getZExtValue() != (((uint64_t)1 << j) - 1)) {
              ConstantsMatch = false;
              break;
            }
          }
          if (!ConstantsMatch)
            continue;

          // Do the transformation (For 32-bit type):
          // -> (and (load arr[idx]), inp)
          // <- (and (srl 0xFFFFFFFF, (sub 32, idx)))
          //    that will be replaced with one bzhi instruction.
          SDValue Inp = (i == 0) ? Node->getOperand(1) : Node->getOperand(0);
          SDValue SizeC = DAG.getConstant(VT.getSizeInBits(), dl, MVT::i32);

          // Get the Node which indexes into the array.
          SDValue Index = getIndexFromUnindexedLoad(Ld);
          if (!Index)
            return SDValue();
          Index = DAG.getZExtOrTrunc(Index, dl, MVT::i32);

          SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, SizeC, Index);
          Sub = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Sub);

          SDValue AllOnes = DAG.getAllOnesConstant(dl, VT);
          SDValue LShr = DAG.getNode(ISD::SRL, dl, VT, AllOnes, Sub);

          return DAG.getNode(ISD::AND, dl, VT, Inp, LShr);
        }
      }
    }
  }
  return SDValue();
}
// Look for (and (ctpop X), 1) which is the IR form of __builtin_parity.
// Turn it into a series of XORs and a setnp.
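// This works because XOR-folding the value in half preserves the parity of
// the set bits at each step, and the x86 PF flag reports the parity of the
// final 8-bit result, so a single SETNP recovers __builtin_parity.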
static SDValue combineParity(SDNode *N, SelectionDAG &DAG,
                             const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);

  // We only support 64-bit and 32-bit. 64-bit requires special handling
  // unless the 64-bit popcnt instruction is legal.
  if (VT != MVT::i32 && VT != MVT::i64)
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.isTypeLegal(VT) && TLI.isOperationLegal(ISD::CTPOP, VT))
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // LHS needs to be a single use CTPOP.
  if (N0.getOpcode() != ISD::CTPOP || !N0.hasOneUse())
    return SDValue();

  // RHS needs to be 1.
  if (!isOneConstant(N1))
    return SDValue();

  SDLoc DL(N);
  SDValue X = N0.getOperand(0);

  // If this is 64-bit, it's always best to xor the two 32-bit pieces together
  // even if we have popcnt.
  if (VT == MVT::i64) {
    SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32,
                             DAG.getNode(ISD::SRL, DL, VT, X,
                                         DAG.getConstant(32, DL, MVT::i8)));
    SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, X);
    X = DAG.getNode(ISD::XOR, DL, MVT::i32, Lo, Hi);
    // Generate a 32-bit parity idiom. This will bring us back here if we need
    // to expand it too.
    SDValue Parity = DAG.getNode(ISD::AND, DL, MVT::i32,
                                 DAG.getNode(ISD::CTPOP, DL, MVT::i32, X),
                                 DAG.getConstant(1, DL, MVT::i32));
    return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Parity);
  }
  assert(VT == MVT::i32 && "Unexpected VT!");

  // Xor the high and low 16-bits together using a 32-bit operation.
  SDValue Hi16 = DAG.getNode(ISD::SRL, DL, VT, X,
                             DAG.getConstant(16, DL, MVT::i8));
  X = DAG.getNode(ISD::XOR, DL, VT, X, Hi16);

  // Finally xor the low 2 bytes together and use an 8-bit flag setting xor.
  // This should allow an h-reg to be used to save a shift.
  // FIXME: We only get an h-reg in 32-bit mode.
  SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8,
                           DAG.getNode(ISD::SRL, DL, VT, X,
                                       DAG.getConstant(8, DL, MVT::i8)));
  SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, X);
  SDVTList VTs = DAG.getVTList(MVT::i8, MVT::i32);
  SDValue Flags = DAG.getNode(X86ISD::XOR, DL, VTs, Lo, Hi).getValue(1);

  // Copy the inverse of the parity flag into a register with setcc.
  SDValue Setnp = getSETCC(X86::COND_NP, Flags, DL, DAG);
  // Zero extend to original type.
  return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0), Setnp);
}
// Look for (and (bitcast (vXi1 (concat_vectors (vYi1 setcc), undef,))), C)
// where C is a mask containing the same number of bits as the setcc and
// where the setcc will freely 0 upper bits of k-register. We can replace the
// undef in the concat with 0s and remove the AND. This mainly helps with
// v2i1/v4i1 setcc being cast to scalar.
static SDValue combineScalarAndWithMaskSetcc(SDNode *N, SelectionDAG &DAG,
                                             const X86Subtarget &Subtarget) {
  assert(N->getOpcode() == ISD::AND && "Unexpected opcode!");

  EVT VT = N->getValueType(0);

  // Make sure this is an AND with a constant. We will check the value of the
  // constant later.
  if (!isa<ConstantSDNode>(N->getOperand(1)))
    return SDValue();

  // This is implied by the ConstantSDNode.
  assert(!VT.isVector() && "Expected scalar VT!");

  if (N->getOperand(0).getOpcode() != ISD::BITCAST ||
      !N->getOperand(0).hasOneUse() ||
      !N->getOperand(0).getOperand(0).hasOneUse())
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Src = N->getOperand(0).getOperand(0);
  EVT SrcVT = Src.getValueType();
  if (!SrcVT.isVector() || SrcVT.getVectorElementType() != MVT::i1 ||
      !TLI.isTypeLegal(SrcVT))
    return SDValue();

  if (Src.getOpcode() != ISD::CONCAT_VECTORS)
    return SDValue();

  // We only care about the first subvector of the concat, we expect the
  // other subvectors to be ignored due to the AND if we make the change.
  SDValue SubVec = Src.getOperand(0);
  EVT SubVecVT = SubVec.getValueType();

  // First subvector should be a setcc with a legal result type. The RHS of the
  // AND should be a mask with this many bits.
  if (SubVec.getOpcode() != ISD::SETCC || !TLI.isTypeLegal(SubVecVT) ||
      !N->getConstantOperandAPInt(1).isMask(SubVecVT.getVectorNumElements()))
    return SDValue();

  EVT SetccVT = SubVec.getOperand(0).getValueType();
  if (!TLI.isTypeLegal(SetccVT) ||
      !(Subtarget.hasVLX() || SetccVT.is512BitVector()))
    return SDValue();

  if (!(Subtarget.hasBWI() || SetccVT.getScalarSizeInBits() >= 32))
    return SDValue();

  // We passed all the checks. Rebuild the concat_vectors with zeroes
  // and cast it back to VT.
  SDLoc dl(N);
  SmallVector<SDValue, 4> Ops(Src.getNumOperands(),
                              DAG.getConstant(0, dl, SubVecVT));
  Ops[0] = SubVec;
  SDValue Concat = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, Ops);
  return DAG.getBitcast(VT, Concat);
}
static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);

  // If this is SSE1-only, convert to FAND to avoid scalarization.
  if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
    return DAG.getBitcast(
        MVT::v4i32, DAG.getNode(X86ISD::FAND, SDLoc(N), MVT::v4f32,
                                DAG.getBitcast(MVT::v4f32, N->getOperand(0)),
                                DAG.getBitcast(MVT::v4f32, N->getOperand(1))));
  }

  // Use a 32-bit and+zext if the upper bits are known zero.
  if (VT == MVT::i64 && Subtarget.is64Bit() &&
      !isa<ConstantSDNode>(N->getOperand(1))) {
    APInt HiMask = APInt::getHighBitsSet(64, 32);
    if (DAG.MaskedValueIsZero(N->getOperand(1), HiMask) ||
        DAG.MaskedValueIsZero(N->getOperand(0), HiMask)) {
      SDLoc dl(N);
      SDValue LHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(0));
      SDValue RHS = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, N->getOperand(1));
      return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64,
                         DAG.getNode(ISD::AND, dl, MVT::i32, LHS, RHS));
    }
  }

  // This must be done before legalization has expanded the ctpop.
  if (SDValue V = combineParity(N, DAG, Subtarget))
    return V;

  // Match all-of bool scalar reductions into a bitcast/movmsk + cmp.
  // TODO: Support multiple SrcOps.
  if (VT == MVT::i1) {
    SmallVector<SDValue, 2> SrcOps;
    SmallVector<APInt, 2> SrcPartials;
    if (matchScalarReduction(SDValue(N, 0), ISD::AND, SrcOps, &SrcPartials) &&
        SrcOps.size() == 1) {
      SDLoc dl(N);
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
      EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
      SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
      if (!Mask && TLI.isTypeLegal(SrcOps[0].getValueType()))
        Mask = DAG.getBitcast(MaskVT, SrcOps[0]);
      if (Mask) {
        assert(SrcPartials[0].getBitWidth() == NumElts &&
               "Unexpected partial reduction mask");
        SDValue PartialBits = DAG.getConstant(SrcPartials[0], dl, MaskVT);
        Mask = DAG.getNode(ISD::AND, dl, MaskVT, Mask, PartialBits);
        return DAG.getSetCC(dl, MVT::i1, Mask, PartialBits, ISD::SETEQ);
      }
    }
  }

  if (SDValue V = combineScalarAndWithMaskSetcc(N, DAG, Subtarget))
    return V;

  if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
    return R;

  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
    return R;

  if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
    return FPLogic;

  if (SDValue R = combineANDXORWithAllOnesIntoANDNP(N, DAG))
    return R;

  if (SDValue ShiftRight = combineAndMaskToShift(N, DAG, Subtarget))
    return ShiftRight;

  if (SDValue R = combineAndLoadToBZHI(N, DAG, Subtarget))
    return R;

  // Attempt to recursively combine a bitmask AND with shuffles.
  if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
    SDValue Op(N, 0);
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;
  }

  // Attempt to combine a scalar bitmask AND with an extracted shuffle.
  if ((VT.getScalarSizeInBits() % 8) == 0 &&
      N->getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      isa<ConstantSDNode>(N->getOperand(0).getOperand(1))) {
    SDValue BitMask = N->getOperand(1);
    SDValue SrcVec = N->getOperand(0).getOperand(0);
    EVT SrcVecVT = SrcVec.getValueType();

    // Check that the constant bitmask masks whole bytes.
    APInt UndefElts;
    SmallVector<APInt, 64> EltBits;
    if (VT == SrcVecVT.getScalarType() &&
        N->getOperand(0)->isOnlyUserOf(SrcVec.getNode()) &&
        getTargetConstantBitsFromNode(BitMask, 8, UndefElts, EltBits) &&
        llvm::all_of(EltBits, [](APInt M) {
          return M.isNullValue() || M.isAllOnesValue();
        })) {
      unsigned NumElts = SrcVecVT.getVectorNumElements();
      unsigned Scale = SrcVecVT.getScalarSizeInBits() / 8;
      unsigned Idx = N->getOperand(0).getConstantOperandVal(1);

      // Create a root shuffle mask from the byte mask and the extracted index.
      SmallVector<int, 16> ShuffleMask(NumElts * Scale, SM_SentinelUndef);
      for (unsigned i = 0; i != Scale; ++i) {
        if (UndefElts[i])
          continue;
        int VecIdx = Scale * Idx + i;
        ShuffleMask[VecIdx] =
            EltBits[i].isNullValue() ? SM_SentinelZero : VecIdx;
      }

      if (SDValue Shuffle = combineX86ShufflesRecursively(
              {SrcVec}, 0, SrcVec, ShuffleMask, {}, /*Depth*/ 1,
              /*HasVarMask*/ false, /*AllowVarMask*/ true, DAG, Subtarget))
        return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(N), VT, Shuffle,
                           N->getOperand(0).getOperand(1));
    }
  }

  return SDValue();
}
// Canonicalize OR(AND(X,C),AND(Y,~C)) -> OR(AND(X,C),ANDNP(C,Y))
static SDValue canonicalizeBitSelect(SDNode *N, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");

  MVT VT = N->getSimpleValueType(0);
  if (!VT.isVector() || (VT.getScalarSizeInBits() % 8) != 0)
    return SDValue();

  SDValue N0 = peekThroughBitcasts(N->getOperand(0));
  SDValue N1 = peekThroughBitcasts(N->getOperand(1));
  if (N0.getOpcode() != ISD::AND || N1.getOpcode() != ISD::AND)
    return SDValue();

  // On XOP we'll lower to PCMOV so accept one use. With AVX512, we can use
  // VPTERNLOG. Otherwise only do this if either mask has multiple uses already.
  bool UseVPTERNLOG = (Subtarget.hasAVX512() && VT.is512BitVector()) ||
                      Subtarget.hasVLX();
  if (!(Subtarget.hasXOP() || UseVPTERNLOG ||
        !N0.getOperand(1).hasOneUse() || !N1.getOperand(1).hasOneUse()))
    return SDValue();

  // Attempt to extract constant byte masks.
  APInt UndefElts0, UndefElts1;
  SmallVector<APInt, 32> EltBits0, EltBits1;
  if (!getTargetConstantBitsFromNode(N0.getOperand(1), 8, UndefElts0, EltBits0,
                                     false, false))
    return SDValue();
  if (!getTargetConstantBitsFromNode(N1.getOperand(1), 8, UndefElts1, EltBits1,
                                     false, false))
    return SDValue();

  for (unsigned i = 0, e = EltBits0.size(); i != e; ++i) {
    // TODO - add UNDEF elts support.
    if (UndefElts0[i] || UndefElts1[i])
      return SDValue();
    if (EltBits0[i] != ~EltBits1[i])
      return SDValue();
  }

  SDLoc DL(N);

  if (UseVPTERNLOG) {
    // Emit a VPTERNLOG node directly.
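    // Immediate 0xCA encodes the ternary truth table for "A ? B : C": with A
    // as the mask, each result bit selects the B bit where the mask bit is
    // set and the C bit where it is clear.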
    SDValue A = DAG.getBitcast(VT, N0.getOperand(1));
    SDValue B = DAG.getBitcast(VT, N0.getOperand(0));
    SDValue C = DAG.getBitcast(VT, N1.getOperand(0));
    SDValue Imm = DAG.getTargetConstant(0xCA, DL, MVT::i8);
    return DAG.getNode(X86ISD::VPTERNLOG, DL, VT, A, B, C, Imm);
  }

  SDValue X = N->getOperand(0);
  SDValue Y =
      DAG.getNode(X86ISD::ANDNP, DL, VT, DAG.getBitcast(VT, N0.getOperand(1)),
                  DAG.getBitcast(VT, N1.getOperand(0)));
  return DAG.getNode(ISD::OR, DL, VT, X, Y);
}
// Try to match OR(AND(~MASK,X),AND(MASK,Y)) logic pattern.
static bool matchLogicBlend(SDNode *N, SDValue &X, SDValue &Y, SDValue &Mask) {
  if (N->getOpcode() != ISD::OR)
    return false;

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // Canonicalize AND to LHS.
  if (N1.getOpcode() == ISD::AND)
    std::swap(N0, N1);

  // Attempt to match OR(AND(M,Y),ANDNP(M,X)).
  if (N0.getOpcode() != ISD::AND || N1.getOpcode() != X86ISD::ANDNP)
    return false;

  Mask = N1.getOperand(0);
  X = N1.getOperand(1);

  // Check to see if the mask appeared in both the AND and ANDNP.
  if (N0.getOperand(0) == Mask)
    Y = N0.getOperand(1);
  else if (N0.getOperand(1) == Mask)
    Y = N0.getOperand(0);
  else
    return false;

  // TODO: Attempt to match against AND(XOR(-1,M),Y) as well, waiting for
  // ANDNP combine allows other combines to happen that prevent matching.
  return true;
}
// Try to fold:
//   (or (and (m, y), (pandn m, x)))
// into:
//   (vselect m, x, y)
// As a special case, try to fold:
//   (or (and (m, (sub 0, x)), (pandn m, x)))
// into:
//   (sub (xor X, M), M)
static SDValue combineLogicBlendIntoPBLENDV(SDNode *N, SelectionDAG &DAG,
                                            const X86Subtarget &Subtarget) {
  assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");

  EVT VT = N->getValueType(0);
  if (!((VT.is128BitVector() && Subtarget.hasSSE2()) ||
        (VT.is256BitVector() && Subtarget.hasInt256())))
    return SDValue();

  SDValue X, Y, Mask;
  if (!matchLogicBlend(N, X, Y, Mask))
    return SDValue();

  // Validate that X, Y, and Mask are bitcasts, and see through them.
  Mask = peekThroughBitcasts(Mask);
  X = peekThroughBitcasts(X);
  Y = peekThroughBitcasts(Y);

  EVT MaskVT = Mask.getValueType();
  unsigned EltBits = MaskVT.getScalarSizeInBits();

  // TODO: Attempt to handle floating point cases as well?
  if (!MaskVT.isInteger() || DAG.ComputeNumSignBits(Mask) != EltBits)
    return SDValue();

  SDLoc DL(N);

  // Attempt to combine to conditional negate: (sub (xor X, M), M)
  if (SDValue Res = combineLogicBlendIntoConditionalNegate(VT, Mask, X, Y, DL,
                                                           DAG, Subtarget))
    return Res;

  // PBLENDVB is only available on SSE 4.1.
  if (!Subtarget.hasSSE41())
    return SDValue();

  // If we have VPTERNLOG we should prefer that since PBLENDVB is multiple uops.
  if (Subtarget.hasVLX())
    return SDValue();

  MVT BlendVT = VT.is256BitVector() ? MVT::v32i8 : MVT::v16i8;

  X = DAG.getBitcast(BlendVT, X);
  Y = DAG.getBitcast(BlendVT, Y);
  Mask = DAG.getBitcast(BlendVT, Mask);
  Mask = DAG.getSelect(DL, BlendVT, Mask, Y, X);
  return DAG.getBitcast(VT, Mask);
}
// Helper function for combineOrCmpEqZeroToCtlzSrl
// Transforms:
//   seteq(cmp x, 0)
// into:
//   srl(ctlz x), log2(bitsize(x))
// Input pattern is checked by caller.
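// For example, with a 32-bit x and LZCNT semantics: ctlz(0) == 32, and
// 32 >> 5 == 1, while any nonzero x has ctlz(x) < 32, so ctlz(x) >> 5 == 0,
// which is exactly seteq(x, 0).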
static SDValue lowerX86CmpEqZeroToCtlzSrl(SDValue Op, EVT ExtTy,
                                          SelectionDAG &DAG) {
  SDValue Cmp = Op.getOperand(1);
  EVT VT = Cmp.getOperand(0).getValueType();
  unsigned Log2b = Log2_32(VT.getSizeInBits());
  SDLoc dl(Op);
  SDValue Clz = DAG.getNode(ISD::CTLZ, dl, VT, Cmp->getOperand(0));
  // The result of the shift is true or false, and on X86, the 32-bit
  // encoding of shr and lzcnt is more desirable.
  SDValue Trunc = DAG.getZExtOrTrunc(Clz, dl, MVT::i32);
  SDValue Scc = DAG.getNode(ISD::SRL, dl, MVT::i32, Trunc,
                            DAG.getConstant(Log2b, dl, MVT::i8));
  return DAG.getZExtOrTrunc(Scc, dl, ExtTy);
}
// Try to transform:
//   zext(or(setcc(eq, (cmp x, 0)), setcc(eq, (cmp y, 0))))
// into:
//   srl(or(ctlz(x), ctlz(y)), log2(bitsize(x)))
// Will also attempt to match more generic cases, eg:
//   zext(or(or(setcc(eq, cmp 0), setcc(eq, cmp 0)), setcc(eq, cmp 0)))
// Only applies if the target supports the FastLZCNT feature.
static SDValue combineOrCmpEqZeroToCtlzSrl(SDNode *N, SelectionDAG &DAG,
                                           TargetLowering::DAGCombinerInfo &DCI,
                                           const X86Subtarget &Subtarget) {
  if (DCI.isBeforeLegalize() || !Subtarget.getTargetLowering()->isCtlzFast())
    return SDValue();

  auto isORCandidate = [](SDValue N) {
    return (N->getOpcode() == ISD::OR && N->hasOneUse());
  };

  // Check the zero extend is extending to 32-bit or more. The code generated
  // by srl(ctlz) for 16-bit or less variants of the pattern would require
  // extra instructions to clear the upper bits.
  if (!N->hasOneUse() || !N->getSimpleValueType(0).bitsGE(MVT::i32) ||
      !isORCandidate(N->getOperand(0)))
    return SDValue();

  // Check the node matches: setcc(eq, cmp 0)
  auto isSetCCCandidate = [](SDValue N) {
    return N->getOpcode() == X86ISD::SETCC && N->hasOneUse() &&
           X86::CondCode(N->getConstantOperandVal(0)) == X86::COND_E &&
           N->getOperand(1).getOpcode() == X86ISD::CMP &&
           isNullConstant(N->getOperand(1).getOperand(1)) &&
           N->getOperand(1).getValueType().bitsGE(MVT::i32);
  };

  SDNode *OR = N->getOperand(0).getNode();
  SDValue LHS = OR->getOperand(0);
  SDValue RHS = OR->getOperand(1);

  // Save nodes matching or(or, setcc(eq, cmp 0)).
  SmallVector<SDNode *, 2> ORNodes;
  while (((isORCandidate(LHS) && isSetCCCandidate(RHS)) ||
          (isORCandidate(RHS) && isSetCCCandidate(LHS)))) {
    ORNodes.push_back(OR);
    OR = (LHS->getOpcode() == ISD::OR) ? LHS.getNode() : RHS.getNode();
    LHS = OR->getOperand(0);
    RHS = OR->getOperand(1);
  }

  // The last OR node should match or(setcc(eq, cmp 0), setcc(eq, cmp 0)).
  if (!(isSetCCCandidate(LHS) && isSetCCCandidate(RHS)) ||
      !isORCandidate(SDValue(OR, 0)))
    return SDValue();

  // We have an or(setcc(eq, cmp 0), setcc(eq, cmp 0)) pattern, try to lower it
  // into:
  //   or(srl(ctlz),srl(ctlz)).
  // The dag combiner can then fold it into:
  //   srl(or(ctlz, ctlz)).
  EVT VT = OR->getValueType(0);
  SDValue NewLHS = lowerX86CmpEqZeroToCtlzSrl(LHS, VT, DAG);
  SDValue Ret, NewRHS;
  if (NewLHS && (NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG)))
    Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, NewLHS, NewRHS);

  if (!Ret)
    return SDValue();

  // Try to lower nodes matching the or(or, setcc(eq, cmp 0)) pattern.
  while (ORNodes.size() > 0) {
    OR = ORNodes.pop_back_val();
    LHS = OR->getOperand(0);
    RHS = OR->getOperand(1);
    // Swap rhs with lhs to match or(setcc(eq, cmp, 0), or).
    if (RHS->getOpcode() == ISD::OR)
      std::swap(LHS, RHS);
    NewRHS = lowerX86CmpEqZeroToCtlzSrl(RHS, VT, DAG);
    if (!NewRHS)
      return SDValue();
    Ret = DAG.getNode(ISD::OR, SDLoc(OR), VT, Ret, NewRHS);
  }

  if (Ret)
    Ret = DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0), Ret);

  return Ret;
}
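// Worked example with FastLZCNT and i32 values x and y:
//   zext(or(seteq(x, 0), seteq(y, 0)))
// is rewritten to ((lzcnt x) >> 5) | ((lzcnt y) >> 5), which the generic
// combiner refolds into ((lzcnt x) | (lzcnt y)) >> 5, replacing two
// cmp/setcc sequences with two lzcnt instructions and one shift.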
static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
                         TargetLowering::DAGCombinerInfo &DCI,
                         const X86Subtarget &Subtarget) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);

  // If this is SSE1 only convert to FOR to avoid scalarization.
  if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) {
    return DAG.getBitcast(MVT::v4i32,
                          DAG.getNode(X86ISD::FOR, SDLoc(N), MVT::v4f32,
                                      DAG.getBitcast(MVT::v4f32, N0),
                                      DAG.getBitcast(MVT::v4f32, N1)));
  }

  // Match any-of bool scalar reductions into a bitcast/movmsk + cmp.
  // TODO: Support multiple SrcOps.
  if (VT == MVT::i1) {
    SmallVector<SDValue, 2> SrcOps;
    SmallVector<APInt, 2> SrcPartials;
    if (matchScalarReduction(SDValue(N, 0), ISD::OR, SrcOps, &SrcPartials) &&
        SrcOps.size() == 1) {
      SDLoc dl(N);
      const TargetLowering &TLI = DAG.getTargetLoweringInfo();
      unsigned NumElts = SrcOps[0].getValueType().getVectorNumElements();
      EVT MaskVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
      SDValue Mask = combineBitcastvxi1(DAG, MaskVT, SrcOps[0], dl, Subtarget);
      if (!Mask && TLI.isTypeLegal(SrcOps[0].getValueType()))
        Mask = DAG.getBitcast(MaskVT, SrcOps[0]);
      if (Mask) {
        assert(SrcPartials[0].getBitWidth() == NumElts &&
               "Unexpected partial reduction mask");
        SDValue ZeroBits = DAG.getConstant(0, dl, MaskVT);
        SDValue PartialBits = DAG.getConstant(SrcPartials[0], dl, MaskVT);
        Mask = DAG.getNode(ISD::AND, dl, MaskVT, Mask, PartialBits);
        return DAG.getSetCC(dl, MVT::i1, Mask, ZeroBits, ISD::SETNE);
      }
    }
  }

  if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
    return R;

  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  if (SDValue R = combineCompareEqual(N, DAG, DCI, Subtarget))
    return R;

  if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
    return FPLogic;

  if (SDValue R = canonicalizeBitSelect(N, DAG, Subtarget))
    return R;

  if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
    return R;

  // Combine OR(X,KSHIFTL(Y,Elts/2)) -> CONCAT_VECTORS(X,Y) == KUNPCK(X,Y).
  // Combine OR(KSHIFTL(X,Elts/2),Y) -> CONCAT_VECTORS(Y,X) == KUNPCK(Y,X).
  // iff the upper elements of the non-shifted arg are zero.
  // KUNPCK requires 16+ bool vector elements.
  if (N0.getOpcode() == X86ISD::KSHIFTL || N1.getOpcode() == X86ISD::KSHIFTL) {
    unsigned NumElts = VT.getVectorNumElements();
    unsigned HalfElts = NumElts / 2;
    APInt UpperElts = APInt::getHighBitsSet(NumElts, HalfElts);
    if (NumElts >= 16 && N1.getOpcode() == X86ISD::KSHIFTL &&
        N1.getConstantOperandAPInt(1) == HalfElts &&
        DAG.MaskedValueIsZero(N0, APInt(1, 1), UpperElts)) {
      SDLoc dl(N);
      return DAG.getNode(
          ISD::CONCAT_VECTORS, dl, VT,
          extractSubVector(N0, 0, DAG, dl, HalfElts),
          extractSubVector(N1.getOperand(0), 0, DAG, dl, HalfElts));
    }
    if (NumElts >= 16 && N0.getOpcode() == X86ISD::KSHIFTL &&
        N0.getConstantOperandAPInt(1) == HalfElts &&
        DAG.MaskedValueIsZero(N1, APInt(1, 1), UpperElts)) {
      SDLoc dl(N);
      return DAG.getNode(
          ISD::CONCAT_VECTORS, dl, VT,
          extractSubVector(N1, 0, DAG, dl, HalfElts),
          extractSubVector(N0.getOperand(0), 0, DAG, dl, HalfElts));
    }
  }

  // Attempt to recursively combine an OR of shuffles.
  if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
    SDValue Op(N, 0);
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;
  }

  return SDValue();
}
/// Try to turn tests against the signbit in the form of:
///   XOR(TRUNCATE(SRL(X, size(X)-1)), 1)
/// into:
///   SETGT(X, -1)
static SDValue foldXorTruncShiftIntoCmp(SDNode *N, SelectionDAG &DAG) {
  // This is only worth doing if the output type is i8 or i1.
  EVT ResultType = N->getValueType(0);
  if (ResultType != MVT::i8 && ResultType != MVT::i1)
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);

  // We should be performing an xor against a truncated shift.
  if (N0.getOpcode() != ISD::TRUNCATE || !N0.hasOneUse())
    return SDValue();

  // Make sure we are performing an xor against one.
  if (!isOneConstant(N1))
    return SDValue();

  // SetCC on x86 zero extends so only act on this if it's a logical shift.
  SDValue Shift = N0.getOperand(0);
  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse())
    return SDValue();

  // Make sure we are truncating from one of i16, i32 or i64.
  EVT ShiftTy = Shift.getValueType();
  if (ShiftTy != MVT::i16 && ShiftTy != MVT::i32 && ShiftTy != MVT::i64)
    return SDValue();

  // Make sure the shift amount extracts the sign bit.
  if (!isa<ConstantSDNode>(Shift.getOperand(1)) ||
      Shift.getConstantOperandAPInt(1) != (ShiftTy.getSizeInBits() - 1))
    return SDValue();

  // Create a greater-than comparison against -1.
  // N.B. Using SETGE against 0 works but we want a canonical looking
  // comparison, and using SETGT matches up with what TranslateX86CC expects.
  SDLoc DL(N);
  SDValue ShiftOp = Shift.getOperand(0);
  EVT ShiftOpTy = ShiftOp.getValueType();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT SetCCResultType = TLI.getSetCCResultType(DAG.getDataLayout(),
                                               *DAG.getContext(), ResultType);
  SDValue Cond = DAG.getSetCC(DL, SetCCResultType, ShiftOp,
                              DAG.getConstant(-1, DL, ShiftOpTy), ISD::SETGT);
  if (SetCCResultType != ResultType)
    Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, ResultType, Cond);
  return Cond;
}
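// For instance, for an i32 value X the sign-bit test
//   xor (trunc (srl X, 31)), 1
// computes "X is non-negative"; it is replaced by setgt(X, -1), which lowers
// to a single compare plus SETG instead of a shift, truncate and xor.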
/// Turn vector tests of the signbit in the form of:
///   xor (sra X, elt_size(X)-1), -1
/// into:
///   pcmpgt X, -1
///
/// This should be called before type legalization because the pattern may not
/// persist after that.
static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG,
                                         const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  if (!VT.isSimple())
    return SDValue();

  switch (VT.getSimpleVT().SimpleTy) {
  default: return SDValue();
  case MVT::v16i8:
  case MVT::v8i16:
  case MVT::v4i32:
  case MVT::v2i64: if (!Subtarget.hasSSE2()) return SDValue(); break;
  case MVT::v32i8:
  case MVT::v16i16:
  case MVT::v8i32:
  case MVT::v4i64: if (!Subtarget.hasAVX2()) return SDValue(); break;
  }

  // There must be an arithmetic shift right before the xor, and the xor must
  // be a 'not' operation.
  SDValue Shift = N->getOperand(0);
  SDValue Ones = N->getOperand(1);
  if (Shift.getOpcode() != ISD::SRA || !Shift.hasOneUse() ||
      !ISD::isBuildVectorAllOnes(Ones.getNode()))
    return SDValue();

  // The shift should be smearing the sign bit across each vector element.
  auto *ShiftAmt =
      isConstOrConstSplat(Shift.getOperand(1), /*AllowUndefs*/ true);
  if (!ShiftAmt ||
      ShiftAmt->getAPIntValue() != (Shift.getScalarValueSizeInBits() - 1))
    return SDValue();

  // Create a greater-than comparison against -1. We don't use the more obvious
  // greater-than-or-equal-to-zero because SSE/AVX don't have that instruction.
  return DAG.getSetCC(SDLoc(N), VT, Shift.getOperand(0), Ones, ISD::SETGT);
}
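// For example, for v4i32:
//   xor (sra X, 31), <-1 x 4>
// computes a per-lane "is non-negative" mask; it becomes
//   setgt X, <-1 x 4>   (i.e. PCMPGTD)
// reusing the xor's all-ones operand as the comparison constant.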
/// Detect patterns of truncation with unsigned saturation:
///
/// 1. (truncate (umin (x, unsigned_max_of_dest_type)) to dest_type).
///    Return the source value x to be truncated or SDValue() if the pattern
///    was not matched.
///
/// 2. (truncate (smin (smax (x, C1), C2)) to dest_type),
///    where C1 >= 0 and C2 is unsigned max of destination type.
///
/// 3. (truncate (smax (smin (x, C2), C1)) to dest_type)
///    where C1 >= 0, C2 is unsigned max of destination type and C1 <= C2.
///
/// These two patterns are equivalent to:
///   (truncate (umin (smax(x, C1), unsigned_max_of_dest_type)) to dest_type)
/// So return the smax(x, C1) value to be truncated or SDValue() if the
/// pattern was not matched.
static SDValue detectUSatPattern(SDValue In, EVT VT, SelectionDAG &DAG,
                                 const SDLoc &DL) {
  EVT InVT = In.getValueType();

  // Saturation with truncation. We truncate from InVT to VT.
  assert(InVT.getScalarSizeInBits() > VT.getScalarSizeInBits() &&
         "Unexpected types for truncate operation");

  // Match min/max and return limit value as a parameter.
  auto MatchMinMax = [](SDValue V, unsigned Opcode, APInt &Limit) -> SDValue {
    if (V.getOpcode() == Opcode &&
        ISD::isConstantSplatVector(V.getOperand(1).getNode(), Limit))
      return V.getOperand(0);
    return SDValue();
  };

  APInt C1, C2;
  if (SDValue UMin = MatchMinMax(In, ISD::UMIN, C2))
    // C2 should be equal to UINT32_MAX / UINT16_MAX / UINT8_MAX according to
    // the element size of the destination type.
    if (C2.isMask(VT.getScalarSizeInBits()))
      return UMin;

  if (SDValue SMin = MatchMinMax(In, ISD::SMIN, C2))
    if (MatchMinMax(SMin, ISD::SMAX, C1))
      if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()))
        return SMin;

  if (SDValue SMax = MatchMinMax(In, ISD::SMAX, C1))
    if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, C2))
      if (C1.isNonNegative() && C2.isMask(VT.getScalarSizeInBits()) &&
          C2.uge(C1))
        return DAG.getNode(ISD::SMAX, DL, InVT, SMin, In.getOperand(1));

  return SDValue();
}
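// Example of pattern 1: for a v8i32 -> v8i16 truncation, the input
//   (umin x, <65535 x 8>)
// matches because 65535 is a mask of the 16 destination bits, so x is
// returned as the value to truncate with unsigned saturation.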
/// Detect patterns of truncation with signed saturation:
/// (truncate (smin ((smax (x, signed_min_of_dest_type)),
///                  signed_max_of_dest_type)) to dest_type)
/// or:
/// (truncate (smax ((smin (x, signed_max_of_dest_type)),
///                  signed_min_of_dest_type)) to dest_type).
/// With MatchPackUS, the smax/smin range is [0, unsigned_max_of_dest_type].
/// Return the source value to be truncated or SDValue() if the pattern was not
/// matched.
static SDValue detectSSatPattern(SDValue In, EVT VT, bool MatchPackUS = false) {
  unsigned NumDstBits = VT.getScalarSizeInBits();
  unsigned NumSrcBits = In.getScalarValueSizeInBits();
  assert(NumSrcBits > NumDstBits && "Unexpected types for truncate operation");

  auto MatchMinMax = [](SDValue V, unsigned Opcode,
                        const APInt &Limit) -> SDValue {
    APInt C;
    if (V.getOpcode() == Opcode &&
        ISD::isConstantSplatVector(V.getOperand(1).getNode(), C) && C == Limit)
      return V.getOperand(0);
    return SDValue();
  };

  APInt SignedMax, SignedMin;
  if (MatchPackUS) {
    SignedMax = APInt::getAllOnesValue(NumDstBits).zext(NumSrcBits);
    SignedMin = APInt(NumSrcBits, 0);
  } else {
    SignedMax = APInt::getSignedMaxValue(NumDstBits).sext(NumSrcBits);
    SignedMin = APInt::getSignedMinValue(NumDstBits).sext(NumSrcBits);
  }

  if (SDValue SMin = MatchMinMax(In, ISD::SMIN, SignedMax))
    if (SDValue SMax = MatchMinMax(SMin, ISD::SMAX, SignedMin))
      return SMax;

  if (SDValue SMax = MatchMinMax(In, ISD::SMAX, SignedMin))
    if (SDValue SMin = MatchMinMax(SMax, ISD::SMIN, SignedMax))
      return SMin;

  return SDValue();
}
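// Example: for a vXi32 -> vXi16 truncation this matches
//   (smin (smax x, <-32768 x N>), <32767 x N>)
// or the smax/smin-swapped form; with MatchPackUS the accepted clamp range is
// [0, 65535] instead, mirroring what the PACKUS instructions perform.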
static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
                                      SelectionDAG &DAG,
                                      const X86Subtarget &Subtarget) {
  if (!Subtarget.hasSSE2() || !VT.isVector())
    return SDValue();

  EVT SVT = VT.getVectorElementType();
  EVT InVT = In.getValueType();
  EVT InSVT = InVT.getVectorElementType();

  // If we're clamping a signed 32-bit vector to 0-255 and the 32-bit vector is
  // split across two registers, we can use a packusdw+perm to clamp to 0-65535
  // and concatenate at the same time. Then we can use a final vpmovuswb to
  // clip to 0-255.
  if (Subtarget.hasBWI() && !Subtarget.useAVX512Regs() &&
      InVT == MVT::v16i32 && VT == MVT::v16i8) {
    if (auto USatVal = detectSSatPattern(In, VT, true)) {
      // Emit a VPACKUSDW+VPERMQ followed by a VPMOVUSWB.
      SDValue Mid = truncateVectorWithPACK(X86ISD::PACKUS, MVT::v16i16, USatVal,
                                           DL, DAG, Subtarget);
      assert(Mid && "Failed to pack!");
      return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, Mid);
    }
  }

  // vXi32 truncate instructions are available with AVX512F.
  // vXi16 truncate instructions are only available with AVX512BW.
  // For 256-bit or smaller vectors, we require VLX.
  // FIXME: We could widen truncates to 512 to remove the VLX restriction.
  // If the result type is 256-bits or larger and we have disabled 512-bit
  // registers, we should go ahead and use the pack instructions if possible.
  bool PreferAVX512 = ((Subtarget.hasAVX512() && InSVT == MVT::i32) ||
                       (Subtarget.hasBWI() && InSVT == MVT::i16)) &&
                      (InVT.getSizeInBits() > 128) &&
                      (Subtarget.hasVLX() || InVT.getSizeInBits() > 256) &&
                      !(!Subtarget.useAVX512Regs() && VT.getSizeInBits() >= 256);

  if (isPowerOf2_32(VT.getVectorNumElements()) && !PreferAVX512 &&
      VT.getSizeInBits() >= 64 &&
      (SVT == MVT::i8 || SVT == MVT::i16) &&
      (InSVT == MVT::i16 || InSVT == MVT::i32)) {
    if (auto USatVal = detectSSatPattern(In, VT, true)) {
      // vXi32 -> vXi8 must be performed as PACKUSWB(PACKSSDW,PACKSSDW).
      // Only do this when the result is at least 64 bits, otherwise we'd be
      // leaving dangling PACKSSDW nodes.
      if (SVT == MVT::i8 && InSVT == MVT::i32) {
        EVT MidVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
                                     VT.getVectorNumElements());
        SDValue Mid = truncateVectorWithPACK(X86ISD::PACKSS, MidVT, USatVal, DL,
                                             DAG, Subtarget);
        assert(Mid && "Failed to pack!");
        SDValue V = truncateVectorWithPACK(X86ISD::PACKUS, VT, Mid, DL, DAG,
                                           Subtarget);
        assert(V && "Failed to pack!");
        return V;
      } else if (SVT == MVT::i8 || Subtarget.hasSSE41())
        return truncateVectorWithPACK(X86ISD::PACKUS, VT, USatVal, DL, DAG,
                                      Subtarget);
    }
    if (auto SSatVal = detectSSatPattern(In, VT))
      return truncateVectorWithPACK(X86ISD::PACKSS, VT, SSatVal, DL, DAG,
                                    Subtarget);
  }

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.isTypeLegal(InVT) && InVT.isVector() && SVT != MVT::i1 &&
      Subtarget.hasAVX512() && (InSVT != MVT::i16 || Subtarget.hasBWI())) {
    unsigned TruncOpc = 0;
    SDValue SatVal;
    if (auto SSatVal = detectSSatPattern(In, VT)) {
      SatVal = SSatVal;
      TruncOpc = X86ISD::VTRUNCS;
    } else if (auto USatVal = detectUSatPattern(In, VT, DAG, DL)) {
      SatVal = USatVal;
      TruncOpc = X86ISD::VTRUNCUS;
    }
    if (SatVal) {
      unsigned ResElts = VT.getVectorNumElements();
      // If the input type is less than 512 bits and we don't have VLX, we need
      // to widen to 512 bits.
      if (!Subtarget.hasVLX() && !InVT.is512BitVector()) {
        unsigned NumConcats = 512 / InVT.getSizeInBits();
        ResElts *= NumConcats;
        SmallVector<SDValue, 4> ConcatOps(NumConcats, DAG.getUNDEF(InVT));
        ConcatOps[0] = SatVal;
        InVT = EVT::getVectorVT(*DAG.getContext(), InSVT,
                                NumConcats * InVT.getVectorNumElements());
        SatVal = DAG.getNode(ISD::CONCAT_VECTORS, DL, InVT, ConcatOps);
      }
      // Widen the result if it's narrower than 128 bits.
      if (ResElts * SVT.getSizeInBits() < 128)
        ResElts = 128 / SVT.getSizeInBits();
      EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), SVT, ResElts);
      SDValue Res = DAG.getNode(TruncOpc, DL, TruncVT, SatVal);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Res,
                         DAG.getIntPtrConstant(0, DL));
    }
  }

  return SDValue();
}
/// This function detects the AVG pattern between vectors of unsigned i8/i16,
/// which is c = (a + b + 1) / 2, and replaces this operation with the
/// efficient X86ISD::AVG instruction.
static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
                                const X86Subtarget &Subtarget,
                                const SDLoc &DL) {
  if (!VT.isVector())
    return SDValue();
  EVT InVT = In.getValueType();
  unsigned NumElems = VT.getVectorNumElements();

  EVT ScalarVT = VT.getVectorElementType();
  if (!((ScalarVT == MVT::i8 || ScalarVT == MVT::i16) &&
        NumElems >= 2 && isPowerOf2_32(NumElems)))
    return SDValue();

  // InScalarVT is the intermediate type in the AVG pattern and it should be
  // greater than the original input type (i8/i16).
  EVT InScalarVT = InVT.getVectorElementType();
  if (InScalarVT.getSizeInBits() <= ScalarVT.getSizeInBits())
    return SDValue();

  if (!Subtarget.hasSSE2())
    return SDValue();

  // Detect the following pattern:
  //
  //   %1 = zext <N x i8> %a to <N x i32>
  //   %2 = zext <N x i8> %b to <N x i32>
  //   %3 = add nuw nsw <N x i32> %1, <i32 1 x N>
  //   %4 = add nuw nsw <N x i32> %3, %2
  //   %5 = lshr <N x i32> %4, <i32 1 x N>
  //   %6 = trunc <N x i32> %5 to <N x i8>
  //
  // In AVX512, the last instruction can also be a trunc store.
  if (In.getOpcode() != ISD::SRL)
    return SDValue();

  // A lambda checking that the given SDValue is a constant vector whose
  // elements are each in the range [Min, Max].
  auto IsConstVectorInRange = [](SDValue V, unsigned Min, unsigned Max) {
    return ISD::matchUnaryPredicate(V, [Min, Max](ConstantSDNode *C) {
      return !(C->getAPIntValue().ult(Min) || C->getAPIntValue().ugt(Max));
    });
  };

  // Check if each element of the vector is right-shifted by one.
  auto LHS = In.getOperand(0);
  auto RHS = In.getOperand(1);
  if (!IsConstVectorInRange(RHS, 1, 1))
    return SDValue();
  if (LHS.getOpcode() != ISD::ADD)
    return SDValue();

  // Detect a pattern of a + b + 1 where the order doesn't matter.
  SDValue Operands[3];
  Operands[0] = LHS.getOperand(0);
  Operands[1] = LHS.getOperand(1);

  auto AVGBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                       ArrayRef<SDValue> Ops) {
    return DAG.getNode(X86ISD::AVG, DL, Ops[0].getValueType(), Ops);
  };

  // Take care of the case when one of the operands is a constant vector whose
  // element is in the range [1, 256].
  if (IsConstVectorInRange(Operands[1], 1, ScalarVT == MVT::i8 ? 256 : 65536) &&
      Operands[0].getOpcode() == ISD::ZERO_EXTEND &&
      Operands[0].getOperand(0).getValueType() == VT) {
    // The pattern is detected. Subtract one from the constant vector, then
    // demote it and emit the X86ISD::AVG instruction.
    SDValue VecOnes = DAG.getConstant(1, DL, InVT);
    Operands[1] = DAG.getNode(ISD::SUB, DL, InVT, Operands[1], VecOnes);
    Operands[1] = DAG.getNode(ISD::TRUNCATE, DL, VT, Operands[1]);
    return SplitOpsAndApply(DAG, Subtarget, DL, VT,
                            { Operands[0].getOperand(0), Operands[1] },
                            AVGBuilder);
  }

  // Matches 'add like' patterns: add(Op0,Op1) + zext(or(Op0,Op1)).
  // Match the or case only if it's 'add-like' - i.e. can be replaced by an add.
  auto FindAddLike = [&](SDValue V, SDValue &Op0, SDValue &Op1) {
    if (ISD::ADD == V.getOpcode()) {
      Op0 = V.getOperand(0);
      Op1 = V.getOperand(1);
      return true;
    }
    if (ISD::ZERO_EXTEND != V.getOpcode())
      return false;
    V = V.getOperand(0);
    if (V.getValueType() != VT || ISD::OR != V.getOpcode() ||
        !DAG.haveNoCommonBitsSet(V.getOperand(0), V.getOperand(1)))
      return false;
    Op0 = V.getOperand(0);
    Op1 = V.getOperand(1);
    return true;
  };

  SDValue Op0, Op1;
  if (FindAddLike(Operands[0], Op0, Op1))
    std::swap(Operands[0], Operands[1]);
  else if (!FindAddLike(Operands[1], Op0, Op1))
    return SDValue();
  Operands[2] = Op0;
  Operands[1] = Op1;

  // Now we have three operands of two additions. Check that one of them is a
  // constant vector with ones, and the other two can be promoted from i8/i16.
  for (int i = 0; i < 3; ++i) {
    if (!IsConstVectorInRange(Operands[i], 1, 1))
      continue;
    std::swap(Operands[i], Operands[2]);

    // Check if Operands[0] and Operands[1] are results of type promotion.
    for (int j = 0; j < 2; ++j)
      if (Operands[j].getValueType() != VT) {
        if (Operands[j].getOpcode() != ISD::ZERO_EXTEND ||
            Operands[j].getOperand(0).getValueType() != VT)
          return SDValue();
        Operands[j] = Operands[j].getOperand(0);
      }

    // The pattern is detected, emit X86ISD::AVG instruction(s).
    return SplitOpsAndApply(DAG, Subtarget, DL, VT, {Operands[0], Operands[1]},
                            AVGBuilder);
  }

  return SDValue();
}
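// Note the "+ 1" makes this a rounding average: (a + b + 1) >> 1 is exactly
// what PAVGB/PAVGW compute, with the arithmetic done in a wider type so the
// addition cannot wrap. E.g. a = 1, b = 2 gives (1 + 2 + 1) >> 1 = 2.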
static SDValue combineLoad(SDNode *N, SelectionDAG &DAG,
                           TargetLowering::DAGCombinerInfo &DCI,
                           const X86Subtarget &Subtarget) {
  LoadSDNode *Ld = cast<LoadSDNode>(N);
  EVT RegVT = Ld->getValueType(0);
  EVT MemVT = Ld->getMemoryVT();
  SDLoc dl(Ld);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // For chips with slow 32-byte unaligned loads, break the 32-byte operation
  // into two 16-byte operations. Also split non-temporal aligned loads on
  // pre-AVX2 targets as 32-byte loads will lower to regular temporal loads.
  ISD::LoadExtType Ext = Ld->getExtensionType();
  bool Fast;
  if (RegVT.is256BitVector() && !DCI.isBeforeLegalizeOps() &&
      Ext == ISD::NON_EXTLOAD &&
      ((Ld->isNonTemporal() && !Subtarget.hasInt256() &&
        Ld->getAlignment() >= 16) ||
       (TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), RegVT,
                               *Ld->getMemOperand(), &Fast) &&
        !Fast))) {
    unsigned NumElems = RegVT.getVectorNumElements();
    if (NumElems < 2)
      return SDValue();

    unsigned HalfOffset = 16;
    SDValue Ptr1 = Ld->getBasePtr();
    SDValue Ptr2 = DAG.getMemBasePlusOffset(Ptr1, HalfOffset, dl);
    EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
                                  NumElems / 2);
    SDValue Load1 =
        DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr1, Ld->getPointerInfo(),
                    Ld->getOriginalAlign(),
                    Ld->getMemOperand()->getFlags());
    SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr2,
                                Ld->getPointerInfo().getWithOffset(HalfOffset),
                                Ld->getOriginalAlign(),
                                Ld->getMemOperand()->getFlags());
    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                             Load1.getValue(1), Load2.getValue(1));

    SDValue NewVec = DAG.getNode(ISD::CONCAT_VECTORS, dl, RegVT, Load1, Load2);
    return DCI.CombineTo(N, NewVec, TF, true);
  }

  // Bool vector load - attempt to cast to an integer, as we have good
  // (vXiY *ext(vXi1 bitcast(iX))) handling.
  if (Ext == ISD::NON_EXTLOAD && !Subtarget.hasAVX512() && RegVT.isVector() &&
      RegVT.getScalarType() == MVT::i1 && DCI.isBeforeLegalize()) {
    unsigned NumElts = RegVT.getVectorNumElements();
    EVT IntVT = EVT::getIntegerVT(*DAG.getContext(), NumElts);
    if (TLI.isTypeLegal(IntVT)) {
      SDValue IntLoad = DAG.getLoad(IntVT, dl, Ld->getChain(), Ld->getBasePtr(),
                                    Ld->getPointerInfo(),
                                    Ld->getOriginalAlign(),
                                    Ld->getMemOperand()->getFlags());
      SDValue BoolVec = DAG.getBitcast(RegVT, IntLoad);
      return DCI.CombineTo(N, BoolVec, IntLoad.getValue(1), true);
    }
  }

  // Cast ptr32 and ptr64 pointers to the default address space before a load.
  unsigned AddrSpace = Ld->getAddressSpace();
  if (AddrSpace == X86AS::PTR64 || AddrSpace == X86AS::PTR32_SPTR ||
      AddrSpace == X86AS::PTR32_UPTR) {
    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    if (PtrVT != Ld->getBasePtr().getSimpleValueType()) {
      SDValue Cast =
          DAG.getAddrSpaceCast(dl, PtrVT, Ld->getBasePtr(), AddrSpace, 0);
      return DAG.getLoad(RegVT, dl, Ld->getChain(), Cast, Ld->getPointerInfo(),
                         Ld->getOriginalAlign(),
                         Ld->getMemOperand()->getFlags());
    }
  }

  return SDValue();
}
/// If V is a build vector of boolean constants and exactly one of those
/// constants is true, return the operand index of that true element.
/// Otherwise, return -1.
static int getOneTrueElt(SDValue V) {
  // This needs to be a build vector of booleans.
  // TODO: Checking for the i1 type matches the IR definition for the mask,
  // but the mask check could be loosened to i8 or other types. That might
  // also require checking more than 'allOnesValue'; eg, the x86 HW
  // instructions only require that the MSB is set for each mask element.
  // The ISD::MSTORE comments/definition do not specify how the mask operand
  // is formatted.
  auto *BV = dyn_cast<BuildVectorSDNode>(V);
  if (!BV || BV->getValueType(0).getVectorElementType() != MVT::i1)
    return -1;

  int TrueIndex = -1;
  unsigned NumElts = BV->getValueType(0).getVectorNumElements();
  for (unsigned i = 0; i < NumElts; ++i) {
    const SDValue &Op = BV->getOperand(i);
    if (Op.isUndef())
      continue;
    auto *ConstNode = dyn_cast<ConstantSDNode>(Op);
    if (!ConstNode)
      return -1;
    if (ConstNode->getAPIntValue().isAllOnesValue()) {
      // If we already found a one, this is too many.
      if (TrueIndex >= 0)
        return -1;
      TrueIndex = i;
    }
  }
  return TrueIndex;
}
/// Given a masked memory load/store operation, return true if it has one mask
/// bit set. If it has one mask bit set, then also return the memory address of
/// the scalar element to load/store, the vector index to insert/extract that
/// scalar element, and the alignment for the scalar memory access.
static bool getParamsForOneTrueMaskedElt(MaskedLoadStoreSDNode *MaskedOp,
                                         SelectionDAG &DAG, SDValue &Addr,
                                         SDValue &Index, unsigned &Alignment) {
  int TrueMaskElt = getOneTrueElt(MaskedOp->getMask());
  if (TrueMaskElt < 0)
    return false;

  // Get the address of the one scalar element that is specified by the mask
  // using the appropriate offset from the base pointer.
  EVT EltVT = MaskedOp->getMemoryVT().getVectorElementType();
  Addr = MaskedOp->getBasePtr();
  if (TrueMaskElt != 0) {
    unsigned Offset = TrueMaskElt * EltVT.getStoreSize();
    Addr = DAG.getMemBasePlusOffset(Addr, Offset, SDLoc(MaskedOp));
  }

  Index = DAG.getIntPtrConstant(TrueMaskElt, SDLoc(MaskedOp));
  Alignment = MinAlign(MaskedOp->getAlignment(), EltVT.getStoreSize());
  return true;
}
/// If exactly one element of the mask is set for a non-extending masked load,
/// it is a scalar load and vector insert.
/// Note: It is expected that the degenerate cases of an all-zeros or all-ones
/// mask have already been optimized in IR, so we don't bother with those here.
static SDValue
reduceMaskedLoadToScalarLoad(MaskedLoadSDNode *ML, SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI) {
  assert(ML->isUnindexed() && "Unexpected indexed masked load!");
  // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
  // However, some target hooks may need to be added to know when the transform
  // is profitable. Endianness would also have to be considered.

  SDValue Addr, VecIndex;
  unsigned Alignment;
  if (!getParamsForOneTrueMaskedElt(ML, DAG, Addr, VecIndex, Alignment))
    return SDValue();

  // Load the one scalar element that is specified by the mask using the
  // appropriate offset from the base pointer.
  SDLoc DL(ML);
  EVT VT = ML->getValueType(0);
  EVT EltVT = VT.getVectorElementType();
  SDValue Load =
      DAG.getLoad(EltVT, DL, ML->getChain(), Addr, ML->getPointerInfo(),
                  Alignment, ML->getMemOperand()->getFlags());

  // Insert the loaded element into the appropriate place in the vector.
  SDValue Insert = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT,
                               ML->getPassThru(), Load, VecIndex);
  return DCI.CombineTo(ML, Insert, Load.getValue(1), true);
}
static SDValue
combineMaskedLoadConstantMask(MaskedLoadSDNode *ML, SelectionDAG &DAG,
                              TargetLowering::DAGCombinerInfo &DCI) {
  assert(ML->isUnindexed() && "Unexpected indexed masked load!");
  if (!ISD::isBuildVectorOfConstantSDNodes(ML->getMask().getNode()))
    return SDValue();

  SDLoc DL(ML);
  EVT VT = ML->getValueType(0);

  // If we are loading the first and last elements of a vector, it is safe and
  // always faster to load the whole vector. Replace the masked load with a
  // vector load and select.
  unsigned NumElts = VT.getVectorNumElements();
  BuildVectorSDNode *MaskBV = cast<BuildVectorSDNode>(ML->getMask());
  bool LoadFirstElt = !isNullConstant(MaskBV->getOperand(0));
  bool LoadLastElt = !isNullConstant(MaskBV->getOperand(NumElts - 1));
  if (LoadFirstElt && LoadLastElt) {
    SDValue VecLd = DAG.getLoad(VT, DL, ML->getChain(), ML->getBasePtr(),
                                ML->getMemOperand());
    SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), VecLd,
                                  ML->getPassThru());
    return DCI.CombineTo(ML, Blend, VecLd.getValue(1), true);
  }

  // Convert a masked load with a constant mask into a masked load and a
  // select. This allows the select operation to use a faster kind of select
  // instruction (for example, vblendvps -> vblendps).

  // Don't try this if the pass-through operand is already undefined. That
  // would cause an infinite loop because that's what we're about to create.
  if (ML->getPassThru().isUndef())
    return SDValue();

  if (ISD::isBuildVectorAllZeros(ML->getPassThru().getNode()))
    return SDValue();

  // The new masked load has an undef pass-through operand. The select uses the
  // original pass-through operand.
  SDValue NewML = DAG.getMaskedLoad(
      VT, DL, ML->getChain(), ML->getBasePtr(), ML->getOffset(), ML->getMask(),
      DAG.getUNDEF(VT), ML->getMemoryVT(), ML->getMemOperand(),
      ML->getAddressingMode(), ML->getExtensionType());
  SDValue Blend = DAG.getSelect(DL, VT, ML->getMask(), NewML,
                                ML->getPassThru());

  return DCI.CombineTo(ML, Blend, NewML.getValue(1), true);
}
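// E.g. a v4f32 masked load whose constant mask is <1,0,0,1> touches both the
// first and last elements, so loading the full 16 bytes is safe: it becomes a
// plain vector load plus (select mask, load, passthru), trading the masked
// load for a cheaper full load and blend.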
static SDValue combineMaskedLoad(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const X86Subtarget &Subtarget) {
  auto *Mld = cast<MaskedLoadSDNode>(N);

  // TODO: Expanding load with constant mask may be optimized as well.
  if (Mld->isExpandingLoad())
    return SDValue();

  if (Mld->getExtensionType() == ISD::NON_EXTLOAD) {
    if (SDValue ScalarLoad = reduceMaskedLoadToScalarLoad(Mld, DAG, DCI))
      return ScalarLoad;

    // TODO: Do some AVX512 subsets benefit from this transform?
    if (!Subtarget.hasAVX512())
      if (SDValue Blend = combineMaskedLoadConstantMask(Mld, DAG, DCI))
        return Blend;
  }

  // If the mask value has been legalized to a non-boolean vector, try to
  // simplify ops leading up to it. We only demand the MSB of each lane.
  SDValue Mask = Mld->getMask();
  if (Mask.getScalarValueSizeInBits() != 1) {
    EVT VT = Mld->getValueType(0);
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    APInt DemandedBits(APInt::getSignMask(VT.getScalarSizeInBits()));
    if (TLI.SimplifyDemandedBits(Mask, DemandedBits, DCI)) {
      if (N->getOpcode() != ISD::DELETED_NODE)
        DCI.AddToWorklist(N);
      return SDValue(N, 0);
    }
    if (SDValue NewMask =
            TLI.SimplifyMultipleUseDemandedBits(Mask, DemandedBits, DAG))
      return DAG.getMaskedLoad(
          VT, SDLoc(N), Mld->getChain(), Mld->getBasePtr(), Mld->getOffset(),
          NewMask, Mld->getPassThru(), Mld->getMemoryVT(), Mld->getMemOperand(),
          Mld->getAddressingMode(), Mld->getExtensionType());
  }

  return SDValue();
}
/// If exactly one element of the mask is set for a non-truncating masked
/// store, it is a vector extract and scalar store.
/// Note: It is expected that the degenerate cases of an all-zeros or all-ones
/// mask have already been optimized in IR, so we don't bother with those here.
static SDValue reduceMaskedStoreToScalarStore(MaskedStoreSDNode *MS,
                                              SelectionDAG &DAG) {
  // TODO: This is not x86-specific, so it could be lifted to DAGCombiner.
  // However, some target hooks may need to be added to know when the transform
  // is profitable. Endianness would also have to be considered.

  SDValue Addr, VecIndex;
  unsigned Alignment;
  if (!getParamsForOneTrueMaskedElt(MS, DAG, Addr, VecIndex, Alignment))
    return SDValue();

  // Extract the one scalar element that is actually being stored.
  SDLoc DL(MS);
  EVT VT = MS->getValue().getValueType();
  EVT EltVT = VT.getVectorElementType();
  SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT,
                                MS->getValue(), VecIndex);

  // Store that element at the appropriate offset from the base pointer.
  return DAG.getStore(MS->getChain(), DL, Extract, Addr, MS->getPointerInfo(),
                      Alignment, MS->getMemOperand()->getFlags());
}
static SDValue combineMaskedStore(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget &Subtarget) {
  MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
  if (Mst->isCompressingStore())
    return SDValue();

  EVT VT = Mst->getValue().getValueType();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (Mst->isTruncatingStore())
    return SDValue();

  if (SDValue ScalarStore = reduceMaskedStoreToScalarStore(Mst, DAG))
    return ScalarStore;

  // If the mask value has been legalized to a non-boolean vector, try to
  // simplify ops leading up to it. We only demand the MSB of each lane.
  SDValue Mask = Mst->getMask();
  if (Mask.getScalarValueSizeInBits() != 1) {
    APInt DemandedBits(APInt::getSignMask(VT.getScalarSizeInBits()));
    if (TLI.SimplifyDemandedBits(Mask, DemandedBits, DCI)) {
      if (N->getOpcode() != ISD::DELETED_NODE)
        DCI.AddToWorklist(N);
      return SDValue(N, 0);
    }
    if (SDValue NewMask =
            TLI.SimplifyMultipleUseDemandedBits(Mask, DemandedBits, DAG))
      return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Mst->getValue(),
                                Mst->getBasePtr(), Mst->getOffset(), NewMask,
                                Mst->getMemoryVT(), Mst->getMemOperand(),
                                Mst->getAddressingMode());
  }

  SDValue Value = Mst->getValue();
  if (Value.getOpcode() == ISD::TRUNCATE && Value.getNode()->hasOneUse() &&
      TLI.isTruncStoreLegal(Value.getOperand(0).getValueType(),
                            Mst->getMemoryVT())) {
    return DAG.getMaskedStore(Mst->getChain(), SDLoc(N), Value.getOperand(0),
                              Mst->getBasePtr(), Mst->getOffset(), Mask,
                              Mst->getMemoryVT(), Mst->getMemOperand(),
                              Mst->getAddressingMode(), true);
  }

  return SDValue();
}
static SDValue combineStore(SDNode *N, SelectionDAG &DAG,
                            TargetLowering::DAGCombinerInfo &DCI,
                            const X86Subtarget &Subtarget) {
  StoreSDNode *St = cast<StoreSDNode>(N);
  EVT StVT = St->getMemoryVT();
  SDLoc dl(St);
  SDValue StoredVal = St->getValue();
  EVT VT = StoredVal.getValueType();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // Convert a store of vXi1 into a store of iX and a bitcast.
  if (!Subtarget.hasAVX512() && VT == StVT && VT.isVector() &&
      VT.getVectorElementType() == MVT::i1) {

    EVT NewVT = EVT::getIntegerVT(*DAG.getContext(), VT.getVectorNumElements());
    StoredVal = DAG.getBitcast(NewVT, StoredVal);

    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
                        St->getPointerInfo(), St->getOriginalAlign(),
                        St->getMemOperand()->getFlags());
  }

  // If this is a store of a scalar_to_vector to v1i1, just use a scalar store.
  // This will avoid a copy to k-register.
  if (VT == MVT::v1i1 && VT == StVT && Subtarget.hasAVX512() &&
      StoredVal.getOpcode() == ISD::SCALAR_TO_VECTOR &&
      StoredVal.getOperand(0).getValueType() == MVT::i8) {
    return DAG.getStore(St->getChain(), dl, StoredVal.getOperand(0),
                        St->getBasePtr(), St->getPointerInfo(),
                        St->getOriginalAlign(),
                        St->getMemOperand()->getFlags());
  }

  // Widen v2i1/v4i1 stores to v8i1.
  if ((VT == MVT::v2i1 || VT == MVT::v4i1) && VT == StVT &&
      Subtarget.hasAVX512()) {
    unsigned NumConcats = 8 / VT.getVectorNumElements();
    SmallVector<SDValue, 4> Ops(NumConcats, DAG.getUNDEF(VT));
    Ops[0] = StoredVal;
    StoredVal = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i1, Ops);
    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
                        St->getPointerInfo(), St->getOriginalAlign(),
                        St->getMemOperand()->getFlags());
  }

  // Turn vXi1 stores of constants into a scalar store.
  if ((VT == MVT::v8i1 || VT == MVT::v16i1 || VT == MVT::v32i1 ||
       VT == MVT::v64i1) && VT == StVT && TLI.isTypeLegal(VT) &&
      ISD::isBuildVectorOfConstantSDNodes(StoredVal.getNode())) {
    // If it's a v64i1 store without 64-bit support, we need two stores.
    if (!DCI.isBeforeLegalize() && VT == MVT::v64i1 && !Subtarget.is64Bit()) {
      SDValue Lo = DAG.getBuildVector(MVT::v32i1, dl,
                                      StoredVal->ops().slice(0, 32));
      Lo = combinevXi1ConstantToInteger(Lo, DAG);
      SDValue Hi = DAG.getBuildVector(MVT::v32i1, dl,
                                      StoredVal->ops().slice(32, 32));
      Hi = combinevXi1ConstantToInteger(Hi, DAG);

      SDValue Ptr0 = St->getBasePtr();
      SDValue Ptr1 = DAG.getMemBasePlusOffset(Ptr0, 4, dl);

      SDValue Ch0 =
          DAG.getStore(St->getChain(), dl, Lo, Ptr0, St->getPointerInfo(),
                       St->getOriginalAlign(),
                       St->getMemOperand()->getFlags());
      SDValue Ch1 =
          DAG.getStore(St->getChain(), dl, Hi, Ptr1,
                       St->getPointerInfo().getWithOffset(4),
                       St->getOriginalAlign(),
                       St->getMemOperand()->getFlags());
      return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
    }

    StoredVal = combinevXi1ConstantToInteger(StoredVal, DAG);
    return DAG.getStore(St->getChain(), dl, StoredVal, St->getBasePtr(),
                        St->getPointerInfo(), St->getOriginalAlign(),
                        St->getMemOperand()->getFlags());
  }
  // If we are saving a 32-byte vector and 32-byte stores are slow, such as on
  // Sandy Bridge, perform two 16-byte stores.
  bool Fast;
  if (VT.is256BitVector() && StVT == VT &&
      TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
                             *St->getMemOperand(), &Fast) &&
      !Fast) {
    unsigned NumElems = VT.getVectorNumElements();
    if (NumElems < 2)
      return SDValue();

    return splitVectorStore(St, DAG);
  }

  // Split under-aligned vector non-temporal stores.
  if (St->isNonTemporal() && StVT == VT &&
      St->getAlignment() < VT.getStoreSize()) {
    // ZMM/YMM nt-stores - either it can be stored as a series of shorter
    // vectors or the legalizer can scalarize it to use MOVNTI.
    if (VT.is256BitVector() || VT.is512BitVector()) {
      unsigned NumElems = VT.getVectorNumElements();
      if (NumElems < 2)
        return SDValue();
      return splitVectorStore(St, DAG);
    }

    // XMM nt-stores - scalarize this to f64 nt-stores on SSE4A, else i32/i64
    // stores.
    if (VT.is128BitVector() && Subtarget.hasSSE2()) {
      MVT NTVT = Subtarget.hasSSE4A()
                     ? MVT::v2f64
                     : (TLI.isTypeLegal(MVT::i64) ? MVT::v2i64 : MVT::v4i32);
      return scalarizeVectorStore(St, NTVT, DAG);
    }
  }

  // Try to optimize v16i16->v16i8 truncating stores when BWI is not
  // supported, but AVX512F is, by extending to v16i32 and truncating.
  if (!St->isTruncatingStore() && VT == MVT::v16i8 && !Subtarget.hasBWI() &&
      St->getValue().getOpcode() == ISD::TRUNCATE &&
      St->getValue().getOperand(0).getValueType() == MVT::v16i16 &&
      TLI.isTruncStoreLegal(MVT::v16i32, MVT::v16i8) &&
      St->getValue().hasOneUse() && !DCI.isBeforeLegalizeOps()) {
    SDValue Ext = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::v16i32, St->getValue());
    return DAG.getTruncStore(St->getChain(), dl, Ext, St->getBasePtr(),
                             MVT::v16i8, St->getMemOperand());
  }

  // Try to fold a VTRUNCUS or VTRUNCS into a truncating store.
  if (!St->isTruncatingStore() && StoredVal.hasOneUse() &&
      (StoredVal.getOpcode() == X86ISD::VTRUNCUS ||
       StoredVal.getOpcode() == X86ISD::VTRUNCS) &&
      TLI.isTruncStoreLegal(StoredVal.getOperand(0).getValueType(), VT)) {
    bool IsSigned = StoredVal.getOpcode() == X86ISD::VTRUNCS;
    return EmitTruncSStore(IsSigned, St->getChain(),
                           dl, StoredVal.getOperand(0), St->getBasePtr(),
                           VT, St->getMemOperand(), DAG);
  }

  // Optimize trunc store (of multiple scalars) to shuffle and store.
  // First, pack all of the elements in one place. Next, store to memory
  // in fewer chunks.
  if (St->isTruncatingStore() && VT.isVector()) {
    // Check if we can detect an AVG pattern from the truncation. If yes,
    // replace the trunc store by a normal store with the result of the
    // X86ISD::AVG instruction.
    if (DCI.isBeforeLegalize() || TLI.isTypeLegal(St->getMemoryVT()))
      if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
                                         Subtarget, dl))
        return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
                            St->getPointerInfo(), St->getOriginalAlign(),
                            St->getMemOperand()->getFlags());

    if (TLI.isTruncStoreLegal(VT, StVT)) {
      if (SDValue Val = detectSSatPattern(St->getValue(), St->getMemoryVT()))
        return EmitTruncSStore(true /* Signed saturation */, St->getChain(),
                               dl, Val, St->getBasePtr(),
                               St->getMemoryVT(), St->getMemOperand(), DAG);
      if (SDValue Val = detectUSatPattern(St->getValue(), St->getMemoryVT(),
                                          DAG, dl))
        return EmitTruncSStore(false /* Unsigned saturation */, St->getChain(),
                               dl, Val, St->getBasePtr(),
                               St->getMemoryVT(), St->getMemOperand(), DAG);
    }

    return SDValue();
  }
  // Cast ptr32 and ptr64 pointers to the default address space before a store.
  unsigned AddrSpace = St->getAddressSpace();
  if (AddrSpace == X86AS::PTR64 || AddrSpace == X86AS::PTR32_SPTR ||
      AddrSpace == X86AS::PTR32_UPTR) {
    MVT PtrVT = TLI.getPointerTy(DAG.getDataLayout());
    if (PtrVT != St->getBasePtr().getSimpleValueType()) {
      SDValue Cast =
          DAG.getAddrSpaceCast(dl, PtrVT, St->getBasePtr(), AddrSpace, 0);
      return DAG.getStore(St->getChain(), dl, StoredVal, Cast,
                          St->getPointerInfo(), St->getOriginalAlign(),
                          St->getMemOperand()->getFlags(), St->getAAInfo());
    }
  }

  // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
  // the FP state in cases where an emms may be missing.
  // A preferable solution to the general problem is to figure out the right
  // places to insert EMMS. This qualifies as a quick hack.
  //
  // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
  if (VT.getSizeInBits() != 64)
    return SDValue();

  const Function &F = DAG.getMachineFunction().getFunction();
  bool NoImplicitFloatOps = F.hasFnAttribute(Attribute::NoImplicitFloat);
  bool F64IsLegal =
      !Subtarget.useSoftFloat() && !NoImplicitFloatOps && Subtarget.hasSSE2();
  if ((VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit()) &&
      isa<LoadSDNode>(St->getValue()) &&
      cast<LoadSDNode>(St->getValue())->isSimple() &&
      St->getChain().hasOneUse() && St->isSimple()) {
    LoadSDNode *Ld = cast<LoadSDNode>(St->getValue().getNode());

    if (!ISD::isNormalLoad(Ld))
      return SDValue();

    // Avoid the transformation if there are multiple uses of the loaded value.
    if (!Ld->hasNUsesOfValue(1, 0))
      return SDValue();

    SDLoc LdDL(Ld);
    SDLoc StDL(N);
    // Lower to a single movq load/store pair.
    SDValue NewLd = DAG.getLoad(MVT::f64, LdDL, Ld->getChain(),
                                Ld->getBasePtr(), Ld->getMemOperand());

    // Make sure new load is placed in same chain order.
    DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
    return DAG.getStore(St->getChain(), StDL, NewLd, St->getBasePtr(),
                        St->getMemOperand());
  }

  // This is similar to the above case, but here we handle a scalar 64-bit
  // integer store that is extracted from a vector on a 32-bit target.
  // If we have SSE2, then we can treat it like a floating-point double
  // to get past legalization. The execution dependencies fixup pass will
  // choose the optimal machine instruction for the store if this really is
  // an integer or v2f32 rather than an f64.
  if (VT == MVT::i64 && F64IsLegal && !Subtarget.is64Bit() &&
      St->getOperand(1).getOpcode() == ISD::EXTRACT_VECTOR_ELT) {
    SDValue OldExtract = St->getOperand(1);
    SDValue ExtOp0 = OldExtract.getOperand(0);
    unsigned VecSize = ExtOp0.getValueSizeInBits();
    EVT VecVT = EVT::getVectorVT(*DAG.getContext(), MVT::f64, VecSize / 64);
    SDValue BitCast = DAG.getBitcast(VecVT, ExtOp0);
    SDValue NewExtract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
                                     BitCast, OldExtract.getOperand(1));
    return DAG.getStore(St->getChain(), dl, NewExtract, St->getBasePtr(),
                        St->getPointerInfo(), St->getOriginalAlign(),
                        St->getMemOperand()->getFlags());
  }

  return SDValue();
}
static SDValue combineVEXTRACT_STORE(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const X86Subtarget &Subtarget) {
  auto *St = cast<MemIntrinsicSDNode>(N);

  SDValue StoredVal = N->getOperand(1);
  MVT VT = StoredVal.getSimpleValueType();
  EVT MemVT = St->getMemoryVT();

  // Figure out which elements we demand.
  unsigned StElts = MemVT.getSizeInBits() / VT.getScalarSizeInBits();
  APInt DemandedElts = APInt::getLowBitsSet(VT.getVectorNumElements(), StElts);

  APInt KnownUndef, KnownZero;
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (TLI.SimplifyDemandedVectorElts(StoredVal, DemandedElts, KnownUndef,
                                     KnownZero, DCI)) {
    if (N->getOpcode() != ISD::DELETED_NODE)
      DCI.AddToWorklist(N);
    return SDValue(N, 0);
  }

  return SDValue();
}
/// Return 'true' if this vector operation is "horizontal"
/// and return the operands for the horizontal operation in LHS and RHS. A
/// horizontal operation performs the binary operation on successive elements
/// of its first operand, then on successive elements of its second operand,
/// returning the resulting values in a vector. For example, if
///   A = < float a0, float a1, float a2, float a3 >
/// and
///   B = < float b0, float b1, float b2, float b3 >
/// then the result of doing a horizontal operation on A and B is
///   A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
/// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
/// A horizontal-op B, for some already available A and B, and if so then LHS
/// is set to A, RHS to B, and the routine returns 'true'.
static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, SelectionDAG &DAG,
                              const X86Subtarget &Subtarget, bool IsCommutative,
                              SmallVectorImpl<int> &PostShuffleMask) {
  // If either operand is undef, bail out. The binop should be simplified.
  if (LHS.isUndef() || RHS.isUndef())
    return false;

  // Look for the following pattern:
  //   A = < float a0, float a1, float a2, float a3 >
  //   B = < float b0, float b1, float b2, float b3 >
  // and
  //   LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
  //   RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
  // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
  // which is A horizontal-op B.

  MVT VT = LHS.getSimpleValueType();
  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "Unsupported vector type for horizontal add/sub");
  unsigned NumElts = VT.getVectorNumElements();

  // TODO - can we make a general helper method that does all of this for us?
  auto GetShuffle = [&](SDValue Op, SDValue &N0, SDValue &N1,
                        SmallVectorImpl<int> &ShuffleMask) {
    if (Op.getOpcode() == ISD::VECTOR_SHUFFLE) {
      if (!Op.getOperand(0).isUndef())
        N0 = Op.getOperand(0);
      if (!Op.getOperand(1).isUndef())
        N1 = Op.getOperand(1);
      ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask();
      ShuffleMask.append(Mask.begin(), Mask.end());
      return;
    }
    bool UseSubVector = false;
    if (Op.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
        Op.getOperand(0).getValueType().is256BitVector() &&
        llvm::isNullConstant(Op.getOperand(1))) {
      Op = Op.getOperand(0);
      UseSubVector = true;
    }
    bool IsUnary;
    SmallVector<SDValue, 2> SrcOps;
    SmallVector<int, 16> SrcShuffleMask;
    SDValue BC = peekThroughBitcasts(Op);
    if (isTargetShuffle(BC.getOpcode()) &&
        getTargetShuffleMask(BC.getNode(), BC.getSimpleValueType(), false,
                             SrcOps, SrcShuffleMask, IsUnary)) {
      if (!UseSubVector && SrcShuffleMask.size() == NumElts &&
          SrcOps.size() <= 2) {
        N0 = SrcOps.size() > 0 ? SrcOps[0] : SDValue();
        N1 = SrcOps.size() > 1 ? SrcOps[1] : SDValue();
        ShuffleMask.append(SrcShuffleMask.begin(), SrcShuffleMask.end());
      }
      if (UseSubVector && (SrcShuffleMask.size() == (NumElts * 2)) &&
          SrcOps.size() == 1) {
        N0 = extract128BitVector(SrcOps[0], 0, DAG, SDLoc(Op));
        N1 = extract128BitVector(SrcOps[0], NumElts, DAG, SDLoc(Op));
        ArrayRef<int> Mask = ArrayRef<int>(SrcShuffleMask).slice(0, NumElts);
        ShuffleMask.append(Mask.begin(), Mask.end());
      }
    }
  };
  // View LHS in the form
  //   LHS = VECTOR_SHUFFLE A, B, LMask
  // If LHS is not a shuffle, then pretend it is the identity shuffle:
  //   LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
  // NOTE: A default initialized SDValue represents an UNDEF of type VT.
  SDValue A, B;
  SmallVector<int, 16> LMask;
  GetShuffle(LHS, A, B, LMask);

  // Likewise, view RHS in the form
  //   RHS = VECTOR_SHUFFLE C, D, RMask
  SDValue C, D;
  SmallVector<int, 16> RMask;
  GetShuffle(RHS, C, D, RMask);

  // At least one of the operands should be a vector shuffle.
  unsigned NumShuffles = (LMask.empty() ? 0 : 1) + (RMask.empty() ? 0 : 1);
  if (NumShuffles == 0)
    return false;

  if (LMask.empty()) {
    A = LHS;
    for (unsigned i = 0; i != NumElts; ++i)
      LMask.push_back(i);
  }

  if (RMask.empty()) {
    C = RHS;
    for (unsigned i = 0; i != NumElts; ++i)
      RMask.push_back(i);
  }

  // Avoid 128-bit lane crossing if pre-AVX2 and FP (integer will split).
  if (!Subtarget.hasAVX2() && VT.isFloatingPoint() &&
      (isLaneCrossingShuffleMask(128, VT.getScalarSizeInBits(), LMask) ||
       isLaneCrossingShuffleMask(128, VT.getScalarSizeInBits(), RMask)))
    return false;

  // If A and B occur in reverse order in RHS, then canonicalize by commuting
  // RHS operands and shuffle mask.
  if (A != C) {
    std::swap(C, D);
    ShuffleVectorSDNode::commuteMask(RMask);
  }
  // Check that the shuffles are both shuffling the same vectors.
  if (!(A == C && B == D))
    return false;

  PostShuffleMask.clear();
  PostShuffleMask.append(NumElts, SM_SentinelUndef);

  // LHS and RHS are now:
  //   LHS = shuffle A, B, LMask
  //   RHS = shuffle A, B, RMask
  // Check that the masks correspond to performing a horizontal operation.
  // AVX defines horizontal add/sub to operate independently on 128-bit lanes,
  // so we just repeat the inner loop if this is a 256-bit op.
  unsigned Num128BitChunks = VT.getSizeInBits() / 128;
  unsigned NumEltsPer128BitChunk = NumElts / Num128BitChunks;
  unsigned NumEltsPer64BitChunk = NumEltsPer128BitChunk / 2;
  assert((NumEltsPer128BitChunk % 2 == 0) &&
         "Vector type should have an even number of elements in each lane");
  for (unsigned j = 0; j != NumElts; j += NumEltsPer128BitChunk) {
    for (unsigned i = 0; i != NumEltsPer128BitChunk; ++i) {
      // Ignore undefined components.
      int LIdx = LMask[i + j], RIdx = RMask[i + j];
      if (LIdx < 0 || RIdx < 0 ||
          (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
          (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
        continue;

      // Check that successive odd/even elements are being operated on. If not,
      // this is not a horizontal operation.
      if (!((RIdx & 1) == 1 && (LIdx + 1) == RIdx) &&
          !((LIdx & 1) == 1 && (RIdx + 1) == LIdx && IsCommutative))
        return false;

      // Compute the post-shuffle mask index based on where the element
      // is stored in the HOP result, and where it needs to be moved to.
      int Base = LIdx & ~1u;
      int Index = ((Base % NumEltsPer128BitChunk) / 2) +
                  ((Base % NumElts) & ~(NumEltsPer128BitChunk - 1));

      // The low half of the 128-bit result must choose from A.
      // The high half of the 128-bit result must choose from B,
      // unless B is undef. In that case, we are always choosing from A.
      if ((B && Base >= (int)NumElts) || (!B && i >= NumEltsPer64BitChunk))
        Index += NumEltsPer64BitChunk;
      PostShuffleMask[i + j] = Index;
    }
  }

  LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
  RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.

  bool IsIdentityPostShuffle =
      isSequentialOrUndefInRange(PostShuffleMask, 0, NumElts, 0);
  if (IsIdentityPostShuffle)
    PostShuffleMask.clear();

  // Assume a SingleSource HOP if we only shuffle one input and don't need to
  // shuffle the result.
  if (!shouldUseHorizontalOp(LHS == RHS &&
                                 (NumShuffles < 2 || !IsIdentityPostShuffle),
                             DAG, Subtarget))
    return false;

  LHS = DAG.getBitcast(VT, LHS);
  RHS = DAG.getBitcast(VT, RHS);
  return true;
}
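// Example: with v4f32 operands A and B,
//   LHS = shuffle(A, B, <0,2,4,6>) and RHS = shuffle(A, B, <1,3,5,7>)
// makes fadd(LHS, RHS) equal to <a0+a1, a2+a3, b0+b1, b2+b3>, i.e.
// HADDPS(A, B), and the post-shuffle mask stays the identity.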
/// Do target-specific dag combines on floating-point adds/subs.
static SDValue combineFaddFsub(SDNode *N, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  bool IsFadd = N->getOpcode() == ISD::FADD;
  auto HorizOpcode = IsFadd ? X86ISD::FHADD : X86ISD::FHSUB;
  assert((IsFadd || N->getOpcode() == ISD::FSUB) && "Wrong opcode");

  // Try to synthesize horizontal add/sub from adds/subs of shuffles.
  SmallVector<int, 8> PostShuffleMask;
  if (((Subtarget.hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
       (Subtarget.hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
      isHorizontalBinOp(LHS, RHS, DAG, Subtarget, IsFadd, PostShuffleMask)) {
    SDValue HorizBinOp = DAG.getNode(HorizOpcode, SDLoc(N), VT, LHS, RHS);
    if (!PostShuffleMask.empty())
      HorizBinOp = DAG.getVectorShuffle(VT, SDLoc(HorizBinOp), HorizBinOp,
                                        DAG.getUNDEF(VT), PostShuffleMask);
    return HorizBinOp;
  }

  // NOTE: isHorizontalBinOp may have changed LHS/RHS variables.

  return SDValue();
}
44591 /// Attempt to pre-truncate inputs to arithmetic ops if it will simplify
44593 /// e.g. TRUNC( BINOP( X, Y ) ) --> BINOP( TRUNC( X ), TRUNC( Y ) )
44594 /// TODO: This overlaps with the generic combiner's visitTRUNCATE. Remove
44595 /// anything that is guaranteed to be transformed by DAGCombiner.
44596 static SDValue combineTruncatedArithmetic(SDNode *N, SelectionDAG &DAG,
44597 const X86Subtarget &Subtarget,
44599 assert(N->getOpcode() == ISD::TRUNCATE && "Wrong opcode");
44600 SDValue Src = N->getOperand(0);
44601 unsigned SrcOpcode = Src.getOpcode();
44602 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
44604 EVT VT = N->getValueType(0);
44605 EVT SrcVT = Src.getValueType();
44607 auto IsFreeTruncation = [VT](SDValue Op) {
44608 unsigned TruncSizeInBits = VT.getScalarSizeInBits();
44610 // See if this has been extended from a smaller/equal size to
44611 // the truncation size, allowing a truncation to combine with the extend.
44612 unsigned Opcode = Op.getOpcode();
44613 if ((Opcode == ISD::ANY_EXTEND || Opcode == ISD::SIGN_EXTEND ||
44614 Opcode == ISD::ZERO_EXTEND) &&
44615 Op.getOperand(0).getScalarValueSizeInBits() <= TruncSizeInBits)
44618 // See if this is a single use constant which can be constant folded.
44619 // NOTE: We don't peek throught bitcasts here because there is currently
44620 // no support for constant folding truncate+bitcast+vector_of_constants. So
44621 // we'll just send up with a truncate on both operands which will
44622 // get turned back into (truncate (binop)) causing an infinite loop.
44623 return ISD::isBuildVectorOfConstantSDNodes(Op.getNode());
44626 auto TruncateArithmetic = [&](SDValue N0, SDValue N1) {
44627 SDValue Trunc0 = DAG.getNode(ISD::TRUNCATE, DL, VT, N0);
44628 SDValue Trunc1 = DAG.getNode(ISD::TRUNCATE, DL, VT, N1);
44629 return DAG.getNode(SrcOpcode, DL, VT, Trunc0, Trunc1);
44632 // Don't combine if the operation has other uses.
44633 if (!Src.hasOneUse())
44636 // Only support vector truncation for now.
44637 // TODO: i64 scalar math would benefit as well.
44638 if (!VT.isVector())
44641 // In most cases its only worth pre-truncating if we're only facing the cost
44642 // of one truncation.
44643 // i.e. if one of the inputs will constant fold or the input is repeated.
44644 switch (SrcOpcode) {
44646 // X86 is rubbish at scalar and vector i64 multiplies (until AVX512DQ) - its
44647 // better to truncate if we have the chance.
44648 if (SrcVT.getScalarType() == MVT::i64 &&
44649 TLI.isOperationLegal(SrcOpcode, VT) &&
44650 !TLI.isOperationLegal(SrcOpcode, SrcVT))
44651 return TruncateArithmetic(Src.getOperand(0), Src.getOperand(1));
44652 LLVM_FALLTHROUGH;
44653 case ISD::AND:
44654 case ISD::XOR:
44655 case ISD::OR:
44656 case ISD::ADD:
44657 case ISD::SUB: {
44658 SDValue Op0 = Src.getOperand(0);
44659 SDValue Op1 = Src.getOperand(1);
44660 if (TLI.isOperationLegal(SrcOpcode, VT) &&
44661 (Op0 == Op1 || IsFreeTruncation(Op0) || IsFreeTruncation(Op1)))
44662 return TruncateArithmetic(Op0, Op1);
44663 break;
44664 }
44665 }
44667 return SDValue();
44668 }
44670 /// Truncate using ISD::AND mask and X86ISD::PACKUS.
44671 /// e.g. trunc <8 x i32> X to <8 x i16> -->
44672 /// MaskX = X & 0xffff (clear high bits to prevent saturation)
44673 /// packus (extract_subv MaskX, 0), (extract_subv MaskX, 1)
44674 static SDValue combineVectorTruncationWithPACKUS(SDNode *N, const SDLoc &DL,
44675 const X86Subtarget &Subtarget,
44676 SelectionDAG &DAG) {
44677 SDValue In = N->getOperand(0);
44678 EVT InVT = In.getValueType();
44679 EVT OutVT = N->getValueType(0);
44681 APInt Mask = APInt::getLowBitsSet(InVT.getScalarSizeInBits(),
44682 OutVT.getScalarSizeInBits());
44683 In = DAG.getNode(ISD::AND, DL, InVT, In, DAG.getConstant(Mask, DL, InVT));
44684 return truncateVectorWithPACK(X86ISD::PACKUS, OutVT, In, DL, DAG, Subtarget);
44687 /// Truncate a group of v4i32 into v8i16 using X86ISD::PACKSS.
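/// e.g., conceptually, trunc <8 x i32> X to <8 x i16> -->
///   SignX = sext_in_reg X, i16 (each element now fits in i16, so PACKSS's
///                               signed saturation is a no-op)
///   packss (extract_subv SignX, 0), (extract_subv SignX, 1)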
44688 static SDValue combineVectorTruncationWithPACKSS(SDNode *N, const SDLoc &DL,
44689 const X86Subtarget &Subtarget,
44690 SelectionDAG &DAG) {
44691 SDValue In = N->getOperand(0);
44692 EVT InVT = In.getValueType();
44693 EVT OutVT = N->getValueType(0);
44694 In = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, InVT, In,
44695 DAG.getValueType(OutVT));
44696 return truncateVectorWithPACK(X86ISD::PACKSS, OutVT, In, DL, DAG, Subtarget);
44699 /// This function transforms truncation from vXi32/vXi64 to vXi8/vXi16 into
44700 /// X86ISD::PACKUS/X86ISD::PACKSS operations. We do it here because after type
44701 /// legalization the truncation will be translated into a BUILD_VECTOR with each
44702 /// element that is extracted from a vector and then truncated, and it is
44703 /// difficult to do this optimization based on them.
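/// For example (illustrative), with SSE2 only, (v8i16 (trunc (v8i32 X))) takes
/// the PACKSS path below, while i8 results and SSE4.1 targets use PACKUS.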
44704 static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
44705 const X86Subtarget &Subtarget) {
44706 EVT OutVT = N->getValueType(0);
44707 if (!OutVT.isVector())
44710 SDValue In = N->getOperand(0);
44711 if (!In.getValueType().isSimple())
44714 EVT InVT = In.getValueType();
44715 unsigned NumElems = OutVT.getVectorNumElements();
44717 // TODO: On AVX2, the behavior of X86ISD::PACKUS is different from that on
44718 // SSE2, and we need to take care of it specially.
44719 // AVX512 provides vpmovdb.
44720 if (!Subtarget.hasSSE2() || Subtarget.hasAVX2())
44723 EVT OutSVT = OutVT.getVectorElementType();
44724 EVT InSVT = InVT.getVectorElementType();
44725 if (!((InSVT == MVT::i32 || InSVT == MVT::i64) &&
44726 (OutSVT == MVT::i8 || OutSVT == MVT::i16) && isPowerOf2_32(NumElems) &&
44727 NumElems >= 8))
44728 return SDValue();
44731 // SSSE3's pshufb results in fewer instructions in the cases below.
44731 if (Subtarget.hasSSSE3() && NumElems == 8 &&
44732 ((OutSVT == MVT::i8 && InSVT != MVT::i64) ||
44733 (InSVT == MVT::i32 && OutSVT == MVT::i16)))
44734 return SDValue();
44736 SDLoc DL(N);
44737 // SSE2 provides PACKUS for only 2 x v8i16 -> v16i8 and SSE4.1 provides PACKUS
44738 // for 2 x v4i32 -> v8i16. For SSSE3 and below, we need to use PACKSS to
44739 // truncate 2 x v4i32 to v8i16.
44740 if (Subtarget.hasSSE41() || OutSVT == MVT::i8)
44741 return combineVectorTruncationWithPACKUS(N, DL, Subtarget, DAG);
44742 if (InSVT == MVT::i32)
44743 return combineVectorTruncationWithPACKSS(N, DL, Subtarget, DAG);
44745 return SDValue();
44746 }
44748 /// This function transforms vector truncation of 'extended sign-bits' or
44749 /// 'extended zero-bits' values.
44750 /// vXi16/vXi32/vXi64 to vXi8/vXi16/vXi32 into X86ISD::PACKSS/PACKUS operations.
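/// e.g. a v8i32 all-ones/zero compare result has 32 sign bits per element, so
/// it can be truncated to v8i16 with a single PACKSS node.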
44751 static SDValue combineVectorSignBitsTruncation(SDNode *N, const SDLoc &DL,
44752 SelectionDAG &DAG,
44753 const X86Subtarget &Subtarget) {
44755 if (!Subtarget.hasSSE2())
44758 if (!N->getValueType(0).isVector() || !N->getValueType(0).isSimple())
44761 SDValue In = N->getOperand(0);
44762 if (!In.getValueType().isSimple())
44765 MVT VT = N->getValueType(0).getSimpleVT();
44766 MVT SVT = VT.getScalarType();
44768 MVT InVT = In.getValueType().getSimpleVT();
44769 MVT InSVT = InVT.getScalarType();
44771 // Check we have a truncation suited for PACKSS/PACKUS.
44772 if (!isPowerOf2_32(VT.getVectorNumElements()))
44774 if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32)
44776 if (InSVT != MVT::i16 && InSVT != MVT::i32 && InSVT != MVT::i64)
44779 // Truncation to sub-128bit vXi32 can be better handled with shuffles.
44780 if (SVT == MVT::i32 && VT.getSizeInBits() < 128)
44783 // AVX512 has fast truncate, but if the input is already going to be split,
44784 // there's no harm in trying pack.
44785 if (Subtarget.hasAVX512() &&
44786 !(!Subtarget.useAVX512Regs() && VT.is256BitVector() &&
44787 InVT.is512BitVector()))
44790 unsigned NumPackedSignBits = std::min<unsigned>(SVT.getSizeInBits(), 16);
44791 unsigned NumPackedZeroBits = Subtarget.hasSSE41() ? NumPackedSignBits : 8;
44793 // Use PACKUS if the input has zero-bits that extend all the way to the
44794 // packed/truncated value. e.g. masks, zext_in_reg, etc.
44795 KnownBits Known = DAG.computeKnownBits(In);
44796 unsigned NumLeadingZeroBits = Known.countMinLeadingZeros();
44797 if (NumLeadingZeroBits >= (InSVT.getSizeInBits() - NumPackedZeroBits))
44798 return truncateVectorWithPACK(X86ISD::PACKUS, VT, In, DL, DAG, Subtarget);
44800 // Use PACKSS if the input has sign-bits that extend all the way to the
44801 // packed/truncated value. e.g. Comparison result, sext_in_reg, etc.
44802 unsigned NumSignBits = DAG.ComputeNumSignBits(In);
44804 // Don't use PACKSS for vXi64 -> vXi32 truncations unless we're dealing with
44805 // a sign splat. ComputeNumSignBits struggles to see through BITCASTs later
44806 // on and combines/simplifications can't then use it.
44807 if (SVT == MVT::i32 && NumSignBits != InSVT.getSizeInBits())
44810 if (NumSignBits > (InSVT.getSizeInBits() - NumPackedSignBits))
44811 return truncateVectorWithPACK(X86ISD::PACKSS, VT, In, DL, DAG, Subtarget);
44813 return SDValue();
44814 }
44816 // Try to form a MULHU or MULHS node by looking for
44817 // (trunc (srl (mul ext, ext), 16))
44818 // TODO: This is X86 specific because we want to be able to handle wide types
44819 // before type legalization. But we can only do it if the vector will be
44820 // legalized via widening/splitting. Type legalization can't handle promotion
44821 // of a MULHU/MULHS. There isn't a way to convey this to the generic DAG
44822 // combiner.
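// For example:
//   (v8i16 (trunc (srl (mul (v8i32 (zext X)), (v8i32 (zext Y))), 16)))
//     --> (v8i16 (mulhu X, Y))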
44823 static SDValue combinePMULH(SDValue Src, EVT VT, const SDLoc &DL,
44824 SelectionDAG &DAG, const X86Subtarget &Subtarget) {
44825 // First instruction should be a right shift of a multiply.
44826 if (Src.getOpcode() != ISD::SRL ||
44827 Src.getOperand(0).getOpcode() != ISD::MUL)
44830 if (!Subtarget.hasSSE2())
44833 // Only handle vXi16 types that are at least 128-bits unless they will be
44834 // widened.
44835 if (!VT.isVector() || VT.getVectorElementType() != MVT::i16)
44838 // Input type should be at least vXi32.
44839 EVT InVT = Src.getValueType();
44840 if (InVT.getVectorElementType().getSizeInBits() < 32)
44843 // Need a shift by 16.
44844 APInt ShiftAmt;
44845 if (!ISD::isConstantSplatVector(Src.getOperand(1).getNode(), ShiftAmt) ||
44846 ShiftAmt != 16)
44847 return SDValue();
44849 SDValue LHS = Src.getOperand(0).getOperand(0);
44850 SDValue RHS = Src.getOperand(0).getOperand(1);
44852 unsigned ExtOpc = LHS.getOpcode();
44853 if ((ExtOpc != ISD::SIGN_EXTEND && ExtOpc != ISD::ZERO_EXTEND) ||
44854 RHS.getOpcode() != ExtOpc)
44857 // Peek through the extends.
44858 LHS = LHS.getOperand(0);
44859 RHS = RHS.getOperand(0);
44861 // Ensure the input types match.
44862 if (LHS.getValueType() != VT || RHS.getValueType() != VT)
44865 unsigned Opc = ExtOpc == ISD::SIGN_EXTEND ? ISD::MULHS : ISD::MULHU;
44866 return DAG.getNode(Opc, DL, VT, LHS, RHS);
44867 }
44869 // Attempt to match PMADDUBSW, which multiplies corresponding unsigned bytes
44870 // from one vector with signed bytes from another vector, adds together
44871 // adjacent pairs of 16-bit products, and saturates the result before
44872 // truncating to 16-bits.
44874 // Which looks something like this:
44875 // (i16 (ssat (add (mul (zext (even elts (i8 A))), (sext (even elts (i8 B)))),
44876 // (mul (zext (odd elts (i8 A)), (sext (odd elts (i8 B))))))))
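// i.e., per result element i (A's bytes used unsigned, B's signed):
//   R[i] = ssat_i16(zext(A[2*i]) * sext(B[2*i]) +
//                   zext(A[2*i+1]) * sext(B[2*i+1]))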
44877 static SDValue detectPMADDUBSW(SDValue In, EVT VT, SelectionDAG &DAG,
44878 const X86Subtarget &Subtarget,
44879 const SDLoc &DL) {
44880 if (!VT.isVector() || !Subtarget.hasSSSE3())
44883 unsigned NumElems = VT.getVectorNumElements();
44884 EVT ScalarVT = VT.getVectorElementType();
44885 if (ScalarVT != MVT::i16 || NumElems < 8 || !isPowerOf2_32(NumElems))
44888 SDValue SSatVal = detectSSatPattern(In, VT);
44889 if (!SSatVal || SSatVal.getOpcode() != ISD::ADD)
44892 // OK, this is a signed saturation of an ADD. See if this ADD is adding pairs
44893 // of multiplies from even/odd elements.
44894 SDValue N0 = SSatVal.getOperand(0);
44895 SDValue N1 = SSatVal.getOperand(1);
44897 if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
44900 SDValue N00 = N0.getOperand(0);
44901 SDValue N01 = N0.getOperand(1);
44902 SDValue N10 = N1.getOperand(0);
44903 SDValue N11 = N1.getOperand(1);
44905 // TODO: Handle constant vectors and use knownbits/computenumsignbits?
44906 // Canonicalize zero_extend to LHS.
44907 if (N01.getOpcode() == ISD::ZERO_EXTEND)
44908 std::swap(N00, N01);
44909 if (N11.getOpcode() == ISD::ZERO_EXTEND)
44910 std::swap(N10, N11);
44912 // Ensure we have a zero_extend and a sign_extend.
44913 if (N00.getOpcode() != ISD::ZERO_EXTEND ||
44914 N01.getOpcode() != ISD::SIGN_EXTEND ||
44915 N10.getOpcode() != ISD::ZERO_EXTEND ||
44916 N11.getOpcode() != ISD::SIGN_EXTEND)
44919 // Peek through the extends.
44920 N00 = N00.getOperand(0);
44921 N01 = N01.getOperand(0);
44922 N10 = N10.getOperand(0);
44923 N11 = N11.getOperand(0);
44925 // Ensure the extend is from vXi8.
44926 if (N00.getValueType().getVectorElementType() != MVT::i8 ||
44927 N01.getValueType().getVectorElementType() != MVT::i8 ||
44928 N10.getValueType().getVectorElementType() != MVT::i8 ||
44929 N11.getValueType().getVectorElementType() != MVT::i8)
44932 // All inputs should be build_vectors.
44933 if (N00.getOpcode() != ISD::BUILD_VECTOR ||
44934 N01.getOpcode() != ISD::BUILD_VECTOR ||
44935 N10.getOpcode() != ISD::BUILD_VECTOR ||
44936 N11.getOpcode() != ISD::BUILD_VECTOR)
44939 // N00/N10 are zero extended. N01/N11 are sign extended.
44941 // For each element, we need to ensure we have an odd element from one vector
44942 // multiplied by the odd element of another vector and the even element from
44943 // one of the same vectors being multiplied by the even element from the
44944 // other vector. So we need to make sure for each element i, this operator
44945 // is being performed:
44946 // A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
44947 SDValue ZExtIn, SExtIn;
44948 for (unsigned i = 0; i != NumElems; ++i) {
44949 SDValue N00Elt = N00.getOperand(i);
44950 SDValue N01Elt = N01.getOperand(i);
44951 SDValue N10Elt = N10.getOperand(i);
44952 SDValue N11Elt = N11.getOperand(i);
44953 // TODO: Be more tolerant to undefs.
44954 if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
44955 N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
44956 N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
44957 N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
44959 auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
44960 auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
44961 auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
44962 auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
44963 if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
44965 unsigned IdxN00 = ConstN00Elt->getZExtValue();
44966 unsigned IdxN01 = ConstN01Elt->getZExtValue();
44967 unsigned IdxN10 = ConstN10Elt->getZExtValue();
44968 unsigned IdxN11 = ConstN11Elt->getZExtValue();
44969 // Add is commutative so indices can be reordered.
44970 if (IdxN00 > IdxN10) {
44971 std::swap(IdxN00, IdxN10);
44972 std::swap(IdxN01, IdxN11);
44973 }
44974 // N0 indices must be the even element. N1 indices must be the next odd element.
44975 if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
44976 IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
44978 SDValue N00In = N00Elt.getOperand(0);
44979 SDValue N01In = N01Elt.getOperand(0);
44980 SDValue N10In = N10Elt.getOperand(0);
44981 SDValue N11In = N11Elt.getOperand(0);
44982 // First time we find an input, capture it.
44983 if (!ZExtIn) {
44984 ZExtIn = N00In;
44985 SExtIn = N01In;
44986 }
44987 if (ZExtIn != N00In || SExtIn != N01In ||
44988 ZExtIn != N10In || SExtIn != N11In)
44989 return SDValue();
44990 }
44992 auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
44993 ArrayRef<SDValue> Ops) {
44994 // Shrink by adding truncate nodes and let DAGCombine fold with the
44995 // sources.
44996 EVT InVT = Ops[0].getValueType();
44997 assert(InVT.getScalarType() == MVT::i8 &&
44998 "Unexpected scalar element type");
44999 assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
45000 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
45001 InVT.getVectorNumElements() / 2);
45002 return DAG.getNode(X86ISD::VPMADDUBSW, DL, ResVT, Ops[0], Ops[1]);
45003 };
45004 return SplitOpsAndApply(DAG, Subtarget, DL, VT, { ZExtIn, SExtIn },
45005 PMADDBuilder);
45006 }
45008 static SDValue combineTruncate(SDNode *N, SelectionDAG &DAG,
45009 const X86Subtarget &Subtarget) {
45010 EVT VT = N->getValueType(0);
45011 SDValue Src = N->getOperand(0);
45012 SDLoc DL(N);
45014 // Attempt to pre-truncate inputs to arithmetic ops instead.
45015 if (SDValue V = combineTruncatedArithmetic(N, DAG, Subtarget, DL))
45018 // Try to detect AVG pattern first.
45019 if (SDValue Avg = detectAVGPattern(Src, VT, DAG, Subtarget, DL))
45022 // Try to detect PMADDUBSW.
45023 if (SDValue PMAdd = detectPMADDUBSW(Src, VT, DAG, Subtarget, DL))
45026 // Try to combine truncation with signed/unsigned saturation.
45027 if (SDValue Val = combineTruncateWithSat(Src, VT, DL, DAG, Subtarget))
45030 // Try to combine PMULHUW/PMULHW for vXi16.
45031 if (SDValue V = combinePMULH(Src, VT, DL, DAG, Subtarget))
45034 // The bitcast source is a direct mmx result.
45035 // Detect a truncated bitcast from x86mmx to i32.
45036 if (Src.getOpcode() == ISD::BITCAST && VT == MVT::i32) {
45037 SDValue BCSrc = Src.getOperand(0);
45038 if (BCSrc.getValueType() == MVT::x86mmx)
45039 return DAG.getNode(X86ISD::MMX_MOVD2W, DL, MVT::i32, BCSrc);
45042 // Try to truncate extended sign/zero bits with PACKSS/PACKUS.
45043 if (SDValue V = combineVectorSignBitsTruncation(N, DL, DAG, Subtarget))
45044 return V;
45046 return combineVectorTruncation(N, DAG, Subtarget);
45047 }
45049 static SDValue combineVTRUNC(SDNode *N, SelectionDAG &DAG,
45050 TargetLowering::DAGCombinerInfo &DCI) {
45051 EVT VT = N->getValueType(0);
45052 SDValue In = N->getOperand(0);
45053 SDLoc DL(N);
45055 if (auto SSatVal = detectSSatPattern(In, VT))
45056 return DAG.getNode(X86ISD::VTRUNCS, DL, VT, SSatVal);
45057 if (auto USatVal = detectUSatPattern(In, VT, DAG, DL))
45058 return DAG.getNode(X86ISD::VTRUNCUS, DL, VT, USatVal);
45060 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45061 APInt DemandedMask(APInt::getAllOnesValue(VT.getScalarSizeInBits()));
45062 if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
45063 return SDValue(N, 0);
45065 return SDValue();
45066 }
45068 /// Returns the negated value if the node \p N flips sign of FP value.
45070 /// FP-negation node may have different forms: FNEG(x), FXOR (x, 0x80000000)
45071 /// or FSUB(0, x).
45072 /// AVX512F does not have FXOR, so FNEG is lowered as
45073 /// (bitcast (xor (bitcast x), (bitcast ConstantFP(0x80000000)))).
45074 /// In this case we go through all bitcasts.
45075 /// This also recognizes splat of a negated value and returns the splat of that
45076 /// value.
45077 static SDValue isFNEG(SelectionDAG &DAG, SDNode *N, unsigned Depth = 0) {
45078 if (N->getOpcode() == ISD::FNEG)
45079 return N->getOperand(0);
45081 // Don't recurse exponentially.
45082 if (Depth > SelectionDAG::MaxRecursionDepth)
45085 unsigned ScalarSize = N->getValueType(0).getScalarSizeInBits();
45087 SDValue Op = peekThroughBitcasts(SDValue(N, 0));
45088 EVT VT = Op->getValueType(0);
45090 // Make sure the element size doesn't change.
45091 if (VT.getScalarSizeInBits() != ScalarSize)
45094 unsigned Opc = Op.getOpcode();
45095 switch (Opc) {
45096 case ISD::VECTOR_SHUFFLE: {
45097 // For a VECTOR_SHUFFLE(VEC1, VEC2), if the VEC2 is undef, then the negate
45098 // of this is VECTOR_SHUFFLE(-VEC1, UNDEF). The mask can be anything here.
45099 if (!Op.getOperand(1).isUndef())
45101 if (SDValue NegOp0 = isFNEG(DAG, Op.getOperand(0).getNode(), Depth + 1))
45102 if (NegOp0.getValueType() == VT) // FIXME: Can we do better?
45103 return DAG.getVectorShuffle(VT, SDLoc(Op), NegOp0, DAG.getUNDEF(VT),
45104 cast<ShuffleVectorSDNode>(Op)->getMask());
45107 case ISD::INSERT_VECTOR_ELT: {
45108 // Negate of INSERT_VECTOR_ELT(UNDEF, V, INDEX) is INSERT_VECTOR_ELT(UNDEF,
45109 // -V, INDEX).
45110 SDValue InsVector = Op.getOperand(0);
45111 SDValue InsVal = Op.getOperand(1);
45112 if (!InsVector.isUndef())
45114 if (SDValue NegInsVal = isFNEG(DAG, InsVal.getNode(), Depth + 1))
45115 if (NegInsVal.getValueType() == VT.getVectorElementType()) // FIXME
45116 return DAG.getNode(ISD::INSERT_VECTOR_ELT, SDLoc(Op), VT, InsVector,
45117 NegInsVal, Op.getOperand(2));
45118 break;
45119 }
45120 case ISD::FSUB:
45121 case ISD::XOR:
45122 case X86ISD::FXOR: {
45123 SDValue Op1 = Op.getOperand(1);
45124 SDValue Op0 = Op.getOperand(0);
45126 // For XOR and FXOR, we want to check if constant
45127 // bits of Op1 are sign bit masks. For FSUB, we
45128 // have to check if constant bits of Op0 are sign
45129 // bit masks and hence we swap the operands.
45130 if (Opc == ISD::FSUB)
45131 std::swap(Op0, Op1);
45133 APInt UndefElts;
45134 SmallVector<APInt, 16> EltBits;
45135 // Extract constant bits and see if they are all
45136 // sign bit masks. Ignore the undef elements.
45137 if (getTargetConstantBitsFromNode(Op1, ScalarSize, UndefElts, EltBits,
45138 /* AllowWholeUndefs */ true,
45139 /* AllowPartialUndefs */ false)) {
45140 for (unsigned I = 0, E = EltBits.size(); I < E; I++)
45141 if (!UndefElts[I] && !EltBits[I].isSignMask())
45142 return SDValue();
45144 return peekThroughBitcasts(Op0);
45145 }
45146 break;
45147 }
45148 }
45150 return SDValue();
45151 }
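// For example, negating just the multiply part of an FMA yields FNMADD:
//   negateFMAOpcode(ISD::FMA, /*NegMul=*/true, /*NegAcc=*/false,
//                   /*NegRes=*/false) == X86ISD::FNMADD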
45152 static unsigned negateFMAOpcode(unsigned Opcode, bool NegMul, bool NegAcc,
45153 bool NegRes) {
45154 if (NegMul) {
45155 switch (Opcode) {
45156 default: llvm_unreachable("Unexpected opcode");
45157 case ISD::FMA: Opcode = X86ISD::FNMADD; break;
45158 case ISD::STRICT_FMA: Opcode = X86ISD::STRICT_FNMADD; break;
45159 case X86ISD::FMADD_RND: Opcode = X86ISD::FNMADD_RND; break;
45160 case X86ISD::FMSUB: Opcode = X86ISD::FNMSUB; break;
45161 case X86ISD::STRICT_FMSUB: Opcode = X86ISD::STRICT_FNMSUB; break;
45162 case X86ISD::FMSUB_RND: Opcode = X86ISD::FNMSUB_RND; break;
45163 case X86ISD::FNMADD: Opcode = ISD::FMA; break;
45164 case X86ISD::STRICT_FNMADD: Opcode = ISD::STRICT_FMA; break;
45165 case X86ISD::FNMADD_RND: Opcode = X86ISD::FMADD_RND; break;
45166 case X86ISD::FNMSUB: Opcode = X86ISD::FMSUB; break;
45167 case X86ISD::STRICT_FNMSUB: Opcode = X86ISD::STRICT_FMSUB; break;
45168 case X86ISD::FNMSUB_RND: Opcode = X86ISD::FMSUB_RND; break;
45169 }
45170 }
45172 if (NegAcc) {
45173 switch (Opcode) {
45174 default: llvm_unreachable("Unexpected opcode");
45175 case ISD::FMA: Opcode = X86ISD::FMSUB; break;
45176 case ISD::STRICT_FMA: Opcode = X86ISD::STRICT_FMSUB; break;
45177 case X86ISD::FMADD_RND: Opcode = X86ISD::FMSUB_RND; break;
45178 case X86ISD::FMSUB: Opcode = ISD::FMA; break;
45179 case X86ISD::STRICT_FMSUB: Opcode = ISD::STRICT_FMA; break;
45180 case X86ISD::FMSUB_RND: Opcode = X86ISD::FMADD_RND; break;
45181 case X86ISD::FNMADD: Opcode = X86ISD::FNMSUB; break;
45182 case X86ISD::STRICT_FNMADD: Opcode = X86ISD::STRICT_FNMSUB; break;
45183 case X86ISD::FNMADD_RND: Opcode = X86ISD::FNMSUB_RND; break;
45184 case X86ISD::FNMSUB: Opcode = X86ISD::FNMADD; break;
45185 case X86ISD::STRICT_FNMSUB: Opcode = X86ISD::STRICT_FNMADD; break;
45186 case X86ISD::FNMSUB_RND: Opcode = X86ISD::FNMADD_RND; break;
45187 case X86ISD::FMADDSUB: Opcode = X86ISD::FMSUBADD; break;
45188 case X86ISD::FMADDSUB_RND: Opcode = X86ISD::FMSUBADD_RND; break;
45189 case X86ISD::FMSUBADD: Opcode = X86ISD::FMADDSUB; break;
45190 case X86ISD::FMSUBADD_RND: Opcode = X86ISD::FMADDSUB_RND; break;
45191 }
45192 }
45194 if (NegRes) {
45195 switch (Opcode) {
45196 // For accuracy reasons, we never combine fneg and fma under strict FP.
45197 default: llvm_unreachable("Unexpected opcode");
45198 case ISD::FMA: Opcode = X86ISD::FNMSUB; break;
45199 case X86ISD::FMADD_RND: Opcode = X86ISD::FNMSUB_RND; break;
45200 case X86ISD::FMSUB: Opcode = X86ISD::FNMADD; break;
45201 case X86ISD::FMSUB_RND: Opcode = X86ISD::FNMADD_RND; break;
45202 case X86ISD::FNMADD: Opcode = X86ISD::FMSUB; break;
45203 case X86ISD::FNMADD_RND: Opcode = X86ISD::FMSUB_RND; break;
45204 case X86ISD::FNMSUB: Opcode = ISD::FMA; break;
45205 case X86ISD::FNMSUB_RND: Opcode = X86ISD::FMADD_RND; break;
45206 }
45207 }
45209 return Opcode;
45210 }
45212 /// Do target-specific dag combines on floating point negations.
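/// e.g. with FMA available, (fneg (fmul A, B)) can become (fnmsub A, B, 0.0),
/// i.e. (-0 - A*B), avoiding the sign-mask constant an FXOR would need.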
45213 static SDValue combineFneg(SDNode *N, SelectionDAG &DAG,
45214 TargetLowering::DAGCombinerInfo &DCI,
45215 const X86Subtarget &Subtarget) {
45216 EVT OrigVT = N->getValueType(0);
45217 SDValue Arg = isFNEG(DAG, N);
45218 if (!Arg)
45219 return SDValue();
45221 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45222 EVT VT = Arg.getValueType();
45223 EVT SVT = VT.getScalarType();
45224 SDLoc DL(N);
45226 // Let legalize expand this if it isn't a legal type yet.
45227 if (!TLI.isTypeLegal(VT))
45230 // If we're negating a FMUL node on a target with FMA, then we can avoid the
45231 // use of a constant by performing (-0 - A*B) instead.
45232 // FIXME: Check rounding control flags as well once it becomes available.
45233 if (Arg.getOpcode() == ISD::FMUL && (SVT == MVT::f32 || SVT == MVT::f64) &&
45234 Arg->getFlags().hasNoSignedZeros() && Subtarget.hasAnyFMA()) {
45235 SDValue Zero = DAG.getConstantFP(0.0, DL, VT);
45236 SDValue NewNode = DAG.getNode(X86ISD::FNMSUB, DL, VT, Arg.getOperand(0),
45237 Arg.getOperand(1), Zero);
45238 return DAG.getBitcast(OrigVT, NewNode);
45241 bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
45242 bool LegalOperations = !DCI.isBeforeLegalizeOps();
45243 if (SDValue NegArg =
45244 TLI.getNegatedExpression(Arg, DAG, LegalOperations, CodeSize))
45245 return DAG.getBitcast(OrigVT, NegArg);
45250 SDValue X86TargetLowering::getNegatedExpression(SDValue Op, SelectionDAG &DAG,
45251 bool LegalOperations,
45252 bool ForCodeSize,
45253 NegatibleCost &Cost,
45254 unsigned Depth) const {
45255 // fneg patterns are removable even if they have multiple uses.
45256 if (SDValue Arg = isFNEG(DAG, Op.getNode(), Depth)) {
45257 Cost = NegatibleCost::Cheaper;
45258 return DAG.getBitcast(Op.getValueType(), Arg);
45261 EVT VT = Op.getValueType();
45262 EVT SVT = VT.getScalarType();
45263 unsigned Opc = Op.getOpcode();
45264 switch (Opc) {
45265 case ISD::FMA:
45266 case X86ISD::FMSUB:
45267 case X86ISD::FNMADD:
45268 case X86ISD::FNMSUB:
45269 case X86ISD::FMADD_RND:
45270 case X86ISD::FMSUB_RND:
45271 case X86ISD::FNMADD_RND:
45272 case X86ISD::FNMSUB_RND: {
45273 if (!Op.hasOneUse() || !Subtarget.hasAnyFMA() || !isTypeLegal(VT) ||
45274 !(SVT == MVT::f32 || SVT == MVT::f64) ||
45275 !isOperationLegal(ISD::FMA, VT))
45276 break;
45278 // This is always negatible for free but we might be able to remove some
45279 // extra operand negations as well.
45280 SmallVector<SDValue, 4> NewOps(Op.getNumOperands(), SDValue());
45281 for (int i = 0; i != 3; ++i)
45282 NewOps[i] = getCheaperNegatedExpression(
45283 Op.getOperand(i), DAG, LegalOperations, ForCodeSize, Depth + 1);
45285 bool NegA = !!NewOps[0];
45286 bool NegB = !!NewOps[1];
45287 bool NegC = !!NewOps[2];
45288 unsigned NewOpc = negateFMAOpcode(Opc, NegA != NegB, NegC, true);
45290 Cost = (NegA || NegB || NegC) ? NegatibleCost::Cheaper
45291 : NegatibleCost::Neutral;
45293 // Fill in the non-negated ops with the original values.
45294 for (int i = 0, e = Op.getNumOperands(); i != e; ++i)
45295 if (!NewOps[i])
45296 NewOps[i] = Op.getOperand(i);
45297 return DAG.getNode(NewOpc, SDLoc(Op), VT, NewOps);
45298 }
45299 case X86ISD::FRCP:
45300 if (SDValue NegOp0 =
45301 getNegatedExpression(Op.getOperand(0), DAG, LegalOperations,
45302 ForCodeSize, Cost, Depth + 1))
45303 return DAG.getNode(Opc, SDLoc(Op), VT, NegOp0);
45304 break;
45305 }
45307 return TargetLowering::getNegatedExpression(Op, DAG, LegalOperations,
45308 ForCodeSize, Cost, Depth);
45309 }
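// For example:
//   (v4f32 FAND X, Y)
//     --> (v4f32 bitcast (v4i32 and (v4i32 bitcast X), (v4i32 bitcast Y)))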
45311 static SDValue lowerX86FPLogicOp(SDNode *N, SelectionDAG &DAG,
45312 const X86Subtarget &Subtarget) {
45313 MVT VT = N->getSimpleValueType(0);
45314 // If we have integer vector types available, use the integer opcodes.
45315 if (!VT.isVector() || !Subtarget.hasSSE2())
45316 return SDValue();
45318 SDLoc dl(N);
45320 unsigned IntBits = VT.getScalarSizeInBits();
45321 MVT IntSVT = MVT::getIntegerVT(IntBits);
45322 MVT IntVT = MVT::getVectorVT(IntSVT, VT.getSizeInBits() / IntBits);
45324 SDValue Op0 = DAG.getBitcast(IntVT, N->getOperand(0));
45325 SDValue Op1 = DAG.getBitcast(IntVT, N->getOperand(1));
45326 unsigned IntOpcode;
45327 switch (N->getOpcode()) {
45328 default: llvm_unreachable("Unexpected FP logic op");
45329 case X86ISD::FOR: IntOpcode = ISD::OR; break;
45330 case X86ISD::FXOR: IntOpcode = ISD::XOR; break;
45331 case X86ISD::FAND: IntOpcode = ISD::AND; break;
45332 case X86ISD::FANDN: IntOpcode = X86ISD::ANDNP; break;
45334 SDValue IntOp = DAG.getNode(IntOpcode, dl, IntVT, Op0, Op1);
45335 return DAG.getBitcast(VT, IntOp);
45336 }
45339 /// Fold a xor(setcc cond, val), 1 --> setcc (inverted(cond), val)
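/// e.g. (xor (x86setcc E, EFLAGS), 1) --> (x86setcc NE, EFLAGS)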
45340 static SDValue foldXor1SetCC(SDNode *N, SelectionDAG &DAG) {
45341 if (N->getOpcode() != ISD::XOR)
45344 SDValue LHS = N->getOperand(0);
45345 if (!isOneConstant(N->getOperand(1)) || LHS->getOpcode() != X86ISD::SETCC)
45348 X86::CondCode NewCC = X86::GetOppositeBranchCondition(
45349 X86::CondCode(LHS->getConstantOperandVal(0)));
45350 SDLoc DL(N);
45351 return getSETCC(NewCC, LHS->getOperand(1), DL, DAG);
45352 }
45354 static SDValue combineXor(SDNode *N, SelectionDAG &DAG,
45355 TargetLowering::DAGCombinerInfo &DCI,
45356 const X86Subtarget &Subtarget) {
45357 // If this is SSE1 only convert to FXOR to avoid scalarization.
45358 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() &&
45359 N->getValueType(0) == MVT::v4i32) {
45360 return DAG.getBitcast(
45361 MVT::v4i32, DAG.getNode(X86ISD::FXOR, SDLoc(N), MVT::v4f32,
45362 DAG.getBitcast(MVT::v4f32, N->getOperand(0)),
45363 DAG.getBitcast(MVT::v4f32, N->getOperand(1))));
45366 if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
45369 if (SDValue R = combineBitOpWithMOVMSK(N, DAG))
45372 if (DCI.isBeforeLegalizeOps())
45375 if (SDValue SetCC = foldXor1SetCC(N, DAG))
45378 if (SDValue RV = foldXorTruncShiftIntoCmp(N, DAG))
45381 if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
45384 return combineFneg(N, DAG, DCI, Subtarget);
45385 }
45387 static SDValue combineBEXTR(SDNode *N, SelectionDAG &DAG,
45388 TargetLowering::DAGCombinerInfo &DCI,
45389 const X86Subtarget &Subtarget) {
45390 EVT VT = N->getValueType(0);
45391 unsigned NumBits = VT.getSizeInBits();
45393 // TODO - Constant Folding.
45395 // Simplify the inputs.
45396 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45397 APInt DemandedMask(APInt::getAllOnesValue(NumBits));
45398 if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
45399 return SDValue(N, 0);
45401 return SDValue();
45402 }
45404 static bool isNullFPScalarOrVectorConst(SDValue V) {
45405 return isNullFPConstant(V) || ISD::isBuildVectorAllZeros(V.getNode());
45406 }
45408 /// If a value is a scalar FP zero or a vector FP zero (potentially including
45409 /// undefined elements), return a zero constant that may be used to fold away
45410 /// that value. In the case of a vector, the returned constant will not contain
45411 /// undefined elements even if the input parameter does. This makes it suitable
45412 /// to be used as a replacement operand with operations (e.g., bitwise-and) where
45413 /// an undef should not propagate.
45414 static SDValue getNullFPConstForNullVal(SDValue V, SelectionDAG &DAG,
45415 const X86Subtarget &Subtarget) {
45416 if (!isNullFPScalarOrVectorConst(V))
45419 if (V.getValueType().isVector())
45420 return getZeroVector(V.getSimpleValueType(), Subtarget, DAG, SDLoc(V));
45422 return V;
45423 }
45425 static SDValue combineFAndFNotToFAndn(SDNode *N, SelectionDAG &DAG,
45426 const X86Subtarget &Subtarget) {
45427 SDValue N0 = N->getOperand(0);
45428 SDValue N1 = N->getOperand(1);
45429 EVT VT = N->getValueType(0);
45430 SDLoc DL(N);
45432 // Vector types are handled in combineANDXORWithAllOnesIntoANDNP().
45433 if (!((VT == MVT::f32 && Subtarget.hasSSE1()) ||
45434 (VT == MVT::f64 && Subtarget.hasSSE2()) ||
45435 (VT == MVT::v4f32 && Subtarget.hasSSE1() && !Subtarget.hasSSE2())))
45438 auto isAllOnesConstantFP = [](SDValue V) {
45439 if (V.getSimpleValueType().isVector())
45440 return ISD::isBuildVectorAllOnes(V.getNode());
45441 auto *C = dyn_cast<ConstantFPSDNode>(V);
45442 return C && C->getConstantFPValue()->isAllOnesValue();
45443 };
45445 // fand (fxor X, -1), Y --> fandn X, Y
45446 if (N0.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N0.getOperand(1)))
45447 return DAG.getNode(X86ISD::FANDN, DL, VT, N0.getOperand(0), N1);
45449 // fand X, (fxor Y, -1) --> fandn Y, X
45450 if (N1.getOpcode() == X86ISD::FXOR && isAllOnesConstantFP(N1.getOperand(1)))
45451 return DAG.getNode(X86ISD::FANDN, DL, VT, N1.getOperand(0), N0);
45453 return SDValue();
45454 }
45456 /// Do target-specific dag combines on X86ISD::FAND nodes.
45457 static SDValue combineFAnd(SDNode *N, SelectionDAG &DAG,
45458 const X86Subtarget &Subtarget) {
45459 // FAND(0.0, x) -> 0.0
45460 if (SDValue V = getNullFPConstForNullVal(N->getOperand(0), DAG, Subtarget))
45463 // FAND(x, 0.0) -> 0.0
45464 if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
45467 if (SDValue V = combineFAndFNotToFAndn(N, DAG, Subtarget))
45470 return lowerX86FPLogicOp(N, DAG, Subtarget);
45471 }
45473 /// Do target-specific dag combines on X86ISD::FANDN nodes.
45474 static SDValue combineFAndn(SDNode *N, SelectionDAG &DAG,
45475 const X86Subtarget &Subtarget) {
45476 // FANDN(0.0, x) -> x
45477 if (isNullFPScalarOrVectorConst(N->getOperand(0)))
45478 return N->getOperand(1);
45480 // FANDN(x, 0.0) -> 0.0
45481 if (SDValue V = getNullFPConstForNullVal(N->getOperand(1), DAG, Subtarget))
45484 return lowerX86FPLogicOp(N, DAG, Subtarget);
45485 }
45487 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
45488 static SDValue combineFOr(SDNode *N, SelectionDAG &DAG,
45489 TargetLowering::DAGCombinerInfo &DCI,
45490 const X86Subtarget &Subtarget) {
45491 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
45493 // F[X]OR(0.0, x) -> x
45494 if (isNullFPScalarOrVectorConst(N->getOperand(0)))
45495 return N->getOperand(1);
45497 // F[X]OR(x, 0.0) -> x
45498 if (isNullFPScalarOrVectorConst(N->getOperand(1)))
45499 return N->getOperand(0);
45501 if (SDValue NewVal = combineFneg(N, DAG, DCI, Subtarget))
45504 return lowerX86FPLogicOp(N, DAG, Subtarget);
45505 }
45507 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
45508 static SDValue combineFMinFMax(SDNode *N, SelectionDAG &DAG) {
45509 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
45511 // FMIN/FMAX are commutative if no NaNs and no negative zeros are allowed.
45512 if (!DAG.getTarget().Options.NoNaNsFPMath ||
45513 !DAG.getTarget().Options.NoSignedZerosFPMath)
45516 // Under no-NaNs and no-signed-zeros math, convert the FMAX and FMIN nodes
45517 // into FMINC and FMAXC, which are commutative operations.
45518 unsigned NewOp = 0;
45519 switch (N->getOpcode()) {
45520 default: llvm_unreachable("unknown opcode");
45521 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
45522 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
45525 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
45526 N->getOperand(0), N->getOperand(1));
45527 }
45529 static SDValue combineFMinNumFMaxNum(SDNode *N, SelectionDAG &DAG,
45530 const X86Subtarget &Subtarget) {
45531 if (Subtarget.useSoftFloat())
45534 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45536 EVT VT = N->getValueType(0);
45537 if (!((Subtarget.hasSSE1() && VT == MVT::f32) ||
45538 (Subtarget.hasSSE2() && VT == MVT::f64) ||
45539 (VT.isVector() && TLI.isTypeLegal(VT))))
45542 SDValue Op0 = N->getOperand(0);
45543 SDValue Op1 = N->getOperand(1);
45544 SDLoc DL(N);
45545 auto MinMaxOp = N->getOpcode() == ISD::FMAXNUM ? X86ISD::FMAX : X86ISD::FMIN;
45547 // If we don't have to respect NaN inputs, this is a direct translation to x86
45548 // min/max instructions.
45549 if (DAG.getTarget().Options.NoNaNsFPMath || N->getFlags().hasNoNaNs())
45550 return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
45552 // If one of the operands is known non-NaN use the native min/max instructions
45553 // with the non-NaN input as second operand.
45554 if (DAG.isKnownNeverNaN(Op1))
45555 return DAG.getNode(MinMaxOp, DL, VT, Op0, Op1, N->getFlags());
45556 if (DAG.isKnownNeverNaN(Op0))
45557 return DAG.getNode(MinMaxOp, DL, VT, Op1, Op0, N->getFlags());
45559 // If we have to respect NaN inputs, this takes at least 3 instructions.
45560 // Favor a library call when operating on a scalar and minimizing code size.
45561 if (!VT.isVector() && DAG.getMachineFunction().getFunction().hasMinSize())
45564 EVT SetCCType = TLI.getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
45565 VT);
45567 // There are 4 possibilities involving NaN inputs, and these are the required
45568 // outputs:
45569 //                    Op1
45570 //                Num     NaN
45571 //             ----------------
45572 //      Num    |  Max  |  Op0 |
45573 // Op0         ----------------
45574 //      NaN    |  Op1  |  NaN |
45575 //             ----------------
45577 // The SSE FP max/min instructions were not designed for this case, but rather
45578 // to implement:
45579 // Min = Op1 < Op0 ? Op1 : Op0
45580 // Max = Op1 > Op0 ? Op1 : Op0
45582 // So they always return Op0 if either input is a NaN. However, we can still
45583 // use those instructions for fmaxnum by selecting away a NaN input.
45585 // If either operand is NaN, the 2nd source operand (Op0) is passed through.
45586 SDValue MinOrMax = DAG.getNode(MinMaxOp, DL, VT, Op1, Op0);
45587 SDValue IsOp0Nan = DAG.getSetCC(DL, SetCCType, Op0, Op0, ISD::SETUO);
45589 // If Op0 is a NaN, select Op1. Otherwise, select the max. If both operands
45590 // are NaN, the NaN value of Op1 is the result.
45591 return DAG.getSelect(DL, VT, IsOp0Nan, Op1, MinOrMax);
45592 }
45594 static SDValue combineX86INT_TO_FP(SDNode *N, SelectionDAG &DAG,
45595 TargetLowering::DAGCombinerInfo &DCI) {
45596 EVT VT = N->getValueType(0);
45597 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45599 APInt KnownUndef, KnownZero;
45600 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
45601 if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
45602 KnownZero, DCI))
45603 return SDValue(N, 0);
45605 // Convert a full vector load into vzload when not all bits are needed.
45606 SDValue In = N->getOperand(0);
45607 MVT InVT = In.getSimpleValueType();
45608 if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
45609 ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
45610 assert(InVT.is128BitVector() && "Expected 128-bit input vector");
45611 LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(0));
45612 unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
45613 MVT MemVT = MVT::getIntegerVT(NumBits);
45614 MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
45615 if (SDValue VZLoad = narrowLoadToVZLoad(LN, MemVT, LoadVT, DAG)) {
45616 SDLoc dl(N);
45617 SDValue Convert = DAG.getNode(N->getOpcode(), dl, VT,
45618 DAG.getBitcast(InVT, VZLoad));
45619 DCI.CombineTo(N, Convert);
45620 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
45621 DCI.recursivelyDeleteUnusedNodes(LN);
45622 return SDValue(N, 0);
45623 }
45624 }
45626 return SDValue();
45627 }
45629 static SDValue combineCVTP2I_CVTTP2I(SDNode *N, SelectionDAG &DAG,
45630 TargetLowering::DAGCombinerInfo &DCI) {
45631 bool IsStrict = N->isTargetStrictFPOpcode();
45632 EVT VT = N->getValueType(0);
45634 // Convert a full vector load into vzload when not all bits are needed.
45635 SDValue In = N->getOperand(IsStrict ? 1 : 0);
45636 MVT InVT = In.getSimpleValueType();
45637 if (VT.getVectorNumElements() < InVT.getVectorNumElements() &&
45638 ISD::isNormalLoad(In.getNode()) && In.hasOneUse()) {
45639 assert(InVT.is128BitVector() && "Expected 128-bit input vector");
45640 LoadSDNode *LN = cast<LoadSDNode>(In);
45641 unsigned NumBits = InVT.getScalarSizeInBits() * VT.getVectorNumElements();
45642 MVT MemVT = MVT::getFloatingPointVT(NumBits);
45643 MVT LoadVT = MVT::getVectorVT(MemVT, 128 / NumBits);
45644 if (SDValue VZLoad = narrowLoadToVZLoad(LN, MemVT, LoadVT, DAG)) {
45645 SDLoc dl(N);
45646 if (IsStrict) {
45647 SDValue Convert =
45648 DAG.getNode(N->getOpcode(), dl, {VT, MVT::Other},
45649 {N->getOperand(0), DAG.getBitcast(InVT, VZLoad)});
45650 DCI.CombineTo(N, Convert, Convert.getValue(1));
45651 } else {
45652 SDValue Convert =
45653 DAG.getNode(N->getOpcode(), dl, VT, DAG.getBitcast(InVT, VZLoad));
45654 DCI.CombineTo(N, Convert);
45655 }
45656 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
45657 DCI.recursivelyDeleteUnusedNodes(LN);
45658 return SDValue(N, 0);
45659 }
45660 }
45662 return SDValue();
45663 }
45665 /// Do target-specific dag combines on X86ISD::ANDNP nodes.
45666 static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
45667 TargetLowering::DAGCombinerInfo &DCI,
45668 const X86Subtarget &Subtarget) {
45669 MVT VT = N->getSimpleValueType(0);
45671 // ANDNP(0, x) -> x
45672 if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
45673 return N->getOperand(1);
45675 // ANDNP(x, 0) -> 0
45676 if (ISD::isBuildVectorAllZeros(N->getOperand(1).getNode()))
45677 return DAG.getConstant(0, SDLoc(N), VT);
45679 // Turn ANDNP back to AND if input is inverted.
45680 if (SDValue Not = IsNOT(N->getOperand(0), DAG))
45681 return DAG.getNode(ISD::AND, SDLoc(N), VT, DAG.getBitcast(VT, Not),
45682 N->getOperand(1));
45684 // Attempt to recursively combine a bitmask ANDNP with shuffles.
45685 if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
45686 SDValue Op(N, 0);
45687 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
45688 return Res;
45689 }
45691 return SDValue();
45692 }
45694 static SDValue combineBT(SDNode *N, SelectionDAG &DAG,
45695 TargetLowering::DAGCombinerInfo &DCI) {
45696 SDValue N1 = N->getOperand(1);
45698 // BT ignores high bits in the bit index operand.
45699 unsigned BitWidth = N1.getValueSizeInBits();
45700 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
45701 if (DAG.getTargetLoweringInfo().SimplifyDemandedBits(N1, DemandedMask, DCI)) {
45702 if (N->getOpcode() != ISD::DELETED_NODE)
45703 DCI.AddToWorklist(N);
45704 return SDValue(N, 0);
45705 }
45707 return SDValue();
45708 }
45710 static SDValue combineCVTPH2PS(SDNode *N, SelectionDAG &DAG,
45711 TargetLowering::DAGCombinerInfo &DCI) {
45712 bool IsStrict = N->getOpcode() == X86ISD::STRICT_CVTPH2PS;
45713 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
45715 if (N->getValueType(0) == MVT::v4f32 && Src.getValueType() == MVT::v8i16) {
45716 APInt KnownUndef, KnownZero;
45717 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
45718 APInt DemandedElts = APInt::getLowBitsSet(8, 4);
45719 if (TLI.SimplifyDemandedVectorElts(Src, DemandedElts, KnownUndef, KnownZero,
45721 if (N->getOpcode() != ISD::DELETED_NODE)
45722 DCI.AddToWorklist(N);
45723 return SDValue(N, 0);
45726 // Convert a full vector load into vzload when not all bits are needed.
45727 if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) {
45728 LoadSDNode *LN = cast<LoadSDNode>(N->getOperand(IsStrict ? 1 : 0));
45729 if (SDValue VZLoad = narrowLoadToVZLoad(LN, MVT::i64, MVT::v2i64, DAG)) {
45730 SDLoc dl(N);
45731 if (IsStrict) {
45732 SDValue Convert = DAG.getNode(
45733 N->getOpcode(), dl, {MVT::v4f32, MVT::Other},
45734 {N->getOperand(0), DAG.getBitcast(MVT::v8i16, VZLoad)});
45735 DCI.CombineTo(N, Convert, Convert.getValue(1));
45736 } else {
45737 SDValue Convert = DAG.getNode(N->getOpcode(), dl, MVT::v4f32,
45738 DAG.getBitcast(MVT::v8i16, VZLoad));
45739 DCI.CombineTo(N, Convert);
45740 }
45742 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), VZLoad.getValue(1));
45743 DCI.recursivelyDeleteUnusedNodes(LN);
45744 return SDValue(N, 0);
45745 }
45746 }
45748 return SDValue();
45749 }
45752 // Try to combine sext_in_reg of a cmov of constants by extending the constants.
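// e.g. (sext_in_reg (i32 cmov 7, 200, cond), i8)
//        --> (i32 cmov 7, -56, cond)
// since sign-extending the i8 value 200 (0xC8) constant-folds to -56.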
45753 static SDValue combineSextInRegCmov(SDNode *N, SelectionDAG &DAG) {
45754 assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
45756 EVT DstVT = N->getValueType(0);
45758 SDValue N0 = N->getOperand(0);
45759 SDValue N1 = N->getOperand(1);
45760 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
45762 if (ExtraVT != MVT::i8 && ExtraVT != MVT::i16)
45765 // Look through single use any_extends / truncs.
45766 SDValue IntermediateBitwidthOp;
45767 if ((N0.getOpcode() == ISD::ANY_EXTEND || N0.getOpcode() == ISD::TRUNCATE) &&
45768 N0.hasOneUse()) {
45769 IntermediateBitwidthOp = N0;
45770 N0 = N0.getOperand(0);
45771 }
45773 // See if we have a single use cmov.
45774 if (N0.getOpcode() != X86ISD::CMOV || !N0.hasOneUse())
45777 SDValue CMovOp0 = N0.getOperand(0);
45778 SDValue CMovOp1 = N0.getOperand(1);
45780 // Make sure both operands are constants.
45781 if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
45782 !isa<ConstantSDNode>(CMovOp1.getNode()))
45783 return SDValue();
45785 SDLoc DL(N);
45787 // If we looked through an any_extend/trunc above, apply the same op to the constants.
45788 if (IntermediateBitwidthOp) {
45789 unsigned IntermediateOpc = IntermediateBitwidthOp.getOpcode();
45790 CMovOp0 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp0);
45791 CMovOp1 = DAG.getNode(IntermediateOpc, DL, DstVT, CMovOp1);
45792 }
45794 CMovOp0 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp0, N1);
45795 CMovOp1 = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, DstVT, CMovOp1, N1);
45797 EVT CMovVT = DstVT;
45798 // We do not want i16 CMOV's. Promote to i32 and truncate afterwards.
45799 if (DstVT == MVT::i16) {
45800 CMovVT = MVT::i32;
45801 CMovOp0 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp0);
45802 CMovOp1 = DAG.getNode(ISD::ZERO_EXTEND, DL, CMovVT, CMovOp1);
45803 }
45805 SDValue CMov = DAG.getNode(X86ISD::CMOV, DL, CMovVT, CMovOp0, CMovOp1,
45806 N0.getOperand(2), N0.getOperand(3));
45808 if (CMovVT != DstVT)
45809 CMov = DAG.getNode(ISD::TRUNCATE, DL, DstVT, CMov);
45811 return CMov;
45812 }
45814 static SDValue combineSignExtendInReg(SDNode *N, SelectionDAG &DAG,
45815 const X86Subtarget &Subtarget) {
45816 assert(N->getOpcode() == ISD::SIGN_EXTEND_INREG);
45818 if (SDValue V = combineSextInRegCmov(N, DAG))
45821 EVT VT = N->getValueType(0);
45822 SDValue N0 = N->getOperand(0);
45823 SDValue N1 = N->getOperand(1);
45824 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
45825 SDLoc dl(N);
45827 // SIGN_EXTEND_INREG to v4i64 is an expensive operation on both
45828 // SSE and AVX2, since there is no sign-extended shift right
45829 // operation on a vector with 64-bit elements.
45830 // (sext_in_reg (v4i64 anyext (v4i32 x )), ExtraVT) ->
45831 // (v4i64 sext (v4i32 sext_in_reg (v4i32 x , ExtraVT)))
45832 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
45833 N0.getOpcode() == ISD::SIGN_EXTEND)) {
45834 SDValue N00 = N0.getOperand(0);
45836 // EXTLOAD has a better solution on AVX2,
45837 // it may be replaced with X86ISD::VSEXT node.
45838 if (N00.getOpcode() == ISD::LOAD && Subtarget.hasInt256())
45839 if (!ISD::isNormalLoad(N00.getNode()))
45840 return SDValue();
45842 // Attempt to promote any comparison mask ops before moving the
45843 // SIGN_EXTEND_INREG in the way.
45844 if (SDValue Promote = PromoteMaskArithmetic(N0.getNode(), DAG, Subtarget))
45845 return DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, VT, Promote, N1);
45847 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
45848 SDValue Tmp =
45849 DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, N00, N1);
45850 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
45851 }
45852 }
45854 return SDValue();
45855 }
45856 /// sext(add_nsw(x, C)) --> add(sext(x), C_sext)
45857 /// zext(add_nuw(x, C)) --> add(zext(x), C_zext)
45858 /// Promoting a sign/zero extension ahead of a no overflow 'add' exposes
45859 /// opportunities to combine math ops, use an LEA, or use a complex addressing
45860 /// mode. This can eliminate extend, add, and shift instructions.
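/// e.g. (i64 sext (i32 add nsw X, 5)) --> (i64 add (i64 sext X), 5), where the
/// widened add can then fold into an LEA addressing mode or a later add/shl.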
45861 static SDValue promoteExtBeforeAdd(SDNode *Ext, SelectionDAG &DAG,
45862 const X86Subtarget &Subtarget) {
45863 if (Ext->getOpcode() != ISD::SIGN_EXTEND &&
45864 Ext->getOpcode() != ISD::ZERO_EXTEND)
45867 // TODO: This should be valid for other integer types.
45868 EVT VT = Ext->getValueType(0);
45869 if (VT != MVT::i64)
45872 SDValue Add = Ext->getOperand(0);
45873 if (Add.getOpcode() != ISD::ADD)
45876 bool Sext = Ext->getOpcode() == ISD::SIGN_EXTEND;
45877 bool NSW = Add->getFlags().hasNoSignedWrap();
45878 bool NUW = Add->getFlags().hasNoUnsignedWrap();
45880 // We need an 'add nsw' feeding into the 'sext' or 'add nuw' feeding
45881 // into the 'zext'.
45882 if ((Sext && !NSW) || (!Sext && !NUW))
45885 // Having a constant operand to the 'add' ensures that we are not increasing
45886 // the instruction count because the constant is extended for free below.
45887 // A constant operand can also become the displacement field of an LEA.
45888 auto *AddOp1 = dyn_cast<ConstantSDNode>(Add.getOperand(1));
45889 if (!AddOp1)
45890 return SDValue();
45892 // Don't make the 'add' bigger if there's no hope of combining it with some
45893 // other 'add' or 'shl' instruction.
45894 // TODO: It may be profitable to generate simpler LEA instructions in place
45895 // of single 'add' instructions, but the cost model for selecting an LEA
45896 // currently has a high threshold.
45897 bool HasLEAPotential = false;
45898 for (auto *User : Ext->uses()) {
45899 if (User->getOpcode() == ISD::ADD || User->getOpcode() == ISD::SHL) {
45900 HasLEAPotential = true;
45901 break;
45902 }
45903 }
45904 if (!HasLEAPotential)
45907 // Everything looks good, so pull the '{s|z}ext' ahead of the 'add'.
45908 int64_t AddConstant = Sext ? AddOp1->getSExtValue() : AddOp1->getZExtValue();
45909 SDValue AddOp0 = Add.getOperand(0);
45910 SDValue NewExt = DAG.getNode(Ext->getOpcode(), SDLoc(Ext), VT, AddOp0);
45911 SDValue NewConstant = DAG.getConstant(AddConstant, SDLoc(Add), VT);
45913 // The wider add is guaranteed to not wrap because both operands are
45914 // sign/zero-extended.
45915 SDNodeFlags Flags;
45916 Flags.setNoSignedWrap(NSW);
45917 Flags.setNoUnsignedWrap(NUW);
45918 return DAG.getNode(ISD::ADD, SDLoc(Add), VT, NewExt, NewConstant, Flags);
45919 }
45921 // If we face {ANY,SIGN,ZERO}_EXTEND that is applied to a CMOV with constant
45922 // operands and the result of CMOV is not used anywhere else - promote CMOV
45923 // itself instead of promoting its result. This could be beneficial, because:
45924 // 1) X86TargetLowering::EmitLoweredSelect later can do merging of two
45925 // (or more) pseudo-CMOVs only when they go one-after-another and
45926 // getting rid of result extension code after CMOV will help that.
45927 // 2) Promotion of constant CMOV arguments is free, hence the
45928 // {ANY,SIGN,ZERO}_EXTEND will just be deleted.
45929 // 3) 16-bit CMOV encoding is 4 bytes, 32-bit CMOV is 3-byte, so this
45930 // promotion is also good in terms of code-size.
45931 // (64-bit CMOV is 4-bytes, that's why we don't do 32-bit => 64-bit
45932 // promotion.)
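// e.g. (i32 zext (i16 cmov C1, C2, cond))
//        --> (i32 cmov (i32 zext C1), (i32 zext C2), cond)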
45933 static SDValue combineToExtendCMOV(SDNode *Extend, SelectionDAG &DAG) {
45934 SDValue CMovN = Extend->getOperand(0);
45935 if (CMovN.getOpcode() != X86ISD::CMOV || !CMovN.hasOneUse())
45938 EVT TargetVT = Extend->getValueType(0);
45939 unsigned ExtendOpcode = Extend->getOpcode();
45940 SDLoc DL(Extend);
45942 EVT VT = CMovN.getValueType();
45943 SDValue CMovOp0 = CMovN.getOperand(0);
45944 SDValue CMovOp1 = CMovN.getOperand(1);
45946 if (!isa<ConstantSDNode>(CMovOp0.getNode()) ||
45947 !isa<ConstantSDNode>(CMovOp1.getNode()))
45950 // Only extend to i32 or i64.
45951 if (TargetVT != MVT::i32 && TargetVT != MVT::i64)
45954 // Only extend from i16 unless it's a sign_extend from i32. Zext/aext from
45955 // i32 are free.
45956 if (VT != MVT::i16 && !(ExtendOpcode == ISD::SIGN_EXTEND && VT == MVT::i32))
45959 // If this is a zero extend to i64, we should only extend to i32 and use a
45960 // free zero extend to finish.
45961 EVT ExtendVT = TargetVT;
45962 if (TargetVT == MVT::i64 && ExtendOpcode != ISD::SIGN_EXTEND)
45963 ExtendVT = MVT::i32;
45965 CMovOp0 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp0);
45966 CMovOp1 = DAG.getNode(ExtendOpcode, DL, ExtendVT, CMovOp1);
45968 SDValue Res = DAG.getNode(X86ISD::CMOV, DL, ExtendVT, CMovOp0, CMovOp1,
45969 CMovN.getOperand(2), CMovN.getOperand(3));
45971 // Finish extending if needed.
45972 if (ExtendVT != TargetVT)
45973 Res = DAG.getNode(ExtendOpcode, DL, TargetVT, Res);
45975 return Res;
45976 }
45978 // Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)).
45979 // This is more or less the reverse of combineBitcastvxi1.
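// Roughly: for (v8i16 sext (v8i1 bitcast (i8 X))), broadcast X to every lane,
// AND lane i with the single-bit mask (1 << i), then setcc-eq against that
// same mask and sign-extend, yielding all-ones/zero elements (and shift right
// by EltSizeInBits - 1 for zext/aext).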
45980 static SDValue
45981 combineToExtendBoolVectorInReg(SDNode *N, SelectionDAG &DAG,
45982 TargetLowering::DAGCombinerInfo &DCI,
45983 const X86Subtarget &Subtarget) {
45984 unsigned Opcode = N->getOpcode();
45985 if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND &&
45986 Opcode != ISD::ANY_EXTEND)
45988 if (!DCI.isBeforeLegalizeOps())
45990 if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
45993 SDValue N0 = N->getOperand(0);
45994 EVT VT = N->getValueType(0);
45995 EVT SVT = VT.getScalarType();
45996 EVT InSVT = N0.getValueType().getScalarType();
45997 unsigned EltSizeInBits = SVT.getSizeInBits();
45999 // Input type must be extending a bool vector (bit-casted from a scalar
46000 // integer) to legal integer types.
46001 if (!VT.isVector())
46003 if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8)
46005 if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST)
46008 SDValue N00 = N0.getOperand(0);
46009 EVT SclVT = N0.getOperand(0).getValueType();
46010 if (!SclVT.isScalarInteger())
46011 return SDValue();
46013 SDLoc DL(N);
46014 SDValue Vec;
46015 SmallVector<int, 32> ShuffleMask;
46016 unsigned NumElts = VT.getVectorNumElements();
46017 assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size");
46019 // Broadcast the scalar integer to the vector elements.
46020 if (NumElts > EltSizeInBits) {
46021 // If the scalar integer is greater than the vector element size, then we
46022 // must split it down into sub-sections for broadcasting. For example:
46023 // i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections.
46024 // i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections.
46025 assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale");
46026 unsigned Scale = NumElts / EltSizeInBits;
46027 EVT BroadcastVT =
46028 EVT::getVectorVT(*DAG.getContext(), SclVT, EltSizeInBits);
46029 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
46030 Vec = DAG.getBitcast(VT, Vec);
46032 for (unsigned i = 0; i != Scale; ++i)
46033 ShuffleMask.append(EltSizeInBits, i);
46034 Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
46035 } else if (Subtarget.hasAVX2() && NumElts < EltSizeInBits &&
46036 (SclVT == MVT::i8 || SclVT == MVT::i16 || SclVT == MVT::i32)) {
46037 // If we have register broadcast instructions, use the scalar size as the
46038 // element type for the shuffle. Then cast to the wider element type. The
46039 // widened bits won't be used, and this might allow the use of a broadcast
46040 // load.
46041 assert((EltSizeInBits % NumElts) == 0 && "Unexpected integer scale");
46042 unsigned Scale = EltSizeInBits / NumElts;
46043 EVT BroadcastVT =
46044 EVT::getVectorVT(*DAG.getContext(), SclVT, NumElts * Scale);
46045 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
46046 ShuffleMask.append(NumElts * Scale, 0);
46047 Vec = DAG.getVectorShuffle(BroadcastVT, DL, Vec, Vec, ShuffleMask);
46048 Vec = DAG.getBitcast(VT, Vec);
46049 } else {
46050 // For smaller scalar integers, we can simply any-extend it to the vector
46051 // element size (we don't care about the upper bits) and broadcast it to all
46052 // elements.
46053 SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT);
46054 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
46055 ShuffleMask.append(NumElts, 0);
46056 Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
46057 }
46059 // Now, mask the relevant bit in each element.
46060 SmallVector<SDValue, 32> Bits;
46061 for (unsigned i = 0; i != NumElts; ++i) {
46062 int BitIdx = (i % EltSizeInBits);
46063 APInt Bit = APInt::getBitsSet(EltSizeInBits, BitIdx, BitIdx + 1);
46064 Bits.push_back(DAG.getConstant(Bit, DL, SVT));
46066 SDValue BitMask = DAG.getBuildVector(VT, DL, Bits);
46067 Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask);
46069 // Compare against the bitmask and extend the result.
46070 EVT CCVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1, NumElts);
46071 Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ);
46072 Vec = DAG.getSExtOrTrunc(Vec, DL, VT);
46074 // For SEXT, this is now done, otherwise shift the result down for
46075 // zero extension.
46076 if (Opcode == ISD::SIGN_EXTEND)
46077 return Vec;
46078 return DAG.getNode(ISD::SRL, DL, VT, Vec,
46079 DAG.getConstant(EltSizeInBits - 1, DL, VT));
46080 }
46082 // Attempt to combine a (sext/zext (setcc)) to a setcc with a xmm/ymm/zmm
46083 // result type.
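// e.g. with AVX512, (v8i32 sext (setcc (v8i32 A), (v8i32 B), eq)) can instead
// be (v8i32 setcc A, B, eq), producing all-ones/zero lanes directly in a ymm
// register rather than round-tripping through a mask register.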
46084 static SDValue combineExtSetcc(SDNode *N, SelectionDAG &DAG,
46085 const X86Subtarget &Subtarget) {
46086 SDValue N0 = N->getOperand(0);
46087 EVT VT = N->getValueType(0);
46088 SDLoc dl(N);
46090 // Only do this combine with AVX512 for vector extends.
46091 if (!Subtarget.hasAVX512() || !VT.isVector() || N0.getOpcode() != ISD::SETCC)
46094 // Only combine legal element types.
46095 EVT SVT = VT.getVectorElementType();
46096 if (SVT != MVT::i8 && SVT != MVT::i16 && SVT != MVT::i32 &&
46097 SVT != MVT::i64 && SVT != MVT::f32 && SVT != MVT::f64)
46100 // We can only do this if the vector size is 256 bits or less.
46101 unsigned Size = VT.getSizeInBits();
46102 if (Size > 256 && Subtarget.useAVX512Regs())
46105 // Don't fold if the condition code can't be handled by PCMPEQ/PCMPGT since
46106 // those are the only integer compares we have.
46107 ISD::CondCode CC = cast<CondCodeSDNode>(N0.getOperand(2))->get();
46108 if (ISD::isUnsignedIntSetCC(CC))
46111 // Only do this combine if the extension will be fully consumed by the setcc.
46112 EVT N00VT = N0.getOperand(0).getValueType();
46113 EVT MatchingVecType = N00VT.changeVectorElementTypeToInteger();
46114 if (Size != MatchingVecType.getSizeInBits())
46117 SDValue Res = DAG.getSetCC(dl, VT, N0.getOperand(0), N0.getOperand(1), CC);
46119 if (N->getOpcode() == ISD::ZERO_EXTEND)
46120 Res = DAG.getZeroExtendInReg(Res, dl, N0.getValueType());
46122 return Res;
46123 }
46125 static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
46126 TargetLowering::DAGCombinerInfo &DCI,
46127 const X86Subtarget &Subtarget) {
46128 SDValue N0 = N->getOperand(0);
46129 EVT VT = N->getValueType(0);
46130 EVT InVT = N0.getValueType();
46133 // (i32 (sext (i8 (x86isd::setcc_carry)))) -> (i32 (x86isd::setcc_carry))
46134 if (!DCI.isBeforeLegalizeOps() &&
46135 N0.getOpcode() == X86ISD::SETCC_CARRY) {
46136 SDValue Setcc = DAG.getNode(X86ISD::SETCC_CARRY, DL, VT, N0->getOperand(0),
46137 N0->getOperand(1));
46138 bool ReplaceOtherUses = !N0.hasOneUse();
46139 DCI.CombineTo(N, Setcc);
46140 // Replace other uses with a truncate of the widened setcc_carry.
46141 if (ReplaceOtherUses) {
46142 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
46143 N0.getValueType(), Setcc);
46144 DCI.CombineTo(N0.getNode(), Trunc);
46145 }
46147 return SDValue(N, 0);
46150 if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
46153 if (!DCI.isBeforeLegalizeOps())
46156 if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
46159 if (InVT == MVT::i1 && N0.getOpcode() == ISD::XOR &&
46160 isAllOnesConstant(N0.getOperand(1)) && N0.hasOneUse()) {
46161 // Inverting and sign-extending a boolean is the same as zero-extending and
46162 // subtracting 1, because 0 becomes -1 and 1 becomes 0. The subtract is
46163 // efficiently lowered with an LEA or a DEC. This is the same as: select Bool, 0, -1.
46164 // sext (xor Bool, -1) --> sub (zext Bool), 1
46165 SDValue Zext = DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0.getOperand(0));
46166 return DAG.getNode(ISD::SUB, DL, VT, Zext, DAG.getConstant(1, DL, VT));
46167 }
46169 if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
46173 if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
46176 if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
46177 return NewAdd;
46179 return SDValue();
46180 }
46182 static SDValue combineFMA(SDNode *N, SelectionDAG &DAG,
46183 TargetLowering::DAGCombinerInfo &DCI,
46184 const X86Subtarget &Subtarget) {
46185 SDLoc dl(N);
46186 EVT VT = N->getValueType(0);
46187 bool IsStrict = N->isStrictFPOpcode() || N->isTargetStrictFPOpcode();
46189 // Let legalize expand this if it isn't a legal type yet.
46190 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
46191 if (!TLI.isTypeLegal(VT))
46194 EVT ScalarVT = VT.getScalarType();
46195 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) || !Subtarget.hasAnyFMA())
46198 SDValue A = N->getOperand(IsStrict ? 1 : 0);
46199 SDValue B = N->getOperand(IsStrict ? 2 : 1);
46200 SDValue C = N->getOperand(IsStrict ? 3 : 2);
46202 auto invertIfNegative = [&DAG, &TLI, &DCI](SDValue &V) {
46203 bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
46204 bool LegalOperations = !DCI.isBeforeLegalizeOps();
46205 if (SDValue NegV = TLI.getCheaperNegatedExpression(V, DAG, LegalOperations,
46206 CodeSize)) {
46207 V = NegV;
46208 return true;
46209 }
46210 // Look through extract_vector_elts. If it comes from an FNEG, create a
46211 // new extract from the FNEG input.
46212 if (V.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
46213 isNullConstant(V.getOperand(1))) {
46214 SDValue Vec = V.getOperand(0);
46215 if (SDValue NegV = TLI.getCheaperNegatedExpression(
46216 Vec, DAG, LegalOperations, CodeSize)) {
46217 V = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SDLoc(V), V.getValueType(),
46218 NegV, V.getOperand(1));
46219 return true;
46220 }
46221 }
46223 return false;
46224 };
46226 // Do not convert the passthru input of scalar intrinsics.
46227 // FIXME: We could allow negations of the lower element only.
46228 bool NegA = invertIfNegative(A);
46229 bool NegB = invertIfNegative(B);
46230 bool NegC = invertIfNegative(C);
46232 if (!NegA && !NegB && !NegC)
46235 unsigned NewOpcode =
46236 negateFMAOpcode(N->getOpcode(), NegA != NegB, NegC, false);
46238 if (IsStrict) {
46239 assert(N->getNumOperands() == 4 && "Shouldn't be greater than 4");
46240 return DAG.getNode(NewOpcode, dl, {VT, MVT::Other},
46241 {N->getOperand(0), A, B, C});
46242 } else {
46243 if (N->getNumOperands() == 4)
46244 return DAG.getNode(NewOpcode, dl, VT, A, B, C, N->getOperand(3));
46245 return DAG.getNode(NewOpcode, dl, VT, A, B, C);
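// Illustrative sketch of the negation folding above (operand names are for
// exposition only):
//   (fma (fneg A), B, C) --> (fnmadd A, B, C)
//   (fma A, B, (fneg C)) --> (fmsub  A, B, C)
// so a cheaply-negatable operand is absorbed into the FMA opcode instead of
// being materialized as a separate XOR against the sign-bit mask.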
46249 // Combine FMADDSUB(A, B, FNEG(C)) -> FMSUBADD(A, B, C)
46250 // Combine FMSUBADD(A, B, FNEG(C)) -> FMADDSUB(A, B, C)
46251 static SDValue combineFMADDSUB(SDNode *N, SelectionDAG &DAG,
                               TargetLowering::DAGCombinerInfo &DCI) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  bool CodeSize = DAG.getMachineFunction().getFunction().hasOptSize();
  bool LegalOperations = !DCI.isBeforeLegalizeOps();

  SDValue N2 = N->getOperand(2);
  SDValue NegN2 =
      TLI.getCheaperNegatedExpression(N2, DAG, LegalOperations, CodeSize);
  if (!NegN2)
    return SDValue();
  unsigned NewOpcode = negateFMAOpcode(N->getOpcode(), false, true, false);

  if (N->getNumOperands() == 4)
    return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
                       NegN2, N->getOperand(3));
  return DAG.getNode(NewOpcode, dl, VT, N->getOperand(0), N->getOperand(1),
                     NegN2);
}
46274 static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
46275 TargetLowering::DAGCombinerInfo &DCI,
                           const X86Subtarget &Subtarget) {
  SDLoc dl(N);
46278 SDValue N0 = N->getOperand(0);
46279 EVT VT = N->getValueType(0);
46281 // (i32 (aext (i8 (x86isd::setcc_carry)))) -> (i32 (x86isd::setcc_carry))
46282 // FIXME: Is this needed? We don't seem to have any tests for it.
46283 if (!DCI.isBeforeLegalizeOps() && N->getOpcode() == ISD::ANY_EXTEND &&
46284 N0.getOpcode() == X86ISD::SETCC_CARRY) {
46285 SDValue Setcc = DAG.getNode(X86ISD::SETCC_CARRY, dl, VT, N0->getOperand(0),
46286 N0->getOperand(1));
46287 bool ReplaceOtherUses = !N0.hasOneUse();
46288 DCI.CombineTo(N, Setcc);
46289 // Replace other uses with a truncate of the widened setcc_carry.
46290 if (ReplaceOtherUses) {
46291 SDValue Trunc = DAG.getNode(ISD::TRUNCATE, SDLoc(N0),
46292 N0.getValueType(), Setcc);
      DCI.CombineTo(N0.getNode(), Trunc);
    }
    return SDValue(N, 0);
  }

  if (SDValue NewCMov = combineToExtendCMOV(N, DAG))
    return NewCMov;

  if (DCI.isBeforeLegalizeOps())
    if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
      return V;

  if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
    return V;

  if (SDValue R = PromoteMaskArithmetic(N, DAG, Subtarget))
    return R;

  if (SDValue NewAdd = promoteExtBeforeAdd(N, DAG, Subtarget))
    return NewAdd;

  if (SDValue R = combineOrCmpEqZeroToCtlzSrl(N, DAG, DCI, Subtarget))
    return R;
46319 // TODO: Combine with any target/faux shuffle.
46320 if (N0.getOpcode() == X86ISD::PACKUS && N0.getValueSizeInBits() == 128 &&
46321 VT.getScalarSizeInBits() == N0.getOperand(0).getScalarValueSizeInBits()) {
46322 SDValue N00 = N0.getOperand(0);
46323 SDValue N01 = N0.getOperand(1);
46324 unsigned NumSrcEltBits = N00.getScalarValueSizeInBits();
46325 APInt ZeroMask = APInt::getHighBitsSet(NumSrcEltBits, NumSrcEltBits / 2);
46326 if ((N00.isUndef() || DAG.MaskedValueIsZero(N00, ZeroMask)) &&
46327 (N01.isUndef() || DAG.MaskedValueIsZero(N01, ZeroMask))) {
      return concatSubVectors(N00, N01, DAG, dl);
    }
  }

  return SDValue();
}
46335 /// Recursive helper for combineVectorSizedSetCCEquality() to see if we have a
46336 /// recognizable memcmp expansion.
46337 static bool isOrXorXorTree(SDValue X, bool Root = true) {
46338 if (X.getOpcode() == ISD::OR)
    return isOrXorXorTree(X.getOperand(0), false) &&
           isOrXorXorTree(X.getOperand(1), false);
  if (Root)
    return false;
  return X.getOpcode() == ISD::XOR;
}
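// For example, a 32-byte memcmp equality expansion typically reaches here as:
//   (or (xor (load A+0), (load B+0)), (xor (load A+16), (load B+16)))
// compared against zero, which this predicate recognizes as an or-xor-xor
// tree.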
/// Recursive helper for combineVectorSizedSetCCEquality() to emit the memcmp
/// expansion.
template<typename F>
46349 static SDValue emitOrXorXorTree(SDValue X, SDLoc &DL, SelectionDAG &DAG,
46350 EVT VecVT, EVT CmpVT, bool HasPT, F SToV) {
46351 SDValue Op0 = X.getOperand(0);
46352 SDValue Op1 = X.getOperand(1);
46353 if (X.getOpcode() == ISD::OR) {
46354 SDValue A = emitOrXorXorTree(Op0, DL, DAG, VecVT, CmpVT, HasPT, SToV);
46355 SDValue B = emitOrXorXorTree(Op1, DL, DAG, VecVT, CmpVT, HasPT, SToV);
    if (VecVT != CmpVT)
      return DAG.getNode(ISD::OR, DL, CmpVT, A, B);
    if (HasPT)
      return DAG.getNode(ISD::OR, DL, VecVT, A, B);
    return DAG.getNode(ISD::AND, DL, CmpVT, A, B);
  } else if (X.getOpcode() == ISD::XOR) {
    SDValue A = SToV(Op0);
    SDValue B = SToV(Op1);
    if (VecVT != CmpVT)
      return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETNE);
    if (HasPT)
      return DAG.getNode(ISD::XOR, DL, VecVT, A, B);
    return DAG.getSetCC(DL, CmpVT, A, B, ISD::SETEQ);
  }
  llvm_unreachable("Impossible");
}
46373 /// Try to map a 128-bit or larger integer comparison to vector instructions
46374 /// before type legalization splits it up into chunks.
46375 static SDValue combineVectorSizedSetCCEquality(SDNode *SetCC, SelectionDAG &DAG,
46376 const X86Subtarget &Subtarget) {
46377 ISD::CondCode CC = cast<CondCodeSDNode>(SetCC->getOperand(2))->get();
46378 assert((CC == ISD::SETNE || CC == ISD::SETEQ) && "Bad comparison predicate");
46380 // We're looking for an oversized integer equality comparison.
46381 SDValue X = SetCC->getOperand(0);
46382 SDValue Y = SetCC->getOperand(1);
46383 EVT OpVT = X.getValueType();
46384 unsigned OpSize = OpVT.getSizeInBits();
  if (!OpVT.isScalarInteger() || OpSize < 128)
    return SDValue();
46388 // Ignore a comparison with zero because that gets special treatment in
46389 // EmitTest(). But make an exception for the special case of a pair of
46390 // logically-combined vector-sized operands compared to zero. This pattern may
46391 // be generated by the memcmp expansion pass with oversized integer compares
46393 bool IsOrXorXorTreeCCZero = isNullConstant(Y) && isOrXorXorTree(X);
  if (isNullConstant(Y) && !IsOrXorXorTreeCCZero)
    return SDValue();
46397 // Don't perform this combine if constructing the vector will be expensive.
46398 auto IsVectorBitCastCheap = [](SDValue X) {
46399 X = peekThroughBitcasts(X);
46400 return isa<ConstantSDNode>(X) || X.getValueType().isVector() ||
           X.getOpcode() == ISD::LOAD;
  };
  if ((!IsVectorBitCastCheap(X) || !IsVectorBitCastCheap(Y)) &&
      !IsOrXorXorTreeCCZero)
    return SDValue();

  EVT VT = SetCC->getValueType(0);
  SDLoc DL(SetCC);
46410 // Use XOR (plus OR) and PTEST after SSE4.1 for 128/256-bit operands.
46411 // Use PCMPNEQ (plus OR) and KORTEST for 512-bit operands.
46412 // Otherwise use PCMPEQ (plus AND) and mask testing.
46413 if ((OpSize == 128 && Subtarget.hasSSE2()) ||
46414 (OpSize == 256 && Subtarget.hasAVX()) ||
46415 (OpSize == 512 && Subtarget.useAVX512Regs())) {
46416 bool HasPT = Subtarget.hasSSE41();
46418 // PTEST and MOVMSK are slow on Knights Landing and Knights Mill and widened
46419 // vector registers are essentially free. (Technically, widening registers
46420 // prevents load folding, but the tradeoff is worth it.)
46421 bool PreferKOT = Subtarget.preferMaskRegisters();
46422 bool NeedZExt = PreferKOT && !Subtarget.hasVLX() && OpSize != 512;
46424 EVT VecVT = MVT::v16i8;
46425 EVT CmpVT = PreferKOT ? MVT::v16i1 : VecVT;
46426 if (OpSize == 256) {
46427 VecVT = MVT::v32i8;
46428 CmpVT = PreferKOT ? MVT::v32i1 : VecVT;
46430 EVT CastVT = VecVT;
46431 bool NeedsAVX512FCast = false;
46432 if (OpSize == 512 || NeedZExt) {
46433 if (Subtarget.hasBWI()) {
46434 VecVT = MVT::v64i8;
        CmpVT = MVT::v64i1;
        if (OpSize == 512)
          CastVT = VecVT;
      } else {
        VecVT = MVT::v16i32;
        CmpVT = MVT::v16i1;
        CastVT = OpSize == 512 ? VecVT :
                 OpSize == 256 ? MVT::v8i32 : MVT::v4i32;
        NeedsAVX512FCast = true;
      }
    }
46447 auto ScalarToVector = [&](SDValue X) -> SDValue {
46448 bool TmpZext = false;
46449 EVT TmpCastVT = CastVT;
46450 if (X.getOpcode() == ISD::ZERO_EXTEND) {
46451 SDValue OrigX = X.getOperand(0);
46452 unsigned OrigSize = OrigX.getScalarValueSizeInBits();
46453 if (OrigSize < OpSize) {
46454 if (OrigSize == 128) {
            TmpCastVT = NeedsAVX512FCast ? MVT::v4i32 : MVT::v16i8;
            X = OrigX;
            TmpZext = true;
          } else if (OrigSize == 256) {
            TmpCastVT = NeedsAVX512FCast ? MVT::v8i32 : MVT::v32i8;
            X = OrigX;
            TmpZext = true;
          }
        }
      }
      X = DAG.getBitcast(TmpCastVT, X);
      if (!NeedZExt && !TmpZext)
        return X;
      return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT,
                         DAG.getConstant(0, DL, VecVT), X,
                         DAG.getVectorIdxConstant(0, DL));
    };

    SDValue Cmp;
46474 if (IsOrXorXorTreeCCZero) {
46475 // This is a bitwise-combined equality comparison of 2 pairs of vectors:
46476 // setcc i128 (or (xor A, B), (xor C, D)), 0, eq|ne
      // Use 2 vector equality compares and 'and' the results before doing a
      // MOVMSK.
      Cmp = emitOrXorXorTree(X, DL, DAG, VecVT, CmpVT, HasPT, ScalarToVector);
    } else {
      SDValue VecX = ScalarToVector(X);
      SDValue VecY = ScalarToVector(Y);
      if (VecVT != CmpVT) {
        Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETNE);
      } else if (HasPT) {
        Cmp = DAG.getNode(ISD::XOR, DL, VecVT, VecX, VecY);
      } else {
        Cmp = DAG.getSetCC(DL, CmpVT, VecX, VecY, ISD::SETEQ);
      }
    }
46491 // AVX512 should emit a setcc that will lower to kortest.
46492 if (VecVT != CmpVT) {
46493 EVT KRegVT = CmpVT == MVT::v64i1 ? MVT::i64 :
46494 CmpVT == MVT::v32i1 ? MVT::i32 : MVT::i16;
46495 return DAG.getSetCC(DL, VT, DAG.getBitcast(KRegVT, Cmp),
      return DAG.getSetCC(DL, VT, DAG.getBitcast(KRegVT, Cmp),
                          DAG.getConstant(0, DL, KRegVT), CC);
    }
    if (HasPT) {
      SDValue BCCmp = DAG.getBitcast(OpSize == 256 ? MVT::v4i64 : MVT::v2i64,
                                     Cmp);
      SDValue PT = DAG.getNode(X86ISD::PTEST, DL, MVT::i32, BCCmp, BCCmp);
      X86::CondCode X86CC = CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE;
      SDValue X86SetCC = getSETCC(X86CC, PT, DL, DAG);
      return DAG.getNode(ISD::TRUNCATE, DL, VT, X86SetCC.getValue(0));
    }
46506 // If all bytes match (bitmask is 0x(FFFF)FFFF), that's equality.
46507 // setcc i128 X, Y, eq --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, eq
46508 // setcc i128 X, Y, ne --> setcc (pmovmskb (pcmpeqb X, Y)), 0xFFFF, ne
46509 assert(Cmp.getValueType() == MVT::v16i8 &&
46510 "Non 128-bit vector on pre-SSE41 target");
46511 SDValue MovMsk = DAG.getNode(X86ISD::MOVMSK, DL, MVT::i32, Cmp);
46512 SDValue FFFFs = DAG.getConstant(0xFFFF, DL, MVT::i32);
    return DAG.getSetCC(DL, VT, MovMsk, FFFFs, CC);
  }

  return SDValue();
}
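// As a rough illustration, an i128 equality compare of two loaded values is
// expected to lower on an SSE4.1 target to something like:
//   movdqu (%rdi), %xmm0
//   movdqu (%rsi), %xmm1
//   pxor   %xmm1, %xmm0
//   ptest  %xmm0, %xmm0
//   sete   %al
// rather than a chain of scalar 64-bit compares after type legalization.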
46519 static SDValue combineSetCC(SDNode *N, SelectionDAG &DAG,
46520 const X86Subtarget &Subtarget) {
46521 const ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
46522 const SDValue LHS = N->getOperand(0);
46523 const SDValue RHS = N->getOperand(1);
46524 EVT VT = N->getValueType(0);
  EVT OpVT = LHS.getValueType();
  SDLoc DL(N);

  if (CC == ISD::SETNE || CC == ISD::SETEQ) {
    if (SDValue V = combineVectorSizedSetCCEquality(N, DAG, Subtarget))
      return V;

    if (VT == MVT::i1 && isNullConstant(RHS)) {
      SDValue X86CC;
      if (SDValue V =
              MatchVectorAllZeroTest(LHS, CC, DL, Subtarget, DAG, X86CC))
        return DAG.getNode(ISD::TRUNCATE, DL, VT,
                           DAG.getNode(X86ISD::SETCC, DL, MVT::i8, X86CC, V));
    }
  }
46541 if (VT.isVector() && VT.getVectorElementType() == MVT::i1 &&
46542 (CC == ISD::SETNE || CC == ISD::SETEQ || ISD::isSignedIntSetCC(CC))) {
    // Using temporaries to avoid messing up operand ordering for later
    // transformations if this doesn't work.
    SDValue Op0 = LHS;
    SDValue Op1 = RHS;
    ISD::CondCode TmpCC = CC;
    // Put build_vector on the right.
    if (Op0.getOpcode() == ISD::BUILD_VECTOR) {
      std::swap(Op0, Op1);
      TmpCC = ISD::getSetCCSwappedOperands(TmpCC);
    }

    bool IsSEXT0 =
        (Op0.getOpcode() == ISD::SIGN_EXTEND) &&
46556 (Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1);
46557 bool IsVZero1 = ISD::isBuildVectorAllZeros(Op1.getNode());
46559 if (IsSEXT0 && IsVZero1) {
46560 assert(VT == Op0.getOperand(0).getValueType() &&
46561 "Unexpected operand type");
46562 if (TmpCC == ISD::SETGT)
46563 return DAG.getConstant(0, DL, VT);
46564 if (TmpCC == ISD::SETLE)
46565 return DAG.getConstant(1, DL, VT);
46566 if (TmpCC == ISD::SETEQ || TmpCC == ISD::SETGE)
46567 return DAG.getNOT(DL, Op0.getOperand(0), VT);
46569 assert((TmpCC == ISD::SETNE || TmpCC == ISD::SETLT) &&
46570 "Unexpected condition code!");
      return Op0.getOperand(0);
    }
  }
46575 // If we have AVX512, but not BWI and this is a vXi16/vXi8 setcc, just
46576 // pre-promote its result type since vXi1 vectors don't get promoted
46577 // during type legalization.
46578 // NOTE: The element count check is to ignore operand types that need to
46579 // go through type promotion to a 128-bit vector.
46580 if (Subtarget.hasAVX512() && !Subtarget.hasBWI() && VT.isVector() &&
46581 VT.getVectorElementType() == MVT::i1 &&
46582 (OpVT.getVectorElementType() == MVT::i8 ||
46583 OpVT.getVectorElementType() == MVT::i16)) {
46584 SDValue Setcc = DAG.getSetCC(DL, OpVT, LHS, RHS, CC);
    return DAG.getNode(ISD::TRUNCATE, DL, VT, Setcc);
  }
46588 // For an SSE1-only target, lower a comparison of v4f32 to X86ISD::CMPP early
46589 // to avoid scalarization via legalization because v4i32 is not a legal type.
46590 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 &&
46591 LHS.getValueType() == MVT::v4f32)
    return LowerVSETCC(SDValue(N, 0), Subtarget, DAG);

  return SDValue();
}
46597 static SDValue combineMOVMSK(SDNode *N, SelectionDAG &DAG,
46598 TargetLowering::DAGCombinerInfo &DCI,
46599 const X86Subtarget &Subtarget) {
46600 SDValue Src = N->getOperand(0);
46601 MVT SrcVT = Src.getSimpleValueType();
46602 MVT VT = N->getSimpleValueType(0);
46603 unsigned NumBits = VT.getScalarSizeInBits();
46604 unsigned NumElts = SrcVT.getVectorNumElements();
46606 // Perform constant folding.
46607 if (ISD::isBuildVectorOfConstantSDNodes(Src.getNode())) {
    assert(VT == MVT::i32 && "Unexpected result type");
    APInt Imm(32, 0);
    for (unsigned Idx = 0, e = Src.getNumOperands(); Idx < e; ++Idx) {
      if (!Src.getOperand(Idx).isUndef() &&
          Src.getConstantOperandAPInt(Idx).isNegative())
        Imm.setBit(Idx);
    }
    return DAG.getConstant(Imm, SDLoc(N), VT);
  }
46618 // Look through int->fp bitcasts that don't change the element width.
46619 unsigned EltWidth = SrcVT.getScalarSizeInBits();
46620 if (Subtarget.hasSSE2() && Src.getOpcode() == ISD::BITCAST &&
46621 Src.getOperand(0).getScalarValueSizeInBits() == EltWidth)
46622 return DAG.getNode(X86ISD::MOVMSK, SDLoc(N), VT, Src.getOperand(0));
46624 // Fold movmsk(not(x)) -> not(movmsk) to improve folding of movmsk results
46625 // with scalar comparisons.
  if (SDValue NotSrc = IsNOT(Src, DAG)) {
    SDLoc DL(N);
    APInt NotMask = APInt::getLowBitsSet(NumBits, NumElts);
46629 NotSrc = DAG.getBitcast(SrcVT, NotSrc);
46630 return DAG.getNode(ISD::XOR, DL, VT,
46631 DAG.getNode(X86ISD::MOVMSK, DL, VT, NotSrc),
                       DAG.getConstant(NotMask, DL, VT));
  }
46635 // Simplify the inputs.
46636 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
46637 APInt DemandedMask(APInt::getAllOnesValue(NumBits));
46638 if (TLI.SimplifyDemandedBits(SDValue(N, 0), DemandedMask, DCI))
    return SDValue(N, 0);

  return SDValue();
}
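// Illustrative instance of the movmsk(not(x)) fold above: with a v4i32
// source, only 4 mask bits are live, so
//   (i32 (movmsk (not X))) --> (xor (movmsk X), 0xF)
// and the resulting scalar XOR can then fold into a later compare of the
// mask.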
46644 static SDValue combineX86GatherScatter(SDNode *N, SelectionDAG &DAG,
46645 TargetLowering::DAGCombinerInfo &DCI) {
46646 // With vector masks we only demand the upper bit of the mask.
46647 SDValue Mask = cast<X86MaskedGatherScatterSDNode>(N)->getMask();
46648 if (Mask.getScalarValueSizeInBits() != 1) {
46649 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
46650 APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
46651 if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI)) {
46652 if (N->getOpcode() != ISD::DELETED_NODE)
46653 DCI.AddToWorklist(N);
      return SDValue(N, 0);
    }
  }

  return SDValue();
}
46661 static SDValue rebuildGatherScatter(MaskedGatherScatterSDNode *GorS,
46662 SDValue Index, SDValue Base, SDValue Scale,
                                    SelectionDAG &DAG) {
  SDLoc DL(GorS);

  if (auto *Gather = dyn_cast<MaskedGatherSDNode>(GorS)) {
    SDValue Ops[] = { Gather->getChain(), Gather->getPassThru(),
                      Gather->getMask(), Base, Index, Scale };
46669 return DAG.getMaskedGather(Gather->getVTList(),
46670 Gather->getMemoryVT(), DL, Ops,
46671 Gather->getMemOperand(),
46672 Gather->getIndexType());
46674 auto *Scatter = cast<MaskedScatterSDNode>(GorS);
46675 SDValue Ops[] = { Scatter->getChain(), Scatter->getValue(),
46676 Scatter->getMask(), Base, Index, Scale };
46677 return DAG.getMaskedScatter(Scatter->getVTList(),
46678 Scatter->getMemoryVT(), DL,
46679 Ops, Scatter->getMemOperand(),
                              Scatter->getIndexType());
}
46683 static SDValue combineGatherScatter(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI) {
  SDLoc DL(N);
  auto *GorS = cast<MaskedGatherScatterSDNode>(N);
46687 SDValue Index = GorS->getIndex();
46688 SDValue Base = GorS->getBasePtr();
46689 SDValue Scale = GorS->getScale();
46691 if (DCI.isBeforeLegalize()) {
46692 unsigned IndexWidth = Index.getScalarValueSizeInBits();
46694 // Shrink constant indices if they are larger than 32-bits.
46695 // Only do this before legalize types since v2i64 could become v2i32.
46696 // FIXME: We could check that the type is legal if we're after legalize
46697 // types, but then we would need to construct test cases where that happens.
46698 // FIXME: We could support more than just constant vectors, but we need to
    // be careful with costing. A truncate that can be optimized out would be
    // fine.
46700 // Otherwise we might only want to create a truncate if it avoids a split.
46701 if (auto *BV = dyn_cast<BuildVectorSDNode>(Index)) {
46702 if (BV->isConstant() && IndexWidth > 32 &&
46703 DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
46704 unsigned NumElts = Index.getValueType().getVectorNumElements();
46705 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
46706 Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
        return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
      }
    }
46711 // Shrink any sign/zero extends from 32 or smaller to larger than 32 if
46712 // there are sufficient sign bits. Only do this before legalize types to
46713 // avoid creating illegal types in truncate.
46714 if ((Index.getOpcode() == ISD::SIGN_EXTEND ||
         Index.getOpcode() == ISD::ZERO_EXTEND) &&
        IndexWidth > 32 &&
        Index.getOperand(0).getScalarValueSizeInBits() <= 32 &&
46718 DAG.ComputeNumSignBits(Index) > (IndexWidth - 32)) {
46719 unsigned NumElts = Index.getValueType().getVectorNumElements();
46720 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32, NumElts);
46721 Index = DAG.getNode(ISD::TRUNCATE, DL, NewVT, Index);
      return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
    }
  }
46726 if (DCI.isBeforeLegalizeOps()) {
46727 unsigned IndexWidth = Index.getScalarValueSizeInBits();
46729 // Make sure the index is either i32 or i64
46730 if (IndexWidth != 32 && IndexWidth != 64) {
46731 MVT EltVT = IndexWidth > 32 ? MVT::i64 : MVT::i32;
46732 EVT IndexVT = EVT::getVectorVT(*DAG.getContext(), EltVT,
46733 Index.getValueType().getVectorNumElements());
46734 Index = DAG.getSExtOrTrunc(Index, DL, IndexVT);
      return rebuildGatherScatter(GorS, Index, Base, Scale, DAG);
    }
  }
46739 // With vector masks we only demand the upper bit of the mask.
46740 SDValue Mask = GorS->getMask();
46741 if (Mask.getScalarValueSizeInBits() != 1) {
46742 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
46743 APInt DemandedMask(APInt::getSignMask(Mask.getScalarValueSizeInBits()));
46744 if (TLI.SimplifyDemandedBits(Mask, DemandedMask, DCI)) {
46745 if (N->getOpcode() != ISD::DELETED_NODE)
46746 DCI.AddToWorklist(N);
      return SDValue(N, 0);
    }
  }

  return SDValue();
}
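// Illustrative instance of the index shrinking above: a gather whose index is
//   (v4i64 sign_extend (v4i32 X))
// is rebuilt with the v4i32 index X directly, keeping the index computation in
// a single 128-bit register instead of splitting a 256-bit index vector.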
46754 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
46755 static SDValue combineX86SetCC(SDNode *N, SelectionDAG &DAG,
                               const X86Subtarget &Subtarget) {
  SDLoc DL(N);
  X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
  SDValue EFLAGS = N->getOperand(1);

  // Try to simplify the EFLAGS and condition code operands.
  if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget))
    return getSETCC(CC, Flags, DL, DAG);

  return SDValue();
}
46768 /// Optimize branch condition evaluation.
46769 static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
                             const X86Subtarget &Subtarget) {
  SDLoc DL(N);
  SDValue EFLAGS = N->getOperand(3);
  X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));

  // Try to simplify the EFLAGS and condition code operands.
  // Make sure to not keep references to operands, as combineSetCCEFLAGS can
  // RAUW them under us.
  if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget)) {
    SDValue Cond = DAG.getTargetConstant(CC, DL, MVT::i8);
    return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
                       N->getOperand(1), Cond, Flags);
  }

  return SDValue();
}
46787 // TODO: Could we move this to DAGCombine?
46788 static SDValue combineVectorCompareAndMaskUnaryOp(SDNode *N,
46789 SelectionDAG &DAG) {
46790 // Take advantage of vector comparisons (etc.) producing 0 or -1 in each lane
46791 // to optimize away operation when it's from a constant.
46793 // The general transformation is:
46794 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
46795 // AND(VECTOR_CMP(x,y), constant2)
46796 // constant2 = UNARYOP(constant)
46798 // Early exit if this isn't a vector operation, the operand of the
46799 // unary operation isn't a bitwise AND, or if the sizes of the operations
46800 // aren't the same.
46801 EVT VT = N->getValueType(0);
46802 bool IsStrict = N->isStrictFPOpcode();
46803 unsigned NumEltBits = VT.getScalarSizeInBits();
46804 SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
46805 if (!VT.isVector() || Op0.getOpcode() != ISD::AND ||
46806 DAG.ComputeNumSignBits(Op0.getOperand(0)) != NumEltBits ||
      VT.getSizeInBits() != Op0.getValueSizeInBits())
    return SDValue();
46810 // Now check that the other operand of the AND is a constant. We could
46811 // make the transformation for non-constant splats as well, but it's unclear
46812 // that would be a benefit as it would not eliminate any operations, just
46813 // perform one more step in scalar code before moving to the vector unit.
46814 if (auto *BV = dyn_cast<BuildVectorSDNode>(Op0.getOperand(1))) {
46815 // Bail out if the vector isn't a constant.
46816 if (!BV->isConstant())
    // Everything checks out. Build up the new and improved node.
    SDLoc DL(N);
    EVT IntVT = BV->getValueType(0);
    // Create a new constant of the appropriate type for the transformed
    // DAG combine.
    SDValue SourceConst;
    if (IsStrict)
      SourceConst = DAG.getNode(N->getOpcode(), DL, {VT, MVT::Other},
                                {N->getOperand(0), SDValue(BV, 0)});
    else
      SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
    // The AND node needs bitcasts to/from an integer vector type around it.
    SDValue MaskConst = DAG.getBitcast(IntVT, SourceConst);
    SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT, Op0->getOperand(0),
                                 MaskConst);
    SDValue Res = DAG.getBitcast(VT, NewAnd);
    if (IsStrict)
      return DAG.getMergeValues({Res, SourceConst.getValue(1)}, DL);
    return Res;
  }

  return SDValue();
}
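// A concrete (illustrative) instance of the transform above:
//   (v4f32 sint_to_fp (and (v4i32 vector_cmp), <1,1,1,1>))
// becomes
//   (v4f32 bitcast (and (v4i32 vector_cmp), (bitcast <1.0,1.0,1.0,1.0>)))
// i.e. the conversion is applied to the constant at compile time, and the
// all-ones/all-zeros compare mask then selects 1.0 or 0.0 directly.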
46843 /// If we are converting a value to floating-point, try to replace scalar
46844 /// truncate of an extracted vector element with a bitcast. This tries to keep
46845 /// the sequence on XMM registers rather than moving between vector and GPRs.
46846 static SDValue combineToFPTruncExtElt(SDNode *N, SelectionDAG &DAG) {
46847 // TODO: This is currently only used by combineSIntToFP, but it is generalized
46848 // to allow being called by any similar cast opcode.
46849 // TODO: Consider merging this into lowering: vectorizeExtractedCast().
46850 SDValue Trunc = N->getOperand(0);
  if (!Trunc.hasOneUse() || Trunc.getOpcode() != ISD::TRUNCATE)
    return SDValue();
46854 SDValue ExtElt = Trunc.getOperand(0);
46855 if (!ExtElt.hasOneUse() || ExtElt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
      !isNullConstant(ExtElt.getOperand(1)))
    return SDValue();
46859 EVT TruncVT = Trunc.getValueType();
46860 EVT SrcVT = ExtElt.getValueType();
46861 unsigned DestWidth = TruncVT.getSizeInBits();
46862 unsigned SrcWidth = SrcVT.getSizeInBits();
  if (SrcWidth % DestWidth != 0)
    return SDValue();
46866 // inttofp (trunc (extelt X, 0)) --> inttofp (extelt (bitcast X), 0)
46867 EVT SrcVecVT = ExtElt.getOperand(0).getValueType();
46868 unsigned VecWidth = SrcVecVT.getSizeInBits();
46869 unsigned NumElts = VecWidth / DestWidth;
46870 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), TruncVT, NumElts);
  SDValue BitcastVec = DAG.getBitcast(BitcastVT, ExtElt.getOperand(0));
  SDLoc DL(N);
  SDValue NewExtElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, TruncVT,
                                  BitcastVec, ExtElt.getOperand(1));
  return DAG.getNode(N->getOpcode(), DL, N->getValueType(0), NewExtElt);
}
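// Illustrative instance of the bitcast trick above, with X: v2i64:
//   (f64 sint_to_fp (i32 trunc (i64 extract_elt X, 0)))
// --> (f64 sint_to_fp (i32 extract_elt (v4i32 bitcast X), 0))
// so the truncated element never has to round-trip through a GPR.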
46878 static SDValue combineUIntToFP(SDNode *N, SelectionDAG &DAG,
46879 const X86Subtarget &Subtarget) {
46880 bool IsStrict = N->isStrictFPOpcode();
46881 SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
46882 EVT VT = N->getValueType(0);
46883 EVT InVT = Op0.getValueType();
46885 // UINT_TO_FP(vXi1) -> SINT_TO_FP(ZEXT(vXi1 to vXi32))
46886 // UINT_TO_FP(vXi8) -> SINT_TO_FP(ZEXT(vXi8 to vXi32))
46887 // UINT_TO_FP(vXi16) -> SINT_TO_FP(ZEXT(vXi16 to vXi32))
  if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
    SDLoc dl(N);
    EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                                 InVT.getVectorNumElements());
    SDValue P = DAG.getNode(ISD::ZERO_EXTEND, dl, DstVT, Op0);

    // UINT_TO_FP isn't legal without AVX512 so use SINT_TO_FP.
    if (IsStrict)
      return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
                         {N->getOperand(0), P});
    return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
  }
46901 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
46902 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
46903 // the optimization here.
  if (DAG.SignBitIsZero(Op0)) {
    if (IsStrict)
      return DAG.getNode(ISD::STRICT_SINT_TO_FP, SDLoc(N), {VT, MVT::Other},
                         {N->getOperand(0), Op0});
    return DAG.getNode(ISD::SINT_TO_FP, SDLoc(N), VT, Op0);
  }

  return SDValue();
}
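// Illustrative instance: (f64 uint_to_fp (i32 and X, 0x7FFFFFFF)) has a known
// zero sign bit, so it is rewritten as (f64 sint_to_fp (and X, 0x7FFFFFFF))
// and can take the cheaper signed-conversion path.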
46914 static SDValue combineSIntToFP(SDNode *N, SelectionDAG &DAG,
46915 TargetLowering::DAGCombinerInfo &DCI,
46916 const X86Subtarget &Subtarget) {
46917 // First try to optimize away the conversion entirely when it's
46918 // conditionally from a constant. Vectors only.
  bool IsStrict = N->isStrictFPOpcode();
  if (SDValue Res = combineVectorCompareAndMaskUnaryOp(N, DAG))
    return Res;
46923 // Now move on to more general possibilities.
46924 SDValue Op0 = N->getOperand(IsStrict ? 1 : 0);
46925 EVT VT = N->getValueType(0);
46926 EVT InVT = Op0.getValueType();
46928 // SINT_TO_FP(vXi1) -> SINT_TO_FP(SEXT(vXi1 to vXi32))
46929 // SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
46930 // SINT_TO_FP(vXi16) -> SINT_TO_FP(SEXT(vXi16 to vXi32))
  if (InVT.isVector() && InVT.getScalarSizeInBits() < 32) {
    SDLoc dl(N);
    EVT DstVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
                                 InVT.getVectorNumElements());
    SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
    if (IsStrict)
      return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
                         {N->getOperand(0), P});
    return DAG.getNode(ISD::SINT_TO_FP, dl, VT, P);
  }
46942 // Without AVX512DQ we only support i64 to float scalar conversion. For both
46943 // vectors and scalars, see if we know that the upper bits are all the sign
46944 // bit, in which case we can truncate the input to i32 and convert from that.
46945 if (InVT.getScalarSizeInBits() > 32 && !Subtarget.hasDQI()) {
46946 unsigned BitWidth = InVT.getScalarSizeInBits();
46947 unsigned NumSignBits = DAG.ComputeNumSignBits(Op0);
46948 if (NumSignBits >= (BitWidth - 31)) {
46949 EVT TruncVT = MVT::i32;
46950 if (InVT.isVector())
46951 TruncVT = EVT::getVectorVT(*DAG.getContext(), TruncVT,
                                   InVT.getVectorNumElements());
      SDLoc dl(N);
      if (DCI.isBeforeLegalize() || TruncVT != MVT::v2i32) {
        SDValue Trunc = DAG.getNode(ISD::TRUNCATE, dl, TruncVT, Op0);
        if (IsStrict)
          return DAG.getNode(ISD::STRICT_SINT_TO_FP, dl, {VT, MVT::Other},
                             {N->getOperand(0), Trunc});
        return DAG.getNode(ISD::SINT_TO_FP, dl, VT, Trunc);
      }
      // If we're after legalize and the type is v2i32 we need to shuffle and
      // use CVTSI2P.
      assert(InVT == MVT::v2i64 && "Unexpected VT!");
      SDValue Cast = DAG.getBitcast(MVT::v4i32, Op0);
      SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Cast, Cast,
                                          { 0, 2, -1, -1 });
      if (IsStrict)
        return DAG.getNode(X86ISD::STRICT_CVTSI2P, dl, {VT, MVT::Other},
                           {N->getOperand(0), Shuf});
      return DAG.getNode(X86ISD::CVTSI2P, dl, VT, Shuf);
    }
  }
46974 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
46975 // a 32-bit target where SSE doesn't support i64->FP operations.
46976 if (!Subtarget.useSoftFloat() && Subtarget.hasX87() &&
46977 Op0.getOpcode() == ISD::LOAD) {
46978 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
46980 // This transformation is not supported if the result type is f16 or f128.
    if (VT == MVT::f16 || VT == MVT::f128)
      return SDValue();

    // If we have AVX512DQ we can use packed conversion instructions unless
    // the VT is f80.
    if (Subtarget.hasDQI() && VT != MVT::f80)
      return SDValue();
46989 if (Ld->isSimple() && !VT.isVector() && ISD::isNormalLoad(Op0.getNode()) &&
46990 Op0.hasOneUse() && !Subtarget.is64Bit() && InVT == MVT::i64) {
46991 std::pair<SDValue, SDValue> Tmp =
46992 Subtarget.getTargetLowering()->BuildFILD(
46993 VT, InVT, SDLoc(N), Ld->getChain(), Ld->getBasePtr(),
46994 Ld->getPointerInfo(), Ld->getOriginalAlign(), DAG);
      DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), Tmp.second);
      return Tmp.first;
    }
  }

  if (SDValue V = combineToFPTruncExtElt(N, DAG))
    return V;

  return SDValue();
}
47009 static bool needCarryOrOverflowFlag(SDValue Flags) {
47010 assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
       UI != UE; ++UI) {
    SDNode *User = *UI;

    X86::CondCode CC;
    switch (User->getOpcode()) {
    default:
      // Be conservative.
      return true;
    case X86ISD::SETCC:
    case X86ISD::SETCC_CARRY:
      CC = (X86::CondCode)User->getConstantOperandVal(0);
      break;
    case X86ISD::BRCOND:
      CC = (X86::CondCode)User->getConstantOperandVal(2);
      break;
    case X86ISD::CMOV:
      CC = (X86::CondCode)User->getConstantOperandVal(2);
      break;
    }

    switch (CC) {
    default: break;
    case X86::COND_A: case X86::COND_AE:
    case X86::COND_B: case X86::COND_BE:
    case X86::COND_O: case X86::COND_NO:
    case X86::COND_G: case X86::COND_GE:
    case X86::COND_L: case X86::COND_LE:
      return true;
    }
  }

  return false;
}
47047 static bool onlyZeroFlagUsed(SDValue Flags) {
47048 assert(Flags.getValueType() == MVT::i32 && "Unexpected VT!");
  for (SDNode::use_iterator UI = Flags->use_begin(), UE = Flags->use_end();
       UI != UE; ++UI) {
    SDNode *User = *UI;

    unsigned CCOpNo;
    switch (User->getOpcode()) {
    default:
      // Be conservative.
      return false;
    case X86ISD::SETCC:       CCOpNo = 0; break;
    case X86ISD::SETCC_CARRY: CCOpNo = 0; break;
    case X86ISD::BRCOND:      CCOpNo = 2; break;
    case X86ISD::CMOV:        CCOpNo = 2; break;
    }

    X86::CondCode CC = (X86::CondCode)User->getConstantOperandVal(CCOpNo);
    if (CC != X86::COND_E && CC != X86::COND_NE)
      return false;
  }

  return true;
}
47073 static SDValue combineCMP(SDNode *N, SelectionDAG &DAG) {
47074 // Only handle test patterns.
  if (!isNullConstant(N->getOperand(1)))
    return SDValue();
47078 // If we have a CMP of a truncated binop, see if we can make a smaller binop
47079 // and use its flags directly.
47080 // TODO: Maybe we should try promoting compares that only use the zero flag
  // first if we can prove the upper bits with computeKnownBits?
  SDLoc dl(N);
  SDValue Op = N->getOperand(0);
47084 EVT VT = Op.getValueType();
47086 // If we have a constant logical shift that's only used in a comparison
47087 // against zero turn it into an equivalent AND. This allows turning it into
47088 // a TEST instruction later.
47089 if ((Op.getOpcode() == ISD::SRL || Op.getOpcode() == ISD::SHL) &&
47090 Op.hasOneUse() && isa<ConstantSDNode>(Op.getOperand(1)) &&
47091 onlyZeroFlagUsed(SDValue(N, 0))) {
47092 unsigned BitWidth = VT.getSizeInBits();
47093 const APInt &ShAmt = Op.getConstantOperandAPInt(1);
47094 if (ShAmt.ult(BitWidth)) { // Avoid undefined shifts.
47095 unsigned MaskBits = BitWidth - ShAmt.getZExtValue();
47096 APInt Mask = Op.getOpcode() == ISD::SRL
47097 ? APInt::getHighBitsSet(BitWidth, MaskBits)
47098 : APInt::getLowBitsSet(BitWidth, MaskBits);
47099 if (Mask.isSignedIntN(32)) {
47100 Op = DAG.getNode(ISD::AND, dl, VT, Op.getOperand(0),
47101 DAG.getConstant(Mask, dl, VT));
47102 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                           DAG.getConstant(0, dl, VT));
      }
    }
  }

  // Look for a truncate with a single use.
  if (Op.getOpcode() != ISD::TRUNCATE || !Op.hasOneUse())
    return SDValue();

  Op = Op.getOperand(0);

  // Arithmetic op can only have one use.
  if (!Op.hasOneUse())
    return SDValue();

  unsigned NewOpc;
  switch (Op.getOpcode()) {
  default: return SDValue();
  case ISD::AND:
    // Skip and with constant. We have special handling for and with immediate
    // during isel to generate test instructions.
    if (isa<ConstantSDNode>(Op.getOperand(1)))
      return SDValue();
    NewOpc = X86ISD::AND;
    break;
  case ISD::OR:  NewOpc = X86ISD::OR;  break;
  case ISD::XOR: NewOpc = X86ISD::XOR; break;
  case ISD::ADD:
    // If the carry or overflow flag is used, we can't truncate.
    if (needCarryOrOverflowFlag(SDValue(N, 0)))
      return SDValue();
    NewOpc = X86ISD::ADD;
    break;
  case ISD::SUB:
    // If the carry or overflow flag is used, we can't truncate.
    if (needCarryOrOverflowFlag(SDValue(N, 0)))
      return SDValue();
    NewOpc = X86ISD::SUB;
    break;
  }
47144 // We found an op we can narrow. Truncate its inputs.
47145 SDValue Op0 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(0));
47146 SDValue Op1 = DAG.getNode(ISD::TRUNCATE, dl, VT, Op.getOperand(1));
47148 // Use a X86 specific opcode to avoid DAG combine messing with it.
47149 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
47150 Op = DAG.getNode(NewOpc, dl, VTs, Op0, Op1);
47152 // For AND, keep a CMP so that we can match the test pattern.
47153 if (NewOpc == X86ISD::AND)
47154 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
47155 DAG.getConstant(0, dl, VT));
  // Return the flags.
  return Op.getValue(1);
}
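// Illustrative instance of the narrowing above:
//   (X86ISD::CMP (i8 trunc (i32 add X, Y)), 0)
// becomes an i8 X86ISD::ADD of the truncated operands whose EFLAGS result is
// used directly, avoiding a wide ADD followed by a separate TEST of the
// truncated value.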
47161 static SDValue combineX86AddSub(SDNode *N, SelectionDAG &DAG,
47162 TargetLowering::DAGCombinerInfo &DCI) {
47163 assert((X86ISD::ADD == N->getOpcode() || X86ISD::SUB == N->getOpcode()) &&
47164 "Expected X86ISD::ADD or X86ISD::SUB");
47167 SDValue LHS = N->getOperand(0);
47168 SDValue RHS = N->getOperand(1);
47169 MVT VT = LHS.getSimpleValueType();
47170 unsigned GenericOpc = X86ISD::ADD == N->getOpcode() ? ISD::ADD : ISD::SUB;
47172 // If we don't use the flag result, simplify back to a generic ADD/SUB.
47173 if (!N->hasAnyUseOfValue(1)) {
47174 SDValue Res = DAG.getNode(GenericOpc, DL, VT, LHS, RHS);
47175 return DAG.getMergeValues({Res, DAG.getConstant(0, DL, MVT::i32)}, DL);
47178 // Fold any similar generic ADD/SUB opcodes to reuse this node.
47179 auto MatchGeneric = [&](SDValue N0, SDValue N1, bool Negate) {
47180 SDValue Ops[] = {N0, N1};
47181 SDVTList VTs = DAG.getVTList(N->getValueType(0));
    if (SDNode *GenericAddSub = DAG.getNodeIfExists(GenericOpc, VTs, Ops)) {
      SDValue Op(N, 0);
      if (Negate)
        Op = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Op);
      DCI.CombineTo(GenericAddSub, Op);
    }
  };
  MatchGeneric(LHS, RHS, false);
  MatchGeneric(RHS, LHS, X86ISD::SUB == N->getOpcode());

  return SDValue();
}
47195 static SDValue combineSBB(SDNode *N, SelectionDAG &DAG) {
47196 if (SDValue Flags = combineCarryThroughADD(N->getOperand(2), DAG)) {
47197 MVT VT = N->getSimpleValueType(0);
47198 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
    return DAG.getNode(X86ISD::SBB, SDLoc(N), VTs,
                       N->getOperand(0), N->getOperand(1),
                       Flags);
  }

  // Fold SBB(SUB(X,Y),0,Carry) -> SBB(X,Y,Carry)
  // iff the flag result is dead.
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);
  if (Op0.getOpcode() == ISD::SUB && isNullConstant(Op1) &&
      !N->hasAnyUseOfValue(1))
    return DAG.getNode(X86ISD::SBB, SDLoc(N), N->getVTList(),
                       Op0.getOperand(0), Op0.getOperand(1), N->getOperand(2));

  return SDValue();
}
47216 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
47217 static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
47218 TargetLowering::DAGCombinerInfo &DCI) {
47219 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
47220 // the result is either zero or one (depending on the input carry bit).
47221 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
47222 if (X86::isZeroNode(N->getOperand(0)) &&
47223 X86::isZeroNode(N->getOperand(1)) &&
      // We don't have a good way to replace an EFLAGS use, so only do this when
      // the EFLAGS result isn't used.
      SDValue(N, 1).use_empty()) {
    SDLoc DL(N);
    EVT VT = N->getValueType(0);
    SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
    SDValue Res1 =
        DAG.getNode(ISD::AND, DL, VT,
                    DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                                DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
                                N->getOperand(2)),
                    DAG.getConstant(1, DL, VT));
    return DCI.CombineTo(N, Res1, CarryOut);
  }
47239 if (SDValue Flags = combineCarryThroughADD(N->getOperand(2), DAG)) {
47240 MVT VT = N->getSimpleValueType(0);
47241 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
    return DAG.getNode(X86ISD::ADC, SDLoc(N), VTs,
                       N->getOperand(0), N->getOperand(1),
                       Flags);
  }

  return SDValue();
}
47250 /// If this is an add or subtract where one operand is produced by a cmp+setcc,
47251 /// then try to convert it to an ADC or SBB. This replaces TEST+SET+{ADD/SUB}
47252 /// with CMP+{ADC, SBB}.
47253 static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
47254 bool IsSub = N->getOpcode() == ISD::SUB;
47255 SDValue X = N->getOperand(0);
47256 SDValue Y = N->getOperand(1);
47258 // If this is an add, canonicalize a zext operand to the RHS.
47259 // TODO: Incomplete? What if both sides are zexts?
47260 if (!IsSub && X.getOpcode() == ISD::ZERO_EXTEND &&
      Y.getOpcode() != ISD::ZERO_EXTEND)
    std::swap(X, Y);
47264 // Look through a one-use zext.
47265 bool PeekedThroughZext = false;
47266 if (Y.getOpcode() == ISD::ZERO_EXTEND && Y.hasOneUse()) {
47267 Y = Y.getOperand(0);
47268 PeekedThroughZext = true;
47271 // If this is an add, canonicalize a setcc operand to the RHS.
47272 // TODO: Incomplete? What if both sides are setcc?
47273 // TODO: Should we allow peeking through a zext of the other operand?
47274 if (!IsSub && !PeekedThroughZext && X.getOpcode() == X86ISD::SETCC &&
      Y.getOpcode() != X86ISD::SETCC)
    std::swap(X, Y);

  if (Y.getOpcode() != X86ISD::SETCC || !Y.hasOneUse())
    return SDValue();

  SDLoc DL(N);
47282 EVT VT = N->getValueType(0);
47283 X86::CondCode CC = (X86::CondCode)Y.getConstantOperandVal(0);
47285 // If X is -1 or 0, then we have an opportunity to avoid constants required in
47286 // the general case below.
  auto *ConstantX = dyn_cast<ConstantSDNode>(X);
  if (ConstantX) {
    if ((!IsSub && CC == X86::COND_AE && ConstantX->isAllOnesValue()) ||
47290 (IsSub && CC == X86::COND_B && ConstantX->isNullValue())) {
47291 // This is a complicated way to get -1 or 0 from the carry flag:
47292 // -1 + SETAE --> -1 + (!CF) --> CF ? -1 : 0 --> SBB %eax, %eax
47293 // 0 - SETB --> 0 - (CF) --> CF ? -1 : 0 --> SBB %eax, %eax
47294 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                         DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
                         Y.getOperand(1));
    }

    if ((!IsSub && CC == X86::COND_BE && ConstantX->isAllOnesValue()) ||
47300 (IsSub && CC == X86::COND_A && ConstantX->isNullValue())) {
47301 SDValue EFLAGS = Y->getOperand(1);
47302 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
47303 EFLAGS.getValueType().isInteger() &&
47304 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
47305 // Swap the operands of a SUB, and we have the same pattern as above.
47306 // -1 + SETBE (SUB A, B) --> -1 + SETAE (SUB B, A) --> SUB + SBB
47307 // 0 - SETA (SUB A, B) --> 0 - SETB (SUB B, A) --> SUB + SBB
47308 SDValue NewSub = DAG.getNode(
47309 X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
47310 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
47311 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
47312 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                           DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
                           NewEFLAGS);
      }
    }
  }

  if (CC == X86::COND_B) {
47320 // X + SETB Z --> adc X, 0
47321 // X - SETB Z --> sbb X, 0
47322 return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
47323 DAG.getVTList(VT, MVT::i32), X,
                       DAG.getConstant(0, DL, VT), Y.getOperand(1));
  }
47327 if (CC == X86::COND_A) {
47328 SDValue EFLAGS = Y.getOperand(1);
47329 // Try to convert COND_A into COND_B in an attempt to facilitate
47330 // materializing "setb reg".
47332 // Do not flip "e > c", where "c" is a constant, because Cmp instruction
47333 // cannot take an immediate as its first operand.
47335 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
47336 EFLAGS.getValueType().isInteger() &&
47337 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
47338 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
47339 EFLAGS.getNode()->getVTList(),
47340 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
47341 SDValue NewEFLAGS = NewSub.getValue(EFLAGS.getResNo());
47342 return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL,
47343 DAG.getVTList(VT, MVT::i32), X,
                         DAG.getConstant(0, DL, VT), NewEFLAGS);
    }
  }

  if (CC == X86::COND_AE) {
47349 // X + SETAE --> sbb X, -1
47350 // X - SETAE --> adc X, -1
47351 return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL,
47352 DAG.getVTList(VT, MVT::i32), X,
                       DAG.getConstant(-1, DL, VT), Y.getOperand(1));
  }
47356 if (CC == X86::COND_BE) {
47357 // X + SETBE --> sbb X, -1
47358 // X - SETBE --> adc X, -1
47359 SDValue EFLAGS = Y.getOperand(1);
47360 // Try to convert COND_BE into COND_AE in an attempt to facilitate
47361 // materializing "setae reg".
47363 // Do not flip "e <= c", where "c" is a constant, because Cmp instruction
47364 // cannot take an immediate as its first operand.
47366 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.getNode()->hasOneUse() &&
47367 EFLAGS.getValueType().isInteger() &&
47368 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
47369 SDValue NewSub = DAG.getNode(
47370 X86ISD::SUB, SDLoc(EFLAGS), EFLAGS.getNode()->getVTList(),
47371 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
47372 SDValue NewEFLAGS = NewSub.getValue(EFLAGS.getResNo());
47373 return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL,
47374 DAG.getVTList(VT, MVT::i32), X,
                         DAG.getConstant(-1, DL, VT), NewEFLAGS);
    }
  }

  if (CC != X86::COND_E && CC != X86::COND_NE)
    return SDValue();
47382 SDValue Cmp = Y.getOperand(1);
47383 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
47384 !X86::isZeroNode(Cmp.getOperand(1)) ||
      !Cmp.getOperand(0).getValueType().isInteger())
    return SDValue();
47388 SDValue Z = Cmp.getOperand(0);
47389 EVT ZVT = Z.getValueType();
47391 // If X is -1 or 0, then we have an opportunity to avoid constants required in
  // the general case below.
  if (ConstantX) {
    // 'neg' sets the carry flag when Z != 0, so create 0 or -1 using 'sbb'
    // with fake operands:
    //  0 - (Z != 0) --> sbb %eax, %eax, (neg Z)
    // -1 + (Z == 0) --> sbb %eax, %eax, (neg Z)
    if ((IsSub && CC == X86::COND_NE && ConstantX->isNullValue()) ||
47399 (!IsSub && CC == X86::COND_E && ConstantX->isAllOnesValue())) {
47400 SDValue Zero = DAG.getConstant(0, DL, ZVT);
47401 SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
47402 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Zero, Z);
47403 return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
47404 DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
                         SDValue(Neg.getNode(), 1));
    }

    // cmp with 1 sets the carry flag when Z == 0, so create 0 or -1 using 'sbb'
47409 // with fake operands:
47410 // 0 - (Z == 0) --> sbb %eax, %eax, (cmp Z, 1)
47411 // -1 + (Z != 0) --> sbb %eax, %eax, (cmp Z, 1)
47412 if ((IsSub && CC == X86::COND_E && ConstantX->isNullValue()) ||
47413 (!IsSub && CC == X86::COND_NE && ConstantX->isAllOnesValue())) {
47414 SDValue One = DAG.getConstant(1, DL, ZVT);
47415 SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
47416 SDValue Cmp1 = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Z, One);
      return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                         DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
                         Cmp1.getValue(1));
    }
  }

  // (cmp Z, 1) sets the carry flag if Z is 0.
47424 SDValue One = DAG.getConstant(1, DL, ZVT);
47425 SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
47426 SDValue Cmp1 = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Z, One);
47428 // Add the flags type for ADC/SBB nodes.
47429 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
47431 // X - (Z != 0) --> sub X, (zext(setne Z, 0)) --> adc X, -1, (cmp Z, 1)
47432 // X + (Z != 0) --> add X, (zext(setne Z, 0)) --> sbb X, -1, (cmp Z, 1)
47433 if (CC == X86::COND_NE)
47434 return DAG.getNode(IsSub ? X86ISD::ADC : X86ISD::SBB, DL, VTs, X,
47435 DAG.getConstant(-1ULL, DL, VT), Cmp1.getValue(1));
47437 // X - (Z == 0) --> sub X, (zext(sete Z, 0)) --> sbb X, 0, (cmp Z, 1)
47438 // X + (Z == 0) --> add X, (zext(sete Z, 0)) --> adc X, 0, (cmp Z, 1)
  return DAG.getNode(IsSub ? X86ISD::SBB : X86ISD::ADC, DL, VTs, X,
                     DAG.getConstant(0, DL, VT), Cmp1.getValue(1));
}
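// Illustrative end-to-end instance of the ADC fold: for IR along the lines of
//   %c = icmp ult i32 %x, %y
//   %r = add i32 %a, (zext %c)
// the combine yields (X86ISD::ADC %a, 0, EFLAGS(sub %x, %y)), which selects
// roughly to "cmpl %y, %x; adcl $0, %a" with no intermediate SETB/MOVZX pair.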
47443 static SDValue matchPMADDWD(SelectionDAG &DAG, SDValue Op0, SDValue Op1,
47444 const SDLoc &DL, EVT VT,
47445 const X86Subtarget &Subtarget) {
47446 // Example of pattern we try to detect:
47447 // t := (v8i32 mul (sext (v8i16 x0), (sext (v8i16 x1))))
47448 //(add (build_vector (extract_elt t, 0),
47449 // (extract_elt t, 2),
47450 // (extract_elt t, 4),
47451 // (extract_elt t, 6)),
47452 // (build_vector (extract_elt t, 1),
47453 // (extract_elt t, 3),
47454 // (extract_elt t, 5),
47455 // (extract_elt t, 7)))
  if (!Subtarget.hasSSE2())
    return SDValue();

  if (Op0.getOpcode() != ISD::BUILD_VECTOR ||
      Op1.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();

  if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
      VT.getVectorNumElements() < 4 ||
      !isPowerOf2_32(VT.getVectorNumElements()))
    return SDValue();
47469 // Check if one of Op0,Op1 is of the form:
47470 // (build_vector (extract_elt Mul, 0),
47471 // (extract_elt Mul, 2),
  //               (extract_elt Mul, 4),
  //               (extract_elt Mul, 6)),
  // the other is of the form:
  // (build_vector (extract_elt Mul, 1),
  //               (extract_elt Mul, 3),
  //               (extract_elt Mul, 5),
  //               (extract_elt Mul, 7)),
  // and identify Mul.
  SDValue Mul;
  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; i += 2) {
47482 SDValue Op0L = Op0->getOperand(i), Op1L = Op1->getOperand(i),
47483 Op0H = Op0->getOperand(i + 1), Op1H = Op1->getOperand(i + 1);
47484 // TODO: Be more tolerant to undefs.
47485 if (Op0L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
47486 Op1L.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
47487 Op0H.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
47488 Op1H.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
47490 auto *Const0L = dyn_cast<ConstantSDNode>(Op0L->getOperand(1));
47491 auto *Const1L = dyn_cast<ConstantSDNode>(Op1L->getOperand(1));
47492 auto *Const0H = dyn_cast<ConstantSDNode>(Op0H->getOperand(1));
47493 auto *Const1H = dyn_cast<ConstantSDNode>(Op1H->getOperand(1));
47494 if (!Const0L || !Const1L || !Const0H || !Const1H)
47496 unsigned Idx0L = Const0L->getZExtValue(), Idx1L = Const1L->getZExtValue(),
47497 Idx0H = Const0H->getZExtValue(), Idx1H = Const1H->getZExtValue();
    // Commutativity of mul allows factors of a product to reorder.
    if (Idx0L > Idx1L)
      std::swap(Idx0L, Idx1L);
    if (Idx0H > Idx1H)
      std::swap(Idx0H, Idx1H);
47503 // Commutativity of add allows pairs of factors to reorder.
47504 if (Idx0L > Idx0H) {
47505 std::swap(Idx0L, Idx0H);
47506 std::swap(Idx1L, Idx1H);
    if (Idx0L != 2 * i || Idx1L != 2 * i + 1 || Idx0H != 2 * i + 2 ||
        Idx1H != 2 * i + 3)
      return SDValue();
    if (!Mul) {
      // First time an extract_elt's source vector is visited. Must be a MUL
      // with 2X the number of vector elements of the BUILD_VECTOR.
      // Both extracts must be from the same MUL.
      Mul = Op0L->getOperand(0);
      if (Mul->getOpcode() != ISD::MUL ||
          Mul.getValueType().getVectorNumElements() != 2 * e)
        return SDValue();
    } else {
      // Check that the extract is from the same MUL previously seen.
      if (Mul != Op0L->getOperand(0) || Mul != Op1L->getOperand(0) ||
          Mul != Op0H->getOperand(0) || Mul != Op1H->getOperand(0))
        return SDValue();
    }
  }
  // Check if the Mul source can be safely shrunk.
  ShrinkMode Mode;
  if (!canReduceVMulWidth(Mul.getNode(), DAG, Mode) ||
      Mode == ShrinkMode::MULU16)
    return SDValue();
47532 EVT TruncVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
47533 VT.getVectorNumElements() * 2);
47534 SDValue N0 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Mul.getOperand(0));
47535 SDValue N1 = DAG.getNode(ISD::TRUNCATE, DL, TruncVT, Mul.getOperand(1));
47537 auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
47538 ArrayRef<SDValue> Ops) {
47539 EVT InVT = Ops[0].getValueType();
47540 assert(InVT == Ops[1].getValueType() && "Operands' types mismatch");
47541 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
47542 InVT.getVectorNumElements() / 2);
    return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
  };
  return SplitOpsAndApply(DAG, Subtarget, DL, VT, { N0, N1 }, PMADDBuilder);
}
47548 // Attempt to turn this pattern into PMADDWD.
47549 // (add (mul (sext (build_vector)), (sext (build_vector))),
47550 // (mul (sext (build_vector)), (sext (build_vector)))
47551 static SDValue matchPMADDWD_2(SelectionDAG &DAG, SDValue N0, SDValue N1,
47552 const SDLoc &DL, EVT VT,
47553 const X86Subtarget &Subtarget) {
  if (!Subtarget.hasSSE2())
    return SDValue();

  if (N0.getOpcode() != ISD::MUL || N1.getOpcode() != ISD::MUL)
    return SDValue();

  if (!VT.isVector() || VT.getVectorElementType() != MVT::i32 ||
      VT.getVectorNumElements() < 4 ||
      !isPowerOf2_32(VT.getVectorNumElements()))
    return SDValue();
47565 SDValue N00 = N0.getOperand(0);
47566 SDValue N01 = N0.getOperand(1);
47567 SDValue N10 = N1.getOperand(0);
47568 SDValue N11 = N1.getOperand(1);
47570 // All inputs need to be sign extends.
47571 // TODO: Support ZERO_EXTEND from known positive?
47572 if (N00.getOpcode() != ISD::SIGN_EXTEND ||
47573 N01.getOpcode() != ISD::SIGN_EXTEND ||
47574 N10.getOpcode() != ISD::SIGN_EXTEND ||
      N11.getOpcode() != ISD::SIGN_EXTEND)
    return SDValue();
47578 // Peek through the extends.
47579 N00 = N00.getOperand(0);
47580 N01 = N01.getOperand(0);
47581 N10 = N10.getOperand(0);
47582 N11 = N11.getOperand(0);
47584 // Must be extending from vXi16.
47585 EVT InVT = N00.getValueType();
47586 if (InVT.getVectorElementType() != MVT::i16 || N01.getValueType() != InVT ||
      N10.getValueType() != InVT || N11.getValueType() != InVT)
    return SDValue();
47590 // All inputs should be build_vectors.
47591 if (N00.getOpcode() != ISD::BUILD_VECTOR ||
47592 N01.getOpcode() != ISD::BUILD_VECTOR ||
47593 N10.getOpcode() != ISD::BUILD_VECTOR ||
      N11.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();
47597 // For each element, we need to ensure we have an odd element from one vector
47598 // multiplied by the odd element of another vector and the even element from
47599 // one of the same vectors being multiplied by the even element from the
47600 // other vector. So we need to make sure for each element i, this operator
47601 // is being performed:
  // A[2 * i] * B[2 * i] + A[2 * i + 1] * B[2 * i + 1]
  SDValue In0, In1;
  for (unsigned i = 0; i != N00.getNumOperands(); ++i) {
47605 SDValue N00Elt = N00.getOperand(i);
47606 SDValue N01Elt = N01.getOperand(i);
47607 SDValue N10Elt = N10.getOperand(i);
47608 SDValue N11Elt = N11.getOperand(i);
47609 // TODO: Be more tolerant to undefs.
47610 if (N00Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
47611 N01Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
47612 N10Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        N11Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();
47615 auto *ConstN00Elt = dyn_cast<ConstantSDNode>(N00Elt.getOperand(1));
47616 auto *ConstN01Elt = dyn_cast<ConstantSDNode>(N01Elt.getOperand(1));
47617 auto *ConstN10Elt = dyn_cast<ConstantSDNode>(N10Elt.getOperand(1));
47618 auto *ConstN11Elt = dyn_cast<ConstantSDNode>(N11Elt.getOperand(1));
    if (!ConstN00Elt || !ConstN01Elt || !ConstN10Elt || !ConstN11Elt)
      return SDValue();
47621 unsigned IdxN00 = ConstN00Elt->getZExtValue();
47622 unsigned IdxN01 = ConstN01Elt->getZExtValue();
47623 unsigned IdxN10 = ConstN10Elt->getZExtValue();
47624 unsigned IdxN11 = ConstN11Elt->getZExtValue();
47625 // Add is commutative so indices can be reordered.
47626 if (IdxN00 > IdxN10) {
47627 std::swap(IdxN00, IdxN10);
47628 std::swap(IdxN01, IdxN11);
    // N0 indices must be the even elements. N1 indices must be the next odd
    // elements.
    if (IdxN00 != 2 * i || IdxN10 != 2 * i + 1 ||
        IdxN01 != 2 * i || IdxN11 != 2 * i + 1)
      return SDValue();
47634 SDValue N00In = N00Elt.getOperand(0);
47635 SDValue N01In = N01Elt.getOperand(0);
47636 SDValue N10In = N10Elt.getOperand(0);
47637 SDValue N11In = N11Elt.getOperand(0);
    // First time we find an input capture it.
    if (!In0) {
      In0 = N00In;
      In1 = N01In;
    }
    // Mul is commutative so the input vectors can be in any order.
    // Canonicalize to make the compares easier.
    if (In0 != N00In)
      std::swap(N00In, N01In);
    if (In0 != N10In)
      std::swap(N10In, N11In);
    if (In0 != N00In || In1 != N01In || In0 != N10In || In1 != N11In)
      return SDValue();
  }
47653 auto PMADDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
47654 ArrayRef<SDValue> Ops) {
    // Shrink by adding truncate nodes and let DAGCombine fold with the
    // sources.
    EVT OpVT = Ops[0].getValueType();
47658 assert(OpVT.getScalarType() == MVT::i16 &&
47659 "Unexpected scalar element type");
47660 assert(OpVT == Ops[1].getValueType() && "Operands' types mismatch");
47661 EVT ResVT = EVT::getVectorVT(*DAG.getContext(), MVT::i32,
47662 OpVT.getVectorNumElements() / 2);
    return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT, Ops[0], Ops[1]);
  };
  return SplitOpsAndApply(DAG, Subtarget, DL, VT, { In0, In1 },
                          PMADDBuilder);
}
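// Illustrative instance of the pattern matched above (with <8 x i16> inputs
// X and Y): the interleaved sums of products
//   X[0]*Y[0] + X[1]*Y[1], X[2]*Y[2] + X[3]*Y[3], ...
// computed via sign-extended i32 multiplies and pairwise adds collapse into a
// single (v4i32 X86ISD::VPMADDWD X, Y), i.e. one PMADDWD instruction.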
47669 static SDValue combineAddOrSubToHADDorHSUB(SDNode *N, SelectionDAG &DAG,
47670 const X86Subtarget &Subtarget) {
47671 EVT VT = N->getValueType(0);
47672 SDValue Op0 = N->getOperand(0);
47673 SDValue Op1 = N->getOperand(1);
47674 bool IsAdd = N->getOpcode() == ISD::ADD;
47675 assert((IsAdd || N->getOpcode() == ISD::SUB) && "Wrong opcode");
47677 SmallVector<int, 8> PostShuffleMask;
47678 if ((VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v16i16 ||
47679 VT == MVT::v8i32) &&
47680 Subtarget.hasSSSE3() &&
47681 isHorizontalBinOp(Op0, Op1, DAG, Subtarget, IsAdd, PostShuffleMask)) {
47682 auto HOpBuilder = [IsAdd](SelectionDAG &DAG, const SDLoc &DL,
47683 ArrayRef<SDValue> Ops) {
47684 return DAG.getNode(IsAdd ? X86ISD::HADD : X86ISD::HSUB, DL,
47685 Ops[0].getValueType(), Ops);
47687 SDValue HorizBinOp =
47688 SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {Op0, Op1}, HOpBuilder);
    if (!PostShuffleMask.empty())
      HorizBinOp = DAG.getVectorShuffle(VT, SDLoc(HorizBinOp), HorizBinOp,
                                        DAG.getUNDEF(VT), PostShuffleMask);
    return HorizBinOp;
  }

  return SDValue();
}
47698 static SDValue combineAdd(SDNode *N, SelectionDAG &DAG,
47699 TargetLowering::DAGCombinerInfo &DCI,
47700 const X86Subtarget &Subtarget) {
47701 EVT VT = N->getValueType(0);
47702 SDValue Op0 = N->getOperand(0);
47703 SDValue Op1 = N->getOperand(1);
  if (SDValue MAdd = matchPMADDWD(DAG, Op0, Op1, SDLoc(N), VT, Subtarget))
    return MAdd;
  if (SDValue MAdd = matchPMADDWD_2(DAG, Op0, Op1, SDLoc(N), VT, Subtarget))
    return MAdd;

  // Try to synthesize horizontal adds from adds of shuffles.
  if (SDValue V = combineAddOrSubToHADDorHSUB(N, DAG, Subtarget))
    return V;
47714 // If vectors of i1 are legal, turn (add (zext (vXi1 X)), Y) into
47715 // (sub Y, (sext (vXi1 X))).
47716 // FIXME: We have the (sub Y, (zext (vXi1 X))) -> (add (sext (vXi1 X)), Y) in
47717 // generic DAG combine without a legal type check, but adding this there
47718 // caused regressions.
47719 if (VT.isVector()) {
47720 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
47721 if (Op0.getOpcode() == ISD::ZERO_EXTEND &&
47722 Op0.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
        TLI.isTypeLegal(Op0.getOperand(0).getValueType())) {
      SDLoc DL(N);
      SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op0.getOperand(0));
      return DAG.getNode(ISD::SUB, DL, VT, Op1, SExt);
    }

    if (Op1.getOpcode() == ISD::ZERO_EXTEND &&
        Op1.getOperand(0).getValueType().getVectorElementType() == MVT::i1 &&
        TLI.isTypeLegal(Op1.getOperand(0).getValueType())) {
      SDLoc DL(N);
      SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, VT, Op1.getOperand(0));
      return DAG.getNode(ISD::SUB, DL, VT, Op0, SExt);
    }
  }
  return combineAddOrSubToADCOrSBB(N, DAG);
}
47741 static SDValue combineSubToSubus(SDNode *N, SelectionDAG &DAG,
47742 const X86Subtarget &Subtarget) {
47743 SDValue Op0 = N->getOperand(0);
47744 SDValue Op1 = N->getOperand(1);
47745 EVT VT = N->getValueType(0);
  if (!VT.isVector())
    return SDValue();
47750 // PSUBUS is supported, starting from SSE2, but truncation for v8i32
47751 // is only worth it with SSSE3 (PSHUFB).
47752 EVT EltVT = VT.getVectorElementType();
47753 if (!(Subtarget.hasSSE2() && (EltVT == MVT::i8 || EltVT == MVT::i16)) &&
47754 !(Subtarget.hasSSSE3() && (VT == MVT::v8i32 || VT == MVT::v8i64)) &&
      !(Subtarget.useBWIRegs() && (VT == MVT::v16i32)))
    return SDValue();
47758 SDValue SubusLHS, SubusRHS;
47759 // Try to find umax(a,b) - b or a - umin(a,b) patterns
47760 // they may be converted to subus(a,b).
47761 // TODO: Need to add IR canonicalization for this code.
  if (Op0.getOpcode() == ISD::UMAX) {
    SubusRHS = Op1;
    SDValue MaxLHS = Op0.getOperand(0);
    SDValue MaxRHS = Op0.getOperand(1);
    if (MaxLHS == Op1)
      SubusLHS = MaxRHS;
    else if (MaxRHS == Op1)
      SubusLHS = MaxLHS;
    else
      return SDValue();
  } else if (Op1.getOpcode() == ISD::UMIN) {
    SubusLHS = Op0;
    SDValue MinLHS = Op1.getOperand(0);
    SDValue MinRHS = Op1.getOperand(1);
    if (MinLHS == Op0)
      SubusRHS = MinRHS;
    else if (MinRHS == Op0)
      SubusRHS = MinLHS;
    else
      return SDValue();
  } else if (Op1.getOpcode() == ISD::TRUNCATE &&
47783 Op1.getOperand(0).getOpcode() == ISD::UMIN &&
47784 (EltVT == MVT::i8 || EltVT == MVT::i16)) {
47785 // Special case where the UMIN has been truncated. Try to push the truncate
    // further up. This is similar to the i32/i64 special processing.
    SubusLHS = Op0;
    SDValue MinLHS = Op1.getOperand(0).getOperand(0);
    SDValue MinRHS = Op1.getOperand(0).getOperand(1);
    EVT TruncVT = Op1.getOperand(0).getValueType();
    if (!(Subtarget.hasSSSE3() && (TruncVT == MVT::v8i32 ||
                                   TruncVT == MVT::v8i64)) &&
        !(Subtarget.useBWIRegs() && (TruncVT == MVT::v16i32)))
      return SDValue();
47795 SDValue OpToSaturate;
47796 if (MinLHS.getOpcode() == ISD::ZERO_EXTEND &&
47797 MinLHS.getOperand(0) == Op0)
47798 OpToSaturate = MinRHS;
47799 else if (MinRHS.getOpcode() == ISD::ZERO_EXTEND &&
47800 MinRHS.getOperand(0) == Op0)
47801 OpToSaturate = MinLHS;
47802 else
47803 return SDValue();
47805 // Saturate the non-extended input and then truncate it.
47806 SDLoc DL(N);
47807 SDValue SaturationConst =
47808 DAG.getConstant(APInt::getLowBitsSet(TruncVT.getScalarSizeInBits(),
47809 VT.getScalarSizeInBits()),
47810 DL, TruncVT);
47811 SDValue UMin = DAG.getNode(ISD::UMIN, DL, TruncVT, OpToSaturate,
47812 SaturationConst);
47813 SubusRHS = DAG.getNode(ISD::TRUNCATE, DL, VT, UMin);
47814 } else
47815 return SDValue();
47817 // PSUBUS doesn't support v8i32/v8i64/v16i32, but it can be enabled with
47818 // special preprocessing in some cases.
47819 if (EltVT == MVT::i8 || EltVT == MVT::i16)
47820 return DAG.getNode(ISD::USUBSAT, SDLoc(N), VT, SubusLHS, SubusRHS);
47822 assert((VT == MVT::v8i32 || VT == MVT::v16i32 || VT == MVT::v8i64) &&
47823 "Unexpected VT!");
47825 // The special preprocessing case can only be applied
47826 // if the value was zero-extended from 16 bits,
47827 // so we require the first 16 bits to be zero for 32-bit
47828 // values, or the first 48 bits for 64-bit values.
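// For example, a v8i32 sub whose LHS has at least 16 known leading zero bits
// per element can be narrowed to a v8i16 usubsat (after clamping the RHS to
// 0xFFFF with umin) and then zero-extended back to v8i32.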
47829 KnownBits Known = DAG.computeKnownBits(SubusLHS);
47830 unsigned NumZeros = Known.countMinLeadingZeros();
47831 if ((VT == MVT::v8i64 && NumZeros < 48) || NumZeros < 16)
47832 return SDValue();
47834 EVT ExtType = SubusLHS.getValueType();
47835 EVT ShrinkedType;
47836 if (VT == MVT::v8i32 || VT == MVT::v8i64)
47837 ShrinkedType = MVT::v8i16;
47838 else
47839 ShrinkedType = NumZeros >= 24 ? MVT::v16i8 : MVT::v16i16;
47841 // If SubusLHS is zero-extended, truncate SubusRHS to its size:
47842 // SubusRHS = umin(0xFFF.., SubusRHS).
47843 SDValue SaturationConst =
47844 DAG.getConstant(APInt::getLowBitsSet(ExtType.getScalarSizeInBits(),
47845 ShrinkedType.getScalarSizeInBits()),
47846 SDLoc(SubusLHS), ExtType);
47847 SDValue UMin = DAG.getNode(ISD::UMIN, SDLoc(SubusLHS), ExtType, SubusRHS,
47848 SaturationConst);
47849 SDValue NewSubusLHS =
47850 DAG.getZExtOrTrunc(SubusLHS, SDLoc(SubusLHS), ShrinkedType);
47851 SDValue NewSubusRHS = DAG.getZExtOrTrunc(UMin, SDLoc(SubusRHS), ShrinkedType);
47852 SDValue Psubus = DAG.getNode(ISD::USUBSAT, SDLoc(N), ShrinkedType,
47853 NewSubusLHS, NewSubusRHS);
47855 // Zero-extend the result; it may be used somewhere as 32-bit. If not, the
47856 // zext and the following trunc will combine away.
47857 return DAG.getZExtOrTrunc(Psubus, SDLoc(N), ExtType);
47858 }
47860 static SDValue combineSub(SDNode *N, SelectionDAG &DAG,
47861 TargetLowering::DAGCombinerInfo &DCI,
47862 const X86Subtarget &Subtarget) {
47863 SDValue Op0 = N->getOperand(0);
47864 SDValue Op1 = N->getOperand(1);
47866 // X86 can't encode an immediate LHS of a sub. See if we can push the
47867 // negation into a preceding instruction.
47868 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
47869 // If the RHS of the sub is a XOR with one use and a constant, invert the
47870 // immediate. Then add one to the LHS of the sub so we can turn
47871 // X-Y -> X+~Y+1, saving one register.
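// For example: (sub 5, (xor X, 2)) -> (add (xor X, ~2), 6), because in
// two's complement C - (X ^ XorC) == (X ^ ~XorC) + (C + 1).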
47872 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
47873 isa<ConstantSDNode>(Op1.getOperand(1))) {
47874 const APInt &XorC = Op1.getConstantOperandAPInt(1);
47875 EVT VT = Op0.getValueType();
47876 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
47877 Op1.getOperand(0),
47878 DAG.getConstant(~XorC, SDLoc(Op1), VT));
47879 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
47880 DAG.getConstant(C->getAPIntValue() + 1, SDLoc(N), VT));
47881 }
47882 }
47884 // Try to synthesize horizontal subs from subs of shuffles.
47885 if (SDValue V = combineAddOrSubToHADDorHSUB(N, DAG, Subtarget))
47886 return V;
47888 // Try to create PSUBUS if SUB's argument is max/min
47889 if (SDValue V = combineSubToSubus(N, DAG, Subtarget))
47890 return V;
47892 return combineAddOrSubToADCOrSBB(N, DAG);
47893 }
47895 static SDValue combineVectorCompare(SDNode *N, SelectionDAG &DAG,
47896 const X86Subtarget &Subtarget) {
47897 MVT VT = N->getSimpleValueType(0);
47898 SDLoc DL(N);
47900 if (N->getOperand(0) == N->getOperand(1)) {
47901 if (N->getOpcode() == X86ISD::PCMPEQ)
47902 return DAG.getConstant(-1, DL, VT);
47903 if (N->getOpcode() == X86ISD::PCMPGT)
47904 return DAG.getConstant(0, DL, VT);
47905 }
47907 return SDValue();
47908 }
47910 /// Helper that combines an array of subvector ops as if they were the operands
47911 /// of an ISD::CONCAT_VECTORS node, but may have come from another source (e.g.
47912 /// ISD::INSERT_SUBVECTOR). The ops are assumed to be of the same type.
47913 static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
47914 ArrayRef<SDValue> Ops, SelectionDAG &DAG,
47915 TargetLowering::DAGCombinerInfo &DCI,
47916 const X86Subtarget &Subtarget) {
47917 assert(Subtarget.hasAVX() && "AVX assumed for concat_vectors");
47918 unsigned EltSizeInBits = VT.getScalarSizeInBits();
47920 if (llvm::all_of(Ops, [](SDValue Op) { return Op.isUndef(); }))
47921 return DAG.getUNDEF(VT);
47923 if (llvm::all_of(Ops, [](SDValue Op) {
47924 return ISD::isBuildVectorAllZeros(Op.getNode());
47925 }))
47926 return getZeroVector(VT, Subtarget, DAG, DL);
47928 SDValue Op0 = Ops[0];
47929 bool IsSplat = llvm::all_of(Ops, [&Op0](SDValue Op) { return Op == Op0; });
47931 // Fold subvector loads into one.
47932 // If needed, look through bitcasts to get to the load.
47933 if (auto *FirstLd = dyn_cast<LoadSDNode>(peekThroughBitcasts(Op0))) {
47934 bool Fast;
47935 const X86TargetLowering *TLI = Subtarget.getTargetLowering();
47936 if (TLI->allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
47937 *FirstLd->getMemOperand(), &Fast) &&
47938 Fast) {
47939 if (SDValue Ld =
47940 EltsFromConsecutiveLoads(VT, Ops, DL, DAG, Subtarget, false))
47941 return Ld;
47942 }
47943 }
47945 // Repeated subvectors.
47946 if (IsSplat) {
47947 // If this broadcast/subv_broadcast is inserted into both halves, use a
47948 // larger broadcast/subv_broadcast.
47949 if (Op0.getOpcode() == X86ISD::VBROADCAST ||
47950 Op0.getOpcode() == X86ISD::SUBV_BROADCAST)
47951 return DAG.getNode(Op0.getOpcode(), DL, VT, Op0.getOperand(0));
47953 // If this broadcast_load is inserted into both halves, use a larger
47954 // broadcast_load. Update other uses to use an extracted subvector.
47955 if (Op0.getOpcode() == X86ISD::VBROADCAST_LOAD) {
47956 auto *MemIntr = cast<MemIntrinsicSDNode>(Op0);
47957 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
47958 SDValue Ops[] = {MemIntr->getChain(), MemIntr->getBasePtr()};
47959 SDValue BcastLd = DAG.getMemIntrinsicNode(
47960 X86ISD::VBROADCAST_LOAD, DL, Tys, Ops, MemIntr->getMemoryVT(),
47961 MemIntr->getMemOperand());
47962 DAG.ReplaceAllUsesOfValueWith(
47963 Op0, extractSubVector(BcastLd, 0, DAG, DL, Op0.getValueSizeInBits()));
47964 DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
47965 return BcastLd;
47966 }
47968 // concat_vectors(movddup(x),movddup(x)) -> broadcast(x)
47969 if (Op0.getOpcode() == X86ISD::MOVDDUP && VT == MVT::v4f64 &&
47970 (Subtarget.hasAVX2() || MayFoldLoad(Op0.getOperand(0))))
47971 return DAG.getNode(X86ISD::VBROADCAST, DL, VT,
47972 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f64,
47973 Op0.getOperand(0),
47974 DAG.getIntPtrConstant(0, DL)));
47976 // concat_vectors(scalar_to_vector(x),scalar_to_vector(x)) -> broadcast(x)
47977 if (Op0.getOpcode() == ISD::SCALAR_TO_VECTOR &&
47978 (Subtarget.hasAVX2() ||
47979 (EltSizeInBits >= 32 && MayFoldLoad(Op0.getOperand(0)))) &&
47980 Op0.getOperand(0).getValueType() == VT.getScalarType())
47981 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, Op0.getOperand(0));
47983 // concat_vectors(extract_subvector(broadcast(x)),
47984 // extract_subvector(broadcast(x))) -> broadcast(x)
47985 if (Op0.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
47986 Op0.getOperand(0).getValueType() == VT) {
47987 if (Op0.getOperand(0).getOpcode() == X86ISD::VBROADCAST ||
47988 Op0.getOperand(0).getOpcode() == X86ISD::VBROADCAST_LOAD)
47989 return Op0.getOperand(0);
47990 }
47991 }
47993 // Repeated opcode.
47994 // TODO - combineX86ShufflesRecursively should handle shuffle concatenation
47995 // but it currently struggles with different vector widths.
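// For example, on AVX2:
//   concat(pshufd(A, imm), pshufd(B, imm)) -> pshufd(concat(A, B), imm),
// provided every operand uses the same shuffle immediate.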
47996 if (llvm::all_of(Ops, [Op0](SDValue Op) {
47997 return Op.getOpcode() == Op0.getOpcode();
47998 })) {
47999 unsigned NumOps = Ops.size();
48000 switch (Op0.getOpcode()) {
48001 case X86ISD::SHUFP: {
48002 // Add SHUFPD support if/when necessary.
48003 if (!IsSplat && VT.getScalarType() == MVT::f32 &&
48004 llvm::all_of(Ops, [Op0](SDValue Op) {
48005 return Op.getOperand(2) == Op0.getOperand(2);
48006 })) {
48007 SmallVector<SDValue, 2> LHS, RHS;
48008 for (unsigned i = 0; i != NumOps; ++i) {
48009 LHS.push_back(Ops[i].getOperand(0));
48010 RHS.push_back(Ops[i].getOperand(1));
48011 }
48012 return DAG.getNode(Op0.getOpcode(), DL, VT,
48013 DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS),
48014 DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, RHS),
48015 Op0.getOperand(2));
48016 }
48017 break;
48018 }
48019 case X86ISD::PSHUFHW:
48020 case X86ISD::PSHUFLW:
48021 case X86ISD::PSHUFD:
48022 if (!IsSplat && NumOps == 2 && VT.is256BitVector() &&
48023 Subtarget.hasInt256() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
48024 SmallVector<SDValue, 2> Src;
48025 for (unsigned i = 0; i != NumOps; ++i)
48026 Src.push_back(Ops[i].getOperand(0));
48027 return DAG.getNode(Op0.getOpcode(), DL, VT,
48028 DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Src),
48029 Op0.getOperand(1));
48030 }
48031 break;
48032 case X86ISD::VPERMILPI:
48033 // TODO - add support for vXf64/vXi64 shuffles.
48034 if (!IsSplat && NumOps == 2 && (VT == MVT::v8f32 || VT == MVT::v8i32) &&
48035 Subtarget.hasAVX() && Op0.getOperand(1) == Ops[1].getOperand(1)) {
48036 SmallVector<SDValue, 2> Src;
48037 for (unsigned i = 0; i != NumOps; ++i)
48038 Src.push_back(DAG.getBitcast(MVT::v4f32, Ops[i].getOperand(0)));
48039 SDValue Res = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8f32, Src);
48040 Res = DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, Res,
48041 Op0.getOperand(1));
48042 return DAG.getBitcast(VT, Res);
48043 }
48044 break;
48045 case X86ISD::VSHLI:
48046 case X86ISD::VSRAI:
48047 case X86ISD::VSRLI:
48048 if (((VT.is256BitVector() && Subtarget.hasInt256()) ||
48049 (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
48050 (EltSizeInBits >= 32 || Subtarget.useBWIRegs()))) &&
48051 llvm::all_of(Ops, [Op0](SDValue Op) {
48052 return Op0.getOperand(1) == Op.getOperand(1);
48053 })) {
48054 SmallVector<SDValue, 2> Src;
48055 for (unsigned i = 0; i != NumOps; ++i)
48056 Src.push_back(Ops[i].getOperand(0));
48057 return DAG.getNode(Op0.getOpcode(), DL, VT,
48058 DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Src),
48059 Op0.getOperand(1));
48060 }
48061 break;
48062 case X86ISD::VPERMI:
48063 case X86ISD::VROTLI:
48064 case X86ISD::VROTRI:
48065 if (VT.is512BitVector() && Subtarget.useAVX512Regs() &&
48066 llvm::all_of(Ops, [Op0](SDValue Op) {
48067 return Op0.getOperand(1) == Op.getOperand(1);
48068 })) {
48069 SmallVector<SDValue, 2> Src;
48070 for (unsigned i = 0; i != NumOps; ++i)
48071 Src.push_back(Ops[i].getOperand(0));
48072 return DAG.getNode(Op0.getOpcode(), DL, VT,
48073 DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Src),
48074 Op0.getOperand(1));
48075 }
48076 break;
48077 case X86ISD::PACKSS:
48078 case X86ISD::PACKUS:
48079 if (!IsSplat && NumOps == 2 && VT.is256BitVector() &&
48080 Subtarget.hasInt256()) {
48081 SmallVector<SDValue, 2> LHS, RHS;
48082 for (unsigned i = 0; i != NumOps; ++i) {
48083 LHS.push_back(Ops[i].getOperand(0));
48084 RHS.push_back(Ops[i].getOperand(1));
48085 }
48086 MVT SrcVT = Op0.getOperand(0).getSimpleValueType();
48087 SrcVT = MVT::getVectorVT(SrcVT.getScalarType(),
48088 NumOps * SrcVT.getVectorNumElements());
48089 return DAG.getNode(Op0.getOpcode(), DL, VT,
48090 DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcVT, LHS),
48091 DAG.getNode(ISD::CONCAT_VECTORS, DL, SrcVT, RHS));
48092 }
48093 break;
48094 case X86ISD::PALIGNR:
48095 if (!IsSplat &&
48096 ((VT.is256BitVector() && Subtarget.hasInt256()) ||
48097 (VT.is512BitVector() && Subtarget.useBWIRegs())) &&
48098 llvm::all_of(Ops, [Op0](SDValue Op) {
48099 return Op0.getOperand(2) == Op.getOperand(2);
48100 })) {
48101 SmallVector<SDValue, 2> LHS, RHS;
48102 for (unsigned i = 0; i != NumOps; ++i) {
48103 LHS.push_back(Ops[i].getOperand(0));
48104 RHS.push_back(Ops[i].getOperand(1));
48105 }
48106 return DAG.getNode(Op0.getOpcode(), DL, VT,
48107 DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS),
48108 DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, RHS),
48109 Op0.getOperand(2));
48110 }
48111 break;
48112 }
48113 }
48115 return SDValue();
48116 }
48118 static SDValue combineConcatVectors(SDNode *N, SelectionDAG &DAG,
48119 TargetLowering::DAGCombinerInfo &DCI,
48120 const X86Subtarget &Subtarget) {
48121 EVT VT = N->getValueType(0);
48122 EVT SrcVT = N->getOperand(0).getValueType();
48123 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48125 // Don't do anything for i1 vectors.
48126 if (VT.getVectorElementType() == MVT::i1)
48127 return SDValue();
48129 if (Subtarget.hasAVX() && TLI.isTypeLegal(VT) && TLI.isTypeLegal(SrcVT)) {
48130 SmallVector<SDValue, 4> Ops(N->op_begin(), N->op_end());
48131 if (SDValue R = combineConcatVectorOps(SDLoc(N), VT.getSimpleVT(), Ops, DAG,
48132 DCI, Subtarget))
48133 return R;
48134 }
48136 return SDValue();
48137 }
48139 static SDValue combineInsertSubvector(SDNode *N, SelectionDAG &DAG,
48140 TargetLowering::DAGCombinerInfo &DCI,
48141 const X86Subtarget &Subtarget) {
48142 if (DCI.isBeforeLegalizeOps())
48143 return SDValue();
48145 MVT OpVT = N->getSimpleValueType(0);
48147 bool IsI1Vector = OpVT.getVectorElementType() == MVT::i1;
48149 SDLoc dl(N);
48150 SDValue Vec = N->getOperand(0);
48151 SDValue SubVec = N->getOperand(1);
48153 uint64_t IdxVal = N->getConstantOperandVal(2);
48154 MVT SubVecVT = SubVec.getSimpleValueType();
48156 if (Vec.isUndef() && SubVec.isUndef())
48157 return DAG.getUNDEF(OpVT);
48159 // Inserting undefs/zeros into zeros/undefs is a zero vector.
48160 if ((Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())) &&
48161 (SubVec.isUndef() || ISD::isBuildVectorAllZeros(SubVec.getNode())))
48162 return getZeroVector(OpVT, Subtarget, DAG, dl);
48164 if (ISD::isBuildVectorAllZeros(Vec.getNode())) {
48165 // If we're inserting into a zero vector and then into a larger zero vector,
48166 // just insert into the larger zero vector directly.
48167 if (SubVec.getOpcode() == ISD::INSERT_SUBVECTOR &&
48168 ISD::isBuildVectorAllZeros(SubVec.getOperand(0).getNode())) {
48169 uint64_t Idx2Val = SubVec.getConstantOperandVal(2);
48170 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
48171 getZeroVector(OpVT, Subtarget, DAG, dl),
48172 SubVec.getOperand(1),
48173 DAG.getIntPtrConstant(IdxVal + Idx2Val, dl));
48174 }
48176 // If we're inserting into a zero vector and our input was extracted from an
48177 // insert into a zero vector of the same type and the extraction was at
48178 // least as large as the original insertion, just insert the original
48179 // subvector into a zero vector.
48180 if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR && IdxVal == 0 &&
48181 isNullConstant(SubVec.getOperand(1)) &&
48182 SubVec.getOperand(0).getOpcode() == ISD::INSERT_SUBVECTOR) {
48183 SDValue Ins = SubVec.getOperand(0);
48184 if (isNullConstant(Ins.getOperand(2)) &&
48185 ISD::isBuildVectorAllZeros(Ins.getOperand(0).getNode()) &&
48186 Ins.getOperand(1).getValueSizeInBits() <= SubVecVT.getSizeInBits())
48187 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
48188 getZeroVector(OpVT, Subtarget, DAG, dl),
48189 Ins.getOperand(1), N->getOperand(2));
48190 }
48191 }
48193 // Stop here if this is an i1 vector.
48194 if (IsI1Vector)
48195 return SDValue();
48197 // If this is an insert of an extract, combine to a shuffle. Don't do this
48198 // if the insert or extract can be represented with a subregister operation.
48199 if (SubVec.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
48200 SubVec.getOperand(0).getSimpleValueType() == OpVT &&
48201 (IdxVal != 0 ||
48202 !(Vec.isUndef() || ISD::isBuildVectorAllZeros(Vec.getNode())))) {
48203 int ExtIdxVal = SubVec.getConstantOperandVal(1);
48204 if (ExtIdxVal != 0) {
48205 int VecNumElts = OpVT.getVectorNumElements();
48206 int SubVecNumElts = SubVecVT.getVectorNumElements();
48207 SmallVector<int, 64> Mask(VecNumElts);
48208 // First create an identity shuffle mask.
48209 for (int i = 0; i != VecNumElts; ++i)
48210 Mask[i] = i;
48211 // Now insert the extracted portion.
48212 for (int i = 0; i != SubVecNumElts; ++i)
48213 Mask[i + IdxVal] = i + ExtIdxVal + VecNumElts;
48215 return DAG.getVectorShuffle(OpVT, dl, Vec, SubVec.getOperand(0), Mask);
48216 }
48217 }
48219 // Match concat_vector style patterns.
48220 SmallVector<SDValue, 2> SubVectorOps;
48221 if (collectConcatOps(N, SubVectorOps)) {
48222 if (SDValue Fold =
48223 combineConcatVectorOps(dl, OpVT, SubVectorOps, DAG, DCI, Subtarget))
48224 return Fold;
48226 // If we're inserting all zeros into the upper half, change this to
48227 // a concat with zero. We will match this to a move
48228 // with implicit upper bit zeroing during isel.
48229 // We do this here because we don't want combineConcatVectorOps to
48230 // create INSERT_SUBVECTOR from CONCAT_VECTORS.
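// For example, if this node is equivalent to concat(Y_v2i64, zeros_v2i64), it
// becomes (insert_subvector zeros_v4i64, Y, 0), which isel can implement as a
// 128-bit move that implicitly zeroes bits 255:128.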
48231 if (SubVectorOps.size() == 2 &&
48232 ISD::isBuildVectorAllZeros(SubVectorOps[1].getNode()))
48233 return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT,
48234 getZeroVector(OpVT, Subtarget, DAG, dl),
48235 SubVectorOps[0], DAG.getIntPtrConstant(0, dl));
48236 }
48238 // If this is a broadcast insert into an upper undef, use a larger broadcast.
48239 if (Vec.isUndef() && IdxVal != 0 && SubVec.getOpcode() == X86ISD::VBROADCAST)
48240 return DAG.getNode(X86ISD::VBROADCAST, dl, OpVT, SubVec.getOperand(0));
48242 // If this is a broadcast load inserted into an upper undef, use a larger
48243 // broadcast load.
48244 if (Vec.isUndef() && IdxVal != 0 && SubVec.hasOneUse() &&
48245 SubVec.getOpcode() == X86ISD::VBROADCAST_LOAD) {
48246 auto *MemIntr = cast<MemIntrinsicSDNode>(SubVec);
48247 SDVTList Tys = DAG.getVTList(OpVT, MVT::Other);
48248 SDValue Ops[] = { MemIntr->getChain(), MemIntr->getBasePtr() };
48249 SDValue BcastLd =
48250 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, dl, Tys, Ops,
48251 MemIntr->getMemoryVT(),
48252 MemIntr->getMemOperand());
48253 DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
48254 return BcastLd;
48255 }
48257 return SDValue();
48258 }
48260 /// If we are extracting a subvector of a vector select and the select condition
48261 /// is composed of concatenated vectors, try to narrow the select width. This
48262 /// is a common pattern for AVX1 integer code because 256-bit selects may be
48263 /// legal, but there is almost no integer math/logic available for 256-bit.
48264 /// This function should only be called with legal types (otherwise, the calls
48265 /// to get simple value types will assert).
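// For example, on AVX1:
//   (v4i32 extract_subvector (vselect (concat C0, C1), T, F), 0)
// becomes (vselect C0, (extract_subvector T, 0), (extract_subvector F, 0)),
// keeping the select and the math feeding it at 128 bits.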
48266 static SDValue narrowExtractedVectorSelect(SDNode *Ext, SelectionDAG &DAG) {
48267 SDValue Sel = peekThroughBitcasts(Ext->getOperand(0));
48268 SmallVector<SDValue, 4> CatOps;
48269 if (Sel.getOpcode() != ISD::VSELECT ||
48270 !collectConcatOps(Sel.getOperand(0).getNode(), CatOps))
48271 return SDValue();
48273 // Note: We assume simple value types because this should only be called with
48274 // legal operations/types.
48275 // TODO: This can be extended to handle extraction to 256-bits.
48276 MVT VT = Ext->getSimpleValueType(0);
48277 if (!VT.is128BitVector())
48278 return SDValue();
48280 MVT SelCondVT = Sel.getOperand(0).getSimpleValueType();
48281 if (!SelCondVT.is256BitVector() && !SelCondVT.is512BitVector())
48282 return SDValue();
48284 MVT WideVT = Ext->getOperand(0).getSimpleValueType();
48285 MVT SelVT = Sel.getSimpleValueType();
48286 assert((SelVT.is256BitVector() || SelVT.is512BitVector()) &&
48287 "Unexpected vector type with legal operations");
48289 unsigned SelElts = SelVT.getVectorNumElements();
48290 unsigned CastedElts = WideVT.getVectorNumElements();
48291 unsigned ExtIdx = Ext->getConstantOperandVal(1);
48292 if (SelElts % CastedElts == 0) {
48293 // The select has the same or more (narrower) elements than the extract
48294 // operand. The extraction index gets scaled by that factor.
48295 ExtIdx *= (SelElts / CastedElts);
48296 } else if (CastedElts % SelElts == 0) {
48297 // The select has fewer (wider) elements than the extract operand. Make sure
48298 // that the extraction index can be divided evenly.
48299 unsigned IndexDivisor = CastedElts / SelElts;
48300 if (ExtIdx % IndexDivisor != 0)
48301 return SDValue();
48302 ExtIdx /= IndexDivisor;
48303 } else {
48304 llvm_unreachable("Element count of simple vector types are not divisible?");
48305 }
48307 unsigned NarrowingFactor = WideVT.getSizeInBits() / VT.getSizeInBits();
48308 unsigned NarrowElts = SelElts / NarrowingFactor;
48309 MVT NarrowSelVT = MVT::getVectorVT(SelVT.getVectorElementType(), NarrowElts);
48310 SDLoc DL(Ext);
48311 SDValue ExtCond = extract128BitVector(Sel.getOperand(0), ExtIdx, DAG, DL);
48312 SDValue ExtT = extract128BitVector(Sel.getOperand(1), ExtIdx, DAG, DL);
48313 SDValue ExtF = extract128BitVector(Sel.getOperand(2), ExtIdx, DAG, DL);
48314 SDValue NarrowSel = DAG.getSelect(DL, NarrowSelVT, ExtCond, ExtT, ExtF);
48315 return DAG.getBitcast(VT, NarrowSel);
48316 }
48318 static SDValue combineExtractSubvector(SDNode *N, SelectionDAG &DAG,
48319 TargetLowering::DAGCombinerInfo &DCI,
48320 const X86Subtarget &Subtarget) {
48321 // For AVX1 only, if we are extracting from a 256-bit and+not (which will
48322 // eventually get combined/lowered into ANDNP) with a concatenated operand,
48323 // split the 'and' into 128-bit ops to avoid the concatenate and extract.
48324 // We let generic combining take over from there to simplify the
48325 // insert/extract and 'not'.
48326 // This pattern emerges during AVX1 legalization. We handle it before lowering
48327 // to avoid complications like splitting constant vector loads.
48329 // Capture the original wide type in the likely case that we need to bitcast
48330 // back to this type.
48331 if (!N->getValueType(0).isSimple())
48332 return SDValue();
48334 MVT VT = N->getSimpleValueType(0);
48335 SDValue InVec = N->getOperand(0);
48336 unsigned IdxVal = N->getConstantOperandVal(1);
48337 SDValue InVecBC = peekThroughBitcasts(InVec);
48338 EVT InVecVT = InVec.getValueType();
48339 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48341 if (Subtarget.hasAVX() && !Subtarget.hasAVX2() &&
48342 TLI.isTypeLegal(InVecVT) &&
48343 InVecVT.getSizeInBits() == 256 && InVecBC.getOpcode() == ISD::AND) {
48344 auto isConcatenatedNot = [] (SDValue V) {
48345 V = peekThroughBitcasts(V);
48346 if (!isBitwiseNot(V))
48347 return false;
48348 SDValue NotOp = V->getOperand(0);
48349 return peekThroughBitcasts(NotOp).getOpcode() == ISD::CONCAT_VECTORS;
48350 };
48351 if (isConcatenatedNot(InVecBC.getOperand(0)) ||
48352 isConcatenatedNot(InVecBC.getOperand(1))) {
48353 // extract (and v4i64 X, (not (concat Y1, Y2))), n -> andnp v2i64 X(n), Y1
48354 SDValue Concat = splitVectorIntBinary(InVecBC, DAG);
48355 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, SDLoc(N), VT,
48356 DAG.getBitcast(InVecVT, Concat), N->getOperand(1));
48357 }
48358 }
48360 if (DCI.isBeforeLegalizeOps())
48361 return SDValue();
48363 if (SDValue V = narrowExtractedVectorSelect(N, DAG))
48364 return V;
48366 if (ISD::isBuildVectorAllZeros(InVec.getNode()))
48367 return getZeroVector(VT, Subtarget, DAG, SDLoc(N));
48369 if (ISD::isBuildVectorAllOnes(InVec.getNode())) {
48370 if (VT.getScalarType() == MVT::i1)
48371 return DAG.getConstant(1, SDLoc(N), VT);
48372 return getOnesVector(VT, DAG, SDLoc(N));
48373 }
48375 if (InVec.getOpcode() == ISD::BUILD_VECTOR)
48376 return DAG.getBuildVector(
48377 VT, SDLoc(N),
48378 InVec.getNode()->ops().slice(IdxVal, VT.getVectorNumElements()));
48380 // If we are extracting from an insert into a zero vector, replace with a
48381 // smaller insert into zero if we don't access less than the original
48382 // subvector. Don't do this for i1 vectors.
48383 if (VT.getVectorElementType() != MVT::i1 &&
48384 InVec.getOpcode() == ISD::INSERT_SUBVECTOR && IdxVal == 0 &&
48385 InVec.hasOneUse() && isNullConstant(InVec.getOperand(2)) &&
48386 ISD::isBuildVectorAllZeros(InVec.getOperand(0).getNode()) &&
48387 InVec.getOperand(1).getValueSizeInBits() <= VT.getSizeInBits()) {
48388 SDLoc DL(N);
48389 return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
48390 getZeroVector(VT, Subtarget, DAG, DL),
48391 InVec.getOperand(1), InVec.getOperand(2));
48392 }
48394 // If we're extracting from a broadcast then we're better off just
48395 // broadcasting to the smaller type directly, assuming this is the only use.
48396 // As it's a broadcast we don't care about the extraction index.
48397 if (InVec.getOpcode() == X86ISD::VBROADCAST && InVec.hasOneUse() &&
48398 InVec.getOperand(0).getValueSizeInBits() <= VT.getSizeInBits())
48399 return DAG.getNode(X86ISD::VBROADCAST, SDLoc(N), VT, InVec.getOperand(0));
48401 if (InVec.getOpcode() == X86ISD::VBROADCAST_LOAD && InVec.hasOneUse()) {
48402 auto *MemIntr = cast<MemIntrinsicSDNode>(InVec);
48403 if (MemIntr->getMemoryVT().getSizeInBits() <= VT.getSizeInBits()) {
48404 SDVTList Tys = DAG.getVTList(VT, MVT::Other);
48405 SDValue Ops[] = { MemIntr->getChain(), MemIntr->getBasePtr() };
48406 SDValue BcastLd =
48407 DAG.getMemIntrinsicNode(X86ISD::VBROADCAST_LOAD, SDLoc(N), Tys, Ops,
48408 MemIntr->getMemoryVT(),
48409 MemIntr->getMemOperand());
48410 DAG.ReplaceAllUsesOfValueWith(SDValue(MemIntr, 1), BcastLd.getValue(1));
48411 return BcastLd;
48412 }
48413 }
48415 // If we're extracting an upper subvector from a broadcast we should just
48416 // extract the lowest subvector instead, which should allow
48417 // SimplifyDemandedVectorElts to do more simplifications.
48418 if (IdxVal != 0 && (InVec.getOpcode() == X86ISD::VBROADCAST ||
48419 InVec.getOpcode() == X86ISD::VBROADCAST_LOAD))
48420 return extractSubVector(InVec, 0, DAG, SDLoc(N), VT.getSizeInBits());
48422 // If we're extracting a broadcasted subvector, just use the source.
48423 if (InVec.getOpcode() == X86ISD::SUBV_BROADCAST &&
48424 InVec.getOperand(0).getValueType() == VT)
48425 return InVec.getOperand(0);
48427 // Attempt to extract from the source of a shuffle vector.
48428 if ((InVecVT.getSizeInBits() % VT.getSizeInBits()) == 0 &&
48429 (IdxVal % VT.getVectorNumElements()) == 0) {
48430 SmallVector<int, 32> ShuffleMask;
48431 SmallVector<int, 32> ScaledMask;
48432 SmallVector<SDValue, 2> ShuffleInputs;
48433 unsigned NumSubVecs = InVecVT.getSizeInBits() / VT.getSizeInBits();
48434 // Decode the shuffle mask and scale it so it's shuffling subvectors.
48435 if (getTargetShuffleInputs(InVecBC, ShuffleInputs, ShuffleMask, DAG) &&
48436 scaleShuffleElements(ShuffleMask, NumSubVecs, ScaledMask)) {
48437 unsigned SubVecIdx = IdxVal / VT.getVectorNumElements();
48438 if (ScaledMask[SubVecIdx] == SM_SentinelUndef)
48439 return DAG.getUNDEF(VT);
48440 if (ScaledMask[SubVecIdx] == SM_SentinelZero)
48441 return getZeroVector(VT, Subtarget, DAG, SDLoc(N));
48442 SDValue Src = ShuffleInputs[ScaledMask[SubVecIdx] / NumSubVecs];
48443 if (Src.getValueSizeInBits() == InVecVT.getSizeInBits()) {
48444 unsigned SrcSubVecIdx = ScaledMask[SubVecIdx] % NumSubVecs;
48445 unsigned SrcEltIdx = SrcSubVecIdx * VT.getVectorNumElements();
48446 return extractSubVector(DAG.getBitcast(InVecVT, Src), SrcEltIdx, DAG,
48447 SDLoc(N), VT.getSizeInBits());
48448 }
48449 }
48450 }
48452 // If we're extracting the lowest subvector and we're the only user,
48453 // we may be able to perform this with a smaller vector width.
48454 if (IdxVal == 0 && InVec.hasOneUse()) {
48455 unsigned InOpcode = InVec.getOpcode();
48456 if (VT == MVT::v2f64 && InVecVT == MVT::v4f64) {
48457 // v2f64 CVTDQ2PD(v4i32).
48458 if (InOpcode == ISD::SINT_TO_FP &&
48459 InVec.getOperand(0).getValueType() == MVT::v4i32) {
48460 return DAG.getNode(X86ISD::CVTSI2P, SDLoc(N), VT, InVec.getOperand(0));
48461 }
48462 // v2f64 CVTUDQ2PD(v4i32).
48463 if (InOpcode == ISD::UINT_TO_FP && Subtarget.hasVLX() &&
48464 InVec.getOperand(0).getValueType() == MVT::v4i32) {
48465 return DAG.getNode(X86ISD::CVTUI2P, SDLoc(N), VT, InVec.getOperand(0));
48466 }
48467 // v2f64 CVTPS2PD(v4f32).
48468 if (InOpcode == ISD::FP_EXTEND &&
48469 InVec.getOperand(0).getValueType() == MVT::v4f32) {
48470 return DAG.getNode(X86ISD::VFPEXT, SDLoc(N), VT, InVec.getOperand(0));
48471 }
48473 if ((InOpcode == ISD::ANY_EXTEND ||
48474 InOpcode == ISD::ANY_EXTEND_VECTOR_INREG ||
48475 InOpcode == ISD::ZERO_EXTEND ||
48476 InOpcode == ISD::ZERO_EXTEND_VECTOR_INREG ||
48477 InOpcode == ISD::SIGN_EXTEND ||
48478 InOpcode == ISD::SIGN_EXTEND_VECTOR_INREG) &&
48479 VT.is128BitVector() &&
48480 InVec.getOperand(0).getSimpleValueType().is128BitVector()) {
48481 unsigned ExtOp = getOpcode_EXTEND_VECTOR_INREG(InOpcode);
48482 return DAG.getNode(ExtOp, SDLoc(N), VT, InVec.getOperand(0));
48483 }
48484 if (InOpcode == ISD::VSELECT &&
48485 InVec.getOperand(0).getValueType().is256BitVector() &&
48486 InVec.getOperand(1).getValueType().is256BitVector() &&
48487 InVec.getOperand(2).getValueType().is256BitVector()) {
48488 SDLoc DL(N);
48489 SDValue Ext0 = extractSubVector(InVec.getOperand(0), 0, DAG, DL, 128);
48490 SDValue Ext1 = extractSubVector(InVec.getOperand(1), 0, DAG, DL, 128);
48491 SDValue Ext2 = extractSubVector(InVec.getOperand(2), 0, DAG, DL, 128);
48492 return DAG.getNode(InOpcode, DL, VT, Ext0, Ext1, Ext2);
48493 }
48494 }
48496 return SDValue();
48497 }
48499 static SDValue combineScalarToVector(SDNode *N, SelectionDAG &DAG) {
48500 EVT VT = N->getValueType(0);
48501 SDValue Src = N->getOperand(0);
48502 SDLoc DL(N);
48504 // If this is a scalar to vector to v1i1 from an AND with 1, bypass the and.
48505 // This occurs frequently in our masked scalar intrinsic code and our
48506 // floating point select lowering with AVX512.
48507 // TODO: SimplifyDemandedBits instead?
48508 if (VT == MVT::v1i1 && Src.getOpcode() == ISD::AND && Src.hasOneUse())
48509 if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
48510 if (C->getAPIntValue().isOneValue())
48511 return DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v1i1,
48512 Src.getOperand(0));
48514 // Combine scalar_to_vector of an extract_vector_elt into an extract_subvec.
48515 if (VT == MVT::v1i1 && Src.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
48516 Src.hasOneUse() && Src.getOperand(0).getValueType().isVector() &&
48517 Src.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
48518 if (auto *C = dyn_cast<ConstantSDNode>(Src.getOperand(1)))
48519 if (C->isNullValue())
48520 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Src.getOperand(0),
48521 Src.getOperand(1));
48523 // Reduce v2i64 to v4i32 if we don't need the upper bits.
48524 // TODO: Move to DAGCombine/SimplifyDemandedBits?
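// For example, (v2i64 scalar_to_vector (i64 anyext X_i32)) only needs the low
// 32 bits of the element, so it can be built as
// (bitcast (v4i32 scalar_to_vector X_i32)).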
48525 if (VT == MVT::v2i64 || VT == MVT::v2f64) {
48526 auto IsAnyExt64 = [](SDValue Op) {
48527 if (Op.getValueType() != MVT::i64 || !Op.hasOneUse())
48528 return SDValue();
48529 if (Op.getOpcode() == ISD::ANY_EXTEND &&
48530 Op.getOperand(0).getScalarValueSizeInBits() <= 32)
48531 return Op.getOperand(0);
48532 if (auto *Ld = dyn_cast<LoadSDNode>(Op))
48533 if (Ld->getExtensionType() == ISD::EXTLOAD &&
48534 Ld->getMemoryVT().getScalarSizeInBits() <= 32)
48535 return Op;
48536 return SDValue();
48537 };
48538 if (SDValue ExtSrc = IsAnyExt64(peekThroughOneUseBitcasts(Src)))
48539 return DAG.getBitcast(
48540 VT, DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v4i32,
48541 DAG.getAnyExtOrTrunc(ExtSrc, DL, MVT::i32)));
48542 }
48544 // Combine (v2i64 (scalar_to_vector (i64 (bitconvert (mmx))))) to MOVQ2DQ.
48545 if (VT == MVT::v2i64 && Src.getOpcode() == ISD::BITCAST &&
48546 Src.getOperand(0).getValueType() == MVT::x86mmx)
48547 return DAG.getNode(X86ISD::MOVQ2DQ, DL, VT, Src.getOperand(0));
48549 return SDValue();
48550 }
48552 // Simplify PMULDQ and PMULUDQ operations.
48553 static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
48554 TargetLowering::DAGCombinerInfo &DCI,
48555 const X86Subtarget &Subtarget) {
48556 SDValue LHS = N->getOperand(0);
48557 SDValue RHS = N->getOperand(1);
48559 // Canonicalize constant to RHS.
48560 if (DAG.isConstantIntBuildVectorOrConstantInt(LHS) &&
48561 !DAG.isConstantIntBuildVectorOrConstantInt(RHS))
48562 return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS);
48564 // Multiply by zero.
48565 // Don't return RHS as it may contain UNDEFs.
48566 if (ISD::isBuildVectorAllZeros(RHS.getNode()))
48567 return DAG.getConstant(0, SDLoc(N), N->getValueType(0));
48569 // PMULDQ/PMULUDQ only uses lower 32 bits from each vector element.
48570 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48571 if (TLI.SimplifyDemandedBits(SDValue(N, 0), APInt::getAllOnesValue(64), DCI))
48572 return SDValue(N, 0);
48574 // If the input is an extend_invec and the SimplifyDemandedBits call didn't
48575 // convert it to any_extend_invec, due to the LegalOperations check, do the
48576 // conversion directly to a vector shuffle manually. This exposes combine
48577 // opportunities missed by combineExtInVec not calling
48578 // combineX86ShufflesRecursively on SSE4.1 targets.
48579 // FIXME: This is basically a hack around several other issues related to
48580 // ANY_EXTEND_VECTOR_INREG.
48581 if (N->getValueType(0) == MVT::v2i64 && LHS.hasOneUse() &&
48582 (LHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
48583 LHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
48584 LHS.getOperand(0).getValueType() == MVT::v4i32) {
48585 SDLoc dl(N);
48586 LHS = DAG.getVectorShuffle(MVT::v4i32, dl, LHS.getOperand(0),
48587 LHS.getOperand(0), { 0, -1, 1, -1 });
48588 LHS = DAG.getBitcast(MVT::v2i64, LHS);
48589 return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
48590 }
48591 if (N->getValueType(0) == MVT::v2i64 && RHS.hasOneUse() &&
48592 (RHS.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG ||
48593 RHS.getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG) &&
48594 RHS.getOperand(0).getValueType() == MVT::v4i32) {
48595 SDLoc dl(N);
48596 RHS = DAG.getVectorShuffle(MVT::v4i32, dl, RHS.getOperand(0),
48597 RHS.getOperand(0), { 0, -1, 1, -1 });
48598 RHS = DAG.getBitcast(MVT::v2i64, RHS);
48599 return DAG.getNode(N->getOpcode(), dl, MVT::v2i64, LHS, RHS);
48600 }
48602 return SDValue();
48603 }
48605 static SDValue combineExtInVec(SDNode *N, SelectionDAG &DAG,
48606 TargetLowering::DAGCombinerInfo &DCI,
48607 const X86Subtarget &Subtarget) {
48608 EVT VT = N->getValueType(0);
48609 SDValue In = N->getOperand(0);
48610 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48612 // Try to merge vector loads and extend_inreg to an extload.
48613 if (!DCI.isBeforeLegalizeOps() && ISD::isNormalLoad(In.getNode()) &&
48614 In.hasOneUse()) {
48615 auto *Ld = cast<LoadSDNode>(In);
48616 if (Ld->isSimple()) {
48617 MVT SVT = In.getSimpleValueType().getVectorElementType();
48618 ISD::LoadExtType Ext = N->getOpcode() == ISD::SIGN_EXTEND_VECTOR_INREG
48619 ? ISD::SEXTLOAD
48620 : ISD::ZEXTLOAD;
48621 EVT MemVT =
48622 EVT::getVectorVT(*DAG.getContext(), SVT, VT.getVectorNumElements());
48623 if (TLI.isLoadExtLegal(Ext, VT, MemVT)) {
48624 SDValue Load =
48625 DAG.getExtLoad(Ext, SDLoc(N), VT, Ld->getChain(), Ld->getBasePtr(),
48626 Ld->getPointerInfo(), MemVT,
48627 Ld->getOriginalAlign(),
48628 Ld->getMemOperand()->getFlags());
48629 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
48630 return Load;
48631 }
48632 }
48633 }
48635 // Attempt to combine as a shuffle.
48636 // TODO: SSE41 support
48637 if (Subtarget.hasAVX() && N->getOpcode() != ISD::SIGN_EXTEND_VECTOR_INREG) {
48638 SDValue Op(N, 0);
48639 if (TLI.isTypeLegal(VT) && TLI.isTypeLegal(In.getValueType()))
48640 if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
48641 return Res;
48642 }
48644 return SDValue();
48645 }
48647 static SDValue combineKSHIFT(SDNode *N, SelectionDAG &DAG,
48648 TargetLowering::DAGCombinerInfo &DCI) {
48649 EVT VT = N->getValueType(0);
48651 if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
48652 return DAG.getConstant(0, SDLoc(N), VT);
48654 APInt KnownUndef, KnownZero;
48655 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
48656 APInt DemandedElts = APInt::getAllOnesValue(VT.getVectorNumElements());
48657 if (TLI.SimplifyDemandedVectorElts(SDValue(N, 0), DemandedElts, KnownUndef,
48658 KnownZero, DCI))
48659 return SDValue(N, 0);
48661 return SDValue();
48662 }
48664 // Optimize (fp16_to_fp (fp_to_fp16 X)) to VCVTPS2PH followed by VCVTPH2PS.
48665 // Done as a combine because the lowering for fp16_to_fp and fp_to_fp16 produce
48666 // extra instructions between the conversion due to going to scalar and back.
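// That is, (f32 (fp16_to_fp (fp_to_fp16 X))) is rewritten as CVTPS2PH of
// (v4f32 scalar_to_vector X) followed by CVTPH2PS, extracting element 0 of
// the result.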
48667 static SDValue combineFP16_TO_FP(SDNode *N, SelectionDAG &DAG,
48668 const X86Subtarget &Subtarget) {
48669 if (Subtarget.useSoftFloat() || !Subtarget.hasF16C())
48670 return SDValue();
48672 if (N->getOperand(0).getOpcode() != ISD::FP_TO_FP16)
48673 return SDValue();
48675 if (N->getValueType(0) != MVT::f32 ||
48676 N->getOperand(0).getOperand(0).getValueType() != MVT::f32)
48677 return SDValue();
48679 SDLoc dl(N);
48680 SDValue Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32,
48681 N->getOperand(0).getOperand(0));
48682 Res = DAG.getNode(X86ISD::CVTPS2PH, dl, MVT::v8i16, Res,
48683 DAG.getTargetConstant(4, dl, MVT::i32));
48684 Res = DAG.getNode(X86ISD::CVTPH2PS, dl, MVT::v4f32, Res);
48685 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f32, Res,
48686 DAG.getIntPtrConstant(0, dl));
48687 }
48689 static SDValue combineFP_EXTEND(SDNode *N, SelectionDAG &DAG,
48690 const X86Subtarget &Subtarget) {
48691 if (!Subtarget.hasF16C() || Subtarget.useSoftFloat())
48692 return SDValue();
48694 bool IsStrict = N->isStrictFPOpcode();
48695 EVT VT = N->getValueType(0);
48696 SDValue Src = N->getOperand(IsStrict ? 1 : 0);
48697 EVT SrcVT = Src.getValueType();
48699 if (!SrcVT.isVector() || SrcVT.getVectorElementType() != MVT::f16)
48700 return SDValue();
48702 if (VT.getVectorElementType() != MVT::f32 &&
48703 VT.getVectorElementType() != MVT::f64)
48704 return SDValue();
48706 unsigned NumElts = VT.getVectorNumElements();
48707 if (NumElts == 1 || !isPowerOf2_32(NumElts))
48708 return SDValue();
48710 SDLoc dl(N);
48712 // Convert the input to vXi16.
48713 EVT IntVT = SrcVT.changeVectorElementTypeToInteger();
48714 Src = DAG.getBitcast(IntVT, Src);
48716 // Widen to at least 8 input elements.
48717 if (NumElts < 8) {
48718 unsigned NumConcats = 8 / NumElts;
48719 SDValue Fill = NumElts == 4 ? DAG.getUNDEF(IntVT)
48720 : DAG.getConstant(0, dl, IntVT);
48721 SmallVector<SDValue, 4> Ops(NumConcats, Fill);
48722 Ops[0] = Src;
48723 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v8i16, Ops);
48724 }
48726 // Destination is vXf32 with at least 4 elements.
48727 EVT CvtVT = EVT::getVectorVT(*DAG.getContext(), MVT::f32,
48728 std::max(4U, NumElts));
48729 SDValue Cvt, Chain;
48730 if (IsStrict) {
48731 Cvt = DAG.getNode(X86ISD::STRICT_CVTPH2PS, dl, {CvtVT, MVT::Other},
48732 {N->getOperand(0), Src});
48733 Chain = Cvt.getValue(1);
48734 } else {
48735 Cvt = DAG.getNode(X86ISD::CVTPH2PS, dl, CvtVT, Src);
48736 }
48738 if (NumElts < 4) {
48739 assert(NumElts == 2 && "Unexpected size");
48740 Cvt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MVT::v2f32, Cvt,
48741 DAG.getIntPtrConstant(0, dl));
48742 }
48744 if (IsStrict) {
48745 // Extend to the original VT if necessary.
48746 if (Cvt.getValueType() != VT) {
48747 Cvt = DAG.getNode(ISD::STRICT_FP_EXTEND, dl, {VT, MVT::Other},
48748 {Chain, Cvt});
48749 Chain = Cvt.getValue(1);
48750 }
48751 return DAG.getMergeValues({Cvt, Chain}, dl);
48752 }
48754 // Extend to the original VT if necessary.
48755 return DAG.getNode(ISD::FP_EXTEND, dl, VT, Cvt);
48756 }
48758 // Try to find a larger VBROADCAST_LOAD that we can extract from. Limit this to
48759 // cases where the loads have the same input chain and the output chains are
48760 // unused. This avoids any memory ordering issues.
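// For example, if a v4f32 and a v8f32 VBROADCAST_LOAD share the same pointer,
// chain and memory width, the narrower node can be replaced by a 128-bit
// extract of the wider broadcast's result.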
48761 static SDValue combineVBROADCAST_LOAD(SDNode *N, SelectionDAG &DAG,
48762 TargetLowering::DAGCombinerInfo &DCI) {
48763 // Only do this if the chain result is unused.
48764 if (N->hasAnyUseOfValue(1))
48765 return SDValue();
48767 auto *MemIntrin = cast<MemIntrinsicSDNode>(N);
48769 SDValue Ptr = MemIntrin->getBasePtr();
48770 SDValue Chain = MemIntrin->getChain();
48771 EVT VT = N->getSimpleValueType(0);
48772 EVT MemVT = MemIntrin->getMemoryVT();
48774 // Look at other users of our base pointer and try to find a wider broadcast.
48775 // The input chain and the size of the memory VT must match.
48776 for (SDNode *User : Ptr->uses())
48777 if (User != N && User->getOpcode() == X86ISD::VBROADCAST_LOAD &&
48778 cast<MemIntrinsicSDNode>(User)->getBasePtr() == Ptr &&
48779 cast<MemIntrinsicSDNode>(User)->getChain() == Chain &&
48780 cast<MemIntrinsicSDNode>(User)->getMemoryVT().getSizeInBits() ==
48781 MemVT.getSizeInBits() &&
48782 !User->hasAnyUseOfValue(1) &&
48783 User->getValueSizeInBits(0) > VT.getSizeInBits()) {
48784 SDValue Extract = extractSubVector(SDValue(User, 0), 0, DAG, SDLoc(N),
48785 VT.getSizeInBits());
48786 Extract = DAG.getBitcast(VT, Extract);
48787 return DCI.CombineTo(N, Extract, SDValue(User, 1));
48789 return SDValue();
48790 }
48793 static SDValue combineFP_ROUND(SDNode *N, SelectionDAG &DAG,
48794 const X86Subtarget &Subtarget) {
48795 if (!Subtarget.hasF16C() || Subtarget.useSoftFloat())
48796 return SDValue();
48798 EVT VT = N->getValueType(0);
48799 SDValue Src = N->getOperand(0);
48800 EVT SrcVT = Src.getValueType();
48802 if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
48803 SrcVT.getVectorElementType() != MVT::f32)
48804 return SDValue();
48806 unsigned NumElts = VT.getVectorNumElements();
48807 if (NumElts == 1 || !isPowerOf2_32(NumElts))
48808 return SDValue();
48810 SDLoc dl(N);
48812 // Widen to at least 4 input elements.
48813 if (NumElts < 4)
48814 Src = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32, Src,
48815 DAG.getConstantFP(0.0, dl, SrcVT));
48817 // Destination is v8i16 with at least 8 elements.
48818 EVT CvtVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16,
48819 std::max(8U, NumElts));
48820 SDValue Cvt = DAG.getNode(X86ISD::CVTPS2PH, dl, CvtVT, Src,
48821 DAG.getTargetConstant(4, dl, MVT::i32));
48823 // Extract down to real number of elements.
48824 if (NumElts < 8) {
48825 EVT IntVT = VT.changeVectorElementTypeToInteger();
48826 Cvt = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, IntVT, Cvt,
48827 DAG.getIntPtrConstant(0, dl));
48828 }
48830 return DAG.getBitcast(VT, Cvt);
48831 }
48833 static SDValue combineMOVDQ2Q(SDNode *N, SelectionDAG &DAG) {
48834 SDValue Src = N->getOperand(0);
48836 // Turn MOVDQ2Q+simple_load into an mmx load.
48837 if (ISD::isNormalLoad(Src.getNode()) && Src.hasOneUse()) {
48838 LoadSDNode *LN = cast<LoadSDNode>(Src.getNode());
48840 if (LN->isSimple()) {
48841 SDValue NewLd = DAG.getLoad(MVT::x86mmx, SDLoc(N), LN->getChain(),
48842 LN->getBasePtr(),
48843 LN->getPointerInfo(),
48844 LN->getOriginalAlign(),
48845 LN->getMemOperand()->getFlags());
48846 DAG.ReplaceAllUsesOfValueWith(SDValue(LN, 1), NewLd.getValue(1));
48847 return NewLd;
48848 }
48849 }
48851 return SDValue();
48852 }
48854 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
48855 DAGCombinerInfo &DCI) const {
48856 SelectionDAG &DAG = DCI.DAG;
48857 switch (N->getOpcode()) {
48858 default: break;
48859 case ISD::SCALAR_TO_VECTOR:
48860 return combineScalarToVector(N, DAG);
48861 case ISD::EXTRACT_VECTOR_ELT:
48862 case X86ISD::PEXTRW:
48863 case X86ISD::PEXTRB:
48864 return combineExtractVectorElt(N, DAG, DCI, Subtarget);
48865 case ISD::CONCAT_VECTORS:
48866 return combineConcatVectors(N, DAG, DCI, Subtarget);
48867 case ISD::INSERT_SUBVECTOR:
48868 return combineInsertSubvector(N, DAG, DCI, Subtarget);
48869 case ISD::EXTRACT_SUBVECTOR:
48870 return combineExtractSubvector(N, DAG, DCI, Subtarget);
48871 case ISD::VSELECT:
48872 case ISD::SELECT:
48873 case X86ISD::BLENDV: return combineSelect(N, DAG, DCI, Subtarget);
48874 case ISD::BITCAST: return combineBitcast(N, DAG, DCI, Subtarget);
48875 case X86ISD::CMOV: return combineCMov(N, DAG, DCI, Subtarget);
48876 case X86ISD::CMP: return combineCMP(N, DAG);
48877 case ISD::ADD: return combineAdd(N, DAG, DCI, Subtarget);
48878 case ISD::SUB: return combineSub(N, DAG, DCI, Subtarget);
48879 case X86ISD::ADD:
48880 case X86ISD::SUB: return combineX86AddSub(N, DAG, DCI);
48881 case X86ISD::SBB: return combineSBB(N, DAG);
48882 case X86ISD::ADC: return combineADC(N, DAG, DCI);
48883 case ISD::MUL: return combineMul(N, DAG, DCI, Subtarget);
48884 case ISD::SHL: return combineShiftLeft(N, DAG);
48885 case ISD::SRA: return combineShiftRightArithmetic(N, DAG, Subtarget);
48886 case ISD::SRL: return combineShiftRightLogical(N, DAG, DCI, Subtarget);
48887 case ISD::AND: return combineAnd(N, DAG, DCI, Subtarget);
48888 case ISD::OR: return combineOr(N, DAG, DCI, Subtarget);
48889 case ISD::XOR: return combineXor(N, DAG, DCI, Subtarget);
48890 case X86ISD::BEXTR: return combineBEXTR(N, DAG, DCI, Subtarget);
48891 case ISD::LOAD: return combineLoad(N, DAG, DCI, Subtarget);
48892 case ISD::MLOAD: return combineMaskedLoad(N, DAG, DCI, Subtarget);
48893 case ISD::STORE: return combineStore(N, DAG, DCI, Subtarget);
48894 case ISD::MSTORE: return combineMaskedStore(N, DAG, DCI, Subtarget);
48895 case X86ISD::VEXTRACT_STORE:
48896 return combineVEXTRACT_STORE(N, DAG, DCI, Subtarget);
48897 case ISD::SINT_TO_FP:
48898 case ISD::STRICT_SINT_TO_FP:
48899 return combineSIntToFP(N, DAG, DCI, Subtarget);
48900 case ISD::UINT_TO_FP:
48901 case ISD::STRICT_UINT_TO_FP:
48902 return combineUIntToFP(N, DAG, Subtarget);
48903 case ISD::FADD:
48904 case ISD::FSUB: return combineFaddFsub(N, DAG, Subtarget);
48905 case ISD::FNEG: return combineFneg(N, DAG, DCI, Subtarget);
48906 case ISD::TRUNCATE: return combineTruncate(N, DAG, Subtarget);
48907 case X86ISD::VTRUNC: return combineVTRUNC(N, DAG, DCI);
48908 case X86ISD::ANDNP: return combineAndnp(N, DAG, DCI, Subtarget);
48909 case X86ISD::FAND: return combineFAnd(N, DAG, Subtarget);
48910 case X86ISD::FANDN: return combineFAndn(N, DAG, Subtarget);
48911 case X86ISD::FXOR:
48912 case X86ISD::FOR: return combineFOr(N, DAG, DCI, Subtarget);
48913 case X86ISD::FMIN:
48914 case X86ISD::FMAX: return combineFMinFMax(N, DAG);
48915 case ISD::FMINNUM:
48916 case ISD::FMAXNUM: return combineFMinNumFMaxNum(N, DAG, Subtarget);
48917 case X86ISD::CVTSI2P:
48918 case X86ISD::CVTUI2P: return combineX86INT_TO_FP(N, DAG, DCI);
48919 case X86ISD::CVTP2SI:
48920 case X86ISD::CVTP2UI:
48921 case X86ISD::STRICT_CVTTP2SI:
48922 case X86ISD::CVTTP2SI:
48923 case X86ISD::STRICT_CVTTP2UI:
48924 case X86ISD::CVTTP2UI:
48925 return combineCVTP2I_CVTTP2I(N, DAG, DCI);
48926 case X86ISD::STRICT_CVTPH2PS:
48927 case X86ISD::CVTPH2PS: return combineCVTPH2PS(N, DAG, DCI);
48928 case X86ISD::BT: return combineBT(N, DAG, DCI);
48929 case ISD::ANY_EXTEND:
48930 case ISD::ZERO_EXTEND: return combineZext(N, DAG, DCI, Subtarget);
48931 case ISD::SIGN_EXTEND: return combineSext(N, DAG, DCI, Subtarget);
48932 case ISD::SIGN_EXTEND_INREG: return combineSignExtendInReg(N, DAG, Subtarget);
48933 case ISD::ANY_EXTEND_VECTOR_INREG:
48934 case ISD::SIGN_EXTEND_VECTOR_INREG:
48935 case ISD::ZERO_EXTEND_VECTOR_INREG: return combineExtInVec(N, DAG, DCI,
48936 Subtarget);
48937 case ISD::SETCC: return combineSetCC(N, DAG, Subtarget);
48938 case X86ISD::SETCC: return combineX86SetCC(N, DAG, Subtarget);
48939 case X86ISD::BRCOND: return combineBrCond(N, DAG, Subtarget);
48940 case X86ISD::PACKSS:
48941 case X86ISD::PACKUS: return combineVectorPack(N, DAG, DCI, Subtarget);
48942 case X86ISD::VSHL:
48943 case X86ISD::VSRA:
48944 case X86ISD::VSRL:
48945 return combineVectorShiftVar(N, DAG, DCI, Subtarget);
48946 case X86ISD::VSHLI:
48947 case X86ISD::VSRAI:
48948 case X86ISD::VSRLI:
48949 return combineVectorShiftImm(N, DAG, DCI, Subtarget);
48950 case ISD::INSERT_VECTOR_ELT:
48951 case X86ISD::PINSRB:
48952 case X86ISD::PINSRW: return combineVectorInsert(N, DAG, DCI, Subtarget);
48953 case X86ISD::SHUFP: // Handle all target specific shuffles
48954 case X86ISD::INSERTPS:
48955 case X86ISD::EXTRQI:
48956 case X86ISD::INSERTQI:
48957 case X86ISD::VALIGN:
48958 case X86ISD::PALIGNR:
48959 case X86ISD::VSHLDQ:
48960 case X86ISD::VSRLDQ:
48961 case X86ISD::BLENDI:
48962 case X86ISD::UNPCKH:
48963 case X86ISD::UNPCKL:
48964 case X86ISD::MOVHLPS:
48965 case X86ISD::MOVLHPS:
48966 case X86ISD::PSHUFB:
48967 case X86ISD::PSHUFD:
48968 case X86ISD::PSHUFHW:
48969 case X86ISD::PSHUFLW:
48970 case X86ISD::MOVSHDUP:
48971 case X86ISD::MOVSLDUP:
48972 case X86ISD::MOVDDUP:
48973 case X86ISD::MOVSS:
48974 case X86ISD::MOVSD:
48975 case X86ISD::VBROADCAST:
48976 case X86ISD::VPPERM:
48977 case X86ISD::VPERMI:
48978 case X86ISD::VPERMV:
48979 case X86ISD::VPERMV3:
48980 case X86ISD::VPERMIL2:
48981 case X86ISD::VPERMILPI:
48982 case X86ISD::VPERMILPV:
48983 case X86ISD::VPERM2X128:
48984 case X86ISD::SHUF128:
48985 case X86ISD::VZEXT_MOVL:
48986 case ISD::VECTOR_SHUFFLE: return combineShuffle(N, DAG, DCI, Subtarget);
48987 case X86ISD::FMADD_RND:
48988 case X86ISD::FMSUB:
48989 case X86ISD::STRICT_FMSUB:
48990 case X86ISD::FMSUB_RND:
48991 case X86ISD::FNMADD:
48992 case X86ISD::STRICT_FNMADD:
48993 case X86ISD::FNMADD_RND:
48994 case X86ISD::FNMSUB:
48995 case X86ISD::STRICT_FNMSUB:
48996 case X86ISD::FNMSUB_RND:
48997 case ISD::FMA:
48998 case ISD::STRICT_FMA: return combineFMA(N, DAG, DCI, Subtarget);
48999 case X86ISD::FMADDSUB_RND:
49000 case X86ISD::FMSUBADD_RND:
49001 case X86ISD::FMADDSUB:
49002 case X86ISD::FMSUBADD: return combineFMADDSUB(N, DAG, DCI);
49003 case X86ISD::MOVMSK: return combineMOVMSK(N, DAG, DCI, Subtarget);
49004 case X86ISD::MGATHER:
49005 case X86ISD::MSCATTER: return combineX86GatherScatter(N, DAG, DCI);
49006 case ISD::MGATHER:
49007 case ISD::MSCATTER: return combineGatherScatter(N, DAG, DCI);
49008 case X86ISD::PCMPEQ:
49009 case X86ISD::PCMPGT: return combineVectorCompare(N, DAG, Subtarget);
49010 case X86ISD::PMULDQ:
49011 case X86ISD::PMULUDQ: return combinePMULDQ(N, DAG, DCI, Subtarget);
49012 case X86ISD::KSHIFTL:
49013 case X86ISD::KSHIFTR: return combineKSHIFT(N, DAG, DCI);
49014 case ISD::FP16_TO_FP: return combineFP16_TO_FP(N, DAG, Subtarget);
49015 case ISD::STRICT_FP_EXTEND:
49016 case ISD::FP_EXTEND: return combineFP_EXTEND(N, DAG, Subtarget);
49017 case ISD::FP_ROUND: return combineFP_ROUND(N, DAG, Subtarget);
49018 case X86ISD::VBROADCAST_LOAD: return combineVBROADCAST_LOAD(N, DAG, DCI);
49019 case X86ISD::MOVDQ2Q: return combineMOVDQ2Q(N, DAG);
49020 }
49022 return SDValue();
49023 }
49025 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
49026 if (!isTypeLegal(VT))
49027 return false;
49029 // There are no vXi8 shifts.
49030 if (Opc == ISD::SHL && VT.isVector() && VT.getVectorElementType() == MVT::i8)
49031 return false;
49033 // TODO: Almost no 8-bit ops are desirable because they have no actual
49034 // size/speed advantages vs. 32-bit ops, but they do have a major
49035 // potential disadvantage by causing partial register stalls.
49037 // 8-bit multiply/shl is probably not cheaper than 32-bit multiply/shl, and
49038 // we have specializations to turn 32-bit multiply/shl into LEA or other ops.
49039 // Also, see the comment in "IsDesirableToPromoteOp" - where we additionally
49040 // check for a constant operand to the multiply.
49041 if ((Opc == ISD::MUL || Opc == ISD::SHL) && VT == MVT::i8)
49042 return false;
49044 // i16 instruction encodings are longer and some i16 instructions are slow,
49045 // so those are not desirable.
49046 if (VT == MVT::i16) {
49047 switch (Opc) {
49048 default:
49049 return true;
49050 case ISD::LOAD:
49051 case ISD::SIGN_EXTEND:
49052 case ISD::ZERO_EXTEND:
49053 case ISD::ANY_EXTEND:
49054 case ISD::SHL:
49055 case ISD::SRA:
49056 case ISD::SRL:
49057 case ISD::SUB:
49058 case ISD::ADD:
49059 case ISD::MUL:
49060 case ISD::AND:
49061 case ISD::OR:
49062 case ISD::XOR:
49063 return false;
49064 }
49065 }
49067 // Any legal type not explicitly accounted for above here is desirable.
49068 return true;
49069 }
49071 SDValue X86TargetLowering::expandIndirectJTBranch(const SDLoc& dl,
49072 SDValue Value, SDValue Addr,
49073 SelectionDAG &DAG) const {
49074 const Module *M = DAG.getMachineFunction().getMMI().getModule();
49075 Metadata *IsCFProtectionSupported = M->getModuleFlag("cf-protection-branch");
49076 if (IsCFProtectionSupported) {
49077 // In case control-flow branch protection is enabled, we need to add a
49078 // notrack prefix to the indirect branch.
49079 // In order to do that we create an NT_BRIND SDNode.
49080 // Upon ISEL, the pattern will convert it to a jmp with a NoTrack prefix.
49081 return DAG.getNode(X86ISD::NT_BRIND, dl, MVT::Other, Value, Addr);
49084 return TargetLowering::expandIndirectJTBranch(dl, Value, Addr, DAG);
49085 }
49087 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
49088 EVT VT = Op.getValueType();
49089 bool Is8BitMulByConstant = VT == MVT::i8 && Op.getOpcode() == ISD::MUL &&
49090 isa<ConstantSDNode>(Op.getOperand(1));
49092 // i16 is legal, but undesirable since i16 instruction encodings are longer
49093 // and some i16 instructions are slow.
49094 // 8-bit multiply-by-constant can usually be expanded to something cheaper
49095 // using LEA and/or other ALU ops.
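// For example, (i16 add X, Y) is normally promoted to (i32 add) to avoid
// 66h-prefixed encodings, unless doing so would break a foldable
// read-modify-write pattern such as add word ptr [mem], reg.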
49096 if (VT != MVT::i16 && !Is8BitMulByConstant)
49097 return false;
49099 auto IsFoldableRMW = [](SDValue Load, SDValue Op) {
49100 if (!Op.hasOneUse())
49101 return false;
49102 SDNode *User = *Op->use_begin();
49103 if (!ISD::isNormalStore(User))
49104 return false;
49105 auto *Ld = cast<LoadSDNode>(Load);
49106 auto *St = cast<StoreSDNode>(User);
49107 return Ld->getBasePtr() == St->getBasePtr();
49108 };
49110 auto IsFoldableAtomicRMW = [](SDValue Load, SDValue Op) {
49111 if (!Load.hasOneUse() || Load.getOpcode() != ISD::ATOMIC_LOAD)
49112 return false;
49113 if (!Op.hasOneUse())
49114 return false;
49115 SDNode *User = *Op->use_begin();
49116 if (User->getOpcode() != ISD::ATOMIC_STORE)
49117 return false;
49118 auto *Ld = cast<AtomicSDNode>(Load);
49119 auto *St = cast<AtomicSDNode>(User);
49120 return Ld->getBasePtr() == St->getBasePtr();
49121 };
49123 bool Commute = false;
49124 switch (Op.getOpcode()) {
49125 default: return false;
49126 case ISD::SIGN_EXTEND:
49127 case ISD::ZERO_EXTEND:
49128 case ISD::ANY_EXTEND:
49129 break;
49130 case ISD::SHL:
49131 case ISD::SRA:
49132 case ISD::SRL: {
49133 SDValue N0 = Op.getOperand(0);
49134 // Look out for (store (shl (load), x)).
49135 if (MayFoldLoad(N0) && IsFoldableRMW(N0, Op))
49136 return false;
49137 break;
49138 }
49139 case ISD::ADD:
49140 case ISD::MUL:
49141 case ISD::AND:
49142 case ISD::OR:
49143 case ISD::XOR:
49144 Commute = true;
49145 LLVM_FALLTHROUGH;
49146 case ISD::SUB: {
49147 SDValue N0 = Op.getOperand(0);
49148 SDValue N1 = Op.getOperand(1);
49149 // Avoid disabling potential load folding opportunities.
49150 if (MayFoldLoad(N1) &&
49151 (!Commute || !isa<ConstantSDNode>(N0) ||
49152 (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N1, Op))))
49153 return false;
49154 if (MayFoldLoad(N0) &&
49155 ((Commute && !isa<ConstantSDNode>(N1)) ||
49156 (Op.getOpcode() != ISD::MUL && IsFoldableRMW(N0, Op))))
49157 return false;
49158 if (IsFoldableAtomicRMW(N0, Op) ||
49159 (Commute && IsFoldableAtomicRMW(N1, Op)))
49160 return false;
49161 }
49162 }
49164 PVT = MVT::i32;
49165 return true;
49166 }
49168 //===----------------------------------------------------------------------===//
49169 // X86 Inline Assembly Support
49170 //===----------------------------------------------------------------------===//
49172 // Helper to match a string against a sequence of pieces separated by whitespace.
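// For example, matchAsm("bswap $0", {"bswap", "$0"}) succeeds, while
// matchAsm("bswapper $0", {"bswap", "$0"}) fails because only a prefix of the
// first token matches.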
49173 static bool matchAsm(StringRef S, ArrayRef<const char *> Pieces) {
49174 S = S.substr(S.find_first_not_of(" \t")); // Skip leading whitespace.
49176 for (StringRef Piece : Pieces) {
49177 if (!S.startswith(Piece)) // Check if the piece matches.
49178 return false;
49180 S = S.substr(Piece.size());
49181 StringRef::size_type Pos = S.find_first_not_of(" \t");
49182 if (Pos == 0) // We matched a prefix.
49183 return false;
49185 S = S.substr(Pos);
49186 }
49188 return S.empty();
49189 }
49191 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
49193 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
49194 if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
49195 std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
49196 std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
49198 if (AsmPieces.size() == 3)
49199 return true;
49200 else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
49201 return true;
49202 }
49203 }
49205 return false;
49206 }
49207 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
49208 InlineAsm *IA = cast<InlineAsm>(CI->getCalledOperand());
49210 const std::string &AsmStr = IA->getAsmString();
49212 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
49213 if (!Ty || Ty->getBitWidth() % 16 != 0)
49214 return false;
49216 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
49217 SmallVector<StringRef, 4> AsmPieces;
49218 SplitString(AsmStr, AsmPieces, ";\n");
49220 switch (AsmPieces.size()) {
49221 default: return false;
49222 case 1:
49223 // FIXME: this should verify that we are targeting a 486 or better. If not,
49224 // we will turn this bswap into something that will be lowered to logical
49225 // ops instead of emitting the bswap asm. For now, we don't support 486 or
49226 // lower so don't worry about this.
49227 // bswap $0
49228 if (matchAsm(AsmPieces[0], {"bswap", "$0"}) ||
49229 matchAsm(AsmPieces[0], {"bswapl", "$0"}) ||
49230 matchAsm(AsmPieces[0], {"bswapq", "$0"}) ||
49231 matchAsm(AsmPieces[0], {"bswap", "${0:q}"}) ||
49232 matchAsm(AsmPieces[0], {"bswapl", "${0:q}"}) ||
49233 matchAsm(AsmPieces[0], {"bswapq", "${0:q}"})) {
49234 // No need to check constraints, nothing other than the equivalent of
49235 // "=r,0" would be valid here.
49236 return IntrinsicLowering::LowerToByteSwap(CI);
49237 }
49239 // rorw $$8, ${0:w} --> llvm.bswap.i16
49240 if (CI->getType()->isIntegerTy(16) &&
49241 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
49242 (matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) ||
49243 matchAsm(AsmPieces[0], {"rolw", "$$8,", "${0:w}"}))) {
49244 AsmPieces.clear();
49245 StringRef ConstraintsStr = IA->getConstraintString();
49246 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
49247 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
49248 if (clobbersFlagRegisters(AsmPieces))
49249 return IntrinsicLowering::LowerToByteSwap(CI);
49250 }
49251 break;
49252 case 3:
49253 if (CI->getType()->isIntegerTy(32) &&
49254 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
49255 matchAsm(AsmPieces[0], {"rorw", "$$8,", "${0:w}"}) &&
49256 matchAsm(AsmPieces[1], {"rorl", "$$16,", "$0"}) &&
49257 matchAsm(AsmPieces[2], {"rorw", "$$8,", "${0:w}"})) {
49258 AsmPieces.clear();
49259 StringRef ConstraintsStr = IA->getConstraintString();
49260 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
49261 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
49262 if (clobbersFlagRegisters(AsmPieces))
49263 return IntrinsicLowering::LowerToByteSwap(CI);
49264 }
49266 if (CI->getType()->isIntegerTy(64)) {
49267 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
49268 if (Constraints.size() >= 2 &&
49269 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
49270 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
49271 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
49272 if (matchAsm(AsmPieces[0], {"bswap", "%eax"}) &&
49273 matchAsm(AsmPieces[1], {"bswap", "%edx"}) &&
49274 matchAsm(AsmPieces[2], {"xchgl", "%eax,", "%edx"}))
49275 return IntrinsicLowering::LowerToByteSwap(CI);
49276 }
49277 }
49278 break;
49279 }
49281 return false;
49282 }
49283 static X86::CondCode parseConstraintCode(llvm::StringRef Constraint) {
49284 X86::CondCode Cond = StringSwitch<X86::CondCode>(Constraint)
49285 .Case("{@cca}", X86::COND_A)
49286 .Case("{@ccae}", X86::COND_AE)
49287 .Case("{@ccb}", X86::COND_B)
49288 .Case("{@ccbe}", X86::COND_BE)
49289 .Case("{@ccc}", X86::COND_B)
49290 .Case("{@cce}", X86::COND_E)
49291 .Case("{@ccz}", X86::COND_E)
49292 .Case("{@ccg}", X86::COND_G)
49293 .Case("{@ccge}", X86::COND_GE)
49294 .Case("{@ccl}", X86::COND_L)
49295 .Case("{@ccle}", X86::COND_LE)
49296 .Case("{@ccna}", X86::COND_BE)
49297 .Case("{@ccnae}", X86::COND_B)
49298 .Case("{@ccnb}", X86::COND_AE)
49299 .Case("{@ccnbe}", X86::COND_A)
49300 .Case("{@ccnc}", X86::COND_AE)
49301 .Case("{@ccne}", X86::COND_NE)
49302 .Case("{@ccnz}", X86::COND_NE)
49303 .Case("{@ccng}", X86::COND_LE)
49304 .Case("{@ccnge}", X86::COND_L)
49305 .Case("{@ccnl}", X86::COND_GE)
49306 .Case("{@ccnle}", X86::COND_G)
49307 .Case("{@ccno}", X86::COND_NO)
49308 .Case("{@ccnp}", X86::COND_P)
49309 .Case("{@ccns}", X86::COND_NS)
49310 .Case("{@cco}", X86::COND_O)
49311 .Case("{@ccp}", X86::COND_P)
49312 .Case("{@ccs}", X86::COND_S)
49313 .Default(X86::COND_INVALID);
49315 return Cond;
49316 }
49317 /// Given a constraint letter, return the type of constraint for this target.
49318 X86TargetLowering::ConstraintType
49319 X86TargetLowering::getConstraintType(StringRef Constraint) const {
49320 if (Constraint.size() == 1) {
49321 switch (Constraint[0]) {
49332 case 'k': // AVX512 masking registers.
49333 return C_RegisterClass;
49334 case 'a':
49335 case 'b':
49336 case 'c':
49337 case 'd':
49338 case 'S':
49339 case 'D':
49340 case 'A':
49341 return C_Register;
49342 case 'I':
49343 case 'J':
49344 case 'K':
49345 case 'N':
49346 case 'G':
49347 case 'L':
49348 case 'M':
49349 return C_Immediate;
49350 case 'C':
49351 case 'e':
49352 case 'Z':
49353 return C_Other;
49354 default:
49355 break;
49356 }
49357 }
49358 else if (Constraint.size() == 2) {
49359 switch (Constraint[0]) {
49363 switch (Constraint[1]) {
49373 return C_RegisterClass;
49376 } else if (parseConstraintCode(Constraint) != X86::COND_INVALID)
49378 return TargetLowering::getConstraintType(Constraint);
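// Illustrative examples (not from the upstream source):
//   getConstraintType("x")      == C_RegisterClass (any SSE/AVX register)
//   getConstraintType("a")      == C_Register      (specifically [ER]AX)
//   getConstraintType("I")      == C_Immediate     (constant in [0,31])
//   getConstraintType("{@ccz}") == C_Other         (flag-output constraint)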
/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
X86TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    LLVM_FALLTHROUGH;
  case 'R': case 'q': case 'Q': case 'a': case 'b': case 'c': case 'd':
  case 'S': case 'D': case 'A':
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_SpecificReg;
    break;
  case 'f': case 't': case 'u':
    if (type->isFloatingPointTy())
      weight = CW_SpecificReg;
    break;
  case 'y':
    if (type->isX86_MMXTy() && Subtarget.hasMMX())
      weight = CW_SpecificReg;
    break;
  case 'Y':
    if (StringRef(constraint).size() != 2)
      break;
    switch (constraint[1]) {
    default:
      return CW_Invalid;
    // XMM0
    case 'z':
      if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
          ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()) ||
          ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512()))
        return CW_SpecificReg;
      return CW_Invalid;
    // Conditional OpMask regs (AVX512)
    case 'k':
      if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
        return CW_Register;
      return CW_Invalid;
    // Any MMX reg
    case 'm':
      if (type->isX86_MMXTy() && Subtarget.hasMMX())
        return weight;
      return CW_Invalid;
    // Any SSE reg when ISA >= SSE2, same as 'x'
    case 'i': case 't': case '2':
      if (!Subtarget.hasSSE2())
        return CW_Invalid;
      break;
    }
    // Fall through (handle "Y" constraint).
    LLVM_FALLTHROUGH;
  case 'v':
    if ((type->getPrimitiveSizeInBits() == 512) && Subtarget.hasAVX512())
      weight = CW_Register;
    LLVM_FALLTHROUGH;
  case 'x':
    if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) ||
        ((type->getPrimitiveSizeInBits() == 256) && Subtarget.hasAVX()))
      weight = CW_Register;
    break;
  case 'k':
    // Enable conditional vector operations using %k<#> registers.
    if ((type->getPrimitiveSizeInBits() == 64) && Subtarget.hasAVX512())
      weight = CW_Register;
    break;
  case 'I':
    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (C->getZExtValue() <= 31)
        weight = CW_Constant;
    }
    break;
  case 'J':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 63)
        weight = CW_Constant;
    }
    break;
  case 'K':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
        weight = CW_Constant;
    }
    break;
  case 'L':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
        weight = CW_Constant;
    }
    break;
  case 'M':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 3)
        weight = CW_Constant;
    }
    break;
  case 'N':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xff)
        weight = CW_Constant;
    }
    break;
  case 'G':
  case 'C':
    if (isa<ConstantFP>(CallOperandVal)) {
      weight = CW_Constant;
    }
    break;
  case 'e':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80000000LL) &&
          (C->getSExtValue() <= 0x7fffffffLL))
        weight = CW_Constant;
    }
    break;
  case 'Z':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xffffffff)
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}
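// Illustrative examples (not from the upstream source): a <4 x float> operand
// (128 bits) with constraint 'x' on an SSE1 target weighs CW_Register above,
// and constraint 'I' with the constant 7 weighs CW_Constant, while 'I' with
// 42 stays CW_Invalid because it is outside the 0-31 range.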
/// Try to replace an X constraint, which matches anything, with another that
/// has more specific requirements based on the type of the corresponding
/// operand.
const char *X86TargetLowering::
LowerXConstraint(EVT ConstraintVT) const {
  // FP X constraints get lowered to SSE1/2 registers if available, otherwise
  // 'f' like normal targets.
  if (ConstraintVT.isFloatingPoint()) {
    if (Subtarget.hasSSE1())
      return "x";
  }

  return TargetLowering::LowerXConstraint(ConstraintVT);
}
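// Illustrative example (not from the upstream source): an "X" constraint on a
// float operand becomes "x" when SSE1 is available, so the value is placed in
// an XMM register rather than on the x87 stack.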
// Lower @cc targets via setcc.
SDValue X86TargetLowering::LowerAsmOutputForConstraint(
    SDValue &Chain, SDValue &Flag, SDLoc DL, const AsmOperandInfo &OpInfo,
    SelectionDAG &DAG) const {
  X86::CondCode Cond = parseConstraintCode(OpInfo.ConstraintCode);
  if (Cond == X86::COND_INVALID)
    return SDValue();
  // Check that the return type is valid.
  if (OpInfo.ConstraintVT.isVector() || !OpInfo.ConstraintVT.isInteger() ||
      OpInfo.ConstraintVT.getSizeInBits() < 8)
    report_fatal_error("Flag output operand is of invalid type");

  // Get EFLAGS register. Only update the chain when the copyfrom is glued.
  if (Flag.getNode()) {
    Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32, Flag);
    Chain = Flag.getValue(1);
  } else
    Flag = DAG.getCopyFromReg(Chain, DL, X86::EFLAGS, MVT::i32);
  // Extract the CC code.
  SDValue CC = getSETCC(Cond, Flag, DL, DAG);
  // Extend to the result type.
  SDValue Result = DAG.getNode(ISD::ZERO_EXTEND, DL, OpInfo.ConstraintVT, CC);
  return Result;
}
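// Illustrative sketch (not from the upstream source) of the DAG produced for
// an "=@ccz" output, assuming the inline asm just wrote EFLAGS:
//   t1: i32 = CopyFromReg EFLAGS
//   t2: i8  = X86ISD::SETCC COND_E, t1
//   t3: i32 = zero_extend t2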
/// Lower the specified operand into the Ops vector.
/// If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 31) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'J':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 63) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'K':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<8>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'L':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
          (Subtarget.is64Bit() && C->getZExtValue() == 0xffffffff)) {
        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'M':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 3) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 255) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'O':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 127) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    return;
  case 'e': {
    // 32-bit signed value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getSExtValue())) {
        // Widen to 64 bits here to get it sign extended.
        Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op), MVT::i64);
        break;
      }
      // FIXME gcc accepts some relocatable values here too, but only in
      // certain memory models; it's complicated.
    }
    return;
  }
  case 'Z': {
    // 32-bit unsigned value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getZExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), SDLoc(Op),
                                       Op.getValueType());
        break;
      }
    }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    return;
  }
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      bool IsBool = CST->getConstantIntValue()->getBitWidth() == 1;
      BooleanContent BCont = getBooleanContents(MVT::i64);
      ISD::NodeType ExtOpc = IsBool ? getExtendForContent(BCont)
                                    : ISD::SIGN_EXTEND;
      int64_t ExtVal = ExtOpc == ISD::ZERO_EXTEND ? CST->getZExtValue()
                                                  : CST->getSExtValue();
      Result = DAG.getTargetConstant(ExtVal, SDLoc(Op), MVT::i64);
      break;
    }

    // In any sort of PIC mode addresses need to be computed at runtime by
    // adding in a register or some sort of table lookup. These can't
    // be used as immediates.
    if (Subtarget.isPICStyleGOT() || Subtarget.isPICStyleStubPIC())
      return;

    // If we are in non-pic codegen mode, we allow the address of a global
    // (with an optional displacement) to be used with 'i'.
    if (auto *GA = dyn_cast<GlobalAddressSDNode>(Op))
      // If we require an extra load to get this address, as in PIC mode, we
      // can't accept it.
      if (isGlobalStubReference(
              Subtarget.classifyGlobalReference(GA->getGlobal())))
        return;
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
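// Illustrative example (not from the upstream source): for
//   asm("shll %1, %0" : "+r"(x) : "I"(5));
// the 'I' case above accepts the constant 5 (valid range 0-31) and pushes it
// as a target constant, while "I"(42) returns without adding anything to Ops,
// so the operand is reported as invalid.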
/// Check if \p RC is a general purpose register class.
/// I.e., GR* or one of their variants.
static bool isGRClass(const TargetRegisterClass &RC) {
  return RC.hasSuperClassEq(&X86::GR8RegClass) ||
         RC.hasSuperClassEq(&X86::GR16RegClass) ||
         RC.hasSuperClassEq(&X86::GR32RegClass) ||
         RC.hasSuperClassEq(&X86::GR64RegClass) ||
         RC.hasSuperClassEq(&X86::LOW32_ADDR_ACCESS_RBPRegClass);
}

/// Check if \p RC is a vector register class.
/// I.e., FR* / VR* or one of their variants.
static bool isFRClass(const TargetRegisterClass &RC) {
  return RC.hasSuperClassEq(&X86::FR32XRegClass) ||
         RC.hasSuperClassEq(&X86::FR64XRegClass) ||
         RC.hasSuperClassEq(&X86::VR128XRegClass) ||
         RC.hasSuperClassEq(&X86::VR256XRegClass) ||
         RC.hasSuperClassEq(&X86::VR512RegClass);
}

/// Check if \p RC is a mask register class.
/// I.e., VK* or one of their variants.
static bool isVKClass(const TargetRegisterClass &RC) {
  return RC.hasSuperClassEq(&X86::VK1RegClass) ||
         RC.hasSuperClassEq(&X86::VK2RegClass) ||
         RC.hasSuperClassEq(&X86::VK4RegClass) ||
         RC.hasSuperClassEq(&X86::VK8RegClass) ||
         RC.hasSuperClassEq(&X86::VK16RegClass) ||
         RC.hasSuperClassEq(&X86::VK32RegClass) ||
         RC.hasSuperClassEq(&X86::VK64RegClass);
}
std::pair<unsigned, const TargetRegisterClass *>
X86TargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                StringRef Constraint,
                                                MVT VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default:
      break;
    // 'A' means [ER]AX + [ER]DX.
    case 'A':
      if (Subtarget.is64Bit())
        return std::make_pair(X86::RAX, &X86::GR64_ADRegClass);
      assert((Subtarget.is32Bit() || Subtarget.is16Bit()) &&
             "Expecting 64, 32 or 16 bit subtarget");
      return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);

      // TODO: Slight differences here in allocation order and leaving
      // RIP in the class. Do they matter any more here than they do
      // in the normal allocation?
    case 'k':
      if (Subtarget.hasAVX512()) {
        if (VT == MVT::i1)
          return std::make_pair(0U, &X86::VK1RegClass);
        if (VT == MVT::i8)
          return std::make_pair(0U, &X86::VK8RegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::VK16RegClass);
      }
      if (Subtarget.hasBWI()) {
        if (VT == MVT::i32)
          return std::make_pair(0U, &X86::VK32RegClass);
        if (VT == MVT::i64)
          return std::make_pair(0U, &X86::VK64RegClass);
      }
      break;
    case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
      if (Subtarget.is64Bit()) {
        if (VT == MVT::i8 || VT == MVT::i1)
          return std::make_pair(0U, &X86::GR8RegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::GR16RegClass);
        if (VT == MVT::i32 || VT == MVT::f32)
          return std::make_pair(0U, &X86::GR32RegClass);
        if (VT != MVT::f80)
          return std::make_pair(0U, &X86::GR64RegClass);
        break;
      }
      LLVM_FALLTHROUGH;
      // 32-bit fallthrough
    case 'Q': // Q_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_ABCDRegClass);
      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget.is64Bit())
        return std::make_pair(0U, &X86::GR32_ABCDRegClass);
      if (VT != MVT::f80)
        return std::make_pair(0U, &X86::GR64_ABCDRegClass);
      break;
    case 'r': // GENERAL_REGS
    case 'l': // INDEX_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8RegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16RegClass);
      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget.is64Bit())
        return std::make_pair(0U, &X86::GR32RegClass);
      if (VT != MVT::f80)
        return std::make_pair(0U, &X86::GR64RegClass);
      break;
    case 'R': // LEGACY_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_NOREXRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_NOREXRegClass);
      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget.is64Bit())
        return std::make_pair(0U, &X86::GR32_NOREXRegClass);
      if (VT != MVT::f80)
        return std::make_pair(0U, &X86::GR64_NOREXRegClass);
      break;
    case 'f': // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP32RegClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP64RegClass);
      if (VT == MVT::f32 || VT == MVT::f64 || VT == MVT::f80)
        return std::make_pair(0U, &X86::RFP80RegClass);
      break;
    case 'y': // MMX_REGS if MMX allowed.
      if (!Subtarget.hasMMX()) break;
      return std::make_pair(0U, &X86::VR64RegClass);
    case 'v':
    case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
      if (!Subtarget.hasSSE1()) break;
      bool VConstraint = (Constraint[0] == 'v');

      switch (VT.SimpleTy) {
      default:
        break;
      // Scalar SSE types.
      case MVT::f32: case MVT::i32:
        if (VConstraint && Subtarget.hasVLX())
          return std::make_pair(0U, &X86::FR32XRegClass);
        return std::make_pair(0U, &X86::FR32RegClass);
      case MVT::f64: case MVT::i64:
        if (VConstraint && Subtarget.hasVLX())
          return std::make_pair(0U, &X86::FR64XRegClass);
        return std::make_pair(0U, &X86::FR64RegClass);
      case MVT::i128:
        if (Subtarget.is64Bit()) {
          if (VConstraint && Subtarget.hasVLX())
            return std::make_pair(0U, &X86::VR128XRegClass);
          return std::make_pair(0U, &X86::VR128RegClass);
        }
        break;
      // Vector types and fp128.
      case MVT::f128: case MVT::v16i8: case MVT::v8i16: case MVT::v4i32:
      case MVT::v2i64: case MVT::v4f32: case MVT::v2f64:
        if (VConstraint && Subtarget.hasVLX())
          return std::make_pair(0U, &X86::VR128XRegClass);
        return std::make_pair(0U, &X86::VR128RegClass);
      // AVX types.
      case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
      case MVT::v8f32: case MVT::v4f64:
        if (VConstraint && Subtarget.hasVLX())
          return std::make_pair(0U, &X86::VR256XRegClass);
        if (Subtarget.hasAVX())
          return std::make_pair(0U, &X86::VR256RegClass);
        break;
      case MVT::v64i8: case MVT::v32i16: case MVT::v8f64: case MVT::v16f32:
      case MVT::v16i32: case MVT::v8i64:
        if (!Subtarget.hasAVX512()) break;
        if (VConstraint)
          return std::make_pair(0U, &X86::VR512RegClass);
        return std::make_pair(0U, &X86::VR512_0_15RegClass);
      }
      break;
    }
  } else if (Constraint.size() == 2 && Constraint[0] == 'Y') {
    switch (Constraint[1]) {
    default:
      break;
    case 'i': case 't': case '2':
      return getRegForInlineAsmConstraint(TRI, "x", VT);
    case 'm':
      if (!Subtarget.hasMMX()) break;
      return std::make_pair(0U, &X86::VR64RegClass);
    case 'z':
      if (!Subtarget.hasSSE1()) break;
      switch (VT.SimpleTy) {
      default:
        break;
      // Scalar SSE types.
      case MVT::f32: case MVT::i32:
        return std::make_pair(X86::XMM0, &X86::FR32RegClass);
      case MVT::f64: case MVT::i64:
        return std::make_pair(X86::XMM0, &X86::FR64RegClass);
      case MVT::f128: case MVT::v16i8: case MVT::v8i16: case MVT::v4i32:
      case MVT::v2i64: case MVT::v4f32: case MVT::v2f64:
        return std::make_pair(X86::XMM0, &X86::VR128RegClass);
      // AVX types.
      case MVT::v32i8: case MVT::v16i16: case MVT::v8i32: case MVT::v4i64:
      case MVT::v8f32: case MVT::v4f64:
        if (Subtarget.hasAVX())
          return std::make_pair(X86::YMM0, &X86::VR256RegClass);
        break;
      case MVT::v64i8: case MVT::v32i16: case MVT::v8f64: case MVT::v16f32:
      case MVT::v16i32: case MVT::v8i64:
        if (Subtarget.hasAVX512())
          return std::make_pair(X86::ZMM0, &X86::VR512_0_15RegClass);
        break;
      }
      break;
    case 'k':
      // This register class doesn't allocate k0 for masked vector operations.
      if (Subtarget.hasAVX512()) {
        if (VT == MVT::i1)
          return std::make_pair(0U, &X86::VK1WMRegClass);
        if (VT == MVT::i8)
          return std::make_pair(0U, &X86::VK8WMRegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::VK16WMRegClass);
      }
      if (Subtarget.hasBWI()) {
        if (VT == MVT::i32)
          return std::make_pair(0U, &X86::VK32WMRegClass);
        if (VT == MVT::i64)
          return std::make_pair(0U, &X86::VK64WMRegClass);
      }
      break;
    }
  }

  if (parseConstraintCode(Constraint) != X86::COND_INVALID)
    return std::make_pair(0U, &X86::GR32RegClass);
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<Register, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  // Not found as a standard register?
  if (!Res.second) {
    // Map st(0) -> st(7) -> ST0
    if (Constraint.size() == 7 && Constraint[0] == '{' &&
        tolower(Constraint[1]) == 's' && tolower(Constraint[2]) == 't' &&
        Constraint[3] == '(' &&
        (Constraint[4] >= '0' && Constraint[4] <= '7') &&
        Constraint[5] == ')' && Constraint[6] == '}') {
      // st(7) is not allocatable and thus not a member of RFP80. Return
      // singleton class in cases where we have a reference to it.
      if (Constraint[4] == '7')
        return std::make_pair(X86::FP7, &X86::RFP80_7RegClass);
      return std::make_pair(X86::FP0 + Constraint[4] - '0',
                            &X86::RFP80RegClass);
    }

    // GCC allows "st(0)" to be called just plain "st".
    if (StringRef("{st}").equals_lower(Constraint))
      return std::make_pair(X86::FP0, &X86::RFP80RegClass);

    // flags -> EFLAGS
    if (StringRef("{flags}").equals_lower(Constraint))
      return std::make_pair(X86::EFLAGS, &X86::CCRRegClass);

    // dirflag -> DF
    if (StringRef("{dirflag}").equals_lower(Constraint))
      return std::make_pair(X86::DF, &X86::DFCCRRegClass);

    // fpsr -> FPSW
    if (StringRef("{fpsr}").equals_lower(Constraint))
      return std::make_pair(X86::FPSW, &X86::FPCCRRegClass);

    return Res;
  }
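// Illustrative examples (not from the upstream source) of the mappings above:
//   "{st(3)}" -> X86::FP3 in RFP80
//   "{st(7)}" -> X86::FP7 in the singleton RFP80_7 class
//   "{st}"    -> X86::FP0, same as "{st(0)}"
//   "{flags}" -> X86::EFLAGS, typically seen in clobber lists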
  // Make sure it isn't a register that requires 64-bit mode.
  if (!Subtarget.is64Bit() &&
      (isFRClass(*Res.second) || isGRClass(*Res.second)) &&
      TRI->getEncodingValue(Res.first) >= 8) {
    // Register requires REX prefix, but we're in 32-bit mode.
    return std::make_pair(0, nullptr);
  }

  // Make sure it isn't a register that requires AVX512.
  if (!Subtarget.hasAVX512() && isFRClass(*Res.second) &&
      TRI->getEncodingValue(Res.first) & 0x10) {
    // Register requires EVEX prefix.
    return std::make_pair(0, nullptr);
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
  // turn into {ax},{dx}.
  // MVT::Other is used to specify clobber names.
  if (TRI->isTypeLegalForClass(*Res.second, VT) || VT == MVT::Other)
    return Res; // Correct type already, nothing to do.

  // Get a matching integer of the correct size. i.e. "ax" with MVT::i32 should
  // return "eax". This should even work for things like getting 64-bit integer
  // registers when given an f64 type.
  const TargetRegisterClass *Class = Res.second;
  // The generic code will match the first register class that contains the
  // given register. Thus, based on the ordering of the tablegened file,
  // the "plain" GR classes might not come first.
  // Therefore, use a helper method.
  if (isGRClass(*Class)) {
    unsigned Size = VT.getSizeInBits();
    if (Size == 1) Size = 8;
    Register DestReg = getX86SubSuperRegisterOrZero(Res.first, Size);
    if (DestReg > 0) {
      bool is64Bit = Subtarget.is64Bit();
      const TargetRegisterClass *RC =
          Size == 8 ? (is64Bit ? &X86::GR8RegClass : &X86::GR8_NOREXRegClass)
        : Size == 16 ? (is64Bit ? &X86::GR16RegClass : &X86::GR16_NOREXRegClass)
        : Size == 32 ? (is64Bit ? &X86::GR32RegClass : &X86::GR32_NOREXRegClass)
        : Size == 64 ? (is64Bit ? &X86::GR64RegClass : nullptr)
        : nullptr;
      if (Size == 64 && !is64Bit) {
        // Model GCC's behavior here and select a fixed pair of 32-bit
        // registers.
        switch (DestReg) {
        case X86::RAX:
          return std::make_pair(X86::EAX, &X86::GR32_ADRegClass);
        case X86::RDX:
          return std::make_pair(X86::EDX, &X86::GR32_DCRegClass);
        case X86::RCX:
          return std::make_pair(X86::ECX, &X86::GR32_CBRegClass);
        case X86::RBX:
          return std::make_pair(X86::EBX, &X86::GR32_BSIRegClass);
        case X86::RSI:
          return std::make_pair(X86::ESI, &X86::GR32_SIDIRegClass);
        case X86::RDI:
          return std::make_pair(X86::EDI, &X86::GR32_DIBPRegClass);
        case X86::RBP:
          return std::make_pair(X86::EBP, &X86::GR32_BPSPRegClass);
        default:
          return std::make_pair(0, nullptr);
        }
      }
      if (RC && RC->contains(DestReg))
        return std::make_pair(DestReg, RC);
      return Res;
    }
    // No register found/type mismatch.
    return std::make_pair(0, nullptr);
  } else if (isFRClass(*Class)) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class. This can happen with constraints like {xmm0} where the
    // target independent register mapper will just pick the first match it can
    // find, ignoring the required type.

    // TODO: Handle f128 and i128 in FR128RegClass after it is tested well.
    if (VT == MVT::f32 || VT == MVT::i32)
      Res.second = &X86::FR32XRegClass;
    else if (VT == MVT::f64 || VT == MVT::i64)
      Res.second = &X86::FR64XRegClass;
    else if (TRI->isTypeLegalForClass(X86::VR128XRegClass, VT))
      Res.second = &X86::VR128XRegClass;
    else if (TRI->isTypeLegalForClass(X86::VR256XRegClass, VT))
      Res.second = &X86::VR256XRegClass;
    else if (TRI->isTypeLegalForClass(X86::VR512RegClass, VT))
      Res.second = &X86::VR512RegClass;
    else {
      // Type mismatch and not a clobber: Return an error.
      Res.first = 0;
      Res.second = nullptr;
    }
  } else if (isVKClass(*Class)) {
    if (VT == MVT::i1)
      Res.second = &X86::VK1RegClass;
    else if (VT == MVT::i8)
      Res.second = &X86::VK8RegClass;
    else if (VT == MVT::i16)
      Res.second = &X86::VK16RegClass;
    else if (VT == MVT::i32)
      Res.second = &X86::VK32RegClass;
    else if (VT == MVT::i64)
      Res.second = &X86::VK64RegClass;
    else {
      // Type mismatch and not a clobber: Return an error.
      Res.first = 0;
      Res.second = nullptr;
    }
  }

  return Res;
}
int X86TargetLowering::getScalingFactorCost(const DataLayout &DL,
                                            const AddrMode &AM, Type *Ty,
                                            unsigned AS) const {
  // Scaling factors are not free at all.
  // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
  // will take 2 allocations in the out of order engine instead of 1
  // for plain addressing mode, i.e. inst (reg1).
  // E.g.,
  // vaddps (%rsi,%rdx), %ymm0, %ymm1
  // Requires two allocations (one for the load, one for the computation)
  // whereas:
  // vaddps (%rsi), %ymm0, %ymm1
  // Requires just 1 allocation, i.e., freeing allocations for other operations
  // and having less micro operations to execute.
  //
  // For some X86 architectures, this is even worse because for instance for
  // stores, the complex addressing mode forces the instruction to use the
  // "load" ports instead of the dedicated "store" port.
  // E.g., on Haswell:
  // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
  // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
  if (isLegalAddressingMode(DL, AM, Ty, AS))
    // Scale represents reg2 * scale, thus account for 1
    // as soon as we use a second register.
    return AM.Scale != 0;
  return -1;
}
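// Illustrative examples (not from the upstream source), following the logic
// above: an AddrMode with Scale == 0 (plain "(%rdi)") costs 0, one with
// Scale != 0 ("(%rdi,%rsi,4)") costs 1, and an addressing mode that is not
// legal for the type returns a negative cost.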
bool X86TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
  // Integer division on x86 is expensive. However, when aggressively optimizing
  // for code size, we prefer to use a div instruction, as it is usually smaller
  // than the alternative sequence.
  // The exception to this is vector division. Since x86 doesn't have vector
  // integer division, leaving the division as-is is a loss even in terms of
  // size, because it will have to be scalarized, while the alternative code
  // sequence can be performed in vector form.
  bool OptSize = Attr.hasFnAttribute(Attribute::MinSize);
  return OptSize && !VT.isVector();
}
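// Illustrative example (not from the upstream source): with the minsize
// attribute, "sdiv i32 %x, 10" is kept as a single small idiv instead of the
// longer multiply-by-magic-constant expansion, but "sdiv <4 x i32>" is still
// expanded because scalarizing four idivs would be larger.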
void X86TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
  if (!Subtarget.is64Bit())
    return;

  // Update IsSplitCSR in X86MachineFunctionInfo.
  X86MachineFunctionInfo *AFI =
      Entry->getParent()->getInfo<X86MachineFunctionInfo>();
  AFI->setIsSplitCSR(true);
}
void X86TargetLowering::insertCopiesSplitCSR(
    MachineBasicBlock *Entry,
    const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
  const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
  const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
  if (!IStart)
    return;

  const TargetInstrInfo *TII = Subtarget.getInstrInfo();
  MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
  MachineBasicBlock::iterator MBBI = Entry->begin();
  for (const MCPhysReg *I = IStart; *I; ++I) {
    const TargetRegisterClass *RC = nullptr;
    if (X86::GR64RegClass.contains(*I))
      RC = &X86::GR64RegClass;
    else
      llvm_unreachable("Unexpected register class in CSRsViaCopy!");

    Register NewVR = MRI->createVirtualRegister(RC);
    // Create a copy from the CSR to a virtual register.
    // FIXME: this currently does not emit CFI pseudo-instructions, it works
    // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
    // nounwind. If we want to generalize this later, we may need to emit
    // CFI pseudo-instructions.
    assert(
        Entry->getParent()->getFunction().hasFnAttribute(Attribute::NoUnwind) &&
        "Function should be nounwind in insertCopiesSplitCSR!");
    Entry->addLiveIn(*I);
    BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
        .addReg(*I);

    // Insert the copy-back instructions right before the terminator.
    for (auto *Exit : Exits)
      BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
              TII->get(TargetOpcode::COPY), *I)
          .addReg(NewVR);
  }
}
bool X86TargetLowering::supportSwiftError() const {
  return Subtarget.is64Bit();
}

/// Returns true if stack probing through a function call is requested.
bool X86TargetLowering::hasStackProbeSymbol(MachineFunction &MF) const {
  return !getStackProbeSymbolName(MF).empty();
}
/// Returns true if stack probing through inline assembly is requested.
bool X86TargetLowering::hasInlineStackProbe(MachineFunction &MF) const {

  // No inline stack probe for Windows; it has its own mechanism.
  if (Subtarget.isOSWindows() ||
      MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
    return false;

  // If the function specifically requests inline stack probes, emit them.
  if (MF.getFunction().hasFnAttribute("probe-stack"))
    return MF.getFunction().getFnAttribute("probe-stack").getValueAsString() ==
           "inline-asm";

  return false;
}
/// Returns the name of the symbol used to emit stack probes or the empty
/// string if not applicable.
StringRef
X86TargetLowering::getStackProbeSymbolName(MachineFunction &MF) const {
  // Inline stack probes disable the stack probe call.
  if (hasInlineStackProbe(MF))
    return "";

  // If the function specifically requests stack probes, emit them.
  if (MF.getFunction().hasFnAttribute("probe-stack"))
    return MF.getFunction().getFnAttribute("probe-stack").getValueAsString();

  // Generally, if we aren't on Windows, the platform ABI does not include
  // support for stack probes, so don't emit them.
  if (!Subtarget.isOSWindows() || Subtarget.isTargetMachO() ||
      MF.getFunction().hasFnAttribute("no-stack-arg-probe"))
    return "";

  // We need a stack probe to conform to the Windows ABI. Choose the right
  // symbol.
  if (Subtarget.is64Bit())
    return Subtarget.isTargetCygMing() ? "___chkstk_ms" : "__chkstk";
  return Subtarget.isTargetCygMing() ? "_alloca" : "_chkstk";
}
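// Illustrative example (not from the upstream source): an IR function carrying
//   attributes #0 = { "probe-stack"="inline-asm" }
// gets inline stack probes, while "probe-stack"="__my_probe" (a hypothetical
// symbol) makes the backend emit calls to that symbol instead of the default
// __chkstk-style routine.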
unsigned
X86TargetLowering::getStackProbeSize(MachineFunction &MF) const {
  // The default stack probe size is 4096 if the function has no
  // "stack-probe-size" attribute.
  unsigned StackProbeSize = 4096;
  const Function &Fn = MF.getFunction();
  if (Fn.hasFnAttribute("stack-probe-size"))
    Fn.getFnAttribute("stack-probe-size")
        .getValueAsString()
        .getAsInteger(0, StackProbeSize);
  return StackProbeSize;
}
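// Illustrative example (not from the upstream source): with
//   attributes #0 = { "stack-probe-size"="8192" }
// getStackProbeSize returns 8192, so frames smaller than 8192 bytes skip
// probing and larger frames are probed at 8192-byte strides.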