//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/Attr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/DiagnosticFrontend.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm> // std::sort

using namespace clang;
using namespace CodeGen;

// Helper for coercing an aggregate argument or return value into an integer
// array of the same size (including padding) and alignment. This alternate
// coercion happens only for the RenderScript ABI and can be removed after
// runtimes that rely on it are no longer supported.
//
// RenderScript assumes that the size of the argument / return value in the IR
// is the same as the size of the corresponding qualified type. This helper
// coerces the aggregate type into an array of the same size (including
// padding). This coercion is used in lieu of expansion of struct members or
// other canonical coercions that return a coerced-type of larger size.
//
// Ty          - The argument / return value type
// Context     - The associated ASTContext
// LLVMContext - The associated LLVMContext
static ABIArgInfo coerceToIntArray(QualType Ty,
                                   ASTContext &Context,
                                   llvm::LLVMContext &LLVMContext) {
  // Alignment and Size are measured in bits.
  const uint64_t Size = Context.getTypeSize(Ty);
  const uint64_t Alignment = Context.getTypeAlign(Ty);
  llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
  const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
  return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
}

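// Illustrative example (editor's sketch, not from the original source): for a
// struct with 32-bit alignment and three int fields, e.g.
//   struct S { int32_t a, b, c; };   // Size = 96 bits, Alignment = 32 bits
// the helper yields ABIArgInfo::getDirect([3 x i32]), so the IR-level size
// matches the qualified type's size exactly, padding included.
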
static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell =
        Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
    Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByRef, bool Realign,
                                 llvm::Type *Padding) const {
  return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty),
                                 ByRef, Realign, Padding);
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
  return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
                                      /*ByRef*/ false, Realign);
}

Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                             QualType Ty) const {
  return Address::invalid();
}

bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
  if (Ty->isPromotableIntegerType())
    return true;

  if (const auto *EIT = Ty->getAs<ExtIntType>())
    if (EIT->getNumBits() < getContext().getTypeSize(getContext().IntTy))
      return true;

  return false;
}

ABIInfo::~ABIInfo() {}

/// Does the given lowering require more than the given number of
/// registers when expanded?
///
/// This is intended to be the basis of a reasonable basic implementation
/// of should{Pass,Return}IndirectlyForSwift.
///
/// For most targets, a limit of four total registers is reasonable; this
/// limits the amount of code required in order to move around the value
/// in case it wasn't produced immediately prior to the call by the caller
/// (or wasn't produced in exactly the right registers) or isn't used
/// immediately within the callee. But some targets may need to further
/// limit the register count due to an inability to support that many
/// return registers.
static bool occupiesMoreThan(CodeGenTypes &cgt,
                             ArrayRef<llvm::Type*> scalarTypes,
                             unsigned maxAllRegisters) {
  unsigned intCount = 0, fpCount = 0;
  for (llvm::Type *type : scalarTypes) {
    if (type->isPointerTy()) {
      intCount++;
    } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
      auto ptrWidth = cgt.getTarget().getPointerWidth(0);
      intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
    } else {
      assert(type->isVectorTy() || type->isFloatingPointTy());
      fpCount++;
    }
  }

  return (intCount + fpCount > maxAllRegisters);
}

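// Worked example (illustrative, assuming a 64-bit target): for scalarTypes =
// { i8*, i128, double }, the pointer counts as one integer register, the
// i128 as (128 + 63) / 64 == 2 more, and the double as one FP register;
// 1 + 2 + 1 == 4 does not exceed maxAllRegisters == 4, so the value is not
// forced indirect.
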
bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                             llvm::Type *eltTy,
                                             unsigned numElts) const {
  // The default implementation of this assumes that the target guarantees
  // 128-bit SIMD support but nothing more.
  return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16);
}

static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD) {
    if (!RT->getDecl()->canPassInRegisters())
      return CGCXXABI::RAA_Indirect;
    return CGCXXABI::RAA_Default;
  }
  return CXXABI.getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
                                              CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}

static bool classifyReturnType(const CGCXXABI &CXXABI, CGFunctionInfo &FI,
                               const ABIInfo &Info) {
  QualType Ty = FI.getReturnType();

  if (const auto *RT = Ty->getAs<RecordType>())
    if (!isa<CXXRecordDecl>(RT->getDecl()) &&
        !RT->getDecl()->canPassInRegisters()) {
      FI.getReturnInfo() = Info.getNaturalAlignIndirect(Ty);
      return true;
    }

  return CXXABI.classifyReturnType(FI);
}

/// Pass transparent unions as if they were the type of the first element. Sema
/// should ensure that all elements of the union have the same "machine type".
static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
  if (const RecordType *UT = Ty->getAsUnionType()) {
    const RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() && "sema created an empty transparent union");
      return UD->field_begin()->getType();
    }
  }
  return Ty;
}

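// Hedged example of the rule above (editor's illustration): for a GCC-style
// transparent union such as
//   typedef union { int *__ip; union wait *__up; } wait_status_ptr_t
//       __attribute__((__transparent_union__));
// an argument of type wait_status_ptr_t is classified exactly as its first
// field (int *) would be.
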
CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}

const CodeGenOptions &ABIInfo::getCodeGenOpts() const {
  return CGT.getCodeGenOpts();
}

bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }

bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}

bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}

LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend: OS << "Extend"; break;
  case Ignore: OS << "Ignore"; break;
  case InAlloca: OS << "InAlloca Offset=" << getInAllocaFieldIndex(); break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign().getQuantity()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand: OS << "Expand"; break;
  case CoerceAndExpand:
    OS << "CoerceAndExpand Type=";
    getCoerceAndExpandType()->print(OS);
    break;
  }
  OS << ")\n";
}

// Dynamically round a pointer up to a multiple of the given alignment.
static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
                                                  llvm::Value *Ptr,
                                                  CharUnits Align) {
  llvm::Value *PtrAsInt = Ptr;
  // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
  PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
  PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
  PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
  PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
                                        Ptr->getType(),
                                        Ptr->getName() + ".aligned");
  return PtrAsInt;
}

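// Worked example of the mask trick above (illustrative): with Align == 8 and
// a pointer value of 0x1003, (0x1003 + 7) & -8 == 0x1008, the next 8-byte
// boundary; an already-aligned pointer is unchanged, since
// (0x1008 + 7) & -8 == 0x1008.
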
/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// This version implements the core direct-value passing rules.
///
/// \param SlotSize - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding. If this
///   is false, the returned address might be less-aligned than
///   DirectAlign.
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
                                      Address VAListAddr,
                                      llvm::Type *DirectTy,
                                      CharUnits DirectSize,
                                      CharUnits DirectAlign,
                                      CharUnits SlotSize,
                                      bool AllowHigherAlign) {
  // Cast the element type to i8* if necessary. Some platforms define
  // va_list as a struct containing an i8* instead of just an i8*.
  if (VAListAddr.getElementType() != CGF.Int8PtrTy)
    VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);

  llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");

  // If the CC aligns values higher than the slot size, do so if needed.
  Address Addr = Address::invalid();
  if (AllowHigherAlign && DirectAlign > SlotSize) {
    Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
                   DirectAlign);
  } else {
    Addr = Address(Ptr, SlotSize);
  }

  // Advance the pointer past the argument, then store that back.
  CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
  Address NextPtr =
      CGF.Builder.CreateConstInBoundsByteGEP(Addr, FullDirectSize, "argp.next");
  CGF.Builder.CreateStore(NextPtr.getPointer(), VAListAddr);

  // If the argument is smaller than a slot, and this is a big-endian
  // target, the argument will be right-adjusted in its slot.
  if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
      !DirectTy->isStructTy()) {
    Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
  }

  Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
  return Addr;
}

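// Editor's sketch of the adjustment above: on a big-endian target with
// 8-byte slots, a 4-byte integer is right-adjusted within its slot, so Addr
// is advanced by SlotSize - DirectSize == 4 bytes before the final bitcast;
// on little-endian targets the value already sits at offset 0 and no
// adjustment is made.
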
/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// \param IsIndirect - Values of this type are passed indirectly.
/// \param ValueInfo - The size and alignment of this type, generally
///   computed with getContext().getTypeInfoInChars(ValueTy).
/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType ValueTy, bool IsIndirect,
                                std::pair<CharUnits, CharUnits> ValueInfo,
                                CharUnits SlotSizeAndAlign,
                                bool AllowHigherAlign) {
  // The size and alignment of the value that was passed directly.
  CharUnits DirectSize, DirectAlign;
  if (IsIndirect) {
    DirectSize = CGF.getPointerSize();
    DirectAlign = CGF.getPointerAlign();
  } else {
    DirectSize = ValueInfo.first;
    DirectAlign = ValueInfo.second;
  }

  // Cast the address we've calculated to the right type.
  llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
  if (IsIndirect)
    DirectTy = DirectTy->getPointerTo(0);

  Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
                                        DirectSize, DirectAlign,
                                        SlotSizeAndAlign,
                                        AllowHigherAlign);

  if (IsIndirect)
    Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second);

  return Addr;
}

static Address emitMergePHI(CodeGenFunction &CGF,
                            Address Addr1, llvm::BasicBlock *Block1,
                            Address Addr2, llvm::BasicBlock *Block2,
                            const llvm::Twine &Name = "") {
  assert(Addr1.getType() == Addr2.getType());
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
  PHI->addIncoming(Addr1.getPointer(), Block1);
  PHI->addIncoming(Addr2.getPointer(), Block2);
  CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
  return Address(PHI, Align);
}

TargetCodeGenInfo::~TargetCodeGenInfo() = default;

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                      const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   HiPE
  // For everything else, we just prefer false unless we opt out.
  return false;
}

void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static or
  // dynamic.
  Opt = "-l";
  Opt += Lib;
}

unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  // OpenCL kernels are called via an explicit runtime API with arguments
  // set with clSetKernelArg(), not as normal sub-functions.
  // Return SPIR_KERNEL by default as the kernel calling convention to
  // ensure the fingerprint is fixed in such a way that each OpenCL argument
  // gets one matching argument in the produced kernel function argument
  // list to enable feasible implementation of clSetKernelArg() with
  // aggregates etc. If we were to use the default C calling convention here,
  // clSetKernelArg() might break depending on the target-specific
  // conventions; different targets might split structs passed as values
  // to multiple function arguments etc.
  return llvm::CallingConv::SPIR_KERNEL;
}

llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
    llvm::PointerType *T, QualType QT) const {
  return llvm::ConstantPointerNull::get(T);
}

LangAS TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                   const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  return D ? D->getType().getAddressSpace() : LangAS::Default;
}

llvm::Value *TargetCodeGenInfo::performAddrSpaceCast(
    CodeGen::CodeGenFunction &CGF, llvm::Value *Src, LangAS SrcAddr,
    LangAS DestAddr, llvm::Type *DestTy, bool isNonNull) const {
  // Since target may map different address spaces in AST to the same address
  // space, an address space conversion may end up as a bitcast.
  if (auto *C = dyn_cast<llvm::Constant>(Src))
    return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy);
  // Try to preserve the source's name to make IR more readable.
  return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
      Src, DestTy, Src->hasName() ? Src->getName() + ".ascast" : "");
}

llvm::Constant *
TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src,
                                        LangAS SrcAddr, LangAS DestAddr,
                                        llvm::Type *DestTy) const {
  // Since target may map different address spaces in AST to the same address
  // space, an address space conversion may end up as a bitcast.
  return llvm::ConstantExpr::getPointerCast(Src, DestTy);
}

llvm::SyncScope::ID
TargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
                                      SyncScope Scope,
                                      llvm::AtomicOrdering Ordering,
                                      llvm::LLVMContext &Ctx) const {
  return Ctx.getOrInsertSyncScopeID(""); /* default sync scope */
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  bool WasArray = false;
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
      // The [[no_unique_address]] special case below does not apply to
      // arrays of C++ empty records, so we need to remember this fact.
      WasArray = true;
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  //
  // The exception to the above rule are fields marked with the
  // [[no_unique_address]] attribute (since C++20). Those do count as empty
  // according to the Itanium ABI. The exception applies only to records,
  // not arrays of records, so we must also check whether we stripped off an
  // array type.
  if (isa<CXXRecordDecl>(RT->getDecl()) &&
      (WasArray || !FD->hasAttr<NoUniqueAddressAttr>()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays))
      return false;
  return true;
}

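// Hedged example: a record whose only members are unnamed bit-fields, e.g.
//   struct Pad { int : 16; int : 16; };
// is "empty" under these predicates, while any record with a named scalar
// field or a flexible array member is not.
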
/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}

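// Illustrative example (editor's sketch): both
//   struct A { double d; };
//   struct B { struct A a[1]; };
// are single-element structs whose element type is 'double', whereas
//   struct C { double d; float f; };  // two non-empty fields
//   struct D { double d[2]; };        // array bound != 1
// are not.
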
static Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr,
                              QualType Ty, const ABIArgInfo &AI) {
  // This default implementation defers to the llvm backend's va_arg
  // instruction. It can handle only passing arguments directly
  // (typically only handled in the backend for primitive types), or
  // aggregates passed indirectly by pointer (NOTE: if the "byval"
  // flag has ABI impact in the callee, this implementation cannot
  // work.)
  //
  // Only a few cases are covered here at the moment -- those needed
  // by the default abi.
  llvm::Value *Val;

  if (AI.isIndirect()) {
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(
        !AI.getIndirectRealign() &&
        "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");

    auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
    CharUnits TyAlignForABI = TyInfo.second;

    llvm::Type *BaseTy =
        llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
    llvm::Value *Addr =
        CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
    return Address(Addr, TyAlignForABI);
  } else {
    assert((AI.isDirect() || AI.isExtend()) &&
           "Unexpected ArgInfo Kind in generic VAArg emitter!");

    assert(!AI.getInReg() &&
           "Unexpected InReg seen in arginfo in generic VAArg emitter!");
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(!AI.getDirectOffset() &&
           "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
    assert(!AI.getCoerceToType() &&
           "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");

    Address Temp = CGF.CreateMemTemp(Ty, "varet");
    Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty));
    CGF.Builder.CreateStore(Val, Temp);
    return Temp;
  }
}

/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override {
    return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
  }
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
};

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    return getNaturalAlignIndirect(Ty);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  ASTContext &Context = getContext();
  if (const auto *EIT = Ty->getAs<ExtIntType>())
    if (EIT->getNumBits() >
        Context.getTypeSize(Context.getTargetInfo().hasInt128Type()
                                ? Context.Int128Ty
                                : Context.LongLongTy))
      return getNaturalAlignIndirect(Ty);

  return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                            : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  if (const auto *EIT = RetTy->getAs<ExtIntType>())
    if (EIT->getNumBits() >
        getContext().getTypeSize(getContext().getTargetInfo().hasInt128Type()
                                     ? getContext().Int128Ty
                                     : getContext().LongLongTy))
      return getNaturalAlignIndirect(RetTy);

  return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                               : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// WebAssembly ABI Implementation
//
// This is a very simple ABI that relies a lot on DefaultABIInfo.
//===----------------------------------------------------------------------===//

class WebAssemblyABIInfo final : public SwiftABIInfo {
public:
  enum ABIKind {
    MVP = 0,
    ExperimentalMV = 1,
  };

private:
  DefaultABIInfo defaultInfo;
  ABIKind Kind;

public:
  explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind)
      : SwiftABIInfo(CGT), defaultInfo(CGT), Kind(Kind) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo and EmitVAArg are virtual, so we
  // overload them.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &Arg : FI.arguments())
      Arg.info = classifyArgumentType(Arg.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }

  bool isSwiftErrorInRegister() const override {
    return false;
  }
};

class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
public:
  explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                                        WebAssemblyABIInfo::ABIKind K)
      : TargetCodeGenInfo(std::make_unique<WebAssemblyABIInfo>(CGT, K)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
    if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (const auto *Attr = FD->getAttr<WebAssemblyImportModuleAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        llvm::AttrBuilder B;
        B.addAttribute("wasm-import-module", Attr->getImportModule());
        Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
      }
      if (const auto *Attr = FD->getAttr<WebAssemblyImportNameAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        llvm::AttrBuilder B;
        B.addAttribute("wasm-import-name", Attr->getImportName());
        Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
      }
      if (const auto *Attr = FD->getAttr<WebAssemblyExportNameAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        llvm::AttrBuilder B;
        B.addAttribute("wasm-export-name", Attr->getExportName());
        Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
      }
    }

    if (auto *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      if (!FD->doesThisDeclarationHaveABody() && !FD->hasPrototype())
        Fn->addFnAttr("no-prototype");
    }
  }
};

/// Classify argument of given type \p Ty.
ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();
    // Lower single-element structs to just pass a regular value. TODO: We
    // could do reasonable-size multiple-element structs too, using getExpand(),
    // though watch out for things like bitfields.
    if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
    // For the experimental multivalue ABI, fully expand all other aggregates
    if (Kind == ABIKind::ExperimentalMV) {
      const RecordType *RT = Ty->getAs<RecordType>();
      assert(RT);
      bool HasBitField = false;
      for (auto *Field : RT->getDecl()->fields()) {
        if (Field->isBitField()) {
          HasBitField = true;
          break;
        }
      }
      if (!HasBitField)
        return ABIArgInfo::getExpand();
    }
  }

  // Otherwise just do the default thing.
  return defaultInfo.classifyArgumentType(Ty);
}

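// Sketch of the resulting classification (illustrative, not from the source):
//   struct One { float f; };    // single-element  -> Direct(float)
//   struct Pad { int : 32; };   // empty           -> Ignore
//   struct Pair { int a, b; };  // MVP default     -> indirect
// and under ABIKind::ExperimentalMV the Pair case becomes Expand instead.
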
ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
  if (isAggregateTypeForABI(RetTy)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // returned by value.
    if (!getRecordArgABI(RetTy, getCXXABI())) {
      // Ignore empty structs/unions.
      if (isEmptyRecord(getContext(), RetTy, true))
        return ABIArgInfo::getIgnore();
      // Lower single-element structs to just return a regular value. TODO: We
      // could do reasonable-size multiple-element structs too, using
      // ABIArgInfo::getDirect().
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
      // For the experimental multivalue ABI, return all other aggregates
      if (Kind == ABIKind::ExperimentalMV)
        return ABIArgInfo::getDirect();
    }
  }

  // Otherwise just do the default thing.
  return defaultInfo.classifyReturnType(RetTy);
}

Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                      QualType Ty) const {
  bool IsIndirect = isAggregateTypeForABI(Ty) &&
                    !isEmptyRecord(getContext(), Ty, true) &&
                    !isSingleElementStruct(Ty, getContext());
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4),
                          /*AllowHigherAlign=*/true);
}

//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI. Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF,
                    Address VAListAddr, QualType Ty) const override;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<PNaClABIInfo>(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  // The PNaCL ABI is a bit odd, in that varargs don't use normal
  // function classification. Structs get passed directly for varargs
  // functions, through a rewriting transform in
  // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
  // this target to actually support a va_arg instruction with an
  // aggregate type, unlike other targets.
  return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
}

/// Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    return getNaturalAlignIndirect(Ty);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  } else if (const auto *EIT = Ty->getAs<ExtIntType>()) {
    // Treat extended integers as integers if <=64, otherwise pass indirectly.
    if (EIT->getNumBits() > 64)
      return getNaturalAlignIndirect(Ty);
    return ABIArgInfo::getDirect();
  }

  return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                            : ABIArgInfo::getDirect());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat extended integers as integers if <=64, otherwise pass indirectly.
  if (const auto *EIT = RetTy->getAs<ExtIntType>()) {
    if (EIT->getNumBits() > 64)
      return getNaturalAlignIndirect(RetTy);
    return ABIArgInfo::getDirect();
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                               : ABIArgInfo::getDirect());
}

/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
                     .Cases("y", "&y", "^Ym", true)
                     .Default(false);
  if (IsMMXCons && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getPrimitiveSizeInBits().getFixedSize() !=
        64) {
      // Invalid MMX constraint
      return nullptr;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}

/// Returns true if this type can be passed in SSE registers with the
/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
      if (BT->getKind() == BuiltinType::LongDouble) {
        if (&Context.getTargetInfo().getLongDoubleFormat() ==
            &llvm::APFloat::x87DoubleExtended())
          return false;
      }
      return true;
    }
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
    // registers specially.
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
  }
  return false;
}

/// Returns true if this aggregate is small enough to be passed in SSE registers
/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}

/// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
  auto AI = ABIArgInfo::getDirect(T);
  AI.setInReg(true);
  AI.setCanBeFlattened(false);
  return AI;
}

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(CGFunctionInfo &FI)
      : IsPreassigned(FI.arg_size()), CC(FI.getCallingConvention()) {}

  llvm::SmallBitVector IsPreassigned;
  unsigned CC = CallingConv::CC_C;
  unsigned FreeRegs = 0;
  unsigned FreeSSERegs = 0;
};

enum {
  // Vectorcall only allows the first 6 parameters to be passed in registers.
  VectorcallMaxParamNumAsReg = 6
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public SwiftABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsRetSmallStructInRegABI;
  bool IsWin32StructABI;
  bool IsSoftFloatABI;
  bool IsMCUABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;

  /// Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType Ty, CCState &State) const;

  /// Updates the number of available free registers, returns
  /// true if any registers were allocated.
  bool updateFreeRegs(QualType Ty, CCState &State) const;

  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
                                bool &NeedsPadding) const;
  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;

  bool canExpandIndirectArgument(QualType Ty) const;

  /// Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           CharUnits &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;
  void runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const;

public:

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                bool RetSmallStructInRegABI, bool Win32StructABI,
                unsigned NumRegisterParameters, bool SoftFloatABI)
      : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
        IsRetSmallStructInRegABI(RetSmallStructInRegABI),
        IsWin32StructABI(Win32StructABI),
        IsSoftFloatABI(SoftFloatABI),
        IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
        DefaultNumRegisterParameters(NumRegisterParameters) {}

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    // LLVM's x86-32 lowering currently only assigns up to three
    // integer registers and three fp registers. Oddly, it'll use up to
    // four vector registers for vectors, but those can overlap with the
    // scalar registers.
    return occupiesMoreThan(CGT, scalars, /*total*/ 3);
  }

  bool isSwiftErrorInRegister() const override {
    // x86-32 lowering does not support passing swifterror in a register.
    return false;
  }
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                          bool RetSmallStructInRegABI, bool Win32StructABI,
                          unsigned NumRegisterParameters, bool SoftFloatABI)
      : TargetCodeGenInfo(std::make_unique<X86_32ABIInfo>(
            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
            NumRegisterParameters, SoftFloatABI)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) | // jmp rel8
                   (0x06 << 8) | // .+0x08
                   ('v' << 16) |
                   ('2' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "movl\t%ebp, %ebp"
           "\t\t// marker for objc_retainAutoreleaseReturnValue";
  }
};

/// Rewrite input constraint references after adding some output constraints.
/// In the case where there is one output and one input and we add one output,
/// we need to replace all operand references greater than or equal to 1:
///     mov $0, $1
///     mov eax, $1
/// The result will be:
///     mov $0, $2
///     mov eax, $2
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // We have an operand reference.
      size_t DigitStart = Pos;
      if (AsmString[DigitStart] == '{') {
        OS << '{';
        ++DigitStart;
      }
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        OS << OperandIndex;
      } else {
        OS << OperandStr;
      }
      Pos = DigitEnd;
    }
  }
  AsmString = std::move(OS.str());
}

/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // larger.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(CGF),
                                                  CoerceTy->getPointerTo()));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin and MCU ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // For i386, type must be register sized.
  // For the MCU ABI, it only needs to be <= 8-byte
  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128- bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding. (We're specifically looking for 32
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
                          uint64_t &Size) {
  for (const auto *FD : RD->fields()) {
    // Scalar arguments on the stack get 4 byte alignment on x86. If the
    // argument is smaller than 32-bits, expanding the struct will create
    // alignment padding.
    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
    // how to expand them yet, and the predicate for telling if a bitfield still
    // counts as "basic" is more complicated than what we were doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }
  return true;
}

static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD,
                                 uint64_t &Size) {
  // Don't do this if there are any non-empty bases.
  for (const CXXBaseSpecifier &Base : RD->bases()) {
    if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(),
                              Size))
      return false;
  }
  if (!addFieldSizes(Context, RD, Size))
    return false;
  return true;
}

/// Test whether an argument type which is to be passed indirectly (on the
/// stack) would have the equivalent layout if it was expanded into separate
/// arguments. If so, we prefer to do the latter to avoid inhibiting
/// optimizations.
bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  uint64_t Size = 0;
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!IsWin32StructABI) {
      // On non-Windows, we have to conservatively match our old bitcode
      // prototypes in order to be ABI-compatible at the bitcode level.
      if (!CXXRD->isCLike())
        return false;
    } else {
      // Don't do this for dynamic classes.
      if (CXXRD->isDynamicClass())
        return false;
    }
    if (!addBaseAndFieldSizes(getContext(), CXXRD, Size))
      return false;
  } else {
    if (!addFieldSizes(getContext(), RD, Size))
      return false;
  }

  // We can do this if there was no alignment padding.
  return Size == getContext().getTypeSize(Ty);
}

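// Hedged example: struct P { int x; float y; } has two 32-bit "basic" fields
// whose sizes sum to 64 bits == getTypeSize(P), so it may be expanded into
// separate arguments. A struct { char c; int i; } may not: 'char' is not a
// 32- or 64-bit basic type, and expanding it would drop the padding after 'c'.
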
ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy,
                                                  CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (State.FreeRegs) {
    --State.FreeRegs;
    if (!IsMCUABI)
      return getNaturalAlignIndirectInReg(RetTy);
  }
  return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if ((State.CC == llvm::CallingConv::X86_VectorCall ||
       State.CC == llvm::CallingConv::X86_RegCall) &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // The LLVM struct type for such an aggregate should lower properly.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
            llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return getIndirectReturnResult(RetTy, State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(RetTy, State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(RetTy, State);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), RetTy, true))
      return ABIArgInfo::getIgnore();

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // fields.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
    }

    return getIndirectReturnResult(RetTy, State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  if (const auto *EIT = RetTy->getAs<ExtIntType>())
    if (EIT->getNumBits() > 64)
      return getIndirectReturnResult(RetTy, State);

  return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                               : ABIArgInfo::getDirect());
}

static bool isSIMDVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSIMDVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isRecordWithSIMDVectorType(Context, I.getType()))
        return false;

  for (const auto *i : RD->fields()) {
    QualType FT = i->getType();

    if (isSIMDVectorType(Context, FT))
      return true;

    if (isRecordWithSIMDVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // Otherwise, if the alignment is less than or equal to the minimum ABI
  // alignment, just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSIMDVectorType(getContext(), Ty) ||
                      isRecordWithSIMDVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      if (!IsMCUABI)
        return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
                                 /*ByVal=*/true, Realign);
}

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
  if (!IsSoftFloatABI) {
    Class C = classify(Ty);
    if (C == Float)
      return false;
  }

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (!IsMCUABI) {
    if (SizeInRegs > State.FreeRegs) {
      State.FreeRegs = 0;
      return false;
    }
  } else {
    // The MCU psABI allows passing parameters in-reg even if there are
    // earlier parameters that are passed on the stack. Also,
    // it does not allow passing >8-byte structs in-register,
    // even if there are 3 free registers available.
    if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
      return false;
  }

  State.FreeRegs -= SizeInRegs;
  return true;
}

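// Worked example (illustrative): with the default 32-bit ABI and
// State.FreeRegs == 3, an 8-byte struct needs SizeInRegs == (64 + 31) / 32
// == 2 registers, so the update succeeds and one register remains free. A
// 'double' classifies as Float and is rejected immediately (no integer
// registers consumed) unless the soft-float ABI is in effect.
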
bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
                                             bool &InReg,
                                             bool &NeedsPadding) const {
  // On Windows, aggregates other than HFAs are never passed in registers, and
  // they do not consume register slots. Homogenous floating-point aggregates
  // (HFAs) have already been dealt with at this point.
  if (IsWin32StructABI && isAggregateTypeForABI(Ty))
    return false;

  NeedsPadding = false;
  InReg = !IsMCUABI;

  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return true;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}

bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return true;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) > 32)
      return false;

    return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
            Ty->isReferenceType());
  }

  return true;
}

1727 void X86_32ABIInfo::runVectorCallFirstPass(CGFunctionInfo &FI, CCState &State) const {
  // The vectorcall convention on x86 works subtly differently than on x64, so
  // the format here is a bit different from the x64 version.  First, all
  // vector types (not HVAs) are assigned, with the first 6 ending up in the
  // [XYZ]MM0-5 registers.  This differs from the x64 implementation, where
  // the first 6 by INDEX get registers.
  // In the second pass over the arguments, HVAs are passed in the remaining
  // vector registers if possible, or indirectly by address. The address will be
  // passed in ECX/EDX if available. Any other arguments are passed according to
  // the usual fastcall rules.
1737 MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
1738 for (int I = 0, E = Args.size(); I < E; ++I) {
1739 const Type *Base = nullptr;
1740 uint64_t NumElts = 0;
1741 const QualType &Ty = Args[I].type;
1742 if ((Ty->isVectorType() || Ty->isBuiltinType()) &&
1743 isHomogeneousAggregate(Ty, Base, NumElts)) {
1744 if (State.FreeSSERegs >= NumElts) {
1745 State.FreeSSERegs -= NumElts;
1746 Args[I].info = ABIArgInfo::getDirectInReg();
        State.IsPreassigned.set(I);
      }
    }
  }
}
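// Example of the two passes (hypothetical vectorcall signature):
//   void __vectorcall f(__m128 a, struct HVA2 { __m128 x, y; } h, __m128 b);
// The first pass above assigns 'a' and 'b' to XMM registers and marks them
// preassigned; 'h' is a struct, so it is skipped here and handled in the
// second pass, where it either takes the remaining XMM registers or is passed
// indirectly by address.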
1753 ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
1754 CCState &State) const {
1755 // FIXME: Set alignment on indirect arguments.
1756 bool IsFastCall = State.CC == llvm::CallingConv::X86_FastCall;
1757 bool IsRegCall = State.CC == llvm::CallingConv::X86_RegCall;
1758 bool IsVectorCall = State.CC == llvm::CallingConv::X86_VectorCall;
1760 Ty = useFirstFieldIfTransparentUnion(Ty);
1761 TypeInfo TI = getContext().getTypeInfo(Ty);
  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, /*ByVal=*/false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      // The field index doesn't matter, we'll fix it up later.
      return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
    }
  }
  // Regcall uses the concept of a homogeneous vector aggregate, similar
  // to other targets.
1777 const Type *Base = nullptr;
1778 uint64_t NumElts = 0;
1779 if ((IsRegCall || IsVectorCall) &&
1780 isHomogeneousAggregate(Ty, Base, NumElts)) {
1781 if (State.FreeSSERegs >= NumElts) {
1782 State.FreeSSERegs -= NumElts;
      // Vectorcall passes HVAs directly and does not flatten them, but regcall
      // does, so we leave them as arrays.
      if (IsVectorCall)
        return getDirectX86Hva();
      if (Ty->isBuiltinType() || Ty->isVectorType())
        return ABIArgInfo::getDirect();
      return ABIArgInfo::getExpand();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }
1796 if (isAggregateTypeForABI(Ty)) {
1797 // Structures with flexible arrays are always indirect.
1798 // FIXME: This should not be byval!
1799 if (RT && RT->getDecl()->hasFlexibleArrayMember())
1800 return getIndirectResult(Ty, true, State);
1802 // Ignore empty structs/unions on non-Windows.
1803 if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
1804 return ABIArgInfo::getIgnore();
1806 llvm::LLVMContext &LLVMContext = getVMContext();
1807 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding = false;
    bool InReg;
    if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
      unsigned SizeInRegs = (TI.Width + 31) / 32;
      SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      if (InReg)
        return ABIArgInfo::getDirectInReg(Result);
      else
        return ABIArgInfo::getDirect(Result);
    }
1819 llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;
1821 // Pass over-aligned aggregates on Windows indirectly. This behavior was
1822 // added in MSVC 2015.
1823 if (IsWin32StructABI && TI.AlignIsRequired && TI.Align > 32)
1824 return getIndirectResult(Ty, /*ByVal=*/false, State);
1826 // Expand small (<= 128-bit) record types when we know that the stack layout
1827 // of those arguments will match the struct. This is important because the
    // LLVM backend isn't smart enough to remove byval, which inhibits many
    // optimizations.
1830 // Don't do this for the MCU if there are still free integer registers
1831 // (see X86_64 ABI for full explanation).
1832 if (TI.Width <= 4 * 32 && (!IsMCUABI || State.FreeRegs == 0) &&
1833 canExpandIndirectArgument(Ty))
1834 return ABIArgInfo::getExpandWithPadding(
1835 IsFastCall || IsVectorCall || IsRegCall, PaddingType);
    return getIndirectResult(Ty, /*ByVal=*/true, State);
  }
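  // As a concrete illustration (hypothetical type): struct S { int a, b; } is
  // 64 bits wide and expandable, so it becomes two separate i32 arguments
  // whose stack layout matches the in-memory struct; under fastcall,
  // vectorcall, and regcall the PaddingType above keeps the register slots
  // consistent.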
1840 if (const VectorType *VT = Ty->getAs<VectorType>()) {
1841 // On Windows, vectors are passed directly if registers are available, or
1842 // indirectly if not. This avoids the need to align argument memory. Pass
1843 // user-defined vector types larger than 512 bits indirectly for simplicity.
1844 if (IsWin32StructABI) {
1845 if (TI.Width <= 512 && State.FreeSSERegs > 0) {
1846 --State.FreeSSERegs;
1847 return ABIArgInfo::getDirectInReg();
      }
      return getIndirectResult(Ty, /*ByVal=*/false, State);
    }
    // On Darwin, some vectors are passed in memory; we handle this by passing
    // them as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      if ((TI.Width == 8 || TI.Width == 16 || TI.Width == 32) ||
          (TI.Width == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(
            llvm::IntegerType::get(getVMContext(), TI.Width));
    }
1861 if (IsX86_MMXType(CGT.ConvertType(Ty)))
1862 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));
    return ABIArgInfo::getDirect();
  }
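  // E.g. (sketch): on Darwin a <2 x char> argument (TI.Width == 16) is passed
  // as an i16 and a <1 x long long> as an i64, while an MMX vector type is
  // coerced to i64 by the IsX86_MMXType branch above.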
1868 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1869 Ty = EnumTy->getDecl()->getIntegerType();
1871 bool InReg = shouldPrimitiveUseInReg(Ty, State);
  if (isPromotableIntegerTypeForABI(Ty)) {
    if (InReg)
      return ABIArgInfo::getExtendInReg(Ty);
    return ABIArgInfo::getExtend(Ty);
  }

  if (const auto *EIT = Ty->getAs<ExtIntType>()) {
    if (EIT->getNumBits() <= 64) {
      if (InReg)
        return ABIArgInfo::getDirectInReg();
      return ABIArgInfo::getDirect();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}
void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  CCState State(FI);
  if (IsMCUABI)
    State.FreeRegs = 3;
  else if (State.CC == llvm::CallingConv::X86_FastCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 3;
  } else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 6;
  } else if (FI.getHasRegParm())
    State.FreeRegs = FI.getRegParm();
  else if (State.CC == llvm::CallingConv::X86_RegCall) {
    State.FreeRegs = 5;
    State.FreeSSERegs = 8;
  } else if (IsWin32StructABI) {
    // Since MSVC 2015, the first three SSE vectors have been passed in
    // registers. The rest are passed indirectly.
    State.FreeRegs = DefaultNumRegisterParameters;
    State.FreeSSERegs = 3;
  } else
    State.FreeRegs = DefaultNumRegisterParameters;
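  // For instance (a sketch): under fastcall, State.FreeRegs == 2 models ECX
  // and EDX; a 32-bit integer classified while a register is still free
  // becomes getDirectInReg() and decrements FreeRegs, and later arguments
  // fall back to the stack once FreeRegs reaches 0.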
1916 if (!::classifyReturnType(getCXXABI(), FI, *this)) {
1917 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
1918 } else if (FI.getReturnInfo().isIndirect()) {
1919 // The C++ ABI is not aware of register usage, so we have to check if the
1920 // return value was sret and put it in a register ourselves if appropriate.
1921 if (State.FreeRegs) {
1922 --State.FreeRegs; // The sret parameter consumes a register.
      if (!IsMCUABI)
        FI.getReturnInfo().setInReg(true);
    }
  }
1928 // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++State.FreeRegs;
1932 // For vectorcall, do a first pass over the arguments, assigning FP and vector
1933 // arguments to XMM registers as available.
1934 if (State.CC == llvm::CallingConv::X86_VectorCall)
1935 runVectorCallFirstPass(FI, State);
1937 bool UsedInAlloca = false;
1938 MutableArrayRef<CGFunctionInfoArgInfo> Args = FI.arguments();
1939 for (int I = 0, E = Args.size(); I < E; ++I) {
1940 // Skip arguments that have already been assigned.
    if (State.IsPreassigned.test(I))
      continue;
1944 Args[I].info = classifyArgumentType(Args[I].type, State);
    UsedInAlloca |= (Args[I].info.getKind() == ABIArgInfo::InAlloca);
  }
1948 // If we needed to use inalloca for any argument, do a second pass and rewrite
1949 // all the memory arguments to use inalloca.
  if (UsedInAlloca)
    rewriteWithInAlloca(FI);
}
void
X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
1956 CharUnits &StackOffset, ABIArgInfo &Info,
1957 QualType Type) const {
1958 // Arguments are always 4-byte-aligned.
1959 CharUnits WordSize = CharUnits::fromQuantity(4);
1960 assert(StackOffset.isMultipleOf(WordSize) && "unaligned inalloca struct");
1962 // sret pointers and indirect things will require an extra pointer
1963 // indirection, unless they are byval. Most things are byval, and will not
1964 // require this indirection.
1965 bool IsIndirect = false;
  if (Info.isIndirect() && !Info.getIndirectByVal())
    IsIndirect = true;
1968 Info = ABIArgInfo::getInAlloca(FrameFields.size(), IsIndirect);
1969 llvm::Type *LLTy = CGT.ConvertTypeForMem(Type);
  if (IsIndirect)
    LLTy = LLTy->getPointerTo(0);
1972 FrameFields.push_back(LLTy);
1973 StackOffset += IsIndirect ? WordSize : getContext().getTypeSizeInChars(Type);
1975 // Insert padding bytes to respect alignment.
1976 CharUnits FieldEnd = StackOffset;
1977 StackOffset = FieldEnd.alignTo(WordSize);
1978 if (StackOffset != FieldEnd) {
1979 CharUnits NumBytes = StackOffset - FieldEnd;
1980 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
1981 Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
    FrameFields.push_back(Ty);
  }
}
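// Worked example (hypothetical field): appending a 6-byte struct at
// StackOffset 0 advances StackOffset to 6; alignTo(WordSize) rounds that up
// to 8, so a [2 x i8] padding field is appended and the next argument starts
// on a 4-byte boundary.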
static bool isArgInAlloca(const ABIArgInfo &Info) {
  // Leave ignored and inreg arguments alone.
  switch (Info.getKind()) {
  case ABIArgInfo::InAlloca:
    return true;
  case ABIArgInfo::Ignore:
    return false;
  case ABIArgInfo::Indirect:
  case ABIArgInfo::Direct:
  case ABIArgInfo::Extend:
    return !Info.getInReg();
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
    // These are aggregate types which are never passed in registers when
    // inalloca is involved.
    return true;
  }
  llvm_unreachable("invalid enum");
}
2006 void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
2007 assert(IsWin32StructABI && "inalloca only supported on win32");
2009 // Build a packed struct type for all of the arguments in memory.
2010 SmallVector<llvm::Type *, 6> FrameFields;
2012 // The stack alignment is always 4.
2013 CharUnits StackAlign = CharUnits::fromQuantity(4);
2015 CharUnits StackOffset;
2016 CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
2018 // Put 'this' into the struct before 'sret', if necessary.
  bool IsThisCall =
      FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
2021 ABIArgInfo &Ret = FI.getReturnInfo();
2022 if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
2023 isArgInAlloca(I->info)) {
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
    ++I;
  }
2028 // Put the sret parameter into the inalloca struct if it's in memory.
2029 if (Ret.isIndirect() && !Ret.getInReg()) {
2030 addFieldToArgStruct(FrameFields, StackOffset, Ret, FI.getReturnType());
2031 // On Windows, the hidden sret parameter is always returned in eax.
2032 Ret.setInAllocaSRet(IsWin32StructABI);
  }

  // Skip the 'this' parameter in ecx.
  if (IsThisCall)
    ++I;
2039 // Put arguments passed in memory into the struct.
2040 for (; I != E; ++I) {
2041 if (isArgInAlloca(I->info))
2042 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
  }

  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
                                        /*isPacked=*/true), StackAlign);
}
2050 Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
2051 Address VAListAddr, QualType Ty) const {
2053 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
2055 // x86-32 changes the alignment of certain arguments on the stack.
2057 // Just messing with TypeInfo like this works because we never pass
2058 // anything indirectly.
2059 TypeInfo.second = CharUnits::fromQuantity(
2060 getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity()));
2062 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
2063 TypeInfo, CharUnits::fromQuantity(4),
                          /*AllowHigherAlign*/ true);
}
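// Example (a sketch): va_arg of a type whose stack alignment computes to 16
// (e.g. a SIMD-containing record on Darwin) rounds the va_list pointer up to
// a 16-byte boundary before loading, while a plain int keeps the default
// 4-byte slot.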
2067 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
2068 const llvm::Triple &Triple, const CodeGenOptions &Opts) {
2069 assert(Triple.getArch() == llvm::Triple::x86);
  switch (Opts.getStructReturnConvention()) {
  case CodeGenOptions::SRCK_Default:
    break;
  case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
    return false;
  case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
    return true;
  }

  if (Triple.isOSDarwin() || Triple.isOSIAMCU())
    return true;
  switch (Triple.getOS()) {
  case llvm::Triple::DragonFly:
  case llvm::Triple::FreeBSD:
  case llvm::Triple::OpenBSD:
  case llvm::Triple::Win32:
    return true;
  default:
    return false;
  }
}
2094 void X86_32TargetCodeGenInfo::setTargetAttributes(
2095 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  if (GV->isDeclaration())
    return;
2098 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2099 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2100 llvm::Function *Fn = cast<llvm::Function>(GV);
2101 Fn->addFnAttr("stackrealign");
2103 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2104 llvm::Function *Fn = cast<llvm::Function>(GV);
2105 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2110 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
2111 CodeGen::CodeGenFunction &CGF,
2112 llvm::Value *Address) const {
2113 CodeGen::CGBuilderTy &Builder = CGF.Builder;
2115 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
2117 // 0-7 are the eight integer registers; the order is different
  //   on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);
2122 if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
2123 // 12-16 are st(0..4). Not sure why we stop at 4.
2124 // These have size 16, which is sizeof(long double) on
2125 // platforms with 8-byte alignment for that type.
2126 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
2127 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateAlignedStore(
        Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
        CharUnits::One());
2136 // 11-16 are st(0..5). Not sure why we stop at 5.
2137 // These have size 12, which is sizeof(long double) on
2138 // platforms with 4-byte alignment for that type.
2139 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}
2146 //===----------------------------------------------------------------------===//
2147 // X86-64 ABI Implementation
2148 //===----------------------------------------------------------------------===//
2152 /// The AVX ABI level for X86 targets.
enum class X86AVXABILevel {
  None,
  AVX,
  AVX512
};
/// Returns the size in bits of the largest (native) vector for \p AVXLevel.
static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
  switch (AVXLevel) {
  case X86AVXABILevel::AVX512:
    return 512;
  case X86AVXABILevel::AVX:
    return 256;
  case X86AVXABILevel::None:
    return 128;
  }
  llvm_unreachable("Unknown AVXLevel");
}
2172 /// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public SwiftABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };
2185 /// merge - Implement the X86_64 ABI merging algorithm.
2187 /// Merge an accumulating classification \arg Accum with a field
2188 /// classification \arg Field.
2190 /// \param Accum - The accumulating classification. This should
2191 /// always be either NoClass or the result of a previous merge
2192 /// call. In addition, this should never be Memory (the caller
2193 /// should just return Memory for the aggregate).
2194 static Class merge(Class Accum, Class Field);
2196 /// postMerge - Implement the X86_64 ABI post merging algorithm.
2198 /// Post merger cleanup, reduces a malformed Hi and Lo pair to
2199 /// final MEMORY or SSE classes when necessary.
2201 /// \param AggregateSize - The size of the current aggregate in
2202 /// the classification process.
2204 /// \param Lo - The classification for the parts of the type
2205 /// residing in the low word of the containing object.
2207 /// \param Hi - The classification for the parts of the type
2208 /// residing in the higher words of the containing object.
2210 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
2212 /// classify - Determine the x86_64 register classes in which the
2213 /// given type T should be passed.
2215 /// \param Lo - The classification for the parts of the type
2216 /// residing in the low word of the containing object.
2218 /// \param Hi - The classification for the parts of the type
2219 /// residing in the high word of the containing object.
2221 /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
2223 /// depending on whether they straddle an eightbyte boundary.
2225 /// \param isNamedArg - Whether the argument in question is a "named"
2226 /// argument, as used in AMD64-ABI 3.5.7.
2228 /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
2232 /// The \arg Lo class will be NoClass iff the argument is ignored.
2234 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
2235 /// also be ComplexX87.
2236 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
2237 bool isNamedArg) const;
2239 llvm::Type *GetByteVectorType(QualType Ty) const;
2240 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
2241 unsigned IROffset, QualType SourceTy,
2242 unsigned SourceOffset) const;
2243 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
2244 unsigned IROffset, QualType SourceTy,
2245 unsigned SourceOffset) const;
  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be returned in memory.
2249 ABIArgInfo getIndirectReturnResult(QualType Ty) const;
  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  /// \param freeIntRegs - The number of free integer registers remaining
  /// available.
2256 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
2258 ABIArgInfo classifyReturnType(QualType RetTy) const;
2260 ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
2261 unsigned &neededInt, unsigned &neededSSE,
2262 bool isNamedArg) const;
2264 ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
2265 unsigned &NeededSSE) const;
2267 ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
2268 unsigned &NeededSSE) const;
2270 bool IsIllegalVectorType(QualType Ty) const;
2272 /// The 0.98 ABI revision clarified a lot of ambiguities,
2273 /// unfortunately in ways that were not always consistent with
2274 /// certain previous compilers. In particular, platforms which
2275 /// required strict binary compatibility with older versions of GCC
2276 /// may need to exempt themselves.
2277 bool honorsRevision0_98() const {
2278 return !getTarget().getTriple().isOSDarwin();
2281 /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to
2282 /// classify it as INTEGER (for compatibility with older clang compilers).
2283 bool classifyIntegerMMXAsSSE() const {
2284 // Clang <= 3.8 did not do this.
2285 if (getContext().getLangOpts().getClangABICompat() <=
        LangOptions::ClangABI::Ver3_8)
      return false;
2289 const llvm::Triple &Triple = getTarget().getTriple();
    if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
      return false;
    if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
      return false;
    return true;
  }
2297 // GCC classifies vectors of __int128 as memory.
2298 bool passInt128VectorsInMem() const {
2299 // Clang <= 9.0 did not do this.
2300 if (getContext().getLangOpts().getClangABICompat() <=
        LangOptions::ClangABI::Ver9)
      return false;
2304 const llvm::Triple &T = getTarget().getTriple();
2305 return T.isOSLinux() || T.isOSNetBSD();
2308 X86AVXABILevel AVXLevel;
2309 // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;
2314 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
2315 SwiftABIInfo(CGT), AVXLevel(AVXLevel),
2316 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
2319 bool isPassedUsingAVXType(QualType type) const {
2320 unsigned neededInt, neededSSE;
2321 // The freeIntRegs argument doesn't matter here.
2322 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
2323 /*isNamedArg*/true);
2324 if (info.isDirect()) {
2325 llvm::Type *ty = info.getCoerceToType();
2326 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return vectorTy->getPrimitiveSizeInBits().getFixedSize() > 128;
    }
    return false;
  }
2332 void computeInfo(CGFunctionInfo &FI) const override;
2334 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2335 QualType Ty) const override;
2336 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
2337 QualType Ty) const override;
2339 bool has64BitPointers() const {
2340 return Has64BitPointers;
2343 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
2344 bool asReturnValue) const override {
2345 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  bool isSwiftErrorInRegister() const override {
    return true;
  }
};
2352 /// WinX86_64ABIInfo - The Windows X86_64 ABI information.
2353 class WinX86_64ABIInfo : public SwiftABIInfo {
2355 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2356 : SwiftABIInfo(CGT), AVXLevel(AVXLevel),
2357 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
2359 void computeInfo(CGFunctionInfo &FI) const override;
2361 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2362 QualType Ty) const override;
2364 bool isHomogeneousAggregateBaseType(QualType Ty) const override {
2365 // FIXME: Assumes vectorcall is in use.
2366 return isX86VectorTypeForVectorCall(getContext(), Ty);
2369 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
2370 uint64_t NumMembers) const override {
2371 // FIXME: Assumes vectorcall is in use.
2372 return isX86VectorCallAggregateSmallEnough(NumMembers);
2375 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars,
2376 bool asReturnValue) const override {
2377 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  bool isSwiftErrorInRegister() const override {
    return true;
  }

private:
2385 ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
2386 bool IsVectorCall, bool IsRegCall) const;
2387 ABIArgInfo reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
                                  const ABIArgInfo &current) const;
2389 void computeVectorCallArgs(CGFunctionInfo &FI, unsigned FreeSSERegs,
2390 bool IsVectorCall, bool IsRegCall) const;
  X86AVXABILevel AVXLevel;

  bool IsMingw64;
};
2397 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2399 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2400 : TargetCodeGenInfo(std::make_unique<X86_64ABIInfo>(CGT, AVXLevel)) {}
2402 const X86_64ABIInfo &getABIInfo() const {
2403 return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
2406 /// Disable tail call on x86-64. The epilogue code before the tail jump blocks
2407 /// the autoreleaseRV/retainRV optimization.
  bool shouldSuppressTailCallsOfRetainAutoreleasedReturnValue() const override {
    return true;
  }
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }
2416 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2417 llvm::Value *Address) const override {
2418 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }
2426 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
2427 StringRef Constraint,
2428 llvm::Type* Ty) const override {
2429 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2432 bool isNoProtoCallVariadic(const CallArgList &args,
2433 const FunctionNoProtoType *fnType) const override {
    // The default CC on x86-64 sets %al to the number of SSE
2435 // registers used, and GCC sets this when calling an unprototyped
2436 // function, so we override the default behavior. However, don't do
2437 // that when AVX types are involved: the ABI explicitly states it is
2438 // undefined, and it doesn't work in practice because of how the ABI
2439 // defines varargs anyway.
2440 if (fnType->getCallConv() == CC_C) {
2441 bool HasAVXType = false;
2442 for (CallArgList::const_iterator
2443 it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }
  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
2459 unsigned Sig = (0xeb << 0) | // jmp rel8
                   (0x06 << 8) |  //           .+0x08
                   ('v' << 16) |
                   ('2' << 24);
2463 return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
2466 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2467 CodeGen::CodeGenModule &CGM) const override {
    if (GV->isDeclaration())
      return;
2470 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2471 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2472 llvm::Function *Fn = cast<llvm::Function>(GV);
2473 Fn->addFnAttr("stackrealign");
2475 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2476 llvm::Function *Fn = cast<llvm::Function>(GV);
2477 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2482 void checkFunctionCallABI(CodeGenModule &CGM, SourceLocation CallLoc,
2483 const FunctionDecl *Caller,
2484 const FunctionDecl *Callee,
2485 const CallArgList &Args) const override;
2488 static void initFeatureMaps(const ASTContext &Ctx,
2489 llvm::StringMap<bool> &CallerMap,
2490 const FunctionDecl *Caller,
2491 llvm::StringMap<bool> &CalleeMap,
2492 const FunctionDecl *Callee) {
2493 if (CalleeMap.empty() && CallerMap.empty()) {
2494 // The caller is potentially nullptr in the case where the call isn't in a
2495 // function. In this case, the getFunctionFeatureMap ensures we just get
    // the TU level setting (since it cannot be modified by 'target').
2497 Ctx.getFunctionFeatureMap(CallerMap, Caller);
    Ctx.getFunctionFeatureMap(CalleeMap, Callee);
  }
}
2502 static bool checkAVXParamFeature(DiagnosticsEngine &Diag,
2503 SourceLocation CallLoc,
2504 const llvm::StringMap<bool> &CallerMap,
2505 const llvm::StringMap<bool> &CalleeMap,
                                 QualType Ty, StringRef Feature,
                                 bool IsArgument) {
  bool CallerHasFeat = CallerMap.lookup(Feature);
2509 bool CalleeHasFeat = CalleeMap.lookup(Feature);
2510 if (!CallerHasFeat && !CalleeHasFeat)
2511 return Diag.Report(CallLoc, diag::warn_avx_calling_convention)
2512 << IsArgument << Ty << Feature;
2514 // Mixing calling conventions here is very clearly an error.
2515 if (!CallerHasFeat || !CalleeHasFeat)
2516 return Diag.Report(CallLoc, diag::err_avx_calling_convention)
2517 << IsArgument << Ty << Feature;
  // Else, both caller and callee have the required feature, so there is no need
  // to diagnose.
  return false;
}
2524 static bool checkAVXParam(DiagnosticsEngine &Diag, ASTContext &Ctx,
2525 SourceLocation CallLoc,
2526 const llvm::StringMap<bool> &CallerMap,
                          const llvm::StringMap<bool> &CalleeMap, QualType Ty,
                          bool IsArgument) {
  uint64_t Size = Ctx.getTypeSize(Ty);
  if (Size > 256)
    return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty,
                                "avx512f", IsArgument);

  if (Size > 128)
    return checkAVXParamFeature(Diag, CallLoc, CallerMap, CalleeMap, Ty, "avx",
                                IsArgument);

  return false;
}
2541 void X86_64TargetCodeGenInfo::checkFunctionCallABI(
2542 CodeGenModule &CGM, SourceLocation CallLoc, const FunctionDecl *Caller,
2543 const FunctionDecl *Callee, const CallArgList &Args) const {
2544 llvm::StringMap<bool> CallerMap;
2545 llvm::StringMap<bool> CalleeMap;
2546 unsigned ArgIndex = 0;
  // We need to loop through the actual call arguments rather than the
  // function's parameters, in case this is a variadic call.
2550 for (const CallArg &Arg : Args) {
2551 // The "avx" feature changes how vectors >128 in size are passed. "avx512f"
2552 // additionally changes how vectors >256 in size are passed. Like GCC, we
2553 // warn when a function is called with an argument where this will change.
2554 // Unlike GCC, we also error when it is an obvious ABI mismatch, that is,
2555 // the caller and callee features are mismatched.
2556 // Unfortunately, we cannot do this diagnostic in SEMA, since the callee can
2557 // change its ABI with attribute-target after this call.
2558 if (Arg.getType()->isVectorType() &&
2559 CGM.getContext().getTypeSize(Arg.getType()) > 128) {
2560 initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
2561 QualType Ty = Arg.getType();
2562 // The CallArg seems to have desugared the type already, so for clearer
2563 // diagnostics, replace it with the type in the FunctionDecl if possible.
2564 if (ArgIndex < Callee->getNumParams())
2565 Ty = Callee->getParamDecl(ArgIndex)->getType();
2567 if (checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
                        CalleeMap, Ty, /*IsArgument*/ true))
        return;
    }
    ++ArgIndex;
  }
2574 // Check return always, as we don't have a good way of knowing in codegen
2575 // whether this value is used, tail-called, etc.
2576 if (Callee->getReturnType()->isVectorType() &&
2577 CGM.getContext().getTypeSize(Callee->getReturnType()) > 128) {
2578 initFeatureMaps(CGM.getContext(), CallerMap, Caller, CalleeMap, Callee);
2579 checkAVXParam(CGM.getDiags(), CGM.getContext(), CallLoc, CallerMap,
2580 CalleeMap, Callee->getReturnType(),
                  /*IsArgument*/ false);
  }
}
2585 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
2586 // If the argument does not end in .lib, automatically add the suffix.
2587 // If the argument contains a space, enclose it in quotes.
2588 // This matches the behavior of MSVC.
  bool Quote = (Lib.find(" ") != StringRef::npos);
  std::string ArgStr = Quote ? "\"" : "";
  ArgStr += Lib;
  if (!Lib.endswith_lower(".lib") && !Lib.endswith_lower(".a"))
    ArgStr += ".lib";
  ArgStr += Quote ? "\"" : "";
  return ArgStr;
}
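// Example behavior (illustrative): qualifyWindowsLibrary("my lib") returns
// "\"my lib.lib\"" (the suffix is added and the name is quoted because of the
// space), while qualifyWindowsLibrary("libfoo.a") is returned unchanged since
// it already ends in ".a".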
2598 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
2600 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2601 bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
2602 unsigned NumRegisterParameters)
2603 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
2604 Win32StructABI, NumRegisterParameters, false) {}
2606 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2607 CodeGen::CodeGenModule &CGM) const override;
2609 void getDependentLibraryOption(llvm::StringRef Lib,
2610 llvm::SmallString<24> &Opt) const override {
2611 Opt = "/DEFAULTLIB:";
2612 Opt += qualifyWindowsLibrary(Lib);
2615 void getDetectMismatchOption(llvm::StringRef Name,
2616 llvm::StringRef Value,
2617 llvm::SmallString<32> &Opt) const override {
2618 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2622 static void addStackProbeTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2623 CodeGen::CodeGenModule &CGM) {
2624 if (llvm::Function *Fn = dyn_cast_or_null<llvm::Function>(GV)) {
2626 if (CGM.getCodeGenOpts().StackProbeSize != 4096)
2627 Fn->addFnAttr("stack-probe-size",
2628 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
2629 if (CGM.getCodeGenOpts().NoStackArgProbe)
2630 Fn->addFnAttr("no-stack-arg-probe");
2634 void WinX86_32TargetCodeGenInfo::setTargetAttributes(
2635 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
2636 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
2639 addStackProbeTargetAttributes(D, GV, CGM);
2642 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2644 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2645 X86AVXABILevel AVXLevel)
2646 : TargetCodeGenInfo(std::make_unique<WinX86_64ABIInfo>(CGT, AVXLevel)) {}
2648 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2649 CodeGen::CodeGenModule &CGM) const override;
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }
2655 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2656 llvm::Value *Address) const override {
2657 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }
2665 void getDependentLibraryOption(llvm::StringRef Lib,
2666 llvm::SmallString<24> &Opt) const override {
2667 Opt = "/DEFAULTLIB:";
2668 Opt += qualifyWindowsLibrary(Lib);
2671 void getDetectMismatchOption(llvm::StringRef Name,
2672 llvm::StringRef Value,
2673 llvm::SmallString<32> &Opt) const override {
2674 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2678 void WinX86_64TargetCodeGenInfo::setTargetAttributes(
2679 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
2680 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
2683 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2684 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2685 llvm::Function *Fn = cast<llvm::Function>(GV);
2686 Fn->addFnAttr("stackrealign");
2688 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2689 llvm::Function *Fn = cast<llvm::Function>(GV);
2690 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2694 addStackProbeTargetAttributes(D, GV, CGM);
void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
2700 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
  // (a) If one of the classes is Memory, the whole argument is passed in
  //     memory.
  //
  // (b) If X87UP is not preceded by X87, the whole argument is passed in
  //     memory.
  //
2708 // (c) If the size of the aggregate exceeds two eightbytes and the first
2709 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
2710 // argument is passed in memory. NOTE: This is necessary to keep the
2711 // ABI working for processors that don't support the __m256 type.
2713 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
2715 // Some of these are enforced by the merging logic. Others can arise
2716 // only with unions; for example:
2717 // union { _Complex double; unsigned; }
2719 // Note that clauses (b) and (c) were added in 0.98.
  if (Hi == Memory)
    Lo = Memory;
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory;
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE;
}
2731 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
2732 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
2733 // classified recursively so that always two fields are
2734 // considered. The resulting class is calculated according to
2735 // the classes of the fields in the eightbyte:
2737 // (a) If both classes are equal, this is the resulting class.
  // (b) If one of the classes is NO_CLASS, the resulting class is
  //     the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  //     class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  //     INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  //     MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.
2753 // Accum should never be memory (we should have returned) or
2754 // ComplexX87 (because this cannot be passed in a structure).
2755 assert((Accum != Memory && Accum != ComplexX87) &&
2756 "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}
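// Worked example (hypothetical type): for union U { double d; unsigned u; },
// the single eightbyte merges SSE (from the double) with Integer (from the
// unsigned); rule (d) makes the result INTEGER, so U is passed in a GPR.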
2771 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
2772 Class &Lo, Class &Hi, bool isNamedArg) const {
2773 // FIXME: This code can be simplified by introducing a simple value class for
  // Class pairs with appropriate constructor methods for the various
  // situations.
2777 // FIXME: Some of the split computations are wrong; unaligned vectors
2778 // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;
2786 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
2787 BuiltinType::Kind k = BT->getKind();
    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad()) {
        Lo = SSE;
        Hi = SSEUp;
      } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
        Lo = X87;
        Hi = X87Up;
      } else if (LDF == &llvm::APFloat::IEEEdouble()) {
        Current = SSE;
      } else
        llvm_unreachable("unexpected long double representation!");
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    return;
  }
2816 if (const EnumType *ET = Ty->getAs<EnumType>()) {
2817 // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
    return;
  }

  if (Ty->hasPointerRepresentation()) {
    Current = Integer;
    return;
  }

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType()) {
      if (Has64BitPointers) {
        // If Has64BitPointers, this is an {i64, i64}, so classify both
        // halves as Integer.
        Lo = Hi = Integer;
      } else {
        // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
        // straddles an eightbyte boundary, Hi should be classified as well.
        uint64_t EB_FuncPtr = (OffsetBase) / 64;
        uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
        if (EB_FuncPtr != EB_ThisAdj) {
          Lo = Hi = Integer;
        } else {
          Current = Integer;
        }
      }
    } else {
      Current = Integer;
    }
    return;
  }
2850 if (const VectorType *VT = Ty->getAs<VectorType>()) {
2851 uint64_t Size = getContext().getTypeSize(VT);
2852 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
2853 // gcc passes the following as integer:
2854 // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
2855 // 2 bytes - <2 x char>, <1 x short>
      // 1 byte  - <1 x char>
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Lo = (OffsetBase) / 64;
      uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
      if (EB_Lo != EB_Hi)
        Hi = Lo;
    } else if (Size == 64) {
2866 QualType ElementType = VT->getElementType();
2868 // gcc passes <1 x double> in memory. :(
      if (ElementType->isSpecificBuiltinType(BuiltinType::Double))
        return;
2872 // gcc passes <1 x long long> as SSE but clang used to unconditionally
2873 // pass them as integer. For platforms where clang is the de facto
2874 // platform compiler, we must continue to use integer.
2875 if (!classifyIntegerMMXAsSSE() &&
2876 (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
2877 ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) ||
2878 ElementType->isSpecificBuiltinType(BuiltinType::Long) ||
           ElementType->isSpecificBuiltinType(BuiltinType::ULong)))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128 ||
2889 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
2890 QualType ElementType = VT->getElementType();
2892 // gcc passes 256 and 512 bit <X x __int128> vectors in memory. :(
2893 if (passInt128VectorsInMem() && Size != 128 &&
2894 (ElementType->isSpecificBuiltinType(BuiltinType::Int128) ||
           ElementType->isSpecificBuiltinType(BuiltinType::UInt128)))
        return;
2898 // Arguments of 256-bits are split into four eightbyte chunks. The
2899 // least significant one belongs to class SSE and all the others to class
2900 // SSEUP. The original Lo and Hi design considers that types can't be
2901 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
      // This design isn't correct for 256-bits, but since there are no cases
2903 // where the upper parts would need to be inspected, avoid adding
2904 // complexity and just consider Hi to match the 64-256 part.
2906 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
2907 // registers if they are "named", i.e. not part of the "..." of a
2908 // variadic function.
2910 // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are
      // split into eight eightbyte chunks, one SSE and seven SSEUP.
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }
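  // E.g. (a sketch): a named __m256 argument classifies as Lo = SSE with
  // Hi = SSEUp and travels in one YMM register; the same value passed through
  // the "..." of a variadic call fails the isNamedArg test above and stays
  // classified as Memory.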
2918 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
2919 QualType ET = getContext().getCanonicalType(CT->getElementType());
2921 uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == getContext().FloatTy) {
      Current = SSE;
    } else if (ET == getContext().DoubleTy) {
      Lo = Hi = SSE;
    } else if (ET == getContext().LongDoubleTy) {
2932 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad())
        Current = Memory;
      else if (LDF == &llvm::APFloat::x87DoubleExtended())
        Current = ComplexX87;
      else if (LDF == &llvm::APFloat::IEEEdouble())
        Lo = Hi = SSE;
      else
        llvm_unreachable("unexpected long double representation!");
    }
    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }
2953 if (const auto *EITy = Ty->getAs<ExtIntType>()) {
    if (EITy->getNumBits() <= 64)
      Current = Integer;
    else if (EITy->getNumBits() <= 128)
      Lo = Hi = Integer;
    // Larger values need to get passed in memory.
    return;
  }
2962 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
2963 // Arrays are treated like structures.
2965 uint64_t Size = getContext().getTypeSize(Ty);
    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than eight eightbytes, ..., it has class MEMORY.
    if (Size > 512)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;
2979 // Otherwise implement simplified merge. We could be smarter about
2980 // this, but it isn't worth it and would be harder to verify.
    Current = Integer;
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
2983 uint64_t ArraySize = AT->getSize().getZExtValue();
2985 // The only case a 256-bit wide vector could be used is when the array
2986 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
2987 // to work for sizes wider than 128, early check and fallback to memory.
    if (EltSize > 128 &&
        (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
      return;
2993 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
2994 Class FieldLo, FieldHi;
2995 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
2996 Lo = merge(Lo, FieldLo);
2997 Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }
3002 postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }
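  // Illustrative classification (hypothetical type): a double[2] member spans
  // two eightbytes; each element classifies as SSE, so the loop above yields
  // Lo == SSE and Hi == SSE and the enclosing aggregate can stay in two XMM
  // registers.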
3007 if (const RecordType *RT = Ty->getAs<RecordType>()) {
3008 uint64_t Size = getContext().getTypeSize(Ty);
3010 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than eight eightbytes, ..., it has class MEMORY.
    if (Size > 512)
      return;

    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    // reference.
    if (getRecordArgABI(RT, getCXXABI()))
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;
3027 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
    // Reset Lo class, this will be recomputed.
    Current = NoClass;
3032 // If this is a C++ record, classify the bases first.
3033 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3034 for (const auto &I : CXXRD->bases()) {
3035 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
3036 "Unexpected base class!");
        const CXXRecordDecl *Base =
            cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
3040 // Classify this field.
3042 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
3043 // single eightbyte, each is classified separately. Each eightbyte gets
3044 // initialized to class NO_CLASS.
3045 Class FieldLo, FieldHi;
        uint64_t Offset =
            OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
3048 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
3049 Lo = merge(Lo, FieldLo);
3050 Hi = merge(Hi, FieldHi);
3051 if (Lo == Memory || Hi == Memory) {
          postMerge(Size, Lo, Hi);
          break;
        }
      }
    }
    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
3060 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
3061 i != e; ++i, ++idx) {
3062 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
3063 bool BitField = i->isBitField();
3065 // Ignore padding bit-fields.
      if (BitField && i->isUnnamedBitfield())
        continue;
3069 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
3070 // four eightbytes, or it contains unaligned fields, it has class MEMORY.
3072 // The only case a 256-bit wide vector could be used is when the struct
3073 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
3074 // to work for sizes wider than 128, early check and fallback to memory.
3076 if (Size > 128 && (Size != getContext().getTypeSize(i->getType()) ||
                         Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }
3082 // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }
3089 // Classify this field.
3091 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
3092 // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
3095 Class FieldLo, FieldHi;
3097 // Bit-fields require special handling, they do not force the
3098 // structure to be passed in memory even if unaligned, and
3099 // therefore they can straddle an eightbyte.
      if (BitField) {
        assert(!i->isUnnamedBitfield());
        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidthValue(getContext());

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;

        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
3118 Lo = merge(Lo, FieldLo);
3119 Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }
    postMerge(Size, Lo, Hi);
  }
}
3128 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
3131 if (!isAggregateTypeForABI(Ty)) {
3132 // Treat an enum type as its underlying type.
3133 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3134 Ty = EnumTy->getDecl()->getIntegerType();
3136 if (Ty->isExtIntType())
3137 return getNaturalAlignIndirect(Ty);
    return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                              : ABIArgInfo::getDirect());
  }

  return getNaturalAlignIndirect(Ty);
}
3146 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
3147 if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
3148 uint64_t Size = getContext().getTypeSize(VecTy);
3149 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
    if (Size <= 64 || Size > LargestVector)
      return true;
3152 QualType EltTy = VecTy->getElementType();
3153 if (passInt128VectorsInMem() &&
3154 (EltTy->isSpecificBuiltinType(BuiltinType::Int128) ||
         EltTy->isSpecificBuiltinType(BuiltinType::UInt128)))
      return true;
  }

  return false;
}
3162 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
3163 unsigned freeIntRegs) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  //
3167 // This assumption is optimistic, as there could be free registers available
3168 // when we need to pass this argument in memory, and LLVM could try to pass
3169 // the argument in the free register. This does not seem to happen currently,
3170 // but this code would be much safer if we could mark the argument with
3171 // 'onstack'. See PR12193.
3172 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty) &&
3173 !Ty->isExtIntType()) {
3174 // Treat an enum type as its underlying type.
3175 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3176 Ty = EnumTy->getDecl()->getIntegerType();
3178 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                              : ABIArgInfo::getDirect());
  }
3182 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
3183 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
3185 // Compute the byval alignment. We specify the alignment of the byval in all
3186 // cases so that the mid-level optimizer knows the alignment of the byval.
3187 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
3189 // Attempt to avoid passing indirect results using byval when possible. This
3190 // is important for good codegen.
3192 // We do this by coercing the value into a scalar type which the backend can
3193 // handle naturally (i.e., without using byval).
3195 // For simplicity, we currently only do this when we have exhausted all of the
3196 // free integer registers. Doing this when there are free integer registers
3197 // would require more care, as we would have to ensure that the coerced value
  // did not claim the unused register. That would require either reordering the
  // arguments to the function (so that any subsequent inreg values came first),
  // or only doing this optimization when there were no following arguments that
  // might be inreg.
3203 // We currently expect it to be rare (particularly in well written code) for
3204 // arguments to be passed on the stack when there are still free integer
3205 // registers available (this would typically imply large structs being passed
3206 // by value), so this seems like a fair tradeoff for now.
3208 // We can revisit this if the backend grows support for 'onstack' parameter
3209 // attributes. See PR12193.
3210 if (freeIntRegs == 0) {
3211 uint64_t Size = getContext().getTypeSize(Ty);
3213 // If this type fits in an eightbyte, coerce it into the matching integral
3214 // type, which will end up on the stack (with alignment 8).
3215 if (Align == 8 && Size <= 64)
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
  }

  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
}
3223 /// The ABI specifies that a value should be passed in a full vector XMM/YMM
3224 /// register. Pick an LLVM IR type that will be passed as a vector register.
3225 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
3226 // Wrapper structs/arrays that only contain vectors are passed just like
3227 // vectors; strip them off if present.
3228 if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
3229 Ty = QualType(InnerTy, 0);
3231 llvm::Type *IRType = CGT.ConvertType(Ty);
3232 if (isa<llvm::VectorType>(IRType)) {
    // Don't pass vXi128 vectors in their native type, the backend can't
    // legalize them.
3235 if (passInt128VectorsInMem() &&
3236 cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy(128)) {
3237 // Use a vXi64 vector.
3238 uint64_t Size = getContext().getTypeSize(Ty);
      return llvm::FixedVectorType::get(llvm::Type::getInt64Ty(getVMContext()),
                                        Size / 64);
    }

    return IRType;
  }
  if (IRType->getTypeID() == llvm::Type::FP128TyID)
    return IRType;
3249 // We couldn't find the preferred IR vector type for 'Ty'.
3250 uint64_t Size = getContext().getTypeSize(Ty);
3251 assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");
  // Return an LLVM IR vector type based on the size of 'Ty'.
  return llvm::FixedVectorType::get(llvm::Type::getDoubleTy(getVMContext()),
                                    Size / 64);
}
3259 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
3260 /// is known to either be off the end of the specified type or being in
3261 /// alignment padding. The user type specified is known to be at most 128 bits
3262 /// in size, and have passed through X86_64ABIInfo::classify with a successful
3263 /// classification that put one of the two halves in the INTEGER class.
3265 /// It is conservatively correct to return false.
3266 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
3267 unsigned EndBit, ASTContext &Context) {
3268 // If the bytes being queried are off the end of the type, there is no user
3269 // data hiding here. This handles analysis of builtins, vectors and other
3270 // types that don't contain interesting padding.
3271 unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)
    return true;
3275 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
3276 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
3277 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
3279 // Check each element to see if the element overlaps with the queried range.
3280 for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done.
3282 unsigned EltOffset = i*EltSize;
3283 if (EltOffset >= EndBit) break;
3285 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
3286 if (!BitsContainNoUserData(AT->getElementType(), EltStart,
                                 EndBit-EltOffset, Context))
        return false;
    }
    // If it overlaps no elements, then it is safe to process as padding.
    return true;
  }
3294 if (const RecordType *RT = Ty->getAs<RecordType>()) {
3295 const RecordDecl *RD = RT->getDecl();
3296 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
3298 // If this is a C++ record, check the bases first.
3299 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3300 for (const auto &I : CXXRD->bases()) {
3301 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
3302 "Unexpected base class!");
      const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
3306 // If the base is after the span we care about, ignore it.
3307 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
3308 if (BaseOffset >= EndBit) continue;
3310 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
3311 if (!BitsContainNoUserData(I.getType(), BaseStart,
                                 EndBit-BaseOffset, Context))
        return false;
    }
3317 // Verify that no field has data that overlaps the region of interest. Yes
3318 // this could be sped up a lot by being smarter about queried fields,
    // however we're only looking at structs up to 16 bytes, so we don't care.
    unsigned idx = 0;
3322 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
3323 i != e; ++i, ++idx) {
3324 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
3326 // If we found a field after the region we care about, then we're done.
3327 if (FieldOffset >= EndBit) break;
3329 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
      if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
                                 Context))
        return false;
    }
    // If nothing in this record overlapped the area of interest, then we're
    // clean.
    return true;
  }

  return false;
}
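// E.g. (a sketch, hypothetical type): for struct { float x, y, z; } the query
// [96, 128) lies past the end of every field, so BitsContainNoUserData
// returns true and the caller may describe the second eightbyte with a
// narrower type such as float.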
3343 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
3344 /// float member at the specified offset. For example, {int,{float}} has a
/// float at offset 4. It is conservatively correct for this routine to return
/// false.
3347 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
3348 const llvm::DataLayout &TD) {
3349 // Base case if we find a float.
  if (IROffset == 0 && IRType->isFloatTy())
    return true;
3353 // If this is a struct, recurse into the field at the specified offset.
3354 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3355 const llvm::StructLayout *SL = TD.getStructLayout(STy);
3356 unsigned Elt = SL->getElementContainingOffset(IROffset);
3357 IROffset -= SL->getElementOffset(Elt);
3358 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
3361 // If this is an array, recurse into the field at the specified offset.
3362 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3363 llvm::Type *EltTy = ATy->getElementType();
3364 unsigned EltSize = TD.getTypeAllocSize(EltTy);
3365 IROffset -= IROffset/EltSize*EltSize;
    return ContainsFloatAtOffset(EltTy, IROffset, TD);
  }

  return false;
}
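// Example (illustrative): for the LLVM type {i32, {float}}, an IROffset of 4
// recurses into the nested struct at offset 0 and finds a float, so this
// returns true; IROffset 0 lands on the i32 and returns false.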
3373 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
3374 /// low 8 bytes of an XMM register, corresponding to the SSE class.
3375 llvm::Type *X86_64ABIInfo::
3376 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3377 QualType SourceTy, unsigned SourceOffset) const {
3378 // The only three choices we have are either double, <2 x float>, or float. We
  // pass as float if the last 4 bytes are just padding. This happens for
3380 // structs that contain 3 floats.
3381 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
3382 SourceOffset*8+64, getContext()))
3383 return llvm::Type::getFloatTy(getVMContext());
3385 // We want to pass as <2 x float> if the LLVM IR type contains a float at
  // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
  // case.
3388 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
3389 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
    return llvm::FixedVectorType::get(llvm::Type::getFloatTy(getVMContext()),
                                      2);

  return llvm::Type::getDoubleTy(getVMContext());
}
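// Sketch of the three outcomes for hypothetical source types:
//   struct { double d; float f; } at SourceOffset 8: the trailing float has
//     4 bytes of tail padding after it, so the first test yields 'float';
//   struct { float a, b, c, d; }  at SourceOffset 8: floats sit at IROffset 8
//     and 12, so the result is <2 x float>;
//   anything else falls back to 'double'.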
3397 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
3398 /// an 8-byte GPR. This means that we either have a scalar or we are talking
3399 /// about the high or low part of an up-to-16-byte struct. This routine picks
3400 /// the best LLVM IR type to represent this, which may be i64 or may be anything
/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
/// etc).
///
3404 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
3405 /// the source type. IROffset is an offset in bytes into the LLVM IR type that
3406 /// the 8-byte value references. PrefType may be null.
3408 /// SourceTy is the source-level type for the entire argument. SourceOffset is
3409 /// an offset into this that we're processing (which is always either 0 or 8).
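///
/// For example, for struct { double d; int i; } the high eightbyte is
/// classified INTEGER; calling this with SourceOffset == 8 picks i32 rather
/// than i64, because bits 32-63 of that eightbyte are only tail padding.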
llvm::Type *X86_64ABIInfo::
GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                       QualType SourceTy, unsigned SourceOffset) const {
  // If we're dealing with an un-offset LLVM IR type, then it means that we're
  // returning an 8-byte unit starting with it. See if we can safely use it.
  if (IROffset == 0) {
    // Pointers and int64's always fill the 8-byte unit.
    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))
      return IRType;

    // If we have a 1/2/4-byte integer, we can use it only if the rest of the
    // goodness in the source type is just tail padding. This is allowed to
    // kick in for struct {double,int} on the int, but not on
    // struct{double,int,int} because we wouldn't return the second int. We
    // have to do this analysis on the source type because we can't depend on
    // unions being lowered a specific way etc.
    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
        IRType->isIntegerTy(32) ||
        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
      unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
          cast<llvm::IntegerType>(IRType)->getBitWidth();

      if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
                                SourceOffset*8+64, getContext()))
        return IRType;
    }
  }

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    // If this is a struct, recurse into the field at the specified offset.
    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
    if (IROffset < SL->getSizeInBytes()) {
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
      IROffset -= SL->getElementOffset(FieldIdx);

      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);
    }
  }

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
    unsigned EltOffset = IROffset/EltSize*EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
                                  SourceOffset);
  }

  // Okay, we don't have any better idea of what to pass, so we pass this in an
  // integer register that isn't too big to fit the rest of the struct.
  unsigned TySizeInBytes =
      (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();

  assert(TySizeInBytes != SourceOffset && "Empty field?");

  // It is always safe to classify this as an integer type up to i64 that
  // isn't larger than the structure.
  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes-SourceOffset, 8U)*8);
}
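// As a worked example of the final fallback: a 10-byte packed struct being
// classified at SourceOffset == 8 yields std::min(10 - 8, 8U) * 8 == 16
// bits, i.e. the high part travels as an i16 instead of a full i64.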
3474 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
3475 /// be used as elements of a two register pair to pass or return, return a
3476 /// first class aggregate to represent them. For example, if the low part of
3477 /// a by-value argument should be passed as i32* and the high part as float,
3478 /// return {i32*, float}.
static llvm::Type *
GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
                           const llvm::DataLayout &TD) {
  // In order to correctly satisfy the ABI, we need the high part to start
  // at offset 8. If the high and low parts we inferred are both 4-byte types
  // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
  // the second element at offset 8. Check for this:
  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
  unsigned HiAlign = TD.getABITypeAlignment(Hi);
  unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

  // To handle this, we have to increase the size of the low part so that the
  // second element will start at an 8 byte offset. We can't increase the size
  // of the second element because it might make us access off the end of the
  // struct.
  if (HiStart != 8) {
    // There are usually two sorts of types the ABI generation code can produce
    // for the low part of a pair that aren't 8 bytes in size: float or
    // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
    // NaCl).
    // Promote these to a larger type.
    if (Lo->isFloatTy())
      Lo = llvm::Type::getDoubleTy(Lo->getContext());
    else {
      assert((Lo->isIntegerTy() || Lo->isPointerTy())
             && "Invalid/unknown lo type");
      Lo = llvm::Type::getInt64Ty(Lo->getContext());
    }
  }

  llvm::StructType *Result = llvm::StructType::get(Lo, Hi);

  // Verify that the second element is at an 8-byte offset.
  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
  return Result;
}
ABIArgInfo X86_64ABIInfo::
classifyReturnType(QualType RetTy) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
    // hidden argument.
  case Memory:
    return getIndirectReturnResult(RetTy);

    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
    // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
        RetTy = EnumTy->getDecl()->getIntegerType();

      if (RetTy->isIntegralOrEnumerationType() &&
          isPromotableIntegerTypeForABI(RetTy))
        return ABIArgInfo::getExtend(RetTy);
    }
    break;

    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
    // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
    break;

    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
    // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
    break;

    // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
    // part of the value is returned in %st0 and the imaginary part in
    // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()));
    break;
  }

  llvm::Type *HighPart = nullptr;
  switch (Hi) {
    // Memory was handled previously and X87 should
    // never occur as a hi class.
  case Memory:
  case X87:
    llvm_unreachable("Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass:
    break;

  case Integer:
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
    // is returned in the next available eightbyte chunk of the last used
    // vector register.
    //
    // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = GetByteVectorType(RetTy);
    break;

    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
    // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87) {
      HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
      if (Lo == NoClass) // Return HighPart at offset 8 in memory.
        return ABIArgInfo::getDirect(HighPart, 8);
    }
    break;
  }

  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result. We do this by forming a
  // first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}
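// A few concrete outcomes of the return classification above:
//   struct { long x; long y; }  -> {i64, i64}, returned in %rax:%rdx
//   struct { double x; int y; } -> {double, i32}, in %xmm0 and %rax
//   long double                 -> x86_fp80, returned on the x87 stack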
ABIArgInfo X86_64ABIInfo::classifyArgumentType(
    QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
    bool isNamedArg) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, 0, Lo, Hi, isNamedArg);

  // Check some invariants.
  // FIXME: Enforce these by construction.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

    // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
    // on the stack.
  case Memory:

    // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
    // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
      ++neededInt;
    return getIndirectResult(Ty, freeIntRegs);

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
    // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
    // and %r9 is used.
  case Integer:
    ++neededInt;

    // Pick an 8-byte type based on the preferred type.
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = Ty->getAs<EnumType>())
        Ty = EnumTy->getDecl()->getIntegerType();

      if (Ty->isIntegralOrEnumerationType() &&
          isPromotableIntegerTypeForABI(Ty))
        return ABIArgInfo::getExtend(Ty);
    }
    break;

    // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
    // available SSE register is used, the registers are taken in the
    // order from %xmm0 to %xmm7.
  case SSE: {
    llvm::Type *IRType = CGT.ConvertType(Ty);
    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
    ++neededSSE;
    break;
  }
  }

  llvm::Type *HighPart = nullptr;
  switch (Hi) {
    // Memory was handled previously, ComplexX87 and X87 should
    // never occur as hi classes, and X87Up must be preceded by X87,
    // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    llvm_unreachable("Invalid classification for hi word.");

  case NoClass: break;

  case Integer:
    ++neededInt;
    // Pick an 8-byte type based on the preferred type.
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

    // X87Up generally doesn't occur here (long double is passed in
    // memory), except in situations involving unions.
  case X87Up:
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);

    ++neededSSE;
    break;

    // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
    // eightbyte is passed in the upper half of the last used SSE
    // register. This only happens when 128-bit vectors are passed.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification");
    ResType = GetByteVectorType(Ty);
    break;
  }

  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result. We do this by forming a
  // first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}
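// For example, passing struct { double d; long l; } consumes one SSE and one
// GPR eightbyte (neededSSE == 1, neededInt == 1) and is lowered as the
// coerced two-element struct {double, i64}.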
ABIArgInfo
X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
                                             unsigned &NeededSSE) const {
  auto RT = Ty->getAs<RecordType>();
  assert(RT && "classifyRegCallStructType only valid with struct types");

  if (RT->getDecl()->hasFlexibleArrayMember())
    return getIndirectReturnResult(Ty);

  // Sum up bases.
  if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
    if (CXXRD->isDynamicClass()) {
      NeededInt = NeededSSE = 0;
      return getIndirectReturnResult(Ty);
    }

    for (const auto &I : CXXRD->bases())
      if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
  }

  // Sum up members.
  for (const auto *FD : RT->getDecl()->fields()) {
    if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
      if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
    } else {
      unsigned LocalNeededInt, LocalNeededSSE;
      if (classifyArgumentType(FD->getType(), UINT_MAX, LocalNeededInt,
                               LocalNeededSSE, true)
              .isIndirect()) {
        NeededInt = NeededSSE = 0;
        return getIndirectReturnResult(Ty);
      }
      NeededInt += LocalNeededInt;
      NeededSSE += LocalNeededSSE;
    }
  }

  return ABIArgInfo::getDirect();
}

ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty,
                                                    unsigned &NeededInt,
                                                    unsigned &NeededSSE) const {
  NeededInt = 0;
  NeededSSE = 0;

  return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);
}
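// E.g. under regcall, struct { double a; int b; } sums its per-field
// classifications to NeededSSE == 1 and NeededInt == 1; if any field (or the
// record itself) would go indirect, both counts are reset to zero and the
// whole struct is passed or returned in memory instead.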
void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  const unsigned CallingConv = FI.getCallingConvention();
  // It is possible to force Win64 calling convention on any x86_64 target by
  // using __attribute__((ms_abi)). In such case to correctly emit Win64
  // compatible code delegate this call to WinX86_64ABIInfo::computeInfo.
  if (CallingConv == llvm::CallingConv::Win64) {
    WinX86_64ABIInfo Win64ABIInfo(CGT, AVXLevel);
    Win64ABIInfo.computeInfo(FI);
    return;
  }

  bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;

  // Keep track of the number of assigned registers.
  unsigned FreeIntRegs = IsRegCall ? 11 : 6;
  unsigned FreeSSERegs = IsRegCall ? 16 : 8;
  unsigned NeededInt, NeededSSE;

  if (!::classifyReturnType(getCXXABI(), FI, *this)) {
    if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
        !FI.getReturnType()->getTypePtr()->isUnionType()) {
      FI.getReturnInfo() =
          classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE);
      if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
        FreeIntRegs -= NeededInt;
        FreeSSERegs -= NeededSSE;
      } else {
        FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
      }
    } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>() &&
               getContext().getCanonicalType(FI.getReturnType()
                                                 ->getAs<ComplexType>()
                                                 ->getElementType()) ==
                   getContext().LongDoubleTy)
      // Complex Long Double Type is passed in Memory when Regcall
      // calling convention is used.
      FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
    else
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  }

  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (FI.getReturnInfo().isIndirect())
    --FreeIntRegs;

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++FreeIntRegs;

  unsigned NumRequiredArgs = FI.getNumRequiredArgs();
  // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
  // get assigned (in left-to-right order) for passing as follows...
  unsigned ArgNo = 0;
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it, ++ArgNo) {
    bool IsNamedArg = ArgNo < NumRequiredArgs;

    if (IsRegCall && it->type->isStructureOrClassType())
      it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE);
    else
      it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
                                      NeededSSE, IsNamedArg);

    // AMD64-ABI 3.2.3p3: If there are no registers available for any
    // eightbyte of an argument, the whole argument is passed on the
    // stack. If registers have already been assigned for some
    // eightbytes of such an argument, the assignments get reverted.
    if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
      FreeIntRegs -= NeededInt;
      FreeSSERegs -= NeededSSE;
    } else {
      it->info = getIndirectResult(it->type, FreeIntRegs);
    }
  }
}
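// Note the all-or-nothing revert rule above in action: with only one free
// GPR left, an argument of type struct { long x; long y; } (which needs two
// GPRs) is passed entirely in memory, and the remaining GPR stays available
// for later arguments.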
static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
                                         Address VAListAddr, QualType Ty) {
  Address overflow_arg_area_p =
      CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
      CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
  // It isn't stated explicitly in the standard, but in practice we use
  // alignment greater than 16 where necessary.
  CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
  if (Align > CharUnits::fromQuantity(8)) {
    overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
                                                      Align);
  }

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res =
      CGF.Builder.CreateBitCast(overflow_arg_area,
                                llvm::PointerType::getUnqual(LTy));

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
  // an 8 byte boundary.
  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Address(Res, Align);
}
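// Example of the Step 9/10 arithmetic above: for a 12-byte struct,
// SizeInBytes is 12 and the stored increment is (12 + 7) & ~7 == 16, so
// overflow_arg_area stays 8-byte aligned for the next argument.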
3950 Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3951 QualType Ty) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
3959 unsigned neededInt, neededSSE;
3961 Ty = getContext().getCanonicalType(Ty);
3962 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
3963 /*isNamedArg*/false);
3965 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
3966 // in the registers. If not go to step 7.
3967 if (!neededInt && !neededSSE)
3968 return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3970 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
3971 // general purpose registers needed to pass type and num_fp to hold
3972 // the number of floating point registers needed.
3974 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
3975 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
3976 // l->fp_offset > 304 - num_fp * 16 go to step 7.
3978 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
  // register save space.
  llvm::Value *InRegs = nullptr;
  Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
  llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;

  if (neededInt) {
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
        llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }
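  // E.g. for a single integer argument (neededInt == 1) the constant is
  // 48 - 8 == 40: gp_offset takes the values 0, 8, ..., 48, and any value up
  // to 40 means one of the six saved GPRs is still unread, while 48 sends us
  // down the in-memory path.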
4000 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
4001 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
4002 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
4003 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
4005 // Emit code to load the value if it was passed in registers.
4007 CGF.EmitBlock(InRegBlock);
4009 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
4010 // an offset of l->gp_offset and/or l->fp_offset. This may require
4011 // copying to a temporary location in case the parameter is passed
4012 // in different register classes or requires an alignment greater
4013 // than 8 for general purpose registers and 16 for XMM registers.
4015 // FIXME: This really results in shameful code when we end up needing to
4016 // collect arguments from different places; often what should result in a
4017 // simple assembling of a structure from scattered addresses has many more
4018 // loads than necessary. Can we clean this up?
4019 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
4020 llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
4021 CGF.Builder.CreateStructGEP(VAListAddr, 3), "reg_save_area");
4023 Address RegAddr = Address::invalid();
4024 if (neededInt && neededSSE) {
4026 assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
4027 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
4028 Address Tmp = CGF.CreateMemTemp(Ty);
4029 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
4030 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
4031 llvm::Type *TyLo = ST->getElementType(0);
4032 llvm::Type *TyHi = ST->getElementType(1);
4033 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
4034 "Unexpected ABI info for mixed regs");
4035 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
4036 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
4037 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset);
4038 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset);
4039 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
4040 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
4042 // Copy the first element.
4043 // FIXME: Our choice of alignment here and below is probably pessimistic.
4044 llvm::Value *V = CGF.Builder.CreateAlignedLoad(
4045 TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo),
4046 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo)));
4047 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
4049 // Copy the second element.
4050 V = CGF.Builder.CreateAlignedLoad(
4051 TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi),
4052 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi)));
4053 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
4055 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
4056 } else if (neededInt) {
4057 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset),
4058 CharUnits::fromQuantity(8));
4059 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
4061 // Copy to a temporary if necessary to ensure the appropriate alignment.
4062 std::pair<CharUnits, CharUnits> SizeAlign =
4063 getContext().getTypeInfoInChars(Ty);
4064 uint64_t TySize = SizeAlign.first.getQuantity();
4065 CharUnits TyAlign = SizeAlign.second;
4067 // Copy into a temporary if the type is more aligned than the
4068 // register save area.
4069 if (TyAlign.getQuantity() > 8) {
4070 Address Tmp = CGF.CreateMemTemp(Ty);
      CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
      RegAddr = Tmp;
    }
  } else if (neededSSE == 1) {
    RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
                      CharUnits::fromQuantity(16));
    RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
  } else {
    assert(neededSSE == 2 && "Invalid number of needed registers!");
4081 // SSE registers are spaced 16 bytes apart in the register save
4082 // area, we need to collect the two eightbytes together.
4083 // The ABI isn't explicit about this, but it seems reasonable
4084 // to assume that the slots are 16-byte aligned, since the stack is
4085 // naturally 16-byte aligned and the prologue is expected to store
4086 // all the SSE registers to the RSA.
4087 Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
4088 CharUnits::fromQuantity(16));
    Address RegAddrHi =
        CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
                                               CharUnits::fromQuantity(16));
4092 llvm::Type *ST = AI.canHaveCoerceToType()
4093 ? AI.getCoerceToType()
4094 : llvm::StructType::get(CGF.DoubleTy, CGF.DoubleTy);
    llvm::Value *V;
    Address Tmp = CGF.CreateMemTemp(Ty);
4097 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
4098 V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
4099 RegAddrLo, ST->getStructElementType(0)));
4100 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
4101 V = CGF.Builder.CreateLoad(CGF.Builder.CreateElementBitCast(
4102 RegAddrHi, ST->getStructElementType(1)));
4103 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
    RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
  }
4108 // AMD64-ABI 3.5.7p5: Step 5. Set:
4109 // l->gp_offset = l->gp_offset + num_gp * 8
4110 // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
4121 CGF.EmitBranch(ContBlock);
4123 // Emit code to load the value if it was passed in memory.
4125 CGF.EmitBlock(InMemBlock);
4126 Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
4128 // Return the appropriate result.
4130 CGF.EmitBlock(ContBlock);
  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
                                 "vaarg.addr");
  return ResAddr;
}
4136 Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
4137 QualType Ty) const {
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*allowHigherAlign*/ false);
}

ABIArgInfo
WinX86_64ABIInfo::reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
                                       const ABIArgInfo &current) const {
  // Assumes vectorCall calling convention.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;

  if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
      isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
    FreeSSERegs -= NumElts;
    return getDirectX86Hva();
  }
  return current;
}
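// Sketch of the two-pass scheme this implements: under vectorcall, an HVA
// parameter such as struct { __m128 a, b; } is provisionally classified
// indirect in the first pass; this hook then flips it back to a direct
// in-register value, but only if enough XMM registers remain unassigned.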
ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
                                      bool IsReturnType, bool IsVectorCall,
                                      bool IsRegCall) const {
  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  TypeInfo Info = getContext().getTypeInfo(Ty);
  uint64_t Width = Info.Width;
  CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);

  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    if (!IsReturnType) {
      if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
        return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    }

    if (RT->getDecl()->hasFlexibleArrayMember())
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
  }

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  // vectorcall adds the concept of a homogeneous vector aggregate, similar to
  // other targets.
  if ((IsVectorCall || IsRegCall) &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (IsRegCall) {
      if (FreeSSERegs >= NumElts) {
        FreeSSERegs -= NumElts;
        if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
          return ABIArgInfo::getDirect();
        return ABIArgInfo::getExpand();
      }
      return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
    } else if (IsVectorCall) {
      if (FreeSSERegs >= NumElts &&
          (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
        FreeSSERegs -= NumElts;
        return ABIArgInfo::getDirect();
      } else if (IsReturnType) {
        return ABIArgInfo::getExpand();
      } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
        // HVAs are delayed and reclassified in the 2nd step.
        return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
      }
    }
  }

  if (Ty->isMemberPointerType()) {
    // If the member pointer is represented by an LLVM int or ptr, pass it
    // directly.
    llvm::Type *LLTy = CGT.ConvertType(Ty);
    if (LLTy->isPointerTy() || LLTy->isIntegerTy())
      return ABIArgInfo::getDirect();
  }

  if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
    // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
    // not 1, 2, 4, or 8 bytes, must be passed by reference."
    if (Width > 64 || !llvm::isPowerOf2_64(Width))
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

    // Otherwise, coerce it to a small integer.
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
  }

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    case BuiltinType::Bool:
      // Bool type is always extended to the ABI, other builtin types are not
      // extended.
      return ABIArgInfo::getExtend(Ty);

    case BuiltinType::LongDouble:
      // Mingw64 GCC uses the old 80 bit extended precision floating point
      // unit. It passes them indirectly through memory.
      if (IsMingw64) {
        const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
        if (LDF == &llvm::APFloat::x87DoubleExtended())
          return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
      }
      break;

    case BuiltinType::Int128:
    case BuiltinType::UInt128:
      // If it's a parameter type, the normal ABI rule is that arguments larger
      // than 8 bytes are passed indirectly. GCC follows it. We follow it too,
      // even though it isn't particularly efficient.
      if (!IsReturnType)
        return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);

      // Mingw64 GCC returns i128 in XMM0. Coerce to v2i64 to handle that.
      // Clang matches them for compatibility.
      return ABIArgInfo::getDirect(llvm::FixedVectorType::get(
          llvm::Type::getInt64Ty(getVMContext()), 2));

    default:
      break;
    }
  }

  if (Ty->isExtIntType()) {
    // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
    // not 1, 2, 4, or 8 bytes, must be passed by reference."
    // However, non-power-of-two _ExtInts will be passed as 1, 2, 4, or 8 bytes
    // anyway as long as they fit in them, so we don't have to check the power
    // of 2.
    if (Width <= 64)
      return ABIArgInfo::getDirect();
    return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
  }

  return ABIArgInfo::getDirect();
}
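// Concrete effects of the MS rules above: an 8-byte struct is coerced to i64
// and passed in a GPR; a 12-byte struct (not a power of two) is passed by
// reference; and __int128 is indirect as a parameter but is returned as
// <2 x i64> in XMM0 for Mingw64 compatibility.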
void WinX86_64ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI,
                                             unsigned FreeSSERegs,
                                             bool IsVectorCall,
                                             bool IsRegCall) const {
  unsigned Count = 0;
  for (auto &I : FI.arguments()) {
    // Vectorcall in x64 only permits the first 6 arguments to be passed
    // as XMM/YMM registers.
    if (Count < VectorcallMaxParamNumAsReg)
      I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
    else {
      // Since these cannot be passed in registers, pretend no registers
      // are left.
      unsigned ZeroSSERegsAvail = 0;
      I.info = classify(I.type, /*FreeSSERegs=*/ZeroSSERegsAvail, false,
                        IsVectorCall, IsRegCall);
    }
    ++Count;
  }

  for (auto &I : FI.arguments()) {
    I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info);
  }
}
void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  const unsigned CC = FI.getCallingConvention();
  bool IsVectorCall = CC == llvm::CallingConv::X86_VectorCall;
  bool IsRegCall = CC == llvm::CallingConv::X86_RegCall;

  // If __attribute__((sysv_abi)) is in use, use the SysV argument
  // classification rules.
  if (CC == llvm::CallingConv::X86_64_SysV) {
    X86_64ABIInfo SysVABIInfo(CGT, AVXLevel);
    SysVABIInfo.computeInfo(FI);
    return;
  }

  unsigned FreeSSERegs = 0;
  if (IsVectorCall) {
    // We can use up to 4 SSE return registers with vectorcall.
    FreeSSERegs = 4;
  } else if (IsRegCall) {
    // RegCall gives us 16 SSE registers.
    FreeSSERegs = 16;
  }

  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true,
                                  IsVectorCall, IsRegCall);

  if (IsVectorCall) {
    // We can use up to 6 SSE register parameters with vectorcall.
    FreeSSERegs = 6;
  } else if (IsRegCall) {
    // RegCall gives us 16 SSE registers, we can reuse the return registers.
    FreeSSERegs = 16;
  }

  if (IsVectorCall) {
    computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall);
  } else {
    for (auto &I : FI.arguments())
      I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
  }
}
4348 Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4349 QualType Ty) const {
4351 bool IsIndirect = false;
4353 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
4354 // not 1, 2, 4, or 8 bytes, must be passed by reference."
4355 if (isAggregateTypeForABI(Ty) || Ty->isMemberPointerType()) {
4356 uint64_t Width = getContext().getTypeSize(Ty);
    IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*allowHigherAlign*/ false);
}
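// So for va_arg on Win64, a 16-byte struct is fetched indirectly: the
// va_list slot holds an 8-byte pointer to the caller's temporary, whereas an
// 8-byte struct sits directly in the slot itself.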
static bool PPC_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                        llvm::Value *Address, bool Is64Bit,
                                        bool IsAIX) {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output. AFAIK all PPC ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
  llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);

  // 0-31: r0-31, the 4-byte or 8-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 0, 31);

  // 32-63: fp0-31, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 32, 63);

  // 64-67 are various 4-byte or 8-byte special-purpose registers:
  // 64: mq
  // 65: lr
  // 66: ctr
  // 67: ap
  AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 64, 67);

  // 68-76 are various 4-byte special-purpose registers:
  // 68-75 cr0-7
  // 76: xer
  AssignToArrayRange(Builder, Address, Four8, 68, 76);

  // 77-108: v0-31, the 16-byte vector registers
  AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);

  // 109: vrsave
  // 110: vscr
  AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 109, 110);

  // AIX does not utilize the rest of the registers.
  if (IsAIX)
    return false;

  // 111: spe_acc
  // 112: spefscr
  // 113: sfp
  AssignToArrayRange(Builder, Address, Is64Bit ? Eight8 : Four8, 111, 113);

  if (!Is64Bit)
    return false;

  // TODO: Need to verify if these registers are used on 64 bit AIX with Power8
  // or above CPU.
  // 64-bit only registers:
  // 114: tfhar
  // 115: tfiar
  // 116: texasr
  AssignToArrayRange(Builder, Address, Eight8, 114, 116);

  return false;
}

namespace {
/// AIXABIInfo - The AIX XCOFF ABI information.
class AIXABIInfo : public ABIInfo {
  const bool Is64Bit;
  const unsigned PtrByteSize;
  CharUnits getParamTypeAlignment(QualType Ty) const;

public:
  AIXABIInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
      : ABIInfo(CGT), Is64Bit(Is64Bit), PtrByteSize(Is64Bit ? 8 : 4) {}

  bool isPromotableTypeForABI(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class AIXTargetCodeGenInfo : public TargetCodeGenInfo {
  const bool Is64Bit;

public:
  AIXTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool Is64Bit)
      : TargetCodeGenInfo(std::make_unique<AIXABIInfo>(CGT, Is64Bit)),
        Is64Bit(Is64Bit) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
} // namespace
// Return true if the ABI requires Ty to be passed sign- or zero-
// extended to 32/64 bits.
bool AIXABIInfo::isPromotableTypeForABI(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Promotable integer types are required to be promoted by the ABI.
  if (Ty->isPromotableIntegerType())
    return true;

  if (!Is64Bit)
    return false;

  // For 64 bit mode, in addition to the usual promotable integer types, we
  // also need to extend all 32-bit types, since the ABI requires promotion
  // to 64 bits.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      break;
    }

  return false;
}
4501 ABIArgInfo AIXABIInfo::classifyReturnType(QualType RetTy) const {
4502 if (RetTy->isAnyComplexType())
4503 llvm::report_fatal_error("complex type is not supported on AIX yet");
4505 if (RetTy->isVectorType())
4506 llvm::report_fatal_error("vector type is not supported on AIX yet");
4508 if (RetTy->isVoidType())
4509 return ABIArgInfo::getIgnore();
  // TODO: Evaluate if AIX power alignment rule would have an impact on the
  // alignment here.
4513 if (isAggregateTypeForABI(RetTy))
4514 return getNaturalAlignIndirect(RetTy);
  return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                        : ABIArgInfo::getDirect());
}
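// Consequently, on AIX even a small struct such as struct { int x; } is
// currently returned through an implicit sret pointer, while a plain int is
// sign-extended per the promotion rule above.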
4520 ABIArgInfo AIXABIInfo::classifyArgumentType(QualType Ty) const {
4521 Ty = useFirstFieldIfTransparentUnion(Ty);
4523 if (Ty->isAnyComplexType())
4524 llvm::report_fatal_error("complex type is not supported on AIX yet");
4526 if (Ty->isVectorType())
4527 llvm::report_fatal_error("vector type is not supported on AIX yet");
  // TODO: Evaluate if AIX power alignment rule would have an impact on the
  // alignment here.
  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    CharUnits CCAlign = getParamTypeAlignment(Ty);
    CharUnits TyAlign = getContext().getTypeAlignInChars(Ty);

    return ABIArgInfo::getIndirect(CCAlign, /*ByVal*/ true,
                                   /*Realign*/ TyAlign > CCAlign);
  }
  return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                     : ABIArgInfo::getDirect());
}
4548 CharUnits AIXABIInfo::getParamTypeAlignment(QualType Ty) const {
4549 if (Ty->isAnyComplexType())
4550 llvm::report_fatal_error("complex type is not supported on AIX yet");
4552 if (Ty->isVectorType())
4553 llvm::report_fatal_error("vector type is not supported on AIX yet");
4555 // If the structure contains a vector type, the alignment is 16.
4556 if (isRecordWithSIMDVectorType(getContext(), Ty))
4557 return CharUnits::fromQuantity(16);
  return CharUnits::fromQuantity(PtrByteSize);
}
4562 Address AIXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4563 QualType Ty) const {
4564 if (Ty->isAnyComplexType())
4565 llvm::report_fatal_error("complex type is not supported on AIX yet");
4567 if (Ty->isVectorType())
4568 llvm::report_fatal_error("vector type is not supported on AIX yet");
4570 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
4571 TypeInfo.second = getParamTypeAlignment(Ty);
4573 CharUnits SlotSize = CharUnits::fromQuantity(PtrByteSize);
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false, TypeInfo,
                          SlotSize, /*AllowHigher*/ true);
}
4579 bool AIXTargetCodeGenInfo::initDwarfEHRegSizeTable(
4580 CodeGen::CodeGenFunction &CGF, llvm::Value *Address) const {
  return PPC_initDwarfEHRegSizeTable(CGF, Address, Is64Bit, /*IsAIX*/ true);
}

// PowerPC-32
namespace {
4586 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
4587 class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
4588 bool IsSoftFloatABI;
4589 bool IsRetSmallStructInRegABI;
4591 CharUnits getParamTypeAlignment(QualType Ty) const;
public:
  PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI,
4595 bool RetSmallStructInRegABI)
4596 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI),
4597 IsRetSmallStructInRegABI(RetSmallStructInRegABI) {}
4599 ABIArgInfo classifyReturnType(QualType RetTy) const;
4601 void computeInfo(CGFunctionInfo &FI) const override {
4602 if (!getCXXABI().classifyReturnType(FI))
4603 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4604 for (auto &I : FI.arguments())
4605 I.info = classifyArgumentType(I.type);
4608 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI,
4615 bool RetSmallStructInRegABI)
4616 : TargetCodeGenInfo(std::make_unique<PPC32_SVR4_ABIInfo>(
4617 CGT, SoftFloatABI, RetSmallStructInRegABI)) {}
4619 static bool isStructReturnInRegABI(const llvm::Triple &Triple,
4620 const CodeGenOptions &Opts);
4622 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4623 // This is recovered from gcc output.
4624 return 1; // r1 is the dedicated stack pointer
4627 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
} // namespace
4632 CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
4633 // Complex types are passed just like their elements.
4634 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
4635 Ty = CTy->getElementType();
  if (Ty->isVectorType())
    return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16
                                                                       : 4);

  // For single-element float/vector structs, we consider the whole type
  // to have the same alignment requirements as its single element.
  const Type *AlignTy = nullptr;
  if (const Type *EltType = isSingleElementStruct(Ty, getContext())) {
    const BuiltinType *BT = EltType->getAs<BuiltinType>();
    if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
        (BT && BT->isFloatingPoint()))
      AlignTy = EltType;
  }

  if (AlignTy)
    return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4);
  return CharUnits::fromQuantity(4);
}
ABIArgInfo PPC32_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
  uint64_t Size;

  // -msvr4-struct-return puts small aggregates in GPR3 and GPR4.
  if (isAggregateTypeForABI(RetTy) && IsRetSmallStructInRegABI &&
      (Size = getContext().getTypeSize(RetTy)) <= 64) {
    // System V ABI (1995), page 3-22, specified:
    // > A structure or union whose size is less than or equal to 8 bytes
    // > shall be returned in r3 and r4, as if it were first stored in the
    // > 8-byte aligned memory area and then the low addressed word were
    // > loaded into r3 and the high-addressed word into r4. Bits beyond
    // > the last member of the structure or union are not defined.
    //
    // GCC for big-endian PPC32 inserts the pad before the first member,
    // not "beyond the last member" of the struct. To stay compatible
    // with GCC, we coerce the struct to an integer of the same size.
    // LLVM will extend it and return i32 in r3, or i64 in r3:r4.
    if (Size == 0)
      return ABIArgInfo::getIgnore();
    else {
      llvm::Type *CoerceTy = llvm::Type::getIntNTy(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
  }

  return DefaultABIInfo::classifyReturnType(RetTy);
}
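// E.g. with -msvr4-struct-return, struct { char c; short s; } (4 bytes) is
// coerced to i32 and comes back in r3, while an 8-byte struct is returned as
// i64 in the r3:r4 pair, matching the GCC layout described above.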
4684 // TODO: this implementation is now likely redundant with
4685 // DefaultABIInfo::EmitVAArg.
4686 Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
4687 QualType Ty) const {
4688 if (getTarget().getTriple().isOSDarwin()) {
4689 auto TI = getContext().getTypeInfoInChars(Ty);
4690 TI.second = getParamTypeAlignment(Ty);
4692 CharUnits SlotSize = CharUnits::fromQuantity(4);
4693 return emitVoidPtrVAArg(CGF, VAList, Ty,
4694 classifyArgumentType(Ty).isIndirect(), TI, SlotSize,
                            /*AllowHigherAlign=*/true);
  }

  const unsigned OverflowLimit = 8;
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    // TODO: Implement this. For now ignore.
    (void)CTy;
    return Address::invalid(); // FIXME?
  }
4705 // struct __va_list_tag {
4706 // unsigned char gpr;
4707 // unsigned char fpr;
4708 // unsigned short reserved;
4709 // void *overflow_arg_area;
  //   void *reg_save_area;
  // };

  bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
  bool isInt =
      Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
4716 bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;
4718 // All aggregates are passed indirectly? That doesn't seem consistent
4719 // with the argument-lowering code.
4720 bool isIndirect = Ty->isAggregateType();
4722 CGBuilderTy &Builder = CGF.Builder;
4724 // The calling convention either uses 1-2 GPRs or 1 FPR.
4725 Address NumRegsAddr = Address::invalid();
  if (isInt || IsSoftFloatABI) {
    NumRegsAddr = Builder.CreateStructGEP(VAList, 0, "gpr");
  } else {
    NumRegsAddr = Builder.CreateStructGEP(VAList, 1, "fpr");
  }

  llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");
4734 // "Align" the register count when TY is i64.
4735 if (isI64 || (isF64 && IsSoftFloatABI)) {
4736 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
    NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
  }

  llvm::Value *CC =
      Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");
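  // Example of the "alignment" above: if three GPR slots are already used
  // and the next argument is an i64, NumRegs rounds up from 3 to 4, so the
  // value occupies the aligned pair r7:r8 and the count then advances by 2.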
4743 llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
4744 llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
4745 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
4747 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
4749 llvm::Type *DirectTy = CGF.ConvertType(Ty);
4750 if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
4752 // Case 1: consume registers.
4753 Address RegAddr = Address::invalid();
  {
    CGF.EmitBlock(UsingRegs);
4757 Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4);
4758 RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
4759 CharUnits::fromQuantity(8));
4760 assert(RegAddr.getElementType() == CGF.Int8Ty);
4762 // Floating-point registers start after the general-purpose registers.
4763 if (!(isInt || IsSoftFloatABI)) {
4764 RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
4765 CharUnits::fromQuantity(32));
    // Get the address of the saved value by scaling the number of
    // registers we've used so far by the size of each register.
4770 CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
4771 llvm::Value *RegOffset =
4772 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
4773 RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,
4774 RegAddr.getPointer(), RegOffset),
4775 RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
4776 RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
    // Increase the used-register count.
    NumRegs =
        Builder.CreateAdd(NumRegs,
                          Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
    Builder.CreateStore(NumRegs, NumRegsAddr);

    CGF.EmitBranch(Cont);
  }
4787 // Case 2: consume space in the overflow area.
4788 Address MemAddr = Address::invalid();
  {
    CGF.EmitBlock(UsingOverflow);
4792 Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
4794 // Everything in the overflow area is rounded up to a size of at least 4.
    CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);

    CharUnits Size;
    if (!isIndirect) {
      auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
      Size = TypeInfo.first.alignTo(OverflowAreaAlign);
    } else {
      Size = CGF.getPointerSize();
    }

    Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3);
    Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),
                         OverflowAreaAlign);
4808 // Round up address of argument to alignment
4809 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
4810 if (Align > OverflowAreaAlign) {
4811 llvm::Value *Ptr = OverflowArea.getPointer();
      OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
                             Align);
    }
4816 MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
4818 // Increase the overflow area.
4819 OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
4820 Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
    CGF.EmitBranch(Cont);
  }
4824 CGF.EmitBlock(Cont);
4826 // Merge the cases with a phi.
  Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
                                "vaarg.addr");
4830 // Load the pointer if the argument was passed indirectly.
4832 Result = Address(Builder.CreateLoad(Result, "aggr"),
4833 getContext().getTypeAlignInChars(Ty));
4839 bool PPC32TargetCodeGenInfo::isStructReturnInRegABI(
4840 const llvm::Triple &Triple, const CodeGenOptions &Opts) {
4841 assert(Triple.getArch() == llvm::Triple::ppc);
4843 switch (Opts.getStructReturnConvention()) {
  case CodeGenOptions::SRCK_Default:
    break;
  case CodeGenOptions::SRCK_OnStack: // -maix-struct-return
    return false;
  case CodeGenOptions::SRCK_InRegs: // -msvr4-struct-return
    return true;
  }

  if (Triple.isOSBinFormatELF() && !Triple.isOSLinux())
    return true;

  return false;
}

bool
4859 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4860 llvm::Value *Address) const {
  return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ false,
                                     /*IsAIX*/ false);
}

// PowerPC-64
namespace {
4868 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
class PPC64_SVR4_ABIInfo : public SwiftABIInfo {
public:
  enum ABIKind {
    ELFv1 = 0,
    ELFv2
  };

private:
  static const unsigned GPRBits = 64;
  ABIKind Kind;
  bool HasQPX;
  bool IsSoftFloatABI;
4882 // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and
4883 // will be passed in a QPX register.
  bool IsQPXVectorTy(const Type *Ty) const {
    if (!HasQPX)
      return false;

    if (const VectorType *VT = Ty->getAs<VectorType>()) {
      unsigned NumElements = VT->getNumElements();
      if (NumElements == 1)
        return false;

      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
        if (getContext().getTypeSize(Ty) <= 256)
          return true;
      } else if (VT->getElementType()->
                 isSpecificBuiltinType(BuiltinType::Float)) {
        if (getContext().getTypeSize(Ty) <= 128)
          return true;
      }
    }

    return false;
  }
  bool IsQPXVectorTy(QualType Ty) const {
    return IsQPXVectorTy(Ty.getTypePtr());
  }

public:
  PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX,
                     bool SoftFloatABI)
      : SwiftABIInfo(CGT), Kind(Kind), HasQPX(HasQPX),
        IsSoftFloatABI(SoftFloatABI) {}
4916 bool isPromotableTypeForABI(QualType Ty) const;
4917 CharUnits getParamTypeAlignment(QualType Ty) const;
4919 ABIArgInfo classifyReturnType(QualType RetTy) const;
4920 ABIArgInfo classifyArgumentType(QualType Ty) const;
4922 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4923 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4924 uint64_t Members) const override;
4926 // TODO: We can add more logic to computeInfo to improve performance.
4927 // Example: For aggregate arguments that fit in a register, we could
4928 // use getDirectInReg (as is done below for structs containing a single
4929 // floating-point value) to avoid pushing them to memory on function
4930 // entry. This would require changing the logic in PPCISelLowering
4931 // when lowering the parameters in the caller and args in the callee.
4932 void computeInfo(CGFunctionInfo &FI) const override {
4933 if (!getCXXABI().classifyReturnType(FI))
4934 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4935 for (auto &I : FI.arguments()) {
4936 // We rely on the default argument classification for the most part.
4937 // One exception: An aggregate containing a single floating-point
4938 // or vector item must be passed in a register if one is available.
      const Type *T = isSingleElementStruct(I.type, getContext());
      if (T) {
        const BuiltinType *BT = T->getAs<BuiltinType>();
        if (IsQPXVectorTy(T) ||
            (T->isVectorType() && getContext().getTypeSize(T) == 128) ||
            (BT && BT->isFloatingPoint())) {
          QualType QT(T, 0);
          I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
          continue;
        }
      }
      I.info = classifyArgumentType(I.type);
    }
  }
4954 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4955 QualType Ty) const override;
  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type *> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }

  bool isSwiftErrorInRegister() const override {
    return false;
  }
};

class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
                               PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX,
                               bool SoftFloatABI)
      : TargetCodeGenInfo(std::make_unique<PPC64_SVR4_ABIInfo>(
            CGT, Kind, HasQPX, SoftFloatABI)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};

class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
4989 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4990 // This is recovered from gcc output.
    return 1; // r1 is the dedicated stack pointer
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
} // namespace
// Return true if the ABI requires Ty to be passed sign- or zero-
// extended to 64 bits.
bool
PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Promotable integer types are required to be promoted by the ABI.
  if (isPromotableIntegerTypeForABI(Ty))
    return true;

  // In addition to the usual promotable integer types, we also need to
  // extend all 32-bit types, since the ABI requires promotion to 64 bits.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      break;
    }

  if (const auto *EIT = Ty->getAs<ExtIntType>())
    if (EIT->getNumBits() < 64)
      return true;

  return false;
}

/// isAlignedParamType - Determine whether a type requires 16-byte or
/// higher alignment in the parameter area. Always returns at least 8.
CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
  // Complex types are passed just like their elements.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Only vector types of size 16 bytes need alignment (larger types are
  // passed via reference, smaller types are not aligned).
  if (IsQPXVectorTy(Ty)) {
    if (getContext().getTypeSize(Ty) > 128)
      return CharUnits::fromQuantity(32);

    return CharUnits::fromQuantity(16);
  } else if (Ty->isVectorType()) {
    return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
  }

  // For single-element float/vector structs, we consider the whole type
  // to have the same alignment requirements as its single element.
  const Type *AlignAsType = nullptr;
  const Type *EltType = isSingleElementStruct(Ty, getContext());
  if (EltType) {
    const BuiltinType *BT = EltType->getAs<BuiltinType>();
    if (IsQPXVectorTy(EltType) || (EltType->isVectorType() &&
         getContext().getTypeSize(EltType) == 128) ||
        (BT && BT->isFloatingPoint()))
      AlignAsType = EltType;
  }

  // Likewise for ELFv2 homogeneous aggregates.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (!AlignAsType && Kind == ELFv2 &&
      isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
    AlignAsType = Base;

  // With special case aggregates, only vector base types need alignment.
  if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
    if (getContext().getTypeSize(AlignAsType) > 128)
      return CharUnits::fromQuantity(32);

    return CharUnits::fromQuantity(16);
  } else if (AlignAsType) {
    return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8);
  }

  // Otherwise, we only need alignment for any aggregate type that
  // has an alignment requirement of >= 16 bytes.
  if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
    if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
      return CharUnits::fromQuantity(32);
    return CharUnits::fromQuantity(16);
  }

  return CharUnits::fromQuantity(8);
}
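
// For example, a 16-byte Altivec vector (or an ELFv2 homogeneous aggregate
// of such vectors) is aligned to 16 bytes in the parameter save area, while
// 'struct { double x, y; }' only gets the default 8-byte doubleword slot.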

/// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
/// aggregate.  Base is set to the base element type, and Members is set
/// to the number of base elements.
bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
                                     uint64_t &Members) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElements = AT->getSize().getZExtValue();
    if (NElements == 0)
      return false;
    if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
      return false;
    Members *= NElements;
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    if (RD->hasFlexibleArrayMember())
      return false;

    Members = 0;

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        // Ignore empty records.
        if (isEmptyRecord(getContext(), I.getType(), true))
          continue;

        uint64_t FldMembers;
        if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
          return false;

        Members += FldMembers;
      }
    }

    for (const auto *FD : RD->fields()) {
      // Ignore (non-zero arrays of) empty records.
      QualType FT = FD->getType();
      while (const ConstantArrayType *AT =
             getContext().getAsConstantArrayType(FT)) {
        if (AT->getSize().getZExtValue() == 0)
          return false;
        FT = AT->getElementType();
      }
      if (isEmptyRecord(getContext(), FT, true))
        continue;

      // For compatibility with GCC, ignore empty bitfields in C++ mode.
      if (getContext().getLangOpts().CPlusPlus &&
          FD->isZeroLengthBitField(getContext()))
        continue;

      uint64_t FldMembers;
      if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
        return false;

      Members = (RD->isUnion() ?
                 std::max(Members, FldMembers) : Members + FldMembers);
    }

    if (!Base)
      return false;

    // Ensure there is no padding.
    if (getContext().getTypeSize(Base) * Members !=
        getContext().getTypeSize(Ty))
      return false;
  } else {
    Members = 1;
    if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
      Members = 2;
      Ty = CT->getElementType();
    }

    // Most ABIs only support float, double, and some vector type widths.
    if (!isHomogeneousAggregateBaseType(Ty))
      return false;

    // The base type must be the same for all members.  Types that
    // agree in both total size and mode (float vs. vector) are
    // treated as being equivalent here.
    const Type *TyPtr = Ty.getTypePtr();
    if (!Base) {
      Base = TyPtr;
      // If it's a non-power-of-2 vector, its size is already a power-of-2,
      // so make sure to widen it explicitly.
      if (const VectorType *VT = Base->getAs<VectorType>()) {
        QualType EltTy = VT->getElementType();
        unsigned NumElements =
            getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
        Base = getContext()
                   .getVectorType(EltTy, NumElements, VT->getVectorKind())
                   .getTypePtr();
      }
    }

    if (Base->isVectorType() != TyPtr->isVectorType() ||
        getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
      return false;
  }
  return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
}
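
// For example, 'struct { float x, y, z; }' is a homogeneous aggregate with
// Base = float and Members = 3, and 'struct { double d[2]; }' yields
// Base = double and Members = 2. 'struct { float f; double d; }' fails the
// equal-base-type check above and is not homogeneous.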

bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for ELFv2 must have base types of float,
  // double, long double, or 128-bit vectors.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble ||
        (getContext().getTargetInfo().hasFloat128Type() &&
         (BT->getKind() == BuiltinType::Float128))) {
      if (IsSoftFloatABI)
        return false;
      return true;
    }
  }
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))
      return true;
  }
  return false;
}

bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
    const Type *Base, uint64_t Members) const {
  // Vector and fp128 types require one register, other floating point types
  // require one or two registers depending on their size.
  uint32_t NumRegs =
      ((getContext().getTargetInfo().hasFloat128Type() &&
        Base->isFloat128Type()) ||
       Base->isVectorType()) ? 1
                             : (getContext().getTypeSize(Base) + 63) / 64;

  // Homogeneous Aggregates may occupy at most 8 registers.
  return Members * NumRegs <= 8;
}
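
// For example, an aggregate of eight doubles (one register each) is still
// accepted, but eight IBM 'long double' members at two registers apiece
// would need 16 registers and is rejected.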

ABIArgInfo
PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).
  if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size > 128)
      return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
    else if (Size < 128) {
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
  }

  if (const auto *EIT = Ty->getAs<ExtIntType>())
    if (EIT->getNumBits() > 128)
      return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
    uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();

    // ELFv2 homogeneous aggregates are passed as array types.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == ELFv2 &&
        isHomogeneousAggregate(Ty, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // If an aggregate may end up fully in registers, we do not
    // use the ByVal method, but pass the aggregate as array.
    // This is usually beneficial since we avoid forcing the
    // back-end to store the argument to memory.
    uint64_t Bits = getContext().getTypeSize(Ty);
    if (Bits > 0 && Bits <= 8 * GPRBits) {
      llvm::Type *CoerceTy;

      // Types up to 8 bytes are passed as integer type (which will be
      // properly aligned in the argument save area doubleword).
      if (Bits <= GPRBits)
        CoerceTy =
            llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
      // Larger types are passed as arrays, with the base type selected
      // according to the required alignment in the save area.
      else {
        uint64_t RegBits = ABIAlign * 8;
        uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
        llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
        CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
      }

      return ABIArgInfo::getDirect(CoerceTy);
    }

    // All other aggregates are passed ByVal.
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
                                   /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  return (isPromotableTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                     : ABIArgInfo::getDirect());
}
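
// For example, under ELFv2 'struct { float v[4]; }' is a homogeneous
// aggregate passed as '[4 x float]', while a 24-byte plain aggregate with
// 8-byte alignment fits in registers and is coerced to '[3 x i64]'.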

ABIArgInfo
PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (RetTy->isAnyComplexType())
    return ABIArgInfo::getDirect();

  // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
  // or via reference (larger than 16 bytes).
  if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) {
    uint64_t Size = getContext().getTypeSize(RetTy);
    if (Size > 128)
      return getNaturalAlignIndirect(RetTy);
    else if (Size < 128) {
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
  }

  if (const auto *EIT = RetTy->getAs<ExtIntType>())
    if (EIT->getNumBits() > 128)
      return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);

  if (isAggregateTypeForABI(RetTy)) {
    // ELFv2 homogeneous aggregates are returned as array types.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (Kind == ELFv2 &&
        isHomogeneousAggregate(RetTy, Base, Members)) {
      llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
      llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // ELFv2 small aggregates are returned in up to two registers.
    uint64_t Bits = getContext().getTypeSize(RetTy);
    if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
      if (Bits == 0)
        return ABIArgInfo::getIgnore();

      llvm::Type *CoerceTy;
      if (Bits > GPRBits) {
        CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
        CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
      } else
        CoerceTy =
            llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
      return ABIArgInfo::getDirect(CoerceTy);
    }

    // All other aggregates are returned indirectly.
    return getNaturalAlignIndirect(RetTy);
  }

  return (isPromotableTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                        : ABIArgInfo::getDirect());
}
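
// For example, under ELFv2 a 12-byte aggregate (Bits == 96) is returned in
// two GPRs as '{ i64, i64 }', while a 3-byte aggregate is returned directly
// as a single 'i24'.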

// Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                      QualType Ty) const {
  auto TypeInfo = getContext().getTypeInfoInChars(Ty);
  TypeInfo.second = getParamTypeAlignment(Ty);

  CharUnits SlotSize = CharUnits::fromQuantity(8);

  // If we have a complex type and the base type is smaller than 8 bytes,
  // the ABI calls for the real and imaginary parts to be right-adjusted
  // in separate doublewords.  However, Clang expects us to produce a
  // pointer to a structure with the two parts packed tightly.  So generate
  // loads of the real and imaginary parts relative to the va_list pointer,
  // and store them to a temporary structure.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
    CharUnits EltSize = TypeInfo.first / 2;
    if (EltSize < SlotSize) {
      Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty,
                                            SlotSize * 2, SlotSize,
                                            SlotSize, /*AllowHigher*/ true);

      Address RealAddr = Addr;
      Address ImagAddr = RealAddr;
      if (CGF.CGM.getDataLayout().isBigEndian()) {
        RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr,
                                                          SlotSize - EltSize);
        ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
                                                      2 * SlotSize - EltSize);
      } else {
        ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
      }

      llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
      RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy);
      ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy);
      llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
      llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");

      Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
      CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
                             /*isInit*/ true);
      return Temp;
    }
  }

  // Otherwise, just use the general rule.
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
                          TypeInfo, SlotSize, /*AllowHigher*/ true);
}
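
// For example, on big-endian targets a '_Complex float' va_arg loads the
// real and imaginary parts from the high (right-adjusted) halves of two
// successive doublewords and repacks them into one 8-byte temporary.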

bool
PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
  CodeGen::CodeGenFunction &CGF,
  llvm::Value *Address) const {
  return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
                                     /*IsAIX*/ false);
}

bool
PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                llvm::Value *Address) const {
  return PPC_initDwarfEHRegSizeTable(CGF, Address, /*Is64Bit*/ true,
                                     /*IsAIX*/ false);
}

//===----------------------------------------------------------------------===//
// AArch64 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class AArch64ABIInfo : public SwiftABIInfo {
public:
  enum ABIKind {
    AAPCS = 0,
    DarwinPCS,
    Win64
  };

private:
  ABIKind Kind;

public:
  AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind)
      : SwiftABIInfo(CGT), Kind(Kind) {}

private:
  ABIKind getABIKind() const { return Kind; }
  bool isDarwinPCS() const { return Kind == DarwinPCS; }

  ABIArgInfo classifyReturnType(QualType RetTy, bool IsVariadic) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;
  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

  bool isIllegalVectorType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!::classifyReturnType(getCXXABI(), FI, *this))
      FI.getReturnInfo() =
          classifyReturnType(FI.getReturnType(), FI.isVariadic());

    for (auto &it : FI.arguments())
      it.info = classifyArgumentType(it.type);
  }

  Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                          CodeGenFunction &CGF) const;

  Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override {
    llvm::Type *BaseTy = CGF.ConvertType(Ty);
    if (isa<llvm::ScalableVectorType>(BaseTy))
      llvm::report_fatal_error("Passing SVE types to variadic functions is "
                               "currently not supported");

    return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
                         : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
                                         : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
  }

  Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                      QualType Ty) const override;

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }
  bool isSwiftErrorInRegister() const override {
    return true;
  }

  bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
                                 unsigned elts) const override;

  bool allowBFloatArgsAndRet() const override {
    return getTarget().hasBFloat16Type();
  }
};

class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
      : TargetCodeGenInfo(std::make_unique<AArch64ABIInfo>(CGT, Kind)) {}

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 31;
  }

  bool doesReturnSlotInterfereWithArgs() const override { return false; }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;

    LangOptions::SignReturnAddressScopeKind Scope =
        CGM.getLangOpts().getSignReturnAddressScope();
    LangOptions::SignReturnAddressKeyKind Key =
        CGM.getLangOpts().getSignReturnAddressKey();
    bool BranchTargetEnforcement = CGM.getLangOpts().BranchTargetEnforcement;
    if (const auto *TA = FD->getAttr<TargetAttr>()) {
      ParsedTargetAttr Attr = TA->parse();
      if (!Attr.BranchProtection.empty()) {
        TargetInfo::BranchProtectionInfo BPI;
        StringRef Error;
        (void)CGM.getTarget().validateBranchProtection(Attr.BranchProtection,
                                                       BPI, Error);
        assert(Error.empty());
        Scope = BPI.SignReturnAddr;
        Key = BPI.SignKey;
        BranchTargetEnforcement = BPI.BranchTargetEnforcement;
      }
    }

    auto *Fn = cast<llvm::Function>(GV);
    if (Scope != LangOptions::SignReturnAddressScopeKind::None) {
      Fn->addFnAttr("sign-return-address",
                    Scope == LangOptions::SignReturnAddressScopeKind::All
                        ? "all"
                        : "non-leaf");

      Fn->addFnAttr("sign-return-address-key",
                    Key == LangOptions::SignReturnAddressKeyKind::AKey
                        ? "a_key"
                        : "b_key");
    }

    if (BranchTargetEnforcement)
      Fn->addFnAttr("branch-target-enforcement");
  }
};

class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
public:
  WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K)
      : AArch64TargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

void WindowsAArch64TargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  AArch64TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
} // end anonymous namespace

ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);
    // Android promotes <2 x i8> to i16, not i32
    if (isAndroid() && (Size <= 16)) {
      llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size <= 32) {
      llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 64) {
      auto *ResType =
          llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 128) {
      auto *ResType =
          llvm::FixedVectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
      return ABIArgInfo::getDirect(ResType);
    }
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
  }

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = Ty->getAs<ExtIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(Ty);

    return (isPromotableIntegerTypeForABI(Ty) && isDarwinPCS()
                ? ABIArgInfo::getExtend(Ty)
                : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                           CGCXXABI::RAA_DirectInMemory);
  }

  // Empty records are always ignored on Darwin, but actually passed in C++ mode
  // elsewhere for GNU compatibility.
  uint64_t Size = getContext().getTypeSize(Ty);
  bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
  if (IsEmpty || Size == 0) {
    if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
      return ABIArgInfo::getIgnore();

    // GNU C mode. The only argument that gets ignored is an empty one with size
    // 0.
    if (IsEmpty && Size == 0)
      return ABIArgInfo::getIgnore();
    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
  }

  // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(Ty, Base, Members)) {
    return ABIArgInfo::getDirect(
        llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
  }

  // Aggregates <= 16 bytes are passed directly in registers or on the stack.
  if (Size <= 128) {
    // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(Ty, getContext(), getVMContext());
    }
    unsigned Alignment;
    if (Kind == AArch64ABIInfo::AAPCS) {
      Alignment = getContext().getTypeUnadjustedAlign(Ty);
      Alignment = Alignment < 128 ? 64 : 128;
    } else {
      Alignment = std::max(getContext().getTypeAlign(Ty),
                           (unsigned)getTarget().getPointerWidth(0));
    }
    Size = llvm::alignTo(Size, Alignment);

    // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
    // For aggregates with 16-byte alignment, we use i128.
    llvm::Type *BaseTy = llvm::Type::getIntNTy(getVMContext(), Alignment);
    return ABIArgInfo::getDirect(
        Size == Alignment ? BaseTy
                          : llvm::ArrayType::get(BaseTy, Size / Alignment));
  }

  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
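
// For example, 'struct { long x; long y; }' (16 bytes, 8-byte aligned) is
// passed as '[2 x i64]' by the rule above, while the same size with 16-byte
// alignment (e.g. containing an __int128) is passed as a single 'i128'.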

ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy,
                                              bool IsVariadic) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return getNaturalAlignIndirect(RetTy);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = RetTy->getAs<ExtIntType>())
      if (EIT->getNumBits() > 128)
        return getNaturalAlignIndirect(RetTy);

    return (isPromotableIntegerTypeForABI(RetTy) && isDarwinPCS()
                ? ABIArgInfo::getExtend(RetTy)
                : ABIArgInfo::getDirect());
  }

  uint64_t Size = getContext().getTypeSize(RetTy);
  if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(RetTy, Base, Members) &&
      !(getTarget().getTriple().getArch() == llvm::Triple::aarch64_32 &&
        IsVariadic))
    // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
    return ABIArgInfo::getDirect();

  // Aggregates <= 16 bytes are returned directly in registers or on the stack.
  if (Size <= 128) {
    // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(RetTy, getContext(), getVMContext());
    }
    unsigned Alignment = getContext().getTypeAlign(RetTy);
    Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes

    // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
    // For aggregates with 16-byte alignment, we use i128.
    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  return getNaturalAlignIndirect(RetTy);
}

/// isIllegalVectorType - check whether the vector type is legal for AArch64.
bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Check whether VT is legal.
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    // NumElements should be power of 2.
    if (!llvm::isPowerOf2_32(NumElements))
      return true;

    // arm64_32 has to be compatible with the ARM logic here, which allows huge
    // vectors for some reason.
    llvm::Triple Triple = getTarget().getTriple();
    if (Triple.getArch() == llvm::Triple::aarch64_32 &&
        Triple.isOSBinFormatMachO())
      return Size <= 32;

    return Size != 64 && (Size != 128 || NumElements == 1);
  }
  return false;
}
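
// For example, '<3 x i32>' (non-power-of-2 element count) and a 256-bit
// vector are both illegal by the checks above, while '<2 x i32>' (64 bits)
// and '<4 x i32>' (128 bits) are legal.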

bool AArch64ABIInfo::isLegalVectorTypeForSwift(CharUnits totalSize,
                                               llvm::Type *eltTy,
                                               unsigned elts) const {
  if (!llvm::isPowerOf2_32(elts))
    return false;
  if (totalSize.getQuantity() != 8 &&
      (totalSize.getQuantity() != 16 || elts == 1))
    return false;
  return true;
}

bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  // Homogeneous aggregates for AAPCS64 must have base types of a floating
  // point type or a short-vector type. This is the same as the 32-bit ABI,
  // but with the difference that any floating-point type is allowed,
  // including __fp16.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint())
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}

bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                       uint64_t Members) const {
  return Members <= 4;
}

Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
                                       QualType Ty,
                                       CodeGenFunction &CGF) const {
  ABIArgInfo AI = classifyArgumentType(Ty);
  bool IsIndirect = AI.isIndirect();

  llvm::Type *BaseTy = CGF.ConvertType(Ty);
  if (IsIndirect)
    BaseTy = llvm::PointerType::getUnqual(BaseTy);
  else if (AI.getCoerceToType())
    BaseTy = AI.getCoerceToType();

  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  }
  bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();

  // The AArch64 va_list type and handling is specified in the Procedure Call
  // Standard, section B.4:
  //
  // struct {
  //   void *__stack;
  //   void *__gr_top;
  //   void *__vr_top;
  //   int __gr_offs;
  //   int __vr_offs;
  // };

  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");

  CharUnits TySize = getContext().getTypeSizeInChars(Ty);
  CharUnits TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty);

  Address reg_offs_p = Address::invalid();
  llvm::Value *reg_offs = nullptr;
  int reg_top_index;
  int RegSize = IsIndirect ? 8 : TySize.getQuantity();
  if (!IsFPR) {
    // 3 is the field number of __gr_offs
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
    reg_top_index = 1; // field number for __gr_top
    RegSize = llvm::alignTo(RegSize, 8);
  } else {
    // 4 is the field number of __vr_offs.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
    reg_top_index = 2; // field number for __vr_top
    RegSize = 16 * NumRegs;
  }

  //=======================================
  // Find out where argument was passed
  //=======================================

  // If reg_offs >= 0 we're already using the stack for this type of
  // argument. We don't want to keep updating reg_offs (in case it overflows,
  // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
  // whatever they get).
  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

  // Otherwise, at least some kind of argument could go in these registers, the
  // question is whether this particular type is too big.
  CGF.EmitBlock(MaybeRegBlock);

  // Integer arguments may need to correct register alignment (for example a
  // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
  // align __gr_offs to calculate the potential address.
  if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
        "align_regoffs");
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
        "aligned_regoffs");
  }

  // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
  // The fact that this is done unconditionally reflects the fact that
  // allocating an argument to the stack also uses up all the remaining
  // registers of the appropriate kind.
  llvm::Value *NewOffset = nullptr;
  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
  CGF.Builder.CreateStore(NewOffset, reg_offs_p);

  // Now we're in a position to decide whether this argument really was in
  // registers or not.
  llvm::Value *InRegs = nullptr;
  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);

  //=======================================
  // Argument was in registers
  //=======================================

  // Now we emit the code for if the argument was originally passed in
  // registers. First start the appropriate block:
  CGF.EmitBlock(InRegBlock);

  llvm::Value *reg_top = nullptr;
  Address reg_top_p =
      CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
  Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs),
                   CharUnits::fromQuantity(IsFPR ? 16 : 8));
  Address RegAddr = Address::invalid();
  llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);

  if (IsIndirect) {
    // If it's been passed indirectly (actually a struct), whatever we find from
    // stored registers or on the stack will actually be a struct **.
    MemTy = llvm::PointerType::getUnqual(MemTy);
  }

  const Type *Base = nullptr;
  uint64_t NumMembers = 0;
  bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
  if (IsHFA && NumMembers > 1) {
    // Homogeneous aggregates passed in registers will have their elements split
    // and stored 16-bytes apart regardless of size (they're notionally in qN,
    // qN+1, ...). We reload and store into a temporary local variable
    // contiguously.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    Address Tmp = CGF.CreateTempAlloca(HFATy,
                                       std::max(TyAlign, BaseTyInfo.second));

    // On big-endian platforms, the value will be right-aligned in its slot.
    int Offset = 0;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        BaseTyInfo.first.getQuantity() < 16)
      Offset = 16 - BaseTyInfo.first.getQuantity();

    for (unsigned i = 0; i < NumMembers; ++i) {
      CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
      Address LoadAddr =
          CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
      LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);

      Address StoreAddr = CGF.Builder.CreateConstArrayGEP(Tmp, i);

      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
      CGF.Builder.CreateStore(Elem, StoreAddr);
    }

    RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy);
  } else {
    // Otherwise the object is contiguous in memory.

    // It might be right-aligned in its slot.
    CharUnits SlotSize = BaseAddr.getAlignment();
    if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
        (IsHFA || !isAggregateTypeForABI(Ty)) &&
        TySize < SlotSize) {
      CharUnits Offset = SlotSize - TySize;
      BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
    }

    RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy);
  }

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Argument was on the stack
  //=======================================
  CGF.EmitBlock(OnStackBlock);

  Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
  llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");

  // Again, stack arguments may need realignment. In this case both integer and
  // floating-point ones might be affected.
  if (!IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);

    OnStackPtr = CGF.Builder.CreateAdd(
        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
        "align_stack");
    OnStackPtr = CGF.Builder.CreateAnd(
        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
        "align_stack");

    OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
  }
  Address OnStackAddr(OnStackPtr,
                      std::max(CharUnits::fromQuantity(8), TyAlign));

  // All stack slots are multiples of 8 bytes.
  CharUnits StackSlotSize = CharUnits::fromQuantity(8);
  CharUnits StackSize;
  if (IsIndirect)
    StackSize = StackSlotSize;
  else
    StackSize = TySize.alignTo(StackSlotSize);

  llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
  llvm::Value *NewStack =
      CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack");

  // Write the new value of __stack for the next call to va_arg
  CGF.Builder.CreateStore(NewStack, stack_p);

  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
      TySize < StackSlotSize) {
    CharUnits Offset = StackSlotSize - TySize;
    OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
  }

  OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy);

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Tidy up
  //=======================================
  CGF.EmitBlock(ContBlock);

  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
                                 OnStackAddr, OnStackBlock, "vaargs.addr");

  if (IsIndirect)
    return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"),
                   TyAlign);

  return ResAddr;
}
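
// As a concrete illustration: for 'va_arg(ap, double)' the code above reads
// __vr_offs; if the offset incremented by 16 is still <= 0, the value lives
// at __vr_top + __vr_offs, otherwise it is taken from __stack, which is then
// advanced by one 8-byte slot.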

Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
                                        CodeGenFunction &CGF) const {
  // The backend's lowering doesn't support va_arg for aggregates or
  // illegal vector types.  Lower VAArg here for these cases and use
  // the LLVM va_arg instruction for everything else.
  if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
    return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());

  uint64_t PointerSize = getTarget().getPointerWidth(0) / 8;
  CharUnits SlotSize = CharUnits::fromQuantity(PointerSize);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
    Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
    return Addr;
  }

  // The size of the actual thing passed, which might end up just
  // being a pointer for indirect types.
  auto TyInfo = getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 16 bytes which aren't homogeneous
  // aggregates should be passed indirectly.
  bool IsIndirect = false;
  if (TyInfo.first.getQuantity() > 16) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          TyInfo, SlotSize, /*AllowHigherAlign*/ true);
}

Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                    QualType Ty) const {
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
                          CGF.getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(8),
                          /*allowHigherAlign*/ false);
}

//===----------------------------------------------------------------------===//
// ARM ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class ARMABIInfo : public SwiftABIInfo {
public:
  enum ABIKind {
    APCS = 0,
    AAPCS = 1,
    AAPCS_VFP = 2,
    AAPCS16_VFP = 3,
  };

private:
  ABIKind Kind;
  bool IsFloatABISoftFP;

public:
  ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind)
      : SwiftABIInfo(CGT), Kind(_Kind) {
    setCCs();
    IsFloatABISoftFP = CGT.getCodeGenOpts().FloatABI == "softfp" ||
        CGT.getCodeGenOpts().FloatABI == ""; // default
  }

  bool isEABI() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::Android:
    case llvm::Triple::EABI:
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABI:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::MuslEABI:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return false;
    }
  }

  bool isEABIHF() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABIHF:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return false;
    }
  }

  ABIKind getABIKind() const { return Kind; }

  bool allowBFloatArgsAndRet() const override {
    return !IsFloatABISoftFP && getTarget().hasBFloat16Type();
  }

private:
  ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic,
                                unsigned functionCallConv) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
                                  unsigned functionCallConv) const;
  ABIArgInfo classifyHomogeneousAggregate(QualType Ty, const Type *Base,
                                          uint64_t Members) const;
  ABIArgInfo coerceIllegalVector(QualType Ty) const;
  bool isIllegalVectorType(QualType Ty) const;
  bool containsAnyFP16Vectors(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t Members) const override;

  bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  llvm::CallingConv::ID getLLVMDefaultCC() const;
  llvm::CallingConv::ID getABIDefaultCC() const;
  void setCCs();

  bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }
  bool isSwiftErrorInRegister() const override {
    return true;
  }
  bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
                                 unsigned elts) const override;
};

class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
      : TargetCodeGenInfo(std::make_unique<ARMABIInfo>(CGT, K)) {}

  const ARMABIInfo &getABIInfo() const {
    return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 13;
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

    // 0-15 are the 16 integer registers.
    AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
    return false;
  }

  unsigned getSizeOfUnwindException() const override {
    if (getABIInfo().isEABI()) return 88;
    return TargetCodeGenInfo::getSizeOfUnwindException();
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    if (GV->isDeclaration())
      return;
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;

    const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case ARMInterruptAttr::Generic: Kind = ""; break;
    case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
    case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
    case ARMInterruptAttr::SWI: Kind = "SWI"; break;
    case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
    case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
    }

    llvm::Function *Fn = cast<llvm::Function>(GV);

    Fn->addFnAttr("interrupt", Kind);

    ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
    if (ABI == ARMABIInfo::APCS)
      return;

    // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
    // however this is not necessarily true on taking any interrupt. Instruct
    // the backend to perform a realignment as part of the function prologue.
    llvm::AttrBuilder B;
    B.addStackAlignmentAttr(8);
    Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
  }
};

class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
public:
  WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
      : ARMTargetCodeGenInfo(CGT, K) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

void WindowsARMTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
  ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
  if (GV->isDeclaration())
    return;
  addStackProbeTargetAttributes(D, GV, CGM);
}
} // end anonymous namespace

void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!::classifyReturnType(getCXXABI(), FI, *this))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), FI.isVariadic(),
                                            FI.getCallingConvention());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type, FI.isVariadic(),
                                  FI.getCallingConvention());

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  llvm::CallingConv::ID cc = getRuntimeCC();
  if (cc != llvm::CallingConv::C)
    FI.setEffectiveCallingConvention(cc);
}

/// Return the default calling convention that LLVM will use.
llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
  // The default calling convention that LLVM will infer.
  if (isEABIHF() || getTarget().getTriple().isWatchABI())
    return llvm::CallingConv::ARM_AAPCS_VFP;
  else if (isEABI())
    return llvm::CallingConv::ARM_AAPCS;
  else
    return llvm::CallingConv::ARM_APCS;
}

/// Return the calling convention that our ABI would like us to use
/// as the C calling convention.
llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
  switch (getABIKind()) {
  case APCS: return llvm::CallingConv::ARM_APCS;
  case AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  }
  llvm_unreachable("bad ABI kind");
}

void ARMABIInfo::setCCs() {
  assert(getRuntimeCC() == llvm::CallingConv::C);

  // Don't muddy up the IR with a ton of explicit annotations if
  // they'd just match what LLVM will infer from the triple.
  llvm::CallingConv::ID abiCC = getABIDefaultCC();
  if (abiCC != getLLVMDefaultCC())
    RuntimeCC = abiCC;
}

ABIArgInfo ARMABIInfo::coerceIllegalVector(QualType Ty) const {
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size <= 32) {
    llvm::Type *ResType =
        llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
  if (Size == 64 || Size == 128) {
    auto *ResType = llvm::FixedVectorType::get(
        llvm::Type::getInt32Ty(getVMContext()), Size / 32);
    return ABIArgInfo::getDirect(ResType);
  }
  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}

ABIArgInfo ARMABIInfo::classifyHomogeneousAggregate(QualType Ty,
                                                    const Type *Base,
                                                    uint64_t Members) const {
  assert(Base && "Base class should be set for homogeneous aggregate");
  // Base can be a floating-point or a vector.
  if (const VectorType *VT = Base->getAs<VectorType>()) {
    // FP16 vectors should be converted to integer vectors
    if (!getTarget().hasLegalHalfType() && containsAnyFP16Vectors(Ty)) {
      uint64_t Size = getContext().getTypeSize(VT);
      auto *NewVecTy = llvm::FixedVectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), Size / 32);
      llvm::Type *Ty = llvm::ArrayType::get(NewVecTy, Members);
      return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
    }
  }
  return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
}

ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
                                            unsigned functionCallConv) const {
  // 6.1.2.1 The following argument types are VFP CPRCs:
  //   A single-precision floating-point type (including promoted
  //   half-precision types); A double-precision floating-point type;
  //   A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
  //   with a Base Type of a single- or double-precision floating-point type,
  //   64-bit containerized vectors or 128-bit containerized vectors with one
  //   to four Elements.
  // Variadic functions should always marshal to the base standard.
  bool IsAAPCS_VFP =
      !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ false);

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty))
    return coerceIllegalVector(Ty);

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
      Ty = EnumTy->getDecl()->getIntegerType();
    }

    if (const auto *EIT = Ty->getAs<ExtIntType>())
      if (EIT->getNumBits() > 64)
        return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

    return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
                                              : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  if (IsAAPCS_VFP) {
    // Homogeneous Aggregates need to be expanded when we can fit the aggregate
    // into VFP registers.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members))
      return classifyHomogeneousAggregate(Ty, Base, Members);
  } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
    // WatchOS does have homogeneous aggregates. Note that we intentionally use
    // this convention even for a variadic function: the backend will use GPRs
    // if needed.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members)) {
      assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
      llvm::Type *Ty =
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
      return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
    }
  }

  if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
      getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
    // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
    // bigger than 128-bits, they get placed in space allocated by the caller,
    // and a pointer is passed.
    return ABIArgInfo::getIndirect(
        CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
  }

  // Support byval for ARM.
  // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
  // most 8-byte. We realign the indirect argument if type alignment is bigger
  // than ABI alignment.
  uint64_t ABIAlign = 4;
  uint64_t TyAlign;
  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
      getABIKind() == ARMABIInfo::AAPCS) {
    TyAlign = getContext().getTypeUnadjustedAlignInChars(Ty).getQuantity();
    ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
  } else {
    TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
  }
  if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
    assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval");
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
                                   /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  // On RenderScript, coerce Aggregates <= 64 bytes to an integer array of
  // same size and alignment.
  if (getTarget().isRenderScriptTarget()) {
    return coerceToIntArray(Ty, getContext(), getVMContext());
  }

  // Otherwise, pass by coercing to a structure of the appropriate size.
  llvm::Type* ElemTy;
  unsigned SizeRegs;
  // FIXME: Try to match the types of the arguments more accurately where
  // we can.
  if (TyAlign <= 4) {
    ElemTy = llvm::Type::getInt32Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  } else {
    ElemTy = llvm::Type::getInt64Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
  }

  return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
}
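
// For example, a 12-byte struct with 4-byte alignment is coerced to
// '[3 x i32]', and a 16-byte struct with 8-byte alignment to '[2 x i64]',
// so the backend can assign it straight to register pairs.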

static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
  // is called integer-like if its size is less than or equal to one word, and
  // the offset of each of its addressable sub-fields is zero.

  uint64_t Size = Context.getTypeSize(Ty);

  // Check that the type fits in a word.
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
    return true;

  // Small complex integer types are "integer like".
  if (const ComplexType *CT = Ty->getAs<ComplexType>())
    return isIntegerLikeType(CT->getElementType(), Context, VMContext);

  // Single element and zero sized arrays should be allowed, by the definition
  // above, but they are not.

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Ignore records with flexible arrays.
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const FieldDecl *FD = *i;

    // Bit-fields are not addressable, we only need to verify they are "integer
    // like". We still have to disallow a subsequent non-bitfield, for example:
    //   struct { int : 0; int x }
    // is non-integer like according to gcc.
    if (FD->isBitField()) {
      if (!RD->isUnion())
        HadField = true;

      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
        return false;

      continue;
    }

    // Check if this field is at offset 0.
    if (Layout.getFieldOffset(idx) != 0)
      return false;

    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure. This doesn't match the
    // wording above, but follows gcc in situations with a field following an
    // empty structure.
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}
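
// For example, 'struct { char c; }' and 'union { int i; char c; }' are
// integer-like, while 'struct { float f; }' (floating-point member) and any
// struct with a second addressable field are not.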

ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy, bool isVariadic,
                                          unsigned functionCallConv) const {

  // Variadic functions should always marshal to the base standard.
  bool IsAAPCS_VFP =
      !isVariadic && isEffectivelyAAPCS_VFP(functionCallConv, /* AAPCS16 */ true);

  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // Large vector types should be returned via memory.
    if (getContext().getTypeSize(RetTy) > 128)
      return getNaturalAlignIndirect(RetTy);
    // TODO: FP16/BF16 vectors should be converted to integer vectors
    // This check is similar to isIllegalVectorType - refactor?
    if ((!getTarget().hasLegalHalfType() &&
         (VT->getElementType()->isFloat16Type() ||
          VT->getElementType()->isHalfType())) ||
        (IsFloatABISoftFP &&
         VT->getElementType()->isBFloat16Type()))
      return coerceIllegalVector(RetTy);
  }

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    if (const auto *EIT = RetTy->getAs<ExtIntType>())
      if (EIT->getNumBits() > 64)
        return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);

    return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
                                                : ABIArgInfo::getDirect();
  }

  // Are we following APCS?
  if (getABIKind() == APCS) {
    if (isEmptyRecord(getContext(), RetTy, false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(
          getVMContext(), getContext().getTypeSize(RetTy)));

    // Integer like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    return getNaturalAlignIndirect(RetTy);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Check for homogeneous aggregates with AAPCS-VFP.
  if (IsAAPCS_VFP) {
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(RetTy, Base, Members))
      return classifyHomogeneousAggregate(RetTy, Base, Members);
  }

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    // On RenderScript, coerce Aggregates <= 4 bytes to an integer array of
    // same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(RetTy, getContext(), getVMContext());
    }
    if (getDataLayout().isBigEndian())
      // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4)
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
    llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
    llvm::Type *CoerceTy =
        llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
    return ABIArgInfo::getDirect(CoerceTy);
  }

  return getNaturalAlignIndirect(RetTy);
}

/// isIllegalVector - check whether Ty is an illegal vector type.
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType> ()) {
    // On targets that don't support half, fp16 or bfloat, they are expanded
    // into float, and we don't want the ABI to depend on whether or not they
    // are supported in hardware. Thus return false to coerce vectors of these
    // types into integer vectors.
    // We do not depend on hasLegalHalfType for bfloat as it is a
    // separate IR type.
    if ((!getTarget().hasLegalHalfType() &&
         (VT->getElementType()->isFloat16Type() ||
          VT->getElementType()->isHalfType())) ||
        (IsFloatABISoftFP &&
         VT->getElementType()->isBFloat16Type()))
      return false;
    if (isAndroid()) {
      // Android shipped using Clang 3.1, which supported a slightly different
      // vector ABI. The primary differences were that 3-element vector types
      // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path
      // accepts that legacy behavior for Android only.
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      // NumElements should be power of 2 or equal to 3.
      if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
        return true;
    } else {
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      uint64_t Size = getContext().getTypeSize(VT);
      // NumElements should be power of 2.
      if (!llvm::isPowerOf2_32(NumElements))
        return true;
      // Size should be greater than 32 bits.
      return Size <= 32;
    }
  }
  return false;
}
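
// For example, '<3 x float>' and the 16-bit '<2 x i8>' are accepted as legal
// under the Android legacy path above, but are reported illegal (and thus
// coerced) on every other ARM target.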

/// Return true if a type contains any 16-bit floating point vectors
bool ARMABIInfo::containsAnyFP16Vectors(QualType Ty) const {
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElements = AT->getSize().getZExtValue();
    if (NElements == 0)
      return false;
    return containsAnyFP16Vectors(AT->getElementType());
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
      if (llvm::any_of(CXXRD->bases(), [this](const CXXBaseSpecifier &B) {
            return containsAnyFP16Vectors(B.getType());
          }))
        return true;

    if (llvm::any_of(RD->fields(), [this](FieldDecl *FD) {
          return FD && containsAnyFP16Vectors(FD->getType());
        }))
      return true;

    return false;
  } else {
    if (const VectorType *VT = Ty->getAs<VectorType>())
      return (VT->getElementType()->isFloat16Type() ||
              VT->getElementType()->isBFloat16Type() ||
              VT->getElementType()->isHalfType());
    return false;
  }
}
6744 bool ARMABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
6746 unsigned numElts) const {
6747 if (!llvm::isPowerOf2_32(numElts))
6749 unsigned size = getDataLayout().getTypeStoreSizeInBits(eltTy);
6752 if (vectorSize.getQuantity() != 8 &&
6753 (vectorSize.getQuantity() != 16 || numElts == 1))
6758 bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
6759 // Homogeneous aggregates for AAPCS-VFP must have base types of float,
6760 // double, or 64-bit or 128-bit vectors.
6761 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
6762 if (BT->getKind() == BuiltinType::Float ||
6763 BT->getKind() == BuiltinType::Double ||
6764 BT->getKind() == BuiltinType::LongDouble)
6766 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
6767 unsigned VecSize = getContext().getTypeSize(VT);
6768 if (VecSize == 64 || VecSize == 128)
6769 return true;
6770 }
6771 return false;
6772 }
6774 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
6775 uint64_t Members) const {
6776 return Members <= 4;
6777 }
6779 bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention,
6780 bool acceptHalf) const {
6781 // Give precedence to user-specified calling conventions.
6782 if (callConvention != llvm::CallingConv::C)
6783 return (callConvention == llvm::CallingConv::ARM_AAPCS_VFP);
6785 return (getABIKind() == AAPCS_VFP) ||
6786 (acceptHalf && (getABIKind() == AAPCS16_VFP));
6787 }
6789 Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6790 QualType Ty) const {
6791 CharUnits SlotSize = CharUnits::fromQuantity(4);
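6792 // All stack argument slots are 4 bytes wide under AAPCS; sizes below are rounded up to this slot size.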
6793 // Empty records are ignored for parameter passing purposes.
6794 if (isEmptyRecord(getContext(), Ty, true)) {
6795 Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
6796 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
6797 return Addr;
6798 }
6800 CharUnits TySize = getContext().getTypeSizeInChars(Ty);
6801 CharUnits TyAlignForABI = getContext().getTypeUnadjustedAlignInChars(Ty);
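6802 // Note: the "unadjusted" alignment is the type's natural ABI alignment, before any user-specified alignment attributes are applied.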
6803 // Use indirect if size of the illegal vector is bigger than 16 bytes.
6804 bool IsIndirect = false;
6805 const Type *Base = nullptr;
6806 uint64_t Members = 0;
6807 if (TySize > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
6808 IsIndirect = true;
6810 // ARMv7k passes structs bigger than 16 bytes indirectly, in space
6811 // allocated by the caller.
6812 } else if (TySize > CharUnits::fromQuantity(16) &&
6813 getABIKind() == ARMABIInfo::AAPCS16_VFP &&
6814 !isHomogeneousAggregate(Ty, Base, Members)) {
6815 IsIndirect = true;
6817 // Otherwise, bound the type's ABI alignment.
6818 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
6819 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
6820 // Our callers should be prepared to handle an under-aligned address.
6821 } else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
6822 getABIKind() == ARMABIInfo::AAPCS) {
6823 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
6824 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
6825 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
6826 // ARMv7k allows type alignment up to 16 bytes.
6827 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
6828 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
6829 } else {
6830 TyAlignForABI = CharUnits::fromQuantity(4);
6831 }
6833 std::pair<CharUnits, CharUnits> TyInfo = { TySize, TyAlignForABI };
6834 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
6835 SlotSize, /*AllowHigherAlign*/ true);
6836 }
6838 //===----------------------------------------------------------------------===//
6839 // NVPTX ABI Implementation
6840 //===----------------------------------------------------------------------===//
6842 namespace {
6844 class NVPTXTargetCodeGenInfo;
6846 class NVPTXABIInfo : public ABIInfo {
6847 NVPTXTargetCodeGenInfo &CGInfo;
6849 public:
6850 NVPTXABIInfo(CodeGenTypes &CGT, NVPTXTargetCodeGenInfo &Info)
6851 : ABIInfo(CGT), CGInfo(Info) {}
6853 ABIArgInfo classifyReturnType(QualType RetTy) const;
6854 ABIArgInfo classifyArgumentType(QualType Ty) const;
6856 void computeInfo(CGFunctionInfo &FI) const override;
6857 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6858 QualType Ty) const override;
6859 bool isUnsupportedType(QualType T) const;
6860 ABIArgInfo coerceToIntArrayWithLimit(QualType Ty, unsigned MaxSize) const;
6861 };
6863 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
6864 public:
6865 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
6866 : TargetCodeGenInfo(std::make_unique<NVPTXABIInfo>(CGT, *this)) {}
6868 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6869 CodeGen::CodeGenModule &M) const override;
6870 bool shouldEmitStaticExternCAliases() const override;
6872 llvm::Type *getCUDADeviceBuiltinSurfaceDeviceType() const override {
6873 // On the device side, surface reference is represented as an object handle
6874 // in 64-bit integer.
6875 return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
6876 }
6878 llvm::Type *getCUDADeviceBuiltinTextureDeviceType() const override {
6879 // On the device side, texture reference is represented as an object handle
6880 // in 64-bit integer.
6881 return llvm::Type::getInt64Ty(getABIInfo().getVMContext());
6882 }
6884 bool emitCUDADeviceBuiltinSurfaceDeviceCopy(CodeGenFunction &CGF, LValue Dst,
6885 LValue Src) const override {
6886 emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
6887 return true;
6888 }
6890 bool emitCUDADeviceBuiltinTextureDeviceCopy(CodeGenFunction &CGF, LValue Dst,
6891 LValue Src) const override {
6892 emitBuiltinSurfTexDeviceCopy(CGF, Dst, Src);
6893 return true;
6894 }
6896 private:
6897 // Adds a NamedMDNode with GV, Name, and Operand as operands, and adds the
6898 // resulting MDNode to the nvvm.annotations MDNode.
6899 static void addNVVMMetadata(llvm::GlobalValue *GV, StringRef Name,
6900 int Operand);
6902 static void emitBuiltinSurfTexDeviceCopy(CodeGenFunction &CGF, LValue Dst,
6903 LValue Src) {
6904 llvm::Value *Handle = nullptr;
6905 llvm::Constant *C =
6906 llvm::dyn_cast<llvm::Constant>(Src.getAddress(CGF).getPointer());
6907 // Look up `addrspacecast` through the constant pointer, if any.
6908 if (auto *ASC = llvm::dyn_cast_or_null<llvm::AddrSpaceCastOperator>(C))
6909 C = llvm::cast<llvm::Constant>(ASC->getPointerOperand());
6910 if (auto *GV = llvm::dyn_cast_or_null<llvm::GlobalVariable>(C)) {
6911 // Load the handle from the specific global variable using
6912 // `nvvm.texsurf.handle.internal` intrinsic.
6913 Handle = CGF.EmitRuntimeCall(
6914 CGF.CGM.getIntrinsic(llvm::Intrinsic::nvvm_texsurf_handle_internal,
6916 {GV}, "texsurf_handle");
6918 Handle = CGF.EmitLoadOfScalar(Src, SourceLocation());
6919 CGF.EmitStoreOfScalar(Handle, Dst);
6923 /// Checks if the type is unsupported directly by the current target.
6924 bool NVPTXABIInfo::isUnsupportedType(QualType T) const {
6925 ASTContext &Context = getContext();
6926 if (!Context.getTargetInfo().hasFloat16Type() && T->isFloat16Type())
6927 return true;
6928 if (!Context.getTargetInfo().hasFloat128Type() &&
6929 (T->isFloat128Type() ||
6930 (T->isRealFloatingType() && Context.getTypeSize(T) == 128)))
6931 return true;
6932 if (const auto *EIT = T->getAs<ExtIntType>())
6933 return EIT->getNumBits() >
6934 (Context.getTargetInfo().hasInt128Type() ? 128U : 64U);
6935 if (!Context.getTargetInfo().hasInt128Type() && T->isIntegerType() &&
6936 Context.getTypeSize(T) > 64U)
6937 return true;
6938 if (const auto *AT = T->getAsArrayTypeUnsafe())
6939 return isUnsupportedType(AT->getElementType());
6940 const auto *RT = T->getAs<RecordType>();
6941 if (!RT)
6942 return false;
6943 const RecordDecl *RD = RT->getDecl();
6945 // If this is a C++ record, check the bases first.
6946 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
6947 for (const CXXBaseSpecifier &I : CXXRD->bases())
6948 if (isUnsupportedType(I.getType()))
6949 return true;
6951 for (const FieldDecl *I : RD->fields())
6952 if (isUnsupportedType(I->getType()))
6953 return true;
6954 return false;
6955 }
6957 /// Coerce the given type into an array with maximum allowed size of elements.
6958 ABIArgInfo NVPTXABIInfo::coerceToIntArrayWithLimit(QualType Ty,
6959 unsigned MaxSize) const {
6960 // Alignment and Size are measured in bits.
6961 const uint64_t Size = getContext().getTypeSize(Ty);
6962 const uint64_t Alignment = getContext().getTypeAlign(Ty);
6963 const unsigned Div = std::min<unsigned>(MaxSize, Alignment);
6964 llvm::Type *IntType = llvm::Type::getIntNTy(getVMContext(), Div);
6965 const uint64_t NumElements = (Size + Div - 1) / Div;
6966 return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
6967 }
6969 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
6970 if (RetTy->isVoidType())
6971 return ABIArgInfo::getIgnore();
6973 if (getContext().getLangOpts().OpenMP &&
6974 getContext().getLangOpts().OpenMPIsDevice && isUnsupportedType(RetTy))
6975 return coerceToIntArrayWithLimit(RetTy, 64);
6977 // Note: unlike the default ABI, non-scalar types are returned directly rather than indirectly.
6978 if (!RetTy->isScalarType())
6979 return ABIArgInfo::getDirect();
6981 // Treat an enum type as its underlying type.
6982 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
6983 RetTy = EnumTy->getDecl()->getIntegerType();
6985 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
6986 : ABIArgInfo::getDirect());
6987 }
6989 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
6990 // Treat an enum type as its underlying type.
6991 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6992 Ty = EnumTy->getDecl()->getIntegerType();
6994 // Return aggregates type as indirect by value
6995 if (isAggregateTypeForABI(Ty)) {
6996 // Under CUDA device compilation, tex/surf builtin types are replaced with
6997 // object types and passed directly.
6998 if (getContext().getLangOpts().CUDAIsDevice) {
6999 if (Ty->isCUDADeviceBuiltinSurfaceType())
7000 return ABIArgInfo::getDirect(
7001 CGInfo.getCUDADeviceBuiltinSurfaceDeviceType());
7002 if (Ty->isCUDADeviceBuiltinTextureType())
7003 return ABIArgInfo::getDirect(
7004 CGInfo.getCUDADeviceBuiltinTextureDeviceType());
7005 }
7006 return getNaturalAlignIndirect(Ty, /* byval */ true);
7007 }
7009 if (const auto *EIT = Ty->getAs<ExtIntType>()) {
7010 if ((EIT->getNumBits() > 128) ||
7011 (!getContext().getTargetInfo().hasInt128Type() &&
7012 EIT->getNumBits() > 64))
7013 return getNaturalAlignIndirect(Ty, /* byval */ true);
7014 }
7016 return (isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
7017 : ABIArgInfo::getDirect());
7018 }
7020 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
7021 if (!getCXXABI().classifyReturnType(FI))
7022 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7023 for (auto &I : FI.arguments())
7024 I.info = classifyArgumentType(I.type);
7026 // Always honor user-specified calling convention.
7027 if (FI.getCallingConvention() != llvm::CallingConv::C)
7028 return;
7030 FI.setEffectiveCallingConvention(getRuntimeCC());
7031 }
7033 Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7034 QualType Ty) const {
7035 llvm_unreachable("NVPTX does not support varargs");
7038 void NVPTXTargetCodeGenInfo::setTargetAttributes(
7039 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
7040 if (GV->isDeclaration())
7041 return;
7042 const VarDecl *VD = dyn_cast_or_null<VarDecl>(D);
7043 if (VD) {
7044 if (M.getLangOpts().CUDA) {
7045 if (VD->getType()->isCUDADeviceBuiltinSurfaceType())
7046 addNVVMMetadata(GV, "surface", 1);
7047 else if (VD->getType()->isCUDADeviceBuiltinTextureType())
7048 addNVVMMetadata(GV, "texture", 1);
7053 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
7054 if (!FD)
7055 return;
7056 llvm::Function *F = cast<llvm::Function>(GV);
7058 // Perform special handling in OpenCL mode
7059 if (M.getLangOpts().OpenCL) {
7060 // Use OpenCL function attributes to check for kernel functions
7061 // By default, all functions are device functions
7062 if (FD->hasAttr<OpenCLKernelAttr>()) {
7063 // OpenCL __kernel functions get kernel metadata
7064 // Create !{<func-ref>, metadata !"kernel", i32 1} node
7065 addNVVMMetadata(F, "kernel", 1);
7066 // And kernel functions are not subject to inlining
7067 F->addFnAttr(llvm::Attribute::NoInline);
7068 }
7069 }
7071 // Perform special handling in CUDA mode.
7072 if (M.getLangOpts().CUDA) {
7073 // CUDA __global__ functions get a kernel metadata entry. Since
7074 // __global__ functions cannot be called from the device, we do not
7075 // need to set the noinline attribute.
7076 if (FD->hasAttr<CUDAGlobalAttr>()) {
7077 // Create !{<func-ref>, metadata !"kernel", i32 1} node
7078 addNVVMMetadata(F, "kernel", 1);
7080 if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
7081 // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
7082 llvm::APSInt MaxThreads(32);
7083 MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
7085 addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());
7087 // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was
7088 // not specified in __launch_bounds__ or if the user specified a 0 value,
7089 // we don't have to add a PTX directive.
7090 if (Attr->getMinBlocks()) {
7091 llvm::APSInt MinBlocks(32);
7092 MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
7094 // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
7095 addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
7101 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::GlobalValue *GV,
7102 StringRef Name, int Operand) {
7103 llvm::Module *M = GV->getParent();
7104 llvm::LLVMContext &Ctx = M->getContext();
7106 // Get "nvvm.annotations" metadata node
7107 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
7109 llvm::Metadata *MDVals[] = {
7110 llvm::ConstantAsMetadata::get(GV), llvm::MDString::get(Ctx, Name),
7111 llvm::ConstantAsMetadata::get(
7112 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
7113 // Append metadata to nvvm.annotations
7114 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
7115 }
7117 bool NVPTXTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
7118 return false;
7119 }
7120 }
7122 //===----------------------------------------------------------------------===//
7123 // SystemZ ABI Implementation
7124 //===----------------------------------------------------------------------===//
7126 namespace {
7128 class SystemZABIInfo : public SwiftABIInfo {
7129 bool HasVector;
7130 bool IsSoftFloatABI;
7132 public:
7133 SystemZABIInfo(CodeGenTypes &CGT, bool HV, bool SF)
7134 : SwiftABIInfo(CGT), HasVector(HV), IsSoftFloatABI(SF) {}
7136 bool isPromotableIntegerTypeForABI(QualType Ty) const;
7137 bool isCompoundType(QualType Ty) const;
7138 bool isVectorArgumentType(QualType Ty) const;
7139 bool isFPArgumentType(QualType Ty) const;
7140 QualType GetSingleElementType(QualType Ty) const;
7142 ABIArgInfo classifyReturnType(QualType RetTy) const;
7143 ABIArgInfo classifyArgumentType(QualType ArgTy) const;
7145 void computeInfo(CGFunctionInfo &FI) const override {
7146 if (!getCXXABI().classifyReturnType(FI))
7147 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7148 for (auto &I : FI.arguments())
7149 I.info = classifyArgumentType(I.type);
7150 }
7152 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7153 QualType Ty) const override;
7155 bool shouldPassIndirectlyForSwift(ArrayRef<llvm::Type*> scalars,
7156 bool asReturnValue) const override {
7157 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
7158 }
7159 bool isSwiftErrorInRegister() const override {
7160 return false;
7161 }
7162 };
7164 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
7165 public:
7166 SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector, bool SoftFloatABI)
7167 : TargetCodeGenInfo(
7168 std::make_unique<SystemZABIInfo>(CGT, HasVector, SoftFloatABI)) {}
7169 };
7171 }
7173 bool SystemZABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
7174 // Treat an enum type as its underlying type.
7175 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
7176 Ty = EnumTy->getDecl()->getIntegerType();
7178 // Promotable integer types are required to be promoted by the ABI.
7179 if (ABIInfo::isPromotableIntegerTypeForABI(Ty))
7180 return true;
7182 if (const auto *EIT = Ty->getAs<ExtIntType>())
7183 if (EIT->getNumBits() < 64)
7184 return true;
7186 // 32-bit values must also be promoted.
7187 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
7188 switch (BT->getKind()) {
7189 case BuiltinType::Int:
7190 case BuiltinType::UInt:
7191 return true;
7192 default:
7193 return false;
7194 }
7195 return false;
7196 }
7198 bool SystemZABIInfo::isCompoundType(QualType Ty) const {
7199 return (Ty->isAnyComplexType() ||
7200 Ty->isVectorType() ||
7201 isAggregateTypeForABI(Ty));
7202 }
7204 bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
7205 return (HasVector &&
7206 Ty->isVectorType() &&
7207 getContext().getTypeSize(Ty) <= 128);
7208 }
7210 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
7211 if (IsSoftFloatABI)
7212 return false;
7214 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
7215 switch (BT->getKind()) {
7216 case BuiltinType::Float:
7217 case BuiltinType::Double:
7218 return true;
7219 default:
7220 return false;
7221 }
7223 return false;
7224 }
7226 QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
7227 const RecordType *RT = Ty->getAs<RecordType>();
7229 if (RT && RT->isStructureOrClassType()) {
7230 const RecordDecl *RD = RT->getDecl();
7231 QualType Found;
7233 // If this is a C++ record, check the bases first.
7234 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
7235 for (const auto &I : CXXRD->bases()) {
7236 QualType Base = I.getType();
7238 // Empty bases don't affect things either way.
7239 if (isEmptyRecord(getContext(), Base, true))
7240 continue;
7242 if (!Found.isNull())
7243 return Ty;
7244 Found = GetSingleElementType(Base);
7245 }
7247 // Check the fields.
7248 for (const auto *FD : RD->fields()) {
7249 // For compatibility with GCC, ignore empty bitfields in C++ mode.
7250 // Unlike isSingleElementStruct(), empty structure and array fields
7251 // do count. So do anonymous bitfields that aren't zero-sized.
7252 if (getContext().getLangOpts().CPlusPlus &&
7253 FD->isZeroLengthBitField(getContext()))
7254 continue;
7255 // Like isSingleElementStruct(), ignore C++20 empty data members.
7256 if (FD->hasAttr<NoUniqueAddressAttr>() &&
7257 isEmptyRecord(getContext(), FD->getType(), true))
7258 continue;
7260 // Unlike isSingleElementStruct(), arrays do not count.
7261 // Nested structures still do though.
7262 if (!Found.isNull())
7263 return Ty;
7264 Found = GetSingleElementType(FD->getType());
7265 }
7267 // Unlike isSingleElementStruct(), trailing padding is allowed.
7268 // An 8-byte aligned struct s { float f; } is passed as a double.
7269 if (!Found.isNull())
7270 return Found;
7271 }
7273 return Ty;
7274 }
7276 Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7277 QualType Ty) const {
7278 // Assume that va_list type is correct; should be pointer to LLVM type:
7279 // struct {
7280 // i64 __gpr;
7281 // i64 __fpr;
7282 // i8 *__overflow_arg_area;
7283 // i8 *__reg_save_area;
7284 // };
7286 // Every non-vector argument occupies 8 bytes and is passed by preference
7287 // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
7288 // always passed on the stack.
7289 Ty = getContext().getCanonicalType(Ty);
7290 auto TyInfo = getContext().getTypeInfoInChars(Ty);
7291 llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
7292 llvm::Type *DirectTy = ArgTy;
7293 ABIArgInfo AI = classifyArgumentType(Ty);
7294 bool IsIndirect = AI.isIndirect();
7295 bool InFPRs = false;
7296 bool IsVector = false;
7297 CharUnits UnpaddedSize;
7298 CharUnits DirectAlign;
7299 if (IsIndirect) {
7300 DirectTy = llvm::PointerType::getUnqual(DirectTy);
7301 UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
7302 } else {
7303 if (AI.getCoerceToType())
7304 ArgTy = AI.getCoerceToType();
7305 InFPRs = (!IsSoftFloatABI && (ArgTy->isFloatTy() || ArgTy->isDoubleTy()));
7306 IsVector = ArgTy->isVectorTy();
7307 UnpaddedSize = TyInfo.first;
7308 DirectAlign = TyInfo.second;
7309 }
7310 CharUnits PaddedSize = CharUnits::fromQuantity(8);
7311 if (IsVector && UnpaddedSize > PaddedSize)
7312 PaddedSize = CharUnits::fromQuantity(16);
7313 assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.");
7315 CharUnits Padding = (PaddedSize - UnpaddedSize);
7317 llvm::Type *IndexTy = CGF.Int64Ty;
7318 llvm::Value *PaddedSizeV =
7319 llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());
7321 if (IsVector) {
7322 // Work out the address of a vector argument on the stack.
7323 // Vector arguments are always passed in the high bits of a
7324 // single (8 byte) or double (16 byte) stack slot.
7325 Address OverflowArgAreaPtr =
7326 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
7327 Address OverflowArgArea =
7328 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
7331 CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");
7333 // Update overflow_arg_area_ptr pointer
7334 llvm::Value *NewOverflowArgArea =
7335 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
7336 "overflow_arg_area");
7337 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
7339 return MemAddr;
7340 }
7342 assert(PaddedSize.getQuantity() == 8);
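7343 // Decide which register file the argument would occupy: the ELF ABI passes the first 4 FP arguments in f0/f2/f4/f6 and the first 5 integer arguments in r2-r6; everything after that goes to the overflow area.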
7344 unsigned MaxRegs, RegCountField, RegSaveIndex;
7345 CharUnits RegPadding;
7346 if (InFPRs) {
7347 MaxRegs = 4; // Maximum of 4 FPR arguments
7348 RegCountField = 1; // __fpr
7349 RegSaveIndex = 16; // save offset for f0
7350 RegPadding = CharUnits(); // floats are passed in the high bits of an FPR
7351 } else {
7352 MaxRegs = 5; // Maximum of 5 GPR arguments
7353 RegCountField = 0; // __gpr
7354 RegSaveIndex = 2; // save offset for r2
7355 RegPadding = Padding; // values are passed in the low bits of a GPR
7356 }
7358 Address RegCountPtr =
7359 CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
7360 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
7361 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
7362 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
7363 "fits_in_regs");
7365 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
7366 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
7367 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
7368 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
7370 // Emit code to load the value if it was passed in registers.
7371 CGF.EmitBlock(InRegBlock);
7373 // Work out the address of an argument register.
7374 llvm::Value *ScaledRegCount =
7375 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
7376 llvm::Value *RegBase =
7377 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity()
7378 + RegPadding.getQuantity());
7379 llvm::Value *RegOffset =
7380 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
7381 Address RegSaveAreaPtr =
7382 CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
7383 llvm::Value *RegSaveArea =
7384 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
7385 Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset,
7386 "raw_reg_addr"),
7387 PaddedSize);
7388 Address RegAddr =
7389 CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr");
7391 // Update the register count
7392 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
7393 llvm::Value *NewRegCount =
7394 CGF.Builder.CreateAdd(RegCount, One, "reg_count");
7395 CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
7396 CGF.EmitBranch(ContBlock);
7398 // Emit code to load the value if it was passed in memory.
7399 CGF.EmitBlock(InMemBlock);
7401 // Work out the address of a stack argument.
7402 Address OverflowArgAreaPtr =
7403 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
7404 Address OverflowArgArea =
7405 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
7407 Address RawMemAddr =
7408 CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
7410 CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr");
7412 // Update overflow_arg_area_ptr pointer
7413 llvm::Value *NewOverflowArgArea =
7414 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
7415 "overflow_arg_area");
7416 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
7417 CGF.EmitBranch(ContBlock);
7419 // Return the appropriate result.
7420 CGF.EmitBlock(ContBlock);
7421 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
7422 MemAddr, InMemBlock, "va_arg.addr");
7425 ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"),
7431 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
7432 if (RetTy->isVoidType())
7433 return ABIArgInfo::getIgnore();
7434 if (isVectorArgumentType(RetTy))
7435 return ABIArgInfo::getDirect();
7436 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
7437 return getNaturalAlignIndirect(RetTy);
7438 return (isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
7439 : ABIArgInfo::getDirect());
7440 }
7442 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
7443 // Handle the generic C++ ABI.
7444 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
7445 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
7447 // Integers and enums are extended to full register width.
7448 if (isPromotableIntegerTypeForABI(Ty))
7449 return ABIArgInfo::getExtend(Ty);
7451 // Handle vector types and vector-like structure types. Note that
7452 // as opposed to float-like structure types, we do not allow any
7453 // padding for vector-like structures, so verify the sizes match.
7454 uint64_t Size = getContext().getTypeSize(Ty);
7455 QualType SingleElementTy = GetSingleElementType(Ty);
7456 if (isVectorArgumentType(SingleElementTy) &&
7457 getContext().getTypeSize(SingleElementTy) == Size)
7458 return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));
7460 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
7461 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
7462 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
7464 // Handle small structures.
7465 if (const RecordType *RT = Ty->getAs<RecordType>()) {
7466 // Structures with flexible arrays have variable length, so they should
7467 // really fail the size test above.
7468 const RecordDecl *RD = RT->getDecl();
7469 if (RD->hasFlexibleArrayMember())
7470 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
7472 // The structure is passed as an unextended integer, a float, or a double.
7473 llvm::Type *PassTy;
7474 if (isFPArgumentType(SingleElementTy)) {
7475 assert(Size == 32 || Size == 64);
7476 if (Size == 32)
7477 PassTy = llvm::Type::getFloatTy(getVMContext());
7478 else
7479 PassTy = llvm::Type::getDoubleTy(getVMContext());
7480 } else
7481 PassTy = llvm::IntegerType::get(getVMContext(), Size);
7482 return ABIArgInfo::getDirect(PassTy);
7483 }
7485 // Non-structure compounds are passed indirectly.
7486 if (isCompoundType(Ty))
7487 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
7489 return ABIArgInfo::getDirect(nullptr);
7490 }
7492 //===----------------------------------------------------------------------===//
7493 // MSP430 ABI Implementation
7494 //===----------------------------------------------------------------------===//
7496 namespace {
7498 class MSP430ABIInfo : public DefaultABIInfo {
7499 static ABIArgInfo complexArgInfo() {
7500 ABIArgInfo Info = ABIArgInfo::getDirect();
7501 Info.setCanBeFlattened(false);
7502 return Info;
7503 }
7505 public:
7506 MSP430ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
7508 ABIArgInfo classifyReturnType(QualType RetTy) const {
7509 if (RetTy->isAnyComplexType())
7510 return complexArgInfo();
7512 return DefaultABIInfo::classifyReturnType(RetTy);
7513 }
7515 ABIArgInfo classifyArgumentType(QualType RetTy) const {
7516 if (RetTy->isAnyComplexType())
7517 return complexArgInfo();
7519 return DefaultABIInfo::classifyArgumentType(RetTy);
7520 }
7522 // Just copy the original implementations because
7523 // DefaultABIInfo::classify{Return,Argument}Type() are not virtual
7524 void computeInfo(CGFunctionInfo &FI) const override {
7525 if (!getCXXABI().classifyReturnType(FI))
7526 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7527 for (auto &I : FI.arguments())
7528 I.info = classifyArgumentType(I.type);
7529 }
7531 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7532 QualType Ty) const override {
7533 return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
7534 }
7535 };
7537 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
7538 public:
7539 MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
7540 : TargetCodeGenInfo(std::make_unique<MSP430ABIInfo>(CGT)) {}
7541 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
7542 CodeGen::CodeGenModule &M) const override;
7543 };
7545 }
7547 void MSP430TargetCodeGenInfo::setTargetAttributes(
7548 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
7549 if (GV->isDeclaration())
7550 return;
7551 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
7552 const auto *InterruptAttr = FD->getAttr<MSP430InterruptAttr>();
7553 if (!InterruptAttr)
7554 return;
7556 // Handle 'interrupt' attribute:
7557 llvm::Function *F = cast<llvm::Function>(GV);
7559 // Step 1: Set ISR calling convention.
7560 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
7562 // Step 2: Add attributes goodness.
7563 F->addFnAttr(llvm::Attribute::NoInline);
7564 F->addFnAttr("interrupt", llvm::utostr(InterruptAttr->getNumber()));
7568 //===----------------------------------------------------------------------===//
7569 // MIPS ABI Implementation. This works for both little-endian and
7570 // big-endian variants.
7571 //===----------------------------------------------------------------------===//
7573 namespace {
7574 class MipsABIInfo : public ABIInfo {
7575 bool IsO32;
7576 unsigned MinABIStackAlignInBytes, StackAlignInBytes;
7577 void CoerceToIntArgs(uint64_t TySize,
7578 SmallVectorImpl<llvm::Type *> &ArgList) const;
7579 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
7580 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
7581 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
7582 public:
7583 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
7584 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
7585 StackAlignInBytes(IsO32 ? 8 : 16) {}
7587 ABIArgInfo classifyReturnType(QualType RetTy) const;
7588 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
7589 void computeInfo(CGFunctionInfo &FI) const override;
7590 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7591 QualType Ty) const override;
7592 ABIArgInfo extendType(QualType Ty) const;
7593 };
7595 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
7596 unsigned SizeOfUnwindException;
7597 public:
7598 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
7599 : TargetCodeGenInfo(std::make_unique<MipsABIInfo>(CGT, IsO32)),
7600 SizeOfUnwindException(IsO32 ? 24 : 32) {}
7602 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
7603 return 29;
7604 }
7606 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
7607 CodeGen::CodeGenModule &CGM) const override {
7608 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
7609 if (!FD) return;
7610 llvm::Function *Fn = cast<llvm::Function>(GV);
7612 if (FD->hasAttr<MipsLongCallAttr>())
7613 Fn->addFnAttr("long-call");
7614 else if (FD->hasAttr<MipsShortCallAttr>())
7615 Fn->addFnAttr("short-call");
7617 // Other attributes do not have a meaning for declarations.
7618 if (GV->isDeclaration())
7619 return;
7621 if (FD->hasAttr<Mips16Attr>()) {
7622 Fn->addFnAttr("mips16");
7624 else if (FD->hasAttr<NoMips16Attr>()) {
7625 Fn->addFnAttr("nomips16");
7628 if (FD->hasAttr<MicroMipsAttr>())
7629 Fn->addFnAttr("micromips");
7630 else if (FD->hasAttr<NoMicroMipsAttr>())
7631 Fn->addFnAttr("nomicromips");
7633 const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>();
7634 if (!Attr)
7635 return;
7637 const char *Kind;
7638 switch (Attr->getInterrupt()) {
7639 case MipsInterruptAttr::eic: Kind = "eic"; break;
7640 case MipsInterruptAttr::sw0: Kind = "sw0"; break;
7641 case MipsInterruptAttr::sw1: Kind = "sw1"; break;
7642 case MipsInterruptAttr::hw0: Kind = "hw0"; break;
7643 case MipsInterruptAttr::hw1: Kind = "hw1"; break;
7644 case MipsInterruptAttr::hw2: Kind = "hw2"; break;
7645 case MipsInterruptAttr::hw3: Kind = "hw3"; break;
7646 case MipsInterruptAttr::hw4: Kind = "hw4"; break;
7647 case MipsInterruptAttr::hw5: Kind = "hw5"; break;
7650 Fn->addFnAttr("interrupt", Kind);
7654 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
7655 llvm::Value *Address) const override;
7657 unsigned getSizeOfUnwindException() const override {
7658 return SizeOfUnwindException;
7659 }
7660 };
7661 }
7663 void MipsABIInfo::CoerceToIntArgs(
7664 uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
7665 llvm::IntegerType *IntTy =
7666 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
7668 // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
7669 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
7670 ArgList.push_back(IntTy);
7672 // If necessary, add one more integer type to ArgList.
7673 unsigned R = TySize % (MinABIStackAlignInBytes * 8);
7675 if (R)
7676 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
7677 }
7679 // In N32/64, an aligned double precision floating point field is passed in
7680 // a register.
7681 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
7682 SmallVector<llvm::Type*, 8> ArgList, IntArgList;
7684 if (IsO32) {
7685 CoerceToIntArgs(TySize, ArgList);
7686 return llvm::StructType::get(getVMContext(), ArgList);
7687 }
7689 if (Ty->isComplexType())
7690 return CGT.ConvertType(Ty);
7692 const RecordType *RT = Ty->getAs<RecordType>();
7694 // Unions/vectors are passed in integer registers.
7695 if (!RT || !RT->isStructureOrClassType()) {
7696 CoerceToIntArgs(TySize, ArgList);
7697 return llvm::StructType::get(getVMContext(), ArgList);
7700 const RecordDecl *RD = RT->getDecl();
7701 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
7702 assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
7704 uint64_t LastOffset = 0;
7705 unsigned idx = 0;
7706 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
7708 // Iterate over fields in the struct/class and check if there are any aligned
7709 // double fields.
7710 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
7711 i != e; ++i, ++idx) {
7712 const QualType Ty = i->getType();
7713 const BuiltinType *BT = Ty->getAs<BuiltinType>();
7715 if (!BT || BT->getKind() != BuiltinType::Double)
7716 continue;
7718 uint64_t Offset = Layout.getFieldOffset(idx);
7719 if (Offset % 64) // Ignore doubles that are not aligned.
7720 continue;
7722 // Add ((Offset - LastOffset) / 64) args of type i64.
7723 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
7724 ArgList.push_back(I64);
7726 // Add double type.
7727 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
7728 LastOffset = Offset + 64;
7729 }
7731 CoerceToIntArgs(TySize - LastOffset, IntArgList);
7732 ArgList.append(IntArgList.begin(), IntArgList.end());
7734 return llvm::StructType::get(getVMContext(), ArgList);
7735 }
7737 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
7738 uint64_t Offset) const {
7739 if (OrigOffset + MinABIStackAlignInBytes > Offset)
7740 return nullptr;
7742 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
7743 }
7745 ABIArgInfo
7746 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
7747 Ty = useFirstFieldIfTransparentUnion(Ty);
7749 uint64_t OrigOffset = Offset;
7750 uint64_t TySize = getContext().getTypeSize(Ty);
7751 uint64_t Align = getContext().getTypeAlign(Ty) / 8;
7753 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
7754 (uint64_t)StackAlignInBytes);
7755 unsigned CurrOffset = llvm::alignTo(Offset, Align);
7756 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;
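7757 // Offset now points just past this argument; the distance from OrigOffset to CurrOffset is the padding the argument needed to reach its alignment.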
7758 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
7759 // Ignore empty aggregates.
7760 if (TySize == 0)
7761 return ABIArgInfo::getIgnore();
7763 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
7764 Offset = OrigOffset + MinABIStackAlignInBytes;
7765 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
7766 }
7768 // If we have reached here, aggregates are passed directly by coercing to
7769 // another structure type. Padding is inserted if the offset of the
7770 // aggregate is unaligned.
7771 ABIArgInfo ArgInfo =
7772 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
7773 getPaddingType(OrigOffset, CurrOffset));
7774 ArgInfo.setInReg(true);
7775 return ArgInfo;
7776 }
7778 // Treat an enum type as its underlying type.
7779 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
7780 Ty = EnumTy->getDecl()->getIntegerType();
7782 // Make sure we pass indirectly things that are too large.
7783 if (const auto *EIT = Ty->getAs<ExtIntType>())
7784 if (EIT->getNumBits() > 128 ||
7785 (EIT->getNumBits() > 64 &&
7786 !getContext().getTargetInfo().hasInt128Type()))
7787 return getNaturalAlignIndirect(Ty);
7789 // All integral types are promoted to the GPR width.
7790 if (Ty->isIntegralOrEnumerationType())
7791 return extendType(Ty);
7793 return ABIArgInfo::getDirect(
7794 nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
7795 }
7797 llvm::Type*
7798 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
7799 const RecordType *RT = RetTy->getAs<RecordType>();
7800 SmallVector<llvm::Type*, 8> RTList;
7802 if (RT && RT->isStructureOrClassType()) {
7803 const RecordDecl *RD = RT->getDecl();
7804 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
7805 unsigned FieldCnt = Layout.getFieldCount();
7807 // N32/64 returns struct/classes in floating point registers if the
7808 // following conditions are met:
7809 // 1. The size of the struct/class is no larger than 128-bit.
7810 // 2. The struct/class has one or two fields all of which are floating
7811 // point types.
7812 // 3. The offset of the first field is zero (this follows what gcc does).
7814 // Any other composite results are returned in integer registers.
7816 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
7817 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
7818 for (; b != e; ++b) {
7819 const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
7821 if (!BT || !BT->isFloatingPoint())
7822 break;
7824 RTList.push_back(CGT.ConvertType(b->getType()));
7825 }
7827 if (b == e)
7828 return llvm::StructType::get(getVMContext(), RTList,
7829 RD->hasAttr<PackedAttr>());
7831 RTList.clear();
7832 }
7833 }
7835 CoerceToIntArgs(Size, RTList);
7836 return llvm::StructType::get(getVMContext(), RTList);
7837 }
7839 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
7840 uint64_t Size = getContext().getTypeSize(RetTy);
7842 if (RetTy->isVoidType())
7843 return ABIArgInfo::getIgnore();
7845 // O32 doesn't treat zero-sized structs differently from other structs.
7846 // However, N32/N64 ignores zero sized return values.
7847 if (!IsO32 && Size == 0)
7848 return ABIArgInfo::getIgnore();
7850 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
7851 if (Size <= 128) {
7852 if (RetTy->isAnyComplexType())
7853 return ABIArgInfo::getDirect();
7855 // O32 returns integer vectors in registers and N32/N64 returns all small
7856 // aggregates in registers.
7857 if (!IsO32 ||
7858 (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
7859 ABIArgInfo ArgInfo =
7860 ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
7861 ArgInfo.setInReg(true);
7862 return ArgInfo;
7863 }
7864 }
7866 return getNaturalAlignIndirect(RetTy);
7867 }
7869 // Treat an enum type as its underlying type.
7870 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
7871 RetTy = EnumTy->getDecl()->getIntegerType();
7873 // Make sure we pass indirectly things that are too large.
7874 if (const auto *EIT = RetTy->getAs<ExtIntType>())
7875 if (EIT->getNumBits() > 128 ||
7876 (EIT->getNumBits() > 64 &&
7877 !getContext().getTargetInfo().hasInt128Type()))
7878 return getNaturalAlignIndirect(RetTy);
7880 if (isPromotableIntegerTypeForABI(RetTy))
7881 return ABIArgInfo::getExtend(RetTy);
7883 if ((RetTy->isUnsignedIntegerOrEnumerationType() ||
7884 RetTy->isSignedIntegerOrEnumerationType()) && Size == 32 && !IsO32)
7885 return ABIArgInfo::getSignExtend(RetTy);
7887 return ABIArgInfo::getDirect();
7888 }
7890 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
7891 ABIArgInfo &RetInfo = FI.getReturnInfo();
7892 if (!getCXXABI().classifyReturnType(FI))
7893 RetInfo = classifyReturnType(FI.getReturnType());
7895 // Check if a pointer to an aggregate is passed as a hidden argument.
7896 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
7898 for (auto &I : FI.arguments())
7899 I.info = classifyArgumentType(I.type, Offset);
7900 }
7902 Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7903 QualType OrigTy) const {
7904 QualType Ty = OrigTy;
7906 // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
7907 // Pointers are also promoted in the same way but this only matters for N32.
7908 unsigned SlotSizeInBits = IsO32 ? 32 : 64;
7909 unsigned PtrWidth = getTarget().getPointerWidth(0);
7910 bool DidPromote = false;
7911 if ((Ty->isIntegerType() &&
7912 getContext().getIntWidth(Ty) < SlotSizeInBits) ||
7913 (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
7914 DidPromote = true;
7915 Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
7916 Ty->isSignedIntegerType());
7917 }
7919 auto TyInfo = getContext().getTypeInfoInChars(Ty);
7921 // The alignment of things in the argument area is never larger than
7922 // StackAlignInBytes.
7923 TyInfo.second =
7924 std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes));
7926 // MinABIStackAlignInBytes is the size of argument slots on the stack.
7927 CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
7929 Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
7930 TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
7933 // If there was a promotion, "unpromote" into a temporary.
7934 // TODO: can we just use a pointer into a subset of the original slot?
7936 Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
7937 llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);
7939 // Truncate down to the right width.
7940 llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
7941 : CGF.IntPtrTy);
7942 llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
7943 if (OrigTy->isPointerType())
7944 V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());
7946 CGF.Builder.CreateStore(V, Temp);
7947 Addr = Temp;
7948 }
7950 return Addr;
7951 }
7953 ABIArgInfo MipsABIInfo::extendType(QualType Ty) const {
7954 int TySize = getContext().getTypeSize(Ty);
7956 // MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
7957 if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
7958 return ABIArgInfo::getSignExtend(Ty);
7960 return ABIArgInfo::getExtend(Ty);
7961 }
7963 bool
7964 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
7965 llvm::Value *Address) const {
7966 // This information comes from gcc's implementation, which seems to be
7967 // as canonical as it gets.
7969 // Everything on MIPS is 4 bytes. Double-precision FP registers
7970 // are aliased to pairs of single-precision FP registers.
7971 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
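7972 // Every entry in the DWARF register-size table below is filled with this constant 4.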
7973 // 0-31 are the general purpose registers, $0 - $31.
7974 // 32-63 are the floating-point registers, $f0 - $f31.
7975 // 64 and 65 are the multiply/divide registers, $hi and $lo.
7976 // 66 is the (notional, I think) register for signal-handler return.
7977 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
7979 // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
7980 // They are one bit wide and ignored here.
7982 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
7983 // (coprocessor 1 is the FP unit)
7984 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
7985 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
7986 // 176-181 are the DSP accumulator registers.
7987 AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
7989 return false;
7990 }
7991 //===----------------------------------------------------------------------===//
7992 // AVR ABI Implementation.
7993 //===----------------------------------------------------------------------===//
7995 namespace {
7996 class AVRTargetCodeGenInfo : public TargetCodeGenInfo {
7997 public:
7998 AVRTargetCodeGenInfo(CodeGenTypes &CGT)
7999 : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
8001 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
8002 CodeGen::CodeGenModule &CGM) const override {
8003 if (GV->isDeclaration())
8004 return;
8005 const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
8006 if (!FD) return;
8007 auto *Fn = cast<llvm::Function>(GV);
8009 if (FD->getAttr<AVRInterruptAttr>())
8010 Fn->addFnAttr("interrupt");
8012 if (FD->getAttr<AVRSignalAttr>())
8013 Fn->addFnAttr("signal");
8018 //===----------------------------------------------------------------------===//
8019 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
8020 // Currently subclassed only to implement custom OpenCL C function attribute
8021 // handling.
8022 //===----------------------------------------------------------------------===//
8024 namespace {
8026 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
8027 public:
8028 TCETargetCodeGenInfo(CodeGenTypes &CGT)
8029 : DefaultTargetCodeGenInfo(CGT) {}
8031 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
8032 CodeGen::CodeGenModule &M) const override;
8033 };
8035 void TCETargetCodeGenInfo::setTargetAttributes(
8036 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
8037 if (GV->isDeclaration())
8038 return;
8039 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
8040 if (!FD) return;
8042 llvm::Function *F = cast<llvm::Function>(GV);
8044 if (M.getLangOpts().OpenCL) {
8045 if (FD->hasAttr<OpenCLKernelAttr>()) {
8046 // OpenCL C Kernel functions are not subject to inlining
8047 F->addFnAttr(llvm::Attribute::NoInline);
8048 const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
8049 if (Attr) {
8050 // Convert the reqd_work_group_size() attributes to metadata.
8051 llvm::LLVMContext &Context = F->getContext();
8052 llvm::NamedMDNode *OpenCLMetadata =
8053 M.getModule().getOrInsertNamedMetadata(
8054 "opencl.kernel_wg_size_info");
8056 SmallVector<llvm::Metadata *, 5> Operands;
8057 Operands.push_back(llvm::ConstantAsMetadata::get(F));
8059 Operands.push_back(
8060 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
8061 M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
8062 Operands.push_back(
8063 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
8064 M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
8065 Operands.push_back(
8066 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
8067 M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));
8069 // Add a boolean constant operand for "required" (true) or "hint"
8070 // (false) for implementing the work_group_size_hint attr later.
8071 // Currently always true as the hint is not yet implemented.
8072 Operands.push_back(
8073 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
8074 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
8075 }
8076 }
8077 }
8078 }
8080 }
8082 //===----------------------------------------------------------------------===//
8083 // Hexagon ABI Implementation
8084 //===----------------------------------------------------------------------===//
8086 namespace {
8088 class HexagonABIInfo : public DefaultABIInfo {
8089 public:
8090 HexagonABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
8092 private:
8093 ABIArgInfo classifyReturnType(QualType RetTy) const;
8094 ABIArgInfo classifyArgumentType(QualType RetTy) const;
8095 ABIArgInfo classifyArgumentType(QualType RetTy, unsigned *RegsLeft) const;
8097 void computeInfo(CGFunctionInfo &FI) const override;
8099 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8100 QualType Ty) const override;
8101 Address EmitVAArgFromMemory(CodeGenFunction &CFG, Address VAListAddr,
8102 QualType Ty) const;
8103 Address EmitVAArgForHexagon(CodeGenFunction &CFG, Address VAListAddr,
8104 QualType Ty) const;
8105 Address EmitVAArgForHexagonLinux(CodeGenFunction &CFG, Address VAListAddr,
8106 QualType Ty) const;
8107 };
8109 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
8110 public:
8111 HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
8112 : TargetCodeGenInfo(std::make_unique<HexagonABIInfo>(CGT)) {}
8114 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
8115 return 29;
8116 }
8118 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
8119 CodeGen::CodeGenModule &GCM) const override {
8120 if (GV->isDeclaration())
8121 return;
8122 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
8123 if (!FD)
8124 return;
8125 }
8126 };
8128 }
8130 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
8131 unsigned RegsLeft = 6;
8132 if (!getCXXABI().classifyReturnType(FI))
8133 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
8134 for (auto &I : FI.arguments())
8135 I.info = classifyArgumentType(I.type, &RegsLeft);
8136 }
8138 static bool HexagonAdjustRegsLeft(uint64_t Size, unsigned *RegsLeft) {
8139 assert(Size <= 64 && "Not expecting to pass arguments larger than 64 bits"
8140 " through registers");
8150 if (2 <= (*RegsLeft & (~1U))) {
8151 *RegsLeft = (*RegsLeft & (~1U)) - 2;
8152 return true;
8153 }
8155 // Next available register was r5, but the candidate was greater than 32 bits,
8156 // so it has to go on the stack. However, we still consume r5.
8157 if (*RegsLeft == 1)
8158 *RegsLeft = 0;
8160 return false;
8161 }
8163 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty,
8164 unsigned *RegsLeft) const {
8165 if (!isAggregateTypeForABI(Ty)) {
8166 // Treat an enum type as its underlying type.
8167 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
8168 Ty = EnumTy->getDecl()->getIntegerType();
8170 uint64_t Size = getContext().getTypeSize(Ty);
8171 if (Size <= 64)
8172 HexagonAdjustRegsLeft(Size, RegsLeft);
8174 if (Size > 64 && Ty->isExtIntType())
8175 return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
8177 return isPromotableIntegerTypeForABI(Ty) ? ABIArgInfo::getExtend(Ty)
8178 : ABIArgInfo::getDirect();
8179 }
8181 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
8182 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
8184 // Ignore empty records.
8185 if (isEmptyRecord(getContext(), Ty, true))
8186 return ABIArgInfo::getIgnore();
8188 uint64_t Size = getContext().getTypeSize(Ty);
8189 unsigned Align = getContext().getTypeAlign(Ty);
8191 if (Size > 64)
8192 return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
8194 if (HexagonAdjustRegsLeft(Size, RegsLeft))
8195 Align = Size <= 32 ? 32 : 64;
8196 if (Size <= Align) {
8197 // Pass in the smallest viable integer type.
8198 if (!llvm::isPowerOf2_64(Size))
8199 Size = llvm::NextPowerOf2(Size);
8200 return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
8201 }
8202 return DefaultABIInfo::classifyArgumentType(Ty);
8203 }
8205 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
8206 if (RetTy->isVoidType())
8207 return ABIArgInfo::getIgnore();
8209 const TargetInfo &T = CGT.getTarget();
8210 uint64_t Size = getContext().getTypeSize(RetTy);
8212 if (RetTy->getAs<VectorType>()) {
8213 // HVX vectors are returned in vector registers or register pairs.
8214 if (T.hasFeature("hvx")) {
8215 assert(T.hasFeature("hvx-length64b") || T.hasFeature("hvx-length128b"));
8216 uint64_t VecSize = T.hasFeature("hvx-length64b") ? 64*8 : 128*8;
8217 if (Size == VecSize || Size == 2*VecSize)
8218 return ABIArgInfo::getDirectInReg();
8219 }
8220 // Large vector types should be returned via memory.
8221 if (Size > 64)
8222 return getNaturalAlignIndirect(RetTy);
8223 }
8225 if (!isAggregateTypeForABI(RetTy)) {
8226 // Treat an enum type as its underlying type.
8227 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
8228 RetTy = EnumTy->getDecl()->getIntegerType();
8230 if (Size > 64 && RetTy->isExtIntType())
8231 return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
8233 return isPromotableIntegerTypeForABI(RetTy) ? ABIArgInfo::getExtend(RetTy)
8234 : ABIArgInfo::getDirect();
8235 }
8237 if (isEmptyRecord(getContext(), RetTy, true))
8238 return ABIArgInfo::getIgnore();
8240 // Aggregates <= 8 bytes are returned in registers, other aggregates
8241 // are returned indirectly.
8242 if (Size <= 64) {
8243 // Return in the smallest viable integer type.
8244 if (!llvm::isPowerOf2_64(Size))
8245 Size = llvm::NextPowerOf2(Size);
8246 return ABIArgInfo::getDirect(llvm::Type::getIntNTy(getVMContext(), Size));
8247 }
8248 return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
8249 }
8251 Address HexagonABIInfo::EmitVAArgFromMemory(CodeGenFunction &CGF,
8252 Address VAListAddr,
8253 QualType Ty) const {
8254 // Load the overflow area pointer.
8255 Address __overflow_area_pointer_p =
8256 CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
8257 llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
8258 __overflow_area_pointer_p, "__overflow_area_pointer");
8260 uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
8262 // Alignment should be a power of 2.
8263 assert((Align & (Align - 1)) == 0 && "Alignment is not power of 2!");
8265 // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
8266 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
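8267 // For example, with Align == 8 this computes (ptr + 7) & -8, rounding the pointer up to the next 8-byte boundary.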
8268 // Add offset to the current pointer to access the argument.
8269 __overflow_area_pointer =
8270 CGF.Builder.CreateGEP(__overflow_area_pointer, Offset);
8271 llvm::Value *AsInt =
8272 CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);
8274 // Create a mask which should be "AND"ed
8275 // with (overflow_arg_area + align - 1)
8276 llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -(int)Align);
8277 __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
8278 CGF.Builder.CreateAnd(AsInt, Mask), __overflow_area_pointer->getType(),
8279 "__overflow_area_pointer.align");
8282 // Get the type of the argument from memory and bitcast
8283 // overflow area pointer to the argument type.
8284 llvm::Type *PTy = CGF.ConvertTypeForMem(Ty);
8285 Address AddrTyped = CGF.Builder.CreateBitCast(
8286 Address(__overflow_area_pointer, CharUnits::fromQuantity(Align)),
8287 llvm::PointerType::getUnqual(PTy));
8289 // Round up to the minimum stack alignment for varargs which is 4 bytes.
8290 uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
8292 __overflow_area_pointer = CGF.Builder.CreateGEP(
8293 __overflow_area_pointer, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
8294 "__overflow_area_pointer.next");
8295 CGF.Builder.CreateStore(__overflow_area_pointer, __overflow_area_pointer_p);
8297 return AddrTyped;
8298 }
8300 Address HexagonABIInfo::EmitVAArgForHexagon(CodeGenFunction &CGF,
8301 Address VAListAddr,
8302 QualType Ty) const {
8303 // FIXME: Need to handle alignment
8304 llvm::Type *BP = CGF.Int8PtrTy;
8305 llvm::Type *BPP = CGF.Int8PtrPtrTy;
8306 CGBuilderTy &Builder = CGF.Builder;
8307 Address VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
8308 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
8309 // Handle address alignment for type alignment > 32 bits
8310 uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
8311 if (TyAlign > 4) {
8312 assert((TyAlign & (TyAlign - 1)) == 0 && "Alignment is not power of 2!");
8313 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
8314 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
8315 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
8316 Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
8317 }
8318 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
8319 Address AddrTyped = Builder.CreateBitCast(
8320 Address(Addr, CharUnits::fromQuantity(TyAlign)), PTy);
8322 uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
8323 llvm::Value *NextAddr = Builder.CreateGEP(
8324 Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
8325 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
8327 return AddrTyped;
8328 }
8330 Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF,
8331 Address VAListAddr,
8332 QualType Ty) const {
8333 int ArgSize = CGF.getContext().getTypeSize(Ty) / 8;
8335 if (ArgSize > 8)
8336 return EmitVAArgFromMemory(CGF, VAListAddr, Ty);
8338 // Here we have to check whether the argument is in the register area or
8339 // in the overflow area.
8340 // If the saved register area pointer + argsize rounded up to alignment >
8341 // saved register area end pointer, argument is in overflow area.
8342 unsigned RegsLeft = 6;
8343 Ty = CGF.getContext().getCanonicalType(Ty);
8344 (void)classifyArgumentType(Ty, &RegsLeft);
8346 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
8347 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
8348 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
8349 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
8351 // Get the rounded size of the argument. GCC does not allow varargs of
8352 // size < 4 bytes. We follow the same logic here.
8353 ArgSize = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
8354 int ArgAlign = (CGF.getContext().getTypeSize(Ty) <= 32) ? 4 : 8;
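8355 // From here on, ArgSize and ArgAlign describe the 4- or 8-byte argument slot rather than the type's own size and alignment.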
8356 // Argument may be in saved register area
8357 CGF.EmitBlock(MaybeRegBlock);
8359 // Load the current saved register area pointer.
8360 Address __current_saved_reg_area_pointer_p = CGF.Builder.CreateStructGEP(
8361 VAListAddr, 0, "__current_saved_reg_area_pointer_p");
8362 llvm::Value *__current_saved_reg_area_pointer = CGF.Builder.CreateLoad(
8363 __current_saved_reg_area_pointer_p, "__current_saved_reg_area_pointer");
8365 // Load the saved register area end pointer.
8366 Address __saved_reg_area_end_pointer_p = CGF.Builder.CreateStructGEP(
8367 VAListAddr, 1, "__saved_reg_area_end_pointer_p");
8368 llvm::Value *__saved_reg_area_end_pointer = CGF.Builder.CreateLoad(
8369 __saved_reg_area_end_pointer_p, "__saved_reg_area_end_pointer");
8371 // If the size of argument is > 4 bytes, check if the stack
8372 // location is aligned to 8 bytes.
8373 if (ArgAlign > 4) {
8375 llvm::Value *__current_saved_reg_area_pointer_int =
8376 CGF.Builder.CreatePtrToInt(__current_saved_reg_area_pointer,
8377 CGF.Int32Ty);
8379 __current_saved_reg_area_pointer_int = CGF.Builder.CreateAdd(
8380 __current_saved_reg_area_pointer_int,
8381 llvm::ConstantInt::get(CGF.Int32Ty, (ArgAlign - 1)),
8382 "align_current_saved_reg_area_pointer");
8384 __current_saved_reg_area_pointer_int =
8385 CGF.Builder.CreateAnd(__current_saved_reg_area_pointer_int,
8386 llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
8387 "align_current_saved_reg_area_pointer");
8389 __current_saved_reg_area_pointer =
8390 CGF.Builder.CreateIntToPtr(__current_saved_reg_area_pointer_int,
8391 __current_saved_reg_area_pointer->getType(),
8392 "align_current_saved_reg_area_pointer");
8395 llvm::Value *__new_saved_reg_area_pointer =
8396 CGF.Builder.CreateGEP(__current_saved_reg_area_pointer,
8397 llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
8398 "__new_saved_reg_area_pointer");
8400 llvm::Value *UsingStack =
8401 CGF.Builder.CreateICmpSGT(__new_saved_reg_area_pointer,
8402 __saved_reg_area_end_pointer);
8404 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, InRegBlock);

  // Argument is in the saved register area: emit the in-register block.
  CGF.EmitBlock(InRegBlock);

  llvm::Type *PTy = CGF.ConvertType(Ty);
  llvm::Value *__saved_reg_area_p = CGF.Builder.CreateBitCast(
      __current_saved_reg_area_pointer, llvm::PointerType::getUnqual(PTy));

  CGF.Builder.CreateStore(__new_saved_reg_area_pointer,
                          __current_saved_reg_area_pointer_p);

  CGF.EmitBranch(ContBlock);

  // Argument is in the overflow area: emit the on-stack block.
  CGF.EmitBlock(OnStackBlock);

  // Load the overflow area pointer.
  Address __overflow_area_pointer_p =
      CGF.Builder.CreateStructGEP(VAListAddr, 2, "__overflow_area_pointer_p");
  llvm::Value *__overflow_area_pointer = CGF.Builder.CreateLoad(
      __overflow_area_pointer_p, "__overflow_area_pointer");

  // Align the overflow area pointer to the alignment of the argument.
  if (ArgAlign > 4) {
    llvm::Value *__overflow_area_pointer_int =
        CGF.Builder.CreatePtrToInt(__overflow_area_pointer, CGF.Int32Ty);

    __overflow_area_pointer_int =
        CGF.Builder.CreateAdd(__overflow_area_pointer_int,
                              llvm::ConstantInt::get(CGF.Int32Ty, ArgAlign - 1),
                              "align_overflow_area_pointer");

    __overflow_area_pointer_int =
        CGF.Builder.CreateAnd(__overflow_area_pointer_int,
                              llvm::ConstantInt::get(CGF.Int32Ty, -ArgAlign),
                              "align_overflow_area_pointer");

    __overflow_area_pointer = CGF.Builder.CreateIntToPtr(
        __overflow_area_pointer_int, __overflow_area_pointer->getType(),
        "align_overflow_area_pointer");
  }

  // Get the pointer for the next argument in the overflow area and store it
  // to the overflow area pointer.
  llvm::Value *__new_overflow_area_pointer = CGF.Builder.CreateGEP(
      __overflow_area_pointer, llvm::ConstantInt::get(CGF.Int32Ty, ArgSize),
      "__overflow_area_pointer.next");

  CGF.Builder.CreateStore(__new_overflow_area_pointer,
                          __overflow_area_pointer_p);

  CGF.Builder.CreateStore(__new_overflow_area_pointer,
                          __current_saved_reg_area_pointer_p);

  // Bitcast the overflow area pointer to the type of argument.
  llvm::Type *OverflowPTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *__overflow_area_p = CGF.Builder.CreateBitCast(
      __overflow_area_pointer, llvm::PointerType::getUnqual(OverflowPTy));

  CGF.EmitBranch(ContBlock);

  // Emit ContBlock and get the correct pointer to load the argument from.
  CGF.EmitBlock(ContBlock);

  llvm::Type *MemPTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
  llvm::PHINode *ArgAddr = CGF.Builder.CreatePHI(MemPTy, 2, "vaarg.addr");
  ArgAddr->addIncoming(__saved_reg_area_p, InRegBlock);
  ArgAddr->addIncoming(__overflow_area_p, OnStackBlock);

  return Address(ArgAddr, CharUnits::fromQuantity(ArgAlign));
}

Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {

  if (getTarget().getTriple().isMusl())
    return EmitVAArgForHexagonLinux(CGF, VAListAddr, Ty);

  return EmitVAArgForHexagon(CGF, VAListAddr, Ty);
}

//===----------------------------------------------------------------------===//
// Lanai ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class LanaiABIInfo : public DefaultABIInfo {
public:
  LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

  bool shouldUseInReg(QualType Ty, CCState &State) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    CCState State(FI);
    // Lanai uses 4 registers to pass arguments unless the function has the
    // regparm attribute set.
    if (FI.getHasRegParm()) {
      State.FreeRegs = FI.getRegParm();
    } else {
      State.FreeRegs = 4;
    }

    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type, State);
  }

  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
};
} // end anonymous namespace

bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const {
  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;

  if (SizeInRegs == 0)
    return false;

  if (SizeInRegs > State.FreeRegs) {
    State.FreeRegs = 0;
    return false;
  }

  State.FreeRegs -= SizeInRegs;

  return true;
}

ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                           CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, false);
  }

  // Compute the byval alignment.
  const unsigned MinABIStackAlignInBytes = 4;
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
                                 /*Realign=*/TypeAlign >
                                     MinABIStackAlignInBytes);
}

ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
                                              CCState &State) const {
  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, /*ByVal=*/false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      return getNaturalAlignIndirect(Ty, /*ByRef=*/true);
    }
  }

  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (RT && RT->getDecl()->hasFlexibleArrayMember())
      return getIndirectResult(Ty, /*ByVal=*/true, State);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
    if (SizeInRegs <= State.FreeRegs) {
      llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
      SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      State.FreeRegs -= SizeInRegs;
      return ABIArgInfo::getDirectInReg(Result);
    } else {
      State.FreeRegs = 0;
    }
    return getIndirectResult(Ty, true, State);
  }

  // Treat an enum type as its underlying type.
  if (const auto *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldUseInReg(Ty, State);

  // Don't pass >64 bit integers in registers.
  if (const auto *EIT = Ty->getAs<ExtIntType>())
    if (EIT->getNumBits() > 64)
      return getIndirectResult(Ty, /*ByVal=*/true, State);

  if (isPromotableIntegerTypeForABI(Ty)) {
    if (InReg)
      return ABIArgInfo::getDirectInReg();
    return ABIArgInfo::getExtend(Ty);
  }

  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}
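
// For example (illustrative): with all four argument registers free, a
// 12-byte struct is passed as getDirectInReg({i32, i32, i32}) and consumes
// three registers; with fewer free registers it falls back to
// getIndirectResult.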

namespace {
class LanaiTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<LanaiABIInfo>(CGT)) {}
};
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// AMDGPU ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUABIInfo final : public DefaultABIInfo {
private:
  static const unsigned MaxNumRegsForArgsRet = 16;

  unsigned numRegsForType(QualType Ty) const;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override;
  bool isHomogeneousAggregateSmallEnough(const Type *Base,
                                         uint64_t Members) const override;

  // Coerce HIP pointer arguments from generic pointers to global ones.
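  // For example (illustrative): a HIP kernel parameter of type int* in the
  // generic address space becomes i32 addrspace(1)*, and a struct containing
  // such a pointer is rebuilt around the coerced element type.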
  llvm::Type *coerceKernelArgumentType(llvm::Type *Ty, unsigned FromAS,
                                       unsigned ToAS) const {
    // Structure types.
    if (auto STy = dyn_cast<llvm::StructType>(Ty)) {
      SmallVector<llvm::Type *, 8> EltTys;
      bool Changed = false;
      for (auto T : STy->elements()) {
        auto NT = coerceKernelArgumentType(T, FromAS, ToAS);
        EltTys.push_back(NT);
        Changed |= (NT != T);
      }
      // Skip if there is no change in element types.
      if (!Changed)
        return Ty;
      if (STy->hasName())
        return llvm::StructType::create(
            EltTys, (STy->getName() + ".coerce").str(), STy->isPacked());
      return llvm::StructType::get(getVMContext(), EltTys, STy->isPacked());
    }
    // Array types.
    if (auto ATy = dyn_cast<llvm::ArrayType>(Ty)) {
      auto T = ATy->getElementType();
      auto NT = coerceKernelArgumentType(T, FromAS, ToAS);
      // Skip if there is no change in that element type.
      if (NT == T)
        return Ty;
      return llvm::ArrayType::get(NT, ATy->getNumElements());
    }
    // Single value types: coerce pointers in FromAS to ToAS.
    if (Ty->isPointerTy() && Ty->getPointerAddressSpace() == FromAS)
      return llvm::PointerType::get(
          cast<llvm::PointerType>(Ty)->getElementType(), ToAS);
    return Ty;
  }

public:
  explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) :
    DefaultABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
  ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegsLeft) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return true;
}

bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
    const Type *Base, uint64_t Members) const {
  uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;

  // Homogeneous Aggregates may occupy at most 16 registers.
  return Members * NumRegs <= MaxNumRegsForArgsRet;
}

/// Estimate the number of registers the type will use when passed in
/// registers.
unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {
  unsigned NumRegs = 0;

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Compute from the number of elements. The reported size is based on the
    // in-memory size, which includes the padding 4th element for 3-vectors.
    QualType EltTy = VT->getElementType();
    unsigned EltSize = getContext().getTypeSize(EltTy);

    // 16-bit element vectors should be passed packed.
    if (EltSize == 16)
      return (VT->getNumElements() + 1) / 2;

    unsigned EltNumRegs = (EltSize + 31) / 32;
    return EltNumRegs * VT->getNumElements();
  }
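
  // For example (illustrative): <4 x half> packs into 2 registers, while a
  // <3 x float> (reported as 4 elements in memory) still counts only its 3
  // real elements, i.e. 3 registers.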

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember());

    for (const FieldDecl *Field : RD->fields()) {
      QualType FieldTy = Field->getType();
      NumRegs += numRegsForType(FieldTy);
    }

    return NumRegs;
  }

  return (getContext().getTypeSize(Ty) + 31) / 32;
}

void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
  llvm::CallingConv::ID CC = FI.getCallingConvention();

  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  unsigned NumRegsLeft = MaxNumRegsForArgsRet;
  for (auto &Arg : FI.arguments()) {
    if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
      Arg.info = classifyKernelArgumentType(Arg.type);
    } else {
      Arg.info = classifyArgumentType(Arg.type, NumRegsLeft);
    }
  }
}

Address AMDGPUABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                 QualType Ty) const {
  llvm_unreachable("AMDGPU does not support varargs");
}

ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
  if (isAggregateTypeForABI(RetTy)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // returned by value.
    if (!getRecordArgABI(RetTy, getCXXABI())) {
      // Ignore empty structs/unions.
      if (isEmptyRecord(getContext(), RetTy, true))
        return ABIArgInfo::getIgnore();

      // Lower single-element structs to just return a regular value.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      if (const RecordType *RT = RetTy->getAs<RecordType>()) {
        const RecordDecl *RD = RT->getDecl();
        if (RD->hasFlexibleArrayMember())
          return DefaultABIInfo::classifyReturnType(RetTy);
      }

      // Pack aggregates <= 8 bytes into a single VGPR or pair.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));

      if (Size <= 32)
        return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

      if (Size <= 64) {
        llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
        return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
      }
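
      // Illustrative: a 2-byte aggregate is returned as i16, a 4-byte one as
      // i32, and an 8-byte one as [2 x i32].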

      if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
        return ABIArgInfo::getDirect();
    }
  }

  // Otherwise just do the default thing.
  return DefaultABIInfo::classifyReturnType(RetTy);
}

/// For kernels all parameters are really passed in a special buffer. It
/// doesn't make sense to pass anything byval, so everything must be direct.
ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // TODO: Can we omit empty structs?

  llvm::Type *LTy = nullptr;
  if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
    LTy = CGT.ConvertType(QualType(SeltTy, 0));

  if (getContext().getLangOpts().HIP) {
    if (!LTy)
      LTy = CGT.ConvertType(Ty);
    LTy = coerceKernelArgumentType(
        LTy, /*FromAS=*/getContext().getTargetAddressSpace(LangAS::Default),
        /*ToAS=*/getContext().getTargetAddressSpace(LangAS::cuda_device));
  }

  // If we set CanBeFlattened to true, CodeGen will expand the struct to its
  // individual elements, which confuses the Clover OpenCL backend; therefore
  // we have to set it to false here. Other args of getDirect() are just
  // defaults.
  return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
}

ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty,
                                               unsigned &NumRegsLeft) const {
  assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");

  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    // Lower single-element structs to just pass a regular value. TODO: We
    // could do reasonable-size multiple-element structs too, using
    // getExpand(), though watch out for things like bitfields.
    if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      const RecordDecl *RD = RT->getDecl();
      if (RD->hasFlexibleArrayMember())
        return DefaultABIInfo::classifyArgumentType(Ty);
    }

    // Pack aggregates <= 8 bytes into a single VGPR or pair.
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 64) {
      unsigned NumRegs = (Size + 31) / 32;
      NumRegsLeft -= std::min(NumRegsLeft, NumRegs);

      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));

      if (Size <= 32)
        return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

      // XXX: Should this be i64 instead, and should the limit increase?
      llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
    }

    if (NumRegsLeft > 0) {
      unsigned NumRegs = numRegsForType(Ty);
      if (NumRegsLeft >= NumRegs) {
        NumRegsLeft -= NumRegs;
        return ABIArgInfo::getDirect();
      }
    }
  }

  // Otherwise just do the default thing.
  ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty);
  if (!ArgInfo.isIndirect()) {
    unsigned NumRegs = numRegsForType(Ty);
    NumRegsLeft -= std::min(NumRegs, NumRegsLeft);
  }

  return ArgInfo;
}

class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<AMDGPUABIInfo>(CGT)) {}
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  unsigned getOpenCLKernelCallingConv() const override;

  llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
                                 llvm::PointerType *T,
                                 QualType QT) const override;

  LangAS getASTAllocaAddressSpace() const override {
    return getLangASFromTargetAS(
        getABIInfo().getDataLayout().getAllocaAddrSpace());
  }
  LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
                                  const VarDecl *D) const override;
  llvm::SyncScope::ID getLLVMSyncScopeID(const LangOptions &LangOpts,
                                         SyncScope Scope,
                                         llvm::AtomicOrdering Ordering,
                                         llvm::LLVMContext &Ctx) const override;
  llvm::Function *
  createEnqueuedBlockKernel(CodeGenFunction &CGF,
                            llvm::Function *BlockInvokeFunc,
                            llvm::Value *BlockLiteral) const override;
  bool shouldEmitStaticExternCAliases() const override;
  void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
};
} // end anonymous namespace

static bool requiresAMDGPUProtectedVisibility(const Decl *D,
                                              llvm::GlobalValue *GV) {
  if (GV->getVisibility() != llvm::GlobalValue::HiddenVisibility)
    return false;

  return D->hasAttr<OpenCLKernelAttr>() ||
         (isa<FunctionDecl>(D) && D->hasAttr<CUDAGlobalAttr>()) ||
         (isa<VarDecl>(D) &&
          (D->hasAttr<CUDADeviceAttr>() || D->hasAttr<CUDAConstantAttr>() ||
           cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinSurfaceType() ||
           cast<VarDecl>(D)->getType()->isCUDADeviceBuiltinTextureType()));
}

void AMDGPUTargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  if (requiresAMDGPUProtectedVisibility(D, GV)) {
    GV->setVisibility(llvm::GlobalValue::ProtectedVisibility);
    GV->setDSOLocal(true);
  }

  if (GV->isDeclaration())
    return;

  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD)
    return;

  llvm::Function *F = cast<llvm::Function>(GV);

  const auto *ReqdWGS = M.getLangOpts().OpenCL ?
    FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;

  const bool IsOpenCLKernel = M.getLangOpts().OpenCL &&
                              FD->hasAttr<OpenCLKernelAttr>();
  const bool IsHIPKernel = M.getLangOpts().HIP &&
                           FD->hasAttr<CUDAGlobalAttr>();
  if ((IsOpenCLKernel || IsHIPKernel) &&
      (M.getTriple().getOS() == llvm::Triple::AMDHSA))
    F->addFnAttr("amdgpu-implicitarg-num-bytes", "56");

  if (IsHIPKernel)
    F->addFnAttr("uniform-work-group-size", "true");

  const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
  if (ReqdWGS || FlatWGS) {
    unsigned Min = 0;
    unsigned Max = 0;
    if (FlatWGS) {
      Min = FlatWGS->getMin()
                ->EvaluateKnownConstInt(M.getContext())
                .getExtValue();
      Max = FlatWGS->getMax()
                ->EvaluateKnownConstInt(M.getContext())
                .getExtValue();
    }
    if (ReqdWGS && Min == 0 && Max == 0)
      Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();

    if (Min != 0) {
      assert(Min <= Max && "Min must be less than or equal Max");

      std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
      F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
    } else
      assert(Max == 0 && "Max must be zero");
  } else if (IsOpenCLKernel || IsHIPKernel) {
    // By default, restrict the maximum size to a value specified by
    // --gpu-max-threads-per-block=n or its default value for HIP.
    const unsigned OpenCLDefaultMaxWorkGroupSize = 256;
    const unsigned DefaultMaxWorkGroupSize =
        IsOpenCLKernel ? OpenCLDefaultMaxWorkGroupSize
                       : M.getLangOpts().GPUMaxThreadsPerBlock;
    std::string AttrVal =
        std::string("1,") + llvm::utostr(DefaultMaxWorkGroupSize);
    F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
  }
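
  // Illustrative: an OpenCL kernel with no explicit attributes therefore gets
  // "amdgpu-flat-work-group-size"="1,256", 256 being the OpenCL default
  // maximum work-group size used here.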

  if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) {
    unsigned Min =
        Attr->getMin()->EvaluateKnownConstInt(M.getContext()).getExtValue();
    unsigned Max = Attr->getMax() ? Attr->getMax()
                                        ->EvaluateKnownConstInt(M.getContext())
                                        .getExtValue()
                                  : 0;

    if (Min != 0) {
      assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max");

      std::string AttrVal = llvm::utostr(Min);
      if (Max != 0)
        AttrVal = AttrVal + "," + llvm::utostr(Max);
      F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
    } else
      assert(Max == 0 && "Max must be zero");
  }

  if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
    unsigned NumSGPR = Attr->getNumSGPR();

    if (NumSGPR != 0)
      F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
  }

  if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
    uint32_t NumVGPR = Attr->getNumVGPR();

    if (NumVGPR != 0)
      F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
  }
}

unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::AMDGPU_KERNEL;
}

// Currently LLVM assumes null pointers always have value 0, which results in
// incorrectly transformed IR. Therefore, instead of emitting null pointers in
// the private and local address spaces, a null pointer in the generic address
// space is emitted which is then cast to a pointer in the local or private
// address space.
llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
    const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
    QualType QT) const {
  if (CGM.getContext().getTargetNullPointerValue(QT) == 0)
    return llvm::ConstantPointerNull::get(PT);

  auto &Ctx = CGM.getContext();
  auto NPT = llvm::PointerType::get(
      PT->getElementType(),
      Ctx.getTargetAddressSpace(LangAS::opencl_generic));
  return llvm::ConstantExpr::getAddrSpaceCast(
      llvm::ConstantPointerNull::get(NPT), PT);
}

LangAS
AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                  const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  LangAS DefaultGlobalAS = getLangASFromTargetAS(
      CGM.getContext().getTargetAddressSpace(LangAS::opencl_global));
  if (!D)
    return DefaultGlobalAS;

  LangAS AddrSpace = D->getType().getAddressSpace();
  assert(AddrSpace == LangAS::Default || isTargetAddressSpace(AddrSpace));
  if (AddrSpace != LangAS::Default)
    return AddrSpace;

  if (CGM.isTypeConstant(D->getType(), false)) {
    if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
      return ConstAS.getValue();
  }
  return DefaultGlobalAS;
}

llvm::SyncScope::ID
AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(const LangOptions &LangOpts,
                                            SyncScope Scope,
                                            llvm::AtomicOrdering Ordering,
                                            llvm::LLVMContext &Ctx) const {
  std::string Name;
  switch (Scope) {
  case SyncScope::OpenCLWorkGroup:
    Name = "workgroup";
    break;
  case SyncScope::OpenCLDevice:
    Name = "agent";
    break;
  case SyncScope::OpenCLAllSVMDevices:
    Name = "";
    break;
  case SyncScope::OpenCLSubGroup:
    Name = "wavefront";
  }

  if (Ordering != llvm::AtomicOrdering::SequentiallyConsistent) {
    if (!Name.empty())
      Name = Twine(Twine(Name) + Twine("-")).str();
    Name = Twine(Twine(Name) + Twine("one-as")).str();
  }

  return Ctx.getOrInsertSyncScopeID(Name);
}
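
// Illustrative: an OpenCL atomic at device scope with relaxed ordering maps
// to the "agent-one-as" sync scope; with seq_cst ordering it maps to "agent".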

bool AMDGPUTargetCodeGenInfo::shouldEmitStaticExternCAliases() const {
  return false;
}

void AMDGPUTargetCodeGenInfo::setCUDAKernelCallingConvention(
    const FunctionType *&FT) const {
  FT = getABIInfo().getContext().adjustFunctionType(
      FT, FT->getExtInfo().withCallingConv(CC_OpenCLKernel));
}

//===----------------------------------------------------------------------===//
// SPARC v8 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
//
// Ensures that complex values are passed in registers.
//
namespace {
class SparcV8ABIInfo : public DefaultABIInfo {
public:
  SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  void computeInfo(CGFunctionInfo &FI) const override;
};
} // end anonymous namespace

ABIArgInfo
SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
  if (Ty->isAnyComplexType()) {
    return ABIArgInfo::getDirect();
  }
  return DefaultABIInfo::classifyReturnType(Ty);
}

void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &Arg : FI.arguments())
    Arg.info = classifyArgumentType(Arg.type);
}

namespace {
class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<SparcV8ABIInfo>(CGT)) {}
};
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// SPARC v9 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
//
// Function arguments are mapped to a nominal "parameter array" and promoted to
// registers depending on their type. Each argument occupies 8 or 16 bytes in
// the array; structs larger than 16 bytes are passed indirectly.
//
// One case requires special care:
//
//   struct mixed {
//     int i;
//     float f;
//   };
//
// When a struct mixed is passed by value, it only occupies 8 bytes in the
// parameter array, but the int is passed in an integer register, and the float
// is passed in a floating point register. This is represented as two arguments
// with the LLVM IR inreg attribute:
//
//   declare void f(i32 inreg %i, float inreg %f)
//
// The code generator will only allocate 4 bytes from the parameter array for
// the inreg arguments. All other arguments are allocated a multiple of 8
// bytes.
//
namespace {
class SparcV9ABIInfo : public ABIInfo {
public:
  SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  // Coercion type builder for structs passed in registers. The coercion type
  // serves two purposes:
  //
  // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
  //    in registers.
  // 2. Expose aligned floating point elements as first-level elements, so the
  //    code generator knows to pass them in floating point registers.
  //
  // We also compute the InReg flag which indicates that the struct contains
  // aligned 32-bit floats.
  //
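  // Illustrative: for struct mixed { int i; float f; } from the comment
  // above, the builder produces the coercion type { i32, float } and sets
  // InReg, because the struct contains an aligned float narrower than 64
  // bits.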
  struct CoerceBuilder {
    llvm::LLVMContext &Context;
    const llvm::DataLayout &DL;
    SmallVector<llvm::Type *, 8> Elems;
    uint64_t Size;
    bool InReg;

    CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
        : Context(c), DL(dl), Size(0), InReg(false) {}

    // Pad Elems with integers until Size is ToSize.
    void pad(uint64_t ToSize) {
      assert(ToSize >= Size && "Cannot remove elements");
      if (ToSize == Size)
        return;

      // Finish the current 64-bit word.
      uint64_t Aligned = llvm::alignTo(Size, 64);
      if (Aligned > Size && Aligned <= ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
        Size = Aligned;
      }

      // Add whole 64-bit words.
      while (Size + 64 <= ToSize) {
        Elems.push_back(llvm::Type::getInt64Ty(Context));
        Size += 64;
      }

      // Final in-word padding.
      if (Size < ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
        Size = ToSize;
      }
    }

    // Add a floating point element at Offset.
    void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
      // Unaligned floats are treated as integers.
      if (Offset % Bits)
        return;
      // The InReg flag is only required if there are any floats < 64 bits.
      if (Bits < 64)
        InReg = true;
      pad(Offset);
      Elems.push_back(Ty);
      Size = Offset + Bits;
    }

    // Add a struct type to the coercion type, starting at Offset (in bits).
    void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
      const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
      for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
        llvm::Type *ElemTy = StrTy->getElementType(i);
        uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
        switch (ElemTy->getTypeID()) {
        case llvm::Type::StructTyID:
          addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
          break;
        case llvm::Type::FloatTyID:
          addFloat(ElemOffset, ElemTy, 32);
          break;
        case llvm::Type::DoubleTyID:
          addFloat(ElemOffset, ElemTy, 64);
          break;
        case llvm::Type::FP128TyID:
          addFloat(ElemOffset, ElemTy, 128);
          break;
        case llvm::Type::PointerTyID:
          if (ElemOffset % 64 == 0) {
            pad(ElemOffset);
            Elems.push_back(ElemTy);
            Size += 64;
          }
          break;
        default:
          break;
        }
      }
    }

    // Check if Ty is a usable substitute for the coercion type.
    bool isUsableType(llvm::StructType *Ty) const {
      return llvm::makeArrayRef(Elems) == Ty->elements();
    }

    // Get the coercion type as a literal struct type.
    llvm::Type *getType() const {
      if (Elems.size() == 1)
        return Elems.front();
      return llvm::StructType::get(Context, Elems);
    }
  };
};
} // end anonymous namespace

ABIArgInfo
SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);

  // Anything too big to fit in registers is passed with an explicit indirect
  // pointer / sret pointer.
  if (Size > SizeLimit)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Integer types smaller than a register are extended.
  if (Size < 64 && Ty->isIntegerType())
    return ABIArgInfo::getExtend(Ty);

  if (const auto *EIT = Ty->getAs<ExtIntType>())
    if (EIT->getNumBits() < 64)
      return ABIArgInfo::getExtend(Ty);

  // Other non-aggregates go in registers.
  if (!isAggregateTypeForABI(Ty))
    return ABIArgInfo::getDirect();

  // If a C++ object has either a non-trivial copy constructor or a non-trivial
  // destructor, it is passed with an explicit indirect pointer / sret pointer.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // This is a small aggregate type that should be passed in registers.
  // Build a coercion type from the LLVM struct type.
  llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
  if (!StrTy)
    return ABIArgInfo::getDirect();

  CoerceBuilder CB(getVMContext(), getDataLayout());
  CB.addStruct(0, StrTy);
  CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));

  // Try to use the original type for coercion.
  llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();

  if (CB.InReg)
    return ABIArgInfo::getDirectInReg(CoerceTy);
  return ABIArgInfo::getDirect(CoerceTy);
}

Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {
  ABIArgInfo AI = classifyType(Ty, 16 * 8);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);

  CharUnits SlotSize = CharUnits::fromQuantity(8);

  CGBuilderTy &Builder = CGF.Builder;
  Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);

  auto TypeInfo = getContext().getTypeInfoInChars(Ty);

  Address ArgAddr = Address::invalid();
  CharUnits Stride;
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");

  case ABIArgInfo::Extend: {
    Stride = SlotSize;
    CharUnits Offset = SlotSize - TypeInfo.first;
    ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
    break;
  }

  case ABIArgInfo::Direct: {
    auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize);
    ArgAddr = Addr;
    break;
  }

  case ABIArgInfo::Indirect:
    Stride = SlotSize;
    ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect");
    ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"),
                      TypeInfo.second);
    break;

  case ABIArgInfo::Ignore:
    return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second);
  }

  // Update VAList.
  Address NextPtr = Builder.CreateConstInBoundsByteGEP(Addr, Stride, "ap.next");
  Builder.CreateStore(NextPtr.getPointer(), VAListAddr);

  return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr");
}

void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
  for (auto &I : FI.arguments())
    I.info = classifyType(I.type, 16 * 8);
}

namespace {
class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<SparcV9ABIInfo>(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 14;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
} // end anonymous namespace

bool
SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                  llvm::Value *Address) const {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output. AFAIK all ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);

  // 0-31: the 8-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Eight8, 0, 31);

  // 32-63: f0-31, the 4-byte floating-point registers
  AssignToArrayRange(Builder, Address, Four8, 32, 63);

  //   Y   = 64
  //   PSR = 65
  //   WIM = 66
  //   TBR = 67
  //   PC  = 68
  //   NPC = 69
  //   FSR = 70
  //   CSR = 71
  AssignToArrayRange(Builder, Address, Eight8, 64, 71);

  // 72-87: d0-15, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 72, 87);

  return false;
}

//===----------------------------------------------------------------------===//
// ARC ABI implementation.
//===----------------------------------------------------------------------===//

namespace {
class ARCABIInfo : public DefaultABIInfo {
public:
  using DefaultABIInfo::DefaultABIInfo;

private:
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  void updateState(const ABIArgInfo &Info, QualType Ty, CCState &State) const {
    if (!State.FreeRegs)
      return;
    if (Info.isIndirect() && Info.getInReg())
      State.FreeRegs--;
    else if (Info.isDirect() && Info.getInReg()) {
      unsigned sz = (getContext().getTypeSize(Ty) + 31) / 32;
      if (sz < State.FreeRegs)
        State.FreeRegs -= sz;
      else
        State.FreeRegs = 0;
    }
  }

  void computeInfo(CGFunctionInfo &FI) const override {
    CCState State(FI);
    // ARC uses 8 registers to pass arguments.
    State.FreeRegs = 8;

    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    updateState(FI.getReturnInfo(), FI.getReturnType(), State);
    for (auto &I : FI.arguments()) {
      I.info = classifyArgumentType(I.type, State.FreeRegs);
      updateState(I.info, I.type, State);
    }
  }

  ABIArgInfo getIndirectByRef(QualType Ty, bool HasFreeRegs) const;
  ABIArgInfo getIndirectByValue(QualType Ty) const;
  ABIArgInfo classifyArgumentType(QualType Ty, uint8_t FreeRegs) const;
  ABIArgInfo classifyReturnType(QualType RetTy) const;
};

class ARCTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARCTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<ARCABIInfo>(CGT)) {}
};

ABIArgInfo ARCABIInfo::getIndirectByRef(QualType Ty, bool HasFreeRegs) const {
  return HasFreeRegs ? getNaturalAlignIndirectInReg(Ty) :
                       getNaturalAlignIndirect(Ty, false);
}

ABIArgInfo ARCABIInfo::getIndirectByValue(QualType Ty) const {
  // Compute the byval alignment.
  const unsigned MinABIStackAlignInBytes = 4;
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
                                 TypeAlign > MinABIStackAlignInBytes);
}

Address ARCABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                              QualType Ty) const {
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4), true);
}

ABIArgInfo ARCABIInfo::classifyArgumentType(QualType Ty,
                                            uint8_t FreeRegs) const {
  // Handle the generic C++ ABI.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect)
      return getIndirectByRef(Ty, FreeRegs > 0);

    if (RAA == CGCXXABI::RAA_DirectInMemory)
      return getIndirectByValue(Ty);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  auto SizeInRegs = llvm::alignTo(getContext().getTypeSize(Ty), 32) / 32;

  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (RT && RT->getDecl()->hasFlexibleArrayMember())
      return getIndirectByValue(Ty);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();

    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
    llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);

    return FreeRegs >= SizeInRegs ?
        ABIArgInfo::getDirectInReg(Result) :
        ABIArgInfo::getDirect(Result, 0, nullptr, false);
  }

  if (const auto *EIT = Ty->getAs<ExtIntType>())
    if (EIT->getNumBits() > 64)
      return getIndirectByValue(Ty);

  return isPromotableIntegerTypeForABI(Ty)
             ? (FreeRegs >= SizeInRegs ? ABIArgInfo::getExtendInReg(Ty)
                                       : ABIArgInfo::getExtend(Ty))
             : (FreeRegs >= SizeInRegs ? ABIArgInfo::getDirectInReg()
                                       : ABIArgInfo::getDirect());
}

ABIArgInfo ARCABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isAnyComplexType())
    return ABIArgInfo::getDirectInReg();

  // Return values that need more than 4 registers are returned indirectly.
  auto RetSize = llvm::alignTo(getContext().getTypeSize(RetTy), 32) / 32;
  if (RetSize > 4)
    return getIndirectByRef(RetTy, /*HasFreeRegs*/ true);

  return DefaultABIInfo::classifyReturnType(RetTy);
}

} // End anonymous namespace.

//===----------------------------------------------------------------------===//
// XCore ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

/// A SmallStringEnc instance is used to build up the TypeString by passing
/// it by reference between functions that append to it.
typedef llvm::SmallString<128> SmallStringEnc;

/// TypeStringCache caches the meta encodings of Types.
///
/// The reason for caching TypeStrings is twofold:
///   1. To cache a type's encoding for later uses;
///   2. As a means to break recursive member type inclusion.
///
/// A cache Entry can have a Status of:
///   NonRecursive:   The type encoding is not recursive;
///   Recursive:      The type encoding is recursive;
///   Incomplete:     An incomplete TypeString;
///   IncompleteUsed: An incomplete TypeString that has been used in a
///                   Recursive type encoding.
///
/// A NonRecursive entry will have all of its sub-members expanded as fully
/// as possible. Whilst it may contain types which are recursive, the type
/// itself is not recursive and thus its encoding may be safely used whenever
/// the type is encountered.
///
/// A Recursive entry will have all of its sub-members expanded as fully as
/// possible. The type itself is recursive and it may contain other types which
/// are recursive. The Recursive encoding must not be used during the expansion
/// of a recursive type's recursive branch. For simplicity the code uses
/// IncompleteCount to reject all usage of Recursive encodings for member types.
///
/// An Incomplete entry is always a RecordType and only encodes its
/// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
/// are placed into the cache during type expansion as a means to identify and
/// handle recursive inclusion of types as sub-members. If there is recursion
/// the entry becomes IncompleteUsed.
///
/// During the expansion of a RecordType's members:
///
///   If the cache contains a NonRecursive encoding for the member type, the
///   cached encoding is used;
///
///   If the cache contains a Recursive encoding for the member type, the
///   cached encoding is 'Swapped' out, as it may be incorrect, and...
///
///   If the member is a RecordType, an Incomplete encoding is placed into the
///   cache to break potential recursive inclusion of itself as a sub-member;
///
///   Once a member RecordType has been expanded, its temporary incomplete
///   entry is removed from the cache. If a Recursive encoding was swapped out
///   it is swapped back in;
///
///   If an incomplete entry is used to expand a sub-member, the incomplete
///   entry is marked as IncompleteUsed. The cache keeps count of how many
///   IncompleteUsed entries it currently contains in IncompleteUsedCount;
///
///   If a member's encoding is found to be a NonRecursive or Recursive viz:
///   IncompleteUsedCount==0, the member's encoding is added to the cache.
///   Else the member is part of a recursive type and thus the recursion has
///   been exited too soon for the encoding to be correct for the member.
///
class TypeStringCache {
  enum Status { NonRecursive, Recursive, Incomplete, IncompleteUsed };
  struct Entry {
    std::string Str;     // The encoded TypeString for the type.
    enum Status State;   // Information about the encoding in 'Str'.
    std::string Swapped; // A temporary place holder for a Recursive encoding
                         // during the expansion of RecordType's members.
  };
  std::map<const IdentifierInfo *, struct Entry> Map;
  unsigned IncompleteCount;     // Number of Incomplete entries in the Map.
  unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.

public:
  TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
  void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
  bool removeIncomplete(const IdentifierInfo *ID);
  void addIfComplete(const IdentifierInfo *ID, StringRef Str,
                     bool IsRecursive);
  StringRef lookupStr(const IdentifierInfo *ID);
};

/// TypeString encodings for enum & union fields must be ordered.
/// FieldEncoding is a helper for this ordering process.
class FieldEncoding {
  bool HasName;
  std::string Enc;

public:
  FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
  StringRef str() { return Enc; }
  bool operator<(const FieldEncoding &rhs) const {
    if (HasName != rhs.HasName) return HasName;
    return Enc < rhs.Enc;
  }
};

class XCoreABIInfo : public DefaultABIInfo {
public:
  XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
  mutable TypeStringCache TSC;
  void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                    const CodeGen::CodeGenModule &M) const;

public:
  XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<XCoreABIInfo>(CGT)) {}
  void emitTargetMetadata(CodeGen::CodeGenModule &CGM,
                          const llvm::MapVector<GlobalDecl, StringRef>
                              &MangledDeclNames) const override;
};

} // End anonymous namespace.

// TODO: this implementation is likely now redundant with the default
// EmitVAArg.
Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CGBuilderTy &Builder = CGF.Builder;

  // Get the VAList.
  CharUnits SlotSize = CharUnits::fromQuantity(4);
  Address AP(Builder.CreateLoad(VAListAddr), SlotSize);

  // Handle the argument.
  ABIArgInfo AI = classifyArgumentType(Ty);
  CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);

  Address Val = Address::invalid();
  CharUnits ArgSize = CharUnits::Zero();
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");
  case ABIArgInfo::Ignore:
    Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
    ArgSize = CharUnits::Zero();
    break;
  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    Val = Builder.CreateBitCast(AP, ArgPtrTy);
    ArgSize = CharUnits::fromQuantity(
        getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
    ArgSize = ArgSize.alignTo(SlotSize);
    break;
  case ABIArgInfo::Indirect:
    Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
    Val = Address(Builder.CreateLoad(Val), TypeAlign);
    ArgSize = SlotSize;
    break;
  }

  // Increment the VAList.
  if (!ArgSize.isZero()) {
    Address APN = Builder.CreateConstInBoundsByteGEP(AP, ArgSize);
    Builder.CreateStore(APN.getPointer(), VAListAddr);
  }

  return Val;
}

/// During the expansion of a RecordType, an incomplete TypeString is placed
/// into the cache as a means to identify and break recursion.
/// If there is a Recursive encoding in the cache, it is swapped out and will
/// be reinserted by removeIncomplete().
/// All other types of encoding should have been used rather than arriving
/// here.
void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
                                    std::string StubEnc) {
  if (!ID)
    return;
  Entry &E = Map[ID];
  assert((E.Str.empty() || E.State == Recursive) &&
         "Incorrect use of addIncomplete");
  assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
  E.Swapped.swap(E.Str); // swap out the Recursive
  E.Str.swap(StubEnc);
  E.State = Incomplete;
  ++IncompleteCount;
}

/// Once the RecordType has been expanded, the temporary incomplete TypeString
/// must be removed from the cache.
/// If a Recursive was swapped out by addIncomplete(), it will be replaced.
/// Returns true if the RecordType was defined recursively.
bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
  if (!ID)
    return false;
  auto I = Map.find(ID);
  assert(I != Map.end() && "Entry not present");
  Entry &E = I->second;
  assert((E.State == Incomplete ||
          E.State == IncompleteUsed) &&
         "Entry must be an incomplete type");
  bool IsRecursive = false;
  if (E.State == IncompleteUsed) {
    // We made use of our Incomplete encoding, thus we are recursive.
    IsRecursive = true;
    --IncompleteUsedCount;
  }
  if (E.Swapped.empty())
    Map.erase(I);
  else {
    // Swap the Recursive back.
    E.Swapped.swap(E.Str);
    E.Swapped.clear();
    E.State = Recursive;
  }
  --IncompleteCount;
  return IsRecursive;
}

/// Add the encoded TypeString to the cache only if it is NonRecursive or
/// Recursive (viz: all sub-members were expanded as fully as possible).
void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
                                    bool IsRecursive) {
  if (!ID || IncompleteUsedCount)
    return; // No key or it is an incomplete sub-type so don't add.
  Entry &E = Map[ID];
  if (IsRecursive && !E.Str.empty()) {
    assert(E.State == Recursive && E.Str.size() == Str.size() &&
           "This is not the same Recursive entry");
    // The parent container was not recursive after all, so we could have used
    // this Recursive sub-member entry after all, but we assumed the worst when
    // we started viz: IncompleteCount!=0.
    return;
  }
  assert(E.Str.empty() && "Entry already present");
  E.Str = Str.str();
  E.State = IsRecursive ? Recursive : NonRecursive;
}

/// Return a cached TypeString encoding for the ID. If there isn't one, or we
/// are recursively expanding a type (IncompleteCount != 0) and the cached
/// encoding is Recursive, return an empty StringRef.
StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
  if (!ID)
    return StringRef(); // We have no key.
  auto I = Map.find(ID);
  if (I == Map.end())
    return StringRef(); // We have no encoding.
  Entry &E = I->second;
  if (E.State == Recursive && IncompleteCount)
    return StringRef(); // We don't use Recursive encodings for member types.

  if (E.State == Incomplete) {
    // The incomplete type is being used to break out of recursion.
    E.State = IncompleteUsed;
    ++IncompleteUsedCount;
  }
  return E.Str;
}

/// The XCore ABI includes a type information section that communicates symbol
/// type information to the linker. The linker uses this information to verify
/// safety/correctness of things such as array bounds and pointers et al.
/// The ABI only requires C (and XC) language modules to emit TypeStrings.
/// This type information (TypeString) is emitted into meta data for all global
/// symbols: definitions, declarations, functions & variables.
///
/// The TypeString carries type, qualifier, name, size & value details.
/// Please see 'Tools Development Guide' section 2.16.2 for format details:
/// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
/// The output is tested by test/CodeGen/xcore-stringtype.c.
///
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          const CodeGen::CodeGenModule &CGM,
                          TypeStringCache &TSC);

/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
void XCoreTargetCodeGenInfo::emitTargetMD(
    const Decl *D, llvm::GlobalValue *GV,
    const CodeGen::CodeGenModule &CGM) const {
  SmallStringEnc Enc;
  if (getTypeString(Enc, D, CGM, TSC)) {
    llvm::LLVMContext &Ctx = CGM.getModule().getContext();
    llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
                                llvm::MDString::get(Ctx, Enc.str())};
    llvm::NamedMDNode *MD =
        CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
    MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
  }
}

void XCoreTargetCodeGenInfo::emitTargetMetadata(
    CodeGen::CodeGenModule &CGM,
    const llvm::MapVector<GlobalDecl, StringRef> &MangledDeclNames) const {
  // Warning, new MangledDeclNames may be appended within this loop.
  // We rely on MapVector insertions adding new elements to the end
  // of the container.
  for (unsigned I = 0; I != MangledDeclNames.size(); ++I) {
    auto Val = *(MangledDeclNames.begin() + I);
    llvm::GlobalValue *GV = CGM.GetGlobalValue(Val.second);
    if (GV) {
      const Decl *D = Val.first.getDecl()->getMostRecentDecl();
      emitTargetMD(D, GV, CGM);
    }
  }
}

//===----------------------------------------------------------------------===//
// SPIR ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class SPIRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<DefaultABIInfo>(CGT)) {}
  unsigned getOpenCLKernelCallingConv() const override;
};
} // End anonymous namespace.

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
  DefaultABIInfo SPIRABI(CGM.getTypes());
  SPIRABI.computeInfo(FI);
}
} // namespace CodeGen
} // namespace clang

unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::SPIR_KERNEL;
}

static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC);

/// Helper function for appendRecordType().
/// Builds a SmallVector containing the encoded field types in declaration
/// order.
static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
                             const RecordDecl *RD,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC) {
  for (const auto *Field : RD->fields()) {
    SmallStringEnc Enc;
    Enc += "m(";
    Enc += Field->getName();
    Enc += "){";
    if (Field->isBitField()) {
      Enc += "b(";
      llvm::raw_svector_ostream OS(Enc);
      OS << Field->getBitWidthValue(CGM.getContext());
      Enc += ':';
    }
    if (!appendType(Enc, Field->getType(), CGM, TSC))
      return false;
    if (Field->isBitField())
      Enc += ')';
    Enc += '}';
    FE.emplace_back(!Field->getName().empty(), Enc);
  }
  return true;
}

/// Appends structure and union types to Enc and adds encoding to cache.
/// Recursively calls appendType (via extractFieldType) for each field.
/// Union types have their fields ordered according to the ABI.
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC, const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  // Start to emit an incomplete TypeString.
  size_t Start = Enc.size();
  Enc += (RT->isUnionType()? 'u' : 's');
  Enc += '(';
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded fields and order as necessary.
  bool IsRecursive = false;
  const RecordDecl *RD = RT->getDecl()->getDefinition();
  if (RD && !RD->field_empty()) {
    // An incomplete TypeString stub is placed in the cache for this RecordType
    // so that recursive calls to this RecordType will use it whilst building a
    // complete TypeString for this RecordType.
    SmallVector<FieldEncoding, 16> FE;
    std::string StubEnc(Enc.substr(Start).str());
    StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
    TSC.addIncomplete(ID, std::move(StubEnc));
    if (!extractFieldType(FE, RD, CGM, TSC)) {
      (void) TSC.removeIncomplete(ID);
      return false;
    }
    IsRecursive = TSC.removeIncomplete(ID);
    // The ABI requires unions to be sorted but not structures.
    // See FieldEncoding::operator< for sort algorithm.
    if (RT->isUnionType())
      llvm::sort(FE);
    // We can now complete the TypeString.
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
  return true;
}
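
// Illustrative (assuming the encoding rules above): struct S { int x; } is
// encoded as "s(S){m(x){si}}", and the incomplete stub cached while S is
// being expanded is "s(S){}".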

/// Appends enum types to Enc and adds the encoding to the cache.
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
                           TypeStringCache &TSC,
                           const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  size_t Start = Enc.size();
  Enc += "e(";
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded enumerations and order them alphanumerically.
  if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
    SmallVector<FieldEncoding, 16> FE;
    for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
         ++I) {
      SmallStringEnc EnumEnc;
      EnumEnc += "m(";
      EnumEnc += I->getName();
      EnumEnc += "){";
      I->getInitVal().toString(EnumEnc);
      EnumEnc += '}';
      FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
    }
    llvm::sort(FE);
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), false);
  return true;
}

/// Appends type's qualifier to Enc.
/// This is done prior to appending the type's encoding.
static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
  // Qualifiers are emitted in alphabetical order.
  static const char *const Table[] =
      {"", "c:", "r:", "cr:", "v:", "cv:", "rv:", "crv:"};
  int Lookup = 0;
  if (QT.isConstQualified())
    Lookup += 1<<0;
  if (QT.isRestrictQualified())
    Lookup += 1<<1;
  if (QT.isVolatileQualified())
    Lookup += 1<<2;
  Enc += Table[Lookup];
}
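
// Illustrative: a const volatile int is encoded as "cv:si" -- the qualifier
// string from the table followed by the builtin encoding.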

/// Appends built-in types to Enc.
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
  const char *EncType;
  switch (BT->getKind()) {
    case BuiltinType::Void:
      EncType = "0";
      break;
    case BuiltinType::Bool:
      EncType = "b";
      break;
    case BuiltinType::Char_U:
      EncType = "uc";
      break;
    case BuiltinType::UChar:
      EncType = "uc";
      break;
    case BuiltinType::Char_S:
      EncType = "sc";
      break;
    case BuiltinType::SChar:
      EncType = "sc";
      break;
    case BuiltinType::UShort:
      EncType = "us";
      break;
    case BuiltinType::Short:
      EncType = "ss";
      break;
    case BuiltinType::UInt:
      EncType = "ui";
      break;
    case BuiltinType::Int:
      EncType = "si";
      break;
    case BuiltinType::ULong:
      EncType = "ul";
      break;
    case BuiltinType::Long:
      EncType = "sl";
      break;
    case BuiltinType::ULongLong:
      EncType = "ull";
      break;
    case BuiltinType::LongLong:
      EncType = "sll";
      break;
    case BuiltinType::Float:
      EncType = "ft";
      break;
    case BuiltinType::Double:
      EncType = "d";
      break;
    case BuiltinType::LongDouble:
      EncType = "ld";
      break;
    default:
      return false;
  }
  Enc += EncType;
  return true;
}

/// Appends a pointer encoding to Enc before calling appendType for the
/// pointee.
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
                              const CodeGen::CodeGenModule &CGM,
                              TypeStringCache &TSC) {
  Enc += "p(";
  if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}

/// Appends array encoding to Enc before calling appendType for the element.
static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
                            const ArrayType *AT,
                            const CodeGen::CodeGenModule &CGM,
                            TypeStringCache &TSC, StringRef NoSizeEnc) {
  if (AT->getSizeModifier() != ArrayType::Normal)
    return false;
  Enc += "a(";
  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
    CAT->getSize().toStringUnsigned(Enc);
  else
    Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
  Enc += ':';
  // The Qualifiers should be attached to the type rather than the array.
  appendQualifier(Enc, QT);
  if (!appendType(Enc, AT->getElementType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}

/// Appends a function encoding to Enc, calling appendType for the return type
/// and the arguments.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
                               const CodeGen::CodeGenModule &CGM,
                               TypeStringCache &TSC) {
  Enc += "f{";
  if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
    return false;
  Enc += "}(";
  if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
    // N.B. we are only interested in the adjusted param types.
    auto I = FPT->param_type_begin();
    auto E = FPT->param_type_end();
    if (I != E) {
      do {
        if (!appendType(Enc, *I, CGM, TSC))
          return false;
        ++I;
        if (I != E)
          Enc += ',';
      } while (I != E);
      if (FPT->isVariadic())
        Enc += ",va";
    } else {
      if (FPT->isVariadic())
        Enc += "va";
      else
        Enc += "0";
    }
  }
  Enc += ')';
  return true;
}
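
// Illustrative: int f(int); encodes as "f{si}(si)", and a variadic
// int g(int, ...); encodes as "f{si}(si,va)".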

/// Handles the type's qualifier before dispatching a call to handle specific
/// type encodings.
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC) {

  QualType QT = QType.getCanonicalType();

  if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
    // The Qualifiers should be attached to the type rather than the array.
    // Thus we don't call appendQualifier() here.
    return appendArrayType(Enc, QT, AT, CGM, TSC, "");
10213 appendQualifier(Enc, QT);
10215 if (const BuiltinType *BT = QT->getAs<BuiltinType>())
10216 return appendBuiltinType(Enc, BT);
10218 if (const PointerType *PT = QT->getAs<PointerType>())
10219 return appendPointerType(Enc, PT, CGM, TSC);
10221 if (const EnumType *ET = QT->getAs<EnumType>())
10222 return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());
10224 if (const RecordType *RT = QT->getAsStructureType())
10225 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
10227 if (const RecordType *RT = QT->getAsUnionType())
10228 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
10230 if (const FunctionType *FT = QT->getAs<FunctionType>())
10231 return appendFunctionType(Enc, FT, CGM, TSC);
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          const CodeGen::CodeGenModule &CGM,
                          TypeStringCache &TSC) {
  if (!D)
    return false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    return appendType(Enc, FD->getType(), CGM, TSC);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (VD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    QualType QT = VD->getType().getCanonicalType();
    if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
      // Global ArrayTypes are given a size of '*' if the size is unknown.
      // The Qualifiers should be attached to the type rather than the array.
      // Thus we don't call appendQualifier() here.
      return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
    }
    return appendType(Enc, QT, CGM, TSC);
  }
  return false;
}
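
// Illustrative examples of encodings produced by the helpers above (derived
// from their emission logic): a global 'const int *g' encodes as "p(c:si)",
// and 'unsigned f(unsigned)' encodes as "f{ui}(ui)".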
//===----------------------------------------------------------------------===//
// RISCV ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class RISCVABIInfo : public DefaultABIInfo {
private:
  // Size of the integer ('x') registers in bits.
  unsigned XLen;
  // Size of the floating point ('f') registers in bits. Note that the target
  // ISA might have a wider FLen than the selected ABI (e.g. an RV32IF target
  // with soft float ABI has FLen==0).
  unsigned FLen;
  static const int NumArgGPRs = 8;
  static const int NumArgFPRs = 8;
  bool detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                      llvm::Type *&Field1Ty,
                                      CharUnits &Field1Off,
                                      llvm::Type *&Field2Ty,
                                      CharUnits &Field2Off) const;

public:
  RISCVABIInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen, unsigned FLen)
      : DefaultABIInfo(CGT), XLen(XLen), FLen(FLen) {}

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo is virtual, so we override it.
  void computeInfo(CGFunctionInfo &FI) const override;

  ABIArgInfo classifyArgumentType(QualType Ty, bool IsFixed, int &ArgGPRsLeft,
                                  int &ArgFPRsLeft) const;
  ABIArgInfo classifyReturnType(QualType RetTy) const;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  ABIArgInfo extendType(QualType Ty) const;

  bool detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                CharUnits &Field1Off, llvm::Type *&Field2Ty,
                                CharUnits &Field2Off, int &NeededArgGPRs,
                                int &NeededArgFPRs) const;
  ABIArgInfo coerceAndExpandFPCCEligibleStruct(llvm::Type *Field1Ty,
                                               CharUnits Field1Off,
                                               llvm::Type *Field2Ty,
                                               CharUnits Field2Off) const;
};
} // end anonymous namespace
void RISCVABIInfo::computeInfo(CGFunctionInfo &FI) const {
  QualType RetTy = FI.getReturnType();
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(RetTy);

  // IsRetIndirect is true if classifyArgumentType indicated the value should
  // be passed indirect, or if the type size is a scalar greater than 2*XLen
  // and not a complex type with elements <= FLen. e.g. fp128 is passed direct
  // in LLVM IR, relying on the backend lowering code to rewrite the argument
  // list and pass indirectly on RV32.
  bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
  if (!IsRetIndirect && RetTy->isScalarType() &&
      getContext().getTypeSize(RetTy) > (2 * XLen)) {
    if (RetTy->isComplexType() && FLen) {
      QualType EltTy = RetTy->getAs<ComplexType>()->getElementType();
      IsRetIndirect = getContext().getTypeSize(EltTy) > FLen;
    } else {
      // This is a normal scalar > 2*XLen, such as fp128 on RV32.
      IsRetIndirect = true;
    }
  }

  // We must track the number of GPRs used in order to conform to the RISC-V
  // ABI, as integer scalars passed in registers should have signext/zeroext
  // when promoted, but are anyext if passed on the stack. As GPR usage is
  // different for variadic arguments, we must also track whether we are
  // examining a vararg or not.
  int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
  int ArgFPRsLeft = FLen ? NumArgFPRs : 0;
  int NumFixedArgs = FI.getNumRequiredArgs();

  int ArgNum = 0;
  for (auto &ArgInfo : FI.arguments()) {
    bool IsFixed = ArgNum < NumFixedArgs;
    ArgInfo.info =
        classifyArgumentType(ArgInfo.type, IsFixed, ArgGPRsLeft, ArgFPRsLeft);
    ArgNum++;
  }
}
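
// Illustrative example of the GPR accounting above: returning fp128 on RV32
// (XLen==32) makes the return indirect, so a0 carries the sret pointer and
// argument allocation starts with ArgGPRsLeft == 7 (a1..a7).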
// Returns true if the struct is a potential candidate for the floating point
// calling convention. If this function returns true, the caller is
// responsible for checking that if there is only a single field then that
// field is a float.
bool RISCVABIInfo::detectFPCCEligibleStructHelper(QualType Ty, CharUnits CurOff,
                                                  llvm::Type *&Field1Ty,
                                                  CharUnits &Field1Off,
                                                  llvm::Type *&Field2Ty,
                                                  CharUnits &Field2Off) const {
  bool IsInt = Ty->isIntegralOrEnumerationType();
  bool IsFloat = Ty->isRealFloatingType();

  if (IsInt || IsFloat) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (IsInt && Size > XLen)
      return false;
    // Can't be eligible if larger than the FP registers. Half precision isn't
    // currently supported on RISC-V and the ABI hasn't been confirmed, so
    // default to the integer ABI in that case.
    if (IsFloat && (Size > FLen || Size < 32))
      return false;
    // Can't be eligible if an integer type was already found (int+int pairs
    // are not eligible).
    if (IsInt && Field1Ty && Field1Ty->isIntegerTy())
      return false;
    if (!Field1Ty) {
      Field1Ty = CGT.ConvertType(Ty);
      Field1Off = CurOff;
      return true;
    }
    if (!Field2Ty) {
      Field2Ty = CGT.ConvertType(Ty);
      Field2Off = CurOff;
      return true;
    }
    return false;
  }

  if (auto CTy = Ty->getAs<ComplexType>()) {
    if (Field1Ty)
      return false;
    QualType EltTy = CTy->getElementType();
    if (getContext().getTypeSize(EltTy) > FLen)
      return false;
    Field1Ty = CGT.ConvertType(EltTy);
    Field1Off = CurOff;
    Field2Ty = Field1Ty;
    Field2Off = Field1Off + getContext().getTypeSizeInChars(EltTy);
    return true;
  }

  if (const ConstantArrayType *ATy = getContext().getAsConstantArrayType(Ty)) {
    uint64_t ArraySize = ATy->getSize().getZExtValue();
    QualType EltTy = ATy->getElementType();
    CharUnits EltSize = getContext().getTypeSizeInChars(EltTy);
    for (uint64_t i = 0; i < ArraySize; ++i) {
      bool Ret = detectFPCCEligibleStructHelper(EltTy, CurOff, Field1Ty,
                                                Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;
      CurOff += EltSize;
    }
    return true;
  }

  if (const auto *RTy = Ty->getAs<RecordType>()) {
    // Structures with either a non-trivial destructor or a non-trivial
    // copy constructor are not eligible for the FP calling convention.
    if (getRecordArgABI(Ty, CGT.getCXXABI()))
      return false;
    if (isEmptyRecord(getContext(), Ty, true))
      return true;
    const RecordDecl *RD = RTy->getDecl();
    // Unions aren't eligible unless they're empty (which is caught above).
    if (RD->isUnion())
      return false;
    int ZeroWidthBitFieldCount = 0;
    for (const FieldDecl *FD : RD->fields()) {
      const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
      uint64_t FieldOffInBits = Layout.getFieldOffset(FD->getFieldIndex());
      QualType QTy = FD->getType();
      if (FD->isBitField()) {
        unsigned BitWidth = FD->getBitWidthValue(getContext());
        // Allow a bitfield with a type greater than XLen as long as the
        // bitwidth is XLen or less.
        if (getContext().getTypeSize(QTy) > XLen && BitWidth <= XLen)
          QTy = getContext().getIntTypeForBitwidth(XLen, false);
        if (BitWidth == 0) {
          ZeroWidthBitFieldCount++;
          continue;
        }
      }

      bool Ret = detectFPCCEligibleStructHelper(
          QTy, CurOff + getContext().toCharUnitsFromBits(FieldOffInBits),
          Field1Ty, Field1Off, Field2Ty, Field2Off);
      if (!Ret)
        return false;

      // As a quirk of the ABI, zero-width bitfields aren't ignored for fp+fp
      // or int+fp structs, but are ignored for a struct with an fp field and
      // any number of zero-width bitfields.
      if (Field2Ty && ZeroWidthBitFieldCount > 0)
        return false;
    }
    return Field1Ty != nullptr;
  }

  return false;
}
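
// Illustrative examples for the helper above (assuming the ilp32f ABI,
// XLen==32, FLen==32): 'struct { float f; int i; }' flattens to
// Field1Ty==float, Field2Ty==i32 and stays a candidate; 'struct { int a;
// int b; }' is rejected (int+int); 'struct { double d; }' is rejected
// because 64 > FLen.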
// Determine if a struct is eligible for passing according to the floating
// point calling convention (i.e., when flattened it contains a single fp
// value, fp+fp, or int+fp of appropriate size). If so, NeededArgFPRs and
// NeededArgGPRs are incremented appropriately.
bool RISCVABIInfo::detectFPCCEligibleStruct(QualType Ty, llvm::Type *&Field1Ty,
                                            CharUnits &Field1Off,
                                            llvm::Type *&Field2Ty,
                                            CharUnits &Field2Off,
                                            int &NeededArgGPRs,
                                            int &NeededArgFPRs) const {
  Field1Ty = nullptr;
  Field2Ty = nullptr;
  NeededArgGPRs = 0;
  NeededArgFPRs = 0;
  bool IsCandidate = detectFPCCEligibleStructHelper(
      Ty, CharUnits::Zero(), Field1Ty, Field1Off, Field2Ty, Field2Off);
  // Not really a candidate if we have a single int but no float.
  if (Field1Ty && !Field2Ty && !Field1Ty->isFloatingPointTy())
    return false;
  if (!IsCandidate)
    return false;
  if (Field1Ty && Field1Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field1Ty)
    NeededArgGPRs++;
  if (Field2Ty && Field2Ty->isFloatingPointTy())
    NeededArgFPRs++;
  else if (Field2Ty)
    NeededArgGPRs++;
  return IsCandidate;
}
// Call getCoerceAndExpand for the two-element flattened struct described by
// Field1Ty, Field1Off, Field2Ty, Field2Off. This method will create an
// appropriate coerceToType and unpaddedCoerceToType.
ABIArgInfo RISCVABIInfo::coerceAndExpandFPCCEligibleStruct(
    llvm::Type *Field1Ty, CharUnits Field1Off, llvm::Type *Field2Ty,
    CharUnits Field2Off) const {
  SmallVector<llvm::Type *, 3> CoerceElts;
  SmallVector<llvm::Type *, 2> UnpaddedCoerceElts;
  if (!Field1Off.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Field1Off.getQuantity()));

  CoerceElts.push_back(Field1Ty);
  UnpaddedCoerceElts.push_back(Field1Ty);

  if (!Field2Ty) {
    return ABIArgInfo::getCoerceAndExpand(
        llvm::StructType::get(getVMContext(), CoerceElts, !Field1Off.isZero()),
        UnpaddedCoerceElts[0]);
  }

  CharUnits Field2Align =
      CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(Field2Ty));
  CharUnits Field1End = Field1Off +
      CharUnits::fromQuantity(getDataLayout().getTypeStoreSize(Field1Ty));
  CharUnits Field2OffNoPadNoPack = Field1End.alignTo(Field2Align);

  CharUnits Padding = CharUnits::Zero();
  if (Field2Off > Field2OffNoPadNoPack)
    Padding = Field2Off - Field2OffNoPadNoPack;
  else if (Field2Off != Field2Align && Field2Off > Field1End)
    Padding = Field2Off - Field1End;

  bool IsPacked = !Field2Off.isMultipleOf(Field2Align);

  if (!Padding.isZero())
    CoerceElts.push_back(llvm::ArrayType::get(
        llvm::Type::getInt8Ty(getVMContext()), Padding.getQuantity()));

  CoerceElts.push_back(Field2Ty);
  UnpaddedCoerceElts.push_back(Field2Ty);

  auto CoerceToType =
      llvm::StructType::get(getVMContext(), CoerceElts, IsPacked);
  auto UnpaddedCoerceToType =
      llvm::StructType::get(getVMContext(), UnpaddedCoerceElts, IsPacked);

  return ABIArgInfo::getCoerceAndExpand(CoerceToType, UnpaddedCoerceToType);
}
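
// Illustrative example (assuming ilp32f): for 'struct { char c; float f; }'
// this produces coerceToType == { i8, float }; no explicit padding array is
// needed because LLVM's natural struct layout already places the float at
// offset 4, and the struct is not packed.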
ABIArgInfo RISCVABIInfo::classifyArgumentType(QualType Ty, bool IsFixed,
                                              int &ArgGPRsLeft,
                                              int &ArgFPRsLeft) const {
  assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always passed indirectly.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    if (ArgGPRsLeft)
      ArgGPRsLeft -= 1;
    return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
                                           CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty structs/unions.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);

  // Pass floating point values via FPRs if possible.
  if (IsFixed && Ty->isFloatingType() && !Ty->isComplexType() &&
      FLen >= Size && ArgFPRsLeft) {
    ArgFPRsLeft--;
    return ABIArgInfo::getDirect();
  }

  // Complex types for the hard float ABI must be passed direct rather than
  // using CoerceAndExpand.
  if (IsFixed && Ty->isComplexType() && FLen && ArgFPRsLeft >= 2) {
    QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
    if (getContext().getTypeSize(EltTy) <= FLen) {
      ArgFPRsLeft -= 2;
      return ABIArgInfo::getDirect();
    }
  }

  if (IsFixed && FLen && Ty->isStructureOrClassType()) {
    llvm::Type *Field1Ty = nullptr;
    llvm::Type *Field2Ty = nullptr;
    CharUnits Field1Off = CharUnits::Zero();
    CharUnits Field2Off = CharUnits::Zero();
    int NeededArgGPRs = 0;
    int NeededArgFPRs = 0;
    bool IsCandidate =
        detectFPCCEligibleStruct(Ty, Field1Ty, Field1Off, Field2Ty, Field2Off,
                                 NeededArgGPRs, NeededArgFPRs);
    if (IsCandidate && NeededArgGPRs <= ArgGPRsLeft &&
        NeededArgFPRs <= ArgFPRsLeft) {
      ArgGPRsLeft -= NeededArgGPRs;
      ArgFPRsLeft -= NeededArgFPRs;
      return coerceAndExpandFPCCEligibleStruct(Field1Ty, Field1Off, Field2Ty,
                                               Field2Off);
    }
  }

  uint64_t NeededAlign = getContext().getTypeAlign(Ty);
  bool MustUseStack = false;
  // Determine the number of GPRs needed to pass the current argument
  // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
  // register pairs, so may consume 3 registers.
  int NeededArgGPRs = 1;
  if (!IsFixed && NeededAlign == 2 * XLen)
    NeededArgGPRs = 2 + (ArgGPRsLeft % 2);
  else if (Size > XLen && Size <= 2 * XLen)
    NeededArgGPRs = 2;

  if (NeededArgGPRs > ArgGPRsLeft) {
    MustUseStack = true;
    NeededArgGPRs = ArgGPRsLeft;
  }

  ArgGPRsLeft -= NeededArgGPRs;

  if (!isAggregateTypeForABI(Ty) && !Ty->isVectorType()) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    // All integral types are promoted to XLen width, unless passed on the
    // stack.
    if (Size < XLen && Ty->isIntegralOrEnumerationType() && !MustUseStack) {
      return extendType(Ty);
    }

    if (const auto *EIT = Ty->getAs<ExtIntType>()) {
      if (EIT->getNumBits() < XLen && !MustUseStack)
        return extendType(Ty);
      if (EIT->getNumBits() > 128 ||
          (!getContext().getTargetInfo().hasInt128Type() &&
           EIT->getNumBits() > 64))
        return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
    }

    return ABIArgInfo::getDirect();
  }

  // Aggregates which are <= 2*XLen will be passed in registers if possible,
  // so coerce to integers.
  if (Size <= 2 * XLen) {
    unsigned Alignment = getContext().getTypeAlign(Ty);

    // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
    // required, and a 2-element XLen array if only XLen alignment is required.
    if (Size <= XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), XLen));
    } else if (Alignment == 2 * XLen) {
      return ABIArgInfo::getDirect(
          llvm::IntegerType::get(getVMContext(), 2 * XLen));
    } else {
      return ABIArgInfo::getDirect(llvm::ArrayType::get(
          llvm::IntegerType::get(getVMContext(), XLen), 2));
    }
  }

  return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
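
// Illustrative examples (assuming ilp32, XLen==32): 'struct { int32_t a;
// int32_t b; }' is 64 bits with 32-bit alignment and is coerced to
// [2 x i32]; 'struct { int64_t x; }' is 64 bits with 64-bit (2*XLen)
// alignment and is coerced to i64; anything larger than 2*XLen is passed
// indirectly.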
ABIArgInfo RISCVABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  int ArgGPRsLeft = 2;
  int ArgFPRsLeft = FLen ? 2 : 0;

  // The rules for return and argument types are the same, so defer to
  // classifyArgumentType.
  return classifyArgumentType(RetTy, /*IsFixed=*/true, ArgGPRsLeft,
                              ArgFPRsLeft);
}
Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);

  // Empty records are ignored for parameter passing purposes.
  if (isEmptyRecord(getContext(), Ty, true)) {
    Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
    Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
    return Addr;
  }

  std::pair<CharUnits, CharUnits> SizeAndAlign =
      getContext().getTypeInfoInChars(Ty);

  // Arguments bigger than 2*XLen bytes are passed indirectly.
  bool IsIndirect = SizeAndAlign.first > 2 * SlotSize;

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, SizeAndAlign,
                          SlotSize, /*AllowHigherAlign=*/true);
}
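
// Illustrative example (assuming RV64, XLen==64): SlotSize is 8 bytes, so a
// 32-byte struct exceeds 2*XLen bytes and va_arg loads a pointer to a
// caller-allocated copy instead of reading the value in-line from the list.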
ABIArgInfo RISCVABIInfo::extendType(QualType Ty) const {
  int TySize = getContext().getTypeSize(Ty);
  // The RV64 ABI requires unsigned 32-bit integers to be sign extended.
  if (XLen == 64 && Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return ABIArgInfo::getSignExtend(Ty);
  return ABIArgInfo::getExtend(Ty);
}
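
// Illustrative example: on RV64, 'uint32_t' is sign-extended (matching the
// hardware's sign-extending 32-bit operations), whereas 'uint16_t' falls
// through to getExtend and is zero-extended.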
namespace {
class RISCVTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  RISCVTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned XLen,
                         unsigned FLen)
      : TargetCodeGenInfo(std::make_unique<RISCVABIInfo>(CGT, XLen, FLen)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;

    const auto *Attr = FD->getAttr<RISCVInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case RISCVInterruptAttr::user: Kind = "user"; break;
    case RISCVInterruptAttr::supervisor: Kind = "supervisor"; break;
    case RISCVInterruptAttr::machine: Kind = "machine"; break;
    }

    auto *Fn = cast<llvm::Function>(GV);

    Fn->addFnAttr("interrupt", Kind);
  }
};
} // namespace
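
// Illustrative example for the hook above: a function declared
//   __attribute__((interrupt("machine"))) void isr(void);
// is emitted with the IR attribute "interrupt"="machine", which the RISC-V
// backend consults when lowering the interrupt prologue/epilogue.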
//===----------------------------------------------------------------------===//
// VE ABI Implementation.
//
namespace {
class VEABIInfo : public DefaultABIInfo {
public:
  VEABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;
  void computeInfo(CGFunctionInfo &FI) const override;
};
} // end anonymous namespace
ABIArgInfo VEABIInfo::classifyReturnType(QualType Ty) const {
  if (Ty->isAnyComplexType()) {
    return ABIArgInfo::getDirect();
  }
  return DefaultABIInfo::classifyReturnType(Ty);
}

ABIArgInfo VEABIInfo::classifyArgumentType(QualType Ty) const {
  if (Ty->isAnyComplexType()) {
    return ABIArgInfo::getDirect();
  }
  return DefaultABIInfo::classifyArgumentType(Ty);
}

void VEABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &Arg : FI.arguments())
    Arg.info = classifyArgumentType(Arg.type);
}
namespace {
class VETargetCodeGenInfo : public TargetCodeGenInfo {
public:
  VETargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(std::make_unique<VEABIInfo>(CGT)) {}
  // The VE ABI requires that the arguments of variadic and prototype-less
  // functions be passed in both registers and memory.
  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const override {
    return true;
  }
};
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//

bool CodeGenModule::supportsCOMDAT() const {
  return getTriple().supportsCOMDAT();
}
const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  // Helper to set the unique_ptr while still keeping the return value.
  auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & {
    this->TheTargetCodeGenInfo.reset(P);
    return *P;
  };

  const llvm::Triple &Triple = getTarget().getTriple();
  switch (Triple.getArch()) {
  default:
    return SetCGInfo(new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::le32:
    return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    if (Triple.getOS() == llvm::Triple::NaCl)
      return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, false));

  case llvm::Triple::avr:
    return SetCGInfo(new AVRTargetCodeGenInfo(Types));

  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_32:
  case llvm::Triple::aarch64_be: {
    AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
    if (getTarget().getABI() == "darwinpcs")
      Kind = AArch64ABIInfo::DarwinPCS;
    else if (Triple.isOSWindows())
      return SetCGInfo(
          new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64));

    return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64: {
    WebAssemblyABIInfo::ABIKind Kind = WebAssemblyABIInfo::MVP;
    if (getTarget().getABI() == "experimental-mv")
      Kind = WebAssemblyABIInfo::ExperimentalMV;
    return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb: {
    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(
          new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
    }

    ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
    StringRef ABIStr = getTarget().getABI();
    if (ABIStr == "apcs-gnu")
      Kind = ARMABIInfo::APCS;
    else if (ABIStr == "aapcs16")
      Kind = ARMABIInfo::AAPCS16_VFP;
    else if (CodeGenOpts.FloatABI == "hard" ||
             (CodeGenOpts.FloatABI != "soft" &&
              (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
               Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
               Triple.getEnvironment() == llvm::Triple::EABIHF)))
      Kind = ARMABIInfo::AAPCS_VFP;

    return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::ppc: {
    if (Triple.isOSAIX())
      return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit*/ false));

    bool IsSoftFloat =
        CodeGenOpts.FloatABI == "soft" || getTarget().hasFeature("spe");
    bool RetSmallStructInRegABI =
        PPC32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    return SetCGInfo(
        new PPC32TargetCodeGenInfo(Types, IsSoftFloat, RetSmallStructInRegABI));
  }
  case llvm::Triple::ppc64:
    if (Triple.isOSAIX())
      return SetCGInfo(new AIXTargetCodeGenInfo(Types, /*Is64Bit*/ true));

    if (Triple.isOSBinFormatELF()) {
      PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
      if (getTarget().getABI() == "elfv2")
        Kind = PPC64_SVR4_ABIInfo::ELFv2;
      bool HasQPX = getTarget().getABI() == "elfv1-qpx";
      bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

      return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
                                                        IsSoftFloat));
    }
    return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64le: {
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
    if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx")
      Kind = PPC64_SVR4_ABIInfo::ELFv1;
    bool HasQPX = getTarget().getABI() == "elfv1-qpx";
    bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

    return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
                                                      IsSoftFloat));
  }

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return SetCGInfo(new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return SetCGInfo(new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::riscv32:
  case llvm::Triple::riscv64: {
    StringRef ABIStr = getTarget().getABI();
    unsigned XLen = getTarget().getPointerWidth(0);
    unsigned ABIFLen = 0;
    if (ABIStr.endswith("f"))
      ABIFLen = 32;
    else if (ABIStr.endswith("d"))
      ABIFLen = 64;
    return SetCGInfo(new RISCVTargetCodeGenInfo(Types, XLen, ABIFLen));
  }

  case llvm::Triple::systemz: {
    bool SoftFloat = CodeGenOpts.FloatABI == "soft";
    bool HasVector = !SoftFloat && getTarget().getABI() == "vector";
    return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector, SoftFloat));
  }

  case llvm::Triple::tce:
  case llvm::Triple::tcele:
    return SetCGInfo(new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool RetSmallStructInRegABI =
        X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();

    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(new WinX86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
    } else {
      return SetCGInfo(new X86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
          CodeGenOpts.FloatABI == "soft"));
    }
  }

  case llvm::Triple::x86_64: {
    StringRef ABI = getTarget().getABI();
    X86AVXABILevel AVXLevel =
        (ABI == "avx512"
             ? X86AVXABILevel::AVX512
             : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None);

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
      return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
    default:
      return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel));
    }
  }
  case llvm::Triple::hexagon:
    return SetCGInfo(new HexagonTargetCodeGenInfo(Types));
  case llvm::Triple::lanai:
    return SetCGInfo(new LanaiTargetCodeGenInfo(Types));
  case llvm::Triple::r600:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::amdgcn:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::sparc:
    return SetCGInfo(new SparcV8TargetCodeGenInfo(Types));
  case llvm::Triple::sparcv9:
    return SetCGInfo(new SparcV9TargetCodeGenInfo(Types));
  case llvm::Triple::xcore:
    return SetCGInfo(new XCoreTargetCodeGenInfo(Types));
  case llvm::Triple::arc:
    return SetCGInfo(new ARCTargetCodeGenInfo(Types));
  case llvm::Triple::spir:
  case llvm::Triple::spir64:
    return SetCGInfo(new SPIRTargetCodeGenInfo(Types));
  case llvm::Triple::ve:
    return SetCGInfo(new VETargetCodeGenInfo(Types));
  }
}
/// Create an OpenCL kernel for an enqueued block.
///
/// The kernel has the same function type as the block invoke function. Its
/// name is the name of the block invoke function postfixed with "_kernel".
/// It simply calls the block invoke function then returns.
llvm::Function *
TargetCodeGenInfo::createEnqueuedBlockKernel(CodeGenFunction &CGF,
                                             llvm::Function *Invoke,
                                             llvm::Value *BlockLiteral) const {
  auto *InvokeFT = Invoke->getFunctionType();
  llvm::SmallVector<llvm::Type *, 2> ArgTys;
  for (auto &P : InvokeFT->params())
    ArgTys.push_back(P);
  auto &C = CGF.getLLVMContext();
  std::string Name = Invoke->getName().str() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
                                   &CGF.CGM.getModule());
  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  auto &Builder = CGF.Builder;
  Builder.SetInsertPoint(BB);
  llvm::SmallVector<llvm::Value *, 2> Args;
  for (auto &A : F->args())
    Args.push_back(&A);
  Builder.CreateCall(Invoke, Args);
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);
  return F;
}
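
// Illustrative example (names are hypothetical): given an invoke function
//   void @__test_block_invoke(i8 addrspace(4)* %.block_descriptor)
// the helper above emits
//   void @__test_block_invoke_kernel(i8 addrspace(4)* %.block_descriptor)
// whose body is a single call to the invoke function followed by ret void.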
/// Create an OpenCL kernel for an enqueued block.
///
/// The type of the first argument (the block literal) is the struct type
/// of the block literal instead of a pointer type. The first argument
/// (block literal) is passed directly by value to the kernel. The kernel
/// allocates the same type of struct on stack and stores the block literal
/// to it and passes its pointer to the block invoke function. The kernel
/// has "enqueued-block" function attribute and kernel argument metadata.
llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
    CodeGenFunction &CGF, llvm::Function *Invoke,
    llvm::Value *BlockLiteral) const {
  auto &Builder = CGF.Builder;
  auto &C = CGF.getLLVMContext();

  auto *BlockTy = BlockLiteral->getType()->getPointerElementType();
  auto *InvokeFT = Invoke->getFunctionType();
  llvm::SmallVector<llvm::Type *, 2> ArgTys;
  llvm::SmallVector<llvm::Metadata *, 8> AddressQuals;
  llvm::SmallVector<llvm::Metadata *, 8> AccessQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgNames;

  ArgTys.push_back(BlockTy);
  ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
  ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
  AccessQuals.push_back(llvm::MDString::get(C, "none"));
  ArgNames.push_back(llvm::MDString::get(C, "block_literal"));
  for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
    ArgTys.push_back(InvokeFT->getParamType(I));
    ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
    AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
    AccessQuals.push_back(llvm::MDString::get(C, "none"));
    ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
    ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
    ArgNames.push_back(
        llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
  }
  std::string Name = Invoke->getName().str() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
                                   &CGF.CGM.getModule());
  F->addFnAttr("enqueued-block");
  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  Builder.SetInsertPoint(BB);
  const auto BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(BlockTy);
  auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
  BlockPtr->setAlignment(BlockAlign);
  Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
  auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
  llvm::SmallVector<llvm::Value *, 2> Args;
  Args.push_back(Cast);
  for (auto I = F->arg_begin() + 1, E = F->arg_end(); I != E; ++I)
    Args.push_back(I);
  Builder.CreateCall(Invoke, Args);
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);

  F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
  F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
  F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
  F->setMetadata("kernel_arg_base_type",
                 llvm::MDNode::get(C, ArgBaseTypeNames));
  F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
  if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));

  return F;
}
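
// Illustrative example (names are hypothetical): for a block literal of IR
// type %struct.__block_literal, the function above emits a kernel shaped like
//   void @__test_block_invoke_kernel(%struct.__block_literal %arg)
// that stores %arg to a stack alloca and passes the alloca's address to the
// block invoke function.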