From 547ce3e9c7fe7e046b20f51f2f0d370a683659d1 Mon Sep 17 00:00:00 2001 From: Ahmed Bougacha Date: Fri, 10 May 2024 15:58:57 -0700 Subject: [PATCH] [PAC] Implement pointer authentication for C++ member function pointers. Introduces type-based signing of member function pointers. To support this discrimination scheme, member function pointers to virtual methods are no longer emitted as indices into a vtable; they are instead emitted as pointers to thunks that perform the virtual dispatch. This does mean member function pointers are no longer necessarily directly comparable; however, since the standard leaves such comparisons unspecified for virtual functions, this is acceptable. We derive the discriminator from the C++ mangling of the type of the pointer being authenticated. Co-Authored-By: Akira Hatanaka <ahatanaka@apple.com> Co-Authored-By: John McCall <rjmccall@apple.com> --- clang/include/clang/AST/ASTContext.h | 2 +- .../include/clang/Basic/PointerAuthOptions.h | 3 + clang/lib/AST/ASTContext.cpp | 12 +- clang/lib/CodeGen/CGCall.cpp | 212 +++++---- clang/lib/CodeGen/CGPointerAuth.cpp | 34 ++ clang/lib/CodeGen/CodeGenFunction.h | 3 +- clang/lib/CodeGen/CodeGenModule.h | 8 + clang/lib/CodeGen/ItaniumCXXABI.cpp | 254 +++++++++- clang/lib/Frontend/CompilerInvocation.cpp | 2 + .../ptrauth-member-function-pointer.cpp | 433 ++++++++++++++++++ clang/test/Preprocessor/ptrauth_feature.c | 9 + 11 files changed, 855 insertions(+), 117 deletions(-) create mode 100644 clang/test/CodeGenCXX/ptrauth-member-function-pointer.cpp diff --git a/clang/include/clang/AST/ASTContext.h b/clang/include/clang/AST/ASTContext.h index 608bd90fcc3ff9..f2a17a56dc2019 100644 --- a/clang/include/clang/AST/ASTContext.h +++ b/clang/include/clang/AST/ASTContext.h @@ -1287,7 +1287,7 @@ class ASTContext : public RefCountedBase<ASTContext> { getPointerAuthVTablePointerDiscriminator(const CXXRecordDecl *RD); /// Return the "other" type-specific discriminator for the given type. - uint16_t getPointerAuthTypeDiscriminator(QualType T) const; + uint16_t getPointerAuthTypeDiscriminator(QualType T); /// Apply Objective-C protocol qualifiers to the given type. /// \param allowOnPointerType specifies if we can apply protocol diff --git a/clang/include/clang/Basic/PointerAuthOptions.h b/clang/include/clang/Basic/PointerAuthOptions.h index 197d63642ca6d2..595322b8271b63 100644 --- a/clang/include/clang/Basic/PointerAuthOptions.h +++ b/clang/include/clang/Basic/PointerAuthOptions.h @@ -175,6 +175,9 @@ struct PointerAuthOptions { /// The ABI for variadic C++ virtual function pointers. PointerAuthSchema CXXVirtualVariadicFunctionPointers; + + /// The ABI for C++ member function pointers.
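+ /// With the defaults set in CompilerInvocation.cpp (IA key, no address + /// discrimination), the discriminator is derived from the mangling of + /// the member pointer type.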
+ PointerAuthSchema CXXMemberFunctionPointers; }; } // end namespace clang diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp index a8e599f7ebe049..58bb259a5395a5 100644 --- a/clang/lib/AST/ASTContext.cpp +++ b/clang/lib/AST/ASTContext.cpp @@ -3404,7 +3404,7 @@ static void encodeTypeForFunctionPointerAuth(const ASTContext &Ctx, } } -uint16_t ASTContext::getPointerAuthTypeDiscriminator(QualType T) const { +uint16_t ASTContext::getPointerAuthTypeDiscriminator(QualType T) { assert(!T->isDependentType() && "cannot compute type discriminator of a dependent type"); @@ -3414,11 +3414,13 @@ uint16_t ASTContext::getPointerAuthTypeDiscriminator(QualType T) { if (T->isFunctionPointerType() || T->isFunctionReferenceType()) T = T->getPointeeType(); - if (T->isFunctionType()) + if (T->isFunctionType()) { encodeTypeForFunctionPointerAuth(*this, Out, T); - else - llvm_unreachable( - "type discrimination of non-function type not implemented yet"); + } else { + T = T.getUnqualifiedType(); + std::unique_ptr<MangleContext> MC(createMangleContext()); + MC->mangleCanonicalTypeName(T, Out); + } return llvm::getPointerAuthStableSipHash(Str); } diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp index d582aba679ddc4..52e61cf37189d7 100644 --- a/clang/lib/CodeGen/CGCall.cpp +++ b/clang/lib/CodeGen/CGCall.cpp @@ -5034,7 +5034,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, ReturnValueSlot ReturnValue, const CallArgList &CallArgs, llvm::CallBase **callOrInvoke, bool IsMustTail, - SourceLocation Loc) { + SourceLocation Loc, + bool IsVirtualFunctionPointerThunk) { // FIXME: We no longer need the types from CallArgs; lift up and simplify. assert(Callee.isOrdinary() || Callee.isVirtual()); @@ -5098,7 +5099,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, RawAddress SRetAlloca = RawAddress::invalid(); llvm::Value *UnusedReturnSizePtr = nullptr; if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) { - if (!ReturnValue.isNull()) { + if (IsVirtualFunctionPointerThunk && RetAI.isIndirect()) { + SRetPtr = makeNaturalAddressForPointer(CurFn->arg_begin() + + IRFunctionArgs.getSRetArgNo(), + RetTy, CharUnits::fromQuantity(1)); + } else if (!ReturnValue.isNull()) { SRetPtr = ReturnValue.getAddress(); } else { SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca); @@ -5877,119 +5882,130 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo, CallArgs.freeArgumentMemory(*this); // Extract the return value. - RValue Ret = [&] { - switch (RetAI.getKind()) { - case ABIArgInfo::CoerceAndExpand: { - auto coercionType = RetAI.getCoerceAndExpandType(); - - Address addr = SRetPtr.withElementType(coercionType); - - assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType()); - bool requiresExtract = isa<llvm::StructType>(CI->getType()); + RValue Ret; - unsigned unpaddedIndex = 0; - for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { - llvm::Type *eltType = coercionType->getElementType(i); - if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue; - Address eltAddr = Builder.CreateStructGEP(addr, i); - llvm::Value *elt = CI; - if (requiresExtract) - elt = Builder.CreateExtractValue(elt, unpaddedIndex++); - else - assert(unpaddedIndex == 0); - Builder.CreateStore(elt, eltAddr); + // If the current function is a virtual function pointer thunk, avoid copying + // the return value of the musttail call to a temporary.
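+ // A musttail call must be returned unchanged by the caller, so the thunk + // can simply wrap the call instruction itself in an RValue.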
+ if (IsVirtualFunctionPointerThunk) + Ret = RValue::get(CI); + else + Ret = [&] { + switch (RetAI.getKind()) { + case ABIArgInfo::CoerceAndExpand: { + auto coercionType = RetAI.getCoerceAndExpandType(); + + Address addr = SRetPtr.withElementType(coercionType); + + assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType()); + bool requiresExtract = isa<llvm::StructType>(CI->getType()); + + unsigned unpaddedIndex = 0; + for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) { + llvm::Type *eltType = coercionType->getElementType(i); + if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) + continue; + Address eltAddr = Builder.CreateStructGEP(addr, i); + llvm::Value *elt = CI; + if (requiresExtract) + elt = Builder.CreateExtractValue(elt, unpaddedIndex++); + else + assert(unpaddedIndex == 0); + Builder.CreateStore(elt, eltAddr); + } + [[fallthrough]]; + } - [[fallthrough]]; - } - - case ABIArgInfo::InAlloca: - case ABIArgInfo::Indirect: { - RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation()); - if (UnusedReturnSizePtr) - PopCleanupBlock(); - return ret; - } - case ABIArgInfo::Ignore: - // If we are ignoring an argument that had a result, make sure to - // construct the appropriate return value for our caller. - return GetUndefRValue(RetTy); + case ABIArgInfo::InAlloca: + case ABIArgInfo::Indirect: { + RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation()); + if (UnusedReturnSizePtr) + PopCleanupBlock(); + return ret; + } - case ABIArgInfo::Extend: - case ABIArgInfo::Direct: { - llvm::Type *RetIRTy = ConvertType(RetTy); - if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) { - switch (getEvaluationKind(RetTy)) { - case TEK_Complex: { - llvm::Value *Real = Builder.CreateExtractValue(CI, 0); - llvm::Value *Imag = Builder.CreateExtractValue(CI, 1); - return RValue::getComplex(std::make_pair(Real, Imag)); - } - case TEK_Aggregate: { - Address DestPtr = ReturnValue.getAddress(); - bool DestIsVolatile = ReturnValue.isVolatile(); + case ABIArgInfo::Ignore: + // If we are ignoring an argument that had a result, make sure to + // construct the appropriate return value for our caller. + return GetUndefRValue(RetTy); + + case ABIArgInfo::Extend: + case ABIArgInfo::Direct: { + llvm::Type *RetIRTy = ConvertType(RetTy); + if (RetAI.getCoerceToType() == RetIRTy && + RetAI.getDirectOffset() == 0) { + switch (getEvaluationKind(RetTy)) { + case TEK_Complex: { + llvm::Value *Real = Builder.CreateExtractValue(CI, 0); + llvm::Value *Imag = Builder.CreateExtractValue(CI, 1); + return RValue::getComplex(std::make_pair(Real, Imag)); + } + case TEK_Aggregate: { + Address DestPtr = ReturnValue.getAddress(); + bool DestIsVolatile = ReturnValue.isVolatile(); - if (!DestPtr.isValid()) { - DestPtr = CreateMemTemp(RetTy, "agg.tmp"); - DestIsVolatile = false; + if (!DestPtr.isValid()) { + DestPtr = CreateMemTemp(RetTy, "agg.tmp"); + DestIsVolatile = false; + } + EmitAggregateStore(CI, DestPtr, DestIsVolatile); + return RValue::getAggregate(DestPtr); } - EmitAggregateStore(CI, DestPtr, DestIsVolatile); - return RValue::getAggregate(DestPtr); + case TEK_Scalar: { + // If the argument doesn't match, perform a bitcast to coerce it.
This + // can happen due to trivial type mismatches. - case TEK_Scalar: { - // If the argument doesn't match, perform a bitcast to coerce it. This - // can happen due to trivial type mismatches. + llvm::Value *V = CI; + if (V->getType() != RetIRTy) + V = Builder.CreateBitCast(V, RetIRTy); + return RValue::get(V); + } } - llvm_unreachable("bad evaluation kind"); } + llvm_unreachable("bad evaluation kind"); + + // If coercing a fixed vector from a scalable vector for ABI + // compatibility, and the types match, use the llvm.vector.extract + // intrinsic to perform the conversion. + if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(RetIRTy)) { llvm::Value *V = CI; - if (V->getType() != RetIRTy) - V = Builder.CreateBitCast(V, RetIRTy); - return RValue::get(V); - } + if (auto *ScalableSrcTy = + dyn_cast<llvm::ScalableVectorType>(V->getType())) { + if (FixedDstTy->getElementType() == + ScalableSrcTy->getElementType()) { + llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty); + V = Builder.CreateExtractVector(FixedDstTy, V, Zero, + "cast.fixed"); + return RValue::get(V); + } + } } - llvm_unreachable("bad evaluation kind"); - } - // If coercing a fixed vector from a scalable vector for ABI - // compatibility, and the types match, use the llvm.vector.extract - // intrinsic to perform the conversion. - if (auto *FixedDstTy = dyn_cast<llvm::FixedVectorType>(RetIRTy)) { - llvm::Value *V = CI; - if (auto *ScalableSrcTy = - dyn_cast<llvm::ScalableVectorType>(V->getType())) { - if (FixedDstTy->getElementType() == ScalableSrcTy->getElementType()) { - llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty); - V = Builder.CreateExtractVector(FixedDstTy, V, Zero, "cast.fixed"); - return RValue::get(V); - } + Address DestPtr = ReturnValue.getValue(); + bool DestIsVolatile = ReturnValue.isVolatile(); + + if (!DestPtr.isValid()) { + DestPtr = CreateMemTemp(RetTy, "coerce"); + DestIsVolatile = false; } - } - Address DestPtr = ReturnValue.getValue(); - bool DestIsVolatile = ReturnValue.isVolatile(); + // An empty record can overlap other data (if declared with + // no_unique_address); omit the store for such types - as there is no + // actual data to store. + if (!isEmptyRecord(getContext(), RetTy, true)) { + // If the value is offset in memory, apply the offset now. + Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI); + CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this); + } - if (!DestPtr.isValid()) { - DestPtr = CreateMemTemp(RetTy, "coerce"); - DestIsVolatile = false; + return convertTempToRValue(DestPtr, RetTy, SourceLocation()); } - // An empty record can overlap other data (if declared with - // no_unique_address); omit the store for such types - as there is no - // actual data to store. - if (!isEmptyRecord(getContext(), RetTy, true)) { - // If the value is offset in memory, apply the offset now. - Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI); - CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this); + case ABIArgInfo::Expand: + case ABIArgInfo::IndirectAliased: + llvm_unreachable("Invalid ABI kind for return argument"); } - return convertTempToRValue(DestPtr, RetTy, SourceLocation()); - } - - case ABIArgInfo::Expand: - case ABIArgInfo::IndirectAliased: - llvm_unreachable("Invalid ABI kind for return argument"); - } - - llvm_unreachable("Unhandled ABIArgInfo::Kind"); - } (); + llvm_unreachable("Unhandled ABIArgInfo::Kind"); + }(); // Emit the assume_aligned check on the return value.
if (Ret.isScalar() && TargetDecl) { diff --git a/clang/lib/CodeGen/CGPointerAuth.cpp b/clang/lib/CodeGen/CGPointerAuth.cpp index 7fe62c0788742d..b975a6717711ba 100644 --- a/clang/lib/CodeGen/CGPointerAuth.cpp +++ b/clang/lib/CodeGen/CGPointerAuth.cpp @@ -365,6 +365,40 @@ llvm::Constant *CodeGenModule::getFunctionPointer(GlobalDecl GD, return getFunctionPointer(getRawFunctionPointer(GD, Ty), FuncType); } +CGPointerAuthInfo CodeGenModule::getMemberFunctionPointerAuthInfo(QualType FT) { + assert(FT->getAs<MemberPointerType>() && "MemberPointerType expected"); + auto &Schema = getCodeGenOpts().PointerAuth.CXXMemberFunctionPointers; + if (!Schema) + return CGPointerAuthInfo(); + + assert(!Schema.isAddressDiscriminated() && + "function pointers cannot use address-specific discrimination"); + + llvm::ConstantInt *Discriminator = + getPointerAuthOtherDiscriminator(Schema, GlobalDecl(), FT); + return CGPointerAuthInfo(Schema.getKey(), Schema.getAuthenticationMode(), + /* IsIsaPointer */ false, + /* AuthenticatesNullValues */ false, Discriminator); +} + +llvm::Constant *CodeGenModule::getMemberFunctionPointer(llvm::Constant *Pointer, + QualType FT) { + if (CGPointerAuthInfo PointerAuth = getMemberFunctionPointerAuthInfo(FT)) + return getConstantSignedPointer( + Pointer, PointerAuth.getKey(), nullptr, + cast_or_null<llvm::ConstantInt>(PointerAuth.getDiscriminator())); + + return Pointer; +} + +llvm::Constant *CodeGenModule::getMemberFunctionPointer(const FunctionDecl *FD, + llvm::Type *Ty) { + QualType FT = FD->getType(); + FT = getContext().getMemberPointerType( + FT, cast<CXXMethodDecl>(FD)->getParent()->getTypeForDecl()); + return getMemberFunctionPointer(getRawFunctionPointer(FD, Ty), FT); +} + std::optional<PointerAuthQualifier> CodeGenModule::computeVTPointerAuthentication(const CXXRecordDecl *ThisClass) { auto DefaultAuthentication = getCodeGenOpts().PointerAuth.CXXVTablePointers; diff --git a/clang/lib/CodeGen/CodeGenFunction.h b/clang/lib/CodeGen/CodeGenFunction.h index d83e38cab8e2d1..8bb4156beb0580 100644 --- a/clang/lib/CodeGen/CodeGenFunction.h +++ b/clang/lib/CodeGen/CodeGenFunction.h @@ -4373,7 +4373,8 @@ class CodeGenFunction : public CodeGenTypeCache { RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **callOrInvoke, bool IsMustTail, - SourceLocation Loc); + SourceLocation Loc, + bool IsVirtualFunctionPointerThunk = false); RValue EmitCall(const CGFunctionInfo &CallInfo, const CGCallee &Callee, ReturnValueSlot ReturnValue, const CallArgList &Args, llvm::CallBase **callOrInvoke = nullptr, diff --git a/clang/lib/CodeGen/CodeGenModule.h b/clang/lib/CodeGen/CodeGenModule.h index 657e681730c3ad..284bba823baeb4 100644 --- a/clang/lib/CodeGen/CodeGenModule.h +++ b/clang/lib/CodeGen/CodeGenModule.h @@ -973,8 +973,16 @@ class CodeGenModule : public CodeGenTypeCache { llvm::Constant *getFunctionPointer(llvm::Constant *Pointer, QualType FunctionType); + llvm::Constant *getMemberFunctionPointer(const FunctionDecl *FD, + llvm::Type *Ty = nullptr); + + llvm::Constant *getMemberFunctionPointer(llvm::Constant *Pointer, + QualType FT); + CGPointerAuthInfo getFunctionPointerAuthInfo(QualType T); + CGPointerAuthInfo getMemberFunctionPointerAuthInfo(QualType FT); + CGPointerAuthInfo getPointerAuthInfoForPointeeType(QualType type); CGPointerAuthInfo getPointerAuthInfoForType(QualType type); diff --git a/clang/lib/CodeGen/ItaniumCXXABI.cpp b/clang/lib/CodeGen/ItaniumCXXABI.cpp index 37b436a21fbc0d..124c9536c899af 100644 --- a/clang/lib/CodeGen/ItaniumCXXABI.cpp +++
b/clang/lib/CodeGen/ItaniumCXXABI.cpp @@ -388,6 +388,9 @@ class ItaniumCXXABI : public CodeGen::CGCXXABI { bool NeedsVTTParameter(GlobalDecl GD) override; + llvm::Constant * + getOrCreateVirtualFunctionPointerThunk(const CXXMethodDecl *MD); + /**************************** RTTI Uniqueness ******************************/ protected: @@ -426,6 +429,9 @@ class ItaniumCXXABI : public CodeGen::CGCXXABI { const CXXRecordDecl *RD) override; private: + llvm::Constant * + getSignedVirtualMemberFunctionPointer(const CXXMethodDecl *MD); + bool hasAnyUnusedVirtualInlineFunction(const CXXRecordDecl *RD) const { const auto &VtableLayout = CGM.getItaniumVTableContext().getVTableLayout(RD); @@ -835,7 +841,25 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer( CalleePtr->addIncoming(VirtualFn, FnVirtual); CalleePtr->addIncoming(NonVirtualFn, FnNonVirtual); - CGCallee Callee(FPT, CalleePtr); + CGPointerAuthInfo PointerAuth; + + if (const auto &Schema = + CGM.getCodeGenOpts().PointerAuth.CXXMemberFunctionPointers) { + llvm::PHINode *DiscriminatorPHI = Builder.CreatePHI(CGF.IntPtrTy, 2); + DiscriminatorPHI->addIncoming(llvm::ConstantInt::get(CGF.IntPtrTy, 0), + FnVirtual); + const auto &AuthInfo = + CGM.getMemberFunctionPointerAuthInfo(QualType(MPT, 0)); + assert(Schema.getKey() == AuthInfo.getKey() && + "Keys for virtual and non-virtual member functions must match"); + auto *NonVirtualDiscriminator = AuthInfo.getDiscriminator(); + DiscriminatorPHI->addIncoming(NonVirtualDiscriminator, FnNonVirtual); + PointerAuth = CGPointerAuthInfo( + Schema.getKey(), Schema.getAuthenticationMode(), Schema.isIsaPointer(), + Schema.authenticatesNullValues(), DiscriminatorPHI); + } + + CGCallee Callee(FPT, CalleePtr, PointerAuth); return Callee; } @@ -853,6 +877,25 @@ llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress( "memptr.offset"); } +// See if it's possible to return a constant signed pointer. +static llvm::Constant *pointerAuthResignConstant( + llvm::Value *Ptr, const CGPointerAuthInfo &CurAuthInfo, + const CGPointerAuthInfo &NewAuthInfo, CodeGenModule &CGM) { + auto *CPA = dyn_cast<llvm::ConstantPtrAuth>(Ptr); + + if (!CPA) + return nullptr; + + assert(CPA->getKey()->getZExtValue() == CurAuthInfo.getKey() && + CPA->getAddrDiscriminator()->isZeroValue() && + CPA->getDiscriminator() == CurAuthInfo.getDiscriminator() && + "unexpected key or discriminators"); + + return CGM.getConstantSignedPointer( + CPA->getPointer(), NewAuthInfo.getKey(), nullptr, + cast<llvm::ConstantInt>(NewAuthInfo.getDiscriminator())); +} + /// Perform a bitcast, derived-to-base, or base-to-derived member pointer /// conversion. /// @@ -880,21 +923,63 @@ llvm::Value * ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF, const CastExpr *E, llvm::Value *src) { + // Use constant emission if we can.
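+ // (Constant operands can be resigned at compile time; see + // pointerAuthResignMemberFunctionPointer below.)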
+ if (isa<llvm::Constant>(src)) + return EmitMemberPointerConversion(E, cast<llvm::Constant>(src)); + assert(E->getCastKind() == CK_DerivedToBaseMemberPointer || E->getCastKind() == CK_BaseToDerivedMemberPointer || E->getCastKind() == CK_ReinterpretMemberPointer); + CGBuilderTy &Builder = CGF.Builder; + QualType DstType = E->getType(); + + if (DstType->isMemberFunctionPointerType()) { + if (const auto &NewAuthInfo = + CGM.getMemberFunctionPointerAuthInfo(DstType)) { + QualType SrcType = E->getSubExpr()->getType(); + assert(SrcType->isMemberFunctionPointerType()); + const auto &CurAuthInfo = CGM.getMemberFunctionPointerAuthInfo(SrcType); + llvm::Value *MemFnPtr = Builder.CreateExtractValue(src, 0, "memptr.ptr"); + llvm::Type *OrigTy = MemFnPtr->getType(); + + llvm::BasicBlock *StartBB = Builder.GetInsertBlock(); + llvm::BasicBlock *ResignBB = CGF.createBasicBlock("resign"); + llvm::BasicBlock *MergeBB = CGF.createBasicBlock("merge"); + + // Check whether we have a virtual offset or a pointer to a function. + assert(UseARMMethodPtrABI && "ARM ABI expected"); + llvm::Value *Adj = Builder.CreateExtractValue(src, 1, "memptr.adj"); + llvm::Constant *Ptrdiff_1 = llvm::ConstantInt::get(CGM.PtrDiffTy, 1); + llvm::Value *AndVal = Builder.CreateAnd(Adj, Ptrdiff_1); + llvm::Value *IsVirtualOffset = + Builder.CreateIsNotNull(AndVal, "is.virtual.offset"); + Builder.CreateCondBr(IsVirtualOffset, MergeBB, ResignBB); + + CGF.EmitBlock(ResignBB); + llvm::Type *PtrTy = llvm::PointerType::getUnqual(CGM.Int8Ty); + MemFnPtr = Builder.CreateIntToPtr(MemFnPtr, PtrTy); + MemFnPtr = + CGF.emitPointerAuthResign(MemFnPtr, SrcType, CurAuthInfo, NewAuthInfo, + isa<llvm::Constant>(src)); + MemFnPtr = Builder.CreatePtrToInt(MemFnPtr, OrigTy); + llvm::Value *ResignedVal = Builder.CreateInsertValue(src, MemFnPtr, 0); + ResignBB = Builder.GetInsertBlock(); + + CGF.EmitBlock(MergeBB); + llvm::PHINode *NewSrc = Builder.CreatePHI(src->getType(), 2); + NewSrc->addIncoming(src, StartBB); + NewSrc->addIncoming(ResignedVal, ResignBB); + src = NewSrc; + } + } + // Under Itanium, reinterprets don't require any additional processing. if (E->getCastKind() == CK_ReinterpretMemberPointer) return src; - // Use constant emission if we can. - if (isa<llvm::Constant>(src)) - return EmitMemberPointerConversion(E, cast<llvm::Constant>(src)); - llvm::Constant *adj = getMemberPointerAdjustment(E); if (!adj) return src; - CGBuilderTy &Builder = CGF.Builder; bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer); const MemberPointerType *destTy = @@ -932,6 +1017,34 @@ ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF, return Builder.CreateInsertValue(src, dstAdj, 1); } +static llvm::Constant * +pointerAuthResignMemberFunctionPointer(llvm::Constant *Src, QualType DestType, + QualType SrcType, CodeGenModule &CGM) { + assert(DestType->isMemberFunctionPointerType() && + SrcType->isMemberFunctionPointerType() && + "member function pointers expected"); + if (DestType == SrcType) + return Src; + + const auto &NewAuthInfo = CGM.getMemberFunctionPointerAuthInfo(DestType); + const auto &CurAuthInfo = CGM.getMemberFunctionPointerAuthInfo(SrcType); + + if (!NewAuthInfo && !CurAuthInfo) + return Src; + + llvm::Constant *MemFnPtr = Src->getAggregateElement(0u); + if (MemFnPtr->getNumOperands() == 0) { + // src must be a pair of null pointers.
+ assert(isa<llvm::ConstantInt>(MemFnPtr) && "constant int expected"); + return Src; + } + + llvm::Constant *ConstPtr = pointerAuthResignConstant( + cast<llvm::ConstantExpr>(MemFnPtr)->getOperand(0), CurAuthInfo, NewAuthInfo, CGM); + ConstPtr = llvm::ConstantExpr::getPtrToInt(ConstPtr, MemFnPtr->getType()); + return ConstantFoldInsertValueInstruction(Src, ConstPtr, 0); +} + llvm::Constant * ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E, llvm::Constant *src) { @@ -939,6 +1052,12 @@ ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E, E->getCastKind() == CK_BaseToDerivedMemberPointer || E->getCastKind() == CK_ReinterpretMemberPointer); + QualType DstType = E->getType(); + + if (DstType->isMemberFunctionPointerType()) + src = pointerAuthResignMemberFunctionPointer( + src, DstType, E->getSubExpr()->getType(), CGM); + // Under Itanium, reinterprets don't require any additional processing. if (E->getCastKind() == CK_ReinterpretMemberPointer) return src; @@ -1036,9 +1155,32 @@ llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD, // least significant bit of adj then makes exactly the same // discrimination as the least significant bit of ptr does for // Itanium. - MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset); - MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy, - 2 * ThisAdjustment.getQuantity() + 1); + + // We cannot use the Itanium ABI's representation for virtual member + // function pointers under pointer authentication because it would + // require us to store both the virtual offset and the constant + // discriminator in the pointer, which would be immediately vulnerable + // to attack. Instead we introduce a thunk that does the virtual dispatch + // and store it as if it were a non-virtual member function. This means + // that virtual function pointers may not compare equal anymore, but + // fortunately they aren't required to by the standard, and we do make + // a best-effort attempt to re-use the thunk. + // + // To support interoperation with code in which pointer authentication + // is disabled, dereferencing a member function pointer must still handle + // the virtual case, but it can use a discriminator which should never + // be valid. + const auto &Schema = + CGM.getCodeGenOpts().PointerAuth.CXXMemberFunctionPointers; + if (Schema) + MemPtr[0] = llvm::ConstantExpr::getPtrToInt( + getSignedVirtualMemberFunctionPointer(MD), CGM.PtrDiffTy); + else + MemPtr[0] = llvm::ConstantInt::get(CGM.PtrDiffTy, VTableOffset); + // Don't set the LSB of adj to 1 if pointer authentication for member + // function pointers is enabled. + MemPtr[1] = llvm::ConstantInt::get( + CGM.PtrDiffTy, 2 * ThisAdjustment.getQuantity() + !Schema); } else { // Itanium C++ ABI 2.3: // For a virtual function, [the pointer field] is 1 plus the @@ -1060,7 +1202,7 @@ llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD, // function type is incomplete.
Ty = CGM.PtrDiffTy; } - llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty); + llvm::Constant *addr = CGM.getMemberFunctionPointer(MD, Ty); MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, CGM.PtrDiffTy); MemPtr[1] = llvm::ConstantInt::get(CGM.PtrDiffTy, @@ -1080,8 +1222,12 @@ llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP, CharUnits ThisAdjustment = getContext().getMemberPointerPathAdjustment(MP); - if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD)) - return BuildMemberPointer(MD, ThisAdjustment); + if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD)) { + llvm::Constant *Src = BuildMemberPointer(MD, ThisAdjustment); + QualType SrcType = getContext().getMemberPointerType( + MD->getType(), MD->getParent()->getTypeForDecl()); + return pointerAuthResignMemberFunctionPointer(Src, MPType, SrcType, CGM); + } CharUnits FieldOffset = getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD)); @@ -3185,6 +3331,78 @@ bool ItaniumCXXABI::NeedsVTTParameter(GlobalDecl GD) { return false; } +llvm::Constant * +ItaniumCXXABI::getOrCreateVirtualFunctionPointerThunk(const CXXMethodDecl *MD) { + SmallString<256> MethodName; + llvm::raw_svector_ostream Out(MethodName); + getMangleContext().mangleCXXName(MD, Out); + MethodName += "_vfpthunk_"; + StringRef ThunkName = MethodName.str(); + llvm::Function *ThunkFn; + if ((ThunkFn = cast_or_null<llvm::Function>( + CGM.getModule().getNamedValue(ThunkName)))) + return ThunkFn; + + const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeCXXMethodDeclaration(MD); + llvm::FunctionType *ThunkTy = CGM.getTypes().GetFunctionType(FnInfo); + llvm::GlobalValue::LinkageTypes Linkage = + MD->isExternallyVisible() ? llvm::GlobalValue::LinkOnceODRLinkage + : llvm::GlobalValue::InternalLinkage; + ThunkFn = + llvm::Function::Create(ThunkTy, Linkage, ThunkName, &CGM.getModule()); + if (Linkage == llvm::GlobalValue::LinkOnceODRLinkage) + ThunkFn->setVisibility(llvm::GlobalValue::HiddenVisibility); + assert(ThunkFn->getName() == ThunkName && "name was uniqued!"); + + CGM.SetLLVMFunctionAttributes(MD, FnInfo, ThunkFn, /*IsThunk=*/true); + CGM.SetLLVMFunctionAttributesForDefinition(MD, ThunkFn); + + // Stack protection sometimes gets inserted after the musttail call. + ThunkFn->removeFnAttr(llvm::Attribute::StackProtect); + ThunkFn->removeFnAttr(llvm::Attribute::StackProtectStrong); + ThunkFn->removeFnAttr(llvm::Attribute::StackProtectReq); + + // Start codegen. + CodeGenFunction CGF(CGM); + CGF.CurGD = GlobalDecl(MD); + CGF.CurFuncIsThunk = true; + + // Build FunctionArgs.
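+ // The thunk takes exactly the method's own prototype arguments; they are + // forwarded unchanged to the virtual call below.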
+ FunctionArgList FunctionArgs; + CGF.BuildFunctionArgList(CGF.CurGD, FunctionArgs); + + CGF.StartFunction(GlobalDecl(), FnInfo.getReturnType(), ThunkFn, FnInfo, + FunctionArgs, MD->getLocation(), SourceLocation()); + llvm::Value *ThisVal = loadIncomingCXXThis(CGF); + setCXXABIThisValue(CGF, ThisVal); + + CallArgList CallArgs; + for (const VarDecl *VD : FunctionArgs) + CGF.EmitDelegateCallArg(CallArgs, VD, SourceLocation()); + + const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>(); + RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, /*this*/ 1); + const CGFunctionInfo &CallInfo = + CGM.getTypes().arrangeCXXMethodCall(CallArgs, FPT, Required, 0); + CGCallee Callee = CGCallee::forVirtual(nullptr, GlobalDecl(MD), + getThisAddress(CGF), ThunkTy); + llvm::CallBase *CallOrInvoke; + CGF.EmitCall(CallInfo, Callee, ReturnValueSlot(), CallArgs, &CallOrInvoke, + /*IsMustTail=*/true, SourceLocation(), true); + auto *Call = cast<llvm::CallInst>(CallOrInvoke); + Call->setTailCallKind(llvm::CallInst::TCK_MustTail); + if (Call->getType()->isVoidTy()) + CGF.Builder.CreateRetVoid(); + else + CGF.Builder.CreateRet(Call); + + // Finish the function to maintain CodeGenFunction invariants. + // FIXME: Don't emit unreachable code. + CGF.EmitBlock(CGF.createBasicBlock()); + CGF.FinishFunction(); + return ThunkFn; +} + namespace { class ItaniumRTTIBuilder { CodeGenModule &CGM; // Per-module state. @@ -4875,6 +5093,18 @@ ItaniumCXXABI::LoadVTablePtr(CodeGenFunction &CGF, Address This, return {CGF.GetVTablePtr(This, CGM.Int8PtrTy, RD), RD}; } +llvm::Constant * +ItaniumCXXABI::getSignedVirtualMemberFunctionPointer(const CXXMethodDecl *MD) { + const CXXMethodDecl *origMD = + cast<CXXMethodDecl>(CGM.getItaniumVTableContext() + .findOriginalMethod(MD->getCanonicalDecl()) + .getDecl()); + llvm::Constant *thunk = getOrCreateVirtualFunctionPointerThunk(origMD); + QualType funcType = CGM.getContext().getMemberPointerType( + MD->getType(), MD->getParent()->getTypeForDecl()); + return CGM.getMemberFunctionPointer(thunk, funcType); +} + void WebAssemblyCXXABI::emitBeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *C) { if (CGF.getTarget().hasFeature("exception-handling")) diff --git a/clang/lib/Frontend/CompilerInvocation.cpp b/clang/lib/Frontend/CompilerInvocation.cpp index bf57addff1c6df..28e8fbdf1cffca 100644 --- a/clang/lib/Frontend/CompilerInvocation.cpp +++ b/clang/lib/Frontend/CompilerInvocation.cpp @@ -1494,6 +1494,8 @@ void CompilerInvocation::setDefaultPointerAuthOptions( PointerAuthSchema(Key::ASDA, false, Discrimination::None); Opts.CXXVirtualFunctionPointers = Opts.CXXVirtualVariadicFunctionPointers = PointerAuthSchema(Key::ASIA, true, Discrimination::Decl); + Opts.CXXMemberFunctionPointers = + PointerAuthSchema(Key::ASIA, false, Discrimination::Type); } } diff --git a/clang/test/CodeGenCXX/ptrauth-member-function-pointer.cpp b/clang/test/CodeGenCXX/ptrauth-member-function-pointer.cpp new file mode 100644 index 00000000000000..5e84e3e7bc5e9a --- /dev/null +++ b/clang/test/CodeGenCXX/ptrauth-member-function-pointer.cpp @@ -0,0 +1,433 @@ +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics -emit-llvm -std=c++11 -O1 -disable-llvm-passes -o - %s | FileCheck -check-prefixes=CHECK,NODEBUG %s +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics -emit-llvm -std=c++11 -O1 -disable-llvm-passes -debug-info-kind=limited -o - %s | FileCheck -check-prefixes=CHECK %s +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics -emit-llvm -std=c++11 -O1
-disable-llvm-passes -stack-protector 1 -o - %s | FileCheck %s -check-prefix=STACK-PROT +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics -emit-llvm -std=c++11 -O1 -disable-llvm-passes -stack-protector 2 -o - %s | FileCheck %s -check-prefix=STACK-PROT +// RUN: %clang_cc1 -triple arm64-apple-ios -fptrauth-calls -fptrauth-intrinsics -emit-llvm -std=c++11 -O1 -disable-llvm-passes -stack-protector 3 -o - %s | FileCheck %s -check-prefix=STACK-PROT + + +// CHECK: @gmethod0 = global { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base011nonvirtual0Ev, i32 0, i64 [[TYPEDISC1:35591]]) to i64), i64 0 }, align 8 +// CHECK: @gmethod1 = global { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN8Derived011nonvirtual5Ev, i32 0, i64 [[TYPEDISC0:22163]]) to i64), i64 0 }, align 8 +// CHECK: @gmethod2 = global { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base08virtual1Ev_vfpthunk_, i32 0, i64 [[TYPEDISC0]]) to i64), i64 0 }, align 8 + +// CHECK: @__const._Z13testArrayInitv.p0 = private unnamed_addr constant [1 x { i64, i64 }] [{ i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base011nonvirtual0Ev, i32 0, i64 35591) to i64), i64 0 }], align 8 +// CHECK: @__const._Z13testArrayInitv.p1 = private unnamed_addr constant [1 x { i64, i64 }] [{ i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base08virtual1Ev_vfpthunk_, i32 0, i64 35591) to i64), i64 0 }], align 8 +// CHECK: @__const._Z13testArrayInitv.c0 = private unnamed_addr constant %struct.Class0 { { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base011nonvirtual0Ev, i32 0, i64 35591) to i64), i64 0 } }, align 8 +// CHECK: @__const._Z13testArrayInitv.c1 = private unnamed_addr constant %struct.Class0 { { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base08virtual1Ev_vfpthunk_, i32 0, i64 35591) to i64), i64 0 } }, align 8 + +// CHECK: @_ZTV5Base0 = unnamed_addr constant { [5 x ptr] } { [5 x ptr] [ptr null, ptr @_ZTI5Base0, +// CHECK-SAME: ptr ptrauth (ptr @_ZN5Base08virtual1Ev, i32 0, i64 55600, ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV5Base0, i32 0, i32 0, i32 2)), +// CHECK-SAME: ptr ptrauth (ptr @_ZN5Base08virtual3Ev, i32 0, i64 53007, ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV5Base0, i32 0, i32 0, i32 3)), +// CHECK-SAME: ptr ptrauth (ptr @_ZN5Base016virtual_variadicEiz, i32 0, i64 7464, ptr getelementptr inbounds ({ [5 x ptr] }, ptr @_ZTV5Base0, i32 0, i32 0, i32 4))] }, align 8 + +typedef __SIZE_TYPE__ size_t; + +namespace std { +template <class _Ep> +class initializer_list { + const _Ep *__begin_; + size_t __size_; + + initializer_list(const _Ep *__b, size_t __s); +}; +} // namespace std + +struct Base0 { + void nonvirtual0(); + virtual void virtual1(); + virtual void virtual3(); + virtual void virtual_variadic(int, ...); +}; + +struct A0 { + int d[4]; +}; + +struct A1 { + int d[8]; +}; + +struct __attribute__((trivial_abi)) TrivialS { + TrivialS(const TrivialS &); + ~TrivialS(); + int p[4]; +}; + +struct Derived0 : Base0 { + void virtual1() override; + void nonvirtual5(); + virtual void virtual6(); + virtual A0 return_agg(); + virtual A1 sret(); + virtual void trivial_abi(TrivialS); +}; + +struct Base1 { + virtual void virtual7(); +}; + +struct Derived1 : Base0, Base1 { + void virtual1() override; + void virtual7() override; +}; + +typedef void (Base0::*MethodTy0)(); +typedef void (Base0::*VariadicMethodTy0)(int, ...); +typedef void (Derived0::*MethodTy1)(); + +struct Class0 { + MethodTy1 m0; +}; + +// CHECK: define void @_ZN5Base08virtual1Ev( + +// CHECK: define void @_Z5test0v() +// CHECK:
%[[METHOD0:.*]] = alloca { i64, i64 }, align 8 +// CHECK-NEXT: %[[VARMETHOD1:.*]] = alloca { i64, i64 }, align 8 +// CHECK-NEXT: %[[METHOD2:.*]] = alloca { i64, i64 }, align 8 +// CHECK-NEXT: %[[METHOD3:.*]] = alloca { i64, i64 }, align 8 +// CHECK-NEXT: %[[METHOD4:.*]] = alloca { i64, i64 }, align 8 +// CHECK-NEXT: %[[METHOD5:.*]] = alloca { i64, i64 }, align 8 +// CHECK-NEXT: %[[METHOD6:.*]] = alloca { i64, i64 }, align 8 +// CHECK-NEXT: %[[METHOD7:.*]] = alloca { i64, i64 }, align 8 +// CHECK: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base011nonvirtual0Ev, i32 0, i64 [[TYPEDISC0]]) to i64), i64 0 }, ptr %[[METHOD0]], align 8 +// CHECK-NEXT: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base08virtual1Ev_vfpthunk_, i32 0, i64 [[TYPEDISC0]]) to i64), i64 0 }, ptr %[[METHOD0]], align 8 +// CHECK-NEXT: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base08virtual3Ev_vfpthunk_, i32 0, i64 [[TYPEDISC0]]) to i64), i64 0 }, ptr %[[METHOD0]], align 8 +// CHECK: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base016virtual_variadicEiz_vfpthunk_, i32 0, i64 34368) to i64), i64 0 }, ptr %[[VARMETHOD1]], align 8 +// CHECK: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base011nonvirtual0Ev, i32 0, i64 [[TYPEDISC1]]) to i64), i64 0 }, ptr %[[METHOD2]], align 8 +// CHECK-NEXT: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base08virtual1Ev_vfpthunk_, i32 0, i64 [[TYPEDISC1]]) to i64), i64 0 }, ptr %[[METHOD2]], align 8 +// CHECK-NEXT: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base08virtual3Ev_vfpthunk_, i32 0, i64 [[TYPEDISC1]]) to i64), i64 0 }, ptr %[[METHOD2]], align 8 +// CHECK-NEXT: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN8Derived011nonvirtual5Ev, i32 0, i64 [[TYPEDISC1]]) to i64), i64 0 }, ptr %[[METHOD2]], align 8 +// CHECK-NEXT: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN8Derived08virtual6Ev_vfpthunk_, i32 0, i64 [[TYPEDISC1]]) to i64), i64 0 }, ptr %[[METHOD2]], align 8 +// CHECK: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN8Derived010return_aggEv_vfpthunk_, i32 0, i64 64418) to i64), i64 0 }, ptr %[[METHOD3]], align 8 +// CHECK: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN8Derived04sretEv_vfpthunk_, i32 0, i64 28187) to i64), i64 0 }, ptr %[[METHOD4]], align 8 +// CHECK: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN8Derived011trivial_abiE8TrivialS_vfpthunk_, i32 0, i64 8992) to i64), i64 0 }, ptr %[[METHOD5]], align 8 +// CHECK: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base18virtual7Ev_vfpthunk_, i32 0, i64 [[TYPEDISC2:61596]]) to i64), i64 0 }, ptr %[[METHOD6]], align 8 +// CHECK: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN8Derived18virtual7Ev_vfpthunk_, i32 0, i64 25206) to i64), i64 0 }, ptr %[[METHOD7]], align 8 +// CHECK-NEXT: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base08virtual1Ev_vfpthunk_, i32 0, i64 25206) to i64), i64 0 }, ptr %[[METHOD7]], align 8 +// CHECK: ret void + +// CHECK: define linkonce_odr hidden void @_ZN5Base08virtual1Ev_vfpthunk_(ptr noundef %[[THIS:.*]]) +// CHECK: %[[THIS_ADDR:.*]] = alloca ptr, align 8 +// CHECK: store ptr %[[THIS]], ptr %[[THIS_ADDR]], align 8 +// CHECK: %[[THIS1:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8 +// CHECK-NEXT: %[[V0:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8 +// CHECK-NEXT: %[[VTABLE:.*]] = load ptr, ptr %[[THIS1]], align 8 +// CHECK-NEXT: %[[V2:.*]] = ptrtoint ptr %[[VTABLE]] to i64 +// CHECK-NEXT: %[[V3:.*]] = call i64 @llvm.ptrauth.auth(i64 
%[[V2]], i32 2, i64 0) +// CHECK-NEXT: %[[V4:.*]] = inttoptr i64 %[[V3]] to ptr +// CHECK-NEXT: %[[VFN:.*]] = getelementptr inbounds ptr, ptr %[[V4]], i64 0 +// CHECK-NEXT: %[[V5:.*]] = load ptr, ptr %[[VFN]], align 8 +// CHECK-NEXT: %[[V6:.*]] = ptrtoint ptr %[[VFN]] to i64 +// CHECK-NEXT: %[[V7:.*]] = call i64 @llvm.ptrauth.blend(i64 %[[V6]], i64 55600) +// CHECK-NEXT: musttail call void %[[V5]](ptr noundef nonnull align {{[0-9]+}} dereferenceable(8) %[[V0]]) [ "ptrauth"(i32 0, i64 %[[V7]]) ] +// CHECK-NEXT: ret void + +// CHECK: define linkonce_odr hidden void @_ZN5Base08virtual3Ev_vfpthunk_(ptr noundef %{{.*}}) +// CHECK: load ptr, ptr %{{.*}}, align 8 +// CHECK: load ptr, ptr %{{.*}}, align 8 +// CHECK: %[[VTABLE:.*]] = load ptr, ptr %{{.*}}, align 8 +// CHECK: %[[V2:.*]] = ptrtoint ptr %[[VTABLE]] to i64 +// CHECK: %[[V3:.*]] = call i64 @llvm.ptrauth.auth(i64 %[[V2]], i32 2, i64 0) +// CHECK: %[[V4:.*]] = inttoptr i64 %[[V3]] to ptr +// CHECK: getelementptr inbounds ptr, ptr %[[V4]], i64 1 +// CHECK: call i64 @llvm.ptrauth.blend(i64 %{{.*}}, i64 53007) + +// CHECK: define linkonce_odr hidden void @_ZN5Base016virtual_variadicEiz_vfpthunk_(ptr noundef %[[THIS:.*]], i32 noundef %0, ...) +// CHECK: %[[THIS_ADDR:.*]] = alloca ptr, align 8 +// CHECK-NEXT: %[[_ADDR:.*]] = alloca i32, align 4 +// CHECK-NEXT: store ptr %[[THIS]], ptr %[[THIS_ADDR]], align 8 +// CHECK: store i32 %0, ptr %[[_ADDR]], align 4 +// CHECK: %[[THIS1:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8 +// CHECK-NEXT: %[[V1:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8 +// CHECK-NEXT: %[[V2:.*]] = load i32, ptr %[[_ADDR]], align 4 +// CHECK-NEXT: %[[VTABLE:.*]] = load ptr, ptr %[[THIS1]], align 8 +// CHECK-NEXT: %[[V4:.*]] = ptrtoint ptr %[[VTABLE]] to i64 +// CHECK-NEXT: %[[V5:.*]] = call i64 @llvm.ptrauth.auth(i64 %[[V4]], i32 2, i64 0) +// CHECK-NEXT: %[[V6:.*]] = inttoptr i64 %[[V5]] to ptr +// CHECK-NEXT: %[[VFN:.*]] = getelementptr inbounds ptr, ptr %[[V6]], i64 2 +// CHECK-NEXT: %[[V7:.*]] = load ptr, ptr %[[VFN]], align 8 +// CHECK-NEXT: %[[V8:.*]] = ptrtoint ptr %[[VFN]] to i64 +// CHECK-NEXT: %[[V9:.*]] = call i64 @llvm.ptrauth.blend(i64 %[[V8]], i64 7464) +// CHECK-NEXT: musttail call void (ptr, i32, ...) %[[V7]](ptr noundef nonnull align {{[0-9]+}} dereferenceable(8) %[[V1]], i32 noundef %[[V2]], ...) [ "ptrauth"(i32 0, i64 %[[V9]]) ] +// CHECK-NEXT: ret void + +// CHECK: define linkonce_odr hidden void @_ZN8Derived08virtual6Ev_vfpthunk_(ptr noundef %[[THIS:.*]]) +// CHECK: %[[THIS_ADDR:.*]] = alloca ptr, align 8 +// CHECK: store ptr %[[THIS]], ptr %[[THIS_ADDR]], align 8 +// CHECK: %[[THIS1:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8 +// CHECK: %[[V0:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8 +// CHECK: %[[VTABLE:.*]] = load ptr, ptr %[[THIS1]], align 8 +// CHECK: %[[V1:.*]] = ptrtoint ptr %[[VTABLE]] to i64 +// CHECK: %[[V2:.*]] = call i64 @llvm.ptrauth.auth(i64 %[[V1]], i32 2, i64 0) +// CHECK: %[[V3:.*]] = inttoptr i64 %[[V2]] to ptr +// CHECK: %[[VFN:.*]] = getelementptr inbounds ptr, ptr %[[V3]], i64 3 +// CHECK: %[[V5:.*]] = ptrtoint ptr %[[VFN]] to i64 +// CHECK: call i64 @llvm.ptrauth.blend(i64 %[[V5]], i64 55535) + +// Check that the return value of the musttail call isn't copied to a temporary. 
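+// (This exercises the IsVirtualFunctionPointerThunk path added to +// CodeGenFunction::EmitCall in CGCall.cpp above.)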
+ +// CHECK: define linkonce_odr hidden [2 x i64] @_ZN8Derived010return_aggEv_vfpthunk_(ptr noundef %{{.*}}) +// CHECK: %[[CALL:.*]] = musttail call [2 x i64] %{{.*}}(ptr noundef nonnull align {{[0-9]+}} dereferenceable(8) %{{.*}}) [ "ptrauth"(i32 0, i64 %{{.*}}) ] +// CHECK-NEXT: ret [2 x i64] %[[CALL]] + +// Check that the sret pointer passed to the caller is forwarded to the musttail +// call. + +// CHECK: define linkonce_odr hidden void @_ZN8Derived04sretEv_vfpthunk_(ptr dead_on_unwind noalias writable sret(%struct.A1) align 4 %[[AGG_RESULT:.*]], ptr noundef %{{.*}}) +// CHECK: musttail call void %{{.*}}(ptr dead_on_unwind writable sret(%struct.A1) align 4 %[[AGG_RESULT]], ptr noundef nonnull align {{[0-9]+}} dereferenceable(8) %{{.*}}) [ "ptrauth"(i32 0, i64 %{{.*}}) ] +// CHECK-NEXT: ret void + +// Check that the thunk function doesn't destruct the trivial_abi argument. + +// CHECK: define linkonce_odr hidden void @_ZN8Derived011trivial_abiE8TrivialS_vfpthunk_(ptr noundef %{{.*}}, [2 x i64] %{{.*}}) +// NODEBUG-NOT: call +// CHECK: call i64 @llvm.ptrauth.auth( +// NODEBUG-NOT: call +// CHECK: call i64 @llvm.ptrauth.blend( +// NODEBUG-NOT: call +// CHECK: musttail call void +// CHECK-NEXT: ret void + +// CHECK: define linkonce_odr hidden void @_ZN5Base18virtual7Ev_vfpthunk_(ptr noundef %[[THIS:.*]]) +// CHECK: entry: +// CHECK: %[[THIS_ADDR:.*]] = alloca ptr, align 8 +// CHECK: store ptr %[[THIS]], ptr %[[THIS_ADDR]], align 8 +// CHECK: %[[THIS1:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8 +// CHECK: %[[V0:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8 +// CHECK: %[[VTABLE:.*]] = load ptr, ptr %[[THIS1]], align 8 +// CHECK: %[[V1:.*]] = ptrtoint ptr %[[VTABLE]] to i64 +// CHECK: %[[V2:.*]] = call i64 @llvm.ptrauth.auth(i64 %[[V1]], i32 2, i64 0) +// CHECK: %[[V3:.*]] = inttoptr i64 %[[V2]] to ptr +// CHECK: getelementptr inbounds ptr, ptr %[[V3]], i64 0 + +// CHECK: define linkonce_odr hidden void @_ZN8Derived18virtual7Ev_vfpthunk_(ptr noundef %[[THIS:.*]]) +// CHECK: %[[THIS_ADDR:.*]] = alloca ptr, align 8 +// CHECK: store ptr %[[THIS]], ptr %[[THIS_ADDR]], align 8 +// CHECK: %[[THIS1:.*]] = load ptr, ptr %[[THIS_ADDR]], align 8 +// CHECK: load ptr, ptr %[[THIS_ADDR]], align 8 +// CHECK: %[[VTABLE:.*]] = load ptr, ptr %[[THIS1]], align 8 +// CHECK: %[[V1:.*]] = ptrtoint ptr %[[VTABLE]] to i64 +// CHECK: %[[V2:.*]] = call i64 @llvm.ptrauth.auth(i64 %[[V1]], i32 2, i64 0) +// CHECK: %[[V3:.*]] = inttoptr i64 %[[V2]] to ptr +// CHECK: getelementptr inbounds ptr, ptr %[[V3]], i64 3 + +void Base0::virtual1() {} + +void test0() { + MethodTy0 method0; + method0 = &Base0::nonvirtual0; + method0 = &Base0::virtual1; + method0 = &Base0::virtual3; + + VariadicMethodTy0 varmethod1; + varmethod1 = &Base0::virtual_variadic; + + MethodTy1 method2; + method2 = &Derived0::nonvirtual0; + method2 = &Derived0::virtual1; + method2 = &Derived0::virtual3; + method2 = &Derived0::nonvirtual5; + method2 = &Derived0::virtual6; + + A0 (Derived0::*method3)(); + method3 = &Derived0::return_agg; + + A1 (Derived0::*method4)(); + method4 = &Derived0::sret; + + void (Derived0::*method5)(TrivialS); + method5 = &Derived0::trivial_abi; + + void (Base1::*method6)(); + method6 = &Base1::virtual7; + + void (Derived1::*method7)(); + method7 = &Derived1::virtual7; + method7 = &Derived1::virtual1; +} + +// CHECK: define void @_Z5test1P5Base0MS_FvvE(ptr noundef %[[A0:.*]], [2 x i64] %[[A1_COERCE:.*]]) +// CHECK: %[[A1:.*]] = alloca { i64, i64 }, align 8 +// CHECK: %[[A0_ADDR:.*]] = alloca ptr, align 8 +// CHECK: %[[A1_ADDR:.*]] = 
alloca { i64, i64 }, align 8 +// CHECK: store [2 x i64] %[[A1_COERCE]], ptr %[[A1]], align 8 +// CHECK: %[[A11:.*]] = load { i64, i64 }, ptr %[[A1]], align 8 +// CHECK: store ptr %[[A0]], ptr %[[A0_ADDR]], align 8 +// CHECK: store { i64, i64 } %[[A11]], ptr %[[A1_ADDR]], align 8 +// CHECK: %[[V1:.*]] = load ptr, ptr %[[A0_ADDR]], align 8 +// CHECK: %[[V2:.*]] = load { i64, i64 }, ptr %[[A1_ADDR]], align 8 +// CHECK: %[[MEMPTR_ADJ:.*]] = extractvalue { i64, i64 } %[[V2]], 1 +// CHECK: %[[MEMPTR_ADJ_SHIFTED:.*]] = ashr i64 %[[MEMPTR_ADJ]], 1 +// CHECK: %[[V4:.*]] = getelementptr inbounds i8, ptr %[[V1]], i64 %[[MEMPTR_ADJ_SHIFTED]] +// CHECK: %[[MEMPTR_PTR:.*]] = extractvalue { i64, i64 } %[[V2]], 0 +// CHECK: %[[V5:.*]] = and i64 %[[MEMPTR_ADJ]], 1 +// CHECK: %[[MEMPTR_ISVIRTUAL:.*]] = icmp ne i64 %[[V5]], 0 +// CHECK: br i1 %[[MEMPTR_ISVIRTUAL]] + +// CHECK: %[[VTABLE:.*]] = load ptr, ptr %[[V4]], align 8 +// CHECK: %[[V7:.*]] = ptrtoint ptr %[[VTABLE]] to i64 +// CHECK: %[[V8:.*]] = call i64 @llvm.ptrauth.auth(i64 %[[V7]], i32 2, i64 0) +// CHECK: %[[V9:.*]] = inttoptr i64 %[[V8]] to ptr +// CHECK: %[[V10:.*]] = trunc i64 %[[MEMPTR_PTR]] to i32 +// CHECK: %[[V11:.*]] = zext i32 %[[V10]] to i64 +// CHECK: %[[V12:.*]] = getelementptr i8, ptr %[[V9]], i64 %[[V11]] +// CHECK: %[[MEMPTR_VIRTUALFN:.*]] = load ptr, ptr %[[V12]], align 8 +// CHECK: br + +// CHECK: %[[MEMPTR_NONVIRTUALFN:.*]] = inttoptr i64 %[[MEMPTR_PTR]] to ptr +// CHECK: br + +// CHECK: %[[V14:.*]] = phi ptr [ %[[MEMPTR_VIRTUALFN]], {{.*}} ], [ %[[MEMPTR_NONVIRTUALFN]], {{.*}} ] +// CHECK: %[[V15:.*]] = phi i64 [ 0, {{.*}} ], [ [[TYPEDISC0]], {{.*}} ] +// CHECK: call void %[[V14]](ptr noundef nonnull align {{[0-9]+}} dereferenceable(8) %[[V4]]) [ "ptrauth"(i32 0, i64 %[[V15]]) ] +// CHECK: ret void + +void test1(Base0 *a0, MethodTy0 a1) { + (a0->*a1)(); +} + +// CHECK: define void @_Z15testConversion0M5Base0FvvEM8Derived0FvvE([2 x i64] %[[METHOD0_COERCE:.*]], [2 x i64] %[[METHOD1_COERCE:.*]]) +// CHECK: %[[METHOD0:.*]] = alloca { i64, i64 }, align 8 +// CHECK: %[[METHOD1:.*]] = alloca { i64, i64 }, align 8 +// CHECK: %[[METHOD0_ADDR:.*]] = alloca { i64, i64 }, align 8 +// CHECK: %[[METHOD1_ADDR:.*]] = alloca { i64, i64 }, align 8 +// CHECK: store [2 x i64] %[[METHOD0_COERCE]], ptr %[[METHOD0]], align 8 +// CHECK: %[[METHOD01:.*]] = load { i64, i64 }, ptr %[[METHOD0]], align 8 +// CHECK: store [2 x i64] %[[METHOD1_COERCE]], ptr %[[METHOD1]], align 8 +// CHECK: %[[METHOD12:.*]] = load { i64, i64 }, ptr %[[METHOD1]], align 8 +// CHECK: store { i64, i64 } %[[METHOD01]], ptr %[[METHOD0_ADDR]], align 8 +// CHECK: store { i64, i64 } %[[METHOD12]], ptr %[[METHOD1_ADDR]], align 8 +// CHECK: %[[V2:.*]] = load { i64, i64 }, ptr %[[METHOD0_ADDR]], align 8 +// CHECK: %[[MEMPTR_PTR:.*]] = extractvalue { i64, i64 } %[[V2]], 0 +// CHECK: %[[MEMPTR_ADJ:.*]] = extractvalue { i64, i64 } %[[V2]], 1 +// CHECK: %[[V3:.*]] = and i64 %[[MEMPTR_ADJ]], 1 +// CHECK: %[[IS_VIRTUAL_OFFSET:.*]] = icmp ne i64 %[[V3]], 0 +// CHECK: br i1 %[[IS_VIRTUAL_OFFSET]] + +// CHECK: %[[V4:.*]] = inttoptr i64 %[[MEMPTR_PTR]] to ptr +// CHECK: %[[V5:.*]] = icmp ne ptr %[[V4]], null +// CHECK: br i1 %[[V5]] + +// CHECK: %[[V6:.*]] = ptrtoint ptr %[[V4]] to i64 +// CHECK: %[[V7:.*]] = call i64 @llvm.ptrauth.resign(i64 %[[V6]], i32 0, i64 [[TYPEDISC0]], i32 0, i64 [[TYPEDISC1]]) +// CHECK: %[[V8:.*]] = inttoptr i64 %[[V7]] to ptr +// CHECK: br + +// CHECK: %[[V9:.*]] = phi ptr [ null, {{.*}} ], [ %[[V8]], {{.*}} ] +// CHECK: %[[V1:.*]] = ptrtoint ptr %[[V9]] to i64 +// 
CHECK: %[[V11:.*]] = insertvalue { i64, i64 } %[[V2]], i64 %[[V1]], 0 +// CHECK: br + +// CHECK: %[[V12:.*]] = phi { i64, i64 } [ %[[V2]], {{.*}} ], [ %[[V11]], {{.*}} ] +// CHECK: store { i64, i64 } %[[V12]], ptr %[[METHOD1_ADDR]], align 8 +// CHECK: ret void + +void testConversion0(MethodTy0 method0, MethodTy1 method1) { + method1 = method0; +} + +// CHECK: define void @_Z15testConversion1M5Base0FvvE( +// CHECK: call i64 @llvm.ptrauth.resign(i64 %{{.*}}, i32 0, i64 [[TYPEDISC0]], i32 0, i64 [[TYPEDISC1]]) + +void testConversion1(MethodTy0 method0) { + MethodTy1 method1 = reinterpret_cast<MethodTy1>(method0); +} + +// CHECK: define void @_Z15testConversion2M8Derived0FvvE( +// CHECK: call i64 @llvm.ptrauth.resign(i64 %{{.*}}, i32 0, i64 [[TYPEDISC1]], i32 0, i64 [[TYPEDISC0]]) + +void testConversion2(MethodTy1 method1) { + MethodTy0 method0 = static_cast<MethodTy0>(method1); +} + +// CHECK: define void @_Z15testConversion3M8Derived0FvvE( +// CHECK: call i64 @llvm.ptrauth.resign(i64 %{{.*}}, i32 0, i64 [[TYPEDISC1]], i32 0, i64 [[TYPEDISC0]]) + +void testConversion3(MethodTy1 method1) { + MethodTy0 method0 = reinterpret_cast<MethodTy0>(method1); +} + +// No need to call @llvm.ptrauth.resign if the source member function +// pointer is a constant. + +// CHECK: define void @_Z15testConversion4v( +// CHECK: %[[METHOD0:.*]] = alloca { i64, i64 }, align 8 +// CHECK: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base08virtual1Ev_vfpthunk_, i32 0, i64 [[TYPEDISC0]]) to i64), i64 0 }, ptr %[[METHOD0]], align 8 +// CHECK: ret void + +void testConversion4() { + MethodTy0 method0 = reinterpret_cast<MethodTy0>(&Derived0::virtual1); +} + +// This code used to crash. +namespace testNonVirtualThunk { + struct R {}; + + struct B0 { + virtual void bar(); + }; + + struct B1 { + virtual R foo(); + }; + + struct D : B0, B1 { + virtual R foo(); + }; + + D d; +} + +// CHECK: define internal void @_ZN22TestAnonymousNamespace12_GLOBAL__N_11S3fooEv_vfpthunk_( + +namespace TestAnonymousNamespace { +namespace { +struct S { + virtual void foo(){}; +}; +} // namespace + +void test() { + auto t = &S::foo; +} +} // namespace TestAnonymousNamespace + +MethodTy1 gmethod0 = reinterpret_cast<MethodTy1>(&Base0::nonvirtual0); +MethodTy0 gmethod1 = reinterpret_cast<MethodTy0>(&Derived0::nonvirtual5); +MethodTy0 gmethod2 = reinterpret_cast<MethodTy0>(&Derived0::virtual1); + +// CHECK-LABEL: define void @_Z13testArrayInitv() +// CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %p0, ptr align 8 @__const._Z13testArrayInitv.p0, i64 16, i1 false) +// CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %p1, ptr align 8 @__const._Z13testArrayInitv.p1, i64 16, i1 false) +// CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %c0, ptr align 8 @__const._Z13testArrayInitv.c0, i64 16, i1 false) +// CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %c1, ptr align 8 @__const._Z13testArrayInitv.c1, i64 16, i1 false) +// CHECK: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base011nonvirtual0Ev, i32 0, i64 [[TYPEDISC1]]) to i64), i64 0 }, ptr %{{.*}} align 8 +// CHECK: store { i64, i64 } { i64 ptrtoint (ptr ptrauth (ptr @_ZN5Base08virtual1Ev_vfpthunk_, i32 0, i64 [[TYPEDISC1]]) to i64), i64 0 }, ptr %{{.*}}, align 8 + +void initList(std::initializer_list<MethodTy1>); + +void testArrayInit() { + MethodTy1 p0[] = {&Base0::nonvirtual0}; + MethodTy1 p1[] = {&Base0::virtual1}; + Class0 c0{&Base0::nonvirtual0}; + Class0 c1{&Base0::virtual1}; + initList({&Base0::nonvirtual0}); + initList({&Base0::virtual1}); +} + + + +// STACK-PROT: define {{.*}}_vfpthunk{{.*}}[[ATTRS:#[0-9]+]] +// STACK-PROT: attributes
[[ATTRS]] = +// STACK-PROT-NOT: ssp +// STACK-PROT-NOT: sspstrong +// STACK-PROT-NOT: sspreq +// STACK-PROT-NEXT: attributes + +// CHECK: define void @_Z15testConvertNullv( +// CHECK: %[[T:.*]] = alloca { i64, i64 }, +// store { i64, i64 } zeroinitializer, { i64, i64 }* %[[T]], + +void testConvertNull() { + VariadicMethodTy0 t = (VariadicMethodTy0)(MethodTy0{}); +} diff --git a/clang/test/Preprocessor/ptrauth_feature.c b/clang/test/Preprocessor/ptrauth_feature.c index 1330ad10b4b474..bf8f496298295c 100644 --- a/clang/test/Preprocessor/ptrauth_feature.c +++ b/clang/test/Preprocessor/ptrauth_feature.c @@ -71,6 +71,15 @@ void has_ptrauth_vtable_pointer_type_discrimination() {} void no_ptrauth_vtable_pointer_type_discrimination() {} #endif +// This is always enabled when ptrauth_calls is enabled, on new enough clangs. +#if __has_feature(ptrauth_member_function_pointer_type_discrimination) +// CALLS: has_ptrauth_member_function_pointer_type_discrimination +void has_ptrauth_member_function_pointer_type_discrimination() {} +#else +// NOCALLS: no_ptrauth_member_function_pointer_type_discrimination +void no_ptrauth_member_function_pointer_type_discrimination() {} +#endif + #if __has_feature(ptrauth_function_pointer_type_discrimination) // FUNC: has_ptrauth_function_pointer_type_discrimination void has_ptrauth_function_pointer_type_discrimination() {}
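For reference, a minimal sketch (not part of the patch) of what the scheme means at the source level, assuming the arm64e defaults installed in CompilerInvocation.cpp (IA key, no address discrimination, type discrimination); the Base/MethodTy names here are illustrative only:

struct Base {
  void nonvirt();       // &Base::nonvirt yields a pointer signed with the
                        // type discriminator of MethodTy
  virtual void virt();  // &Base::virt yields a signed pointer to a
                        // "_vfpthunk_" thunk that does the virtual dispatch
};
typedef void (Base::*MethodTy)();

MethodTy m0 = &Base::nonvirt;
MethodTy m1 = &Base::virt;

// Both discriminators are derived from the mangling of the member pointer
// type (ASTContext::getPointerAuthTypeDiscriminator), so they should agree
// with __builtin_ptrauth_type_discriminator(MethodTy).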