diff --git a/clang/include/clang/CIR/MissingFeatures.h b/clang/include/clang/CIR/MissingFeatures.h
index c0707d687fca..8f9c5d3eff49 100644
--- a/clang/include/clang/CIR/MissingFeatures.h
+++ b/clang/include/clang/CIR/MissingFeatures.h
@@ -59,7 +59,6 @@ struct MissingFeatures {
   static bool emitTypeCheck() { return false; }
   static bool tbaa() { return false; }
   static bool tbaa_struct() { return false; }
-  static bool cleanups() { return false; }
   static bool emitNullabilityCheck() { return false; }
   static bool ptrAuth() { return false; }

@@ -160,12 +159,22 @@ struct MissingFeatures {
   static bool fastMathFlags() { return false; }
   static bool fastMathFuncAttributes() { return false; }

+  // Cleanup
+  static bool cleanups() { return false; }
+  static bool simplifyCleanupEntry() { return false; }
+  static bool requiresCleanups() { return false; }
+  static bool cleanupBranchAfterSwitch() { return false; }
+  static bool cleanupAlwaysBranchThrough() { return false; }
+  static bool cleanupDestinationIndex() { return false; }
+  static bool cleanupDestroyNRVOVariable() { return false; }
+  static bool cleanupAppendInsts() { return false; }
+  static bool cleanupIndexAndBIAdjustment() { return false; }
+
   // Exception handling
   static bool isSEHTryScope() { return false; }
   static bool ehStack() { return false; }
   static bool emitStartEHSpec() { return false; }
   static bool emitEndEHSpec() { return false; }
-  static bool simplifyCleanupEntry() { return false; }

   // Type qualifiers.
   static bool atomicTypes() { return false; }
@@ -208,7 +217,6 @@ struct MissingFeatures {
   static bool addAutoInitAnnotation() { return false; }
   static bool addHeapAllocSiteMetadata() { return false; }
   static bool loopInfoStack() { return false; }
-  static bool requiresCleanups() { return false; }
   static bool constantFoldsToSimpleInteger() { return false; }
   static bool checkFunctionCallABI() { return false; }
   static bool zeroInitializer() { return false; }
diff --git a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
index 40fc101d4c23..534fc2a59968 100644
--- a/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenCleanup.cpp
@@ -37,13 +37,12 @@ cir::BrOp CIRGenFunction::emitBranchThroughCleanup(mlir::Location Loc,
                                                    JumpDest Dest) {
   // Remove this once we go for making sure unreachable code is
   // well modeled (or not).
-  assert(builder.getInsertionBlock() && "not yet implemented");
   assert(!cir::MissingFeatures::ehStack());

   // Insert a branch: to the cleanup block (unsolved) or to the already
   // materialized label. Keep track of unsolved goto's.
-  auto brOp = builder.create<cir::BrOp>(
-      Loc, Dest.isValid() ? Dest.getBlock() : ReturnBlock().getBlock());
+  assert(Dest.getBlock() && "assumes incoming valid dest");
+  auto brOp = builder.create<cir::BrOp>(Loc, Dest.getBlock());

   // Calculate the innermost active normal cleanup.
   EHScopeStack::stable_iterator TopCleanup =
@@ -70,7 +69,33 @@ cir::BrOp CIRGenFunction::emitBranchThroughCleanup(mlir::Location Loc,
     return brOp;
   }

-  // FIXME(cir): otherwise, thread through all the normal cleanups in scope.
+  // Otherwise, thread through all the normal cleanups in scope.
+  auto index = builder.getUInt32(Dest.getDestIndex(), Loc);
+  assert(!cir::MissingFeatures::cleanupIndexAndBIAdjustment());
+
+  // Add this destination to all the scopes involved.
+  EHScopeStack::stable_iterator I = TopCleanup;
+  EHScopeStack::stable_iterator E = Dest.getScopeDepth();
+  if (E.strictlyEncloses(I)) {
+    while (true) {
+      EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
+      assert(Scope.isNormalCleanup());
+      I = Scope.getEnclosingNormalCleanup();
+
+      // If this is the last cleanup we're propagating through, tell it
+      // that there's a resolved jump moving through it.
+      if (!E.strictlyEncloses(I)) {
+        Scope.addBranchAfter(index, Dest.getBlock());
+        break;
+      }
+
+      // Otherwise, tell the scope that there's a jump propagating
+      // through it. If this isn't new information, all the rest of
+      // the work has been done before.
+      if (!Scope.addBranchThrough(Dest.getBlock()))
+        break;
+    }
+  }

   return brOp;
 }
@@ -305,6 +330,18 @@ static void emitCleanup(CIRGenFunction &CGF, EHScopeStack::Cleanup *Fn,
   // No need to emit continuation block because CIR uses a cir.if.
 }

+static mlir::Block *createNormalEntry(CIRGenFunction &cgf,
+                                      EHCleanupScope &scope) {
+  assert(scope.isNormalCleanup());
+  mlir::Block *entry = scope.getNormalBlock();
+  if (!entry) {
+    mlir::OpBuilder::InsertionGuard guard(cgf.getBuilder());
+    entry = cgf.currLexScope->getOrCreateCleanupBlock(cgf.getBuilder());
+    scope.setNormalBlock(entry);
+  }
+  return entry;
+}
+
 /// Pops a cleanup block. If the block includes a normal cleanup, the
 /// current insertion point is threaded through the cleanup, as are
 /// any branch fixups on the cleanup.
@@ -341,7 +378,8 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {

   // - whether there's a fallthrough
   auto *FallthroughSource = builder.getInsertionBlock();
-  bool HasFallthrough = (FallthroughSource != nullptr && IsActive);
+  bool HasFallthrough =
+      (FallthroughSource != nullptr && (IsActive || HasExistingBranches));

   // Branch-through fall-throughs leave the insertion point set to the
   // end of the last cleanup, which points to the current scope. The
@@ -442,7 +480,131 @@ void CIRGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
     // Otherwise, the best approach is to thread everything through
     // the cleanup block and then try to clean up after ourselves.
   } else {
-    llvm_unreachable("NYI");
+    // Force the entry block to exist.
+    mlir::Block *normalEntry = createNormalEntry(*this, Scope);
+
+    // I. Set up the fallthrough edge in.
+    mlir::OpBuilder::InsertPoint savedInactiveFallthroughIP;
+
+    // If there's a fallthrough, we need to store the cleanup
+    // destination index. For fall-throughs this is always zero.
+    if (HasFallthrough) {
+      if (!HasPrebranchedFallthrough) {
+        assert(!cir::MissingFeatures::cleanupDestinationIndex());
+      }
+
+      // Otherwise, save and clear the IP if we don't have fallthrough
+      // because the cleanup is inactive.
+    } else if (FallthroughSource) {
+      assert(!IsActive && "source without fallthrough for active cleanup");
+      savedInactiveFallthroughIP = getBuilder().saveInsertionPoint();
+    }
+
+    // II. Emit the entry block. This implicitly branches to it if
+    // we have fallthrough. All the fixups and existing branches
+    // should already be branched to it.
+    builder.setInsertionPointToEnd(normalEntry);
+
+    // intercept normal cleanup to mark SEH scope end
+    if (IsEHa) {
+      llvm_unreachable("NYI");
+    }
+
+    // III. Figure out where we're going and build the cleanup
+    // epilogue.
+    bool HasEnclosingCleanups =
+        (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());
+
+    // Compute the branch-through dest if we need it:
+    //   - if there are branch-throughs threaded through the scope
+    //   - if fall-through is a branch-through
+    //   - if there are fixups that will be optimistically forwarded
+    //     to the enclosing cleanup
+    mlir::Block *branchThroughDest = nullptr;
+    if (Scope.hasBranchThroughs() ||
+        (FallthroughSource && FallthroughIsBranchThrough) ||
+        (HasFixups && HasEnclosingCleanups)) {
+      llvm_unreachable("NYI");
+    }
+
+    mlir::Block *fallthroughDest = nullptr;
+
+    // If there's exactly one branch-after and no other threads,
+    // we can route it without a switch.
+    // Skip for SEH, since ExitSwitch is used to generate code to indicate
+    // abnormal termination. (SEH: Except _leave and fall-through at
+    // the end, all other exits in a _try (return/goto/continue/break)
+    // are considered as abnormal terminations, using NormalCleanupDestSlot
+    // to indicate abnormal termination)
+    if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
+        !currentFunctionUsesSEHTry() && Scope.getNumBranchAfters() == 1) {
+      llvm_unreachable("NYI");
+      // Build a switch-out if we need it:
+      //   - if there are branch-afters threaded through the scope
+      //   - if fall-through is a branch-after
+      //   - if there are fixups that have nowhere left to go and
+      //     so must be immediately resolved
+    } else if (Scope.getNumBranchAfters() ||
+               (HasFallthrough && !FallthroughIsBranchThrough) ||
+               (HasFixups && !HasEnclosingCleanups)) {
+      assert(!cir::MissingFeatures::cleanupBranchAfterSwitch());
+    } else {
+      // We should always have a branch-through destination in this case.
+      assert(branchThroughDest);
+      assert(!cir::MissingFeatures::cleanupAlwaysBranchThrough());
+    }
+
+    // IV. Pop the cleanup and emit it.
+    Scope.markEmitted();
+    EHStack.popCleanup();
+    assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);
+
+    emitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);
+
+    // Append the prepared cleanup prologue from above.
+    assert(!cir::MissingFeatures::cleanupAppendInsts());
+
+    // Optimistically hope that any fixups will continue falling through.
+    for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups(); I < E;
+         ++I) {
+      llvm_unreachable("NYI");
+    }
+
+    // V. Set up the fallthrough edge out.
+
+    // Case 1: a fallthrough source exists but doesn't branch to the
+    // cleanup because the cleanup is inactive.
+    if (!HasFallthrough && FallthroughSource) {
+      // Prebranched fallthrough was forwarded earlier.
+      // Non-prebranched fallthrough doesn't need to be forwarded.
+      // Either way, all we need to do is restore the IP we cleared before.
+      assert(!IsActive);
+      llvm_unreachable("NYI");
+
+      // Case 2: a fallthrough source exists and should branch to the
+      // cleanup, but we're not supposed to branch through to the next
+      // cleanup.
+    } else if (HasFallthrough && fallthroughDest) {
+      llvm_unreachable("NYI");
+
+      // Case 3: a fallthrough source exists and should branch to the
+      // cleanup and then through to the next.
+    } else if (HasFallthrough) {
+      // Everything is already set up for this.
+
+      // Case 4: no fallthrough source exists.
+    } else {
+      // FIXME(cir): should we clear insertion point here?
+    }
+
+    // VI. Assorted cleaning.
+
+    // Check whether we can merge NormalEntry into a single predecessor.
+    // This might invalidate (non-IR) pointers to NormalEntry.
+    //
+    // If it did invalidate those pointers, and NormalEntry was the same
+    // as NormalExit, go back and patch up the fixups.
+    assert(!cir::MissingFeatures::simplifyCleanupEntry());
   }
 }
diff --git a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
index b44896241eff..c1a4ac61a5d6 100644
--- a/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenDecl.cpp
@@ -916,7 +916,7 @@ template <class Derived> struct DestroyNRVOVariable : EHScopeStack::Cleanup {
   QualType Ty;

   void Emit(CIRGenFunction &CGF, Flags flags) override {
-    llvm_unreachable("NYI");
+    assert(!cir::MissingFeatures::cleanupDestroyNRVOVariable());
   }

   virtual ~DestroyNRVOVariable() = default;
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
index 4d33c40e38e6..7de4866cd004 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.cpp
@@ -349,15 +349,23 @@ void CIRGenFunction::LexicalScope::cleanup() {

   // Cleanup are done right before codegen resume a scope. This is where
   // objects are destroyed.
-  unsigned curLoc = 0;
+  SmallVector<mlir::Block *> retBlocks;
   for (auto *retBlock : localScope->getRetBlocks()) {
     mlir::OpBuilder::InsertionGuard guard(builder);
     builder.setInsertionPointToEnd(retBlock);
-    mlir::Location retLoc = *localScope->getRetLocs()[curLoc];
-    curLoc++;
+    retBlocks.push_back(retBlock);
+    mlir::Location retLoc = localScope->getRetLoc(retBlock);
     (void)emitReturn(retLoc);
   }

+  auto removeUnusedRetBlocks = [&]() {
+    for (mlir::Block *retBlock : retBlocks) {
+      if (!retBlock->getUses().empty())
+        continue;
+      retBlock->erase();
+    }
+  };
+
   auto insertCleanupAndLeave = [&](mlir::Block *InsPt) {
     mlir::OpBuilder::InsertionGuard guard(builder);
     builder.setInsertionPointToEnd(InsPt);
@@ -373,9 +381,34 @@ void CIRGenFunction::LexicalScope::cleanup() {
     if (!cleanupBlock && localScope->getCleanupBlock(builder)) {
       cleanupBlock = localScope->getCleanupBlock(builder);
       builder.create<cir::BrOp>(InsPt->back().getLoc(), cleanupBlock);
+      if (!cleanupBlock->mightHaveTerminator()) {
+        mlir::OpBuilder::InsertionGuard guard(builder);
+        builder.setInsertionPointToEnd(cleanupBlock);
+        builder.create<cir::YieldOp>(localScope->EndLoc);
+      }
     }

     if (localScope->Depth == 0) {
+      // TODO(cir): get rid of all these special cases once cleanups are properly
+      // implemented.
+      // TODO(cir): most of this code should move into emitBranchThroughCleanup
+      if (localScope->getRetBlocks().size() == 1) {
+        mlir::Block *retBlock = localScope->getRetBlocks()[0];
+        mlir::Location loc = localScope->getRetLoc(retBlock);
+        if (retBlock->getUses().empty())
+          retBlock->erase();
+        else {
+          // Thread return block via cleanup block.
+          if (cleanupBlock) {
+            for (auto &blockUse : retBlock->getUses()) {
+              auto brOp = dyn_cast<cir::BrOp>(blockUse.getOwner());
+              brOp.setSuccessor(cleanupBlock);
+            }
+          }
+          builder.create<cir::BrOp>(loc, retBlock);
+          return;
+        }
+      }
       emitImplicitReturn();
       return;
     }
@@ -420,6 +453,7 @@ void CIRGenFunction::LexicalScope::cleanup() {
       // get into this condition and emit the proper cleanup. This is
       // needed to get nrvo to interop with dtor logic.
       PerformCleanup = false;
+      removeUnusedRetBlocks();
       return;
     }

@@ -529,7 +563,7 @@ void CIRGenFunction::finishFunction(SourceLocation EndLoc) {
   // the ret after it's been at EndLoc.
   if (auto *DI = getDebugInfo())
     assert(!cir::MissingFeatures::generateDebugInfo() && "NYI");
-  builder.clearInsertionPoint();
+  // FIXME(cir): should we clearInsertionPoint? breaks many testcases
   PopCleanupBlocks(PrologueCleanupDepth);
 }

@@ -590,6 +624,20 @@ void CIRGenFunction::finishFunction(SourceLocation EndLoc) {
   // block, it'd be deleted now. Same for unused ret allocas from ReturnValue
 }

+static void eraseEmptyAndUnusedBlocks(cir::FuncOp fnOp) {
+  // Remove any leftover blocks that are unreachable and empty, since they do
+  // not represent unreachable code useful for warnings nor anything deemed
+  // useful in general.
+  SmallVector<mlir::Block *> blocksToDelete;
+  for (auto &blk : fnOp.getBlocks()) {
+    if (!blk.empty() || !blk.getUses().empty())
+      continue;
+    blocksToDelete.push_back(&blk);
+  }
+  for (auto *b : blocksToDelete)
+    b->erase();
+}
+
 cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, cir::FuncOp Fn,
                                          const CIRGenFunctionInfo &FnInfo) {
   assert(Fn && "generating code for a null function");
@@ -678,7 +726,6 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, cir::FuncOp Fn,
   assert(Fn.isDeclaration() && "Function already has body?");
   mlir::Block *EntryBB = Fn.addEntryBlock();
   builder.setInsertionPointToStart(EntryBB);
-
   {
     // Initialize lexical scope information.
     LexicalScope lexScope{*this, fusedLoc, EntryBB};
@@ -728,18 +775,19 @@ cir::FuncOp CIRGenFunction::generateCode(clang::GlobalDecl GD, cir::FuncOp Fn,
       llvm_unreachable("no definition for emitted function");

     assert(builder.getInsertionBlock() && "Should be valid");
-  }

-  if (mlir::failed(Fn.verifyBody()))
-    return nullptr;
+    if (mlir::failed(Fn.verifyBody()))
+      return nullptr;

-  // Emit the standard function epilogue.
-  finishFunction(BodyRange.getEnd());
+    // Emit the standard function epilogue.
+    finishFunction(BodyRange.getEnd());

-  // If we haven't marked the function nothrow through other means, do a quick
-  // pass now to see if we can.
-  assert(!cir::MissingFeatures::tryMarkNoThrow());
+    // If we haven't marked the function nothrow through other means, do a quick
+    // pass now to see if we can.
+    assert(!cir::MissingFeatures::tryMarkNoThrow());
+  }

+  eraseEmptyAndUnusedBlocks(Fn);
   return Fn;
 }

@@ -1163,9 +1211,13 @@ void CIRGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
   if (FD && FD->isMain() && cir::MissingFeatures::zerocallusedregs())
     llvm_unreachable("NYI");

-  mlir::Block *EntryBB = &Fn.getBlocks().front();
+  // CIRGen has its own logic for entry blocks, usually per operation region.
+  mlir::Block *retBlock = currLexScope->getOrCreateRetBlock(*this, getLoc(Loc));
+  // returnBlock handles per region getJumpDestInCurrentScope LLVM traditional
+  // codegen logic.
+  (void)returnBlock(retBlock);

-  // TODO: allocapt insertion? probably don't need for CIR
+  mlir::Block *EntryBB = &Fn.getBlocks().front();

   if (cir::MissingFeatures::requiresReturnValueCheck())
     llvm_unreachable("NYI");
diff --git a/clang/lib/CIR/CodeGen/CIRGenFunction.h b/clang/lib/CIR/CodeGen/CIRGenFunction.h
index 38ea6a407884..ce26520114e8 100644
--- a/clang/lib/CIR/CodeGen/CIRGenFunction.h
+++ b/clang/lib/CIR/CodeGen/CIRGenFunction.h
@@ -379,11 +379,14 @@ class CIRGenFunction : public CIRGenTypeCache {
   clang::GlobalDecl CurGD;

   /// Unified return block.
-  /// Not that for LLVM codegen this is a memeber variable instead.
-  JumpDest ReturnBlock() {
-    return JumpDest(currLexScope->getOrCreateCleanupBlock(builder));
+  /// In CIR this is a function because each scope might have
+  /// its associated return block.
+  JumpDest returnBlock(mlir::Block *retBlock) {
+    return getJumpDestInCurrentScope(retBlock);
   }

+  unsigned nextCleanupDestIndex = 1;
+
   /// The temporary alloca to hold the return value. This is
   /// invalid iff the function has no return value.
   Address ReturnValue = Address::invalid();
@@ -1347,6 +1350,16 @@ class CIRGenFunction : public CIRGenTypeCache {
   void emitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
                                       mlir::Value &Result);

+  /// The given basic block lies in the current EH scope, but may be a
+  /// target of a potentially scope-crossing jump; get a stable handle
+  /// to which we can perform this jump later.
+  /// CIRGen: this mostly tracks state for figuring out the proper scope
+  /// information, no actual branches are emitted.
+  JumpDest getJumpDestInCurrentScope(mlir::Block *target) {
+    return JumpDest(target, EHStack.getInnermostNormalCleanup(),
+                    nextCleanupDestIndex++);
+  }
+
   cir::BrOp emitBranchThroughCleanup(mlir::Location Loc, JumpDest Dest);

   /// Given an assignment `*LHS = RHS`, emit a test that checks if \p RHS is
@@ -2069,11 +2082,14 @@ class CIRGenFunction : public CIRGenTypeCache {
     void
     ForceCleanup(std::initializer_list<mlir::Value **> ValuesToReload = {}) {
       assert(PerformCleanup && "Already forced cleanup");
-      CGF.DidCallStackSave = OldDidCallStackSave;
-      CGF.PopCleanupBlocks(CleanupStackDepth, LifetimeExtendedCleanupStackSize,
-                           ValuesToReload);
-      PerformCleanup = false;
-      CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth;
+      {
+        mlir::OpBuilder::InsertionGuard guard(CGF.getBuilder());
+        CGF.DidCallStackSave = OldDidCallStackSave;
+        CGF.PopCleanupBlocks(CleanupStackDepth,
+                             LifetimeExtendedCleanupStackSize, ValuesToReload);
+        PerformCleanup = false;
+        CGF.CurrentCleanupScopeDepth = OldCleanupScopeDepth;
+      }
     }
   };

@@ -2202,7 +2218,8 @@ class CIRGenFunction : public CIRGenTypeCache {
     mlir::Block *getOrCreateCleanupBlock(mlir::OpBuilder &builder) {
       if (CleanupBlock)
         return getCleanupBlock(builder);
-      return createCleanupBlock(builder);
+      CleanupBlock = createCleanupBlock(builder);
+      return CleanupBlock;
     }

     mlir::Block *getCleanupBlock(mlir::OpBuilder &builder) {
@@ -2212,9 +2229,10 @@ class CIRGenFunction : public CIRGenTypeCache {
       {
        // Create the cleanup block but dont hook it up around just yet.
        mlir::OpBuilder::InsertionGuard guard(builder);
-        CleanupBlock = builder.createBlock(builder.getBlock()->getParent());
+        mlir::Region *r = builder.getBlock() ? builder.getBlock()->getParent()
+                                             : &CGF.CurFn->getRegion(0);
+        CleanupBlock = builder.createBlock(r);
       }
-      assert(builder.getInsertionBlock() && "Should be valid");
       return CleanupBlock;
     }

@@ -2226,7 +2244,7 @@ class CIRGenFunction : public CIRGenTypeCache {
     // On switches we need one return block per region, since cases don't
     // have their own scopes but are distinct regions nonetheless.
     llvm::SmallVector<mlir::Block *> RetBlocks;
-    llvm::SmallVector<std::optional<mlir::Location>> RetLocs;
+    llvm::DenseMap<mlir::Block *, mlir::Location> RetLocs;
     llvm::DenseMap<cir::CaseOp, size_t> RetBlockInCaseIndex;
     std::optional<size_t> NormalRetBlockIndex;
     llvm::SmallVector<std::unique_ptr<mlir::Region>> SwitchRegions;
@@ -2244,7 +2262,7 @@ class CIRGenFunction : public CIRGenTypeCache {
       mlir::OpBuilder::InsertionGuard guard(CGF.builder);
       auto *b = CGF.builder.createBlock(CGF.builder.getBlock()->getParent());
       RetBlocks.push_back(b);
-      RetLocs.push_back(loc);
+      updateRetLoc(b, loc);
       return b;
     }

@@ -2253,8 +2271,9 @@ class CIRGenFunction : public CIRGenTypeCache {
   public:
     llvm::ArrayRef<mlir::Block *> getRetBlocks() { return RetBlocks; }
-    llvm::ArrayRef<std::optional<mlir::Location>> getRetLocs() {
-      return RetLocs;
+    mlir::Location getRetLoc(mlir::Block *b) { return RetLocs.at(b); }
+    void updateRetLoc(mlir::Block *b, mlir::Location loc) {
+      RetLocs.insert_or_assign(b, loc);
     }

     llvm::MutableArrayRef<std::unique_ptr<mlir::Region>> getSwitchRegions() {
       assert(isSwitch() && "expected switch scope");
@@ -2268,22 +2287,26 @@ class CIRGenFunction : public CIRGenTypeCache {
     }

     mlir::Block *getOrCreateRetBlock(CIRGenFunction &CGF, mlir::Location loc) {
+      mlir::Block *ret = nullptr;
       if (auto caseOp = mlir::dyn_cast_if_present<cir::CaseOp>(
               CGF.builder.getBlock()->getParentOp())) {
         auto iter = RetBlockInCaseIndex.find(caseOp);
         if (iter != RetBlockInCaseIndex.end())
-          return RetBlocks[iter->second];
-
-        mlir::Block *ret = createRetBlock(CGF, loc);
-        RetBlockInCaseIndex[caseOp] = RetBlocks.size() - 1;
-        return ret;
-      }
-      if (!NormalRetBlockIndex) {
-        mlir::Block *ret = createRetBlock(CGF, loc);
+          ret = RetBlocks[iter->second];
+        else {
+          ret = createRetBlock(CGF, loc);
+          RetBlockInCaseIndex[caseOp] = RetBlocks.size() - 1;
+          return ret;
+        }
+      } else if (!NormalRetBlockIndex) {
+        ret = createRetBlock(CGF, loc);
         NormalRetBlockIndex = RetBlocks.size() - 1;
         return ret;
+      } else {
+        ret = &*RetBlocks[*NormalRetBlockIndex];
       }
-      return &*RetBlocks[*NormalRetBlockIndex];
+      updateRetLoc(ret, loc);
+      return ret;
     }

     // Scope entry block tracking
diff --git a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
index 76d78dc09c76..1fa1653bc1cf 100644
--- a/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
+++ b/clang/lib/CIR/CodeGen/CIRGenStmt.cpp
@@ -72,12 +72,18 @@ Address CIRGenFunction::emitCompoundStmt(const CompoundStmt &S, bool getLast,
   // Add local scope to track new declared variables.
   SymTableScopeTy varScope(symbolTable);
   auto scopeLoc = getLoc(S.getSourceRange());
+  mlir::OpBuilder::InsertPoint scopeInsPt;
   builder.create<cir::ScopeOp>(
       scopeLoc, /*scopeBuilder=*/
       [&](mlir::OpBuilder &b, mlir::Type &type, mlir::Location loc) {
-        LexicalScope lexScope{*this, loc, builder.getInsertionBlock()};
-        retAlloca = emitCompoundStmtWithoutScope(S, getLast, slot);
+        scopeInsPt = b.saveInsertionPoint();
       });
+  {
+    mlir::OpBuilder::InsertionGuard guard(builder);
+    builder.restoreInsertionPoint(scopeInsPt);
+    LexicalScope lexScope{*this, scopeLoc, builder.getInsertionBlock()};
+    retAlloca = emitCompoundStmtWithoutScope(S, getLast, slot);
+  }

   return retAlloca;
 }
@@ -473,14 +479,25 @@ mlir::LogicalResult CIRGenFunction::emitDeclStmt(const DeclStmt &S) {

 mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &S) {
   assert(!cir::MissingFeatures::requiresReturnValueCheck());
+  assert(!cir::MissingFeatures::isSEHTryScope());
+
   auto loc = getLoc(S.getSourceRange());

   // Emit the result value, even if unused, to evaluate the side effects.
   const Expr *RV = S.getRetValue();

-  // TODO(cir): LLVM codegen uses a RunCleanupsScope cleanupScope here, we
-  // should model this in face of dtors.
+  // Record the result expression of the return statement. The recorded
+  // expression is used to determine whether a block capture's lifetime should
+  // end at the end of the full expression as opposed to the end of the scope
+  // enclosing the block expression.
+  //
+  // This permits a small, easily-implemented exception to our over-conservative
+  // rules about not jumping to statements following block literals with
+  // non-trivial cleanups.
+  // TODO(cir): SaveRetExpr
+  // SaveRetExprRAII SaveRetExpr(RV, *this);
+  RunCleanupsScope cleanupScope(*this);

   bool createNewScope = false;
   if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV)) {
     RV = EWC->getSubExpr();
@@ -557,16 +574,17 @@ mlir::LogicalResult CIRGenFunction::emitReturnStmt(const ReturnStmt &S) {
     }
   }

-  // Create a new return block (if not existent) and add a branch to
-  // it. The actual return instruction is only inserted during current
-  // scope cleanup handling.
+  cleanupScope.ForceCleanup();
+
+  // In CIR we might have returns in different scopes.
+  // FIXME(cir): cleanup code is handling actual return emission, the logic
+  // should try to match traditional codegen more closely (to the extent which
+  // is possible).
   auto *retBlock = currLexScope->getOrCreateRetBlock(*this, loc);
-  builder.create<cir::BrOp>(loc, retBlock);
+  emitBranchThroughCleanup(loc, returnBlock(retBlock));

   // Insert the new block to continue codegen after branch to ret block.
   builder.createBlock(builder.getBlock()->getParent());
-
-  // TODO(cir): LLVM codegen for a cleanup on cleanupScope here.
   return mlir::success();
 }

@@ -1153,5 +1171,6 @@ void CIRGenFunction::emitReturnOfRValue(mlir::Location loc, RValue RV,
   } else {
     llvm_unreachable("NYI");
   }
-  emitBranchThroughCleanup(loc, ReturnBlock());
+  auto *retBlock = currLexScope->getOrCreateRetBlock(*this, loc);
+  emitBranchThroughCleanup(loc, returnBlock(retBlock));
 }
diff --git a/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp b/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp
index a53d85fbf55b..216e63029ddd 100644
--- a/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp
+++ b/clang/test/CIR/CodeGen/builtin-constant-evaluated.cpp
@@ -4,9 +4,9 @@
 auto func() {
   return __builtin_strcmp("", "");
   // CIR: cir.func @_Z4funcv()
-  // CIR-NEXT: %0 = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"] {alignment = 4 : i64} loc(#loc2)
-  // CIR-NEXT: %1 = cir.const #cir.int<0> : !s32i loc(#loc7)
-  // CIR-NEXT: cir.store %1, %0 : !s32i, !cir.ptr<!s32i> loc(#loc8)
-  // CIR-NEXT: %2 = cir.load %0 : !cir.ptr<!s32i>, !s32i loc(#loc8)
-  // CIR-NEXT: cir.return %2 : !s32i loc(#loc8)
+  // CIR-NEXT: %0 = cir.alloca !s32i, !cir.ptr<!s32i>, ["__retval"] {alignment = 4 : i64}
+  // CIR-NEXT: %1 = cir.const #cir.int<0> : !s32i
+  // CIR-NEXT: cir.store %1, %0 : !s32i, !cir.ptr<!s32i>
+  // CIR-NEXT: %2 = cir.load %0 : !cir.ptr<!s32i>, !s32i
+  // CIR-NEXT: cir.return %2 : !s32i
 }
diff --git a/clang/test/CIR/CodeGen/lambda.cpp b/clang/test/CIR/CodeGen/lambda.cpp
index 138533e2308d..57c4a85eec0e 100644
--- a/clang/test/CIR/CodeGen/lambda.cpp
+++ b/clang/test/CIR/CodeGen/lambda.cpp
@@ -251,10 +251,8 @@ int g3() {
 // COM: LLVM: [[CALL:%.*]] = call noundef i32 @"_ZZ2g3vENK3$_0clERKi"(ptr noundef nonnull align 1 dereferenceable(1) [[unused_capture]], ptr noundef nonnull align 4 dereferenceable(4) [[TMP0]])
 // LLVM: [[CALL:%.*]] = call i32 @"_ZZ2g3vENK3$_0clERKi"(ptr [[unused_capture]], ptr [[TMP0]])
 // LLVM: store i32 [[CALL]], ptr [[ret_val]], align 4
-// FIXME: should just return result
-// COM: LLVM: ret i32 [[ret_val]]
-// LLVM: call void @llvm.trap()
-// LLVM: unreachable
+// LLVM: %[[ret:.*]] = load i32, ptr [[ret_val]], align 4
+// LLVM: ret i32 %[[ret]]

 // lambda operator int (*)(int const&)()
 // LLVM-LABEL: @"_ZZ2g3vENK3$_0cvPFiRKiEEv"