clang 22.0.0git
CIRGenCoroutine.cpp
Go to the documentation of this file.
//===----- CGCoroutine.cpp - Emit CIR Code for C++ coroutines -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with C++ code generation of coroutines.
//
//===----------------------------------------------------------------------===//
12
13#include "CIRGenFunction.h"
14#include "mlir/Support/LLVM.h"
15#include "clang/AST/StmtCXX.h"
20
21using namespace clang;
22using namespace clang::CIRGen;
23
25 / What is the current await expression kind and how many
26 / await/yield expressions were encountered so far.
27 / These are used to generate pretty labels for await expressions in LLVM IR.
28 cir::AwaitKind currentAwaitKind = cir::AwaitKind::Init;
29 / Stores the __builtin_coro_id emitted in the function so that we can supply
30 / it as the first argument to other builtins.
31 cir::CallOp coroId = nullptr;
32
33 / Stores the result of __builtin_coro_begin call.
34 mlir::Value coroBegin = nullptr;
35
36 / Stores the insertion point for final suspend, this happens after the
37 / promise call (return_xxx promise member) but before a cir.br to the return
38 / block.
39 mlir::Operation *finalSuspendInsPoint;
40
41 / How many co_return statements are in the coroutine. Used to decide whether
42 / we need to add co_return; equivalent at the end of the user authored body.
43 unsigned coreturnCount = 0;
44
45 / The promise type's 'unhandled_exception' handler, if it defines one.
47};
48
// Defining these here allows to keep CGCoroData private to this file.
52
53namespace {
54/ FIXME: both GetParamRef and ParamReferenceReplacerRAII are good template
55/ candidates to be shared among LLVM / CIR codegen.
56
57/ Hunts for the parameter reference in the parameter copy/move declaration.
58struct GetParamRef : public StmtVisitor<GetParamRef> {
59public:
60 DeclRefExpr *expr = nullptr;
61 GetParamRef() {}
62 void VisitDeclRefExpr(DeclRefExpr *e) {
63 assert(expr == nullptr && "multilple declref in param move");
64 expr = e;
65 }
66 void VisitStmt(Stmt *s) {
67 for (Stmt *c : s->children()) {
68 if (c)
69 Visit(c);
70 }
71 }
72};
73
// This class replaces references to parameters to their copies by changing
// the addresses in CGF.LocalDeclMap and restoring back the original values in
// its destructor.
struct ParamReferenceReplacerRAII {
  CIRGenFunction::DeclMapTy savedLocals;
  CIRGenFunction::DeclMapTy &localDeclMap;

  ParamReferenceReplacerRAII(CIRGenFunction::DeclMapTy &localDeclMap)
      : localDeclMap(localDeclMap) {}

  // Rebinds the parameter referenced by the copy/move declaration `pm` to the
  // copy's address, remembering the original address in savedLocals.
  void addCopy(const DeclStmt *pm) {
    // Figure out what param it refers to.

    assert(pm->isSingleDecl());
    const VarDecl *vd = static_cast<const VarDecl *>(pm->getSingleDecl());
    const Expr *initExpr = vd->getInit();
    GetParamRef visitor;
    visitor.Visit(const_cast<Expr *>(initExpr));
    assert(visitor.expr);
    DeclRefExpr *dreOrig = visitor.expr;
    auto *pd = dreOrig->getDecl();

    auto it = localDeclMap.find(pd);
    assert(it != localDeclMap.end() && "parameter is not found");
    savedLocals.insert({pd, it->second});

    auto copyIt = localDeclMap.find(vd);
    assert(copyIt != localDeclMap.end() && "parameter copy is not found");
    it->second = copyIt->getSecond();
  }

  ~ParamReferenceReplacerRAII() {
    // NOTE(review): DenseMap::insert does not overwrite an existing key, and
    // addCopy leaves the key present (it only reassigns the mapped value), so
    // this loop does not actually restore the original addresses. Appears
    // latent because this RAII object spans the whole body emission -- confirm
    // intent before changing.
    for (auto &&savedLocal : savedLocals) {
      localDeclMap.insert({savedLocal.first, savedLocal.second});
    }
  }
};
111} / namespace
112
114 if (curCoro.data && curCoro.data->coroBegin) {
115 return RValue::get(curCoro.data->coroBegin);
116 }
117 cgm.errorNYI("NYI");
118 return RValue();
119}
120
123 cir::CallOp coroId) {
124 assert(!curCoro.data && "EmitCoroutineBodyStatement called twice?");
125
126 curCoro.data = std::make_unique<CGCoroData>();
127 curCoro.data->coroId = coroId;
128}
129
130static mlir::LogicalResult
132 Stmt *body,
133 const CIRGenFunction::LexicalScope *currLexScope) {
134 if (cgf.emitStmt(body, /*useCurrentScope=*/true).failed())
135 return mlir::failure();
136 / Note that classic codegen checks CanFallthrough by looking into the
137 / availability of the insert block which is kinda brittle and unintuitive,
138 / seems to be related with how landing pads are handled.
139 /
140 / CIRGen handles this by checking pre-existing co_returns in the current
141 / scope instead.
142
143 / From LLVM IR Gen: const bool CanFallthrough = Builder.GetInsertBlock();
144 const bool canFallthrough = !currLexScope->hasCoreturn();
145 if (canFallthrough)
146 if (Stmt *onFallthrough = s.getFallthroughHandler())
147 if (cgf.emitStmt(onFallthrough, /*useCurrentScope=*/true).failed())
148 return mlir::failure();
149
150 return mlir::success();
151}
152
153cir::CallOp CIRGenFunction::emitCoroIDBuiltinCall(mlir::Location loc,
154 mlir::Value nullPtr) {
155 cir::IntType int32Ty = builder.getUInt32Ty();
156
157 const TargetInfo &ti = cgm.getASTContext().getTargetInfo();
158 unsigned newAlign = ti.getNewAlign() / ti.getCharWidth();
159
160 mlir::Operation *builtin = cgm.getGlobalValue(cgm.builtinCoroId);
161
162 cir::FuncOp fnOp;
163 if (!builtin) {
164 fnOp = cgm.createCIRBuiltinFunction(
165 loc, cgm.builtinCoroId,
166 cir::FuncType::get({int32Ty, voidPtrTy, voidPtrTy, voidPtrTy}, int32Ty),
167 /*FD=*/nullptr);
168 assert(fnOp && "should always succeed");
169 } else {
170 fnOp = cast<cir::FuncOp>(builtin);
171 }
172
173 return builder.createCallOp(loc, fnOp,
174 mlir::ValueRange{builder.getUInt32(newAlign, loc),
175 nullPtr, nullPtr, nullPtr});
176}
177
178cir::CallOp CIRGenFunction::emitCoroAllocBuiltinCall(mlir::Location loc) {
179 cir::BoolType boolTy = builder.getBoolTy();
180
181 mlir::Operation *builtin = cgm.getGlobalValue(cgm.builtinCoroAlloc);
182
183 cir::FuncOp fnOp;
184 if (!builtin) {
185 fnOp = cgm.createCIRBuiltinFunction(loc, cgm.builtinCoroAlloc,
186 cir::FuncType::get({uInt32Ty}, boolTy),
187 /*fd=*/nullptr);
188 assert(fnOp && "should always succeed");
189 } else {
190 fnOp = cast<cir::FuncOp>(builtin);
191 }
192
193 return builder.createCallOp(
194 loc, fnOp, mlir::ValueRange{curCoro.data->coroId.getResult()});
195}
196
197cir::CallOp
199 mlir::Value coroframeAddr) {
200 mlir::Operation *builtin = cgm.getGlobalValue(cgm.builtinCoroBegin);
201
202 cir::FuncOp fnOp;
203 if (!builtin) {
204 fnOp = cgm.createCIRBuiltinFunction(
205 loc, cgm.builtinCoroBegin,
206 cir::FuncType::get({uInt32Ty, voidPtrTy}, voidPtrTy),
207 /*fd=*/nullptr);
208 assert(fnOp && "should always succeed");
209 } else {
210 fnOp = cast<cir::FuncOp>(builtin);
211 }
212
213 return builder.createCallOp(
214 loc, fnOp,
215 mlir::ValueRange{curCoro.data->coroId.getResult(), coroframeAddr});
216}
217
218mlir::LogicalResult
220 mlir::Location openCurlyLoc = getLoc(s.getBeginLoc());
221 cir::ConstantOp nullPtrCst = builder.getNullPtr(voidPtrTy, openCurlyLoc);
222
223 auto fn = mlir::cast<cir::FuncOp>(curFn);
224 fn.setCoroutine(true);
225 cir::CallOp coroId = emitCoroIDBuiltinCall(openCurlyLoc, nullPtrCst);
226 createCoroData(*this, curCoro, coroId);
227
228 / Backend is allowed to elide memory allocations, to help it, emit
229 / auto mem = coro.alloc() ? 0 : ... allocation code ...;
230 cir::CallOp coroAlloc = emitCoroAllocBuiltinCall(openCurlyLoc);
231
232 / Initialize address of coroutine frame to null
233 CanQualType astVoidPtrTy = cgm.getASTContext().VoidPtrTy;
234 mlir::Type allocaTy = convertTypeForMem(astVoidPtrTy);
235 Address coroFrame =
236 createTempAlloca(allocaTy, getContext().getTypeAlignInChars(astVoidPtrTy),
237 openCurlyLoc, "__coro_frame_addr",
238 /*ArraySize=*/nullptr);
239
240 mlir::Value storeAddr = coroFrame.getPointer();
241 builder.CIRBaseBuilderTy::createStore(openCurlyLoc, nullPtrCst, storeAddr);
242 cir::IfOp::create(
243 builder, openCurlyLoc, coroAlloc.getResult(),
244 /*withElseRegion=*/false,
245 /*thenBuilder=*/[&](mlir::OpBuilder &b, mlir::Location loc) {
246 builder.CIRBaseBuilderTy::createStore(
247 loc, emitScalarExpr(s.getAllocate()), storeAddr);
248 cir::YieldOp::create(builder, loc);
249 });
250 curCoro.data->coroBegin =
252 openCurlyLoc,
253 cir::LoadOp::create(builder, openCurlyLoc, allocaTy, storeAddr))
254 .getResult();
255
256 / Handle allocation failure if 'ReturnStmtOnAllocFailure' was provided.
257 if (s.getReturnStmtOnAllocFailure())
258 cgm.errorNYI("handle coroutine return alloc failure");
259
260 {
262 ParamReferenceReplacerRAII paramReplacer(localDeclMap);
263 / Create mapping between parameters and copy-params for coroutine
264 / function.
265 llvm::ArrayRef<const Stmt *> paramMoves = s.getParamMoves();
266 assert((paramMoves.size() == 0 || (paramMoves.size() == fnArgs.size())) &&
267 "ParamMoves and FnArgs should be the same size for coroutine "
268 "function");
269 / For zipping the arg map into debug info.
271
272 / Create parameter copies. We do it before creating a promise, since an
273 / evolution of coroutine TS may allow promise constructor to observe
274 / parameter copies.
276 for (auto *pm : paramMoves) {
277 if (emitStmt(pm, /*useCurrentScope=*/true).failed())
278 return mlir::failure();
279 paramReplacer.addCopy(cast<DeclStmt>(pm));
280 }
281
282 if (emitStmt(s.getPromiseDeclStmt(), /*useCurrentScope=*/true).failed())
283 return mlir::failure();
284 / returnValue should be valid as long as the coroutine's return type
285 / is not void. The assertion could help us to reduce the check later.
286 assert(returnValue.isValid() == (bool)s.getReturnStmt());
287 / Now we have the promise, initialize the GRO.
288 / We need to emit `get_return_object` first. According to:
289 / [dcl.fct.def.coroutine]p7
290 / The call to get_return_­object is sequenced before the call to
291 / initial_suspend and is invoked at most once.
292 /
293 / So we couldn't emit return value when we emit return statment,
294 / otherwise the call to get_return_object wouldn't be in front
295 / of initial_suspend.
296 if (returnValue.isValid())
297 emitAnyExprToMem(s.getReturnValue(), returnValue,
298 s.getReturnValue()->getType().getQualifiers(),
299 /*isInit*/ true);
300
302
303 curCoro.data->currentAwaitKind = cir::AwaitKind::Init;
304 if (emitStmt(s.getInitSuspendStmt(), /*useCurrentScope=*/true).failed())
305 return mlir::failure();
306
307 curCoro.data->currentAwaitKind = cir::AwaitKind::User;
308
309 / FIXME(cir): wrap emitBodyAndFallthrough with try/catch bits.
310 if (s.getExceptionHandler())
312 if (emitBodyAndFallthrough(*this, s, s.getBody(), curLexScope).failed())
313 return mlir::failure();
314
315 / Note that LLVM checks CanFallthrough by looking into the availability
316 / of the insert block which is kinda brittle and unintuitive, seems to be
317 / related with how landing pads are handled.
318 /
319 / CIRGen handles this by checking pre-existing co_returns in the current
320 / scope instead.
321 /
322 / From LLVM IR Gen: const bool CanFallthrough = Builder.GetInsertBlock();
323 const bool canFallthrough = curLexScope->hasCoreturn();
324 const bool hasCoreturns = curCoro.data->coreturnCount > 0;
325 if (canFallthrough || hasCoreturns) {
326 curCoro.data->currentAwaitKind = cir::AwaitKind::Final;
327 {
328 mlir::OpBuilder::InsertionGuard guard(builder);
329 builder.setInsertionPoint(curCoro.data->finalSuspendInsPoint);
330 if (emitStmt(s.getFinalSuspendStmt(), /*useCurrentScope=*/true)
331 .failed())
332 return mlir::failure();
333 }
334 }
335 }
336 return mlir::success();
337}
338
339static bool memberCallExpressionCanThrow(const Expr *e) {
340 if (const auto *ce = dyn_cast<CXXMemberCallExpr>(e))
341 if (const auto *proto =
342 ce->getMethodDecl()->getType()->getAs<FunctionProtoType>())
343 if (isNoexceptExceptionSpec(proto->getExceptionSpecType()) &&
344 proto->canThrow() == CT_Cannot)
345 return false;
346 return true;
347}
348
// Given a suspend expression which roughly looks like:
//
//   auto && x = CommonExpr();
//   if (!x.await_ready()) {
//      x.await_suspend(...); (*)
//   }
//   x.await_resume();
//
// where the result of the entire expression is the result of x.await_resume()
//
//   (*) If x.await_suspend return type is bool, it allows to veto a suspend:
//      if (x.await_suspend(...))
//        llvm_coro_suspend();
//
// This is higher level than LLVM codegen, for that one see llvm's
// docs/Coroutines.rst for more details.
namespace {
// Result slot for a suspend expression: exactly one of the two members is
// populated, depending on the forLValue flag passed to emitSuspendExpression.
struct LValueOrRValue {
  LValue lv;
  RValue rv;
};
} // namespace
371
// Builds the three-region cir.await op for one suspend point: the ready
// region evaluates await_ready, the suspend region evaluates await_suspend
// (coro.save/coro.suspend are deferred to LLVM lowering), and the resume
// region evaluates await_resume, whose value is the expression's result.
// NOTE(review): several lines of this definition were lost to extraction and
// are flagged inline; per this file's doxygen index the full signature is:
//   static LValueOrRValue emitSuspendExpression(
//       CIRGenFunction &cgf, CGCoroData &coro, CoroutineSuspendExpr const &s,
//       cir::AwaitKind kind, AggValueSlot aggSlot, bool ignoreResult,
//       mlir::Block *scopeParentBlock, mlir::Value &tmpResumeRValAddr,
//       bool forLValue)
static LValueOrRValue
// NOTE(review): the signature continuation (first parameters) was lost here.
                      CoroutineSuspendExpr const &s, cir::AwaitKind kind,
                      AggValueSlot aggSlot, bool ignoreResult,
                      mlir::Block *scopeParentBlock,
                      mlir::Value &tmpResumeRValAddr, bool forLValue) {
  [[maybe_unused]] mlir::LogicalResult awaitBuild = mlir::success();
  LValueOrRValue awaitRes;

  // NOTE(review): the start of the declaration introducing this
  // OpaqueValueMapping (which keeps the common sub-expression bound while the
  // regions below are built) was lost to extraction.
      CIRGenFunction::OpaqueValueMapping(cgf, s.getOpaqueValue());
  CIRGenBuilderTy &builder = cgf.getBuilder();
  [[maybe_unused]] cir::AwaitOp awaitOp = cir::AwaitOp::create(
      builder, cgf.getLoc(s.getSourceRange()), kind,
      /*readyBuilder=*/
      [&](mlir::OpBuilder &b, mlir::Location loc) {
        Expr *condExpr = s.getReadyExpr()->IgnoreParens();
        builder.createCondition(cgf.evaluateExprAsBool(condExpr));
      },
      /*suspendBuilder=*/
      [&](mlir::OpBuilder &b, mlir::Location loc) {
        // Note that differently from LLVM codegen we do not emit coro.save
        // and coro.suspend here, that should be done as part of lowering this
        // to LLVM dialect (or some other MLIR dialect)

        // An invalid suspendRet indicates "void returning await_suspend"
        mlir::Value suspendRet = cgf.emitScalarExpr(s.getSuspendExpr());

        // Veto suspension if requested by bool returning await_suspend.
        if (suspendRet) {
          cgf.cgm.errorNYI("Veto await_suspend");
        }

        // Signals the parent that execution flows to next region.
        cir::YieldOp::create(builder, loc);
      },
      /*resumeBuilder=*/
      [&](mlir::OpBuilder &b, mlir::Location loc) {
        // Exception handling requires additional IR. If the 'await_resume'
        // function is marked as 'noexcept', we avoid generating this additional
        // IR.
        CXXTryStmt *tryStmt = nullptr;
        if (coro.exceptionHandler && kind == cir::AwaitKind::Init &&
            memberCallExpressionCanThrow(s.getResumeExpr()))
          cgf.cgm.errorNYI("Coro resume Exception");

        // FIXME(cir): the alloca for the resume expr should be placed in the
        // enclosing cir.scope instead.
        if (forLValue) {
          // NOTE(review): the lvalue branch body was lost to extraction.
        } else {
          awaitRes.rv =
              cgf.emitAnyExpr(s.getResumeExpr(), aggSlot, ignoreResult);
          if (!awaitRes.rv.isIgnored())
            // Create the alloca in the block before the scope wrapping
            // cir.await.
            // NOTE(review): the statement storing the resume rvalue into
            // tmpResumeRValAddr was lost to extraction.
        }

        if (tryStmt)
          cgf.cgm.errorNYI("Coro tryStmt");

        // Returns control back to parent.
        cir::YieldOp::create(builder, loc);
      });

  assert(awaitBuild.succeeded() && "Should know how to codegen");
  return awaitRes;
}
441
443 const CoroutineSuspendExpr &e,
444 cir::AwaitKind kind, AggValueSlot aggSlot,
445 bool ignoreResult) {
446 RValue rval;
447 mlir::Location scopeLoc = cgf.getLoc(e.getSourceRange());
448
449 / Since we model suspend / resume as an inner region, we must store
450 / resume scalar results in a tmp alloca, and load it after we build the
451 / suspend expression. An alternative way to do this would be to make
452 / every region return a value when promise.return_value() is used, but
453 / it's a bit awkward given that resume is the only region that actually
454 / returns a value.
455 mlir::Block *currEntryBlock = cgf.curLexScope->getEntryBlock();
456 [[maybe_unused]] mlir::Value tmpResumeRValAddr;
457
458 / No need to explicitly wrap this into a scope since the AST already uses a
459 / ExprWithCleanups, which will wrap this into a cir.scope anyways.
460 rval = emitSuspendExpression(cgf, *cgf.curCoro.data, e, kind, aggSlot,
461 ignoreResult, currEntryBlock, tmpResumeRValAddr,
462 /*forLValue*/ false)
463 .rv;
464
465 if (ignoreResult || rval.isIgnored())
466 return rval;
467
468 if (rval.isScalar()) {
469 rval = RValue::get(cir::LoadOp::create(cgf.getBuilder(), scopeLoc,
470 rval.getValue().getType(),
471 tmpResumeRValAddr));
472 } else if (rval.isAggregate()) {
473 / This is probably already handled via AggSlot, remove this assertion
474 / once we have a testcase and prove all pieces work.
475 cgf.cgm.errorNYI("emitSuspendExpr Aggregate");
476 } else { / complex
477 cgf.cgm.errorNYI("emitSuspendExpr Complex");
478 }
479 return rval;
480}
481
483 AggValueSlot aggSlot,
484 bool ignoreResult) {
485 return emitSuspendExpr(*this, e, curCoro.data->currentAwaitKind, aggSlot,
486 ignoreResult);
487}
488
490 ++curCoro.data->coreturnCount;
491 curLexScope->setCoreturn();
492
493 const Expr *rv = s.getOperand();
494 if (rv && rv->getType()->isVoidType() && !isa<InitListExpr>(rv)) {
495 / Make sure to evaluate the non initlist expression of a co_return
496 / with a void expression for side effects.
497 RunCleanupsScope cleanupScope(*this);
498 emitIgnoredExpr(rv);
499 }
500
501 if (emitStmt(s.getPromiseCall(), /*useCurrentScope=*/true).failed())
502 return mlir::failure();
503 / Create a new return block (if not existent) and add a branch to
504 / it. The actual return instruction is only inserted during current
505 / scope cleanup handling.
506 mlir::Location loc = getLoc(s.getSourceRange());
507 mlir::Block *retBlock = curLexScope->getOrCreateRetBlock(*this, loc);
508 curCoro.data->finalSuspendInsPoint =
509 cir::BrOp::create(builder, loc, retBlock);
510
511 / Insert the new block to continue codegen after branch to ret block,
512 / this will likely be an empty block.
513 builder.createBlock(builder.getBlock()->getParent());
514
515 return mlir::success();
516}
static LValueOrRValue emitSuspendExpression(CIRGenFunction &cgf, CGCoroData &coro, CoroutineSuspendExpr const &s, cir::AwaitKind kind, AggValueSlot aggSlot, bool ignoreResult, mlir::Block *scopeParentBlock, mlir::Value &tmpResumeRValAddr, bool forLValue)
static RValue emitSuspendExpr(CIRGenFunction &cgf, const CoroutineSuspendExpr &e, cir::AwaitKind kind, AggValueSlot aggSlot, bool ignoreResult)
static bool memberCallExpressionCanThrow(const Expr *e)
static mlir::LogicalResult emitBodyAndFallthrough(CIRGenFunction &cgf, const CoroutineBodyStmt &s, Stmt *body, const CIRGenFunction::LexicalScope *currLexScope)
static void createCoroData(CIRGenFunction &cgf, CIRGenFunction::CGCoroInfo &curCoro, cir::CallOp coroId)
__device__ __2f16 b
__device__ __2f16 float __ockl_bool s
__device__ __2f16 float c
mlir::Value getPointer() const
Definition Address.h:90
An aggregate value slot.
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
Enters a new scope for capturing cleanups, all of which will be executed once the scope is exited.
cir::CallOp emitCoroIDBuiltinCall(mlir::Location loc, mlir::Value nullPtr)
mlir::LogicalResult emitCoreturnStmt(const CoreturnStmt &s)
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
llvm::DenseMap< const clang::Decl *, Address > DeclMapTy
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
mlir::Operation * curFn
The current function or global initializer that is generated code for.
llvm::SmallVector< const ParmVarDecl * > fnArgs
Save Parameter Decl for coroutine.
mlir::Type convertTypeForMem(QualType t)
cir::CallOp emitCoroAllocBuiltinCall(mlir::Location loc)
Address returnValue
The temporary alloca to hold the return value.
CIRGenBuilderTy & getBuilder()
DeclMapTy localDeclMap
This keeps track of the CIR allocas or globals for local C declarations.
RValue emitCoawaitExpr(const CoawaitExpr &e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
clang::ASTContext & getContext() const
mlir::LogicalResult emitCoroutineBody(const CoroutineBodyStmt &s)
cir::CallOp emitCoroBeginBuiltinCall(mlir::Location loc, mlir::Value coroframeAddr)
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
bool isAggregate() const
Definition CIRGenValue.h:51
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:57
bool isScalar() const
Definition CIRGenValue.h:49
bool isIgnored() const
Definition CIRGenValue.h:52
Represents a 'co_await' expression.
Definition ExprCXX.h:5369
Represents a 'co_return' statement in the C++ Coroutines TS.
Definition StmtCXX.h:473
Represents the body of a coroutine.
Definition StmtCXX.h:320
Represents an expression that might suspend coroutine execution; either a co_await or co_yield expres...
Definition ExprCXX.h:5255
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1270
ValueDecl * getDecl()
Definition Expr.h:1338
bool isSingleDecl() const
isSingleDecl - This method returns true if this DeclStmt refers to a single Decl.
Definition Stmt.h:1635
const Decl * getSingleDecl() const
Definition Stmt.h:1637
This represents one expression.
Definition Expr.h:112
QualType getType() const
Definition Expr.h:144
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5269
StmtVisitor - This class implements a simple visitor for Stmt subclasses.
Stmt - This represents one statement.
Definition Stmt.h:85
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
Exposes information about the current target.
Definition TargetInfo.h:226
unsigned getNewAlign() const
Return the largest alignment for which a suitably-sized allocation with 'operator new(size_t)' is gua...
Definition TargetInfo.h:766
unsigned getCharWidth() const
Definition TargetInfo.h:520
bool isVoidType() const
Definition TypeBase.h:8892
const Expr * getInit() const
Definition Decl.h:1368
Defines the clang::TargetInfo interface.
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
The JSON file list parser is used to communicate input to InstallAPI.
CanQual< Type > CanQualType
Represents a canonical, potentially-qualified type.
bool isa(CodeGen::Address addr)
Definition Address.h:330
bool isNoexceptExceptionSpec(ExceptionSpecificationType ESpecType)
U cast(CodeGen::Address addr)
Definition Address.h:327
static bool ehCleanupScope()
static bool coroCoReturn()
static bool coroutineExceptions()
static bool coroOutsideFrameMD()
static bool coroCoYield()
static bool generateDebugInfo()
mlir::Operation * finalSuspendInsPoint
std::unique_ptr< CGCoroData > data
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
cir::PointerType voidPtrTy
void* in address space 0

Follow Lee on X/Twitter - Father, Husband, Serial builder creating AI, crypto, games & web tools. We are friends :) AI Will Come To Life!

Check out: eBank.nz (Art Generator) | Netwrck.com (AI Tools) | Text-Generator.io (AI API) | BitBank.nz (Crypto AI) | ReadingTime (Kids Reading) | RewordGame | BigMultiplayerChess | WebFiddle | How.nz | Helix AI Assistant