CIRGenExpr.cpp
1//===----------------------------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This contains code to emit Expr nodes as CIR code.
10//
11//===----------------------------------------------------------------------===//
12
13#include "Address.h"
15#include "CIRGenFunction.h"
16#include "CIRGenModule.h"
17#include "CIRGenValue.h"
18#include "mlir/IR/BuiltinAttributes.h"
19#include "mlir/IR/Value.h"
20#include "clang/AST/Attr.h"
21#include "clang/AST/CharUnits.h"
22#include "clang/AST/Decl.h"
23#include "clang/AST/Expr.h"
24#include "clang/AST/ExprCXX.h"
31#include <optional>
32
33using namespace clang;
34using namespace clang::CIRGen;
35using namespace cir;
36
37// Get the address of a zero-sized field within a record. The resulting address
38// doesn't necessarily have the right type.
40 const FieldDecl *field,
41 llvm::StringRef fieldName,
42 unsigned fieldIndex) {
43 if (field->isZeroSize(getContext())) {
44 cgm.errorNYI(field->getSourceRange(),
45 "emitAddrOfFieldStorage: zero-sized field");
46 return Address::invalid();
47 }
48
49 mlir::Location loc = getLoc(field->getLocation());
50
51 mlir::Type fieldType = convertType(field->getType());
52 auto fieldPtr = cir::PointerType::get(fieldType);
53 // In most cases fieldName is the same as field->getName(), but lambdas do
54 // not currently carry the name, so it can be passed down from the
55 // CaptureStmt.
56 cir::GetMemberOp memberAddr = builder.createGetMember(
57 loc, fieldPtr, base.getPointer(), fieldName, fieldIndex);
58
59 // Retrieve layout information, compute alignment and return the final
60 // address.
61 const RecordDecl *rec = field->getParent();
62 const CIRGenRecordLayout &layout = cgm.getTypes().getCIRGenRecordLayout(rec);
63 unsigned idx = layout.getCIRFieldNo(field);
65 layout.getCIRType().getElementOffset(cgm.getDataLayout().layout, idx));
66 return Address(memberAddr, base.getAlignment().alignmentAtOffset(offset));
67}
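
// Editor's note (illustrative sketch, not part of the upstream source): for a
// hypothetical record and accessor such as
//   struct Point { int x; int y; };
//   int getY(Point &p) { return p.y; }
// the access to `y` asks this helper for the field's storage address. It emits
// a GetMemberOp on the base pointer and derives the result alignment from the
// field's element offset in the CIR record layout.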
68
69// Given an expression of pointer type, try to
70// derive a more accurate bound on the alignment of the pointer.
72 LValueBaseInfo *baseInfo) {
73 // We allow this with ObjC object pointers because of fragile ABIs.
74 assert(expr->getType()->isPointerType() ||
75 expr->getType()->isObjCObjectPointerType());
76 expr = expr->IgnoreParens();
77
78 // Casts:
79 if (auto const *ce = dyn_cast<CastExpr>(expr)) {
80 if (const auto *ece = dyn_cast<ExplicitCastExpr>(ce))
81 cgm.emitExplicitCastExprType(ece);
82
83 switch (ce->getCastKind()) {
84 // Non-converting casts (but not C's implicit conversion from void*).
85 case CK_BitCast:
86 case CK_NoOp:
87 case CK_AddressSpaceConversion: {
88 if (const auto *ptrTy =
89 ce->getSubExpr()->getType()->getAs<PointerType>()) {
90 if (ptrTy->getPointeeType()->isVoidType())
91 break;
92
93 LValueBaseInfo innerBaseInfo;
95 Address addr =
96 emitPointerWithAlignment(ce->getSubExpr(), &innerBaseInfo);
97 if (baseInfo)
98 *baseInfo = innerBaseInfo;
99
100 if (isa<ExplicitCastExpr>(ce)) {
101 LValueBaseInfo targetTypeBaseInfo;
102
103 const QualType pointeeType = expr->getType()->getPointeeType();
104 const CharUnits align =
105 cgm.getNaturalTypeAlignment(pointeeType, &targetTypeBaseInfo);
106
107 // If the source l-value is opaque, honor the alignment of the
108 // casted-to type.
109 if (innerBaseInfo.getAlignmentSource() != AlignmentSource::Decl) {
110 if (baseInfo)
111 baseInfo->mergeForCast(targetTypeBaseInfo);
112 addr = Address(addr.getPointer(), addr.getElementType(), align);
113 }
114 }
115
117
118 const mlir::Type eltTy =
119 convertTypeForMem(expr->getType()->getPointeeType());
120 addr = getBuilder().createElementBitCast(getLoc(expr->getSourceRange()),
121 addr, eltTy);
123
124 return addr;
125 }
126 break;
127 }
128
129 // Array-to-pointer decay. TODO(cir): BaseInfo and TBAAInfo.
130 case CK_ArrayToPointerDecay:
131 return emitArrayToPointerDecay(ce->getSubExpr(), baseInfo);
132
133 case CK_UncheckedDerivedToBase:
134 case CK_DerivedToBase: {
137 Address addr = emitPointerWithAlignment(ce->getSubExpr(), baseInfo);
138 const CXXRecordDecl *derived =
139 ce->getSubExpr()->getType()->getPointeeCXXRecordDecl();
140 return getAddressOfBaseClass(addr, derived, ce->path(),
142 ce->getExprLoc());
143 }
144
145 case CK_AnyPointerToBlockPointerCast:
146 case CK_BaseToDerived:
147 case CK_BaseToDerivedMemberPointer:
148 case CK_BlockPointerToObjCPointerCast:
149 case CK_BuiltinFnToFnPtr:
150 case CK_CPointerToObjCPointerCast:
151 case CK_DerivedToBaseMemberPointer:
152 case CK_Dynamic:
153 case CK_FunctionToPointerDecay:
154 case CK_IntegralToPointer:
155 case CK_LValueToRValue:
156 case CK_LValueToRValueBitCast:
157 case CK_NullToMemberPointer:
158 case CK_NullToPointer:
159 case CK_ReinterpretMemberPointer:
160 // Common pointer conversions, nothing to do here.
161 // TODO: Is there any reason to treat base-to-derived conversions
162 // specially?
163 break;
164
165 case CK_ARCConsumeObject:
166 case CK_ARCExtendBlockObject:
167 case CK_ARCProduceObject:
168 case CK_ARCReclaimReturnedObject:
169 case CK_AtomicToNonAtomic:
170 case CK_BooleanToSignedIntegral:
171 case CK_ConstructorConversion:
172 case CK_CopyAndAutoreleaseBlockObject:
173 case CK_Dependent:
174 case CK_FixedPointCast:
175 case CK_FixedPointToBoolean:
176 case CK_FixedPointToFloating:
177 case CK_FixedPointToIntegral:
178 case CK_FloatingCast:
179 case CK_FloatingComplexCast:
180 case CK_FloatingComplexToBoolean:
181 case CK_FloatingComplexToIntegralComplex:
182 case CK_FloatingComplexToReal:
183 case CK_FloatingRealToComplex:
184 case CK_FloatingToBoolean:
185 case CK_FloatingToFixedPoint:
186 case CK_FloatingToIntegral:
187 case CK_HLSLAggregateSplatCast:
188 case CK_HLSLArrayRValue:
189 case CK_HLSLElementwiseCast:
190 case CK_HLSLVectorTruncation:
191 case CK_HLSLMatrixTruncation:
192 case CK_IntToOCLSampler:
193 case CK_IntegralCast:
194 case CK_IntegralComplexCast:
195 case CK_IntegralComplexToBoolean:
196 case CK_IntegralComplexToFloatingComplex:
197 case CK_IntegralComplexToReal:
198 case CK_IntegralRealToComplex:
199 case CK_IntegralToBoolean:
200 case CK_IntegralToFixedPoint:
201 case CK_IntegralToFloating:
202 case CK_LValueBitCast:
203 case CK_MatrixCast:
204 case CK_MemberPointerToBoolean:
205 case CK_NonAtomicToAtomic:
206 case CK_ObjCObjectLValueCast:
207 case CK_PointerToBoolean:
208 case CK_PointerToIntegral:
209 case CK_ToUnion:
210 case CK_ToVoid:
211 case CK_UserDefinedConversion:
212 case CK_VectorSplat:
213 case CK_ZeroToOCLOpaqueType:
214 llvm_unreachable("unexpected cast for emitPointerWithAlignment");
215 }
216 }
217
218 // Unary &
219 if (const UnaryOperator *uo = dyn_cast<UnaryOperator>(expr)) {
220 // TODO(cir): maybe we should use cir.unary for pointers here instead.
221 if (uo->getOpcode() == UO_AddrOf) {
222 LValue lv = emitLValue(uo->getSubExpr());
223 if (baseInfo)
224 *baseInfo = lv.getBaseInfo();
226 return lv.getAddress();
227 }
228 }
229
230 // std::addressof and variants.
231 if (auto const *call = dyn_cast<CallExpr>(expr)) {
232 switch (call->getBuiltinCallee()) {
233 default:
234 break;
235 case Builtin::BIaddressof:
236 case Builtin::BI__addressof:
237 case Builtin::BI__builtin_addressof: {
238 cgm.errorNYI(expr->getSourceRange(),
239 "emitPointerWithAlignment: builtin addressof");
240 return Address::invalid();
241 }
242 }
243 }
244
245 // Otherwise, use the alignment of the type.
247 emitScalarExpr(expr), expr->getType()->getPointeeType(), CharUnits(),
248 /*forPointeeType=*/true, baseInfo);
249}
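
// Editor's note (illustrative sketch, not part of the upstream source): for a
// hypothetical cast such as
//   void g(double *);
//   void f(char *buf) { g(reinterpret_cast<double *>(buf)); }
// the explicit-cast path above recomputes the address with the natural
// alignment of the casted-to pointee type when the source l-value is opaque,
// while an array base such as `arr[i]` instead goes through
// emitArrayToPointerDecay.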
250
252 bool isInit) {
253 if (!dst.isSimple()) {
254 if (dst.isVectorElt()) {
255 // Read/modify/write the vector, inserting the new element
256 const mlir::Location loc = dst.getVectorPointer().getLoc();
257 const mlir::Value vector =
258 builder.createLoad(loc, dst.getVectorAddress());
259 const mlir::Value newVector = cir::VecInsertOp::create(
260 builder, loc, vector, src.getValue(), dst.getVectorIdx());
261 builder.createStore(loc, newVector, dst.getVectorAddress());
262 return;
263 }
264
265 assert(dst.isBitField() && "Unknown LValue type");
267 return;
268
269 cgm.errorNYI(dst.getPointer().getLoc(),
270 "emitStoreThroughLValue: non-simple lvalue");
271 return;
272 }
273
275
276 assert(src.isScalar() && "Can't emit an aggregate store with this method");
277 emitStoreOfScalar(src.getValue(), dst, isInit);
278}
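
// Editor's note (illustrative sketch, not part of the upstream source): an
// assignment to a single vector element, e.g. with a hypothetical
//   typedef int vi4 __attribute__((ext_vector_type(4)));
//   void set(vi4 &v, int i) { v[1] = i; }
// takes the isVectorElt() path above: the whole vector is loaded, the element
// is replaced with a VecInsertOp, and the updated vector is stored back.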
279
280static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, const Expr *e,
281 const VarDecl *vd) {
282 QualType t = e->getType();
283
284 // If it's thread_local, emit a call to its wrapper function instead.
285 if (vd->getTLSKind() == VarDecl::TLS_Dynamic)
286 cgf.cgm.errorNYI(e->getSourceRange(),
287 "emitGlobalVarDeclLValue: thread_local variable");
288
289 // Check if the variable is marked as declare target with link clause in
290 // device codegen.
291 if (cgf.getLangOpts().OpenMP)
292 cgf.cgm.errorNYI(e->getSourceRange(), "emitGlobalVarDeclLValue: OpenMP");
293
294 // Traditional LLVM codegen handles thread local separately; CIR handles it
295 // as part of getAddrOfGlobalVar.
296 mlir::Value v = cgf.cgm.getAddrOfGlobalVar(vd);
297
299 mlir::Type realVarTy = cgf.convertTypeForMem(vd->getType());
300 cir::PointerType realPtrTy = cgf.getBuilder().getPointerTo(realVarTy);
301 if (realPtrTy != v.getType())
302 v = cgf.getBuilder().createBitcast(v.getLoc(), v, realPtrTy);
303
304 CharUnits alignment = cgf.getContext().getDeclAlign(vd);
305 Address addr(v, realVarTy, alignment);
306 LValue lv;
307 if (vd->getType()->isReferenceType())
308 cgf.cgm.errorNYI(e->getSourceRange(),
309 "emitGlobalVarDeclLValue: reference type");
310 else
311 lv = cgf.makeAddrLValue(addr, t, AlignmentSource::Decl);
313 return lv;
314}
315
316void CIRGenFunction::emitStoreOfScalar(mlir::Value value, Address addr,
317 bool isVolatile, QualType ty,
318 LValueBaseInfo baseInfo, bool isInit,
319 bool isNontemporal) {
320
321 if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
322 // Boolean vectors use `iN` as storage type.
323 if (clangVecTy->isExtVectorBoolType())
324 cgm.errorNYI(addr.getPointer().getLoc(),
325 "emitStoreOfScalar ExtVectorBoolType");
326
327 // Handle vectors of size 3 like size 4 for better performance.
328 const mlir::Type elementType = addr.getElementType();
329 const auto vecTy = cast<cir::VectorType>(elementType);
330
331 // TODO(CIR): Use `ABIInfo::getOptimalVectorMemoryType` once it is upstreamed.
333 if (vecTy.getSize() == 3 && !getLangOpts().PreserveVec3Type)
334 cgm.errorNYI(addr.getPointer().getLoc(),
335 "emitStoreOfScalar Vec3 & PreserveVec3Type disabled");
336 }
337
338 value = emitToMemory(value, ty);
339
341 LValue atomicLValue = LValue::makeAddr(addr, ty, baseInfo);
342 if (ty->isAtomicType() ||
343 (!isInit && isLValueSuitableForInlineAtomic(atomicLValue))) {
344 emitAtomicStore(RValue::get(value), atomicLValue, isInit);
345 return;
346 }
347
348 // Update the alloca with more info on initialization.
349 assert(addr.getPointer() && "expected pointer to exist");
350 auto srcAlloca = addr.getDefiningOp<cir::AllocaOp>();
351 if (currVarDecl && srcAlloca) {
352 const VarDecl *vd = currVarDecl;
353 assert(vd && "VarDecl expected");
354 if (vd->hasInit())
355 srcAlloca.setInitAttr(mlir::UnitAttr::get(&getMLIRContext()));
356 }
357
358 assert(currSrcLoc && "must pass in source location");
359 builder.createStore(*currSrcLoc, value, addr, isVolatile);
360
361 if (isNontemporal) {
362 cgm.errorNYI(addr.getPointer().getLoc(), "emitStoreOfScalar nontemporal");
363 return;
364 }
365
367}
368
369// TODO: Replace this with a proper TargetInfo function call.
370// Helper method to check if the underlying ABI is AAPCS
371static bool isAAPCS(const TargetInfo &targetInfo) {
372 return targetInfo.getABI().starts_with("aapcs");
373}
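
// Editor's note (not part of the upstream source): this prefix check is
// expected to match ABI names such as "aapcs" or "aapcs-linux"; it gates the
// AAPCS volatile-bitfield handling used by the bitfield load/store helpers
// below.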
374
376 LValue dst) {
377
378 const CIRGenBitFieldInfo &info = dst.getBitFieldInfo();
379 mlir::Type resLTy = convertTypeForMem(dst.getType());
380 Address ptr = dst.getBitFieldAddress();
381
382 bool useVolatile = cgm.getCodeGenOpts().AAPCSBitfieldWidth &&
383 dst.isVolatileQualified() &&
384 info.volatileStorageSize != 0 && isAAPCS(cgm.getTarget());
385
386 mlir::Value dstAddr = dst.getAddress().getPointer();
387
388 return builder.createSetBitfield(dstAddr.getLoc(), resLTy, ptr,
389 ptr.getElementType(), src.getValue(), info,
390 dst.isVolatileQualified(), useVolatile);
391}
392
394 const CIRGenBitFieldInfo &info = lv.getBitFieldInfo();
395
396 // Get the output type.
397 mlir::Type resLTy = convertType(lv.getType());
398 Address ptr = lv.getBitFieldAddress();
399
400 bool useVolatile = lv.isVolatileQualified() && info.volatileOffset != 0 &&
401 isAAPCS(cgm.getTarget());
402
403 mlir::Value field =
404 builder.createGetBitfield(getLoc(loc), resLTy, ptr, ptr.getElementType(),
405 info, lv.isVolatile(), useVolatile);
407 return RValue::get(field);
408}
409
411 const FieldDecl *field,
412 mlir::Type fieldType,
413 unsigned index) {
414 mlir::Location loc = getLoc(field->getLocation());
415 cir::PointerType fieldPtr = cir::PointerType::get(fieldType);
417 cir::GetMemberOp sea = getBuilder().createGetMember(
418 loc, fieldPtr, base.getPointer(), field->getName(),
419 rec.isUnion() ? field->getFieldIndex() : index);
421 rec.getElementOffset(cgm.getDataLayout().layout, index));
422 return Address(sea, base.getAlignment().alignmentAtOffset(offset));
423}
424
426 const FieldDecl *field) {
427 LValueBaseInfo baseInfo = base.getBaseInfo();
428 const CIRGenRecordLayout &layout =
429 cgm.getTypes().getCIRGenRecordLayout(field->getParent());
430 const CIRGenBitFieldInfo &info = layout.getBitFieldInfo(field);
431
433
434 unsigned idx = layout.getCIRFieldNo(field);
435 Address addr = getAddrOfBitFieldStorage(base, field, info.storageType, idx);
436
437 mlir::Location loc = getLoc(field->getLocation());
438 if (addr.getElementType() != info.storageType)
439 addr = builder.createElementBitCast(loc, addr, info.storageType);
440
441 QualType fieldType =
443 // TODO(cir): Support TBAA for bit fields.
445 LValueBaseInfo fieldBaseInfo(baseInfo.getAlignmentSource());
446 return LValue::makeBitfield(addr, info, fieldType, fieldBaseInfo);
447}
448
450 LValueBaseInfo baseInfo = base.getBaseInfo();
451
452 if (field->isBitField())
453 return emitLValueForBitField(base, field);
454
455 QualType fieldType = field->getType();
456 const RecordDecl *rec = field->getParent();
457 AlignmentSource baseAlignSource = baseInfo.getAlignmentSource();
458 LValueBaseInfo fieldBaseInfo(getFieldAlignmentSource(baseAlignSource));
460
461 Address addr = base.getAddress();
462 if (auto *classDecl = dyn_cast<CXXRecordDecl>(rec)) {
463 if (cgm.getCodeGenOpts().StrictVTablePointers &&
464 classDecl->isDynamicClass()) {
465 cgm.errorNYI(field->getSourceRange(),
466 "emitLValueForField: strict vtable for dynamic class");
467 }
468 }
469
470 unsigned recordCVR = base.getVRQualifiers();
471
472 llvm::StringRef fieldName = field->getName();
473 unsigned fieldIndex;
474 if (cgm.lambdaFieldToName.count(field))
475 fieldName = cgm.lambdaFieldToName[field];
476
477 if (rec->isUnion())
478 fieldIndex = field->getFieldIndex();
479 else {
480 const CIRGenRecordLayout &layout =
481 cgm.getTypes().getCIRGenRecordLayout(field->getParent());
482 fieldIndex = layout.getCIRFieldNo(field);
483 }
484
485 addr = emitAddrOfFieldStorage(addr, field, fieldName, fieldIndex);
487
488 // If this is a reference field, load the reference right now.
489 if (fieldType->isReferenceType()) {
491 LValue refLVal = makeAddrLValue(addr, fieldType, fieldBaseInfo);
492 if (recordCVR & Qualifiers::Volatile)
493 refLVal.getQuals().addVolatile();
494 addr = emitLoadOfReference(refLVal, getLoc(field->getSourceRange()),
495 &fieldBaseInfo);
496
497 // Qualifiers on the struct don't apply to the referencee.
498 recordCVR = 0;
499 fieldType = fieldType->getPointeeType();
500 }
501
502 if (field->hasAttr<AnnotateAttr>()) {
503 cgm.errorNYI(field->getSourceRange(), "emitLValueForField: AnnotateAttr");
504 return LValue();
505 }
506
507 LValue lv = makeAddrLValue(addr, fieldType, fieldBaseInfo);
508 lv.getQuals().addCVRQualifiers(recordCVR);
509
510 // __weak attribute on a field is ignored.
512 cgm.errorNYI(field->getSourceRange(),
513 "emitLValueForField: __weak attribute");
514 return LValue();
515 }
516
517 return lv;
518}
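
// Editor's note (illustrative sketch, not part of the upstream source): for a
// hypothetical reference member such as
//   struct S { int &r; };
//   int read(S &s) { return s.r; }
// the isReferenceType() branch above first loads the reference from the
// field's storage and then drops the struct's cv-qualifiers, since they do not
// apply to the referenced object.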
519
521 LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName) {
522 QualType fieldType = field->getType();
523
524 if (!fieldType->isReferenceType())
525 return emitLValueForField(base, field);
526
527 const CIRGenRecordLayout &layout =
528 cgm.getTypes().getCIRGenRecordLayout(field->getParent());
529 unsigned fieldIndex = layout.getCIRFieldNo(field);
530
531 Address v =
532 emitAddrOfFieldStorage(base.getAddress(), field, fieldName, fieldIndex);
533
534 // Make sure that the address is pointing to the right type.
535 mlir::Type memTy = convertTypeForMem(fieldType);
536 v = builder.createElementBitCast(getLoc(field->getSourceRange()), v, memTy);
537
538 // TODO: Generate TBAA information that describes this access as a structure
539 // member access and not just an access to an object of the field's type. This
540 // should be similar to what we do in EmitLValueForField().
541 LValueBaseInfo baseInfo = base.getBaseInfo();
542 AlignmentSource fieldAlignSource = baseInfo.getAlignmentSource();
543 LValueBaseInfo fieldBaseInfo(getFieldAlignmentSource(fieldAlignSource));
545 return makeAddrLValue(v, fieldType, fieldBaseInfo);
546}
547
548mlir::Value CIRGenFunction::emitToMemory(mlir::Value value, QualType ty) {
549 // Bool has a different representation in memory than in registers,
550 // but in ClangIR, it is simply represented as a cir.bool value.
551 // This function is here as a placeholder for possible future changes.
552 return value;
553}
554
555void CIRGenFunction::emitStoreOfScalar(mlir::Value value, LValue lvalue,
556 bool isInit) {
557 if (lvalue.getType()->isConstantMatrixType()) {
558 assert(0 && "NYI: emitStoreOfScalar constant matrix type");
559 return;
560 }
561
562 emitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
563 lvalue.getType(), lvalue.getBaseInfo(), isInit,
564 /*isNontemporal=*/false);
565}
566
567mlir::Value CIRGenFunction::emitLoadOfScalar(Address addr, bool isVolatile,
568 QualType ty, SourceLocation loc,
569 LValueBaseInfo baseInfo) {
570 // Traditional LLVM codegen handles thread local separately; CIR handles it
571 // as part of getAddrOfGlobalVar (GetGlobalOp).
572 mlir::Type eltTy = addr.getElementType();
573
574 if (const auto *clangVecTy = ty->getAs<clang::VectorType>()) {
575 if (clangVecTy->isExtVectorBoolType()) {
576 cgm.errorNYI(loc, "emitLoadOfScalar: ExtVectorBoolType");
577 return nullptr;
578 }
579
580 const auto vecTy = cast<cir::VectorType>(eltTy);
581
582 // Handle vectors of size 3 like size 4 for better performance.
584 if (vecTy.getSize() == 3 && !getLangOpts().PreserveVec3Type)
585 cgm.errorNYI(addr.getPointer().getLoc(),
586 "emitLoadOfScalar Vec3 & PreserveVec3Type disabled");
587 }
588
590 LValue atomicLValue = LValue::makeAddr(addr, ty, baseInfo);
591 if (ty->isAtomicType() || isLValueSuitableForInlineAtomic(atomicLValue))
592 cgm.errorNYI("emitLoadOfScalar: load atomic");
593
594 if (mlir::isa<cir::VoidType>(eltTy))
595 cgm.errorNYI(loc, "emitLoadOfScalar: void type");
596
598
599 mlir::Value loadOp = builder.createLoad(getLoc(loc), addr, isVolatile);
600 if (!ty->isBooleanType() && ty->hasBooleanRepresentation())
601 cgm.errorNYI("emitLoadOfScalar: boolean type with boolean representation");
602
603 return loadOp;
604}
605
607 SourceLocation loc) {
610 return emitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
611 lvalue.getType(), loc, lvalue.getBaseInfo());
612}
613
614// Given an expression that represents a value lvalue, this
615// method emits the address of the lvalue, then loads the result as an rvalue,
616// returning the rvalue.
618 assert(!lv.getType()->isFunctionType());
619 assert(!(lv.getType()->isConstantMatrixType()) && "not implemented");
620
621 if (lv.isBitField())
622 return emitLoadOfBitfieldLValue(lv, loc);
623
624 if (lv.isSimple())
625 return RValue::get(emitLoadOfScalar(lv, loc));
626
627 if (lv.isVectorElt()) {
628 const mlir::Value load =
629 builder.createLoad(getLoc(loc), lv.getVectorAddress());
630 return RValue::get(cir::VecExtractOp::create(builder, getLoc(loc), load,
631 lv.getVectorIdx()));
632 }
633
634 if (lv.isExtVectorElt())
636
637 cgm.errorNYI(loc, "emitLoadOfLValue");
638 return RValue::get(nullptr);
639}
640
641int64_t CIRGenFunction::getAccessedFieldNo(unsigned int idx,
642 const mlir::ArrayAttr elts) {
643 auto elt = mlir::cast<mlir::IntegerAttr>(elts[idx]);
644 return elt.getInt();
645}
646
647// If this is a reference to a subset of the elements of a vector, create an
648// appropriate shufflevector.
650 mlir::Location loc = lv.getExtVectorPointer().getLoc();
651 mlir::Value vec = builder.createLoad(loc, lv.getExtVectorAddress());
652
653 // HLSL allows treating scalars as one-element vectors. Converting the scalar
654 // IR value to a vector here allows the rest of codegen to behave as normal.
655 if (getLangOpts().HLSL && !mlir::isa<cir::VectorType>(vec.getType())) {
656 cgm.errorNYI(loc, "emitLoadOfExtVectorElementLValue: HLSL");
657 return {};
658 }
659
660 const mlir::ArrayAttr elts = lv.getExtVectorElts();
661
662 // If the result of the expression is a non-vector type, we must be extracting
663 // a single element. Just codegen as an extractelement.
664 const auto *exprVecTy = lv.getType()->getAs<clang::VectorType>();
665 if (!exprVecTy) {
666 int64_t indexValue = getAccessedFieldNo(0, elts);
667 cir::ConstantOp index =
668 builder.getConstInt(loc, builder.getSInt64Ty(), indexValue);
669 return RValue::get(cir::VecExtractOp::create(builder, loc, vec, index));
670 }
671
672 // Always use shuffle vector to try to retain the original program structure
674 for (auto i : llvm::seq<unsigned>(0, exprVecTy->getNumElements()))
675 mask.push_back(getAccessedFieldNo(i, elts));
676
677 cir::VecShuffleOp resultVec = builder.createVecShuffle(loc, vec, mask);
678 if (lv.getType()->isExtVectorBoolType()) {
679 cgm.errorNYI(loc, "emitLoadOfExtVectorElementLValue: ExtVectorBoolType");
680 return {};
681 }
682
683 return RValue::get(resultVec);
684}
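
// Editor's note (illustrative sketch, not part of the upstream source): with a
// hypothetical ext_vector type
//   typedef float vf4 __attribute__((ext_vector_type(4)));
// a single-element access such as `v.x` produces a VecExtractOp, while a
// multi-element swizzle such as `v.yx` is emitted as a vector shuffle whose
// mask is taken from the encoded element indices.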
685
686LValue
688 assert((e->getOpcode() == BO_PtrMemD || e->getOpcode() == BO_PtrMemI) &&
689 "unexpected binary operator opcode");
690
691 Address baseAddr = Address::invalid();
692 if (e->getOpcode() == BO_PtrMemD)
693 baseAddr = emitLValue(e->getLHS()).getAddress();
694 else
695 baseAddr = emitPointerWithAlignment(e->getLHS());
696
697 const auto *memberPtrTy = e->getRHS()->getType()->castAs<MemberPointerType>();
698
699 mlir::Value memberPtr = emitScalarExpr(e->getRHS());
700
701 LValueBaseInfo baseInfo;
703 Address memberAddr = emitCXXMemberDataPointerAddress(e, baseAddr, memberPtr,
704 memberPtrTy, &baseInfo);
705
706 return makeAddrLValue(memberAddr, memberPtrTy->getPointeeType(), baseInfo);
707}
708
709// Generates lvalue for partial ext_vector access.
711 mlir::Location loc) {
712 Address vectorAddress = lv.getExtVectorAddress();
713 QualType elementTy = lv.getType()->castAs<VectorType>()->getElementType();
714 mlir::Type vectorElementTy = cgm.getTypes().convertType(elementTy);
715 Address castToPointerElement =
716 vectorAddress.withElementType(builder, vectorElementTy);
717
718 mlir::ArrayAttr extVecElts = lv.getExtVectorElts();
719 unsigned idx = getAccessedFieldNo(0, extVecElts);
720 mlir::Value idxValue =
721 builder.getConstInt(loc, mlir::cast<cir::IntType>(ptrDiffTy), idx);
722
723 mlir::Value elementValue = builder.getArrayElement(
724 loc, loc, castToPointerElement.getPointer(), vectorElementTy, idxValue,
725 /*shouldDecay=*/false);
726
727 const CharUnits eltSize = getContext().getTypeSizeInChars(elementTy);
728 const CharUnits alignment =
729 castToPointerElement.getAlignment().alignmentAtOffset(idx * eltSize);
730 return Address(elementValue, vectorElementTy, alignment);
731}
732
733static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd) {
735 return cgm.getAddrOfFunction(gd);
736}
737
739 mlir::Value thisValue) {
740 return cgf.emitLValueForLambdaField(fd, thisValue);
741}
742
743// Given that we are currently emitting a lambda, emit an l-value for
744// one of its members.
745//
747 mlir::Value thisValue) {
748 bool hasExplicitObjectParameter = false;
749 const auto *methD = dyn_cast_if_present<CXXMethodDecl>(curCodeDecl);
750 LValue lambdaLV;
751 if (methD) {
752 hasExplicitObjectParameter = methD->isExplicitObjectMemberFunction();
753 assert(methD->getParent()->isLambda());
754 assert(methD->getParent() == field->getParent());
755 }
756 if (hasExplicitObjectParameter) {
757 cgm.errorNYI(field->getSourceRange(), "ExplicitObjectMemberFunction");
758 } else {
759 QualType lambdaTagType =
761 lambdaLV = makeNaturalAlignAddrLValue(thisValue, lambdaTagType);
762 }
763 return emitLValueForField(lambdaLV, field);
764}
765
769
770static LValue emitFunctionDeclLValue(CIRGenFunction &cgf, const Expr *e,
771 GlobalDecl gd) {
772 const FunctionDecl *fd = cast<FunctionDecl>(gd.getDecl());
773 cir::FuncOp funcOp = emitFunctionDeclPointer(cgf.cgm, gd);
774 mlir::Location loc = cgf.getLoc(e->getSourceRange());
775 CharUnits align = cgf.getContext().getDeclAlign(fd);
776
778
779 mlir::Type fnTy = funcOp.getFunctionType();
780 mlir::Type ptrTy = cir::PointerType::get(fnTy);
781 mlir::Value addr = cir::GetGlobalOp::create(cgf.getBuilder(), loc, ptrTy,
782 funcOp.getSymName());
783
784 if (funcOp.getFunctionType() != cgf.convertType(fd->getType())) {
785 fnTy = cgf.convertType(fd->getType());
786 ptrTy = cir::PointerType::get(fnTy);
787
788 addr = cir::CastOp::create(cgf.getBuilder(), addr.getLoc(), ptrTy,
789 cir::CastKind::bitcast, addr);
790 }
791
792 return cgf.makeAddrLValue(Address(addr, fnTy, align), e->getType(),
794}
795
796// Determine whether we can emit a reference to \p vd from the current
797// context, despite not necessarily having seen an odr-use of the variable in
798// this context.
799// TODO(cir): This could be shared with classic codegen.
801 const DeclRefExpr *e,
802 const VarDecl *vd) {
803 // For a variable declared in an enclosing scope, do not emit a spurious
804 // reference even if we have a capture, as that will emit an unwarranted
805 // reference to our capture state, and will likely generate worse code than
806 // emitting a local copy.
808 return false;
809
810 // For a local declaration declared in this function, we can always reference
811 // it even if we don't have an odr-use.
812 if (vd->hasLocalStorage()) {
813 return vd->getDeclContext() ==
814 dyn_cast_or_null<DeclContext>(cgf.curCodeDecl);
815 }
816
817 // For a global declaration, we can emit a reference to it if we know
818 // for sure that we are able to emit a definition of it.
819 vd = vd->getDefinition(cgf.getContext());
820 if (!vd)
821 return false;
822
823 // Don't emit a spurious reference if it might be to a variable that only
824 // exists on a different device / target.
825 // FIXME: This is unnecessarily broad. Check whether this would actually be a
826 // cross-target reference.
827 if (cgf.getLangOpts().OpenMP || cgf.getLangOpts().CUDA ||
828 cgf.getLangOpts().OpenCL) {
829 return false;
830 }
831
832 // We can emit a spurious reference only if the linkage implies that we'll
833 // be emitting a non-interposable symbol that will be retained until link
834 // time.
835 switch (cgf.cgm.getCIRLinkageVarDefinition(vd, /*IsConstant=*/false)) {
836 case cir::GlobalLinkageKind::ExternalLinkage:
837 case cir::GlobalLinkageKind::LinkOnceODRLinkage:
838 case cir::GlobalLinkageKind::WeakODRLinkage:
839 case cir::GlobalLinkageKind::InternalLinkage:
840 case cir::GlobalLinkageKind::PrivateLinkage:
841 return true;
842 default:
843 return false;
844 }
845}
846
848 const NamedDecl *nd = e->getDecl();
849 QualType ty = e->getType();
850
851 assert(e->isNonOdrUse() != NOUR_Unevaluated &&
852 "should not emit an unevaluated operand");
853
854 if (const auto *vd = dyn_cast<VarDecl>(nd)) {
855 // Global Named registers access via intrinsics only
856 if (vd->getStorageClass() == SC_Register && vd->hasAttr<AsmLabelAttr>() &&
857 !vd->isLocalVarDecl()) {
858 cgm.errorNYI(e->getSourceRange(),
859 "emitDeclRefLValue: Global Named registers access");
860 return LValue();
861 }
862
863 if (e->isNonOdrUse() == NOUR_Constant &&
864 (vd->getType()->isReferenceType() ||
865 !canEmitSpuriousReferenceToVariable(*this, e, vd))) {
866 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: NonOdrUse");
867 return LValue();
868 }
869
870 // Check for captured variables.
872 vd = vd->getCanonicalDecl();
873 if (FieldDecl *fd = lambdaCaptureFields.lookup(vd))
874 return emitCapturedFieldLValue(*this, fd, cxxabiThisValue);
877 }
878 }
879
880 if (const auto *vd = dyn_cast<VarDecl>(nd)) {
881 // Checks for omitted feature handling
888
889 // Check if this is a global variable
890 if (vd->hasLinkage() || vd->isStaticDataMember())
891 return emitGlobalVarDeclLValue(*this, e, vd);
892
893 Address addr = Address::invalid();
894
895 // The variable should generally be present in the local decl map.
896 auto iter = localDeclMap.find(vd);
897 if (iter != localDeclMap.end()) {
898 addr = iter->second;
899 } else {
900 // Otherwise, it might be a static local we haven't emitted yet for some
901 // reason; most likely, because it's in an outer function.
902 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: static local");
903 }
904
905 // Drill into reference types.
906 LValue lv =
907 vd->getType()->isReferenceType()
911
912 // Statics are defined as globals, so they are not included in the function's
913 // symbol table.
914 assert((vd->isStaticLocal() || symbolTable.count(vd)) &&
915 "non-static locals should be already mapped");
916
917 return lv;
918 }
919
920 if (const auto *bd = dyn_cast<BindingDecl>(nd)) {
923 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: lambda captures");
924 return LValue();
925 }
926 return emitLValue(bd->getBinding());
927 }
928
929 if (const auto *fd = dyn_cast<FunctionDecl>(nd)) {
930 LValue lv = emitFunctionDeclLValue(*this, e, fd);
931
932 // Emit debuginfo for the function declaration if the target wants to.
933 if (getContext().getTargetInfo().allowDebugInfoForExternalRef())
935
936 return lv;
937 }
938
939 cgm.errorNYI(e->getSourceRange(), "emitDeclRefLValue: unhandled decl type");
940 return LValue();
941}
942
944 QualType boolTy = getContext().BoolTy;
945 SourceLocation loc = e->getExprLoc();
946
948 if (e->getType()->getAs<MemberPointerType>()) {
949 cgm.errorNYI(e->getSourceRange(),
950 "evaluateExprAsBool: member pointer type");
951 return createDummyValue(getLoc(loc), boolTy);
952 }
953
955 if (!e->getType()->isAnyComplexType())
956 return emitScalarConversion(emitScalarExpr(e), e->getType(), boolTy, loc);
957
959 loc);
960}
961
964
965 // __extension__ doesn't affect lvalue-ness.
966 if (op == UO_Extension)
967 return emitLValue(e->getSubExpr());
968
969 switch (op) {
970 case UO_Deref: {
972 assert(!t.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
973
975 LValueBaseInfo baseInfo;
976 Address addr = emitPointerWithAlignment(e->getSubExpr(), &baseInfo);
977
978 // Tag 'load' with deref attribute.
979 // FIXME: This misses some dereference cases and has problematic interactions
980 // with other operators.
981 if (auto loadOp = addr.getDefiningOp<cir::LoadOp>())
982 loadOp.setIsDerefAttr(mlir::UnitAttr::get(&getMLIRContext()));
983
984 LValue lv = makeAddrLValue(addr, t, baseInfo);
987 return lv;
988 }
989 case UO_Real:
990 case UO_Imag: {
991 LValue lv = emitLValue(e->getSubExpr());
992 assert(lv.isSimple() && "real/imag on non-ordinary l-value");
993
994 // __real is valid on scalars. This is a faster way of testing that.
995 // __imag can only produce an rvalue on scalars.
996 if (e->getOpcode() == UO_Real &&
997 !mlir::isa<cir::ComplexType>(lv.getAddress().getElementType())) {
998 assert(e->getSubExpr()->getType()->isArithmeticType());
999 return lv;
1000 }
1001
1003 QualType elemTy = exprTy->castAs<clang::ComplexType>()->getElementType();
1004 mlir::Location loc = getLoc(e->getExprLoc());
1005 Address component =
1006 e->getOpcode() == UO_Real
1007 ? builder.createComplexRealPtr(loc, lv.getAddress())
1008 : builder.createComplexImagPtr(loc, lv.getAddress());
1010 LValue elemLV = makeAddrLValue(component, elemTy);
1011 elemLV.getQuals().addQualifiers(lv.getQuals());
1012 return elemLV;
1013 }
1014 case UO_PreInc:
1015 case UO_PreDec: {
1016 cir::UnaryOpKind kind =
1017 e->isIncrementOp() ? cir::UnaryOpKind::Inc : cir::UnaryOpKind::Dec;
1018 LValue lv = emitLValue(e->getSubExpr());
1019
1020 assert(e->isPrefix() && "Prefix operator in unexpected state!");
1021
1022 if (e->getType()->isAnyComplexType()) {
1023 emitComplexPrePostIncDec(e, lv, kind, /*isPre=*/true);
1024 } else {
1025 emitScalarPrePostIncDec(e, lv, kind, /*isPre=*/true);
1026 }
1027
1028 return lv;
1029 }
1030 case UO_Extension:
1031 llvm_unreachable("UnaryOperator extension should be handled above!");
1032 case UO_Plus:
1033 case UO_Minus:
1034 case UO_Not:
1035 case UO_LNot:
1036 case UO_AddrOf:
1037 case UO_PostInc:
1038 case UO_PostDec:
1039 case UO_Coawait:
1040 llvm_unreachable("UnaryOperator of non-lvalue kind!");
1041 }
1042 llvm_unreachable("Unknown unary operator kind!");
1043}
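
// Editor's note (illustrative sketch, not part of the upstream source): for a
// hypothetical snippet
//   float _Complex c;
//   __real c = 1.0f;
// the UO_Real branch above forms an l-value for the real component via
// createComplexRealPtr; `*p` takes the UO_Deref branch and a prefix `++i`
// takes the pre-increment branch.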
1044
1045// If the specified expr is a simple decay from an array to pointer,
1046// return the array subexpression.
1047// FIXME: this could be abstracted into a common AST helper.
1048static const Expr *getSimpleArrayDecayOperand(const Expr *e) {
1049 // If this isn't just an array->pointer decay, bail out.
1050 const auto *castExpr = dyn_cast<CastExpr>(e);
1051 if (!castExpr || castExpr->getCastKind() != CK_ArrayToPointerDecay)
1052 return nullptr;
1053
1054 // If this is a decay from variable width array, bail out.
1055 const Expr *subExpr = castExpr->getSubExpr();
1056 if (subExpr->getType()->isVariableArrayType())
1057 return nullptr;
1058
1059 return subExpr;
1060}
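
// Editor's note (illustrative sketch, not part of the upstream source): in
//   int a[8];
//   int x = a[3];
// the base `a` reaches the subscript through an implicit CK_ArrayToPointerDecay
// cast; this helper strips that decay so the caller can index the array
// l-value directly, and it bails out for variable-length arrays.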
1061
1062static cir::IntAttr getConstantIndexOrNull(mlir::Value idx) {
1063 // TODO(cir): should we consider using MLIR's IndexType instead of IntegerAttr?
1064 if (auto constantOp = idx.getDefiningOp<cir::ConstantOp>())
1065 return constantOp.getValueAttr<cir::IntAttr>();
1066 return {};
1067}
1068
1069static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx,
1070 CharUnits eltSize) {
1071 // If we have a constant index, we can use the exact offset of the
1072 // element we're accessing.
1073 if (const cir::IntAttr constantIdx = getConstantIndexOrNull(idx)) {
1074 const CharUnits offset = constantIdx.getValue().getZExtValue() * eltSize;
1075 return arrayAlign.alignmentAtOffset(offset);
1076 }
1077 // Otherwise, use the worst-case alignment for any element.
1078 return arrayAlign.alignmentOfArrayElement(eltSize);
1079}
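
// Editor's note (worked example, not part of the upstream source): for a
// 16-byte aligned array of 8-byte elements, a constant index of 1 yields an
// 8-byte aligned element address via alignmentAtOffset, and a non-constant
// index falls back to alignmentOfArrayElement, which here is also 8 bytes.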
1080
1082 const VariableArrayType *vla) {
1083 QualType eltType;
1084 do {
1085 eltType = vla->getElementType();
1086 } while ((vla = astContext.getAsVariableArrayType(eltType)));
1087 return eltType;
1088}
1089
1091 mlir::Location beginLoc,
1092 mlir::Location endLoc, mlir::Value ptr,
1093 mlir::Type eltTy, mlir::Value idx,
1094 bool shouldDecay) {
1095 CIRGenModule &cgm = cgf.getCIRGenModule();
1096 // TODO(cir): LLVM codegen emits an inbounds GEP check here; is there anything
1097 // that would enhance tracking this later in CIR?
1099 return cgm.getBuilder().getArrayElement(beginLoc, endLoc, ptr, eltTy, idx,
1100 shouldDecay);
1101}
1102
1104 mlir::Location beginLoc,
1105 mlir::Location endLoc, Address addr,
1106 QualType eltType, mlir::Value idx,
1107 mlir::Location loc, bool shouldDecay) {
1108
1109 // Determine the element size of the statically-sized base. This is
1110 // the thing that the indices are expressed in terms of.
1111 if (const VariableArrayType *vla =
1112 cgf.getContext().getAsVariableArrayType(eltType)) {
1113 eltType = getFixedSizeElementType(cgf.getContext(), vla);
1114 }
1115
1116 // We can use that to compute the best alignment of the element.
1117 const CharUnits eltSize = cgf.getContext().getTypeSizeInChars(eltType);
1118 const CharUnits eltAlign =
1119 getArrayElementAlign(addr.getAlignment(), idx, eltSize);
1120
1122 const mlir::Value eltPtr =
1123 emitArraySubscriptPtr(cgf, beginLoc, endLoc, addr.getPointer(),
1124 addr.getElementType(), idx, shouldDecay);
1125 const mlir::Type elementType = cgf.convertTypeForMem(eltType);
1126 return Address(eltPtr, elementType, eltAlign);
1127}
1128
1129LValue
1131 if (getContext().getAsVariableArrayType(e->getType())) {
1132 cgm.errorNYI(e->getSourceRange(),
1133 "emitArraySubscriptExpr: VariableArrayType");
1135 }
1136
1137 if (e->getType()->getAs<ObjCObjectType>()) {
1138 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjCObjectType");
1140 }
1141
1142 // The index must always be an integer, which is not an aggregate. Emit it
1143 // in lexical order (this complexity is, sadly, required by C++17).
1144 assert((e->getIdx() == e->getLHS() || e->getIdx() == e->getRHS()) &&
1145 "index was neither LHS nor RHS");
1146
1147 auto emitIdxAfterBase = [&](bool promote) -> mlir::Value {
1148 const mlir::Value idx = emitScalarExpr(e->getIdx());
1149
1150 // Extend or truncate the index type to 32 or 64-bits.
1151 auto ptrTy = mlir::dyn_cast<cir::PointerType>(idx.getType());
1152 if (promote && ptrTy && ptrTy.isPtrTo<cir::IntType>())
1153 cgm.errorNYI(e->getSourceRange(),
1154 "emitArraySubscriptExpr: index type cast");
1155 return idx;
1156 };
1157
1158 // If the base is a vector type, then we are forming a vector element
1159 // with this subscript.
1160 if (e->getBase()->getType()->isSubscriptableVectorType() &&
1162 const mlir::Value idx = emitIdxAfterBase(/*promote=*/false);
1163 const LValue lv = emitLValue(e->getBase());
1164 return LValue::makeVectorElt(lv.getAddress(), idx, e->getBase()->getType(),
1165 lv.getBaseInfo());
1166 }
1167
1168 const mlir::Value idx = emitIdxAfterBase(/*promote=*/true);
1169
1170 // Handle the extvector case we ignored above.
1172 const LValue lv = emitLValue(e->getBase());
1173 Address addr = emitExtVectorElementLValue(lv, cgm.getLoc(e->getExprLoc()));
1174
1175 QualType elementType = lv.getType()->castAs<VectorType>()->getElementType();
1176 addr = emitArraySubscriptPtr(*this, cgm.getLoc(e->getBeginLoc()),
1177 cgm.getLoc(e->getEndLoc()), addr, e->getType(),
1178 idx, cgm.getLoc(e->getExprLoc()),
1179 /*shouldDecay=*/false);
1180
1181 return makeAddrLValue(addr, elementType, lv.getBaseInfo());
1182 }
1183
1184 if (const Expr *array = getSimpleArrayDecayOperand(e->getBase())) {
1185 LValue arrayLV;
1186 if (const auto *ase = dyn_cast<ArraySubscriptExpr>(array))
1187 arrayLV = emitArraySubscriptExpr(ase);
1188 else
1189 arrayLV = emitLValue(array);
1190
1191 // Propagate the alignment from the array itself to the result.
1192 const Address addr = emitArraySubscriptPtr(
1193 *this, cgm.getLoc(array->getBeginLoc()), cgm.getLoc(array->getEndLoc()),
1194 arrayLV.getAddress(), e->getType(), idx, cgm.getLoc(e->getExprLoc()),
1195 /*shouldDecay=*/true);
1196
1197 const LValue lv = LValue::makeAddr(addr, e->getType(), LValueBaseInfo());
1198
1199 if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
1200 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
1201 }
1202
1203 return lv;
1204 }
1205
1206 // The base must be a pointer; emit it with an estimate of its alignment.
1207 assert(e->getBase()->getType()->isPointerType() &&
1208 "The base must be a pointer");
1209
1210 LValueBaseInfo eltBaseInfo;
1211 const Address ptrAddr = emitPointerWithAlignment(e->getBase(), &eltBaseInfo);
1212 // Propagate the alignment from the array itself to the result.
1213 const Address addr = emitArraySubscriptPtr(
1214 *this, cgm.getLoc(e->getBeginLoc()), cgm.getLoc(e->getEndLoc()), ptrAddr,
1215 e->getType(), idx, cgm.getLoc(e->getExprLoc()),
1216 /*shouldDecay=*/false);
1217
1218 const LValue lv = LValue::makeAddr(addr, e->getType(), eltBaseInfo);
1219
1220 if (getLangOpts().ObjC && getLangOpts().getGC() != LangOptions::NonGC) {
1221 cgm.errorNYI(e->getSourceRange(), "emitArraySubscriptExpr: ObjC with GC");
1222 }
1223
1224 return lv;
1225}
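
// Editor's note (illustrative sketch, not part of the upstream source): a
// hypothetical function
//   int f(int *p, int a[4], int i) { return p[i] + a[i]; }
// exercises two of the paths above: `a[i]` is recognized as a simple
// array-to-pointer decay and indexes the array l-value, while `p[i]` uses the
// pointer base emitted with emitPointerWithAlignment.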
1226
1228 // Emit the base vector as an l-value.
1229 LValue base;
1230
1231 // ExtVectorElementExpr's base can either be a vector or pointer to vector.
1232 if (e->isArrow()) {
1233 // If it is a pointer to a vector, emit the address and form an lvalue with
1234 // it.
1235 LValueBaseInfo baseInfo;
1236 Address ptr = emitPointerWithAlignment(e->getBase(), &baseInfo);
1237 const auto *clangPtrTy =
1239 base = makeAddrLValue(ptr, clangPtrTy->getPointeeType(), baseInfo);
1240 base.getQuals().removeObjCGCAttr();
1241 } else if (e->getBase()->isGLValue()) {
1242 // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
1243 // emit the base as an lvalue.
1244 assert(e->getBase()->getType()->isVectorType());
1245 base = emitLValue(e->getBase());
1246 } else {
1247 // Otherwise, the base is a normal rvalue (as in (V+V).x); emit it as such.
1248 assert(e->getBase()->getType()->isVectorType() &&
1249 "Result must be a vector");
1250 mlir::Value vec = emitScalarExpr(e->getBase());
1251
1252 // Store the vector to memory (because LValue wants an address).
1253 QualType baseTy = e->getBase()->getType();
1254 Address vecMem = createMemTemp(baseTy, vec.getLoc(), "tmp");
1255 if (!getLangOpts().HLSL && baseTy->isExtVectorBoolType()) {
1256 cgm.errorNYI(e->getSourceRange(),
1257 "emitExtVectorElementExpr: ExtVectorBoolType & !HLSL");
1258 return {};
1259 }
1260 builder.createStore(vec.getLoc(), vec, vecMem);
1261 base = makeAddrLValue(vecMem, baseTy, AlignmentSource::Decl);
1262 }
1263
1264 QualType type =
1266
1267 // Encode the element access list into a vector of unsigned indices.
1269 e->getEncodedElementAccess(indices);
1270
1271 if (base.isSimple()) {
1272 SmallVector<int64_t> attrElts(indices.begin(), indices.end());
1273 mlir::ArrayAttr elts = builder.getI64ArrayAttr(attrElts);
1274 return LValue::makeExtVectorElt(base.getAddress(), elts, type,
1275 base.getBaseInfo());
1276 }
1277
1278 cgm.errorNYI(e->getSourceRange(),
1279 "emitExtVectorElementExpr: isSimple is false");
1280 return {};
1281}
1282
1284 llvm::StringRef name) {
1285 cir::GlobalOp globalOp = cgm.getGlobalForStringLiteral(e, name);
1286 assert(globalOp.getAlignment() && "expected alignment for string literal");
1287 unsigned align = *(globalOp.getAlignment());
1288 mlir::Value addr =
1289 builder.createGetGlobal(getLoc(e->getSourceRange()), globalOp);
1290 return makeAddrLValue(
1291 Address(addr, globalOp.getSymType(), CharUnits::fromQuantity(align)),
1293}
1294
1295// Casts are never lvalues unless that cast is to a reference type. If the cast
1296// is to a reference, we can have the usual lvalue result; otherwise, if a cast
1297// is needed by the code generator in an lvalue context, it must mean that
1298// we need the address of an aggregate in order to access one of its members.
1299// This can happen for all the reasons that casts are permitted with aggregate
1300// result, including noop aggregate casts, and cast from scalar to union.
1302 switch (e->getCastKind()) {
1303 case CK_ToVoid:
1304 case CK_BitCast:
1305 case CK_LValueToRValueBitCast:
1306 case CK_ArrayToPointerDecay:
1307 case CK_FunctionToPointerDecay:
1308 case CK_NullToMemberPointer:
1309 case CK_NullToPointer:
1310 case CK_IntegralToPointer:
1311 case CK_PointerToIntegral:
1312 case CK_PointerToBoolean:
1313 case CK_IntegralCast:
1314 case CK_BooleanToSignedIntegral:
1315 case CK_IntegralToBoolean:
1316 case CK_IntegralToFloating:
1317 case CK_FloatingToIntegral:
1318 case CK_FloatingToBoolean:
1319 case CK_FloatingCast:
1320 case CK_FloatingRealToComplex:
1321 case CK_FloatingComplexToReal:
1322 case CK_FloatingComplexToBoolean:
1323 case CK_FloatingComplexCast:
1324 case CK_FloatingComplexToIntegralComplex:
1325 case CK_IntegralRealToComplex:
1326 case CK_IntegralComplexToReal:
1327 case CK_IntegralComplexToBoolean:
1328 case CK_IntegralComplexCast:
1329 case CK_IntegralComplexToFloatingComplex:
1330 case CK_DerivedToBaseMemberPointer:
1331 case CK_BaseToDerivedMemberPointer:
1332 case CK_MemberPointerToBoolean:
1333 case CK_ReinterpretMemberPointer:
1334 case CK_AnyPointerToBlockPointerCast:
1335 case CK_ARCProduceObject:
1336 case CK_ARCConsumeObject:
1337 case CK_ARCReclaimReturnedObject:
1338 case CK_ARCExtendBlockObject:
1339 case CK_CopyAndAutoreleaseBlockObject:
1340 case CK_IntToOCLSampler:
1341 case CK_FloatingToFixedPoint:
1342 case CK_FixedPointToFloating:
1343 case CK_FixedPointCast:
1344 case CK_FixedPointToBoolean:
1345 case CK_FixedPointToIntegral:
1346 case CK_IntegralToFixedPoint:
1347 case CK_MatrixCast:
1348 case CK_HLSLVectorTruncation:
1349 case CK_HLSLMatrixTruncation:
1350 case CK_HLSLArrayRValue:
1351 case CK_HLSLElementwiseCast:
1352 case CK_HLSLAggregateSplatCast:
1353 llvm_unreachable("unexpected cast lvalue");
1354
1355 case CK_Dependent:
1356 llvm_unreachable("dependent cast kind in IR gen!");
1357
1358 case CK_BuiltinFnToFnPtr:
1359 llvm_unreachable("builtin functions are handled elsewhere");
1360
1361 case CK_Dynamic: {
1362 LValue lv = emitLValue(e->getSubExpr());
1363 Address v = lv.getAddress();
1364 const auto *dce = cast<CXXDynamicCastExpr>(e);
1366 }
1367
1368 // These are never l-values; just use the aggregate emission code.
1369 case CK_NonAtomicToAtomic:
1370 case CK_AtomicToNonAtomic:
1371 case CK_ToUnion:
1372 case CK_ObjCObjectLValueCast:
1373 case CK_VectorSplat:
1374 case CK_ConstructorConversion:
1375 case CK_UserDefinedConversion:
1376 case CK_CPointerToObjCPointerCast:
1377 case CK_BlockPointerToObjCPointerCast:
1378 case CK_LValueToRValue: {
1379 cgm.errorNYI(e->getSourceRange(),
1380 std::string("emitCastLValue for unhandled cast kind: ") +
1381 e->getCastKindName());
1382
1383 return {};
1384 }
1385 case CK_AddressSpaceConversion: {
1386 LValue lv = emitLValue(e->getSubExpr());
1387 QualType destTy = getContext().getPointerType(e->getType());
1388
1389 clang::LangAS srcLangAS = e->getSubExpr()->getType().getAddressSpace();
1390 cir::TargetAddressSpaceAttr srcAS;
1391 if (clang::isTargetAddressSpace(srcLangAS))
1392 srcAS = cir::toCIRTargetAddressSpace(getMLIRContext(), srcLangAS);
1393 else
1394 cgm.errorNYI(
1395 e->getSourceRange(),
1396 "emitCastLValue: address space conversion from unknown address "
1397 "space");
1398
1399 mlir::Value v = getTargetHooks().performAddrSpaceCast(
1400 *this, lv.getPointer(), srcAS, convertType(destTy));
1401
1403 lv.getAddress().getAlignment()),
1404 e->getType(), lv.getBaseInfo());
1405 }
1406
1407 case CK_LValueBitCast: {
1408 // This must be a reinterpret_cast (or C-style equivalent).
1409 const auto *ce = cast<ExplicitCastExpr>(e);
1410
1411 cgm.emitExplicitCastExprType(ce, this);
1412 LValue LV = emitLValue(e->getSubExpr());
1414 builder, convertTypeForMem(ce->getTypeAsWritten()->getPointeeType()));
1415
1416 return makeAddrLValue(V, e->getType(), LV.getBaseInfo());
1417 }
1418
1419 case CK_NoOp: {
1420 // CK_NoOp can model a qualification conversion, which can remove an array
1421 // bound and change the IR type.
1422 LValue lv = emitLValue(e->getSubExpr());
1424 // Propagate the volatile qualifier to the LValue, if it exists in e.
1425 cgm.errorNYI(e->getSourceRange(),
1426 "emitCastLValue: NoOp changes volatile qual");
1427 if (lv.isSimple()) {
1428 Address v = lv.getAddress();
1429 if (v.isValid()) {
1430 mlir::Type ty = convertTypeForMem(e->getType());
1431 if (v.getElementType() != ty)
1432 cgm.errorNYI(e->getSourceRange(),
1433 "emitCastLValue: NoOp needs bitcast");
1434 }
1435 }
1436 return lv;
1437 }
1438
1439 case CK_UncheckedDerivedToBase:
1440 case CK_DerivedToBase: {
1441 auto *derivedClassDecl = e->getSubExpr()->getType()->castAsCXXRecordDecl();
1442
1443 LValue lv = emitLValue(e->getSubExpr());
1444 Address thisAddr = lv.getAddress();
1445
1446 // Perform the derived-to-base conversion
1447 Address baseAddr =
1448 getAddressOfBaseClass(thisAddr, derivedClassDecl, e->path(),
1449 /*NullCheckValue=*/false, e->getExprLoc());
1450
1451 // TODO: Support accesses to members of base classes in TBAA. For now, we
1452 // conservatively pretend that the complete object is of the base class
1453 // type.
1455 return makeAddrLValue(baseAddr, e->getType(), lv.getBaseInfo());
1456 }
1457
1458 case CK_BaseToDerived: {
1459 const auto *derivedClassDecl = e->getType()->castAsCXXRecordDecl();
1460 LValue lv = emitLValue(e->getSubExpr());
1461
1462 // Perform the base-to-derived conversion
1464 getLoc(e->getSourceRange()), lv.getAddress(), derivedClassDecl,
1465 e->path(), /*NullCheckValue=*/false);
1466 // C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
1467 // performed and the object is not of the derived type.
1469
1471 return makeAddrLValue(derived, e->getType(), lv.getBaseInfo());
1472 }
1473
1474 case CK_ZeroToOCLOpaqueType:
1475 llvm_unreachable("NULL to OpenCL opaque type lvalue cast is not valid");
1476 }
1477
1478 llvm_unreachable("Invalid cast kind");
1479}
1480
1482 const MemberExpr *me) {
1483 if (auto *vd = dyn_cast<VarDecl>(me->getMemberDecl())) {
1484 // Try to emit static variable member expressions as DREs.
1485 return DeclRefExpr::Create(
1487 /*RefersToEnclosingVariableOrCapture=*/false, me->getExprLoc(),
1488 me->getType(), me->getValueKind(), nullptr, nullptr, me->isNonOdrUse());
1489 }
1490 return nullptr;
1491}
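
// Editor's note (illustrative sketch, not part of the upstream source): for a
// hypothetical static data member access
//   struct S { static int counter; };
//   int n(S &s) { return s.counter; }
// the member refers to a VarDecl rather than a FieldDecl, so it is rewritten
// into a DeclRefExpr here and emitted through emitDeclRefLValue.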
1492
1494 if (DeclRefExpr *dre = tryToConvertMemberExprToDeclRefExpr(*this, e)) {
1496 return emitDeclRefLValue(dre);
1497 }
1498
1499 Expr *baseExpr = e->getBase();
1500 // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
1501 LValue baseLV;
1502 if (e->isArrow()) {
1503 LValueBaseInfo baseInfo;
1505 Address addr = emitPointerWithAlignment(baseExpr, &baseInfo);
1506 QualType ptrTy = baseExpr->getType()->getPointeeType();
1508 baseLV = makeAddrLValue(addr, ptrTy, baseInfo);
1509 } else {
1511 baseLV = emitLValue(baseExpr);
1512 }
1513
1514 const NamedDecl *nd = e->getMemberDecl();
1515 if (auto *field = dyn_cast<FieldDecl>(nd)) {
1516 LValue lv = emitLValueForField(baseLV, field);
1518 if (getLangOpts().OpenMP) {
1519 // If the member was explicitly marked as nontemporal, mark it as
1520 // nontemporal. If the base lvalue is marked as nontemporal, mark access
1521 // to children as nontemporal too.
1522 cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: OpenMP");
1523 }
1524 return lv;
1525 }
1526
1527 if (isa<FunctionDecl>(nd)) {
1528 cgm.errorNYI(e->getSourceRange(), "emitMemberExpr: FunctionDecl");
1529 return LValue();
1530 }
1531
1532 llvm_unreachable("Unhandled member declaration!");
1533}
1534
1535// Evaluate an expression into a given memory location.
1537 Qualifiers quals, bool isInit) {
1538 // FIXME: This function should take an LValue as an argument.
1539 switch (getEvaluationKind(e->getType())) {
1540 case cir::TEK_Complex: {
1541 LValue lv = makeAddrLValue(location, e->getType());
1542 emitComplexExprIntoLValue(e, lv, isInit);
1543 return;
1544 }
1545
1546 case cir::TEK_Aggregate: {
1547 emitAggExpr(e, AggValueSlot::forAddr(location, quals,
1551 return;
1552 }
1553
1554 case cir::TEK_Scalar: {
1556 LValue lv = makeAddrLValue(location, e->getType());
1557 emitStoreThroughLValue(rv, lv);
1558 return;
1559 }
1560 }
1561
1562 llvm_unreachable("bad evaluation kind");
1563}
1564
1566 const MaterializeTemporaryExpr *m,
1567 const Expr *inner) {
1568 // TODO(cir): cgf.getTargetHooks();
1569 switch (m->getStorageDuration()) {
1570 case SD_FullExpression:
1571 case SD_Automatic: {
1572 QualType ty = inner->getType();
1573
1575
1576 // The temporary memory should be created in the same scope as the extending
1577 // declaration of the temporary materialization expression.
1578 cir::AllocaOp extDeclAlloca;
1579 if (const ValueDecl *extDecl = m->getExtendingDecl()) {
1580 auto extDeclAddrIter = cgf.localDeclMap.find(extDecl);
1581 if (extDeclAddrIter != cgf.localDeclMap.end())
1582 extDeclAlloca = extDeclAddrIter->second.getDefiningOp<cir::AllocaOp>();
1583 }
1584 mlir::OpBuilder::InsertPoint ip;
1585 if (extDeclAlloca)
1586 ip = {extDeclAlloca->getBlock(), extDeclAlloca->getIterator()};
1587 return cgf.createMemTemp(ty, cgf.getLoc(m->getSourceRange()),
1588 cgf.getCounterRefTmpAsString(), /*alloca=*/nullptr,
1589 ip);
1590 }
1591 case SD_Thread:
1592 case SD_Static: {
1593 cgf.cgm.errorNYI(
1594 m->getSourceRange(),
1595 "createReferenceTemporary: static/thread storage duration");
1596 return Address::invalid();
1597 }
1598
1599 case SD_Dynamic:
1600 llvm_unreachable("temporary can't have dynamic storage duration");
1601 }
1602 llvm_unreachable("unknown storage duration");
1603}
1604
1606 const MaterializeTemporaryExpr *m,
1607 const Expr *e, Address referenceTemporary) {
1608 // Objective-C++ ARC:
1609 // If we are binding a reference to a temporary that has ownership, we
1610 // need to perform retain/release operations on the temporary.
1611 //
1612 // FIXME(ogcg): This should be looking at e, not m.
1613 if (m->getType().getObjCLifetime()) {
1614 cgf.cgm.errorNYI(e->getSourceRange(), "pushTemporaryCleanup: ObjCLifetime");
1615 return;
1616 }
1617
1619 if (dk == QualType::DK_none)
1620 return;
1621
1622 switch (m->getStorageDuration()) {
1623 case SD_Static:
1624 case SD_Thread: {
1625 CXXDestructorDecl *referenceTemporaryDtor = nullptr;
1626 if (const auto *classDecl =
1628 classDecl && !classDecl->hasTrivialDestructor())
1629 // Get the destructor for the reference temporary.
1630 referenceTemporaryDtor = classDecl->getDestructor();
1631
1632 if (!referenceTemporaryDtor)
1633 return;
1634
1635 cgf.cgm.errorNYI(e->getSourceRange(), "pushTemporaryCleanup: static/thread "
1636 "storage duration with destructors");
1637 break;
1638 }
1639
1640 case SD_FullExpression:
1641 cgf.pushDestroy(NormalAndEHCleanup, referenceTemporary, e->getType(),
1643 break;
1644
1645 case SD_Automatic:
1646 cgf.cgm.errorNYI(e->getSourceRange(),
1647 "pushTemporaryCleanup: automatic storage duration");
1648 break;
1649
1650 case SD_Dynamic:
1651 llvm_unreachable("temporary cannot have dynamic storage duration");
1652 }
1653}
1654
1656 const MaterializeTemporaryExpr *m) {
1657 const Expr *e = m->getSubExpr();
1658
1659 assert((!m->getExtendingDecl() || !isa<VarDecl>(m->getExtendingDecl()) ||
1660 !cast<VarDecl>(m->getExtendingDecl())->isARCPseudoStrong()) &&
1661 "Reference should never be pseudo-strong!");
1662
1663 // FIXME: Ideally this would use emitAnyExprToMem; however, we cannot do so
1664 // as that will cause the lifetime adjustment to be lost for ARC
1665 auto ownership = m->getType().getObjCLifetime();
1666 if (ownership != Qualifiers::OCL_None &&
1667 ownership != Qualifiers::OCL_ExplicitNone) {
1668 cgm.errorNYI(e->getSourceRange(),
1669 "emitMaterializeTemporaryExpr: ObjCLifetime");
1670 return {};
1671 }
1672
1675 e = e->skipRValueSubobjectAdjustments(commaLHSs, adjustments);
1676
1677 for (const Expr *ignored : commaLHSs)
1678 emitIgnoredExpr(ignored);
1679
1680 if (isa<OpaqueValueExpr>(e)) {
1681 cgm.errorNYI(e->getSourceRange(),
1682 "emitMaterializeTemporaryExpr: OpaqueValueExpr");
1683 return {};
1684 }
1685
1686 // Create and initialize the reference temporary.
1687 Address object = createReferenceTemporary(*this, m, e);
1688
1689 if (auto var = object.getPointer().getDefiningOp<cir::GlobalOp>()) {
1690 // TODO(cir): add something akin to stripPointerCasts() to ptr above
1691 cgm.errorNYI(e->getSourceRange(), "emitMaterializeTemporaryExpr: GlobalOp");
1692 return {};
1693 } else {
1695 emitAnyExprToMem(e, object, Qualifiers(), /*isInitializer=*/true);
1696 }
1697 pushTemporaryCleanup(*this, m, e, object);
1698
1699 // Perform derived-to-base casts and/or field accesses, to get from the
1700 // temporary object we created (and, potentially, for which we extended
1701 // the lifetime) to the subobject we're binding the reference to.
1702 if (!adjustments.empty()) {
1703 cgm.errorNYI(e->getSourceRange(),
1704 "emitMaterializeTemporaryExpr: Adjustments");
1705 return {};
1706 }
1707
1708 return makeAddrLValue(object, m->getType(), AlignmentSource::Decl);
1709}
1710
1711LValue
1714
1715 auto it = opaqueLValues.find(e);
1716 if (it != opaqueLValues.end())
1717 return it->second;
1718
1719 assert(e->isUnique() && "LValue for a nonunique OVE hasn't been emitted");
1720 return emitLValue(e->getSourceExpr());
1721}
1722
1723RValue
1726
1727 auto it = opaqueRValues.find(e);
1728 if (it != opaqueRValues.end())
1729 return it->second;
1730
1731 assert(e->isUnique() && "RValue for a nonunique OVE hasn't been emitted");
1732 return emitAnyExpr(e->getSourceExpr());
1733}
1734
1736 if (e->isFileScope()) {
1737 cgm.errorNYI(e->getSourceRange(), "emitCompoundLiteralLValue: FileScope");
1738 return {};
1739 }
1740
1741 if (e->getType()->isVariablyModifiedType()) {
1742 cgm.errorNYI(e->getSourceRange(),
1743 "emitCompoundLiteralLValue: VariablyModifiedType");
1744 return {};
1745 }
1746
1747 Address declPtr = createMemTemp(e->getType(), getLoc(e->getSourceRange()),
1748 ".compoundliteral");
1749 const Expr *initExpr = e->getInitializer();
1750 LValue result = makeAddrLValue(declPtr, e->getType(), AlignmentSource::Decl);
1751
1752 emitAnyExprToMem(initExpr, declPtr, e->getType().getQualifiers(),
1753 /*Init*/ true);
1754
1755 / Block-scope compound literals are destroyed at the end of the enclosing
1756 / scope in C.
1757 if (!getLangOpts().CPlusPlus && e->getType().isDestructedType()) {
1758 cgm.errorNYI(e->getSourceRange(),
1759 "emitCompoundLiteralLValue: non C++ DestructedType");
1760 return {};
1761 }
1762
1763 return result;
1764}
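For illustration only (hypothetical C input, not part of this file): block-scope compound literals take the createMemTemp path above, while the file-scope and variably-modified forms still route to errorNYI.

    // C99 compound literals at block scope:
    struct point { int x, y; };
    int first(void) {
      int *p = (int[3]){1, 2, 3};             // temporary array, initialized in place
      struct point q = (struct point){4, 5};  // temporary struct copied into q
      return p[0] + q.y;
    }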
1765
1766 LValue CIRGenFunction::emitCallExprLValue(const CallExpr *e) {
1767 RValue rv = emitCallExpr(e);
1768
1769 if (!rv.isScalar()) {
1770 cgm.errorNYI(e->getSourceRange(), "emitCallExprLValue: non-scalar return");
1771 return {};
1772 }
1773
1774 assert(e->getCallReturnType(getContext())->isReferenceType() &&
1775 "Can't have a scalar return unless the return type is a "
1776 "reference type!");
1777
1778 return makeNaturalAlignPointeeAddrLValue(rv.getValue(), e->getType());
1779 }
1780
1781 LValue CIRGenFunction::emitBinaryOperatorLValue(const BinaryOperator *e) {
1782 / Comma expressions just emit their LHS then their RHS as an l-value.
1783 if (e->getOpcode() == BO_Comma) {
1784 emitIgnoredExpr(e->getLHS());
1785 return emitLValue(e->getRHS());
1786 }
1787
1788 if (e->getOpcode() == BO_PtrMemD || e->getOpcode() == BO_PtrMemI)
1789 return emitPointerToDataMemberBinaryExpr(e);
1790
1791 assert(e->getOpcode() == BO_Assign && "unexpected binary l-value");
1792
1793 / Note that in all of these cases, __block variables need the RHS
1794 / evaluated first just in case the variable gets moved by the RHS.
1795
1796 switch (getEvaluationKind(e->getType())) {
1797 case cir::TEK_Scalar: {
1799 if (e->getLHS()->getType().getObjCLifetime() !=
1800 Qualifiers::ObjCLifetime::OCL_None) {
1801 cgm.errorNYI(e->getSourceRange(), "objc lifetimes");
1802 return {};
1803 }
1804
1805 RValue rv = emitAnyExpr(e->getRHS());
1806 LValue lv = emitLValue(e->getLHS());
1807
1808 SourceLocRAIIObject loc{*this, getLoc(e->getSourceRange())};
1809 if (lv.isBitField())
1810 emitStoreThroughBitfieldLValue(rv, lv);
1811 else
1812 emitStoreThroughLValue(rv, lv);
1813
1814 if (getLangOpts().OpenMP) {
1815 cgm.errorNYI(e->getSourceRange(), "openmp");
1816 return {};
1817 }
1818
1819 return lv;
1820 }
1821
1822 case cir::TEK_Complex: {
1823 return emitComplexAssignmentLValue(e);
1824 }
1825
1826 case cir::TEK_Aggregate:
1827 cgm.errorNYI(e->getSourceRange(), "aggregate lvalues");
1828 return {};
1829 }
1830 llvm_unreachable("bad evaluation kind");
1831}
1832
1833/ Emit code to compute the specified expression which
1834/ can have any type. The result is returned as an RValue struct.
1835 RValue CIRGenFunction::emitAnyExpr(const Expr *e, AggValueSlot aggSlot,
1836 bool ignoreResult) {
1837 switch (getEvaluationKind(e->getType())) {
1838 case cir::TEK_Scalar:
1839 return RValue::get(emitScalarExpr(e, ignoreResult));
1840 case cir::TEK_Complex:
1841 return RValue::getComplex(emitComplexExpr(e));
1842 case cir::TEK_Aggregate: {
1843 if (!ignoreResult && aggSlot.isIgnored())
1844 aggSlot = createAggTemp(e->getType(), getLoc(e->getSourceRange()),
1845 "agg-temp");
1846 emitAggExpr(e, aggSlot);
1847 return aggSlot.asRValue();
1848 }
1849 }
1850 llvm_unreachable("bad evaluation kind");
1851}
1852
1853/ Detect the unusual situation where an inline version is shadowed by a
1854/ non-inline version. In that case we should pick the external one
1855/ everywhere. That's GCC behavior too.
1856 static bool onlyHasInlineBuiltinDeclaration(const FunctionDecl *fd) {
1857 for (const FunctionDecl *pd = fd; pd; pd = pd->getPreviousDecl())
1858 if (!pd->isInlineBuiltinDeclaration())
1859 return false;
1860 return true;
1861}
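As a sketch of the situation this helper detects (hypothetical code using the common gnu_inline wrapper idiom; names are illustrative):

    // Every declaration of 'memcpy' visible here is an inline builtin declaration,
    // so direct calls to it are redirected through a private "memcpy.inline" clone
    // as done in emitDirectCallee below.
    extern inline __attribute__((always_inline, gnu_inline))
    void *memcpy(void *dst, const void *src, unsigned long n) {
      return __builtin_memcpy(dst, src, n);
    }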
1862
1863CIRGenCallee CIRGenFunction::emitDirectCallee(const GlobalDecl &gd) {
1864 const auto *fd = cast<FunctionDecl>(gd.getDecl());
1865
1866 if (unsigned builtinID = fd->getBuiltinID()) {
1867 StringRef ident = cgm.getMangledName(gd);
1868 std::string fdInlineName = (ident + ".inline").str();
1869
1870 bool isPredefinedLibFunction =
1871 cgm.getASTContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
1872 / Assume nobuiltins everywhere until we actually read the attributes.
1873 bool hasAttributeNoBuiltin = true;
1875
1876 / When directly calling an inline builtin, call it through its mangled
1877 / name to make it clear it's not the actual builtin.
1878 auto fn = cast<cir::FuncOp>(curFn);
1879 if (fn.getName() != fdInlineName && onlyHasInlineBuiltinDeclaration(fd)) {
1880 cir::FuncOp clone =
1881 mlir::cast_or_null<cir::FuncOp>(cgm.getGlobalValue(fdInlineName));
1882
1883 if (!clone) {
1884 / Create a forward declaration - the body will be generated in
1885 / generateCode when the function definition is processed
1886 cir::FuncOp calleeFunc = emitFunctionDeclPointer(cgm, gd);
1887 mlir::OpBuilder::InsertionGuard guard(builder);
1888 builder.setInsertionPointToStart(cgm.getModule().getBody());
1889
1890 clone = cir::FuncOp::create(builder, calleeFunc.getLoc(), fdInlineName,
1891 calleeFunc.getFunctionType());
1892 clone.setLinkageAttr(cir::GlobalLinkageKindAttr::get(
1893 &cgm.getMLIRContext(), cir::GlobalLinkageKind::InternalLinkage));
1894 clone.setSymVisibility("private");
1895 clone.setInlineKind(cir::InlineKind::AlwaysInline);
1896 }
1897 return CIRGenCallee::forDirect(clone, gd);
1898 }
1899
1900 / Replaceable builtins provide their own implementation of a builtin. If we
1901 / are in an inline builtin implementation, avoid trivial infinite
1902 / recursion. Honor __attribute__((no_builtin("foo"))) or
1903 / __attribute__((no_builtin)) on the current function, unless foo is not a
1904 / predefined library function, in which case we must generate the builtin
1905 / no matter what.
1906 else if (!isPredefinedLibFunction || !hasAttributeNoBuiltin)
1907 return CIRGenCallee::forBuiltin(builtinID, fd);
1908 }
1909
1910 cir::FuncOp callee = emitFunctionDeclPointer(cgm, gd);
1911
1912 assert(!cir::MissingFeatures::hip());
1913
1914 return CIRGenCallee::forDirect(callee, gd);
1915}
1916
1917 RValue CIRGenFunction::getUndefRValue(clang::QualType ty) {
1918 if (ty->isVoidType())
1919 return RValue::get(nullptr);
1920
1921 cgm.errorNYI("unsupported type for undef rvalue");
1922 return RValue::get(nullptr);
1923}
1924
1925 RValue CIRGenFunction::emitCall(clang::QualType calleeTy,
1926 const CIRGenCallee &origCallee,
1927 const clang::CallExpr *e,
1928 ReturnValueSlot returnValue) {
1929 / Get the actual function type. The callee type will always be a pointer to
1930 / function type or a block pointer type.
1931 assert(calleeTy->isFunctionPointerType() &&
1932 "Callee must have function pointer type!");
1933
1934 calleeTy = getContext().getCanonicalType(calleeTy);
1935 auto pointeeTy = cast<PointerType>(calleeTy)->getPointeeType();
1936
1937 CIRGenCallee callee = origCallee;
1938
1939 if (getLangOpts().CPlusPlus)
1941
1942 const auto *fnType = cast<FunctionType>(pointeeTy);
1943
1945
1946 CallArgList args;
1948
1949 emitCallArgs(args, dyn_cast<FunctionProtoType>(fnType), e->arguments(),
1950 e->getDirectCallee());
1951
1952 const CIRGenFunctionInfo &funcInfo =
1953 cgm.getTypes().arrangeFreeFunctionCall(args, fnType);
1954
1955 / C99 6.5.2.2p6:
1956 / If the expression that denotes the called function has a type that does
1957 / not include a prototype, [the default argument promotions are performed].
1958 / If the number of arguments does not equal the number of parameters, the
1959 / behavior is undefined. If the function is defined with a type that
1960 / includes a prototype, and either the prototype ends with an ellipsis (,
1961 / ...) or the types of the arguments after promotion are not compatible
1962 / with the types of the parameters, the behavior is undefined. If the
1963 / function is defined with a type that does not include a prototype, and
1964 / the types of the arguments after promotion are not compatible with those
1965 / of the parameters after promotion, the behavior is undefined [except in
1966 / some trivial cases].
1967 / That is, in the general case, we should assume that a call through an
1968 / unprototyped function type works like a *non-variadic* call. The way we
1969 / make this work is to cast to the exact type of the promoted arguments.
1970 if (isa<FunctionNoProtoType>(fnType)) {
1973 cir::FuncType calleeTy = getTypes().getFunctionType(funcInfo);
1974 / get non-variadic function type
1975 calleeTy = cir::FuncType::get(calleeTy.getInputs(),
1976 calleeTy.getReturnType(), false);
1977 auto calleePtrTy = cir::PointerType::get(calleeTy);
1978
1979 mlir::Operation *fn = callee.getFunctionPointer();
1980 mlir::Value addr;
1981 if (auto funcOp = mlir::dyn_cast<cir::FuncOp>(fn)) {
1982 addr = cir::GetGlobalOp::create(
1983 builder, getLoc(e->getSourceRange()),
1984 cir::PointerType::get(funcOp.getFunctionType()), funcOp.getSymName());
1985 } else {
1986 addr = fn->getResult(0);
1987 }
1988
1989 fn = builder.createBitcast(addr, calleePtrTy).getDefiningOp();
1990 callee.setFunctionPointer(fn);
1991 }
1992
1994 assert(!cir::MissingFeatures::hip());
1996
1997 cir::CIRCallOpInterface callOp;
1998 RValue callResult = emitCall(funcInfo, callee, returnValue, args, &callOp,
1999 getLoc(e->getExprLoc()));
2000
2002
2003 return callResult;
2004}
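A hedged C illustration of the unprototyped-call handling described in the C99 comment above (names are hypothetical):

    void callee();              // K&R-style declaration: no prototype
    void caller(void) {
      void (*fp)() = callee;
      fp(1, 2.0);               // arguments receive the default promotions; the call is
                                // emitted as a non-variadic call, so the callee pointer
                                // is bitcast to the promoted, non-variadic function type
    }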
2005
2006 CIRGenCallee CIRGenFunction::emitCallee(const clang::Expr *e) {
2007 e = e->IgnoreParens();
2008
2009 / Look through function-to-pointer decay.
2010 if (const auto *implicitCast = dyn_cast<ImplicitCastExpr>(e)) {
2011 if (implicitCast->getCastKind() == CK_FunctionToPointerDecay ||
2012 implicitCast->getCastKind() == CK_BuiltinFnToFnPtr) {
2013 return emitCallee(implicitCast->getSubExpr());
2014 }
2015 / When performing an indirect call through a function pointer lvalue, the
2016 / function pointer lvalue is implicitly converted to an rvalue through an
2017 / lvalue-to-rvalue conversion.
2018 assert(implicitCast->getCastKind() == CK_LValueToRValue &&
2019 "unexpected implicit cast on function pointers");
2020 } else if (const auto *declRef = dyn_cast<DeclRefExpr>(e)) {
2021 / Resolve direct calls.
2022 const auto *funcDecl = cast<FunctionDecl>(declRef->getDecl());
2023 return emitDirectCallee(funcDecl);
2024 } else if (auto me = dyn_cast<MemberExpr>(e)) {
2025 if (const auto *fd = dyn_cast<FunctionDecl>(me->getMemberDecl())) {
2026 emitIgnoredExpr(me->getBase());
2027 return emitDirectCallee(fd);
2028 }
2029 / Else fall through to the indirect reference handling below.
2030 } else if (auto *pde = dyn_cast<CXXPseudoDestructorExpr>(e)) {
2031 return CIRGenCallee::forPseudoDestructor(pde);
2032 }
2033
2034 / Otherwise, we have an indirect reference.
2035 mlir::Value calleePtr;
2036 QualType functionType;
2037 if (const auto *ptrType = e->getType()->getAs<clang::PointerType>()) {
2038 calleePtr = emitScalarExpr(e);
2039 functionType = ptrType->getPointeeType();
2040 } else {
2041 functionType = e->getType();
2042 calleePtr = emitLValue(e).getPointer();
2043 }
2044 assert(functionType->isFunctionType());
2045
2046 GlobalDecl gd;
2047 if (const auto *vd =
2048 dyn_cast_or_null<VarDecl>(e->getReferencedDeclOfCallee()))
2049 gd = GlobalDecl(vd);
2050
2051 CIRGenCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(), gd);
2052 CIRGenCallee callee(calleeInfo, calleePtr.getDefiningOp());
2053 return callee;
2054}
2055
2056 RValue CIRGenFunction::emitCallExpr(const clang::CallExpr *e,
2057 ReturnValueSlot returnValue) {
2059
2060 if (const auto *ce = dyn_cast<CXXMemberCallExpr>(e))
2061 return emitCXXMemberCallExpr(ce, returnValue);
2062
2063 if (isa<CUDAKernelCallExpr>(e)) {
2064 cgm.errorNYI(e->getSourceRange(), "call to CUDA kernel");
2065 return RValue::get(nullptr);
2066 }
2067
2068 if (const auto *operatorCall = dyn_cast<CXXOperatorCallExpr>(e)) {
2069 / If the callee decl is a CXXMethodDecl, we need to emit this as a C++
2070 / operator member call.
2071 if (const CXXMethodDecl *md =
2072 dyn_cast_or_null<CXXMethodDecl>(operatorCall->getCalleeDecl()))
2073 return emitCXXOperatorMemberCallExpr(operatorCall, md, returnValue);
2074 / A CXXOperatorCallExpr is created even for explicit object methods, but
2075 / these should be treated like static function calls. Fall through to do
2076 / that.
2077 }
2078
2079 CIRGenCallee callee = emitCallee(e->getCallee());
2080
2081 if (callee.isBuiltin())
2082 return emitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(), e,
2083 returnValue);
2084
2085 if (callee.isPseudoDestructor())
2086 return emitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr());
2087
2088 return emitCall(e->getCallee()->getType(), callee, e, returnValue);
2089}
2090
2091/ Emit code to compute the specified expression, ignoring the result.
2092 void CIRGenFunction::emitIgnoredExpr(const Expr *e) {
2093 if (e->isPRValue()) {
2094 emitAnyExpr(e, AggValueSlot::ignored(), /*ignoreResult=*/true);
2095 return;
2096 }
2097
2098 / Just emit it as an l-value and drop the result.
2099 emitLValue(e);
2100}
2101
2102 Address CIRGenFunction::emitArrayToPointerDecay(const Expr *e,
2103 LValueBaseInfo *baseInfo) {
2105 assert(e->getType()->isArrayType() &&
2106 "Array to pointer decay must have array source type!");
2107
2108 / Expressions of array type can't be bitfields or vector elements.
2109 LValue lv = emitLValue(e);
2110 Address addr = lv.getAddress();
2111
2112 / If the array type was an incomplete type, we need to make sure
2113 / the decay ends up being the right type.
2114 auto lvalueAddrTy = mlir::cast<cir::PointerType>(addr.getPointer().getType());
2115
2116 if (e->getType()->isVariableArrayType())
2117 return addr;
2118
2119 [[maybe_unused]] auto pointeeTy =
2120 mlir::cast<cir::ArrayType>(lvalueAddrTy.getPointee());
2121
2122 [[maybe_unused]] mlir::Type arrayTy = convertType(e->getType());
2123 assert(mlir::isa<cir::ArrayType>(arrayTy) && "expected array");
2124 assert(pointeeTy == arrayTy);
2125
2126 / The result of this decay conversion points to an array element within the
2127 / base lvalue. However, since TBAA currently does not support representing
2128 / accesses to elements of member arrays, we conservatively represent accesses
2129 / to the pointee object as if it had no base lvalue specified.
2130 / TODO: Support TBAA for member arrays.
2131 QualType eltType = e->getType()->castAsArrayTypeUnsafe()->getElementType();
2133
2134 mlir::Value ptr = builder.maybeBuildArrayDecay(
2135 cgm.getLoc(e->getSourceRange()), addr.getPointer(),
2136 convertTypeForMem(eltType));
2137 return Address(ptr, addr.getAlignment());
2138}
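For illustration only (hypothetical C input): constant-size arrays go through the decay cast at the end of the function, while variable-length arrays return the lvalue address directly via the early return above.

    void decays(int n) {
      int fixed[4];
      int *p = fixed;   // constant-size array: decays via maybeBuildArrayDecay
      int vla[n];
      int *q = vla;     // VLA: the lvalue address is returned as-is
      (void)p; (void)q;
    }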
2139
2140/ Given the address of a temporary variable, produce an r-value of its type.
2141 RValue CIRGenFunction::convertTempToRValue(Address addr, clang::QualType type,
2142 clang::SourceLocation loc) {
2143 LValue lvalue = makeAddrLValue(addr, type, AlignmentSource::Decl);
2144 switch (getEvaluationKind(type)) {
2145 case cir::TEK_Complex:
2146 return RValue::getComplex(emitLoadOfComplex(lvalue, loc));
2147 case cir::TEK_Aggregate:
2148 cgm.errorNYI(loc, "convertTempToRValue: aggregate type");
2149 return RValue::get(nullptr);
2150 case cir::TEK_Scalar:
2151 return RValue::get(emitLoadOfScalar(lvalue, loc));
2152 }
2153 llvm_unreachable("bad evaluation kind");
2154}
2155
2156/ Emit an `if` on a boolean condition, filling `then` and `else` into
2157 / appropriate regions.
2158mlir::LogicalResult CIRGenFunction::emitIfOnBoolExpr(const Expr *cond,
2159 const Stmt *thenS,
2160 const Stmt *elseS) {
2161 mlir::Location thenLoc = getLoc(thenS->getSourceRange());
2162 std::optional<mlir::Location> elseLoc;
2163 if (elseS)
2164 elseLoc = getLoc(elseS->getSourceRange());
2165
2166 mlir::LogicalResult resThen = mlir::success(), resElse = mlir::success();
2167 emitIfOnBoolExpr(
2168 cond, /*thenBuilder=*/
2169 [&](mlir::OpBuilder &, mlir::Location) {
2170 LexicalScope lexScope{*this, thenLoc, builder.getInsertionBlock()};
2171 resThen = emitStmt(thenS, /*useCurrentScope=*/true);
2172 },
2173 thenLoc,
2174 /*elseBuilder=*/
2175 [&](mlir::OpBuilder &, mlir::Location) {
2176 assert(elseLoc && "Invalid location for elseS.");
2177 LexicalScope lexScope{*this, *elseLoc, builder.getInsertionBlock()};
2178 resElse = emitStmt(elseS, /*useCurrentScope=*/true);
2179 },
2180 elseLoc);
2181
2182 return mlir::LogicalResult::success(resThen.succeeded() &&
2183 resElse.succeeded());
2184}
2185
2186/ Emit an `if` on a boolean condition, filling `then` and `else` into
2187 / appropriate regions.
2188 cir::IfOp CIRGenFunction::emitIfOnBoolExpr(
2189 const clang::Expr *cond, BuilderCallbackRef thenBuilder,
2190 mlir::Location thenLoc, BuilderCallbackRef elseBuilder,
2191 std::optional<mlir::Location> elseLoc) {
2192 / Attempt to be as accurate as possible with IfOp location, generate
2193 / one fused location that has either 2 or 4 total locations, depending
2194 / on else's availability.
2195 SmallVector<mlir::Location, 2> ifLocs{thenLoc};
2196 if (elseLoc)
2197 ifLocs.push_back(*elseLoc);
2198 mlir::Location loc = mlir::FusedLoc::get(&getMLIRContext(), ifLocs);
2199
2200 / Emit the code with the fully general case.
2201 mlir::Value condV = emitOpOnBoolExpr(loc, cond);
2202 return cir::IfOp::create(builder, loc, condV, elseLoc.has_value(),
2203 /*thenBuilder=*/thenBuilder,
2204 /*elseBuilder=*/elseBuilder);
2205}
2206
2207 / TODO(cir): see EmitBranchOnBoolExpr for extra ideas.
2208mlir::Value CIRGenFunction::emitOpOnBoolExpr(mlir::Location loc,
2209 const Expr *cond) {
2212 cond = cond->IgnoreParens();
2213
2214 / In LLVM the condition is reversed here for efficient codegen.
2215 / This should be done in CIR prior to LLVM lowering; if we do it now,
2216 / we can make CIR-based diagnostics misleading.
2217 / cir.ternary(!x, t, f) -> cir.ternary(x, f, t)
2219
2220 if (const ConditionalOperator *condOp = dyn_cast<ConditionalOperator>(cond)) {
2221 Expr *trueExpr = condOp->getTrueExpr();
2222 Expr *falseExpr = condOp->getFalseExpr();
2223 mlir::Value condV = emitOpOnBoolExpr(loc, condOp->getCond());
2224
2225 mlir::Value ternaryOpRes =
2226 cir::TernaryOp::create(
2227 builder, loc, condV, /*thenBuilder=*/
2228 [this, trueExpr](mlir::OpBuilder &b, mlir::Location loc) {
2229 mlir::Value lhs = emitScalarExpr(trueExpr);
2230 cir::YieldOp::create(b, loc, lhs);
2231 },
2232 /*elseBuilder=*/
2233 [this, falseExpr](mlir::OpBuilder &b, mlir::Location loc) {
2234 mlir::Value rhs = emitScalarExpr(falseExpr);
2235 cir::YieldOp::create(b, loc, rhs);
2236 })
2237 .getResult();
2238
2239 return emitScalarConversion(ternaryOpRes, condOp->getType(),
2240 getContext().BoolTy, condOp->getExprLoc());
2241 }
2242
2243 if (isa<CXXThrowExpr>(cond)) {
2244 cgm.errorNYI("NYI");
2245 return createDummyValue(loc, cond->getType());
2246 }
2247
2248 / If the branch has a condition wrapped by __builtin_unpredictable,
2249 / create metadata that specifies that the branch is unpredictable.
2250 / Don't bother if not optimizing because that metadata would not be used.
2252
2253 / Emit the code with the fully general case.
2254 return evaluateExprAsBool(cond);
2255}
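For illustration only (hypothetical input): a conditional operator used directly as a branch condition is emitted as a single cir.ternary whose scalar result is then converted to bool, per the ConditionalOperator case above.

    int pick(bool useFast, int fastCount, int slowCount) {
      if (useFast ? fastCount : slowCount)   // TernaryOp path, then scalar-to-bool conversion
        return 1;
      return 0;
    }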
2256
2257mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
2258 mlir::Location loc, CharUnits alignment,
2259 bool insertIntoFnEntryBlock,
2260 mlir::Value arraySize) {
2261 mlir::Block *entryBlock = insertIntoFnEntryBlock
2262 ? getCurFunctionEntryBlock()
2263 : curLexScope->getEntryBlock();
2264
2265 / If this is an alloca in the entry basic block of a cir.try and there's
2266 / a surrounding cir.scope, make sure the alloca ends up in the surrounding
2267 / scope instead. This is necessary in order to guarantee all SSA values are
2268 / reachable during cleanups.
2269 if (auto tryOp =
2270 llvm::dyn_cast_if_present<cir::TryOp>(entryBlock->getParentOp())) {
2271 if (auto scopeOp = llvm::dyn_cast<cir::ScopeOp>(tryOp->getParentOp()))
2272 entryBlock = &scopeOp.getScopeRegion().front();
2273 }
2274
2275 return emitAlloca(name, ty, loc, alignment,
2276 builder.getBestAllocaInsertPoint(entryBlock), arraySize);
2277}
2278
2279mlir::Value CIRGenFunction::emitAlloca(StringRef name, mlir::Type ty,
2280 mlir::Location loc, CharUnits alignment,
2281 mlir::OpBuilder::InsertPoint ip,
2282 mlir::Value arraySize) {
2283 / CIR uses its own alloca address space rather than following the target data
2284 / layout like original CodeGen. The data layout awareness should be done in
2285 / the lowering pass instead.
2286 cir::PointerType localVarPtrTy =
2288 mlir::IntegerAttr alignIntAttr = cgm.getSize(alignment);
2289
2290 mlir::Value addr;
2291 {
2292 mlir::OpBuilder::InsertionGuard guard(builder);
2293 builder.restoreInsertionPoint(ip);
2294 addr = builder.createAlloca(loc, /*addr type*/ localVarPtrTy,
2295 /*var type*/ ty, name, alignIntAttr, arraySize);
2297 }
2298 return addr;
2299}
2300
2301 / Note: this function also emits constructor calls to support an MSVC extension
2302 / allowing explicit constructor function calls.
2305 const Expr *callee = ce->getCallee()->IgnoreParens();
2306
2307 if (isa<BinaryOperator>(callee)) {
2308 cgm.errorNYI(ce->getSourceRange(),
2309 "emitCXXMemberCallExpr: C++ binary operator");
2310 return RValue::get(nullptr);
2311 }
2312
2313 const auto *me = cast<MemberExpr>(callee);
2314 const auto *md = cast<CXXMethodDecl>(me->getMemberDecl());
2315
2316 if (md->isStatic()) {
2317 cgm.errorNYI(ce->getSourceRange(), "emitCXXMemberCallExpr: static method");
2318 return RValue::get(nullptr);
2319 }
2320
2321 bool hasQualifier = me->hasQualifier();
2322 NestedNameSpecifier qualifier = me->getQualifier();
2323 bool isArrow = me->isArrow();
2324 const Expr *base = me->getBase();
2325
2326 return emitCXXMemberOrOperatorMemberCallExpr(
2327 ce, md, returnValue, hasQualifier, qualifier, isArrow, base);
2328}
2329
2330 RValue CIRGenFunction::emitReferenceBindingToExpr(const Expr *e) {
2331 / Emit the expression as an lvalue.
2332 LValue lv = emitLValue(e);
2333 assert(lv.isSimple());
2334 mlir::Value value = lv.getPointer();
2335
2337
2338 return RValue::get(value);
2339}
2340
2341 Address CIRGenFunction::emitLoadOfReference(LValue refLVal, mlir::Location loc,
2342 LValueBaseInfo *pointeeBaseInfo) {
2343 if (refLVal.isVolatile())
2344 cgm.errorNYI(loc, "load of volatile reference");
2345
2346 cir::LoadOp load =
2347 cir::LoadOp::create(builder, loc, refLVal.getAddress().getElementType(),
2348 refLVal.getAddress().getPointer());
2349
2351
2352 QualType pointeeType = refLVal.getType()->getPointeeType();
2353 CharUnits align = cgm.getNaturalTypeAlignment(pointeeType, pointeeBaseInfo);
2354 return Address(load, convertTypeForMem(pointeeType), align);
2355}
2356
2357 LValue CIRGenFunction::emitLoadOfReferenceLValue(Address refAddr,
2358 mlir::Location loc,
2359 QualType refTy,
2360 AlignmentSource source) {
2361 LValue refLVal = makeAddrLValue(refAddr, refTy, LValueBaseInfo(source));
2362 LValueBaseInfo pointeeBaseInfo;
2364 Address pointeeAddr = emitLoadOfReference(refLVal, loc, &pointeeBaseInfo);
2365 return makeAddrLValue(pointeeAddr, refLVal.getType()->getPointeeType(),
2366 pointeeBaseInfo);
2367}
2368
2369void CIRGenFunction::emitTrap(mlir::Location loc, bool createNewBlock) {
2370 cir::TrapOp::create(builder, loc);
2371 if (createNewBlock)
2372 builder.createBlock(builder.getBlock()->getParent());
2373}
2374
2375 void CIRGenFunction::emitUnreachable(clang::SourceLocation loc,
2376 bool createNewBlock) {
2378 cir::UnreachableOp::create(builder, getLoc(loc));
2379 if (createNewBlock)
2380 builder.createBlock(builder.getBlock()->getParent());
2381}
2382
2383mlir::Value CIRGenFunction::createDummyValue(mlir::Location loc,
2384 clang::QualType qt) {
2385 mlir::Type t = convertType(qt);
2386 CharUnits alignment = getContext().getTypeAlignInChars(qt);
2387 return builder.createDummyValue(loc, t, alignment);
2388}
2389
2390/===----------------------------------------------------------------------===/
2391/ CIR builder helpers
2392/===----------------------------------------------------------------------===/
2393
2394 Address CIRGenFunction::createMemTemp(QualType ty, mlir::Location loc,
2395 const Twine &name, Address *alloca,
2396 mlir::OpBuilder::InsertPoint ip) {
2397 / FIXME: Should we prefer the preferred type alignment here?
2398 return createMemTemp(ty, getContext().getTypeAlignInChars(ty), loc, name,
2399 alloca, ip);
2400}
2401
2402 Address CIRGenFunction::createMemTemp(QualType ty, CharUnits align,
2403 mlir::Location loc, const Twine &name,
2404 Address *alloca,
2405 mlir::OpBuilder::InsertPoint ip) {
2406 Address result = createTempAlloca(convertTypeForMem(ty), align, loc, name,
2407 /*ArraySize=*/nullptr, alloca, ip);
2408 if (ty->isConstantMatrixType()) {
2410 cgm.errorNYI(loc, "temporary matrix value");
2411 }
2412 return result;
2413}
2414
2415 / This creates an alloca and inserts it into the entry block of the
2416/ current region.
2417 Address CIRGenFunction::createTempAllocaWithoutCast(
2418 mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name,
2419 mlir::Value arraySize, mlir::OpBuilder::InsertPoint ip) {
2420 cir::AllocaOp alloca = ip.isSet()
2421 ? createTempAlloca(ty, loc, name, ip, arraySize)
2422 : createTempAlloca(ty, loc, name, arraySize);
2423 alloca.setAlignmentAttr(cgm.getSize(align));
2424 return Address(alloca, ty, align);
2425}
2426
2427 / This creates an alloca and inserts it into the entry block. The alloca is
2428 / cast to the default address space if necessary.
2429/ TODO(cir): Implement address space casting to match classic codegen's
2430/ CreateTempAlloca behavior with DestLangAS parameter
2431 Address CIRGenFunction::createTempAlloca(mlir::Type ty, CharUnits align,
2432 mlir::Location loc, const Twine &name,
2433 mlir::Value arraySize,
2434 Address *allocaAddr,
2435 mlir::OpBuilder::InsertPoint ip) {
2436 Address alloca =
2437 createTempAllocaWithoutCast(ty, align, loc, name, arraySize, ip);
2438 if (allocaAddr)
2439 *allocaAddr = alloca;
2440 mlir::Value v = alloca.getPointer();
2441 / Alloca always returns a pointer in alloca address space, which may
2442 / be different from the type defined by the language. For example,
2443 / in C++ the auto variables are in the default address space. Therefore
2444 / cast alloca to the default address space when necessary.
2445
2446 LangAS allocaAS = alloca.getAddressSpace()
2448 alloca.getAddressSpace().getValue().getUInt())
2453 getCIRAllocaAddressSpace().getValue().getUInt());
2454 }
2455
2456 if (dstTyAS != allocaAS) {
2458 builder.getPointerTo(ty, dstTyAS));
2459 }
2460 return Address(v, ty, align);
2461}
2462
2463/ This creates an alloca and inserts it into the entry block if \p ArraySize
2464/ is nullptr, otherwise inserts it at the current insertion point of the
2465/ builder.
2466cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
2467 mlir::Location loc,
2468 const Twine &name,
2469 mlir::Value arraySize,
2470 bool insertIntoFnEntryBlock) {
2471 return mlir::cast<cir::AllocaOp>(emitAlloca(name.str(), ty, loc, CharUnits(),
2472 insertIntoFnEntryBlock, arraySize)
2473 .getDefiningOp());
2474}
2475
2476/ This creates an alloca and inserts it into the provided insertion point
2477cir::AllocaOp CIRGenFunction::createTempAlloca(mlir::Type ty,
2478 mlir::Location loc,
2479 const Twine &name,
2480 mlir::OpBuilder::InsertPoint ip,
2481 mlir::Value arraySize) {
2482 assert(ip.isSet() && "Insertion point is not set");
2483 return mlir::cast<cir::AllocaOp>(
2484 emitAlloca(name.str(), ty, loc, CharUnits(), ip, arraySize)
2485 .getDefiningOp());
2486}
2487
2488/ Try to emit a reference to the given value without producing it as
2489/ an l-value. For many cases, this is just an optimization, but it avoids
2490/ us needing to emit global copies of variables if they're named without
2491/ triggering a formal use in a context where we can't emit a direct
2492/ reference to them, for instance if a block or lambda or a member of a
2493/ local class uses a const int variable or constexpr variable from an
2494/ enclosing function.
2495/
2496/ For named members of enums, this is the only way they are emitted.
2497 CIRGenFunction::ConstantEmission
2498 CIRGenFunction::tryEmitAsConstant(const DeclRefExpr *refExpr) {
2499 const ValueDecl *value = refExpr->getDecl();
2500
2501 / There is a lot more to do here, but for now only EnumConstantDecl is
2502 / supported.
2504
2505 / The value needs to be an enum constant or a constant variable.
2506 if (!isa<EnumConstantDecl>(value))
2507 return ConstantEmission();
2508
2509 Expr::EvalResult result;
2510 if (!refExpr->EvaluateAsRValue(result, getContext()))
2511 return ConstantEmission();
2512
2513 QualType resultType = refExpr->getType();
2514
2515 / As long as we're only handling EnumConstantDecl, there should be no
2516 / side-effects.
2517 assert(!result.HasSideEffects);
2518
2519 / Emit as a constant.
2520 / FIXME(cir): have emitAbstract build a TypedAttr instead (this requires
2521 / somewhat heavy refactoring...)
2522 mlir::Attribute c = ConstantEmitter(*this).emitAbstract(
2523 refExpr->getLocation(), result.Val, resultType);
2524 mlir::TypedAttr cstToEmit = mlir::dyn_cast_if_present<mlir::TypedAttr>(c);
2525 assert(cstToEmit && "expected a typed attribute");
2526
2528
2529 return ConstantEmission::forValue(cstToEmit);
2530}
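For illustration only (hypothetical input): a reference to an enumerator is the one case currently folded here, so no lvalue is ever materialized for it.

    enum Color { Red = 1, Green = 2 };
    int useGreen() { return Green; }   // DeclRefExpr to an EnumConstantDecl: emitted as a constant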
2531
2532 CIRGenFunction::ConstantEmission
2533 CIRGenFunction::tryEmitAsConstant(const MemberExpr *me) {
2534 if (DeclRefExpr *dre = tryToConvertMemberExprToDeclRefExpr(*this, me))
2535 return tryEmitAsConstant(dre);
2536 return ConstantEmission();
2537}
2538
2539 mlir::Value CIRGenFunction::emitScalarConstant(
2540 const CIRGenFunction::ConstantEmission &constant, Expr *e) {
2541 assert(constant && "not a constant");
2542 if (constant.isReference()) {
2543 cgm.errorNYI(e->getSourceRange(), "emitScalarConstant: reference");
2544 return {};
2545 }
2546 return builder.getConstant(getLoc(e->getSourceRange()), constant.getValue());
2547}
2548
2549 LValue CIRGenFunction::emitPredefinedLValue(const PredefinedExpr *e) {
2550 const StringLiteral *sl = e->getFunctionName();
2551 assert(sl != nullptr && "No StringLiteral name in PredefinedExpr");
2552 auto fn = cast<cir::FuncOp>(curFn);
2553 StringRef fnName = fn.getName();
2554 fnName.consume_front("\01");
2555 std::array<StringRef, 2> nameItems = {
2557 std::string gvName = llvm::join(nameItems, ".");
2558 if (isa_and_nonnull<BlockDecl>(curCodeDecl))
2559 cgm.errorNYI(e->getSourceRange(), "predefined lvalue in block");
2560
2561 return emitStringLiteralLValue(sl, gvName);
2562}
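For illustration only (hypothetical input): the predefined identifiers handled here become string-literal globals whose names combine the identifier kind with the enclosing function's name.

    void widget() {
      const char *a = __func__;              // "widget"
      const char *b = __PRETTY_FUNCTION__;   // "void widget()"
      (void)a; (void)b;
    }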
2563
2568
2569namespace {
2570/ Handle the case where the condition is a constant evaluatable simple integer,
2571/ which means we don't have to separately handle the true/false blocks.
2572std::optional<LValue> handleConditionalOperatorLValueSimpleCase(
2573 CIRGenFunction &cgf, const AbstractConditionalOperator *e) {
2574 const Expr *condExpr = e->getCond();
2575 llvm::APSInt condExprVal;
2576 if (!cgf.constantFoldsToSimpleInteger(condExpr, condExprVal))
2577 return std::nullopt;
2578
2579 const Expr *live = e->getTrueExpr(), *dead = e->getFalseExpr();
2580 if (!condExprVal.getBoolValue())
2581 std::swap(live, dead);
2582
2583 if (cgf.containsLabel(dead))
2584 return std::nullopt;
2585
2586 / If the true case is live, we need to track its region.
2589 / If the live case is a throw expression, we emit it and return an undefined
2590 / lvalue because its value can't be used.
2591 if (auto *throwExpr = dyn_cast<CXXThrowExpr>(live->IgnoreParens())) {
2592 cgf.emitCXXThrowExpr(throwExpr);
2593 / Return an undefined lvalue - the throw terminates execution
2594 / so this value will never actually be used
2595 mlir::Type elemTy = cgf.convertType(dead->getType());
2596 mlir::Value undefPtr =
2597 cgf.getBuilder().getNullPtr(cgf.getBuilder().getPointerTo(elemTy),
2598 cgf.getLoc(throwExpr->getSourceRange()));
2599 return cgf.makeAddrLValue(Address(undefPtr, elemTy, CharUnits::One()),
2600 dead->getType());
2601 }
2602 return cgf.emitLValue(live);
2603}
2604
2605/ Emit the operand of a glvalue conditional operator. This is either a glvalue
2606/ or a (possibly-parenthesized) throw-expression. If this is a throw, no
2607/ LValue is returned and the current block has been terminated.
2608static std::optional<LValue> emitLValueOrThrowExpression(CIRGenFunction &cgf,
2609 const Expr *operand) {
2610 if (auto *throwExpr = dyn_cast<CXXThrowExpr>(operand->IgnoreParens())) {
2611 cgf.emitCXXThrowExpr(throwExpr);
2612 return std::nullopt;
2613 }
2614
2615 return cgf.emitLValue(operand);
2616}
2617} / namespace
2618
2619/ Create and generate the 3 blocks for a conditional operator.
2620/ Leaves the 'current block' in the continuation basic block.
2621template <typename FuncTy>
2622 CIRGenFunction::ConditionalInfo
2623 CIRGenFunction::emitConditionalBlocks(const AbstractConditionalOperator *e,
2624 const FuncTy &branchGenFunc) {
2625 ConditionalInfo info;
2626 ConditionalEvaluation eval(*this);
2627 mlir::Location loc = getLoc(e->getSourceRange());
2628 CIRGenBuilderTy &builder = getBuilder();
2629
2630 mlir::Value condV = emitOpOnBoolExpr(loc, e->getCond());
2631 SmallVector<mlir::OpBuilder::InsertPoint, 2> insertPoints;
2632 mlir::Type yieldTy{};
2633
2634 auto emitBranch = [&](mlir::OpBuilder &b, mlir::Location loc,
2635 const Expr *expr, std::optional<LValue> &resultLV) {
2636 CIRGenFunction::LexicalScope lexScope{*this, loc, b.getInsertionBlock()};
2637 curLexScope->setAsTernary();
2638
2640 eval.beginEvaluation();
2641 resultLV = branchGenFunc(*this, expr);
2642 mlir::Value resultPtr = resultLV ? resultLV->getPointer() : mlir::Value();
2643 eval.endEvaluation();
2644
2645 if (resultPtr) {
2646 yieldTy = resultPtr.getType();
2647 cir::YieldOp::create(b, loc, resultPtr);
2648 } else {
2649 / If LHS or RHS is a void expression we need
2650 / to patch arms so as to properly match yield types.
2651 / If the current block's terminator is an UnreachableOp (from a throw),
2652 / we don't need a yield
2653 if (builder.getInsertionBlock()->mightHaveTerminator()) {
2654 mlir::Operation *terminator =
2655 builder.getInsertionBlock()->getTerminator();
2656 if (isa_and_nonnull<cir::UnreachableOp>(terminator))
2657 insertPoints.push_back(b.saveInsertionPoint());
2658 }
2659 }
2660 };
2661
2662 info.result = cir::TernaryOp::create(
2663 builder, loc, condV,
2664 /*trueBuilder=*/
2665 [&](mlir::OpBuilder &b, mlir::Location loc) {
2666 emitBranch(b, loc, e->getTrueExpr(), info.lhs);
2667 },
2668 /*falseBuilder=*/
2669 [&](mlir::OpBuilder &b, mlir::Location loc) {
2670 emitBranch(b, loc, e->getFalseExpr(), info.rhs);
2671 })
2672 .getResult();
2673
2674 / If both arms are void, so be it.
2675 if (!yieldTy)
2676 yieldTy = voidTy;
2677
2678 / Insert required yields.
2679 for (mlir::OpBuilder::InsertPoint &toInsert : insertPoints) {
2680 mlir::OpBuilder::InsertionGuard guard(builder);
2681 builder.restoreInsertionPoint(toInsert);
2682
2683 / Block does not return: build empty yield.
2684 if (!yieldTy) {
2685 cir::YieldOp::create(builder, loc);
2686 } else { / Block returns: set null yield value.
2687 mlir::Value op0 = builder.getNullValue(yieldTy, loc);
2688 cir::YieldOp::create(builder, loc, op0);
2689 }
2690 }
2691
2692 return info;
2693}
2694
2695 LValue CIRGenFunction::emitConditionalOperatorLValue(
2696 const AbstractConditionalOperator *expr) {
2697 if (!expr->isGLValue()) {
2698 / ?: here should be an aggregate.
2699 assert(hasAggregateEvaluationKind(expr->getType()) &&
2700 "Unexpected conditional operator!");
2701 return emitAggExprToLValue(expr);
2702 }
2703
2704 OpaqueValueMapping binding(*this, expr);
2705 if (std::optional<LValue> res =
2706 handleConditionalOperatorLValueSimpleCase(*this, expr))
2707 return *res;
2708
2709 ConditionalInfo info =
2710 emitConditionalBlocks(expr, [](CIRGenFunction &cgf, const Expr *e) {
2711 return emitLValueOrThrowExpression(cgf, e);
2712 });
2713
2714 if ((info.lhs && !info.lhs->isSimple()) ||
2715 (info.rhs && !info.rhs->isSimple())) {
2716 cgm.errorNYI(expr->getSourceRange(),
2717 "unsupported conditional operator with non-simple lvalue");
2718 return LValue();
2719 }
2720
2721 if (info.lhs && info.rhs) {
2722 Address lhsAddr = info.lhs->getAddress();
2723 Address rhsAddr = info.rhs->getAddress();
2724 Address result(info.result, lhsAddr.getElementType(),
2725 std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment()));
2726 AlignmentSource alignSource =
2727 std::max(info.lhs->getBaseInfo().getAlignmentSource(),
2728 info.rhs->getBaseInfo().getAlignmentSource());
2730 return makeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource));
2731 }
2732
2733 assert((info.lhs || info.rhs) &&
2734 "both operands of glvalue conditional are throw-expressions?");
2735 return info.lhs ? *info.lhs : *info.rhs;
2736}
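For illustration only (hypothetical input): a glvalue conditional operator. A constant-foldable condition takes the simple case above and emits only the live arm; otherwise both arms are emitted as regions of a cir.ternary that yields the selected address.

    void assign(bool c, int &x, int &y) {
      (c ? x : y) = 42;     // general case: ternary over lvalues, store through the result
      (true ? x : y) = 7;   // constant condition: only 'x' is emitted
    }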
2737
2738/ An LValue is a candidate for having its loads and stores be made atomic if
2739/ we are operating under /volatile:ms *and* the LValue itself is volatile and
2740/ performing such an operation can be performed without a libcall.
2741 bool CIRGenFunction::isLValueSuitableForInlineAtomic(LValue lv) {
2742 if (!cgm.getLangOpts().MSVolatile)
2743 return false;
2744
2745 cgm.errorNYI("LValueSuitableForInlineAtomic LangOpts MSVolatile");
2746 return false;
2747}
#define V(N, I)
Provides definitions for the various language-specific address spaces.
llvm::function_ref< void(mlir::OpBuilder &, mlir::Location)> BuilderCallbackRef
Definition CIRDialect.h:37
static Address createReferenceTemporary(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *inner)
static bool isAAPCS(const TargetInfo &targetInfo)
Helper method to check if the underlying ABI is AAPCS.
static LValue emitFunctionDeclLValue(CIRGenFunction &cgf, const Expr *e, GlobalDecl gd)
static CharUnits getArrayElementAlign(CharUnits arrayAlign, mlir::Value idx, CharUnits eltSize)
static void pushTemporaryCleanup(CIRGenFunction &cgf, const MaterializeTemporaryExpr *m, const Expr *e, Address referenceTemporary)
static cir::IntAttr getConstantIndexOrNull(mlir::Value idx)
static const Expr * getSimpleArrayDecayOperand(const Expr *e)
If the specified expr is a simple decay from an array to pointer, return the array subexpression.
static QualType getFixedSizeElementType(const ASTContext &astContext, const VariableArrayType *vla)
static bool canEmitSpuriousReferenceToVariable(CIRGenFunction &cgf, const DeclRefExpr *e, const VarDecl *vd)
Determine whether we can emit a reference to vd from the current context, despite not necessarily hav...
static DeclRefExpr * tryToConvertMemberExprToDeclRefExpr(CIRGenFunction &cgf, const MemberExpr *me)
static cir::FuncOp emitFunctionDeclPointer(CIRGenModule &cgm, GlobalDecl gd)
static LValue emitGlobalVarDeclLValue(CIRGenFunction &cgf, const Expr *e, const VarDecl *vd)
static mlir::Value emitArraySubscriptPtr(CIRGenFunction &cgf, mlir::Location beginLoc, mlir::Location endLoc, mlir::Value ptr, mlir::Type eltTy, mlir::Value idx, bool shouldDecay)
static LValue emitCapturedFieldLValue(CIRGenFunction &cgf, const FieldDecl *fd, mlir::Value thisValue)
static bool onlyHasInlineBuiltinDeclaration(const FunctionDecl *fd)
Defines the clang::Expr interface and subclasses for C++ expressions.
__device__ __2f16 b
__device__ __2f16 float c
static OpBuilder::InsertPoint getBestAllocaInsertPoint(mlir::Block *block)
cir::GetMemberOp createGetMember(mlir::Location loc, mlir::Type resultTy, mlir::Value base, llvm::StringRef name, unsigned index)
cir::PointerType getPointerTo(mlir::Type ty)
cir::ConstantOp getNullPtr(mlir::Type ty, mlir::Location loc)
mlir::Value createBitcast(mlir::Value src, mlir::Type newTy)
mlir::Value createAlloca(mlir::Location loc, cir::PointerType addrType, mlir::Type type, llvm::StringRef name, mlir::IntegerAttr alignment, mlir::Value dynAllocSize)
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
static CanQualType getCanonicalType(QualType T)
Return the canonical (structural) type corresponding to the specified potentially non-canonical type ...
QualType getPointerType(QualType T) const
Return the uniqued reference to the type for a pointer to the specified type.
CanQualType BoolTy
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
const VariableArrayType * getAsVariableArrayType(QualType T) const
CanQualType getCanonicalTagType(const TagDecl *TD) const
AbstractConditionalOperator - An abstract base class for ConditionalOperator and BinaryConditionalOpe...
Definition Expr.h:4287
Expr * getCond() const
getCond - Return the expression representing the condition for the ?
Definition Expr.h:4465
Expr * getTrueExpr() const
getTrueExpr - Return the subexpression representing the value of the expression if the condition eval...
Definition Expr.h:4471
Expr * getFalseExpr() const
getFalseExpr - Return the subexpression representing the value of the expression if the condition eva...
Definition Expr.h:4477
ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
Definition Expr.h:2721
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:2776
Expr * getLHS()
An array access can be written A[4] or 4[A] (both are equivalent).
Definition Expr.h:2750
SourceLocation getBeginLoc() const LLVM_READONLY
Definition Expr.h:2764
SourceLocation getEndLoc() const
Definition Expr.h:2767
QualType getElementType() const
Definition TypeBase.h:3734
A builtin binary operation expression such as "x + y" or "x <= y".
Definition Expr.h:3972
Expr * getLHS() const
Definition Expr.h:4022
Expr * getRHS() const
Definition Expr.h:4024
Opcode getOpcode() const
Definition Expr.h:4017
mlir::Value getPointer() const
Definition Address.h:90
mlir::Type getElementType() const
Definition Address.h:117
static Address invalid()
Definition Address.h:69
Address withElementType(CIRGenBuilderTy &builder, mlir::Type ElemTy) const
Return address with different element type, a bitcast pointer, and the same alignment.
cir::TargetAddressSpaceAttr getAddressSpace() const
Definition Address.h:125
clang::CharUnits getAlignment() const
Definition Address.h:130
mlir::Type getType() const
Definition Address.h:109
bool isValid() const
Definition Address.h:70
mlir::Operation * getDefiningOp() const
Get the operation which defines this address.
Definition Address.h:133
An aggregate value slot.
IsDestructed_t
This is set to true if the slot might be aliased and it's not undefined behavior to access it through...
static AggValueSlot forAddr(Address addr, clang::Qualifiers quals, IsDestructed_t isDestructed, IsAliased_t isAliased, Overlap_t mayOverlap, IsZeroed_t isZeroed=IsNotZeroed)
static AggValueSlot ignored()
Returns an aggregate value slot indicating that the aggregate value is being ignored.
Address createElementBitCast(mlir::Location loc, Address addr, mlir::Type destType)
Cast the element type of the given address to a different type, preserving information like the align...
mlir::Value getArrayElement(mlir::Location arrayLocBegin, mlir::Location arrayLocEnd, mlir::Value arrayPtr, mlir::Type eltTy, mlir::Value idx, bool shouldDecay)
Create a cir.ptr_stride operation to get access to an array element.
Abstract information about a function or function prototype.
Definition CIRGenCall.h:27
bool isPseudoDestructor() const
Definition CIRGenCall.h:121
void setFunctionPointer(mlir::Operation *functionPtr)
Definition CIRGenCall.h:183
const clang::FunctionDecl * getBuiltinDecl() const
Definition CIRGenCall.h:97
const CXXPseudoDestructorExpr * getPseudoDestructorExpr() const
Definition CIRGenCall.h:125
static CIRGenCallee forDirect(mlir::Operation *funcPtr, const CIRGenCalleeInfo &abstractInfo=CIRGenCalleeInfo())
Definition CIRGenCall.h:90
unsigned getBuiltinID() const
Definition CIRGenCall.h:101
static CIRGenCallee forBuiltin(unsigned builtinID, const clang::FunctionDecl *builtinDecl)
Definition CIRGenCall.h:106
mlir::Operation * getFunctionPointer() const
Definition CIRGenCall.h:145
static CIRGenCallee forPseudoDestructor(const clang::CXXPseudoDestructorExpr *expr)
Definition CIRGenCall.h:115
An object to manage conditionally-evaluated expressions.
static ConstantEmission forValue(mlir::TypedAttr c)
An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
mlir::Value emitComplexToScalarConversion(mlir::Value src, QualType srcTy, QualType dstTy, SourceLocation loc)
Emit a conversion from the specified complex type to the specified destination type,...
void emitCallArgs(CallArgList &args, PrototypeWrapper prototype, llvm::iterator_range< clang::CallExpr::const_arg_iterator > argRange, AbstractCallee callee=AbstractCallee(), unsigned paramsToSkip=0)
mlir::Type convertType(clang::QualType t)
LValue emitOpaqueValueLValue(const OpaqueValueExpr *e)
static cir::TypeEvaluationKind getEvaluationKind(clang::QualType type)
Return the cir::TypeEvaluationKind of QualType type.
RValue convertTempToRValue(Address addr, clang::QualType type, clang::SourceLocation loc)
Given the address of a temporary variable, produce an r-value of its type.
Address emitCXXMemberDataPointerAddress(const Expr *e, Address base, mlir::Value memberPtr, const MemberPointerType *memberPtrType, LValueBaseInfo *baseInfo)
CIRGenTypes & getTypes() const
Address emitPointerWithAlignment(const clang::Expr *expr, LValueBaseInfo *baseInfo=nullptr)
Given an expression with a pointer type, emit the value and compute our best estimate of the alignmen...
RValue emitLoadOfLValue(LValue lv, SourceLocation loc)
Given an expression that represents a value lvalue, this method emits the address of the lvalue,...
const clang::LangOptions & getLangOpts() const
cir::AllocaOp createTempAlloca(mlir::Type ty, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, bool insertIntoFnEntryBlock=false)
This creates an alloca and inserts it into the entry block if ArraySize is nullptr,...
void emitTrap(mlir::Location loc, bool createNewBlock)
Emit a trap instruction, which is used to abort the program in an abnormal way, usually for debugging...
mlir::Block * getCurFunctionEntryBlock()
RValue emitCXXMemberCallExpr(const clang::CXXMemberCallExpr *e, ReturnValueSlot returnValue)
LValue emitLValueForBitField(LValue base, const FieldDecl *field)
mlir::LogicalResult emitIfOnBoolExpr(const clang::Expr *cond, const clang::Stmt *thenS, const clang::Stmt *elseS)
Emit an if on a boolean condition to the specified blocks.
mlir::Value emitComplexExpr(const Expr *e)
Emit the computation of the specified expression of complex type, returning the result.
LValue makeNaturalAlignPointeeAddrLValue(mlir::Value v, clang::QualType t)
Given a value of type T* that may not be to a complete object, construct an l-vlaue withi the natural...
RValue emitCallExpr(const clang::CallExpr *e, ReturnValueSlot returnValue=ReturnValueSlot())
LValue emitMemberExpr(const MemberExpr *e)
LValue emitConditionalOperatorLValue(const AbstractConditionalOperator *expr)
LValue emitLValue(const clang::Expr *e)
Emit code to compute a designator that specifies the location of the expression.
Address makeNaturalAddressForPointer(mlir::Value ptr, QualType t, CharUnits alignment, bool forPointeeType=false, LValueBaseInfo *baseInfo=nullptr)
Construct an address with the natural alignment of T.
LValue emitLValueForLambdaField(const FieldDecl *field)
mlir::Value evaluateExprAsBool(const clang::Expr *e)
Perform the usual unary conversions on the specified expression and compare the result against zero,...
LValue makeNaturalAlignAddrLValue(mlir::Value val, QualType ty)
bool constantFoldsToSimpleInteger(const clang::Expr *cond, llvm::APSInt &resultInt, bool allowLabels=false)
If the specified expression does not fold to a constant, or if it does fold but contains a label,...
mlir::Location getLoc(clang::SourceLocation srcLoc)
Helpers to convert Clang's SourceLocation to a MLIR Location.
mlir::Value emitOpOnBoolExpr(mlir::Location loc, const clang::Expr *cond)
TODO(cir): see EmitBranchOnBoolExpr for extra ideas).
Address getAddressOfBaseClass(Address value, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue, SourceLocation loc)
LValue emitLoadOfReferenceLValue(Address refAddr, mlir::Location loc, QualType refTy, AlignmentSource source)
mlir::Value emitScalarPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind kind, bool isPre)
void emitAnyExprToMem(const Expr *e, Address location, Qualifiers quals, bool isInitializer)
Emits the code necessary to evaluate an arbitrary expression into the given memory location.
LValue getOrCreateOpaqueLValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its LValue mapping if it exists, otherwise create one.
RValue emitReferenceBindingToExpr(const Expr *e)
Emits a reference binding to the passed in expression.
const TargetCIRGenInfo & getTargetHooks() const
LValue emitArraySubscriptExpr(const clang::ArraySubscriptExpr *e)
mlir::Operation * curFn
The current function or global initializer that is generated code for.
Address emitExtVectorElementLValue(LValue lv, mlir::Location loc)
Generates lvalue for partial ext_vector access.
mlir::Value emitScalarConversion(mlir::Value src, clang::QualType srcType, clang::QualType dstType, clang::SourceLocation loc)
Emit a conversion from the specified type to the specified destination type, both of which are CIR sc...
Address getAddressOfDerivedClass(mlir::Location loc, Address baseAddr, const CXXRecordDecl *derived, llvm::iterator_range< CastExpr::path_const_iterator > path, bool nullCheckValue)
AggValueSlot createAggTemp(QualType ty, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr)
Create a temporary memory object for the given aggregate type.
RValue emitLoadOfExtVectorElementLValue(LValue lv)
mlir::Type convertTypeForMem(QualType t)
mlir::Value emitAlloca(llvm::StringRef name, mlir::Type ty, mlir::Location loc, clang::CharUnits alignment, bool insertIntoFnEntryBlock, mlir::Value arraySize=nullptr)
void emitUnreachable(clang::SourceLocation loc, bool createNewBlock)
Emit a reached-unreachable diagnostic if loc is valid and runtime checking is enabled.
mlir::Value createDummyValue(mlir::Location loc, clang::QualType qt)
mlir::Value emitLoadOfComplex(LValue src, SourceLocation loc)
Load a complex number from the specified l-value.
LValue emitAggExprToLValue(const Expr *e)
void emitStoreOfScalar(mlir::Value value, Address addr, bool isVolatile, clang::QualType ty, LValueBaseInfo baseInfo, bool isInit=false, bool isNontemporal=false)
void pushDestroy(QualType::DestructionKind dtorKind, Address addr, QualType type)
Push the standard destructor for the given type as at least a normal cleanup.
RValue getUndefRValue(clang::QualType ty)
Get an appropriate 'undef' rvalue for the given type.
Address returnValue
The temporary alloca to hold the return value.
static bool hasAggregateEvaluationKind(clang::QualType type)
LValue emitPointerToDataMemberBinaryExpr(const BinaryOperator *e)
mlir::Value emitComplexPrePostIncDec(const UnaryOperator *e, LValue lv, cir::UnaryOpKind op, bool isPre)
LValue emitUnaryOpLValue(const clang::UnaryOperator *e)
RValue emitLoadOfBitfieldLValue(LValue lv, SourceLocation loc)
RValue emitCall(const CIRGenFunctionInfo &funcInfo, const CIRGenCallee &callee, ReturnValueSlot returnValue, const CallArgList &args, cir::CIRCallOpInterface *callOp, mlir::Location loc)
LValue emitComplexAssignmentLValue(const BinaryOperator *e)
const clang::Decl * curCodeDecl
This is the inner-most code context, which includes blocks.
LValue emitLValueForFieldInitialization(LValue base, const clang::FieldDecl *field, llvm::StringRef fieldName)
Like emitLValueForField, excpet that if the Field is a reference, this will return the address of the...
LValue emitCallExprLValue(const clang::CallExpr *e)
LValue emitStringLiteralLValue(const StringLiteral *e, llvm::StringRef name=".str")
mlir::Value emitToMemory(mlir::Value value, clang::QualType ty)
Given a value and its clang type, returns the value casted to its memory representation.
LValue emitLValueForField(LValue base, const clang::FieldDecl *field)
mlir::Value emitScalarExpr(const clang::Expr *e, bool ignoreResultAssign=false)
Emit the computation of the specified expression of scalar type.
Address emitLoadOfReference(LValue refLVal, mlir::Location loc, LValueBaseInfo *pointeeBaseInfo)
bool shouldNullCheckClassCastValue(const CastExpr *ce)
CIRGenBuilderTy & getBuilder()
LValue emitBinaryOperatorLValue(const BinaryOperator *e)
Address getAddrOfBitFieldStorage(LValue base, const clang::FieldDecl *field, mlir::Type fieldType, unsigned index)
mlir::MLIRContext & getMLIRContext()
LValue emitCastLValue(const CastExpr *e)
Casts are never lvalues unless that cast is to a reference type.
mlir::Value emitLoadOfScalar(LValue lvalue, SourceLocation loc)
EmitLoadOfScalar - Load a scalar value from an address, taking care to appropriately convert from the...
bool containsLabel(const clang::Stmt *s, bool ignoreCaseStmts=false)
Return true if the statement contains a label in it.
DeclMapTy localDeclMap
This keeps track of the CIR allocas or globals for local C declarations.
LValue emitDeclRefLValue(const clang::DeclRefExpr *e)
void emitComplexExprIntoLValue(const Expr *e, LValue dest, bool isInit)
llvm::DenseMap< const clang::ValueDecl *, clang::FieldDecl * > lambdaCaptureFields
ConstantEmission tryEmitAsConstant(const DeclRefExpr *refExpr)
Try to emit a reference to the given value without producing it as an l-value.
void emitCXXThrowExpr(const CXXThrowExpr *e)
LValue makeAddrLValue(Address addr, QualType ty, AlignmentSource source=AlignmentSource::Type)
int64_t getAccessedFieldNo(unsigned idx, mlir::ArrayAttr elts)
LValue emitPredefinedLValue(const PredefinedExpr *e)
RValue emitAnyExpr(const clang::Expr *e, AggValueSlot aggSlot=AggValueSlot::ignored(), bool ignoreResult=false)
Emit code to compute the specified expression which can have any type.
llvm::DenseMap< const OpaqueValueExpr *, RValue > opaqueRValues
Address emitArrayToPointerDecay(const Expr *e, LValueBaseInfo *baseInfo=nullptr)
void emitAtomicStore(RValue rvalue, LValue dest, bool isInit)
mlir::Value emitStoreThroughBitfieldLValue(RValue src, LValue dstresult)
llvm::DenseMap< const OpaqueValueExpr *, LValue > opaqueLValues
Keeps track of the current set of opaque value expressions.
CIRGenFunction(CIRGenModule &cgm, CIRGenBuilderTy &builder, bool suppressNewContext=false)
std::optional< mlir::Location > currSrcLoc
Use to track source locations across nested visitor traversals.
LValue emitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *e)
LValue emitExtVectorElementExpr(const ExtVectorElementExpr *e)
clang::ASTContext & getContext() const
RValue emitCXXMemberOrOperatorMemberCallExpr(const clang::CallExpr *ce, const clang::CXXMethodDecl *md, ReturnValueSlot returnValue, bool hasQualifier, clang::NestedNameSpecifier qualifier, bool isArrow, const clang::Expr *base)
mlir::Value emitScalarConstant(const ConstantEmission &constant, Expr *e)
RValue emitBuiltinExpr(const clang::GlobalDecl &gd, unsigned builtinID, const clang::CallExpr *e, ReturnValueSlot returnValue)
RValue emitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *e, const CXXMethodDecl *md, ReturnValueSlot returnValue)
void emitStoreThroughLValue(RValue src, LValue dst, bool isInit=false)
Store the specified rvalue into the specified lvalue, where both are guaranteed to the have the same ...
bool isLValueSuitableForInlineAtomic(LValue lv)
An LValue is a candidate for having its loads and stores be made atomic if we are operating under /vo...
mlir::LogicalResult emitStmt(const clang::Stmt *s, bool useCurrentScope, llvm::ArrayRef< const Attr * > attrs={})
RValue getOrCreateOpaqueRValueMapping(const OpaqueValueExpr *e)
Given an opaque value expression, return its RValue mapping if it exists, otherwise create one.
Address createTempAllocaWithoutCast(mlir::Type ty, CharUnits align, mlir::Location loc, const Twine &name="tmp", mlir::Value arraySize=nullptr, mlir::OpBuilder::InsertPoint ip={})
This creates a alloca and inserts it into the entry block of the current region.
void emitIgnoredExpr(const clang::Expr *e)
Emit code to compute the specified expression, ignoring the result.
Address createMemTemp(QualType t, mlir::Location loc, const Twine &name="tmp", Address *alloca=nullptr, mlir::OpBuilder::InsertPoint ip={})
Create a temporary memory object of the given type, with appropriate alignmen and cast it to the defa...
mlir::Value emitDynamicCast(Address thisAddr, const CXXDynamicCastExpr *dce)
void emitAggExpr(const clang::Expr *e, AggValueSlot slot)
ConditionalInfo emitConditionalBlocks(const AbstractConditionalOperator *e, const FuncTy &branchGenFunc)
RValue emitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *expr)
LValue emitCompoundLiteralLValue(const CompoundLiteralExpr *e)
CIRGenCallee emitCallee(const clang::Expr *e)
Address emitAddrOfFieldStorage(Address base, const FieldDecl *field, llvm::StringRef fieldName, unsigned fieldIndex)
Get the address of a zero-sized field within a record.
This class organizes the cross-function state that is used while generating CIR code.
DiagnosticBuilder errorNYI(SourceLocation, llvm::StringRef)
Helpers to emit "not yet implemented" error diagnostics.
mlir::IntegerAttr getSize(CharUnits size)
CIRGenBuilderTy & getBuilder()
cir::FuncOp getAddrOfFunction(clang::GlobalDecl gd, mlir::Type funcType=nullptr, bool forVTable=false, bool dontDefer=false, ForDefinition_t isForDefinition=NotForDefinition)
Return the address of the given function.
mlir::Value getAddrOfGlobalVar(const VarDecl *d, mlir::Type ty={}, ForDefinition_t isForDefinition=NotForDefinition)
Return the mlir::Value for the address of the given global variable.
cir::GlobalLinkageKind getCIRLinkageVarDefinition(const VarDecl *vd, bool isConstant)
This class handles record and union layout info while lowering AST types to CIR types.
cir::RecordType getCIRType() const
Return the "complete object" LLVM type associated with this record.
const CIRGenBitFieldInfo & getBitFieldInfo(const clang::FieldDecl *fd) const
Return the BitFieldInfo that corresponds to the field FD.
unsigned getCIRFieldNo(const clang::FieldDecl *fd) const
Return cir::RecordType element number that corresponds to the field FD.
cir::FuncType getFunctionType(const CIRGenFunctionInfo &info)
Get the CIR function type for.
mlir::Attribute emitAbstract(const Expr *e, QualType destType)
Emit the result of the given expression as an abstract constant, asserting that it succeeded.
AlignmentSource getAlignmentSource() const
void mergeForCast(const LValueBaseInfo &info)
bool isExtVectorElt() const
const clang::Qualifiers & getQuals() const
mlir::Value getExtVectorPointer() const
static LValue makeExtVectorElt(Address vecAddress, mlir::ArrayAttr elts, clang::QualType type, LValueBaseInfo baseInfo)
mlir::Value getVectorIdx() const
bool isVectorElt() const
Address getAddress() const
static LValue makeAddr(Address address, clang::QualType t, LValueBaseInfo baseInfo)
mlir::ArrayAttr getExtVectorElts() const
static LValue makeVectorElt(Address vecAddress, mlir::Value index, clang::QualType t, LValueBaseInfo baseInfo)
unsigned getVRQualifiers() const
clang::QualType getType() const
static LValue makeBitfield(Address addr, const CIRGenBitFieldInfo &info, clang::QualType type, LValueBaseInfo baseInfo)
Create a new object to represent a bit-field access.
mlir::Value getPointer() const
bool isVolatileQualified() const
bool isBitField() const
Address getVectorAddress() const
clang::CharUnits getAlignment() const
LValueBaseInfo getBaseInfo() const
bool isVolatile() const
const CIRGenBitFieldInfo & getBitFieldInfo() const
Address getBitFieldAddress() const
Address getExtVectorAddress() const
bool isSimple() const
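As a rough illustration (assuming the CIRGen headers plus an already-computed Address addr and expression e), a simple l-value can be built with the factory above and inspected through these accessors:

  LValue lv = LValue::makeAddr(addr, e->getType(),
                               LValueBaseInfo(AlignmentSource::Decl));
  assert(lv.isSimple() && !lv.isBitField() && !lv.isVectorElt());
  Address storage = lv.getAddress();           // underlying Address
  clang::CharUnits align = lv.getAlignment();  // alignment carried by it
  bool isVol = lv.isVolatileQualified();       // from the type's qualifiers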
This trivial value class is used to represent the result of an expression that is evaluated.
Definition CIRGenValue.h:33
static RValue get(mlir::Value v)
Definition CIRGenValue.h:83
static RValue getComplex(mlir::Value v)
Definition CIRGenValue.h:91
mlir::Value getValue() const
Return the value of this scalar value.
Definition CIRGenValue.h:57
bool isScalar() const
Definition CIRGenValue.h:49
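A short sketch of the RValue wrapper, where scalarVal is an assumed mlir::Value of scalar type (getComplex would be used instead for a value of complex type):

  RValue rv = RValue::get(scalarVal);
  if (rv.isScalar()) {
    mlir::Value v = rv.getValue();  // unwrap the scalar result
    (void)v;
  }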
Contains the address where the return value of a function can be stored, and whether the address is v...
Definition CIRGenCall.h:254
virtual mlir::Value performAddrSpaceCast(CIRGenFunction &cgf, mlir::Value v, cir::TargetAddressSpaceAttr srcAddr, mlir::Type destTy, bool isNonNull=false) const
Perform address space cast of an expression of pointer type.
Represents a C++ destructor within a class.
Definition DeclCXX.h:2869
Represents a call to a member function that may be written either with member call syntax (e....
Definition ExprCXX.h:179
Represents a static or instance method of a struct/union/class.
Definition DeclCXX.h:2129
Represents a C++ struct/union/class.
Definition DeclCXX.h:258
bool hasTrivialDestructor() const
Determine whether this class has a trivial destructor (C++ [class.dtor]p3)
Definition DeclCXX.h:1366
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2877
FunctionDecl * getDirectCallee()
If the callee is a FunctionDecl, return it. Otherwise return null.
Definition Expr.h:3060
Expr * getCallee()
Definition Expr.h:3024
arg_range arguments()
Definition Expr.h:3129
QualType getCallReturnType(const ASTContext &Ctx) const
getCallReturnType - Get the return type of the call expr.
Definition Expr.cpp:1599
CastExpr - Base class for type casts, including both implicit casts (ImplicitCastExpr) and explicit c...
Definition Expr.h:3610
CastKind getCastKind() const
Definition Expr.h:3654
llvm::iterator_range< path_iterator > path()
Path through the class hierarchy taken by casts between base and derived classes (see implementation ...
Definition Expr.h:3697
bool changesVolatileQualification() const
Return whether this cast changes the volatile qualification of its operand.
Definition Expr.h:3744
static const char * getCastKindName(CastKind CK)
Definition Expr.cpp:1947
Expr * getSubExpr()
Definition Expr.h:3660
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
CharUnits alignmentOfArrayElement(CharUnits elementSize) const
Given that this is the alignment of the first element of an array, return the minimum alignment of an...
Definition CharUnits.h:214
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
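The CharUnits helpers above encode simple alignment arithmetic; for example (illustrative values only):

  CharUnits baseAlign = CharUnits::fromQuantity(8); // base object aligned to 8
  CharUnits offset = CharUnits::fromQuantity(4);    // member lives 4 bytes in
  // An 8-byte-aligned pointer plus 4 bytes is only guaranteed 4-byte alignment.
  CharUnits fieldAlign = baseAlign.alignmentAtOffset(offset);             // 4
  // Likewise, the guaranteed alignment of an arbitrary array element is
  // bounded by the element size.
  CharUnits elemAlign =
      baseAlign.alignmentOfArrayElement(CharUnits::fromQuantity(4));      // 4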
Complex values, per C99 6.2.5p11.
Definition TypeBase.h:3275
CompoundLiteralExpr - [C99 6.5.2.5].
Definition Expr.h:3539
bool isFileScope() const
Definition Expr.h:3571
const Expr * getInitializer() const
Definition Expr.h:3567
ConditionalOperator - The ?: ternary operator.
Definition Expr.h:4325
A reference to a declared variable, function, enum, etc.
Definition Expr.h:1270
bool refersToEnclosingVariableOrCapture() const
Does this DeclRefExpr refer to an enclosing local or a captured variable?
Definition Expr.h:1474
static DeclRefExpr * Create(const ASTContext &Context, NestedNameSpecifierLoc QualifierLoc, SourceLocation TemplateKWLoc, ValueDecl *D, bool RefersToEnclosingVariableOrCapture, SourceLocation NameLoc, QualType T, ExprValueKind VK, NamedDecl *FoundD=nullptr, const TemplateArgumentListInfo *TemplateArgs=nullptr, NonOdrUseReason NOUR=NOUR_None)
Definition Expr.cpp:484
ValueDecl * getDecl()
Definition Expr.h:1338
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:1468
SourceLocation getLocation() const
Definition Expr.h:1346
SourceLocation getLocation() const
Definition DeclBase.h:439
DeclContext * getDeclContext()
Definition DeclBase.h:448
bool hasAttr() const
Definition DeclBase.h:577
This represents one expression.
Definition Expr.h:112
const Expr * skipRValueSubobjectAdjustments(SmallVectorImpl< const Expr * > &CommaLHS, SmallVectorImpl< SubobjectAdjustment > &Adjustments) const
Walk outwards from an expression we want to bind a reference to and find the expression whose lifetim...
Definition Expr.cpp:80
bool isGLValue() const
Definition Expr.h:287
ExprValueKind getValueKind() const
getValueKind - The value kind that this expression produces.
Definition Expr.h:444
Expr * IgnoreParens() LLVM_READONLY
Skip past any parentheses which might surround this expression until reaching a fixed point.
Definition Expr.cpp:3082
bool isPRValue() const
Definition Expr.h:285
bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx, bool InConstantContext=false) const
EvaluateAsRValue - Return true if this is a constant which we can fold to an rvalue using any crazy t...
Decl * getReferencedDeclOfCallee()
Definition Expr.cpp:1542
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
QualType getType() const
Definition Expr.h:144
ExtVectorElementExpr - This represents access to specific elements of a vector, and may occur on the ...
Definition Expr.h:6498
bool isArrow() const
isArrow - Return true if the base expression is a pointer to vector, return false if the base express...
Definition Expr.cpp:4411
void getEncodedElementAccess(SmallVectorImpl< uint32_t > &Elts) const
getEncodedElementAccess - Encode the elements accessed into an llvm aggregate Constant of ConstantInt...
Definition Expr.cpp:4443
const Expr * getBase() const
Definition Expr.h:6515
Represents a member of a struct/union/class.
Definition Decl.h:3160
bool isBitField() const
Determines whether this field is a bitfield.
Definition Decl.h:3263
SourceRange getSourceRange() const override LLVM_READONLY
Source range that this declaration covers.
Definition Decl.cpp:4822
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3245
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3396
bool isZeroSize(const ASTContext &Ctx) const
Determine if this field is a subobject of zero size, that is, either a zero-length bit-field or a fie...
Definition Decl.cpp:4762
Represents a function declaration or definition.
Definition Decl.h:2000
FunctionDecl * getPreviousDecl()
Return the previous declaration of this declaration or NULL if this is the first declaration.
Represents a prototype with parameter type info, e.g.
Definition TypeBase.h:5254
GlobalDecl - represents a global declaration.
Definition GlobalDecl.h:57
const Decl * getDecl() const
Definition GlobalDecl.h:106
Represents a prvalue temporary that is written into memory so that a reference can bind to it.
Definition ExprCXX.h:4920
StorageDuration getStorageDuration() const
Retrieve the storage duration for the materialized temporary.
Definition ExprCXX.h:4945
Expr * getSubExpr() const
Retrieve the temporary-generating subexpression whose value will be materialized into a glvalue.
Definition ExprCXX.h:4937
ValueDecl * getExtendingDecl()
Get the declaration which triggered the lifetime-extension of this temporary, if any.
Definition ExprCXX.h:4970
MemberExpr - [C99 6.5.2.3] Structure and Union Members.
Definition Expr.h:3298
ValueDecl * getMemberDecl() const
Retrieve the member declaration to which this expression refers.
Definition Expr.h:3381
NonOdrUseReason isNonOdrUse() const
Is this expression a non-odr-use reference, and if so, why?
Definition Expr.h:3522
Expr * getBase() const
Definition Expr.h:3375
bool isArrow() const
Definition Expr.h:3482
SourceLocation getExprLoc() const LLVM_READONLY
Definition Expr.h:3493
A pointer to member type per C++ 8.3.3 - Pointers to members.
Definition TypeBase.h:3653
This represents a decl that may have a name.
Definition Decl.h:274
StringRef getName() const
Get the name of identifier for this declaration as a StringRef.
Definition Decl.h:301
A C++ nested-name-specifier augmented with source location information.
Represents a C++ nested name specifier, such as "::std::vector<int>::".
OpaqueValueExpr - An expression referring to an opaque object of a fixed type and value class.
Definition Expr.h:1178
Expr * getSourceExpr() const
The source expression of an opaque value expression is the expression which originally generated the ...
Definition Expr.h:1228
bool isUnique() const
Definition Expr.h:1236
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3328
[C99 6.4.2.2] - A predefined identifier such as __func__.
Definition Expr.h:2005
StringRef getIdentKindName() const
Definition Expr.h:2062
PredefinedIdentKind getIdentKind() const
Definition Expr.h:2040
StringLiteral * getFunctionName()
Definition Expr.h:2049
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
LangAS getAddressSpace() const
Return the address space of this type.
Definition TypeBase.h:8404
Qualifiers getQualifiers() const
Retrieve the set of qualifiers applied to this type.
Definition TypeBase.h:8318
Qualifiers::ObjCLifetime getObjCLifetime() const
Returns lifetime attribute of this type.
Definition TypeBase.h:1438
QualType withCVRQualifiers(unsigned CVR) const
Definition TypeBase.h:1179
DestructionKind isDestructedType() const
Returns a nonzero value if objects of this type require non-trivial work to clean up after.
Definition TypeBase.h:1545
The collection of all-type qualifiers we support.
Definition TypeBase.h:331
unsigned getCVRQualifiers() const
Definition TypeBase.h:488
GC getObjCGCAttr() const
Definition TypeBase.h:519
@ OCL_ExplicitNone
This object can be modified without requiring retains or releases.
Definition TypeBase.h:354
@ OCL_None
There is no lifetime qualification on this type.
Definition TypeBase.h:350
void addCVRQualifiers(unsigned mask)
Definition TypeBase.h:502
void addQualifiers(Qualifiers Q)
Add the qualifiers from the given set to this set.
Definition TypeBase.h:650
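A small sketch (baseTy and memberTy are assumed QualTypes) of how the qualifier accessors above compose when cv-qualifiers from a base object are merged onto a member's type:

  Qualifiers quals = memberTy.getQualifiers();
  quals.addCVRQualifiers(baseTy.getQualifiers().getCVRQualifiers());
  QualType accessTy = memberTy.withCVRQualifiers(quals.getCVRQualifiers());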
Represents a struct/union/class.
Definition Decl.h:4321
Encodes a location in the source.
Stmt - This represents one statement.
Definition Stmt.h:85
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
StringLiteral - This represents a string literal expression, e.g.
Definition Expr.h:1799
bool isUnion() const
Definition Decl.h:3922
Exposes information about the current target.
Definition TargetInfo.h:226
virtual StringRef getABI() const
Get the ABI currently in use.
bool isVoidType() const
Definition TypeBase.h:8871
bool isBooleanType() const
Definition TypeBase.h:9001
const ArrayType * castAsArrayTypeUnsafe() const
A variant of castAs<> for array type which silently discards qualifiers from the outermost type.
Definition TypeBase.h:9167
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
bool isArrayType() const
Definition TypeBase.h:8614
bool isFunctionPointerType() const
Definition TypeBase.h:8582
CXXRecordDecl * castAsCXXRecordDecl() const
Definition Type.h:36
bool isArithmeticType() const
Definition Type.cpp:2337
bool isConstantMatrixType() const
Definition TypeBase.h:8676
bool isPointerType() const
Definition TypeBase.h:8515
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9158
bool isReferenceType() const
Definition TypeBase.h:8539
bool isVariableArrayType() const
Definition TypeBase.h:8626
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
bool isExtVectorBoolType() const
Definition TypeBase.h:8662
bool isAnyComplexType() const
Definition TypeBase.h:8650
const Type * getBaseElementTypeUnsafe() const
Get the base element type of this type, potentially discarding type qualifiers.
Definition TypeBase.h:9044
bool isAtomicType() const
Definition TypeBase.h:8697
bool isVariablyModifiedType() const
Whether this type is a variably-modified type (C99 6.7.5).
Definition TypeBase.h:2800
bool isFunctionType() const
Definition TypeBase.h:8511
bool isVectorType() const
Definition TypeBase.h:8654
bool isSubscriptableVectorType() const
Definition TypeBase.h:8668
const T * getAs() const
Member-template getAs<specific type>.
Definition TypeBase.h:9091
bool hasBooleanRepresentation() const
Determine whether this type has a boolean representation – i.e., it is a boolean type,...
Definition Type.cpp:2354
UnaryOperator - This represents the unary-expression's (except sizeof and alignof),...
Definition Expr.h:2244
SourceLocation getExprLoc() const
Definition Expr.h:2368
Expr * getSubExpr() const
Definition Expr.h:2285
Opcode getOpcode() const
Definition Expr.h:2280
static bool isIncrementOp(Opcode Op)
Definition Expr.h:2326
static bool isPrefix(Opcode Op)
isPrefix - Return true if this is a prefix operation, like --x.
Definition Expr.h:2319
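For instance, a hypothetical caller emitting an increment or decrement could classify the operation with the static helpers above (uo is an assumed UnaryOperator*):

  UnaryOperatorKind op = uo->getOpcode();
  bool isInc = UnaryOperator::isIncrementOp(op); // ++ rather than --
  bool isPre = UnaryOperator::isPrefix(op);      // prefix yields the updated
                                                 // value, postfix the original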
Represent the declaration of a variable (in which case it is an lvalue) a function (in which case it ...
Definition Decl.h:712
QualType getType() const
Definition Decl.h:723
Represents a variable declaration or definition.
Definition Decl.h:926
TLSKind getTLSKind() const
Definition Decl.cpp:2175
bool hasInit() const
Definition Decl.cpp:2405
VarDecl * getDefinition(ASTContext &)
Get the real (not just tentative) definition for this declaration.
Definition Decl.cpp:2373
bool hasLocalStorage() const
Returns true if a variable with function scope is a non-static local variable.
Definition Decl.h:1184
@ TLS_Dynamic
TLS with a dynamic initializer.
Definition Decl.h:952
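As a sketch (vd is an assumed VarDecl*), these accessors typically drive the choice of storage when emitting a variable reference:

  if (vd->hasLocalStorage()) {
    // Ordinary automatic variable: emit or reuse a stack slot.
  } else if (vd->getTLSKind() == VarDecl::TLS_Dynamic) {
    // Thread-local with a dynamic initializer: reference the TLS global and
    // ensure its initializer runs before first use.
  } else {
    // Static or global storage: reference the global directly.
  }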
Represents a C array with a specified size that is not an integer-constant-expression.
Definition TypeBase.h:3966
Represents a GCC generic vector type.
Definition TypeBase.h:4175
Defines the clang::TargetInfo interface.
cir::TargetAddressSpaceAttr toCIRTargetAddressSpace(mlir::MLIRContext &context, clang::LangAS langAS)
Definition CIRTypes.cpp:836
AlignmentSource
The source of the alignment of an l-value; an expression of confidence in the alignment actually matc...
@ Decl
The l-value was an access to a declared entity or something equivalently strong, like the address of ...
static AlignmentSource getFieldAlignmentSource(AlignmentSource source)
Given that the base address has the given alignment source, what's our confidence in the alignment of...
const internal::VariadicAllOfMatcher< Type > type
Matches Types in the clang AST.
const AstTypeMatcher< FunctionType > functionType
const internal::VariadicDynCastAllOfMatcher< Stmt, Expr > expr
Matches expressions.
const internal::VariadicDynCastAllOfMatcher< Stmt, CastExpr > castExpr
Matches any cast nodes of Clang's AST.
The JSON file list parser is used to communicate input to InstallAPI.
bool isa(CodeGen::Address addr)
Definition Address.h:330
@ CPlusPlus
bool isTargetAddressSpace(LangAS AS)
@ SC_Register
Definition Specifiers.h:257
@ SD_Thread
Thread storage duration.
Definition Specifiers.h:342
@ SD_Static
Static storage duration.
Definition Specifiers.h:343
@ SD_FullExpression
Full-expression storage duration (for temporaries).
Definition Specifiers.h:340
@ SD_Automatic
Automatic storage duration (most local variables).
Definition Specifiers.h:341
@ SD_Dynamic
Dynamic storage duration.
Definition Specifiers.h:344
LangAS
Defines the address space values used by the address space qualifier of QualType.
U cast(CodeGen::Address addr)
Definition Address.h:327
LangAS getLangASFromTargetAS(unsigned TargetAS)
@ NOUR_Unevaluated
This name appears in an unevaluated operand.
Definition Specifiers.h:177
@ NOUR_Constant
This name appears as a potential result of an lvalue-to-rvalue conversion that is a constant expressi...
Definition Specifiers.h:180
static bool weakRefReference()
static bool objCLifetime()
static bool emitLifetimeMarkers()
static bool opLoadEmitScalarRangeCheck()
static bool addressSpace()
static bool opAllocaNonGC()
static bool opAllocaOpenMPThreadPrivate()
static bool preservedAccessIndexRegion()
static bool mergeAllConstants()
static bool opLoadStoreTbaa()
static bool cgFPOptionsRAII()
static bool opCallChain()
static bool opAllocaImpreciseLifetime()
static bool opAllocaStaticLocal()
static bool opAllocaTLS()
static bool emitCheckedInBoundsGEP()
static bool attributeNoBuiltin()
static bool setObjCGCLValueClass()
static bool cirgenABIInfo()
static bool opLoadStoreObjC()
static bool opCallArgEvaluationOrder()
static bool lambdaCaptures()
static bool insertBuiltinUnpredictable()
static bool opCallMustTail()
static bool shouldReverseUnaryCondOnBoolExpr()
static bool tryEmitAsConstant()
static bool addressIsKnownNonNull()
static bool astVarDeclInterface()
static bool cgCapturedStmtInfo()
static bool opAllocaEscapeByReference()
static bool opLoadStoreNontemporal()
static bool opCallFnInfoOpts()
static bool generateDebugInfo()
static bool incrementProfileCounter()
Record with information about how a bitfield should be accessed.
unsigned volatileStorageSize
The storage size in bits which should be used when accessing this bitfield.
unsigned volatileOffset
The offset within a contiguous run of bitfields that are represented as a single "field" within the c...
Represents a scope, including function bodies, compound statements, and the substatements of if/while...
cir::TargetAddressSpaceAttr getCIRAllocaAddressSpace() const
EvalResult is a struct with detailed info about an evaluated expression.
Definition Expr.h:645
APValue Val
Val - This is the value the expression can be folded to.
Definition Expr.h:647
bool HasSideEffects
Whether the evaluated expression has side effects.
Definition Expr.h:612
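A brief sketch (assuming the clang AST headers, an expression e, and an ASTContext ctx) of constant folding through EvaluateAsRValue and EvalResult:

  Expr::EvalResult result;
  if (e->EvaluateAsRValue(result, ctx) && !result.HasSideEffects) {
    // result.Val holds the folded APValue; a code generator could emit it as
    // a constant instead of generating loads and arithmetic.
  }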
