clang 22.0.0git
InterpBuiltin.cpp
Go to the documentation of this file.
//===--- InterpBuiltin.cpp - Interpreter for the constexpr VM ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
9#include "Boolean.h"
10#include "EvalEmitter.h"
12#include "InterpHelpers.h"
13#include "PrimType.h"
14#include "Program.h"
16#include "clang/AST/OSLog.h"
21#include "llvm/ADT/StringExtras.h"
22#include "llvm/Support/AllocToken.h"
23#include "llvm/Support/ErrorHandling.h"
24#include "llvm/Support/SipHash.h"
25
26namespace clang {
27namespace interp {
28
29[[maybe_unused]] static bool isNoopBuiltin(unsigned ID) {
30 switch (ID) {
31 case Builtin::BIas_const:
32 case Builtin::BIforward:
33 case Builtin::BIforward_like:
34 case Builtin::BImove:
35 case Builtin::BImove_if_noexcept:
36 case Builtin::BIaddressof:
37 case Builtin::BI__addressof:
38 case Builtin::BI__builtin_addressof:
39 case Builtin::BI__builtin_launder:
40 return true;
41 default:
42 return false;
43 }
44 return false;
45}
46
47static void discard(InterpStack &Stk, PrimType T) {
48 TYPE_SWITCH(T, { Stk.discard<T>(); });
49}
50
51static uint64_t popToUInt64(const InterpState &S, const Expr *E) {
53 return static_cast<uint64_t>(S.Stk.pop<T>()));
54}
55
57 INT_TYPE_SWITCH(T, return Stk.pop<T>().toAPSInt());
58}
59
60static APSInt popToAPSInt(InterpState &S, const Expr *E) {
61 return popToAPSInt(S.Stk, *S.getContext().classify(E->getType()));
62}
64 return popToAPSInt(S.Stk, *S.getContext().classify(T));
65}
66
67/ Pushes \p Val on the stack as the type given by \p QT.
68static void pushInteger(InterpState &S, const APSInt &Val, QualType QT) {
72 assert(T);
73
74 unsigned BitWidth = S.getASTContext().getTypeSize(QT);
75
76 if (T == PT_IntAPS) {
77 auto Result = S.allocAP<IntegralAP<true>>(BitWidth);
78 Result.copy(Val);
80 return;
81 }
82
83 if (T == PT_IntAP) {
84 auto Result = S.allocAP<IntegralAP<false>>(BitWidth);
85 Result.copy(Val);
87 return;
88 }
89
91 int64_t V = Val.getSExtValue();
92 INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
93 } else {
95 uint64_t V = Val.getZExtValue();
96 INT_TYPE_SWITCH(*T, { S.Stk.push<T>(T::from(V, BitWidth)); });
97 }
98}
99
100template <typename T>
101static void pushInteger(InterpState &S, T Val, QualType QT) {
102 if constexpr (std::is_same_v<T, APInt>)
103 pushInteger(S, APSInt(Val, !std::is_signed_v<T>), QT);
104 else if constexpr (std::is_same_v<T, APSInt>)
105 pushInteger(S, Val, QT);
106 else
107 pushInteger(S,
108 APSInt(APInt(sizeof(T) * 8, static_cast<uint64_t>(Val),
109 std::is_signed_v<T>),
110 !std::is_signed_v<T>),
111 QT);
112}
113
114static void assignInteger(InterpState &S, const Pointer &Dest, PrimType ValueT,
115 const APSInt &Value) {
116
117 if (ValueT == PT_IntAPS) {
118 Dest.deref<IntegralAP<true>>() =
119 S.allocAP<IntegralAP<true>>(Value.getBitWidth());
120 Dest.deref<IntegralAP<true>>().copy(Value);
121 } else if (ValueT == PT_IntAP) {
122 Dest.deref<IntegralAP<false>>() =
123 S.allocAP<IntegralAP<false>>(Value.getBitWidth());
124 Dest.deref<IntegralAP<false>>().copy(Value);
125 } else {
127 ValueT, { Dest.deref<T>() = T::from(static_cast<T>(Value)); });
128 }
129}
130
131static QualType getElemType(const Pointer &P) {
132 const Descriptor *Desc = P.getFieldDesc();
133 QualType T = Desc->getType();
134 if (Desc->isPrimitive())
135 return T;
136 if (T->isPointerType())
137 return T->getAs<PointerType>()->getPointeeType();
138 if (Desc->isArray())
139 return Desc->getElemQualType();
140 if (const auto *AT = T->getAsArrayTypeUnsafe())
141 return AT->getElementType();
142 return T;
143}
144
146 unsigned ID) {
147 if (!S.diagnosing())
148 return;
149
150 auto Loc = S.Current->getSource(OpPC);
151 if (S.getLangOpts().CPlusPlus11)
152 S.CCEDiag(Loc, diag::note_constexpr_invalid_function)
153 << /*isConstexpr=*/0 << /*isConstructor=*/0
155 else
156 S.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
157}
158
159static llvm::APSInt convertBoolVectorToInt(const Pointer &Val) {
160 assert(Val.getFieldDesc()->isPrimitiveArray() &&
162 "Not a boolean vector");
163 unsigned NumElems = Val.getNumElems();
164
165 / Each element is one bit, so create an integer with NumElts bits.
166 llvm::APSInt Result(NumElems, 0);
167 for (unsigned I = 0; I != NumElems; ++I) {
168 if (Val.elem<bool>(I))
169 Result.setBit(I);
170 }
171
172 return Result;
173}
174
175/ Strict double -> float conversion used for X86 PD2PS/cvtsd2ss intrinsics.
176/ Reject NaN/Inf/Subnormal inputs and any lossy/inexact conversions.
178 InterpState &S, const Expr *DiagExpr) {
179 if (Src.isInfinity()) {
180 if (S.diagnosing())
181 S.CCEDiag(DiagExpr, diag::note_constexpr_float_arithmetic) << 0;
182 return false;
183 }
184 if (Src.isNaN()) {
185 if (S.diagnosing())
186 S.CCEDiag(DiagExpr, diag::note_constexpr_float_arithmetic) << 1;
187 return false;
188 }
189 APFloat Val = Src;
190 bool LosesInfo = false;
191 APFloat::opStatus Status = Val.convert(
192 APFloat::IEEEsingle(), APFloat::rmNearestTiesToEven, &LosesInfo);
193 if (LosesInfo || Val.isDenormal()) {
194 if (S.diagnosing())
195 S.CCEDiag(DiagExpr, diag::note_constexpr_float_arithmetic_strict);
196 return false;
197 }
198 if (Status != APFloat::opOK) {
199 if (S.diagnosing())
200 S.CCEDiag(DiagExpr, diag::note_invalid_subexpr_in_const_expr);
201 return false;
202 }
203 Dst.copy(Val);
204 return true;
205}
206
208 const InterpFrame *Frame,
209 const CallExpr *Call) {
210 unsigned Depth = S.Current->getDepth();
211 auto isStdCall = [](const FunctionDecl *F) -> bool {
212 return F && F->isInStdNamespace() && F->getIdentifier() &&
213 F->getIdentifier()->isStr("is_constant_evaluated");
214 };
215 const InterpFrame *Caller = Frame->Caller;
216 / The current frame is the one for __builtin_is_constant_evaluated.
217 / The one above that, potentially the one for std::is_constant_evaluated().
219 S.getEvalStatus().Diag &&
220 (Depth == 0 || (Depth == 1 && isStdCall(Frame->getCallee())))) {
221 if (Caller && isStdCall(Frame->getCallee())) {
222 const Expr *E = Caller->getExpr(Caller->getRetPC());
223 S.report(E->getExprLoc(),
224 diag::warn_is_constant_evaluated_always_true_constexpr)
225 << "std::is_constant_evaluated" << E->getSourceRange();
226 } else {
227 S.report(Call->getExprLoc(),
228 diag::warn_is_constant_evaluated_always_true_constexpr)
229 << "__builtin_is_constant_evaluated" << Call->getSourceRange();
230 }
231 }
232
234 return true;
235}
236
237/ __builtin_assume(int)
239 const InterpFrame *Frame,
240 const CallExpr *Call) {
241 assert(Call->getNumArgs() == 1);
242 discard(S.Stk, *S.getContext().classify(Call->getArg(0)));
243 return true;
244}
245
247 const InterpFrame *Frame,
248 const CallExpr *Call, unsigned ID) {
249 uint64_t Limit = ~static_cast<uint64_t>(0);
250 if (ID == Builtin::BIstrncmp || ID == Builtin::BI__builtin_strncmp ||
251 ID == Builtin::BIwcsncmp || ID == Builtin::BI__builtin_wcsncmp)
252 Limit = popToUInt64(S, Call->getArg(2));
253
254 const Pointer &B = S.Stk.pop<Pointer>();
255 const Pointer &A = S.Stk.pop<Pointer>();
256 if (ID == Builtin::BIstrcmp || ID == Builtin::BIstrncmp ||
257 ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp)
258 diagnoseNonConstexprBuiltin(S, OpPC, ID);
259
260 if (Limit == 0) {
261 pushInteger(S, 0, Call->getType());
262 return true;
263 }
264
265 if (!CheckLive(S, OpPC, A, AK_Read) || !CheckLive(S, OpPC, B, AK_Read))
266 return false;
267
268 if (A.isDummy() || B.isDummy())
269 return false;
270 if (!A.isBlockPointer() || !B.isBlockPointer())
271 return false;
272
273 bool IsWide = ID == Builtin::BIwcscmp || ID == Builtin::BIwcsncmp ||
274 ID == Builtin::BI__builtin_wcscmp ||
275 ID == Builtin::BI__builtin_wcsncmp;
276 assert(A.getFieldDesc()->isPrimitiveArray());
277 assert(B.getFieldDesc()->isPrimitiveArray());
278
279 / Different element types shouldn't happen, but with casts they can.
281 return false;
282
283 PrimType ElemT = *S.getContext().classify(getElemType(A));
284
285 auto returnResult = [&](int V) -> bool {
286 pushInteger(S, V, Call->getType());
287 return true;
288 };
289
290 unsigned IndexA = A.getIndex();
291 unsigned IndexB = B.getIndex();
292 uint64_t Steps = 0;
293 for (;; ++IndexA, ++IndexB, ++Steps) {
294
295 if (Steps >= Limit)
296 break;
297 const Pointer &PA = A.atIndex(IndexA);
298 const Pointer &PB = B.atIndex(IndexB);
299 if (!CheckRange(S, OpPC, PA, AK_Read) ||
300 !CheckRange(S, OpPC, PB, AK_Read)) {
301 return false;
302 }
303
304 if (IsWide) {
305 INT_TYPE_SWITCH(ElemT, {
306 T CA = PA.deref<T>();
307 T CB = PB.deref<T>();
308 if (CA > CB)
309 return returnResult(1);
310 if (CA < CB)
311 return returnResult(-1);
312 if (CA.isZero() || CB.isZero())
313 return returnResult(0);
314 });
315 continue;
316 }
317
318 uint8_t CA = PA.deref<uint8_t>();
319 uint8_t CB = PB.deref<uint8_t>();
320
321 if (CA > CB)
322 return returnResult(1);
323 if (CA < CB)
324 return returnResult(-1);
325 if (CA == 0 || CB == 0)
326 return returnResult(0);
327 }
328
329 return returnResult(0);
330}
331
333 const InterpFrame *Frame,
334 const CallExpr *Call, unsigned ID) {
335 const Pointer &StrPtr = S.Stk.pop<Pointer>().expand();
336
337 if (ID == Builtin::BIstrlen || ID == Builtin::BIwcslen)
338 diagnoseNonConstexprBuiltin(S, OpPC, ID);
339
340 if (!CheckArray(S, OpPC, StrPtr))
341 return false;
342
343 if (!CheckLive(S, OpPC, StrPtr, AK_Read))
344 return false;
345
346 if (!CheckDummy(S, OpPC, StrPtr.block(), AK_Read))
347 return false;
348
349 if (!StrPtr.getFieldDesc()->isPrimitiveArray())
350 return false;
351
352 assert(StrPtr.getFieldDesc()->isPrimitiveArray());
353 unsigned ElemSize = StrPtr.getFieldDesc()->getElemSize();
354
355 if (ID == Builtin::BI__builtin_wcslen || ID == Builtin::BIwcslen) {
356 [[maybe_unused]] const ASTContext &AC = S.getASTContext();
357 assert(ElemSize == AC.getTypeSizeInChars(AC.getWCharType()).getQuantity());
358 }
359
360 size_t Len = 0;
361 for (size_t I = StrPtr.getIndex();; ++I, ++Len) {
362 const Pointer &ElemPtr = StrPtr.atIndex(I);
363
364 if (!CheckRange(S, OpPC, ElemPtr, AK_Read))
365 return false;
366
367 uint32_t Val;
368 switch (ElemSize) {
369 case 1:
370 Val = ElemPtr.deref<uint8_t>();
371 break;
372 case 2:
373 Val = ElemPtr.deref<uint16_t>();
374 break;
375 case 4:
376 Val = ElemPtr.deref<uint32_t>();
377 break;
378 default:
379 llvm_unreachable("Unsupported char size");
380 }
381 if (Val == 0)
382 break;
383 }
384
385 pushInteger(S, Len, Call->getType());
386
387 return true;
388}
389
391 const InterpFrame *Frame, const CallExpr *Call,
392 bool Signaling) {
393 const Pointer &Arg = S.Stk.pop<Pointer>();
394
395 if (!CheckLoad(S, OpPC, Arg))
396 return false;
397
398 assert(Arg.getFieldDesc()->isPrimitiveArray());
399
400 / Convert the given string to an integer using StringRef's API.
401 llvm::APInt Fill;
402 std::string Str;
403 assert(Arg.getNumElems() >= 1);
404 for (unsigned I = 0;; ++I) {
405 const Pointer &Elem = Arg.atIndex(I);
406
407 if (!CheckLoad(S, OpPC, Elem))
408 return false;
409
410 if (Elem.deref<int8_t>() == 0)
411 break;
412
413 Str += Elem.deref<char>();
414 }
415
416 / Treat empty strings as if they were zero.
417 if (Str.empty())
418 Fill = llvm::APInt(32, 0);
419 else if (StringRef(Str).getAsInteger(0, Fill))
420 return false;
421
422 const llvm::fltSemantics &TargetSemantics =
424 Call->getDirectCallee()->getReturnType());
425
426 Floating Result = S.allocFloat(TargetSemantics);
428 if (Signaling)
429 Result.copy(
430 llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
431 else
432 Result.copy(
433 llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
434 } else {
435 / Prior to IEEE 754-2008, architectures were allowed to choose whether
436 / the first bit of their significand was set for qNaN or sNaN. MIPS chose
437 / a different encoding to what became a standard in 2008, and for pre-
438 / 2008 revisions, MIPS interpreted sNaN-2008 as qNan and qNaN-2008 as
439 / sNaN. This is now known as "legacy NaN" encoding.
440 if (Signaling)
441 Result.copy(
442 llvm::APFloat::getQNaN(TargetSemantics, /*Negative=*/false, &Fill));
443 else
444 Result.copy(
445 llvm::APFloat::getSNaN(TargetSemantics, /*Negative=*/false, &Fill));
446 }
447
449 return true;
450}
451
453 const InterpFrame *Frame,
454 const CallExpr *Call) {
455 const llvm::fltSemantics &TargetSemantics =
457 Call->getDirectCallee()->getReturnType());
458
459 Floating Result = S.allocFloat(TargetSemantics);
460 Result.copy(APFloat::getInf(TargetSemantics));
462 return true;
463}
464
466 const InterpFrame *Frame) {
467 const Floating &Arg2 = S.Stk.pop<Floating>();
468 const Floating &Arg1 = S.Stk.pop<Floating>();
469 Floating Result = S.allocFloat(Arg1.getSemantics());
470
471 APFloat Copy = Arg1.getAPFloat();
472 Copy.copySign(Arg2.getAPFloat());
473 Result.copy(Copy);
475
476 return true;
477}
478
480 const InterpFrame *Frame, bool IsNumBuiltin) {
481 const Floating &RHS = S.Stk.pop<Floating>();
482 const Floating &LHS = S.Stk.pop<Floating>();
483 Floating Result = S.allocFloat(LHS.getSemantics());
484
485 if (IsNumBuiltin)
486 Result.copy(llvm::minimumnum(LHS.getAPFloat(), RHS.getAPFloat()));
487 else
488 Result.copy(minnum(LHS.getAPFloat(), RHS.getAPFloat()));
490 return true;
491}
492
494 const InterpFrame *Frame, bool IsNumBuiltin) {
495 const Floating &RHS = S.Stk.pop<Floating>();
496 const Floating &LHS = S.Stk.pop<Floating>();
497 Floating Result = S.allocFloat(LHS.getSemantics());
498
499 if (IsNumBuiltin)
500 Result.copy(llvm::maximumnum(LHS.getAPFloat(), RHS.getAPFloat()));
501 else
502 Result.copy(maxnum(LHS.getAPFloat(), RHS.getAPFloat()));
504 return true;
505}
506
507/ Defined as __builtin_isnan(...), to accommodate the fact that it can
508/ take a float, double, long double, etc.
509/ But for us, that's all a Floating anyway.
511 const InterpFrame *Frame,
512 const CallExpr *Call) {
513 const Floating &Arg = S.Stk.pop<Floating>();
514
515 pushInteger(S, Arg.isNan(), Call->getType());
516 return true;
517}
518
520 const InterpFrame *Frame,
521 const CallExpr *Call) {
522 const Floating &Arg = S.Stk.pop<Floating>();
523
524 pushInteger(S, Arg.isSignaling(), Call->getType());
525 return true;
526}
527
529 const InterpFrame *Frame, bool CheckSign,
530 const CallExpr *Call) {
531 const Floating &Arg = S.Stk.pop<Floating>();
532 APFloat F = Arg.getAPFloat();
533 bool IsInf = F.isInfinity();
534
535 if (CheckSign)
536 pushInteger(S, IsInf ? (F.isNegative() ? -1 : 1) : 0, Call->getType());
537 else
538 pushInteger(S, IsInf, Call->getType());
539 return true;
540}
541
543 const InterpFrame *Frame,
544 const CallExpr *Call) {
545 const Floating &Arg = S.Stk.pop<Floating>();
546
547 pushInteger(S, Arg.isFinite(), Call->getType());
548 return true;
549}
550
552 const InterpFrame *Frame,
553 const CallExpr *Call) {
554 const Floating &Arg = S.Stk.pop<Floating>();
555
556 pushInteger(S, Arg.isNormal(), Call->getType());
557 return true;
558}
559
561 const InterpFrame *Frame,
562 const CallExpr *Call) {
563 const Floating &Arg = S.Stk.pop<Floating>();
564
565 pushInteger(S, Arg.isDenormal(), Call->getType());
566 return true;
567}
568
570 const InterpFrame *Frame,
571 const CallExpr *Call) {
572 const Floating &Arg = S.Stk.pop<Floating>();
573
574 pushInteger(S, Arg.isZero(), Call->getType());
575 return true;
576}
577
579 const InterpFrame *Frame,
580 const CallExpr *Call) {
581 const Floating &Arg = S.Stk.pop<Floating>();
582
583 pushInteger(S, Arg.isNegative(), Call->getType());
584 return true;
585}
586
588 const CallExpr *Call, unsigned ID) {
589 const Floating &RHS = S.Stk.pop<Floating>();
590 const Floating &LHS = S.Stk.pop<Floating>();
591
593 S,
594 [&] {
595 switch (ID) {
596 case Builtin::BI__builtin_isgreater:
597 return LHS > RHS;
598 case Builtin::BI__builtin_isgreaterequal:
599 return LHS >= RHS;
600 case Builtin::BI__builtin_isless:
601 return LHS < RHS;
602 case Builtin::BI__builtin_islessequal:
603 return LHS <= RHS;
604 case Builtin::BI__builtin_islessgreater: {
605 ComparisonCategoryResult Cmp = LHS.compare(RHS);
606 return Cmp == ComparisonCategoryResult::Less ||
608 }
609 case Builtin::BI__builtin_isunordered:
611 default:
612 llvm_unreachable("Unexpected builtin ID: Should be a floating point "
613 "comparison function");
614 }
615 }(),
616 Call->getType());
617 return true;
618}
619
620/ First parameter to __builtin_isfpclass is the floating value, the
621/ second one is an integral value.
623 const InterpFrame *Frame,
624 const CallExpr *Call) {
625 APSInt FPClassArg = popToAPSInt(S, Call->getArg(1));
626 const Floating &F = S.Stk.pop<Floating>();
627
628 int32_t Result = static_cast<int32_t>(
629 (F.classify() & std::move(FPClassArg)).getZExtValue());
630 pushInteger(S, Result, Call->getType());
631
632 return true;
633}
634
635/ Five int values followed by one floating value.
636/ __builtin_fpclassify(int, int, int, int, int, float)
638 const InterpFrame *Frame,
639 const CallExpr *Call) {
640 const Floating &Val = S.Stk.pop<Floating>();
641
642 PrimType IntT = *S.getContext().classify(Call->getArg(0));
643 APSInt Values[5];
644 for (unsigned I = 0; I != 5; ++I)
645 Values[4 - I] = popToAPSInt(S.Stk, IntT);
646
647 unsigned Index;
648 switch (Val.getCategory()) {
649 case APFloat::fcNaN:
650 Index = 0;
651 break;
652 case APFloat::fcInfinity:
653 Index = 1;
654 break;
655 case APFloat::fcNormal:
656 Index = Val.isDenormal() ? 3 : 2;
657 break;
658 case APFloat::fcZero:
659 Index = 4;
660 break;
661 }
662
663 / The last argument is first on the stack.
664 assert(Index <= 4);
665
666 pushInteger(S, Values[Index], Call->getType());
667 return true;
668}
669
670static inline Floating abs(InterpState &S, const Floating &In) {
671 if (!In.isNegative())
672 return In;
673
674 Floating Output = S.allocFloat(In.getSemantics());
675 APFloat New = In.getAPFloat();
676 New.changeSign();
677 Output.copy(New);
678 return Output;
679}
680
681/ The C standard says "fabs raises no floating-point exceptions,
682/ even if x is a signaling NaN. The returned value is independent of
683/ the current rounding direction mode." Therefore constant folding can
684/ proceed without regard to the floating point settings.
685/ Reference, WG14 N2478 F.10.4.3
687 const InterpFrame *Frame) {
688 const Floating &Val = S.Stk.pop<Floating>();
689 S.Stk.push<Floating>(abs(S, Val));
690 return true;
691}
692
694 const InterpFrame *Frame,
695 const CallExpr *Call) {
696 APSInt Val = popToAPSInt(S, Call->getArg(0));
697 if (Val ==
698 APSInt(APInt::getSignedMinValue(Val.getBitWidth()), /*IsUnsigned=*/false))
699 return false;
700 if (Val.isNegative())
701 Val.negate();
702 pushInteger(S, Val, Call->getType());
703 return true;
704}
705
707 const InterpFrame *Frame,
708 const CallExpr *Call) {
709 APSInt Val;
710 if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
711 const Pointer &Arg = S.Stk.pop<Pointer>();
712 Val = convertBoolVectorToInt(Arg);
713 } else {
714 Val = popToAPSInt(S, Call->getArg(0));
715 }
716 pushInteger(S, Val.popcount(), Call->getType());
717 return true;
718}
719
721 const InterpFrame *Frame,
722 const CallExpr *Call) {
723 / This is an unevaluated call, so there are no arguments on the stack.
724 assert(Call->getNumArgs() == 1);
725 const Expr *Arg = Call->getArg(0);
726
727 GCCTypeClass ResultClass =
729 int32_t ReturnVal = static_cast<int32_t>(ResultClass);
730 pushInteger(S, ReturnVal, Call->getType());
731 return true;
732}
733
734/ __builtin_expect(long, long)
735/ __builtin_expect_with_probability(long, long, double)
737 const InterpFrame *Frame,
738 const CallExpr *Call) {
739 / The return value is simply the value of the first parameter.
740 / We ignore the probability.
741 unsigned NumArgs = Call->getNumArgs();
742 assert(NumArgs == 2 || NumArgs == 3);
743
744 PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
745 if (NumArgs == 3)
746 S.Stk.discard<Floating>();
747 discard(S.Stk, ArgT);
748
749 APSInt Val = popToAPSInt(S.Stk, ArgT);
750 pushInteger(S, Val, Call->getType());
751 return true;
752}
753
755 const InterpFrame *Frame,
756 const CallExpr *Call) {
757#ifndef NDEBUG
758 assert(Call->getArg(0)->isLValue());
759 PrimType PtrT = S.getContext().classify(Call->getArg(0)).value_or(PT_Ptr);
760 assert(PtrT == PT_Ptr &&
761 "Unsupported pointer type passed to __builtin_addressof()");
762#endif
763 return true;
764}
765
767 const InterpFrame *Frame,
768 const CallExpr *Call) {
769 return Call->getDirectCallee()->isConstexpr();
770}
771
773 const InterpFrame *Frame,
774 const CallExpr *Call) {
775 APSInt Arg = popToAPSInt(S, Call->getArg(0));
776
778 Arg.getZExtValue());
779 pushInteger(S, Result, Call->getType());
780 return true;
781}
782
783/ Two integral values followed by a pointer (lhs, rhs, resultOut)
785 const CallExpr *Call,
786 unsigned BuiltinOp) {
787 const Pointer &ResultPtr = S.Stk.pop<Pointer>();
788 if (ResultPtr.isDummy() || !ResultPtr.isBlockPointer())
789 return false;
790
791 PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
792 PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
793 APSInt RHS = popToAPSInt(S.Stk, RHST);
794 APSInt LHS = popToAPSInt(S.Stk, LHST);
795 QualType ResultType = Call->getArg(2)->getType()->getPointeeType();
796 PrimType ResultT = *S.getContext().classify(ResultType);
797 bool Overflow;
798
800 if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
801 BuiltinOp == Builtin::BI__builtin_sub_overflow ||
802 BuiltinOp == Builtin::BI__builtin_mul_overflow) {
803 bool IsSigned = LHS.isSigned() || RHS.isSigned() ||
805 bool AllSigned = LHS.isSigned() && RHS.isSigned() &&
807 uint64_t LHSSize = LHS.getBitWidth();
808 uint64_t RHSSize = RHS.getBitWidth();
809 uint64_t ResultSize = S.getASTContext().getTypeSize(ResultType);
810 uint64_t MaxBits = std::max(std::max(LHSSize, RHSSize), ResultSize);
811
812 / Add an additional bit if the signedness isn't uniformly agreed to. We
813 / could do this ONLY if there is a signed and an unsigned that both have
814 / MaxBits, but the code to check that is pretty nasty. The issue will be
815 / caught in the shrink-to-result later anyway.
816 if (IsSigned && !AllSigned)
817 ++MaxBits;
818
819 LHS = APSInt(LHS.extOrTrunc(MaxBits), !IsSigned);
820 RHS = APSInt(RHS.extOrTrunc(MaxBits), !IsSigned);
821 Result = APSInt(MaxBits, !IsSigned);
822 }
823
824 / Find largest int.
825 switch (BuiltinOp) {
826 default:
827 llvm_unreachable("Invalid value for BuiltinOp");
828 case Builtin::BI__builtin_add_overflow:
829 case Builtin::BI__builtin_sadd_overflow:
830 case Builtin::BI__builtin_saddl_overflow:
831 case Builtin::BI__builtin_saddll_overflow:
832 case Builtin::BI__builtin_uadd_overflow:
833 case Builtin::BI__builtin_uaddl_overflow:
834 case Builtin::BI__builtin_uaddll_overflow:
835 Result = LHS.isSigned() ? LHS.sadd_ov(RHS, Overflow)
836 : LHS.uadd_ov(RHS, Overflow);
837 break;
838 case Builtin::BI__builtin_sub_overflow:
839 case Builtin::BI__builtin_ssub_overflow:
840 case Builtin::BI__builtin_ssubl_overflow:
841 case Builtin::BI__builtin_ssubll_overflow:
842 case Builtin::BI__builtin_usub_overflow:
843 case Builtin::BI__builtin_usubl_overflow:
844 case Builtin::BI__builtin_usubll_overflow:
845 Result = LHS.isSigned() ? LHS.ssub_ov(RHS, Overflow)
846 : LHS.usub_ov(RHS, Overflow);
847 break;
848 case Builtin::BI__builtin_mul_overflow:
849 case Builtin::BI__builtin_smul_overflow:
850 case Builtin::BI__builtin_smull_overflow:
851 case Builtin::BI__builtin_smulll_overflow:
852 case Builtin::BI__builtin_umul_overflow:
853 case Builtin::BI__builtin_umull_overflow:
854 case Builtin::BI__builtin_umulll_overflow:
855 Result = LHS.isSigned() ? LHS.smul_ov(RHS, Overflow)
856 : LHS.umul_ov(RHS, Overflow);
857 break;
858 }
859
860 / In the case where multiple sizes are allowed, truncate and see if
861 / the values are the same.
862 if (BuiltinOp == Builtin::BI__builtin_add_overflow ||
863 BuiltinOp == Builtin::BI__builtin_sub_overflow ||
864 BuiltinOp == Builtin::BI__builtin_mul_overflow) {
865 / APSInt doesn't have a TruncOrSelf, so we use extOrTrunc instead,
866 / since it will give us the behavior of a TruncOrSelf in the case where
867 / its parameter <= its size. We previously set Result to be at least the
868 / type-size of the result, so getTypeSize(ResultType) <= Resu
869 APSInt Temp = Result.extOrTrunc(S.getASTContext().getTypeSize(ResultType));
870 Temp.setIsSigned(ResultType->isSignedIntegerOrEnumerationType());
871
872 if (!APSInt::isSameValue(Temp, Result))
873 Overflow = true;
874 Result = std::move(Temp);
875 }
876
877 / Write Result to ResultPtr and put Overflow on the stack.
878 assignInteger(S, ResultPtr, ResultT, Result);
879 if (ResultPtr.canBeInitialized())
880 ResultPtr.initialize();
881
882 assert(Call->getDirectCallee()->getReturnType()->isBooleanType());
883 S.Stk.push<Boolean>(Overflow);
884 return true;
885}
886
887/ Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
889 const InterpFrame *Frame,
890 const CallExpr *Call, unsigned BuiltinOp) {
891 const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();
892 PrimType LHST = *S.getContext().classify(Call->getArg(0)->getType());
893 PrimType RHST = *S.getContext().classify(Call->getArg(1)->getType());
894 APSInt CarryIn = popToAPSInt(S.Stk, LHST);
895 APSInt RHS = popToAPSInt(S.Stk, RHST);
896 APSInt LHS = popToAPSInt(S.Stk, LHST);
897
898 if (CarryOutPtr.isDummy() || !CarryOutPtr.isBlockPointer())
899 return false;
900
901 APSInt CarryOut;
902
904 / Copy the number of bits and sign.
905 Result = LHS;
906 CarryOut = LHS;
907
908 bool FirstOverflowed = false;
909 bool SecondOverflowed = false;
910 switch (BuiltinOp) {
911 default:
912 llvm_unreachable("Invalid value for BuiltinOp");
913 case Builtin::BI__builtin_addcb:
914 case Builtin::BI__builtin_addcs:
915 case Builtin::BI__builtin_addc:
916 case Builtin::BI__builtin_addcl:
917 case Builtin::BI__builtin_addcll:
918 Result =
919 LHS.uadd_ov(RHS, FirstOverflowed).uadd_ov(CarryIn, SecondOverflowed);
920 break;
921 case Builtin::BI__builtin_subcb:
922 case Builtin::BI__builtin_subcs:
923 case Builtin::BI__builtin_subc:
924 case Builtin::BI__builtin_subcl:
925 case Builtin::BI__builtin_subcll:
926 Result =
927 LHS.usub_ov(RHS, FirstOverflowed).usub_ov(CarryIn, SecondOverflowed);
928 break;
929 }
930 / It is possible for both overflows to happen but CGBuiltin uses an OR so
931 / this is consistent.
932 CarryOut = (uint64_t)(FirstOverflowed | SecondOverflowed);
933
934 QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
935 PrimType CarryOutT = *S.getContext().classify(CarryOutType);
936 assignInteger(S, CarryOutPtr, CarryOutT, CarryOut);
937 CarryOutPtr.initialize();
938
939 assert(Call->getType() == Call->getArg(0)->getType());
940 pushInteger(S, Result, Call->getType());
941 return true;
942}
943
945 const InterpFrame *Frame, const CallExpr *Call,
946 unsigned BuiltinOp) {
947
948 std::optional<APSInt> Fallback;
949 if (BuiltinOp == Builtin::BI__builtin_clzg && Call->getNumArgs() == 2)
950 Fallback = popToAPSInt(S, Call->getArg(1));
951
952 APSInt Val;
953 if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
954 const Pointer &Arg = S.Stk.pop<Pointer>();
955 Val = convertBoolVectorToInt(Arg);
956 } else {
957 Val = popToAPSInt(S, Call->getArg(0));
958 }
959
960 / When the argument is 0, the result of GCC builtins is undefined, whereas
961 / for Microsoft intrinsics, the result is the bit-width of the argument.
962 bool ZeroIsUndefined = BuiltinOp != Builtin::BI__lzcnt16 &&
963 BuiltinOp != Builtin::BI__lzcnt &&
964 BuiltinOp != Builtin::BI__lzcnt64;
965
966 if (Val == 0) {
967 if (Fallback) {
968 pushInteger(S, *Fallback, Call->getType());
969 return true;
970 }
971
972 if (ZeroIsUndefined)
973 return false;
974 }
975
976 pushInteger(S, Val.countl_zero(), Call->getType());
977 return true;
978}
979
981 const InterpFrame *Frame, const CallExpr *Call,
982 unsigned BuiltinID) {
983 std::optional<APSInt> Fallback;
984 if (BuiltinID == Builtin::BI__builtin_ctzg && Call->getNumArgs() == 2)
985 Fallback = popToAPSInt(S, Call->getArg(1));
986
987 APSInt Val;
988 if (Call->getArg(0)->getType()->isExtVectorBoolType()) {
989 const Pointer &Arg = S.Stk.pop<Pointer>();
990 Val = convertBoolVectorToInt(Arg);
991 } else {
992 Val = popToAPSInt(S, Call->getArg(0));
993 }
994
995 if (Val == 0) {
996 if (Fallback) {
997 pushInteger(S, *Fallback, Call->getType());
998 return true;
999 }
1000 return false;
1001 }
1002
1003 pushInteger(S, Val.countr_zero(), Call->getType());
1004 return true;
1005}
1006
1008 const InterpFrame *Frame,
1009 const CallExpr *Call) {
1010 const APSInt &Val = popToAPSInt(S, Call->getArg(0));
1011 if (Val.getBitWidth() == 8)
1012 pushInteger(S, Val, Call->getType());
1013 else
1014 pushInteger(S, Val.byteSwap(), Call->getType());
1015 return true;
1016}
1017
1018/ bool __atomic_always_lock_free(size_t, void const volatile*)
1019/ bool __atomic_is_lock_free(size_t, void const volatile*)
1021 const InterpFrame *Frame,
1022 const CallExpr *Call,
1023 unsigned BuiltinOp) {
1024 auto returnBool = [&S](bool Value) -> bool {
1025 S.Stk.push<Boolean>(Value);
1026 return true;
1027 };
1028
1029 const Pointer &Ptr = S.Stk.pop<Pointer>();
1030 uint64_t SizeVal = popToUInt64(S, Call->getArg(0));
1031
1032 / For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
1033 / of two less than or equal to the maximum inline atomic width, we know it
1034 / is lock-free. If the size isn't a power of two, or greater than the
1035 / maximum alignment where we promote atomics, we know it is not lock-free
1036 / (at least not in the sense of atomic_is_lock_free). Otherwise,
1037 / the answer can only be determined at runtime; for example, 16-byte
1038 / atomics have lock-free implementations on some, but not all,
1039 / x86-64 processors.
1040
1041 / Check power-of-two.
1042 CharUnits Size = CharUnits::fromQuantity(SizeVal);
1043 if (Size.isPowerOfTwo()) {
1044 / Check against inlining width.
1045 unsigned InlineWidthBits =
1047 if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {
1048
1049 / OK, we will inline appropriately-aligned operations of this size,
1050 / and _Atomic(T) is appropriately-aligned.
1051 if (Size == CharUnits::One())
1052 return returnBool(true);
1053
1054 / Same for null pointers.
1055 assert(BuiltinOp != Builtin::BI__c11_atomic_is_lock_free);
1056 if (Ptr.isZero())
1057 return returnBool(true);
1058
1059 if (Ptr.isIntegralPointer()) {
1060 uint64_t IntVal = Ptr.getIntegerRepresentation();
1061 if (APSInt(APInt(64, IntVal, false), true).isAligned(Size.getAsAlign()))
1062 return returnBool(true);
1063 }
1064
1065 const Expr *PtrArg = Call->getArg(1);
1066 / Otherwise, check if the type's alignment against Size.
1067 if (const auto *ICE = dyn_cast<ImplicitCastExpr>(PtrArg)) {
1068 / Drop the potential implicit-cast to 'const volatile void*', getting
1069 / the underlying type.
1070 if (ICE->getCastKind() == CK_BitCast)
1071 PtrArg = ICE->getSubExpr();
1072 }
1073
1074 if (const auto *PtrTy = PtrArg->getType()->getAs<PointerType>()) {
1075 QualType PointeeType = PtrTy->getPointeeType();
1076 if (!PointeeType->isIncompleteType() &&
1077 S.getASTContext().getTypeAlignInChars(PointeeType) >= Size) {
1078 / OK, we will inline operations on this object.
1079 return returnBool(true);
1080 }
1081 }
1082 }
1083 }
1084
1085 if (BuiltinOp == Builtin::BI__atomic_always_lock_free)
1086 return returnBool(false);
1087
1088 return false;
1089}
1090
// bool __c11_atomic_is_lock_free(size_t)
// Constant-evaluates __c11_atomic_is_lock_free: pops the byte-count operand
// and pushes true only when the size is a power of two no larger than the
// target's inline-atomic width. In all other cases evaluation fails (the
// call is not a constant expression).
// NOTE(review): the opening signature line and the initializer of
// InlineWidthBits are missing from this excerpt — presumably the target's
// max atomic inline width; confirm against the full source.
                                        CodePtr OpPC,
                                        const InterpFrame *Frame,
                                        const CallExpr *Call) {
  // Argument 0 is the queried size in bytes.
  uint64_t SizeVal = popToUInt64(S, Call->getArg(0));

  CharUnits Size = CharUnits::fromQuantity(SizeVal);
  if (Size.isPowerOfTwo()) {
    // Check against inlining width.
    unsigned InlineWidthBits =
    if (Size <= S.getASTContext().toCharUnitsFromBits(InlineWidthBits)) {
      S.Stk.push<Boolean>(true);
      return true;
    }
  }

  return false; // returnBool(false);
}
1111
// __builtin_complex(Float A, float B);
// Constructs a _Complex value from two floating-point operands: pops both
// arguments (imaginary part first, since it was pushed last) and writes them
// into the two elements of the complex result object already on the stack.
// NOTE(review): the opening signature line is missing from this excerpt.
                                    const InterpFrame *Frame,
                                    const CallExpr *Call) {
  const Floating &Arg2 = S.Stk.pop<Floating>(); // imaginary component
  const Floating &Arg1 = S.Stk.pop<Floating>(); // real component
  Pointer &Result = S.Stk.peek<Pointer>();

  Result.elem<Floating>(0) = Arg1;
  Result.elem<Floating>(1) = Arg2;
  Result.initializeAllElements();

  return true;
}
1126
// __builtin_is_aligned()
// __builtin_align_up()
// __builtin_align_down()
// The first parameter is either an integer or a pointer.
// The second parameter is the requested alignment as an integer.
// NOTE(review): the opening signature line and the initializer of
// BaseAlignment (marked below) are missing from this excerpt.
                                     const InterpFrame *Frame,
                                     const CallExpr *Call,
                                     unsigned BuiltinOp) {
  const APSInt &Alignment = popToAPSInt(S, Call->getArg(1));

  // The requested alignment must be a positive power of two.
  if (Alignment < 0 || !Alignment.isPowerOf2()) {
    S.FFDiag(Call, diag::note_constexpr_invalid_alignment) << Alignment;
    return false;
  }
  // Reject alignments not representable in the first argument's type
  // (anything above its most significant bit).
  unsigned SrcWidth = S.getASTContext().getIntWidth(Call->getArg(0)->getType());
  APSInt MaxValue(APInt::getOneBitSet(SrcWidth, SrcWidth - 1));
  if (APSInt::compareValues(Alignment, MaxValue) > 0) {
    S.FFDiag(Call, diag::note_constexpr_alignment_too_big)
        << MaxValue << Call->getArg(0)->getType() << Alignment;
    return false;
  }

  // The first parameter is either an integer or a pointer.
  PrimType FirstArgT = *S.Ctx.classify(Call->getArg(0));

  if (isIntegralType(FirstArgT)) {
    // Purely arithmetic case: mask/round the integer value directly.
    const APSInt &Src = popToAPSInt(S.Stk, FirstArgT);
    APInt AlignMinusOne = Alignment.extOrTrunc(Src.getBitWidth()) - 1;
    if (BuiltinOp == Builtin::BI__builtin_align_up) {
      // Round up: add (align - 1), then clear the low bits.
      APSInt AlignedVal =
          APSInt((Src + AlignMinusOne) & ~AlignMinusOne, Src.isUnsigned());
      pushInteger(S, AlignedVal, Call->getType());
    } else if (BuiltinOp == Builtin::BI__builtin_align_down) {
      // Round down: just clear the low bits.
      APSInt AlignedVal = APSInt(Src & ~AlignMinusOne, Src.isUnsigned());
      pushInteger(S, AlignedVal, Call->getType());
    } else {
      // __builtin_is_aligned: true iff the low bits are all zero.
      assert(*S.Ctx.classify(Call->getType()) == PT_Bool);
      S.Stk.push<Boolean>((Src & AlignMinusOne) == 0);
    }
    return true;
  }
  assert(FirstArgT == PT_Ptr);
  const Pointer &Ptr = S.Stk.pop<Pointer>();
  if (!Ptr.isBlockPointer())
    return false;

  // Best known alignment of the pointer: the base object's alignment
  // adjusted by the pointer's offset within that object.
  unsigned PtrOffset = Ptr.getIndex();
  CharUnits BaseAlignment =
  // NOTE(review): the initializer of BaseAlignment is missing from this
  // excerpt — presumably the alignment of Ptr's base declaration; confirm
  // against the full source.
  CharUnits PtrAlign =
      BaseAlignment.alignmentAtOffset(CharUnits::fromQuantity(PtrOffset));

  if (BuiltinOp == Builtin::BI__builtin_is_aligned) {
    if (PtrAlign.getQuantity() >= Alignment) {
      S.Stk.push<Boolean>(true);
      return true;
    }
    // If the alignment is not known to be sufficient, some cases could still
    // be aligned at run time. However, if the requested alignment is less or
    // equal to the base alignment and the offset is not aligned, we know that
    // the run-time value can never be aligned.
    if (BaseAlignment.getQuantity() >= Alignment &&
        PtrAlign.getQuantity() < Alignment) {
      S.Stk.push<Boolean>(false);
      return true;
    }

    // Otherwise the answer depends on the run-time address: not constant.
    S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_compute)
        << Alignment;
    return false;
  }

  assert(BuiltinOp == Builtin::BI__builtin_align_down ||
         BuiltinOp == Builtin::BI__builtin_align_up);

  // For align_up/align_down, we can return the same value if the alignment
  // is known to be greater or equal to the requested value.
  if (PtrAlign.getQuantity() >= Alignment) {
    S.Stk.push<Pointer>(Ptr);
    return true;
  }

  // The alignment could be greater than the minimum at run-time, so we cannot
  // infer much about the resulting pointer value. One case is possible:
  // For `_Alignas(32) char buf[N]; __builtin_align_down(&buf[idx], 32)` we
  // can infer the correct index if the requested alignment is smaller than
  // the base alignment so we can perform the computation on the offset.
  if (BaseAlignment.getQuantity() >= Alignment) {
    assert(Alignment.getBitWidth() <= 64 &&
           "Cannot handle > 64-bit address-space");
    uint64_t Alignment64 = Alignment.getZExtValue();
    CharUnits NewOffset =
        CharUnits::fromQuantity(BuiltinOp == Builtin::BI__builtin_align_down
                                    ? llvm::alignDown(PtrOffset, Alignment64)
                                    : llvm::alignTo(PtrOffset, Alignment64));

    S.Stk.push<Pointer>(Ptr.atIndex(NewOffset.getQuantity()));
    return true;
  }

  // Otherwise, we cannot constant-evaluate the result.
  S.FFDiag(Call->getArg(0), diag::note_constexpr_alignment_adjust) << Alignment;
  return false;
}
1232
// __builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
// Verifies the assumption at compile time: the pointer (minus ExtraOffset,
// if given) must actually be aligned to Alignment, otherwise the call is not
// a constant expression. The pointer itself is passed through unchanged.
// NOTE(review): the opening signature line is missing from this excerpt.
                                            const InterpFrame *Frame,
                                            const CallExpr *Call) {
  assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);

  // Arguments are popped in reverse order of the call.
  std::optional<APSInt> ExtraOffset;
  if (Call->getNumArgs() == 3)
    ExtraOffset = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(2)));

  APSInt Alignment = popToAPSInt(S.Stk, *S.Ctx.classify(Call->getArg(1)));
  const Pointer &Ptr = S.Stk.pop<Pointer>();

  CharUnits Align = CharUnits::fromQuantity(Alignment.getZExtValue());

  // If there is a base object, then it must have the correct alignment.
  if (Ptr.isBlockPointer()) {
    CharUnits BaseAlignment;
    if (const auto *VD = Ptr.getDeclDesc()->asValueDecl())
      BaseAlignment = S.getASTContext().getDeclAlign(VD);
    else if (const auto *E = Ptr.getDeclDesc()->asExpr())
      BaseAlignment = GetAlignOfExpr(S.getASTContext(), E, UETT_AlignOf);

    if (BaseAlignment < Align) {
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_insufficient_alignment)
          << 0 << BaseAlignment.getQuantity() << Align.getQuantity();
      return false;
    }
  }

  // The pointer's offset (adjusted by ExtraOffset) must also be a multiple
  // of the assumed alignment.
  APValue AV = Ptr.toAPValue(S.getASTContext());
  CharUnits AVOffset = AV.getLValueOffset();
  if (ExtraOffset)
    AVOffset -= CharUnits::fromQuantity(ExtraOffset->getZExtValue());
  if (AVOffset.alignTo(Align) != AVOffset) {
    if (Ptr.isBlockPointer())
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_insufficient_alignment)
          << 1 << AVOffset.getQuantity() << Align.getQuantity();
    else
      S.CCEDiag(Call->getArg(0),
                diag::note_constexpr_baa_value_insufficient_alignment)
          << AVOffset.getQuantity() << Align.getQuantity();
    return false;
  }

  // Value-wise the builtin is a no-op: push the pointer back.
  S.Stk.push<Pointer>(Ptr);
  return true;
}
1283
// (CarryIn, LHS, RHS, Result)
// Implements the x86 addcarry/subborrow builtins: computes LHS +/- RHS with
// an incoming carry/borrow bit, stores the truncated result through the
// fourth (pointer) argument, and pushes the outgoing carry/borrow bit.
// NOTE(review): the opening signature line is missing from this excerpt.
                                           CodePtr OpPC,
                                           const InterpFrame *Frame,
                                           const CallExpr *Call,
                                           unsigned BuiltinOp) {
  // All three value operands must be integers.
  if (Call->getNumArgs() != 4 || !Call->getArg(0)->getType()->isIntegerType() ||
      !Call->getArg(1)->getType()->isIntegerType() ||
      !Call->getArg(2)->getType()->isIntegerType())
    return false;

  const Pointer &CarryOutPtr = S.Stk.pop<Pointer>();

  // Popped in reverse order of the arguments.
  APSInt RHS = popToAPSInt(S, Call->getArg(2));
  APSInt LHS = popToAPSInt(S, Call->getArg(1));
  APSInt CarryIn = popToAPSInt(S, Call->getArg(0));

  bool IsAdd = BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u32 ||
               BuiltinOp == clang::X86::BI__builtin_ia32_addcarryx_u64;

  unsigned BitWidth = LHS.getBitWidth();
  // Any nonzero carry-in counts as 1.
  unsigned CarryInBit = CarryIn.ugt(0) ? 1 : 0;
  // Compute in BitWidth+1 bits so the extra top bit captures the
  // carry/borrow out.
  APInt ExResult =
      IsAdd ? (LHS.zext(BitWidth + 1) + (RHS.zext(BitWidth + 1) + CarryInBit))
            : (LHS.zext(BitWidth + 1) - (RHS.zext(BitWidth + 1) + CarryInBit));

  APInt Result = ExResult.extractBits(BitWidth, 0);
  APSInt CarryOut =
      APSInt(ExResult.extractBits(1, BitWidth), /*IsUnsigned=*/true);

  // Store the arithmetic result through the out-pointer, typed as the
  // pointee of argument 3.
  QualType CarryOutType = Call->getArg(3)->getType()->getPointeeType();
  PrimType CarryOutT = *S.getContext().classify(CarryOutType);
  assignInteger(S, CarryOutPtr, CarryOutT, APSInt(std::move(Result), true));

  pushInteger(S, CarryOut, Call->getType());

  return true;
}
1322
// Pushes the computed os_log buffer size as the call's result.
// NOTE(review): the opening signature line and the lines that compute
// `Layout` (presumably via the analyze_os_log helpers from
// clang/AST/OSLog.h) are missing from this excerpt — confirm against the
// full source.
                                                CodePtr OpPC,
                                                const InterpFrame *Frame,
                                                const CallExpr *Call) {
  pushInteger(S, Layout.size().getQuantity(), Call->getType());
  return true;
}
1332
static bool
// NOTE(review): the remainder of the signature (function name and leading
// parameters) is missing from this excerpt.
                                          const InterpFrame *Frame,
                                          const CallExpr *Call) {
  const auto &Ptr = S.Stk.pop<Pointer>();
  assert(Ptr.getFieldDesc()->isPrimitiveArray());

  // This should be created for a StringLiteral, so should always hold at
  // least one array element.
  assert(Ptr.getFieldDesc()->getNumElems() >= 1);
  // Hash the string contents (excluding the null terminator) with the
  // stable SipHash and push the result.
  StringRef R(&Ptr.deref<char>(), Ptr.getFieldDesc()->getNumElems() - 1);
  uint64_t Result = getPointerAuthStableSipHash(R);
  pushInteger(S, Result, Call->getType());
  return true;
}
1348
// Implements __builtin_infer_alloc_token: infers the allocated type from the
// call and pushes the corresponding allocation token as a size_t value.
// NOTE(review): the opening signature line is missing from this excerpt.
                                            const InterpFrame *Frame,
                                            const CallExpr *Call) {
  const ASTContext &ASTCtx = S.getASTContext();
  uint64_t BitWidth = ASTCtx.getTypeSize(ASTCtx.getSizeType());
  auto Mode =
      ASTCtx.getLangOpts().AllocTokenMode.value_or(llvm::DefaultAllocTokenMode);
  auto MaxTokensOpt = ASTCtx.getLangOpts().AllocTokenMax;
  // With no configured maximum, allow the full range representable in
  // size_t.
  uint64_t MaxTokens =
      MaxTokensOpt.value_or(0) ? *MaxTokensOpt : (~0ULL >> (64 - BitWidth));

  // We do not read any of the arguments; discard them.
  for (int I = Call->getNumArgs() - 1; I >= 0; --I)
    discard(S.Stk, *S.getContext().classify(Call->getArg(I)));

  // Note: Type inference from a surrounding cast is not supported in
  // constexpr evaluation.
  QualType AllocType = infer_alloc::inferPossibleType(Call, ASTCtx, nullptr);
  if (AllocType.isNull()) {
    S.CCEDiag(Call,
              diag::note_constexpr_infer_alloc_token_type_inference_failed);
    return false;
  }

  auto ATMD = infer_alloc::getAllocTokenMetadata(AllocType, ASTCtx);
  if (!ATMD) {
    S.CCEDiag(Call, diag::note_constexpr_infer_alloc_token_no_metadata);
    return false;
  }

  auto MaybeToken = llvm::getAllocToken(Mode, *ATMD, MaxTokens);
  if (!MaybeToken) {
    S.CCEDiag(Call, diag::note_constexpr_infer_alloc_token_stateful_mode);
    return false;
  }

  pushInteger(S, llvm::APInt(BitWidth, *MaybeToken), ASTCtx.getSizeType());
  return true;
}
1388
// Implements constant-evaluation of __builtin_operator_new, which is only
// valid when called from within std::allocator<T>::allocate().
// NOTE(review): the opening signature line and several continuation lines of
// the comparison at the size check, the Allocator.allocate() calls, the
// getConstantArrayType() call and createDescriptor() call are missing from
// this excerpt — confirm against the full source.
                                          const InterpFrame *Frame,
                                          const CallExpr *Call) {
  // A call to __operator_new is only valid within std::allocate<>::allocate.
  // Walk up the call stack to find the appropriate caller and get the
  // element type from it.
  auto [NewCall, ElemType] = S.getStdAllocatorCaller("allocate");

  if (ElemType.isNull()) {
    S.FFDiag(Call, S.getLangOpts().CPlusPlus20
                       ? diag::note_constexpr_new_untyped
                       : diag::note_constexpr_new);
    return false;
  }
  assert(NewCall);

  if (ElemType->isIncompleteType() || ElemType->isFunctionType()) {
    S.FFDiag(Call, diag::note_constexpr_new_not_complete_object_type)
        << (ElemType->isIncompleteType() ? 0 : 1) << ElemType;
    return false;
  }

  // We only care about the first parameter (the size), so discard all the
  // others.
  {
    unsigned NumArgs = Call->getNumArgs();
    assert(NumArgs >= 1);

    // The std::nothrow_t arg never gets put on the stack.
    if (Call->getArg(NumArgs - 1)->getType()->isNothrowT())
      --NumArgs;
    auto Args = ArrayRef(Call->getArgs(), Call->getNumArgs());
    // First arg is needed.
    Args = Args.drop_front();

    // Discard the rest.
    for (const Expr *Arg : Args)
      discard(S.Stk, *S.getContext().classify(Arg));
  }

  APSInt Bytes = popToAPSInt(S, Call->getArg(0));
  CharUnits ElemSize = S.getASTContext().getTypeSizeInChars(ElemType);
  assert(!ElemSize.isZero());
  // Divide the number of bytes by sizeof(ElemType), so we get the number of
  // elements we should allocate.
  APInt NumElems, Remainder;
  APInt ElemSizeAP(Bytes.getBitWidth(), ElemSize.getQuantity());
  APInt::udivrem(Bytes, ElemSizeAP, NumElems, Remainder);
  if (Remainder != 0) {
    // This likely indicates a bug in the implementation of 'std::allocator'.
    S.FFDiag(Call, diag::note_constexpr_operator_new_bad_size)
        << Bytes << APSInt(ElemSizeAP, true) << ElemType;
    return false;
  }

  // NB: The same check we're using in CheckArraySize()
  if (NumElems.getActiveBits() >
  // NOTE(review): the right-hand side of this comparison and the `||` line
  // are missing from this excerpt.
      NumElems.ugt(Descriptor::MaxArrayElemBytes / ElemSize.getQuantity())) {
    // FIXME: NoThrow check?
    const SourceInfo &Loc = S.Current->getSource(OpPC);
    S.FFDiag(Loc, diag::note_constexpr_new_too_large)
        << NumElems.getZExtValue();
    return false;
  }

  if (!CheckArraySize(S, OpPC, NumElems.getZExtValue()))
    return false;

  bool IsArray = NumElems.ugt(1);
  OptPrimType ElemT = S.getContext().classify(ElemType);
  DynamicAllocator &Allocator = S.getAllocator();
  if (ElemT) {
    // Primitive element type: allocate a primitive array directly.
    Block *B =
        Allocator.allocate(NewCall, *ElemT, NumElems.getZExtValue(),
    assert(B);
    S.Stk.push<Pointer>(Pointer(B).atIndex(0));
    return true;
  }

  assert(!ElemT);

  // Composite arrays
  if (IsArray) {
    const Descriptor *Desc =
        S.P.createDescriptor(NewCall, ElemType.getTypePtr(), std::nullopt);
    Block *B =
        Allocator.allocate(Desc, NumElems.getZExtValue(), S.Ctx.getEvalID(),
    assert(B);
    S.Stk.push<Pointer>(Pointer(B).atIndex(0).narrow());
    return true;
  }

  // Records. Still allocate them as single-element arrays.
      ElemType, NumElems, nullptr, ArraySizeModifier::Normal, 0);

  const Descriptor *Desc = S.P.createDescriptor(NewCall, AllocType.getTypePtr(),
  Block *B = Allocator.allocate(Desc, S.getContext().getEvalID(),
  assert(B);
  S.Stk.push<Pointer>(Pointer(B).atIndex(0).narrow());
  return true;
}
1496
// Implements constant-evaluation of __builtin_operator_delete, which is only
// permitted within a call to std::allocator<T>::deallocate().
// NOTE(review): the opening signature line, the condition of the first guard
// below, and one diagnostic continuation line are missing from this excerpt
// — confirm against the full source.
                                             const InterpFrame *Frame,
                                             const CallExpr *Call) {
  const Expr *Source = nullptr;
  const Block *BlockToDelete = nullptr;

  // (guard condition missing from excerpt)
    S.Stk.discard<Pointer>();
    return false;
  }

  // This is permitted only within a call to std::allocator<T>::deallocate.
  if (!S.getStdAllocatorCaller("deallocate")) {
    S.FFDiag(Call);
    S.Stk.discard<Pointer>();
    return true;
  }

  {
    const Pointer &Ptr = S.Stk.pop<Pointer>();

    // Deallocating a null pointer is accepted (with a CCE note).
    if (Ptr.isZero()) {
      S.CCEDiag(Call, diag::note_constexpr_deallocate_null);
      return true;
    }

    Source = Ptr.getDeclDesc()->asExpr();
    BlockToDelete = Ptr.block();

    // Only dynamically-allocated blocks may be deleted.
    if (!BlockToDelete->isDynamic()) {
      S.FFDiag(Call, diag::note_constexpr_delete_not_heap_alloc)
      if (const auto *D = Ptr.getFieldDesc()->asDecl())
        S.Note(D->getLocation(), diag::note_declared_at);
    }
  }
  assert(BlockToDelete);

  // Record the allocation form before deallocating so we can check that the
  // allocation and deallocation forms match afterwards.
  DynamicAllocator &Allocator = S.getAllocator();
  const Descriptor *BlockDesc = BlockToDelete->getDescriptor();
  std::optional<DynamicAllocator::Form> AllocForm =
      Allocator.getAllocationForm(Source);

  if (!Allocator.deallocate(Source, BlockToDelete, S)) {
    // Nothing has been deallocated, this must be a double-delete.
    const SourceInfo &Loc = S.Current->getSource(OpPC);
    S.FFDiag(Loc, diag::note_constexpr_double_delete);
    return false;
  }
  assert(AllocForm);

  return CheckNewDeleteForms(
      S, OpPC, *AllocForm, DynamicAllocator::Form::Operator, BlockDesc, Source);
}
1551
// NOTE(review): the signature line is missing from this excerpt. This
// handler pops a floating-point argument and pushes it back unchanged, i.e.
// it implements a builtin that is an identity operation on its float
// operand during constant evaluation.
                                   const InterpFrame *Frame,
                                   const CallExpr *Call) {
  const Floating &Arg0 = S.Stk.pop<Floating>();
  S.Stk.push<Floating>(Arg0);
  return true;
}
1559
// Handles the __builtin_reduce_* family: horizontally reduces the elements
// of an integer vector with the operation selected by ID (add, mul, and,
// or, xor, min, max) and pushes the scalar result.
// NOTE(review): the opening signature line and the INT_TYPE_SWITCH line
// that opens the lambda and binds the element type T are missing from this
// excerpt — confirm against the full source.
                                     const CallExpr *Call, unsigned ID) {
  const Pointer &Arg = S.Stk.pop<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());

  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  assert(Call->getType() == ElemType);
  PrimType ElemT = *S.getContext().classify(ElemType);
  unsigned NumElems = Arg.getNumElems();

    // Fold left-to-right, starting from element 0.
    T Result = Arg.elem<T>(0);
    unsigned BitWidth = Result.bitWidth();
    for (unsigned I = 1; I != NumElems; ++I) {
      T Elem = Arg.elem<T>(I);
      T PrevResult = Result;

      if (ID == Builtin::BI__builtin_reduce_add) {
        // Overflow during the reduction makes the call non-constant; report
        // the widened value that overflowed.
        if (T::add(Result, Elem, BitWidth, &Result)) {
          unsigned OverflowBits = BitWidth + 1;
          (void)handleOverflow(S, OpPC,
                               (PrevResult.toAPSInt(OverflowBits) +
                                Elem.toAPSInt(OverflowBits)));
          return false;
        }
      } else if (ID == Builtin::BI__builtin_reduce_mul) {
        if (T::mul(Result, Elem, BitWidth, &Result)) {
          unsigned OverflowBits = BitWidth * 2;
          (void)handleOverflow(S, OpPC,
                               (PrevResult.toAPSInt(OverflowBits) *
                                Elem.toAPSInt(OverflowBits)));
          return false;
        }

      } else if (ID == Builtin::BI__builtin_reduce_and) {
        // Bitwise reductions cannot overflow.
        (void)T::bitAnd(Result, Elem, BitWidth, &Result);
      } else if (ID == Builtin::BI__builtin_reduce_or) {
        (void)T::bitOr(Result, Elem, BitWidth, &Result);
      } else if (ID == Builtin::BI__builtin_reduce_xor) {
        (void)T::bitXor(Result, Elem, BitWidth, &Result);
      } else if (ID == Builtin::BI__builtin_reduce_min) {
        if (Elem < Result)
          Result = Elem;
      } else if (ID == Builtin::BI__builtin_reduce_max) {
        if (Elem > Result)
          Result = Elem;
      } else {
        llvm_unreachable("Unhandled vector reduce builtin");
      }
    }
    pushInteger(S, Result.toAPSInt(), Call->getType());
  });

  return true;
}
1615
// Handles __builtin_elementwise_abs for integer, floating-point, and vector
// arguments. For vectors the result is written element-by-element into the
// destination object already on the stack.
// NOTE(review): the opening signature line, the type-switch macro line that
// binds T in the integer-vector branch, its closing continuation, and the
// final initializeAllElements() line are missing from this excerpt —
// confirm against the full source.
                                        const InterpFrame *Frame,
                                        const CallExpr *Call,
                                        unsigned BuiltinID) {
  assert(Call->getNumArgs() == 1);
  QualType Ty = Call->getArg(0)->getType();
  if (Ty->isIntegerType()) {
    // Scalar integer: APSInt::abs does the work.
    APSInt Val = popToAPSInt(S, Call->getArg(0));
    pushInteger(S, Val.abs(), Call->getType());
    return true;
  }

  if (Ty->isFloatingType()) {
    Floating Val = S.Stk.pop<Floating>();
    Floating Result = abs(S, Val);
    S.Stk.push<Floating>(Result);
    return true;
  }

  // Otherwise, the argument must be a vector.
  assert(Call->getArg(0)->getType()->isVectorType());
  const Pointer &Arg = S.Stk.pop<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());
  const Pointer &Dst = S.Stk.peek<Pointer>();
  assert(Dst.getFieldDesc()->isPrimitiveArray());
  assert(Arg.getFieldDesc()->getNumElems() ==
         Dst.getFieldDesc()->getNumElems());

  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  PrimType ElemT = *S.getContext().classify(ElemType);
  unsigned NumElems = Arg.getNumElems();
  // we can either have a vector of integer or a vector of floating point
  for (unsigned I = 0; I != NumElems; ++I) {
    if (ElemType->isIntegerType()) {
        Dst.elem<T>(I) = T::from(static_cast<T>(
            APSInt(Arg.elem<T>(I).toAPSInt().abs(),
      });
    } else {
      Floating Val = Arg.elem<Floating>(I);
      Dst.elem<Floating>(I) = abs(S, Val);
    }
  }

  return true;
}
1664
// Can be called with an integer or vector as the first and only parameter.
// Handles __builtin_elementwise_clzg / __builtin_elementwise_ctzg: counts
// leading/trailing zeros, with an optional second operand supplying the
// result for zero inputs.
// NOTE(review): the opening signature line and the INT_TYPE_SWITCH line
// opening the per-element lambda in the vector loop are missing from this
// excerpt — confirm against the full source.
                                              CodePtr OpPC,
                                              const InterpFrame *Frame,
                                              const CallExpr *Call,
                                              unsigned BuiltinID) {
  bool HasZeroArg = Call->getNumArgs() == 2;
  bool IsCTTZ = BuiltinID == Builtin::BI__builtin_elementwise_ctzg;
  assert(Call->getNumArgs() == 1 || HasZeroArg);
  if (Call->getArg(0)->getType()->isIntegerType()) {
    PrimType ArgT = *S.getContext().classify(Call->getArg(0)->getType());
    APSInt Val = popToAPSInt(S.Stk, ArgT);
    std::optional<APSInt> ZeroVal;
    if (HasZeroArg) {
      // Arguments pop in reverse order: the first pop is the zero-fallback
      // value, the second is the actual operand.
      ZeroVal = Val;
      Val = popToAPSInt(S.Stk, ArgT);
    }

    if (Val.isZero()) {
      if (ZeroVal) {
        pushInteger(S, *ZeroVal, Call->getType());
        return true;
      }
      // If we haven't been provided the second argument, the result is
      // undefined
      S.FFDiag(S.Current->getSource(OpPC),
               diag::note_constexpr_countzeroes_zero)
          << /*IsTrailing=*/IsCTTZ;
      return false;
    }

    if (BuiltinID == Builtin::BI__builtin_elementwise_clzg) {
      pushInteger(S, Val.countLeadingZeros(), Call->getType());
    } else {
      pushInteger(S, Val.countTrailingZeros(), Call->getType());
    }
    return true;
  }
  // Otherwise, the argument must be a vector.
  const ASTContext &ASTCtx = S.getASTContext();
  Pointer ZeroArg;
  if (HasZeroArg) {
    assert(Call->getArg(1)->getType()->isVectorType() &&
           ASTCtx.hasSameUnqualifiedType(Call->getArg(0)->getType(),
                                         Call->getArg(1)->getType()));
    (void)ASTCtx;
    ZeroArg = S.Stk.pop<Pointer>();
    assert(ZeroArg.getFieldDesc()->isPrimitiveArray());
  }
  assert(Call->getArg(0)->getType()->isVectorType());
  const Pointer &Arg = S.Stk.pop<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());
  const Pointer &Dst = S.Stk.peek<Pointer>();
  assert(Dst.getFieldDesc()->isPrimitiveArray());
  assert(Arg.getFieldDesc()->getNumElems() ==
         Dst.getFieldDesc()->getNumElems());

  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  PrimType ElemT = *S.getContext().classify(ElemType);
  unsigned NumElems = Arg.getNumElems();

  // FIXME: Reading from uninitialized vector elements?
  for (unsigned I = 0; I != NumElems; ++I) {
      APInt EltVal = Arg.atIndex(I).deref<T>().toAPSInt();
      if (EltVal.isZero()) {
        if (HasZeroArg) {
          // Take the per-element fallback from the second vector.
          Dst.atIndex(I).deref<T>() = ZeroArg.atIndex(I).deref<T>();
        } else {
          // If we haven't been provided the second argument, the result is
          // undefined
          S.FFDiag(S.Current->getSource(OpPC),
                   diag::note_constexpr_countzeroes_zero)
              << /*IsTrailing=*/IsCTTZ;
          return false;
        }
      } else if (IsCTTZ) {
        Dst.atIndex(I).deref<T>() = T::from(EltVal.countTrailingZeros());
      } else {
        Dst.atIndex(I).deref<T>() = T::from(EltVal.countLeadingZeros());
      }
      Dst.atIndex(I).initialize();
    });
  }

  return true;
}
1752
// Implements the memcpy/memmove family (library, wide-char, and __builtin_
// variants): validates both operands, then copies Size bytes (wide-char
// units for the w* variants) from Src to Dest and pushes Dest as the result.
// NOTE(review): the opening signature line is missing from this excerpt.
                                    const InterpFrame *Frame,
                                    const CallExpr *Call, unsigned ID) {
  assert(Call->getNumArgs() == 3);
  const ASTContext &ASTCtx = S.getASTContext();
  uint64_t Size = popToUInt64(S, Call->getArg(2));
  Pointer SrcPtr = S.Stk.pop<Pointer>().expand();
  Pointer DestPtr = S.Stk.pop<Pointer>().expand();

  // The plain library functions (as opposed to the __builtin_ forms) are
  // not formally usable in constant expressions; emit the note.
  if (ID == Builtin::BImemcpy || ID == Builtin::BImemmove)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  // Move variants permit overlapping regions; WChar variants measure Size
  // in wchar_t units.
  bool Move =
      (ID == Builtin::BI__builtin_memmove || ID == Builtin::BImemmove ||
       ID == Builtin::BI__builtin_wmemmove || ID == Builtin::BIwmemmove);
  bool WChar = ID == Builtin::BIwmemcpy || ID == Builtin::BIwmemmove ||
               ID == Builtin::BI__builtin_wmemcpy ||
               ID == Builtin::BI__builtin_wmemmove;

  // If the size is zero, we treat this as always being a valid no-op.
  if (Size == 0) {
    S.Stk.push<Pointer>(DestPtr);
    return true;
  }

  if (SrcPtr.isZero() || DestPtr.isZero()) {
    Pointer DiagPtr = (SrcPtr.isZero() ? SrcPtr : DestPtr);
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
        << /*IsMove=*/Move << /*IsWchar=*/WChar << !SrcPtr.isZero()
        << DiagPtr.toDiagnosticString(ASTCtx);
    return false;
  }

  // Diagnose integral src/dest pointers specially.
  if (SrcPtr.isIntegralPointer() || DestPtr.isIntegralPointer()) {
    std::string DiagVal = "(void *)";
    DiagVal += SrcPtr.isIntegralPointer()
                   ? std::to_string(SrcPtr.getIntegerRepresentation())
                   : std::to_string(DestPtr.getIntegerRepresentation());
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_null)
        << Move << WChar << DestPtr.isIntegralPointer() << DiagVal;
    return false;
  }

  // Can't read from dummy pointers.
  if (DestPtr.isDummy() || SrcPtr.isDummy())
    return false;

  if (DestPtr.getType()->isIncompleteType()) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcpy_incomplete_type)
        << Move << DestPtr.getType();
    return false;
  }
  if (SrcPtr.getType()->isIncompleteType()) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcpy_incomplete_type)
        << Move << SrcPtr.getType();
    return false;
  }

  QualType DestElemType = getElemType(DestPtr);
  if (DestElemType->isIncompleteType()) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcpy_incomplete_type)
        << Move << DestElemType;
    return false;
  }

  // Elements still addressable from DestPtr's current index onwards.
  size_t RemainingDestElems;
  if (DestPtr.getFieldDesc()->isArray()) {
    RemainingDestElems = DestPtr.isUnknownSizeArray()
                             ? 0
                             : (DestPtr.getNumElems() - DestPtr.getIndex());
  } else {
    RemainingDestElems = 1;
  }
  unsigned DestElemSize = ASTCtx.getTypeSizeInChars(DestElemType).getQuantity();

  if (WChar) {
    // Scale the element count to bytes for the wide variants.
    uint64_t WCharSize =
        ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
    Size *= WCharSize;
  }

  // The copy must cover a whole number of destination elements.
  if (Size % DestElemSize != 0) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcpy_unsupported)
        << Move << WChar << 0 << DestElemType << Size << DestElemSize;
    return false;
  }

  QualType SrcElemType = getElemType(SrcPtr);
  size_t RemainingSrcElems;
  if (SrcPtr.getFieldDesc()->isArray()) {
    RemainingSrcElems = SrcPtr.isUnknownSizeArray()
                            ? 0
                            : (SrcPtr.getNumElems() - SrcPtr.getIndex());
  } else {
    RemainingSrcElems = 1;
  }
  unsigned SrcElemSize = ASTCtx.getTypeSizeInChars(SrcElemType).getQuantity();

  // Copying between different element types would be type punning.
  if (!ASTCtx.hasSameUnqualifiedType(DestElemType, SrcElemType)) {
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_type_pun)
        << Move << SrcElemType << DestElemType;
    return false;
  }

  if (!DestElemType.isTriviallyCopyableType(ASTCtx)) {
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_nontrivial)
        << Move << DestElemType;
    return false;
  }

  // Check if we have enough elements to read from and write to.
  size_t RemainingDestBytes = RemainingDestElems * DestElemSize;
  size_t RemainingSrcBytes = RemainingSrcElems * SrcElemSize;
  if (Size > RemainingDestBytes || Size > RemainingSrcBytes) {
    APInt N = APInt(64, Size / DestElemSize);
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcpy_unsupported)
        << Move << WChar << (Size > RemainingSrcBytes ? 1 : 2) << DestElemType
        << toString(N, 10, /*Signed=*/false);
    return false;
  }

  // Check for overlapping memory regions.
  if (!Move && Pointer::pointToSameBlock(SrcPtr, DestPtr)) {
    // Remove base casts.
    Pointer SrcP = SrcPtr;
    while (SrcP.isBaseClass())
      SrcP = SrcP.getBase();

    Pointer DestP = DestPtr;
    while (DestP.isBaseClass())
      DestP = DestP.getBase();

    // Compare byte offsets within the shared block.
    unsigned SrcIndex = SrcP.expand().getIndex() * SrcP.elemSize();
    unsigned DstIndex = DestP.expand().getIndex() * DestP.elemSize();

    if ((SrcIndex <= DstIndex && (SrcIndex + Size) > DstIndex) ||
        (DstIndex <= SrcIndex && (DstIndex + Size) > SrcIndex)) {
      S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_memcpy_overlap)
          << /*IsWChar=*/false;
      return false;
    }
  }

  assert(Size % DestElemSize == 0);
  if (!DoMemcpy(S, OpPC, SrcPtr, DestPtr, Bytes(Size).toBits()))
    return false;

  // memcpy/memmove return the destination pointer.
  S.Stk.push<Pointer>(DestPtr);
  return true;
}
1909
// Determine if T is a character type for which we guarantee that
// sizeof(T) == 1.
// NOTE(review): the signature line is missing from this excerpt —
// presumably `static bool isOneByteCharacterType(QualType T) {`; confirm
// against the full source.
  return T->isCharType() || T->isChar8Type();
}
1915
// Implements memcmp/bcmp/wmemcmp and their __builtin_ forms: reads both
// operands into bitcast buffers and compares them bytewise (in wchar_t
// units for the wide variants), pushing -1/0/1.
// NOTE(review): the opening signature line and the type-switch macro line
// opening the wide-compare lambda are missing from this excerpt — confirm
// against the full source.
                                    const InterpFrame *Frame,
                                    const CallExpr *Call, unsigned ID) {
  assert(Call->getNumArgs() == 3);
  uint64_t Size = popToUInt64(S, Call->getArg(2));
  const Pointer &PtrB = S.Stk.pop<Pointer>();
  const Pointer &PtrA = S.Stk.pop<Pointer>();

  // The plain library functions are not formally constexpr; emit the note.
  if (ID == Builtin::BImemcmp || ID == Builtin::BIbcmp ||
      ID == Builtin::BIwmemcmp)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  // Comparing zero bytes is trivially "equal".
  if (Size == 0) {
    pushInteger(S, 0, Call->getType());
    return true;
  }

  if (!PtrA.isBlockPointer() || !PtrB.isBlockPointer())
    return false;

  bool IsWide =
      (ID == Builtin::BIwmemcmp || ID == Builtin::BI__builtin_wmemcmp);

  const ASTContext &ASTCtx = S.getASTContext();
  QualType ElemTypeA = getElemType(PtrA);
  QualType ElemTypeB = getElemType(PtrB);
  // FIXME: This is an arbitrary limitation the current constant interpreter
  // had. We could remove this.
  if (!IsWide && (!isOneByteCharacterType(ElemTypeA) ||
                  !isOneByteCharacterType(ElemTypeB))) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memcmp_unsupported)
        << ASTCtx.BuiltinInfo.getQuotedName(ID) << PtrA.getType()
        << PtrB.getType();
    return false;
  }

  if (PtrA.isDummy() || PtrB.isDummy())
    return false;

  if (!CheckRange(S, OpPC, PtrA, AK_Read) ||
      !CheckRange(S, OpPC, PtrB, AK_Read))
    return false;

  // Now, read both pointers to a buffer and compare those.
  BitcastBuffer BufferA(
      Bits(ASTCtx.getTypeSize(ElemTypeA) * PtrA.getNumElems()));
  readPointerToBuffer(S.getContext(), PtrA, BufferA, false);
  // FIXME: The swapping here is UNDOING something we do when reading the
  // data into the buffer.
  if (ASTCtx.getTargetInfo().isBigEndian())
    swapBytes(BufferA.Data.get(), BufferA.byteSize().getQuantity());

  BitcastBuffer BufferB(
      Bits(ASTCtx.getTypeSize(ElemTypeB) * PtrB.getNumElems()));
  readPointerToBuffer(S.getContext(), PtrB, BufferB, false);
  // FIXME: The swapping here is UNDOING something we do when reading the
  // data into the buffer.
  if (ASTCtx.getTargetInfo().isBigEndian())
    swapBytes(BufferB.Data.get(), BufferB.byteSize().getQuantity());

  size_t MinBufferSize = std::min(BufferA.byteSize().getQuantity(),
                                  BufferB.byteSize().getQuantity());

  unsigned ElemSize = 1;
  if (IsWide)
    ElemSize = ASTCtx.getTypeSizeInChars(ASTCtx.getWCharType()).getQuantity();
  // The Size given for the wide variants is in wide-char units. Convert it
  // to bytes.
  size_t ByteSize = Size * ElemSize;
  size_t CmpSize = std::min(MinBufferSize, ByteSize);

  for (size_t I = 0; I != CmpSize; I += ElemSize) {
    if (IsWide) {
        T A = *reinterpret_cast<T *>(BufferA.atByte(I));
        T B = *reinterpret_cast<T *>(BufferB.atByte(I));
        if (A < B) {
          pushInteger(S, -1, Call->getType());
          return true;
        }
        if (A > B) {
          pushInteger(S, 1, Call->getType());
          return true;
        }
      });
    } else {
      std::byte A = BufferA.deref<std::byte>(Bytes(I));
      std::byte B = BufferB.deref<std::byte>(Bytes(I));

      if (A < B) {
        pushInteger(S, -1, Call->getType());
        return true;
      }
      if (A > B) {
        pushInteger(S, 1, Call->getType());
        return true;
      }
    }
  }

  // We compared CmpSize bytes above. If the limiting factor was the Size
  // passed, we're done and the result is equality (0).
  if (ByteSize <= CmpSize) {
    pushInteger(S, 0, Call->getType());
    return true;
  }

  // However, if we read all the available bytes but were instructed to read
  // even more, diagnose this as a "read of dereferenced one-past-the-end
  // pointer". This is what would happen if we called CheckLoad() on every array
  // element.
  S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_past_end)
      << AK_Read << S.Current->getRange(OpPC);
  return false;
}
2032
// __builtin_memchr(ptr, int, int)
// __builtin_strchr(ptr, int)
//
// Shared handler for the memchr/strchr/wmemchr/wcschr family of builtins.
// Pops the (two or three) arguments off the interpreter stack and pushes
// either a pointer to the first matching element or a null pointer.
// NOTE(review): the first line of this function's signature was lost in this
// copy of the file; the parameter list continues below.
                                    const CallExpr *Call, unsigned ID) {
  // The plain library functions (as opposed to the __builtin_ forms) are only
  // constant-evaluable as an extension; diagnose that use.
  if (ID == Builtin::BImemchr || ID == Builtin::BIwcschr ||
      ID == Builtin::BIstrchr || ID == Builtin::BIwmemchr)
    diagnoseNonConstexprBuiltin(S, OpPC, ID);

  // memchr/wmemchr carry a third max-length argument; strchr/wcschr do not.
  std::optional<APSInt> MaxLength;
  if (Call->getNumArgs() == 3)
    MaxLength = popToAPSInt(S, Call->getArg(2));

  APSInt Desired = popToAPSInt(S, Call->getArg(1));
  const Pointer &Ptr = S.Stk.pop<Pointer>();

  // Searching zero elements always yields a null result, regardless of Ptr.
  if (MaxLength && MaxLength->isZero()) {
    S.Stk.push<Pointer>();
    return true;
  }

  // We cannot read through dummy pointers; for incomplete element types we
  // can at least produce a more specific diagnostic.
  if (Ptr.isDummy()) {
    if (Ptr.getType()->isIncompleteType())
      S.FFDiag(S.Current->getSource(OpPC),
               diag::note_constexpr_ltor_incomplete_type)
          << Ptr.getType();
    return false;
  }

  // Null is only okay if the given size is 0.
  if (Ptr.isZero()) {
    S.FFDiag(S.Current->getSource(OpPC), diag::note_constexpr_access_null)
        << AK_Read;
    return false;
  }

  QualType ElemTy = Ptr.getFieldDesc()->isArray()
                        ? Ptr.getFieldDesc()->getElemQualType()
                        : Ptr.getFieldDesc()->getType();
  bool IsRawByte = ID == Builtin::BImemchr || ID == Builtin::BI__builtin_memchr;

  // Give up on byte-oriented matching against multibyte elements.
  if (IsRawByte && !isOneByteCharacterType(ElemTy)) {
    S.FFDiag(S.Current->getSource(OpPC),
             diag::note_constexpr_memchr_unsupported)
        << S.getASTContext().BuiltinInfo.getQuotedName(ID) << ElemTy;
    return false;
  }

  if (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr) {
    // strchr converts its int argument to char before comparing; honor the
    // target's char signedness when truncating.
    int64_t DesiredTrunc;
    if (S.getASTContext().CharTy->isSignedIntegerType())
      DesiredTrunc =
          Desired.trunc(S.getASTContext().getCharWidth()).getSExtValue();
    else
      DesiredTrunc =
          Desired.trunc(S.getASTContext().getCharWidth()).getZExtValue();
    // strchr compares directly to the passed integer, and therefore
    // always fails if given an int that is not a char.
    if (Desired != DesiredTrunc) {
      S.Stk.push<Pointer>();
      return true;
    }
  }

  uint64_t DesiredVal;
  if (ID == Builtin::BIwmemchr || ID == Builtin::BI__builtin_wmemchr ||
      ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr) {
    // wcschr and wmemchr are given a wchar_t to look for. Just use it.
    DesiredVal = Desired.getZExtValue();
  } else {
    DesiredVal = Desired.trunc(S.getASTContext().getCharWidth()).getZExtValue();
  }

  // The str/wcs variants stop at the terminating null; the mem variants only
  // stop at the length limit.
  bool StopAtZero =
      (ID == Builtin::BIstrchr || ID == Builtin::BI__builtin_strchr ||
       ID == Builtin::BIwcschr || ID == Builtin::BI__builtin_wcschr);

  PrimType ElemT =
      IsRawByte ? PT_Sint8 : *S.getContext().classify(getElemType(Ptr));

  size_t Index = Ptr.getIndex();
  size_t Step = 0;
  for (;;) {
    // Only form an advanced element pointer when we actually moved.
    const Pointer &ElemPtr =
        (Index + Step) > 0 ? Ptr.atIndex(Index + Step) : Ptr;

    // CheckLoad() diagnoses reads past the end, inactive members, etc.
    if (!CheckLoad(S, OpPC, ElemPtr))
      return false;

    uint64_t V;
    // NOTE(review): the opening line of a type-switch macro invocation
    // appears to be missing here in this copy.
        ElemT, { V = static_cast<uint64_t>(ElemPtr.deref<T>().toUnsigned()); });

    if (V == DesiredVal) {
      S.Stk.push<Pointer>(ElemPtr);
      return true;
    }

    if (StopAtZero && V == 0)
      break;

    ++Step;
    if (MaxLength && Step == MaxLength->getZExtValue())
      break;
  }

  // Not found: the result is a null pointer.
  S.Stk.push<Pointer>();
  return true;
}
2142
// Computes the full size in chars (bytes) of the object described by \p Desc,
// or std::nullopt if the descriptor kind is not one we can size (neither
// primitive, array, nor record).
static std::optional<unsigned> computeFullDescSize(const ASTContext &ASTCtx,
                                                   const Descriptor *Desc) {
  if (Desc->isPrimitive())
    return ASTCtx.getTypeSizeInChars(Desc->getType()).getQuantity();
  if (Desc->isArray())
    // Element size times element count.
    return ASTCtx.getTypeSizeInChars(Desc->getElemQualType()).getQuantity() *
           Desc->getNumElems();
  if (Desc->isRecord()) {
    // Can't use Descriptor::getType() as that may return a pointer type. Look
    // at the decl directly.
    // NOTE(review): one line of this call chain appears to be missing in this
    // copy of the file.
    return ASTCtx
            ASTCtx.getCanonicalTagType(Desc->ElemRecord->getDecl()))
        .getQuantity();
  }

  return std::nullopt;
}
2161
// Compute the byte offset of \p Ptr in the full declaration.
//
// Walks from \p Ptr up to the declaration root, accumulating the offset
// contributed by each array-element, base-class, or field step.
static unsigned computePointerOffset(const ASTContext &ASTCtx,
                                     const Pointer &Ptr) {
  unsigned Result = 0;

  Pointer P = Ptr;
  while (P.isField() || P.isArrayElement()) {
    P = P.expand();
    const Descriptor *D = P.getFieldDesc();

    if (P.isArrayElement()) {
      // NOTE(review): the initializer of ElemSize appears to be missing in
      // this copy of the file.
      unsigned ElemSize =
      // A one-past-the-end pointer sits after all elements of the array.
      if (P.isOnePastEnd())
        Result += ElemSize * P.getNumElems();
      else
        Result += ElemSize * P.getIndex();
      P = P.expand().getArray();
    } else if (P.isBaseClass()) {
      const auto *RD = cast<CXXRecordDecl>(D->asDecl());
      bool IsVirtual = Ptr.isVirtualBaseClass();
      P = P.getBase();
      const Record *BaseRecord = P.getRecord();

      // Virtual and non-virtual bases are recorded at different offsets in
      // the record layout.
      const ASTRecordLayout &Layout =
          ASTCtx.getASTRecordLayout(cast<CXXRecordDecl>(BaseRecord->getDecl()));
      if (IsVirtual)
        Result += Layout.getVBaseClassOffset(RD).getQuantity();
      else
        Result += Layout.getBaseClassOffset(RD).getQuantity();
    } else if (P.isField()) {
      // Field offsets come from the parent record's layout, in bits.
      const FieldDecl *FD = P.getField();
      const ASTRecordLayout &Layout =
          ASTCtx.getASTRecordLayout(FD->getParent());
      unsigned FieldIndex = FD->getFieldIndex();
      uint64_t FieldOffset =
          ASTCtx.toCharUnitsFromBits(Layout.getFieldOffset(FieldIndex))
              .getQuantity();
      Result += FieldOffset;
      P = P.getBase();
    } else
      llvm_unreachable("Unhandled descriptor type");
  }

  return Result;
}
2208
2209/ Does Ptr point to the last subobject?
2210static bool pointsToLastObject(const Pointer &Ptr) {
2211 Pointer P = Ptr;
2212 while (!P.isRoot()) {
2213
2214 if (P.isArrayElement()) {
2215 P = P.expand().getArray();
2216 continue;
2217 }
2218 if (P.isBaseClass()) {
2219 if (P.getRecord()->getNumFields() > 0)
2220 return false;
2221 P = P.getBase();
2222 continue;
2223 }
2224
2225 Pointer Base = P.getBase();
2226 if (const Record *R = Base.getRecord()) {
2227 assert(P.getField());
2228 if (P.getField()->getFieldIndex() != R->getNumFields() - 1)
2229 return false;
2230 }
2231 P = Base;
2232 }
2233
2234 return true;
2235}
2236
// Does Ptr point to the last object AND to a flexible array member?
//
// Used by __builtin_object_size handling: when the user addresses a trailing
// flexible array member of a dummy (unknown-size) object, we cannot give an
// exact upper bound for the remaining bytes.
static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const Pointer &Ptr) {
  // Whether FieldDesc qualifies as a flexible array member under the current
  // -fstrict-flex-arrays level.
  // NOTE(review): a type-alias line (presumably declaring FAMKind) appears to
  // be missing here in this copy of the file.
  auto isFlexibleArrayMember = [&](const Descriptor *FieldDesc) {
    FAMKind StrictFlexArraysLevel =
        Ctx.getLangOpts().getStrictFlexArraysLevel();

    // Default level: any trailing array counts as a FAM.
    if (StrictFlexArraysLevel == FAMKind::Default)
      return true;

    // Zero-sized arrays count unless only incomplete arrays are allowed.
    unsigned NumElems = FieldDesc->getNumElems();
    if (NumElems == 0 && StrictFlexArraysLevel != FAMKind::IncompleteOnly)
      return true;

    // One-element arrays count only at the most permissive strict level.
    if (NumElems == 1 && StrictFlexArraysLevel == FAMKind::OneZeroOrIncomplete)
      return true;
    return false;
  };

  const Descriptor *FieldDesc = Ptr.getFieldDesc();
  if (!FieldDesc->isArray())
    return false;

  // Only dummy pointers (no known allocation) to the last subobject qualify.
  return Ptr.isDummy() && pointsToLastObject(Ptr) &&
         isFlexibleArrayMember(FieldDesc);
}
2263
// Handler for __builtin_object_size / __builtin_dynamic_object_size style
// queries: pushes the number of bytes remaining from the pointer to the end
// of the (sub)object, or fails so the caller falls back to the non-constant
// path.
// NOTE(review): the first line of this function's signature was lost in this
// copy of the file; the parameter list continues below.
                                        const InterpFrame *Frame,
                                        const CallExpr *Call) {
  const ASTContext &ASTCtx = S.getASTContext();
  // From the GCC docs:
  // Kind is an integer constant from 0 to 3. If the least significant bit is
  // clear, objects are whole variables. If it is set, a closest surrounding
  // subobject is considered the object a pointer points to. The second bit
  // determines if maximum or minimum of remaining bytes is computed.
  unsigned Kind = popToUInt64(S, Call->getArg(1));
  assert(Kind <= 3 && "unexpected kind");
  bool UseFieldDesc = (Kind & 1u);
  bool ReportMinimum = (Kind & 2u);
  const Pointer &Ptr = S.Stk.pop<Pointer>();

  if (Call->getArg(0)->HasSideEffects(ASTCtx)) {
    // "If there are any side effects in them, it returns (size_t) -1
    // for type 0 or 1 and (size_t) 0 for type 2 or 3."
    pushInteger(S, Kind <= 1 ? -1 : 0, Call->getType());
    return true;
  }

  // Null or non-block pointers have no size we can report.
  if (Ptr.isZero() || !Ptr.isBlockPointer())
    return false;

  // We can't load through pointers.
  if (Ptr.isDummy() && Ptr.getType()->isPointerType())
    return false;

  bool DetermineForCompleteObject = Ptr.getFieldDesc() == Ptr.getDeclDesc();
  const Descriptor *DeclDesc = Ptr.getDeclDesc();
  assert(DeclDesc);

  if (!UseFieldDesc || DetermineForCompleteObject) {
    // Lower bound, so we can't fall back to this.
    if (ReportMinimum && !DetermineForCompleteObject)
      return false;

    // Can't read beyond the pointer decl desc.
    if (!UseFieldDesc && !ReportMinimum && DeclDesc->getType()->isPointerType())
      return false;
  } else {
    if (isUserWritingOffTheEnd(ASTCtx, Ptr.expand())) {
      // If we cannot determine the size of the initial allocation, then we
      // can't given an accurate upper-bound. However, we are still able to
      // give conservative lower-bounds for Type=3.
      if (Kind == 1)
        return false;
    }
  }

  const Descriptor *Desc = UseFieldDesc ? Ptr.getFieldDesc() : DeclDesc;
  assert(Desc);

  std::optional<unsigned> FullSize = computeFullDescSize(ASTCtx, Desc);
  if (!FullSize)
    return false;

  // Compute how far into the sized object Ptr already points.
  unsigned ByteOffset;
  if (UseFieldDesc) {
    if (Ptr.isBaseClass())
      // For a base-class pointer, measure relative to the derived object.
      ByteOffset = computePointerOffset(ASTCtx, Ptr.getBase()) -
                   computePointerOffset(ASTCtx, Ptr);
    else {
      if (Ptr.inArray())
        // Offset within the array, relative to its first element.
        ByteOffset =
            computePointerOffset(ASTCtx, Ptr) -
            computePointerOffset(ASTCtx, Ptr.expand().atIndex(0).narrow());
      else
        ByteOffset = 0;
    }
  } else
    ByteOffset = computePointerOffset(ASTCtx, Ptr);

  assert(ByteOffset <= *FullSize);
  unsigned Result = *FullSize - ByteOffset;

  pushInteger(S, Result, Call->getType());
  return true;
}
2344
// Handler for __builtin_is_within_lifetime (and std::is_within_lifetime):
// pushes whether the pointee is currently within its lifetime, or fails with
// a diagnostic for null, one-past-the-end, or self-referential queries.
// NOTE(review): the first line of this function's signature was lost in this
// copy of the file; the parameter list continues below.
                                               const CallExpr *Call) {

  // This builtin is only usable during constant evaluation.
  if (!S.inConstantContext())
    return false;

  const Pointer &Ptr = S.Stk.pop<Pointer>();

  // Emits the invalid-use diagnostic, naming either the std:: wrapper or the
  // builtin depending on where we were called from. Always returns false so
  // callers can 'return Error(N);'.
  auto Error = [&](int Diag) {
    bool CalledFromStd = false;
    const auto *Callee = S.Current->getCallee();
    if (Callee && Callee->isInStdNamespace()) {
      const IdentifierInfo *Identifier = Callee->getIdentifier();
      CalledFromStd = Identifier && Identifier->isStr("is_within_lifetime");
    }
    // NOTE(review): the source-location operand of this ?: appears to be
    // missing in this copy of the file.
    S.CCEDiag(CalledFromStd
                  : S.Current->getSource(OpPC),
              diag::err_invalid_is_within_lifetime)
        << (CalledFromStd ? "std::is_within_lifetime"
                          : "__builtin_is_within_lifetime")
        << Diag;
    return false;
  };

  if (Ptr.isZero())
    return Error(0);
  if (Ptr.isOnePastEnd())
    return Error(1);

  bool Result = Ptr.getLifetime() != Lifetime::Ended;
  if (!Ptr.isActive()) {
    // Inactive union members are outside their lifetime.
    Result = false;
  } else {
    // For active objects, the usual access checks must also pass.
    if (!CheckLive(S, OpPC, Ptr, AK_Read))
      return false;
    if (!CheckMutable(S, OpPC, Ptr))
      return false;
    if (!CheckDummy(S, OpPC, Ptr.block(), AK_Read))
      return false;
  }

  // Check if we're currently running an initializer.
  if (llvm::is_contained(S.InitializingBlocks, Ptr.block()))
    return Error(2);
  if (S.EvaluatingDecl && Ptr.getDeclDesc()->asVarDecl() == S.EvaluatingDecl)
    return Error(2);

  pushInteger(S, Result, Call->getType());
  return true;
}
2396
// Applies \p Fn to a single integer argument, or element-wise to every
// element of a vector argument, writing vector results into the destination
// the caller prepared on the stack.
// NOTE(review): the first line of this function's signature was lost in this
// copy of the file; the parameter list continues below.
    InterpState &S, CodePtr OpPC, const CallExpr *Call,
    llvm::function_ref<APInt(const APSInt &)> Fn) {
  assert(Call->getNumArgs() == 1);

  // Single integer case.
  if (!Call->getArg(0)->getType()->isVectorType()) {
    assert(Call->getType()->isIntegerType());
    APSInt Src = popToAPSInt(S, Call->getArg(0));
    APInt Result = Fn(Src);
    // Result keeps the signedness of the source operand.
    pushInteger(S, APSInt(std::move(Result), !Src.isSigned()), Call->getType());
    return true;
  }

  // Vector case.
  const Pointer &Arg = S.Stk.pop<Pointer>();
  assert(Arg.getFieldDesc()->isPrimitiveArray());
  const Pointer &Dst = S.Stk.peek<Pointer>();
  assert(Dst.getFieldDesc()->isPrimitiveArray());
  assert(Arg.getFieldDesc()->getNumElems() ==
         Dst.getFieldDesc()->getNumElems());

  QualType ElemType = Arg.getFieldDesc()->getElemQualType();
  PrimType ElemT = *S.getContext().classify(ElemType);
  unsigned NumElems = Arg.getNumElems();
  bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();

  for (unsigned I = 0; I != NumElems; ++I) {
    // NOTE(review): the opening line of a type-switch macro invocation
    // appears to be missing here in this copy.
      APSInt Src = Arg.elem<T>(I).toAPSInt();
      APInt Result = Fn(Src);
      Dst.elem<T>(I) = static_cast<T>(APSInt(std::move(Result), DestUnsigned));
    });
  }

  return true;
}
2435
// Applies the binary integer operation \p Fn either to two scalars, to a
// vector and a scalar, or element-wise to two vectors, matching the operand
// shapes of the call.
// NOTE(review): the first line of this function's signature was lost in this
// copy of the file; the parameter list continues below.
    InterpState &S, CodePtr OpPC, const CallExpr *Call,
    llvm::function_ref<APInt(const APSInt &, const APSInt &)> Fn) {
  assert(Call->getNumArgs() == 2);

  // Single integer case.
  if (!Call->getArg(0)->getType()->isVectorType()) {
    assert(!Call->getArg(1)->getType()->isVectorType());
    // Operands were pushed left-to-right, so pop RHS first.
    APSInt RHS = popToAPSInt(S, Call->getArg(1));
    APSInt LHS = popToAPSInt(S, Call->getArg(0));
    APInt Result = Fn(LHS, RHS);
    pushInteger(S, APSInt(std::move(Result), !LHS.isSigned()), Call->getType());
    return true;
  }

  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
  assert(VT->getElementType()->isIntegralOrEnumerationType());
  PrimType ElemT = *S.getContext().classify(VT->getElementType());
  unsigned NumElems = VT->getNumElements();
  bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();

  // Vector + Scalar case.
  if (!Call->getArg(1)->getType()->isVectorType()) {
    assert(Call->getArg(1)->getType()->isIntegralOrEnumerationType());

    // The same scalar RHS is combined with every LHS element.
    APSInt RHS = popToAPSInt(S, Call->getArg(1));
    const Pointer &LHS = S.Stk.pop<Pointer>();
    const Pointer &Dst = S.Stk.peek<Pointer>();

    for (unsigned I = 0; I != NumElems; ++I) {
      // NOTE(review): the opening line of a type-switch macro invocation
      // appears to be missing here in this copy.
        Dst.elem<T>(I) = static_cast<T>(
            APSInt(Fn(LHS.elem<T>(I).toAPSInt(), RHS), DestUnsigned));
      });
    }
    return true;
  }

  // Vector case.
  assert(Call->getArg(0)->getType()->isVectorType() &&
         Call->getArg(1)->getType()->isVectorType());
  assert(VT->getElementType() ==
         Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
  assert(VT->getNumElements() ==
         Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
  assert(VT->getElementType()->isIntegralOrEnumerationType());

  const Pointer &RHS = S.Stk.pop<Pointer>();
  const Pointer &LHS = S.Stk.pop<Pointer>();
  const Pointer &Dst = S.Stk.peek<Pointer>();
  for (unsigned I = 0; I != NumElems; ++I) {
    // NOTE(review): the opening line of a type-switch macro invocation
    // appears to be missing here in this copy.
      APSInt Elem1 = LHS.elem<T>(I).toAPSInt();
      APSInt Elem2 = RHS.elem<T>(I).toAPSInt();
      Dst.elem<T>(I) = static_cast<T>(APSInt(Fn(Elem1, Elem2), DestUnsigned));
    });
  }

  return true;
}
2498
static bool
// x86 pack-style builtin: narrows elements of two source vectors through
// \p PackFn, writing the packed LHS elements into the low half and the packed
// RHS elements into the high half of each 128-bit lane of the destination.
// NOTE(review): the line naming this function was lost in this copy of the
// file; the parameter list continues below.
                                llvm::function_ref<APInt(const APSInt &)> PackFn) {
  const auto *VT0 = E->getArg(0)->getType()->castAs<VectorType>();
  [[maybe_unused]] const auto *VT1 =
      E->getArg(1)->getType()->castAs<VectorType>();
  assert(VT0 && VT1 && "pack builtin VT0 and VT1 must be VectorType");
  assert(VT0->getElementType() == VT1->getElementType() &&
         VT0->getNumElements() == VT1->getNumElements() &&
         "pack builtin VT0 and VT1 ElementType must be same");

  const Pointer &RHS = S.Stk.pop<Pointer>();
  const Pointer &LHS = S.Stk.pop<Pointer>();
  const Pointer &Dst = S.Stk.peek<Pointer>();

  // Packing operates independently within each 128-bit lane.
  const ASTContext &ASTCtx = S.getASTContext();
  unsigned SrcBits = ASTCtx.getIntWidth(VT0->getElementType());
  unsigned LHSVecLen = VT0->getNumElements();
  unsigned SrcPerLane = 128 / SrcBits;
  unsigned Lanes = LHSVecLen * SrcBits / 128;

  PrimType SrcT = *S.getContext().classify(VT0->getElementType());
  PrimType DstT = *S.getContext().classify(getElemType(Dst));
  bool IsUnsigend = getElemType(Dst)->isUnsignedIntegerType();

  for (unsigned Lane = 0; Lane != Lanes; ++Lane) {
    unsigned BaseSrc = Lane * SrcPerLane;
    unsigned BaseDst = Lane * (2 * SrcPerLane);

    for (unsigned I = 0; I != SrcPerLane; ++I) {
      // NOTE(review): the opening line of a type-switch macro invocation
      // appears to be missing here in this copy.
        APSInt A = LHS.elem<T>(BaseSrc + I).toAPSInt();
        APSInt B = RHS.elem<T>(BaseSrc + I).toAPSInt();

        // LHS packs into the lane's low half, RHS into the high half.
        assignInteger(S, Dst.atIndex(BaseDst + I), DstT,
                      APSInt(PackFn(A), IsUnsigend));
        assignInteger(S, Dst.atIndex(BaseDst + SrcPerLane + I), DstT,
                      APSInt(PackFn(B), IsUnsigend));
      });
    }
  }

  Dst.initializeAllElements();
  return true;
}
2544
// Handler for __builtin_elementwise_max / __builtin_elementwise_min on
// integers: scalar operands produce a scalar result, vector operands are
// combined element-wise into the destination.
// NOTE(review): the first line of this function's signature was lost in this
// copy of the file; the parameter list continues below.
                                             const CallExpr *Call,
                                             unsigned BuiltinID) {
  assert(Call->getNumArgs() == 2);

  QualType Arg0Type = Call->getArg(0)->getType();

  // TODO: Support floating-point types.
  if (!(Arg0Type->isIntegerType() ||
        (Arg0Type->isVectorType() &&
         Arg0Type->castAs<VectorType>()->getElementType()->isIntegerType())))
    return false;

  if (!Arg0Type->isVectorType()) {
    assert(!Call->getArg(1)->getType()->isVectorType());
    // Operands were pushed left-to-right, so pop RHS first.
    APSInt RHS = popToAPSInt(S, Call->getArg(1));
    APSInt LHS = popToAPSInt(S, Arg0Type);
    APInt Result;
    if (BuiltinID == Builtin::BI__builtin_elementwise_max) {
      Result = std::max(LHS, RHS);
    } else if (BuiltinID == Builtin::BI__builtin_elementwise_min) {
      Result = std::min(LHS, RHS);
    } else {
      llvm_unreachable("Wrong builtin ID");
    }

    pushInteger(S, APSInt(Result, !LHS.isSigned()), Call->getType());
    return true;
  }

  // Vector case.
  assert(Call->getArg(0)->getType()->isVectorType() &&
         Call->getArg(1)->getType()->isVectorType());
  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
  assert(VT->getElementType() ==
         Call->getArg(1)->getType()->castAs<VectorType>()->getElementType());
  assert(VT->getNumElements() ==
         Call->getArg(1)->getType()->castAs<VectorType>()->getNumElements());
  assert(VT->getElementType()->isIntegralOrEnumerationType());

  const Pointer &RHS = S.Stk.pop<Pointer>();
  const Pointer &LHS = S.Stk.pop<Pointer>();
  const Pointer &Dst = S.Stk.peek<Pointer>();
  PrimType ElemT = *S.getContext().classify(VT->getElementType());
  unsigned NumElems = VT->getNumElements();
  for (unsigned I = 0; I != NumElems; ++I) {
    APSInt Elem1;
    APSInt Elem2;
    // NOTE(review): the opening line of a type-switch macro invocation
    // appears to be missing here in this copy.
      Elem1 = LHS.elem<T>(I).toAPSInt();
      Elem2 = RHS.elem<T>(I).toAPSInt();
    });

    APSInt Result;
    if (BuiltinID == Builtin::BI__builtin_elementwise_max) {
      Result = APSInt(std::max(Elem1, Elem2),
                      Call->getType()->isUnsignedIntegerOrEnumerationType());
    } else if (BuiltinID == Builtin::BI__builtin_elementwise_min) {
      Result = APSInt(std::min(Elem1, Elem2),
                      Call->getType()->isUnsignedIntegerOrEnumerationType());
    } else {
      llvm_unreachable("Wrong builtin ID");
    }

    // NOTE(review): the opening line of a type-switch macro invocation
    // appears to be missing here in this copy.
        { Dst.elem<T>(I) = static_cast<T>(Result); });
  }
  Dst.initializeAllElements();

  return true;
}
2616
// Combines adjacent element pairs (low, high) of both vector operands via
// \p Fn, producing one destination element per input pair (so the result has
// half as many elements per source vector).
// NOTE(review): the first line of this function's signature was lost in this
// copy of the file; the parameter list continues below.
    InterpState &S, CodePtr OpPC, const CallExpr *Call,
    llvm::function_ref<APInt(const APSInt &, const APSInt &, const APSInt &,
                             const APSInt &)>
        Fn) {
  assert(Call->getArg(0)->getType()->isVectorType() &&
         Call->getArg(1)->getType()->isVectorType());
  const Pointer &RHS = S.Stk.pop<Pointer>();
  const Pointer &LHS = S.Stk.pop<Pointer>();
  const Pointer &Dst = S.Stk.peek<Pointer>();

  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
  PrimType ElemT = *S.getContext().classify(VT->getElementType());
  unsigned NumElems = VT->getNumElements();
  const auto *DestVT = Call->getType()->castAs<VectorType>();
  PrimType DestElemT = *S.getContext().classify(DestVT->getElementType());
  bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();

  // Step over the sources two elements at a time; each pair yields one
  // destination element.
  unsigned DstElem = 0;
  for (unsigned I = 0; I != NumElems; I += 2) {
    APSInt Result;
    // NOTE(review): the opening line of a type-switch macro invocation
    // appears to be missing here in this copy.
      APSInt LoLHS = LHS.elem<T>(I).toAPSInt();
      APSInt HiLHS = LHS.elem<T>(I + 1).toAPSInt();
      APSInt LoRHS = RHS.elem<T>(I).toAPSInt();
      APSInt HiRHS = RHS.elem<T>(I + 1).toAPSInt();
      Result = APSInt(Fn(LoLHS, HiLHS, LoRHS, HiRHS), DestUnsigned);
    });

    INT_TYPE_SWITCH_NO_BOOL(DestElemT,
                            { Dst.elem<T>(DstElem) = static_cast<T>(Result); });
    ++DstElem;
  }

  Dst.initializeAllElements();
  return true;
}
2654
// Horizontal integer binop (x86 phadd/phsub style): within each 128-bit lane,
// combines adjacent element pairs of LHS into the lane's low results and
// adjacent pairs of RHS into the lane's high results.
// NOTE(review): the first line of this function's signature was lost in this
// copy of the file; the parameter list continues below.
    InterpState &S, CodePtr OpPC, const CallExpr *Call,
    llvm::function_ref<APInt(const APSInt &, const APSInt &)> Fn) {
  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
  PrimType ElemT = *S.getContext().classify(VT->getElementType());
  bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();

  const Pointer &RHS = S.Stk.pop<Pointer>();
  const Pointer &LHS = S.Stk.pop<Pointer>();
  const Pointer &Dst = S.Stk.peek<Pointer>();
  unsigned NumElts = VT->getNumElements();
  unsigned EltBits = S.getASTContext().getIntWidth(VT->getElementType());
  unsigned EltsPerLane = 128 / EltBits;
  unsigned Lanes = NumElts * EltBits / 128;
  unsigned DestIndex = 0;

  for (unsigned Lane = 0; Lane < Lanes; ++Lane) {
    unsigned LaneStart = Lane * EltsPerLane;
    // First the LHS pairs of this lane...
    for (unsigned I = 0; I < EltsPerLane; I += 2) {
      // NOTE(review): the opening line of a type-switch macro invocation
      // appears to be missing here in this copy.
        APSInt Elem1 = LHS.elem<T>(LaneStart + I).toAPSInt();
        APSInt Elem2 = LHS.elem<T>(LaneStart + I + 1).toAPSInt();
        APSInt ResL = APSInt(Fn(Elem1, Elem2), DestUnsigned);
        Dst.elem<T>(DestIndex++) = static_cast<T>(ResL);
      });
    }

    // ...then the RHS pairs of the same lane.
    for (unsigned I = 0; I < EltsPerLane; I += 2) {
      // NOTE(review): the opening line of a type-switch macro invocation
      // appears to be missing here in this copy.
        APSInt Elem1 = RHS.elem<T>(LaneStart + I).toAPSInt();
        APSInt Elem2 = RHS.elem<T>(LaneStart + I + 1).toAPSInt();
        APSInt ResR = APSInt(Fn(Elem1, Elem2), DestUnsigned);
        Dst.elem<T>(DestIndex++) = static_cast<T>(ResR);
      });
    }
  }
  Dst.initializeAllElements();
  return true;
}
2694
// Horizontal floating-point binop (x86 haddps/hsubps style): within each
// 128-bit lane, combines adjacent element pairs of LHS into the lane's low
// half and pairs of RHS into the lane's high half, using the call's rounding
// mode.
// NOTE(review): the first line of this function's signature was lost in this
// copy of the file; the parameter list continues below.
    InterpState &S, CodePtr OpPC, const CallExpr *Call,
    llvm::function_ref<APFloat(const APFloat &, const APFloat &,
                               llvm::RoundingMode)>
        Fn) {
  const Pointer &RHS = S.Stk.pop<Pointer>();
  const Pointer &LHS = S.Stk.pop<Pointer>();
  const Pointer &Dst = S.Stk.peek<Pointer>();
  // Rounding mode comes from the FP options in effect at the call site.
  FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
  llvm::RoundingMode RM = getRoundingMode(FPO);
  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();

  unsigned NumElts = VT->getNumElements();
  unsigned EltBits = S.getASTContext().getTypeSize(VT->getElementType());
  unsigned NumLanes = NumElts * EltBits / 128;
  unsigned NumElemsPerLane = NumElts / NumLanes;
  unsigned HalfElemsPerLane = NumElemsPerLane / 2;

  for (unsigned L = 0; L != NumElts; L += NumElemsPerLane) {
    using T = PrimConv<PT_Float>::T;
    // LHS pairs fill the low half of the lane...
    for (unsigned E = 0; E != HalfElemsPerLane; ++E) {
      APFloat Elem1 = LHS.elem<T>(L + (2 * E) + 0).getAPFloat();
      APFloat Elem2 = LHS.elem<T>(L + (2 * E) + 1).getAPFloat();
      Dst.elem<T>(L + E) = static_cast<T>(Fn(Elem1, Elem2, RM));
    }
    // ...RHS pairs fill the high half.
    for (unsigned E = 0; E != HalfElemsPerLane; ++E) {
      APFloat Elem1 = RHS.elem<T>(L + (2 * E) + 0).getAPFloat();
      APFloat Elem2 = RHS.elem<T>(L + (2 * E) + 1).getAPFloat();
      Dst.elem<T>(L + E + HalfElemsPerLane) =
          static_cast<T>(Fn(Elem1, Elem2, RM));
    }
  }
  Dst.initializeAllElements();
  return true;
}
2730
// x86 addsub-style builtin.
// NOTE(review): the first line of this function's signature was lost in this
// copy of the file; the parameter list continues below.
                                      const CallExpr *Call) {
  // Addsub: alternates between subtraction and addition
  // Result[i] = (i % 2 == 0) ? (a[i] - b[i]) : (a[i] + b[i])
  const Pointer &RHS = S.Stk.pop<Pointer>();
  const Pointer &LHS = S.Stk.pop<Pointer>();
  const Pointer &Dst = S.Stk.peek<Pointer>();
  // Rounding mode comes from the FP options in effect at the call site.
  FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
  llvm::RoundingMode RM = getRoundingMode(FPO);
  const auto *VT = Call->getArg(0)->getType()->castAs<VectorType>();
  unsigned NumElems = VT->getNumElements();

  using T = PrimConv<PT_Float>::T;
  for (unsigned I = 0; I != NumElems; ++I) {
    APFloat LElem = LHS.elem<T>(I).getAPFloat();
    APFloat RElem = RHS.elem<T>(I).getAPFloat();
    if (I % 2 == 0) {
      // Even indices: subtract
      LElem.subtract(RElem, RM);
    } else {
      // Odd indices: add
      LElem.add(RElem, RM);
    }
    Dst.elem<T>(I) = static_cast<T>(LElem);
  }
  Dst.initializeAllElements();
  return true;
}
2759
// Applies the ternary floating-point operation \p Fn (e.g. an fma-style
// combiner) either to three scalar operands or element-wise to three vector
// operands, using the call's rounding mode.
// NOTE(review): the first line of this function's signature was lost in this
// copy of the file; the parameter list continues below.
    InterpState &S, CodePtr OpPC, const CallExpr *Call,
    llvm::function_ref<APFloat(const APFloat &, const APFloat &,
                               const APFloat &, llvm::RoundingMode)>
        Fn) {
  assert(Call->getNumArgs() == 3);

  // Rounding mode comes from the FP options in effect at the call site.
  FPOptions FPO = Call->getFPFeaturesInEffect(S.Ctx.getLangOpts());
  llvm::RoundingMode RM = getRoundingMode(FPO);
  QualType Arg1Type = Call->getArg(0)->getType();
  QualType Arg2Type = Call->getArg(1)->getType();
  QualType Arg3Type = Call->getArg(2)->getType();

  // Non-vector floating point types.
  if (!Arg1Type->isVectorType()) {
    assert(!Arg2Type->isVectorType());
    assert(!Arg3Type->isVectorType());
    (void)Arg2Type;
    (void)Arg3Type;

    // Operands were pushed left-to-right, so pop in reverse order.
    const Floating &Z = S.Stk.pop<Floating>();
    const Floating &Y = S.Stk.pop<Floating>();
    const Floating &X = S.Stk.pop<Floating>();
    APFloat F = Fn(X.getAPFloat(), Y.getAPFloat(), Z.getAPFloat(), RM);
    // Allocate the result with the semantics of the first operand.
    Floating Result = S.allocFloat(X.getSemantics());
    Result.copy(F);
    S.Stk.push<Floating>(Result);
    return true;
  }

  // Vector type.
  assert(Arg1Type->isVectorType() && Arg2Type->isVectorType() &&
         Arg3Type->isVectorType());

  const VectorType *VecTy = Arg1Type->castAs<VectorType>();
  QualType ElemQT = VecTy->getElementType();
  unsigned NumElems = VecTy->getNumElements();

  assert(ElemQT == Arg2Type->castAs<VectorType>()->getElementType() &&
         ElemQT == Arg3Type->castAs<VectorType>()->getElementType());
  assert(NumElems == Arg2Type->castAs<VectorType>()->getNumElements() &&
         NumElems == Arg3Type->castAs<VectorType>()->getNumElements());
  assert(ElemQT->isRealFloatingType());
  (void)ElemQT;

  const Pointer &VZ = S.Stk.pop<Pointer>();
  const Pointer &VY = S.Stk.pop<Pointer>();
  const Pointer &VX = S.Stk.pop<Pointer>();
  const Pointer &Dst = S.Stk.peek<Pointer>();
  for (unsigned I = 0; I != NumElems; ++I) {
    using T = PrimConv<PT_Float>::T;
    APFloat X = VX.elem<T>(I).getAPFloat();
    APFloat Y = VY.elem<T>(I).getAPFloat();
    APFloat Z = VZ.elem<T>(I).getAPFloat();
    APFloat F = Fn(X, Y, Z, RM);
    Dst.elem<Floating>(I) = Floating(F);
  }
  // NOTE(review): a destination-initialization line appears to be missing
  // here in this copy of the file.
  return true;
}
2820
// AVX512 predicated move: "Result = Mask[] ? LHS[] : RHS[]".
// NOTE(review): the first line of this function's signature was lost in this
// copy of the file; the parameter list continues below.
                                  const CallExpr *Call) {
  const Pointer &RHS = S.Stk.pop<Pointer>();
  const Pointer &LHS = S.Stk.pop<Pointer>();
  APSInt Mask = popToAPSInt(S, Call->getArg(0));
  const Pointer &Dst = S.Stk.peek<Pointer>();

  assert(LHS.getNumElems() == RHS.getNumElems());
  assert(LHS.getNumElems() == Dst.getNumElems());
  unsigned NumElems = LHS.getNumElems();
  PrimType ElemT = LHS.getFieldDesc()->getPrimType();
  PrimType DstElemT = Dst.getFieldDesc()->getPrimType();

  // Bit I of the mask selects LHS[I] (set) or RHS[I] (clear).
  for (unsigned I = 0; I != NumElems; ++I) {
    if (ElemT == PT_Float) {
      assert(DstElemT == PT_Float);
      Dst.elem<Floating>(I) =
          Mask[I] ? LHS.elem<Floating>(I) : RHS.elem<Floating>(I);
    } else {
      APSInt Elem;
      INT_TYPE_SWITCH(ElemT, {
        Elem = Mask[I] ? LHS.elem<T>(I).toAPSInt() : RHS.elem<T>(I).toAPSInt();
      });
      INT_TYPE_SWITCH_NO_BOOL(DstElemT,
                              { Dst.elem<T>(I) = static_cast<T>(Elem); });
    }
  }
  // NOTE(review): a destination-initialization line appears to be missing
  // here in this copy of the file.

  return true;
}
2853
// Scalar variant of AVX512 predicated select:
// Result[i] = (Mask bit 0) ? LHS[i] : RHS[i], but only element 0 may change.
// All other elements are taken from RHS.
// NOTE(review): the first line of this function's signature was lost in this
// copy of the file; the parameter list continues below.
                                         const CallExpr *Call) {
  unsigned N =
      Call->getArg(1)->getType()->getAs<VectorType>()->getNumElements();

  const Pointer &W = S.Stk.pop<Pointer>();
  const Pointer &A = S.Stk.pop<Pointer>();
  APSInt U = popToAPSInt(S, Call->getArg(0));
  const Pointer &Dst = S.Stk.peek<Pointer>();

  // Only bit 0 of the mask matters for the scalar form.
  bool TakeA0 = U.getZExtValue() & 1ULL;

  // Copy everything from W, starting at index 1 when element 0 comes from A.
  for (unsigned I = TakeA0; I != N; ++I)
    Dst.elem<Floating>(I) = W.elem<Floating>(I);
  if (TakeA0)
    Dst.elem<Floating>(0) = A.elem<Floating>(0);

  // NOTE(review): a destination-initialization line appears to be missing
  // here in this copy of the file.
  return true;
}
2877
// Concatenates all lanes of both vector operands into two wide APInts (for
// float elements only the sign bit of each lane is used) and pushes the
// boolean result of \p Fn applied to the two wide values.
// NOTE(review): the first line of this function's signature was lost in this
// copy of the file; the parameter list continues below.
    InterpState &S, CodePtr OpPC, const CallExpr *Call,
    llvm::function_ref<bool(const APInt &A, const APInt &B)> Fn) {
  const Pointer &RHS = S.Stk.pop<Pointer>();
  const Pointer &LHS = S.Stk.pop<Pointer>();

  assert(LHS.getNumElems() == RHS.getNumElems());

  unsigned SourceLen = LHS.getNumElems();
  QualType ElemQT = getElemType(LHS);
  OptPrimType ElemPT = S.getContext().classify(ElemQT);
  unsigned LaneWidth = S.getASTContext().getTypeSize(ElemQT);

  APInt AWide(LaneWidth * SourceLen, 0);
  APInt BWide(LaneWidth * SourceLen, 0);

  for (unsigned I = 0; I != SourceLen; ++I) {
    APInt ALane;
    APInt BLane;

    if (ElemQT->isIntegerType()) { // Get value.
      INT_TYPE_SWITCH_NO_BOOL(*ElemPT, {
        ALane = LHS.elem<T>(I).toAPSInt();
        BLane = RHS.elem<T>(I).toAPSInt();
      });
    } else if (ElemQT->isFloatingType()) { // Get only sign bit.
      using T = PrimConv<PT_Float>::T;
      ALane = LHS.elem<T>(I).getAPFloat().bitcastToAPInt().isNegative();
      BLane = RHS.elem<T>(I).getAPFloat().bitcastToAPInt().isNegative();
    } else { // Must be integer or floating type.
      return false;
    }
    AWide.insertBits(ALane, I * LaneWidth);
    BWide.insertBits(BLane, I * LaneWidth);
  }
  pushInteger(S, Fn(AWide, BWide), Call->getType());
  return true;
}
2916
// Builds an integer bitmask from the sign bit of every element of the vector
// argument (movmsk-style) and pushes it as the call's result.
// NOTE(review): the first line of this function's signature was lost in this
// copy of the file; the parameter list continues below.
                                  const CallExpr *Call) {
  assert(Call->getNumArgs() == 1);

  const Pointer &Source = S.Stk.pop<Pointer>();

  unsigned SourceLen = Source.getNumElems();
  QualType ElemQT = getElemType(Source);
  OptPrimType ElemT = S.getContext().classify(ElemQT);
  unsigned ResultLen =
      S.getASTContext().getTypeSize(Call->getType()); // Always 32-bit integer.
  APInt Result(ResultLen, 0);

  for (unsigned I = 0; I != SourceLen; ++I) {
    APInt Elem;
    if (ElemQT->isIntegerType()) {
      INT_TYPE_SWITCH_NO_BOOL(*ElemT, { Elem = Source.elem<T>(I).toAPSInt(); });
    } else if (ElemQT->isRealFloatingType()) {
      // For floats, reinterpret the bits so the sign bit is the MSB.
      using T = PrimConv<PT_Float>::T;
      Elem = Source.elem<T>(I).getAPFloat().bitcastToAPInt();
    } else {
      return false;
    }
    // Bit I of the result is element I's sign bit.
    Result.setBitVal(I, Elem.isNegative());
  }
  pushInteger(S, Result, Call->getType());
  return true;
}
2945
// Applies the ternary integer operation \p Fn to three scalars, to two
// vectors and a scalar, or element-wise to three vectors, matching the
// operand shapes of the call.
// NOTE(review): the first line of this function's signature was lost in this
// copy of the file; the parameter list continues below.
    InterpState &S, CodePtr OpPC, const CallExpr *Call,
    llvm::function_ref<APInt(const APSInt &, const APSInt &, const APSInt &)>
        Fn) {
  assert(Call->getNumArgs() == 3);

  QualType Arg0Type = Call->getArg(0)->getType();
  QualType Arg2Type = Call->getArg(2)->getType();
  // Non-vector integer types.
  if (!Arg0Type->isVectorType()) {
    // Operands were pushed left-to-right, so pop in reverse order.
    const APSInt &Op2 = popToAPSInt(S, Arg2Type);
    const APSInt &Op1 = popToAPSInt(S, Call->getArg(1));
    const APSInt &Op0 = popToAPSInt(S, Arg0Type);
    APSInt Result = APSInt(Fn(Op0, Op1, Op2), Op0.isUnsigned());
    pushInteger(S, Result, Call->getType());
    return true;
  }

  const auto *VecT = Arg0Type->castAs<VectorType>();
  PrimType ElemT = *S.getContext().classify(VecT->getElementType());
  unsigned NumElems = VecT->getNumElements();
  bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();

  // Vector + Vector + Scalar case.
  if (!Arg2Type->isVectorType()) {
    // The same scalar Op2 is combined with every element pair.
    APSInt Op2 = popToAPSInt(S, Arg2Type);

    const Pointer &Op1 = S.Stk.pop<Pointer>();
    const Pointer &Op0 = S.Stk.pop<Pointer>();
    const Pointer &Dst = S.Stk.peek<Pointer>();
    for (unsigned I = 0; I != NumElems; ++I) {
      // NOTE(review): the opening line of a type-switch macro invocation
      // appears to be missing here in this copy.
        Dst.elem<T>(I) = static_cast<T>(APSInt(
            Fn(Op0.elem<T>(I).toAPSInt(), Op1.elem<T>(I).toAPSInt(), Op2),
            DestUnsigned));
      });
    }

    return true;
  }

  // Vector type.
  const Pointer &Op2 = S.Stk.pop<Pointer>();
  const Pointer &Op1 = S.Stk.pop<Pointer>();
  const Pointer &Op0 = S.Stk.pop<Pointer>();
  const Pointer &Dst = S.Stk.peek<Pointer>();
  for (unsigned I = 0; I != NumElems; ++I) {
    APSInt Val0, Val1, Val2;
    // NOTE(review): the opening line of a type-switch macro invocation
    // appears to be missing here in this copy.
      Val0 = Op0.elem<T>(I).toAPSInt();
      Val1 = Op1.elem<T>(I).toAPSInt();
      Val2 = Op2.elem<T>(I).toAPSInt();
    });
    APSInt Result = APSInt(Fn(Val0, Val1, Val2), Val0.isUnsigned());
        { Dst.elem<T>(I) = static_cast<T>(Result); });
  }

  return true;
}
3008
// NOTE(review): signature opener (doxygen line 3009, with the function name)
// was lost in extraction. From the body this extracts one destination-sized
// lane out of a wider source vector (vextractf128/vextracti* style), with the
// lane chosen by the immediate second argument. Confirm name upstream.
3010 const CallExpr *Call,
3011 unsigned ID) {
3012 assert(Call->getNumArgs() == 2);
3013
3014 APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
3015 uint64_t Index = ImmAPS.getZExtValue();
3016
3017 const Pointer &Src = S.Stk.pop<Pointer>();
3018 if (!Src.getFieldDesc()->isPrimitiveArray())
3019 return false;
3020
3021 const Pointer &Dst = S.Stk.peek<Pointer>();
3022 if (!Dst.getFieldDesc()->isPrimitiveArray())
3023 return false;
3024
3025 unsigned SrcElems = Src.getNumElems();
3026 unsigned DstElems = Dst.getNumElems();
3027
// The source is viewed as NumLanes destination-sized lanes; the immediate is
// reduced modulo the lane count, matching the hardware's ignoring of high
// immediate bits.
3028 unsigned NumLanes = SrcElems / DstElems;
3029 unsigned Lane = static_cast<unsigned>(Index % NumLanes);
3030 unsigned ExtractPos = Lane * DstElems;
3031
3032 PrimType ElemT = Src.getFieldDesc()->getPrimType();
3033
3034 TYPE_SWITCH(ElemT, {
3035 for (unsigned I = 0; I != DstElems; ++I) {
3036 Dst.elem<T>(I) = Src.elem<T>(ExtractPos + I);
3037 }
3038 });
3039
// NOTE(review): doxygen line 3040 is missing — likely
// Dst.initializeAllElements();. Confirm upstream.
3041 return true;
3042}
3043
// NOTE(review): signature opener (doxygen line 3044, with the function name)
// was lost in extraction. From the body this is the write-masked variant of
// the lane-extract above: per element, mask bit I selects the extracted lane
// element or the corresponding element of the merge operand. Confirm upstream.
3045 CodePtr OpPC,
3046 const CallExpr *Call,
3047 unsigned ID) {
3048 assert(Call->getNumArgs() == 4);
3049
// Pops are in reverse order of the call's argument pushes:
// arg3 = writemask, arg2 = merge vector, arg1 = lane immediate, arg0 = source.
3050 APSInt MaskAPS = popToAPSInt(S, Call->getArg(3));
3051 const Pointer &Merge = S.Stk.pop<Pointer>();
3052 APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
3053 const Pointer &Src = S.Stk.pop<Pointer>();
3054
3055 if (!Src.getFieldDesc()->isPrimitiveArray() ||
3056 !Merge.getFieldDesc()->isPrimitiveArray())
3057 return false;
3058
3059 const Pointer &Dst = S.Stk.peek<Pointer>();
3060 if (!Dst.getFieldDesc()->isPrimitiveArray())
3061 return false;
3062
3063 unsigned SrcElems = Src.getNumElems();
3064 unsigned DstElems = Dst.getNumElems();
3065
3066 unsigned NumLanes = SrcElems / DstElems;
3067 unsigned Lane = static_cast<unsigned>(ImmAPS.getZExtValue() % NumLanes);
3068 unsigned Base = Lane * DstElems;
3069
3070 PrimType ElemT = Src.getFieldDesc()->getPrimType();
3071
3072 TYPE_SWITCH(ElemT, {
3073 for (unsigned I = 0; I != DstElems; ++I) {
3074 if (MaskAPS[I])
3075 Dst.elem<T>(I) = Src.elem<T>(Base + I);
3076 else
3077 Dst.elem<T>(I) = Merge.elem<T>(I);
3078 }
3079 });
3080
// NOTE(review): doxygen line 3081 is missing — likely
// Dst.initializeAllElements();. Confirm upstream.
3082 return true;
3083}
3084
// NOTE(review): signature opener (doxygen line 3085, with the function name)
// was lost in extraction. From the body this implements subvector insertion
// (vinsertf128/vinserti* style): copy the base vector, then overwrite one
// subvector-sized lane (chosen by the immediate) with the subvector operand.
3086 const CallExpr *Call,
3087 unsigned ID) {
3088 assert(Call->getNumArgs() == 3);
3089
3090 APSInt ImmAPS = popToAPSInt(S, Call->getArg(2));
3091 uint64_t Index = ImmAPS.getZExtValue();
3092
3093 const Pointer &SubVec = S.Stk.pop<Pointer>();
3094 if (!SubVec.getFieldDesc()->isPrimitiveArray())
3095 return false;
3096
3097 const Pointer &BaseVec = S.Stk.pop<Pointer>();
3098 if (!BaseVec.getFieldDesc()->isPrimitiveArray())
3099 return false;
3100
3101 const Pointer &Dst = S.Stk.peek<Pointer>();
3102
3103 unsigned BaseElements = BaseVec.getNumElems();
3104 unsigned SubElements = SubVec.getNumElems();
3105
3106 assert(SubElements != 0 && BaseElements != 0 &&
3107 (BaseElements % SubElements) == 0);
3108
// Reduce the immediate modulo the number of lanes, as the hardware does.
3109 unsigned NumLanes = BaseElements / SubElements;
3110 unsigned Lane = static_cast<unsigned>(Index % NumLanes);
3111 unsigned InsertPos = Lane * SubElements;
3112
3113 PrimType ElemT = BaseVec.getFieldDesc()->getPrimType();
3114
3115 TYPE_SWITCH(ElemT, {
3116 for (unsigned I = 0; I != BaseElements; ++I)
3117 Dst.elem<T>(I) = BaseVec.elem<T>(I);
3118 for (unsigned I = 0; I != SubElements; ++I)
3119 Dst.elem<T>(InsertPos + I) = SubVec.elem<T>(I);
3120 });
3121
// NOTE(review): doxygen line 3122 is missing — likely
// Dst.initializeAllElements();. Confirm upstream.
3123 return true;
3124}
3125
// NOTE(review): signature opener (doxygen line 3126, with the function name)
// was lost in extraction. From the body (unsigned compare `ugt`, min value in
// element 0, min index in element 1, remaining elements zeroed) this appears
// to implement the phminposuw builtin — confirm upstream.
3127 const CallExpr *Call) {
3128 assert(Call->getNumArgs() == 1);
3129
3130 const Pointer &Source = S.Stk.pop<Pointer>();
3131 const Pointer &Dest = S.Stk.peek<Pointer>();
3132
3133 unsigned SourceLen = Source.getNumElems();
3134 QualType ElemQT = getElemType(Source);
3135 OptPrimType ElemT = S.getContext().classify(ElemQT);
3136 unsigned ElemBitWidth = S.getASTContext().getTypeSize(ElemQT);
3137
3138 bool DestUnsigned = Call->getCallReturnType(S.getASTContext())
3139 ->castAs<VectorType>()
3140 ->getElementType()
// NOTE(review): doxygen line 3141 is missing — it completed this expression
// (presumably ->isUnsignedIntegerOrEnumerationType();). Confirm upstream.
3142
3143 INT_TYPE_SWITCH_NO_BOOL(*ElemT, {
// MinIndex starts at 0; the strict `ugt` below keeps the earliest index on
// ties, matching the instruction's lowest-index semantics.
3144 APSInt MinIndex(ElemBitWidth, DestUnsigned);
3145 APSInt MinVal = Source.elem<T>(0).toAPSInt();
3146
3147 for (unsigned I = 1; I != SourceLen; ++I) {
3148 APSInt Val = Source.elem<T>(I).toAPSInt();
3149 if (MinVal.ugt(Val)) {
3150 MinVal = Val;
3151 MinIndex = I;
3152 }
3153 }
3154
3155 Dest.elem<T>(0) = static_cast<T>(MinVal);
3156 Dest.elem<T>(1) = static_cast<T>(MinIndex);
3157 for (unsigned I = 2; I != SourceLen; ++I) {
3158 Dest.elem<T>(I) = static_cast<T>(APSInt(ElemBitWidth, DestUnsigned));
3159 }
3160 });
3161 Dest.initializeAllElements();
3162 return true;
3163}
3164
// NOTE(review): signature opener (doxygen line 3165, with the function name)
// was lost in extraction. From the body (8-entry truth table indexed by the
// A/B/C bit triple, zero- vs merge-masking controlled by MaskZ) this
// implements the vpternlog family — confirm upstream.
3166 const CallExpr *Call, bool MaskZ) {
3167 assert(Call->getNumArgs() == 5);
3168
3169 APInt U = popToAPSInt(S, Call->getArg(4)); / Lane mask
3170 APInt Imm = popToAPSInt(S, Call->getArg(3)); / Ternary truth table
3171 const Pointer &C = S.Stk.pop<Pointer>();
3172 const Pointer &B = S.Stk.pop<Pointer>();
3173 const Pointer &A = S.Stk.pop<Pointer>();
3174 const Pointer &Dst = S.Stk.peek<Pointer>();
3175
3176 unsigned DstLen = A.getNumElems();
3177 QualType ElemQT = getElemType(A);
3178 OptPrimType ElemT = S.getContext().classify(ElemQT);
3179 unsigned LaneWidth = S.getASTContext().getTypeSize(ElemQT);
3180 bool DstUnsigned = ElemQT->isUnsignedIntegerOrEnumerationType();
3181
3182 INT_TYPE_SWITCH_NO_BOOL(*ElemT, {
3183 for (unsigned I = 0; I != DstLen; ++I) {
3184 APInt ALane = A.elem<T>(I).toAPSInt();
3185 APInt BLane = B.elem<T>(I).toAPSInt();
3186 APInt CLane = C.elem<T>(I).toAPSInt();
3187 APInt RLane(LaneWidth, 0);
3188 if (U[I]) { / If lane not masked, compute ternary logic.
// Each result bit is looked up in the 8-bit immediate truth table using the
// corresponding bits of A (bit 2), B (bit 1), C (bit 0) as the index.
3189 for (unsigned Bit = 0; Bit != LaneWidth; ++Bit) {
3190 unsigned ABit = ALane[Bit];
3191 unsigned BBit = BLane[Bit];
3192 unsigned CBit = CLane[Bit];
3193 unsigned Idx = (ABit << 2) | (BBit << 1) | (CBit);
3194 RLane.setBitVal(Bit, Imm[Idx]);
3195 }
3196 Dst.elem<T>(I) = static_cast<T>(APSInt(RLane, DstUnsigned));
3197 } else if (MaskZ) { / If zero masked, zero the lane.
3198 Dst.elem<T>(I) = static_cast<T>(APSInt(RLane, DstUnsigned));
3199 } else { / Just masked, put in A lane.
3200 Dst.elem<T>(I) = static_cast<T>(APSInt(ALane, DstUnsigned));
3201 }
3202 }
3203 });
3204 Dst.initializeAllElements();
3205 return true;
3206}
3207
// NOTE(review): signature opener (doxygen line 3208, with the function name)
// was lost in extraction. From the body this extracts a single element from a
// vector into a scalar (vec_ext-style builtins). Confirm upstream.
3209 const CallExpr *Call, unsigned ID) {
3210 assert(Call->getNumArgs() == 2);
3211
3212 APSInt ImmAPS = popToAPSInt(S, Call->getArg(1));
3213 const Pointer &Vec = S.Stk.pop<Pointer>();
3214 if (!Vec.getFieldDesc()->isPrimitiveArray())
3215 return false;
3216
// Index wraps via `& (NumElems - 1)` — correct only for power-of-two vector
// lengths, which these builtins have.
3217 unsigned NumElems = Vec.getNumElems();
3218 unsigned Index =
3219 static_cast<unsigned>(ImmAPS.getZExtValue() & (NumElems - 1));
3220
3221 PrimType ElemT = Vec.getFieldDesc()->getPrimType();
3222 / FIXME(#161685): Replace float+int split with a numeric-only type switch
3223 if (ElemT == PT_Float) {
3224 S.Stk.push<Floating>(Vec.elem<Floating>(Index));
3225 return true;
3226 }
// NOTE(review): doxygen line 3227 is missing — presumably an
// INT_TYPE_SWITCH_NO_BOOL(ElemT, { opener matching the }); below.
3228 APSInt V = Vec.elem<T>(Index).toAPSInt();
3229 pushInteger(S, V, Call->getType());
3230 });
3231
3232 return true;
3233}
3234
// NOTE(review): signature opener (doxygen line 3235, with the function name)
// was lost in extraction. From the body this copies the base vector and
// overwrites one element (chosen by the immediate) with a scalar value
// (vec_set-style builtins). Confirm upstream.
3236 const CallExpr *Call, unsigned ID) {
3237 assert(Call->getNumArgs() == 3);
3238
3239 APSInt ImmAPS = popToAPSInt(S, Call->getArg(2));
3240 APSInt ValAPS = popToAPSInt(S, Call->getArg(1));
3241
3242 const Pointer &Base = S.Stk.pop<Pointer>();
3243 if (!Base.getFieldDesc()->isPrimitiveArray())
3244 return false;
3245
3246 const Pointer &Dst = S.Stk.peek<Pointer>();
3247
// Index wraps via `& (NumElems - 1)` — assumes a power-of-two vector length.
3248 unsigned NumElems = Base.getNumElems();
3249 unsigned Index =
3250 static_cast<unsigned>(ImmAPS.getZExtValue() & (NumElems - 1));
3251
3252 PrimType ElemT = Base.getFieldDesc()->getPrimType();
// NOTE(review): doxygen line 3253 is missing — presumably an
// INT_TYPE_SWITCH(ElemT, { opener matching the }); below.
3254 for (unsigned I = 0; I != NumElems; ++I)
3255 Dst.elem<T>(I) = Base.elem<T>(I);
3256 Dst.elem<T>(Index) = static_cast<T>(ValAPS);
3257 });
3258
// NOTE(review): doxygen line 3259 is missing — likely
// Dst.initializeAllElements();. Confirm upstream.
3260 return true;
3261}
3262
3263static bool evalICmpImm(uint8_t Imm, const APSInt &A, const APSInt &B,
3264 bool IsUnsigned) {
3265 switch (Imm & 0x7) {
3266 case 0x00: / _MM_CMPINT_EQ
3267 return (A == B);
3268 case 0x01: / _MM_CMPINT_LT
3269 return IsUnsigned ? A.ult(B) : A.slt(B);
3270 case 0x02: / _MM_CMPINT_LE
3271 return IsUnsigned ? A.ule(B) : A.sle(B);
3272 case 0x03: / _MM_CMPINT_FALSE
3273 return false;
3274 case 0x04: / _MM_CMPINT_NE
3275 return (A != B);
3276 case 0x05: / _MM_CMPINT_NLT
3277 return IsUnsigned ? A.ugt(B) : A.sgt(B);
3278 case 0x06: / _MM_CMPINT_NLE
3279 return IsUnsigned ? A.uge(B) : A.sge(B);
3280 case 0x07: / _MM_CMPINT_TRUE
3281 return true;
3282 default:
3283 llvm_unreachable("Invalid Op");
3284 }
3285}
3286
// NOTE(review): signature opener (doxygen line 3287, with the function name)
// was lost in extraction. From the body this implements the masked integer
// compare-to-mask builtins (vpcmp/vpcmpu style): per element, evaluate the
// predicate via evalICmpImm and AND with the incoming writemask.
3288 const CallExpr *Call, unsigned ID,
3289 bool IsUnsigned) {
3290 assert(Call->getNumArgs() == 4);
3291
3292 APSInt Mask = popToAPSInt(S, Call->getArg(3));
3293 APSInt Opcode = popToAPSInt(S, Call->getArg(2));
3294 unsigned CmpOp = static_cast<unsigned>(Opcode.getZExtValue());
3295 const Pointer &RHS = S.Stk.pop<Pointer>();
3296 const Pointer &LHS = S.Stk.pop<Pointer>();
3297
3298 assert(LHS.getNumElems() == RHS.getNumElems());
3299
3300 APInt RetMask = APInt::getZero(LHS.getNumElems());
3301 unsigned VectorLen = LHS.getNumElems();
3302 PrimType ElemT = LHS.getFieldDesc()->getPrimType();
3303
3304 for (unsigned ElemNum = 0; ElemNum < VectorLen; ++ElemNum) {
3305 APSInt A, B;
// NOTE(review): doxygen line 3306 is missing — presumably an
// INT_TYPE_SWITCH(ElemT, { opener matching the }); below.
3307 A = LHS.elem<T>(ElemNum).toAPSInt();
3308 B = RHS.elem<T>(ElemNum).toAPSInt();
3309 });
3310 RetMask.setBitVal(ElemNum,
3311 Mask[ElemNum] && evalICmpImm(CmpOp, A, B, IsUnsigned));
3312 }
3313 pushInteger(S, RetMask, Call->getType());
3314 return true;
3315}
3316
// NOTE(review): signature opener (doxygen line 3317, with the function name)
// was lost in extraction. From the body (each output element is a bitmask of
// earlier elements equal to element I) this implements the vpconflict
// builtins — confirm upstream.
3318 const CallExpr *Call) {
3319 assert(Call->getNumArgs() == 1);
3320
3321 QualType Arg0Type = Call->getArg(0)->getType();
3322 const auto *VecT = Arg0Type->castAs<VectorType>();
3323 PrimType ElemT = *S.getContext().classify(VecT->getElementType());
3324 unsigned NumElems = VecT->getNumElements();
3325 bool DestUnsigned = Call->getType()->isUnsignedIntegerOrEnumerationType();
3326 const Pointer &Src = S.Stk.pop<Pointer>();
3327 const Pointer &Dst = S.Stk.peek<Pointer>();
3328
3329 for (unsigned I = 0; I != NumElems; ++I) {
// NOTE(review): doxygen line 3330 is missing — presumably an
// INT_TYPE_SWITCH(ElemT, { opener matching the }); below.
3331 APSInt ElemI = Src.elem<T>(I).toAPSInt();
3332 APInt ConflictMask(ElemI.getBitWidth(), 0);
// Bit J of the result is set iff Src[J] == Src[I] for each J < I.
3333 for (unsigned J = 0; J != I; ++J) {
3334 APSInt ElemJ = Src.elem<T>(J).toAPSInt();
3335 ConflictMask.setBitVal(J, ElemI == ElemJ);
3336 }
3337 Dst.elem<T>(I) = static_cast<T>(APSInt(ConflictMask, DestUnsigned));
3338 });
3339 }
// NOTE(review): doxygen line 3340 is missing — likely
// Dst.initializeAllElements();. Confirm upstream.
3341 return true;
3342}
3343
// NOTE(review): signature opener (doxygen line 3344, with the function name)
// was lost in extraction. From the body (bit ElemNum of the result is the
// most-significant bit of element ElemNum) this implements the movmsk-style
// builtins — confirm upstream.
3345 const CallExpr *Call,
3346 unsigned ID) {
3347 assert(Call->getNumArgs() == 1);
3348
3349 const Pointer &Vec = S.Stk.pop<Pointer>();
3350 unsigned RetWidth = S.getASTContext().getIntWidth(Call->getType());
3351 APInt RetMask(RetWidth, 0);
3352
3353 unsigned VectorLen = Vec.getNumElems();
3354 PrimType ElemT = Vec.getFieldDesc()->getPrimType();
3355
3356 for (unsigned ElemNum = 0; ElemNum != VectorLen; ++ElemNum) {
3357 APSInt A;
3358 INT_TYPE_SWITCH_NO_BOOL(ElemT, { A = Vec.elem<T>(ElemNum).toAPSInt(); });
// Gather the element's sign (top) bit into the corresponding result bit.
3359 unsigned MSB = A[A.getBitWidth() - 1];
3360 RetMask.setBitVal(ElemNum, MSB);
3361 }
3362 pushInteger(S, RetMask, Call->getType());
3363 return true;
3364}
// NOTE(review): signature opener (doxygen line 3365, with the function name)
// was lost in extraction. From the body and the assert text this implements
// the cvtsd2ss builtins: convert lane 0 from double to float (optionally
// under a writemask with a rounding immediate), pass the other lanes of A
// through unchanged. Confirm name upstream.
3366 const CallExpr *Call,
3367 bool HasRoundingMask) {
3368 APSInt Rounding, MaskInt;
3369 Pointer Src, B, A;
3370
3371 if (HasRoundingMask) {
// Masked/rounded form: arg4 = rounding, arg3 = mask, then Src/B/A vectors.
3372 assert(Call->getNumArgs() == 5);
3373 Rounding = popToAPSInt(S, Call->getArg(4));
3374 MaskInt = popToAPSInt(S, Call->getArg(3));
3375 Src = S.Stk.pop<Pointer>();
3376 B = S.Stk.pop<Pointer>();
3377 A = S.Stk.pop<Pointer>();
3378 if (!CheckLoad(S, OpPC, A) || !CheckLoad(S, OpPC, B) ||
3379 !CheckLoad(S, OpPC, Src))
3380 return false;
3381 } else {
3382 assert(Call->getNumArgs() == 2);
3383 B = S.Stk.pop<Pointer>();
3384 A = S.Stk.pop<Pointer>();
3385 if (!CheckLoad(S, OpPC, A) || !CheckLoad(S, OpPC, B))
3386 return false;
3387 }
3388
3389 const auto *DstVTy = Call->getType()->castAs<VectorType>();
3390 unsigned NumElems = DstVTy->getNumElements();
3391 const Pointer &Dst = S.Stk.peek<Pointer>();
3392
3393 / Copy all elements except lane 0 (overwritten below) from A to Dst.
3394 for (unsigned I = 1; I != NumElems; ++I)
3395 Dst.elem<Floating>(I) = A.elem<Floating>(I);
3396
3397 / Convert element 0 from double to float, or use Src if masked off.
3398 if (!HasRoundingMask || (MaskInt.getZExtValue() & 0x1)) {
3399 assert(S.getASTContext().FloatTy == DstVTy->getElementType() &&
3400 "cvtsd2ss requires float element type in destination vector");
3401
3402 Floating Conv = S.allocFloat(
3403 S.getASTContext().getFloatTypeSemantics(DstVTy->getElementType()));
3404 APFloat SrcVal = B.elem<Floating>(0).getAPFloat();
3405 if (!convertDoubleToFloatStrict(SrcVal, Conv, S, Call))
3406 return false;
3407 Dst.elem<Floating>(0) = Conv;
3408 } else {
3409 Dst.elem<Floating>(0) = Src.elem<Floating>(0);
3410 }
3411
// NOTE(review): doxygen line 3412 is missing — likely
// Dst.initializeAllElements();. Confirm upstream.
3413 return true;
3414}
3415
// NOTE(review): signature opener (doxygen line 3416, with the function name)
// was lost in extraction. From the body and the assert text this implements
// the cvtpd2ps builtins: convert each source double to float, with optional
// merge-masking via a pass-through vector and an (unused here) rounding
// immediate. Confirm name upstream.
3417 const CallExpr *Call, bool IsMasked,
3418 bool HasRounding) {
3419
3420 APSInt MaskVal;
3421 Pointer PassThrough;
3422 Pointer Src;
3423 APSInt Rounding;
3424
3425 if (IsMasked) {
3426 / Pop in reverse order.
3427 if (HasRounding) {
3428 Rounding = popToAPSInt(S, Call->getArg(3));
3429 MaskVal = popToAPSInt(S, Call->getArg(2));
3430 PassThrough = S.Stk.pop<Pointer>();
3431 Src = S.Stk.pop<Pointer>();
3432 } else {
3433 MaskVal = popToAPSInt(S, Call->getArg(2));
3434 PassThrough = S.Stk.pop<Pointer>();
3435 Src = S.Stk.pop<Pointer>();
3436 }
3437
3438 if (!CheckLoad(S, OpPC, PassThrough))
3439 return false;
3440 } else {
3441 / Pop source only.
3442 Src = S.Stk.pop<Pointer>();
3443 }
3444
3445 if (!CheckLoad(S, OpPC, Src))
3446 return false;
3447
3448 const auto *RetVTy = Call->getType()->castAs<VectorType>();
3449 unsigned RetElems = RetVTy->getNumElements();
3450 unsigned SrcElems = Src.getNumElems();
3451 const Pointer &Dst = S.Stk.peek<Pointer>();
3452
3453 / Initialize destination with passthrough or zeros.
3454 for (unsigned I = 0; I != RetElems; ++I)
3455 if (IsMasked)
3456 Dst.elem<Floating>(I) = PassThrough.elem<Floating>(I);
3457 else
3458 Dst.elem<Floating>(I) = Floating(APFloat(0.0f));
3459
3460 assert(S.getASTContext().FloatTy == RetVTy->getElementType() &&
3461 "cvtpd2ps requires float element type in return vector");
3462
3463 / Convert double to float for enabled elements (only process source elements
3464 / that exist).
3465 for (unsigned I = 0; I != SrcElems; ++I) {
3466 if (IsMasked && !MaskVal[I])
3467 continue;
3468
3469 APFloat SrcVal = Src.elem<Floating>(I).getAPFloat();
3470
3471 Floating Conv = S.allocFloat(
3472 S.getASTContext().getFloatTypeSemantics(RetVTy->getElementType()));
// convertDoubleToFloatStrict rejects inexact conversions under strict FP;
// failure propagates as a non-constant evaluation.
3473 if (!convertDoubleToFloatStrict(SrcVal, Conv, S, Call))
3474 return false;
3475 Dst.elem<Floating>(I) = Conv;
3476 }
3477
// NOTE(review): doxygen line 3478 is missing — likely
// Dst.initializeAllElements();. Confirm upstream.
3479 return true;
3480}
3481
// NOTE(review): signature opener (doxygen line 3482, with the function name)
// was lost in extraction. From the body this is the generic shuffle handler:
// GetSourceIndex maps (destination index, per-element or immediate control)
// to a (source-vector, source-index) pair; a negative source index zeroes
// the element. Handles one- and two-operand forms with either an integer
// immediate or a control vector. Confirm name upstream.
3483 InterpState &S, CodePtr OpPC, const CallExpr *Call,
3484 llvm::function_ref<std::pair<unsigned, int>(unsigned, unsigned)>
3485 GetSourceIndex) {
3486
3487 assert(Call->getNumArgs() == 2 || Call->getNumArgs() == 3);
3488
3489 unsigned ShuffleMask = 0;
3490 Pointer A, MaskVector, B;
3491 bool IsVectorMask = false;
3492 bool IsSingleOperand = (Call->getNumArgs() == 2);
3493
3494 if (IsSingleOperand) {
// Single-operand form: both "sources" alias A.
3495 QualType MaskType = Call->getArg(1)->getType();
3496 if (MaskType->isVectorType()) {
3497 IsVectorMask = true;
3498 MaskVector = S.Stk.pop<Pointer>();
3499 A = S.Stk.pop<Pointer>();
3500 B = A;
3501 } else if (MaskType->isIntegerType()) {
3502 ShuffleMask = popToAPSInt(S, Call->getArg(1)).getZExtValue();
3503 A = S.Stk.pop<Pointer>();
3504 B = A;
3505 } else {
3506 return false;
3507 }
3508 } else {
3509 QualType Arg2Type = Call->getArg(2)->getType();
3510 if (Arg2Type->isVectorType()) {
3511 IsVectorMask = true;
3512 B = S.Stk.pop<Pointer>();
3513 MaskVector = S.Stk.pop<Pointer>();
3514 A = S.Stk.pop<Pointer>();
3515 } else if (Arg2Type->isIntegerType()) {
3516 ShuffleMask = popToAPSInt(S, Call->getArg(2)).getZExtValue();
3517 B = S.Stk.pop<Pointer>();
3518 A = S.Stk.pop<Pointer>();
3519 } else {
3520 return false;
3521 }
3522 }
3523
3524 QualType Arg0Type = Call->getArg(0)->getType();
3525 const auto *VecT = Arg0Type->castAs<VectorType>();
3526 PrimType ElemT = *S.getContext().classify(VecT->getElementType());
3527 unsigned NumElems = VecT->getNumElements();
3528
3529 const Pointer &Dst = S.Stk.peek<Pointer>();
3530
3531 PrimType MaskElemT = PT_Uint32;
3532 if (IsVectorMask) {
3533 QualType Arg1Type = Call->getArg(1)->getType();
3534 const auto *MaskVecT = Arg1Type->castAs<VectorType>();
3535 QualType MaskElemType = MaskVecT->getElementType();
3536 MaskElemT = *S.getContext().classify(MaskElemType);
3537 }
3538
3539 for (unsigned DstIdx = 0; DstIdx != NumElems; ++DstIdx) {
// With a vector control, each destination element reads its own control
// value; with an integer immediate, ShuffleMask stays fixed for all elements.
3540 if (IsVectorMask) {
3541 INT_TYPE_SWITCH(MaskElemT, {
3542 ShuffleMask = static_cast<unsigned>(MaskVector.elem<T>(DstIdx));
3543 });
3544 }
3545
3546 auto [SrcVecIdx, SrcIdx] = GetSourceIndex(DstIdx, ShuffleMask);
3547
3548 if (SrcIdx < 0) {
3549 / Zero out this element
3550 if (ElemT == PT_Float) {
3551 Dst.elem<Floating>(DstIdx) = Floating(
3552 S.getASTContext().getFloatTypeSemantics(VecT->getElementType()));
3553 } else {
3554 INT_TYPE_SWITCH_NO_BOOL(ElemT, { Dst.elem<T>(DstIdx) = T::from(0); });
3555 }
3556 } else {
3557 const Pointer &Src = (SrcVecIdx == 0) ? A : B;
3558 TYPE_SWITCH(ElemT, { Dst.elem<T>(DstIdx) = Src.elem<T>(SrcIdx); });
3559 }
3560 }
// NOTE(review): doxygen line 3561 is missing — likely
// Dst.initializeAllElements();. Confirm upstream.
3562
3563 return true;
3564}
3565
// NOTE(review): signature opener (doxygen line 3566, with the function name)
// was lost in extraction. From the body this handles the vector shift
// builtins whose shift amount comes from the low 64 bits of a count vector:
// ShiftOp applies the in-range shift, OverflowOp produces the result when the
// count is >= the element width. Confirm name upstream.
3567 InterpState &S, CodePtr OpPC, const CallExpr *Call,
3568 llvm::function_ref<APInt(const APInt &, uint64_t)> ShiftOp,
3569 llvm::function_ref<APInt(const APInt &, unsigned)> OverflowOp) {
3570
3571 assert(Call->getNumArgs() == 2);
3572
3573 const Pointer &Count = S.Stk.pop<Pointer>();
3574 const Pointer &Source = S.Stk.pop<Pointer>();
3575
3576 QualType SourceType = Call->getArg(0)->getType();
3577 QualType CountType = Call->getArg(1)->getType();
3578 assert(SourceType->isVectorType() && CountType->isVectorType());
3579
3580 const auto *SourceVecT = SourceType->castAs<VectorType>();
3581 const auto *CountVecT = CountType->castAs<VectorType>();
3582 PrimType SourceElemT = *S.getContext().classify(SourceVecT->getElementType());
3583 PrimType CountElemT = *S.getContext().classify(CountVecT->getElementType());
3584
3585 const Pointer &Dst = S.Stk.peek<Pointer>();
3586
3587 unsigned DestEltWidth =
3588 S.getASTContext().getTypeSize(SourceVecT->getElementType());
3589 bool IsDestUnsigned = SourceVecT->getElementType()->isUnsignedIntegerType();
3590 unsigned DestLen = SourceVecT->getNumElements();
3591 unsigned CountEltWidth =
3592 S.getASTContext().getTypeSize(CountVecT->getElementType());
3593 unsigned NumBitsInQWord = 64;
3594 unsigned NumCountElts = NumBitsInQWord / CountEltWidth;
3595
// Assemble the low 64 bits of the count vector into a single quadword; that
// quadword (not a per-element count) is the shift amount.
3596 uint64_t CountLQWord = 0;
3597 for (unsigned EltIdx = 0; EltIdx != NumCountElts; ++EltIdx) {
3598 uint64_t Elt = 0;
3599 INT_TYPE_SWITCH(CountElemT,
3600 { Elt = static_cast<uint64_t>(Count.elem<T>(EltIdx)); });
3601 CountLQWord |= (Elt << (EltIdx * CountEltWidth));
3602 }
3603
3604 for (unsigned EltIdx = 0; EltIdx != DestLen; ++EltIdx) {
3605 APSInt Elt;
3606 INT_TYPE_SWITCH(SourceElemT, { Elt = Source.elem<T>(EltIdx).toAPSInt(); });
3607
3608 APInt Result;
3609 if (CountLQWord < DestEltWidth) {
3610 Result = ShiftOp(Elt, CountLQWord);
3611 } else {
// Out-of-range counts are delegated to the per-builtin overflow behavior
// (e.g. zeroing for logical shifts).
3612 Result = OverflowOp(Elt, DestEltWidth);
3613 }
3614 if (IsDestUnsigned) {
3615 INT_TYPE_SWITCH(SourceElemT, {
3616 Dst.elem<T>(EltIdx) = T::from(Result.getZExtValue());
3617 });
3618 } else {
3619 INT_TYPE_SWITCH(SourceElemT, {
3620 Dst.elem<T>(EltIdx) = T::from(Result.getSExtValue());
3621 });
3622 }
3623 }
3624
// NOTE(review): doxygen line 3625 is missing — likely
// Dst.initializeAllElements();. Confirm upstream.
3626 return true;
3627}
3628
// NOTE(review): signature opener (doxygen line 3629, with the function name)
// was lost in extraction. From the body this appears to implement the
// vpshufbitqmb-style builtin: per byte, a 6-bit selector picks one bit out of
// the containing 64-bit source qword, and the selected bits (gated by the
// zero mask) form the integer result mask. Confirm name upstream.
3630 const CallExpr *Call) {
3631
3632 assert(Call->getNumArgs() == 3);
3633
3634 QualType SourceType = Call->getArg(0)->getType();
3635 QualType ShuffleMaskType = Call->getArg(1)->getType();
3636 QualType ZeroMaskType = Call->getArg(2)->getType();
3637 if (!SourceType->isVectorType() || !ShuffleMaskType->isVectorType() ||
3638 !ZeroMaskType->isIntegerType()) {
3639 return false;
3640 }
3641
3642 Pointer Source, ShuffleMask;
3643 APSInt ZeroMask = popToAPSInt(S, Call->getArg(2));
3644 ShuffleMask = S.Stk.pop<Pointer>();
3645 Source = S.Stk.pop<Pointer>();
3646
3647 const auto *SourceVecT = SourceType->castAs<VectorType>();
3648 const auto *ShuffleMaskVecT = ShuffleMaskType->castAs<VectorType>();
3649 assert(SourceVecT->getNumElements() == ShuffleMaskVecT->getNumElements());
3650 assert(ZeroMask.getBitWidth() == SourceVecT->getNumElements());
3651
3652 PrimType SourceElemT = *S.getContext().classify(SourceVecT->getElementType());
3653 PrimType ShuffleMaskElemT =
3654 *S.getContext().classify(ShuffleMaskVecT->getElementType());
3655
3656 unsigned NumBytesInQWord = 8;
3657 unsigned NumBitsInByte = 8;
3658 unsigned NumBytes = SourceVecT->getNumElements();
3659 unsigned NumQWords = NumBytes / NumBytesInQWord;
3660 unsigned RetWidth = ZeroMask.getBitWidth();
3661 APSInt RetMask(llvm::APInt(RetWidth, 0), /*isUnsigned=*/true);
3662
3663 for (unsigned QWordId = 0; QWordId != NumQWords; ++QWordId) {
// Reassemble the 8 source bytes of this lane into one little-endian qword.
3664 APInt SourceQWord(64, 0);
3665 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
3666 uint64_t Byte = 0;
3667 INT_TYPE_SWITCH(SourceElemT, {
3668 Byte = static_cast<uint64_t>(
3669 Source.elem<T>(QWordId * NumBytesInQWord + ByteIdx));
3670 });
3671 SourceQWord.insertBits(APInt(8, Byte & 0xFF), ByteIdx * NumBitsInByte);
3672 }
3673
3674 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
3675 unsigned SelIdx = QWordId * NumBytesInQWord + ByteIdx;
3676 unsigned M = 0;
// Only the low 6 bits of the selector are used (a bit index into the qword).
3677 INT_TYPE_SWITCH(ShuffleMaskElemT, {
3678 M = static_cast<unsigned>(ShuffleMask.elem<T>(SelIdx)) & 0x3F;
3679 });
3680
3681 if (ZeroMask[SelIdx]) {
3682 RetMask.setBitVal(SelIdx, SourceQWord[M]);
3683 }
3684 }
3685 }
3686
3687 pushInteger(S, RetMask, Call->getType());
3688 return true;
3689}
3690
// NOTE(review): signature opener (doxygen line 3691, with the function name)
// was lost in extraction. From the body (float source lanes converted to
// half-precision under a 3-bit rounding immediate, imm[2] selecting MXCSR)
// this implements the vcvtps2ph builtins — confirm name upstream.
3692 const CallExpr *Call) {
3693 / Arguments are: vector of floats, rounding immediate
3694 assert(Call->getNumArgs() == 2);
3695
3696 APSInt Imm = popToAPSInt(S, Call->getArg(1));
3697 const Pointer &Src = S.Stk.pop<Pointer>();
3698 const Pointer &Dst = S.Stk.peek<Pointer>();
3699
3700 assert(Src.getFieldDesc()->isPrimitiveArray());
3701 assert(Dst.getFieldDesc()->isPrimitiveArray());
3702
3703 const auto *SrcVTy = Call->getArg(0)->getType()->castAs<VectorType>();
3704 unsigned SrcNumElems = SrcVTy->getNumElements();
3705 const auto *DstVTy = Call->getType()->castAs<VectorType>();
3706 unsigned DstNumElems = DstVTy->getNumElements();
3707
3708 const llvm::fltSemantics &HalfSem =
// NOTE(review): doxygen line 3709 is missing — it completed this initializer
// (presumably the half-float semantics lookup). Confirm upstream.
3710
3711 / imm[2] == 1 means use MXCSR rounding mode.
3712 / In that case, we can only evaluate if the conversion is exact.
3713 int ImmVal = Imm.getZExtValue();
3714 bool UseMXCSR = (ImmVal & 4) != 0;
3715 bool IsFPConstrained =
3716 Call->getFPFeaturesInEffect(S.getASTContext().getLangOpts())
3717 .isFPConstrained();
3718
3719 llvm::RoundingMode RM;
3720 if (!UseMXCSR) {
3721 switch (ImmVal & 3) {
3722 case 0:
3723 RM = llvm::RoundingMode::NearestTiesToEven;
3724 break;
3725 case 1:
3726 RM = llvm::RoundingMode::TowardNegative;
3727 break;
3728 case 2:
3729 RM = llvm::RoundingMode::TowardPositive;
3730 break;
3731 case 3:
3732 RM = llvm::RoundingMode::TowardZero;
3733 break;
3734 default:
3735 llvm_unreachable("Invalid immediate rounding mode");
3736 }
3737 } else {
3738 / For MXCSR, we must check for exactness. We can use any rounding mode
3739 / for the trial conversion since the result is the same if it's exact.
3740 RM = llvm::RoundingMode::NearestTiesToEven;
3741 }
3742
3743 QualType DstElemQT = Dst.getFieldDesc()->getElemQualType();
3744 PrimType DstElemT = *S.getContext().classify(DstElemQT);
3745
3746 for (unsigned I = 0; I != SrcNumElems; ++I) {
3747 Floating SrcVal = Src.elem<Floating>(I);
3748 APFloat DstVal = SrcVal.getAPFloat();
3749
3750 bool LostInfo;
3751 APFloat::opStatus St = DstVal.convert(HalfSem, RM, &LostInfo);
3752
// Under strict FP with MXCSR rounding, an inexact result depends on the
// runtime rounding mode, so constant evaluation must fail.
3753 if (UseMXCSR && IsFPConstrained && St != APFloat::opOK) {
3754 S.FFDiag(S.Current->getSource(OpPC),
3755 diag::note_constexpr_dynamic_rounding);
3756 return false;
3757 }
3758
3759 INT_TYPE_SWITCH_NO_BOOL(DstElemT, {
3760 / Convert the destination value's bit pattern to an unsigned integer,
3761 / then reconstruct the element using the target type's 'from' method.
3762 uint64_t RawBits = DstVal.bitcastToAPInt().getZExtValue();
3763 Dst.elem<T>(I) = T::from(RawBits);
3764 });
3765 }
3766
3767 / Zero out remaining elements if the destination has more elements
3768 / (e.g., vcvtps2ph converting 4 floats to 8 shorts).
3769 if (DstNumElems > SrcNumElems) {
3770 for (unsigned I = SrcNumElems; I != DstNumElems; ++I) {
3771 INT_TYPE_SWITCH_NO_BOOL(DstElemT, { Dst.elem<T>(I) = T::from(0); });
3772 }
3773 }
3774
3775 Dst.initializeAllElements();
3776 return true;
3777}
3778
// NOTE(review): signature opener (doxygen line 3779, with the function name)
// was lost in extraction. From the body (per output byte, an 8-bit field is
// read from the containing source qword at a 6-bit bit offset, with
// wraparound) this appears to implement vpmultishiftqb — confirm upstream.
3780 const CallExpr *Call) {
3781 assert(Call->getNumArgs() == 2);
3782
3783 QualType ATy = Call->getArg(0)->getType();
3784 QualType BTy = Call->getArg(1)->getType();
3785 if (!ATy->isVectorType() || !BTy->isVectorType()) {
3786 return false;
3787 }
3788
3789 const Pointer &BPtr = S.Stk.pop<Pointer>();
3790 const Pointer &APtr = S.Stk.pop<Pointer>();
3791 const auto *AVecT = ATy->castAs<VectorType>();
3792 assert(AVecT->getNumElements() ==
3793 BTy->castAs<VectorType>()->getNumElements());
3794
3795 PrimType ElemT = *S.getContext().classify(AVecT->getElementType());
3796
3797 unsigned NumBytesInQWord = 8;
3798 unsigned NumBitsInByte = 8;
3799 unsigned NumBytes = AVecT->getNumElements();
3800 unsigned NumQWords = NumBytes / NumBytesInQWord;
3801 const Pointer &Dst = S.Stk.peek<Pointer>();
3802
3803 for (unsigned QWordId = 0; QWordId != NumQWords; ++QWordId) {
// Reassemble this lane's 8 data bytes (from B) into one little-endian qword.
3804 APInt BQWord(64, 0);
3805 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
3806 unsigned Idx = QWordId * NumBytesInQWord + ByteIdx;
3807 INT_TYPE_SWITCH(ElemT, {
3808 uint64_t Byte = static_cast<uint64_t>(BPtr.elem<T>(Idx));
3809 BQWord.insertBits(APInt(8, Byte & 0xFF), ByteIdx * NumBitsInByte);
3810 });
3811 }
3812
3813 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
3814 unsigned Idx = QWordId * NumBytesInQWord + ByteIdx;
3815 uint64_t Ctrl = 0;
// NOTE(review): doxygen line 3816 is missing — presumably the opening of an
// INT_TYPE_SWITCH( whose continuation is the next line. Confirm upstream.
3817 ElemT, { Ctrl = static_cast<uint64_t>(APtr.elem<T>(Idx)) & 0x3F; });
3818
// Gather 8 consecutive bits starting at bit offset Ctrl, wrapping modulo 64.
3819 APInt Byte(8, 0);
3820 for (unsigned BitIdx = 0; BitIdx != NumBitsInByte; ++BitIdx) {
3821 Byte.setBitVal(BitIdx, BQWord[(Ctrl + BitIdx) & 0x3F]);
3822 }
3823 INT_TYPE_SWITCH(ElemT,
3824 { Dst.elem<T>(Idx) = T::from(Byte.getZExtValue()); });
3825 }
3826 }
3827
// NOTE(review): doxygen line 3828 is missing — likely
// Dst.initializeAllElements();. Confirm upstream.
3829
3830 return true;
3831}
3832
// NOTE(review): signature opener (doxygen line 3833, with the function name)
// was lost in extraction. From the body (per-byte affine transform over
// GF(2) using an 8x8 bit matrix held in the A qword plus an immediate
// constant, with an Inverse flag) this implements the GFNI
// vgf2p8affineqb/vgf2p8affineinvqb builtins via GFNIAffine — confirm upstream.
3834 const CallExpr *Call,
3835 bool Inverse) {
3836 assert(Call->getNumArgs() == 3);
3837 QualType XType = Call->getArg(0)->getType();
3838 QualType AType = Call->getArg(1)->getType();
3839 QualType ImmType = Call->getArg(2)->getType();
3840 if (!XType->isVectorType() || !AType->isVectorType() ||
3841 !ImmType->isIntegerType()) {
3842 return false;
3843 }
3844
3845 Pointer X, A;
3846 APSInt Imm = popToAPSInt(S, Call->getArg(2));
3847 A = S.Stk.pop<Pointer>();
3848 X = S.Stk.pop<Pointer>();
3849
3850 const Pointer &Dst = S.Stk.peek<Pointer>();
3851 const auto *AVecT = AType->castAs<VectorType>();
3852 assert(XType->castAs<VectorType>()->getNumElements() ==
3853 AVecT->getNumElements());
3854 unsigned NumBytesInQWord = 8;
3855 unsigned NumBytes = AVecT->getNumElements();
3856 unsigned NumBitsInQWord = 64;
3857 unsigned NumQWords = NumBytes / NumBytesInQWord;
3858 unsigned NumBitsInByte = 8;
3859 PrimType AElemT = *S.getContext().classify(AVecT->getElementType());
3860
3861 / computing A*X + Imm
3862 for (unsigned QWordIdx = 0; QWordIdx != NumQWords; ++QWordIdx) {
3863 / Extract the QWords from X, A
3864 APInt XQWord(NumBitsInQWord, 0);
3865 APInt AQWord(NumBitsInQWord, 0);
3866 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
3867 unsigned Idx = QWordIdx * NumBytesInQWord + ByteIdx;
3868 uint8_t XByte;
3869 uint8_t AByte;
3870 INT_TYPE_SWITCH(AElemT, {
3871 XByte = static_cast<uint8_t>(X.elem<T>(Idx));
3872 AByte = static_cast<uint8_t>(A.elem<T>(Idx));
3873 });
3874
3875 XQWord.insertBits(APInt(NumBitsInByte, XByte), ByteIdx * NumBitsInByte);
3876 AQWord.insertBits(APInt(NumBitsInByte, AByte), ByteIdx * NumBitsInByte);
3877 }
3878
// Each output byte is the affine transform of the corresponding X byte
// against the whole 64-bit A matrix for this lane.
3879 for (unsigned ByteIdx = 0; ByteIdx != NumBytesInQWord; ++ByteIdx) {
3880 unsigned Idx = QWordIdx * NumBytesInQWord + ByteIdx;
3881 uint8_t XByte =
3882 XQWord.lshr(ByteIdx * NumBitsInByte).getLoBits(8).getZExtValue();
3883 INT_TYPE_SWITCH(AElemT, {
3884 Dst.elem<T>(Idx) = T::from(GFNIAffine(XByte, AQWord, Imm, Inverse));
3885 });
3886 }
3887 }
3888 Dst.initializeAllElements();
3889 return true;
3890}
3891
// NOTE(review): signature opener (doxygen line 3892, with the function name)
// was lost in extraction. From the body (per-byte product through GFNIMul)
// this implements the GFNI vgf2p8mulb builtin — confirm name upstream.
3893 const CallExpr *Call) {
3894 assert(Call->getNumArgs() == 2);
3895
3896 QualType AType = Call->getArg(0)->getType();
3897 QualType BType = Call->getArg(1)->getType();
3898 if (!AType->isVectorType() || !BType->isVectorType()) {
3899 return false;
3900 }
3901
3902 Pointer A, B;
3903 B = S.Stk.pop<Pointer>();
3904 A = S.Stk.pop<Pointer>();
3905
3906 const Pointer &Dst = S.Stk.peek<Pointer>();
3907 const auto *AVecT = AType->castAs<VectorType>();
3908 assert(AVecT->getNumElements() ==
3909 BType->castAs<VectorType>()->getNumElements());
3910
3911 PrimType AElemT = *S.getContext().classify(AVecT->getElementType());
3912 unsigned NumBytes = A.getNumElems();
3913
// Elementwise: each destination byte is GFNIMul of the matching A and B
// bytes (a GF(2^8) multiplication per the builtin's semantics — see GFNIMul).
3914 for (unsigned ByteIdx = 0; ByteIdx != NumBytes; ++ByteIdx) {
3915 uint8_t AByte, BByte;
3916 INT_TYPE_SWITCH(AElemT, {
3917 AByte = static_cast<uint8_t>(A.elem<T>(ByteIdx));
3918 BByte = static_cast<uint8_t>(B.elem<T>(ByteIdx));
3919 Dst.elem<T>(ByteIdx) = T::from(GFNIMul(AByte, BByte));
3920 });
3921 }
3922
3923 Dst.initializeAllElements();
3924 return true;
3925}
3926
3928 uint32_t BuiltinID) {
3929 if (!S.getASTContext().BuiltinInfo.isConstantEvaluated(BuiltinID))
3930 return Invalid(S, OpPC);
3931
3932 const InterpFrame *Frame = S.Current;
3933 switch (BuiltinID) {
3934 case Builtin::BI__builtin_is_constant_evaluated:
3936
3937 case Builtin::BI__builtin_assume:
3938 case Builtin::BI__assume:
3939 return interp__builtin_assume(S, OpPC, Frame, Call);
3940
3941 case Builtin::BI__builtin_strcmp:
3942 case Builtin::BIstrcmp:
3943 case Builtin::BI__builtin_strncmp:
3944 case Builtin::BIstrncmp:
3945 case Builtin::BI__builtin_wcsncmp:
3946 case Builtin::BIwcsncmp:
3947 case Builtin::BI__builtin_wcscmp:
3948 case Builtin::BIwcscmp:
3949 return interp__builtin_strcmp(S, OpPC, Frame, Call, BuiltinID);
3950
3951 case Builtin::BI__builtin_strlen:
3952 case Builtin::BIstrlen:
3953 case Builtin::BI__builtin_wcslen:
3954 case Builtin::BIwcslen:
3955 return interp__builtin_strlen(S, OpPC, Frame, Call, BuiltinID);
3956
3957 case Builtin::BI__builtin_nan:
3958 case Builtin::BI__builtin_nanf:
3959 case Builtin::BI__builtin_nanl:
3960 case Builtin::BI__builtin_nanf16:
3961 case Builtin::BI__builtin_nanf128:
3962 return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/false);
3963
3964 case Builtin::BI__builtin_nans:
3965 case Builtin::BI__builtin_nansf:
3966 case Builtin::BI__builtin_nansl:
3967 case Builtin::BI__builtin_nansf16:
3968 case Builtin::BI__builtin_nansf128:
3969 return interp__builtin_nan(S, OpPC, Frame, Call, /*Signaling=*/true);
3970
3971 case Builtin::BI__builtin_huge_val:
3972 case Builtin::BI__builtin_huge_valf:
3973 case Builtin::BI__builtin_huge_vall:
3974 case Builtin::BI__builtin_huge_valf16:
3975 case Builtin::BI__builtin_huge_valf128:
3976 case Builtin::BI__builtin_inf:
3977 case Builtin::BI__builtin_inff:
3978 case Builtin::BI__builtin_infl:
3979 case Builtin::BI__builtin_inff16:
3980 case Builtin::BI__builtin_inff128:
3981 return interp__builtin_inf(S, OpPC, Frame, Call);
3982
3983 case Builtin::BI__builtin_copysign:
3984 case Builtin::BI__builtin_copysignf:
3985 case Builtin::BI__builtin_copysignl:
3986 case Builtin::BI__builtin_copysignf128:
3987 return interp__builtin_copysign(S, OpPC, Frame);
3988
3989 case Builtin::BI__builtin_fmin:
3990 case Builtin::BI__builtin_fminf:
3991 case Builtin::BI__builtin_fminl:
3992 case Builtin::BI__builtin_fminf16:
3993 case Builtin::BI__builtin_fminf128:
3994 return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/false);
3995
3996 case Builtin::BI__builtin_fminimum_num:
3997 case Builtin::BI__builtin_fminimum_numf:
3998 case Builtin::BI__builtin_fminimum_numl:
3999 case Builtin::BI__builtin_fminimum_numf16:
4000 case Builtin::BI__builtin_fminimum_numf128:
4001 return interp__builtin_fmin(S, OpPC, Frame, /*IsNumBuiltin=*/true);
4002
4003 case Builtin::BI__builtin_fmax:
4004 case Builtin::BI__builtin_fmaxf:
4005 case Builtin::BI__builtin_fmaxl:
4006 case Builtin::BI__builtin_fmaxf16:
4007 case Builtin::BI__builtin_fmaxf128:
4008 return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/false);
4009
4010 case Builtin::BI__builtin_fmaximum_num:
4011 case Builtin::BI__builtin_fmaximum_numf:
4012 case Builtin::BI__builtin_fmaximum_numl:
4013 case Builtin::BI__builtin_fmaximum_numf16:
4014 case Builtin::BI__builtin_fmaximum_numf128:
4015 return interp__builtin_fmax(S, OpPC, Frame, /*IsNumBuiltin=*/true);
4016
4017 case Builtin::BI__builtin_isnan:
4018 return interp__builtin_isnan(S, OpPC, Frame, Call);
4019
4020 case Builtin::BI__builtin_issignaling:
4021 return interp__builtin_issignaling(S, OpPC, Frame, Call);
4022
4023 case Builtin::BI__builtin_isinf:
4024 return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/false, Call);
4025
4026 case Builtin::BI__builtin_isinf_sign:
4027 return interp__builtin_isinf(S, OpPC, Frame, /*Sign=*/true, Call);
4028
4029 case Builtin::BI__builtin_isfinite:
4030 return interp__builtin_isfinite(S, OpPC, Frame, Call);
4031
4032 case Builtin::BI__builtin_isnormal:
4033 return interp__builtin_isnormal(S, OpPC, Frame, Call);
4034
4035 case Builtin::BI__builtin_issubnormal:
4036 return interp__builtin_issubnormal(S, OpPC, Frame, Call);
4037
4038 case Builtin::BI__builtin_iszero:
4039 return interp__builtin_iszero(S, OpPC, Frame, Call);
4040
4041 case Builtin::BI__builtin_signbit:
4042 case Builtin::BI__builtin_signbitf:
4043 case Builtin::BI__builtin_signbitl:
4044 return interp__builtin_signbit(S, OpPC, Frame, Call);
4045
4046 case Builtin::BI__builtin_isgreater:
4047 case Builtin::BI__builtin_isgreaterequal:
4048 case Builtin::BI__builtin_isless:
4049 case Builtin::BI__builtin_islessequal:
4050 case Builtin::BI__builtin_islessgreater:
4051 case Builtin::BI__builtin_isunordered:
4052 return interp_floating_comparison(S, OpPC, Call, BuiltinID);
4053
4054 case Builtin::BI__builtin_isfpclass:
4055 return interp__builtin_isfpclass(S, OpPC, Frame, Call);
4056
4057 case Builtin::BI__builtin_fpclassify:
4058 return interp__builtin_fpclassify(S, OpPC, Frame, Call);
4059
4060 case Builtin::BI__builtin_fabs:
4061 case Builtin::BI__builtin_fabsf:
4062 case Builtin::BI__builtin_fabsl:
4063 case Builtin::BI__builtin_fabsf128:
4064 return interp__builtin_fabs(S, OpPC, Frame);
4065
4066 case Builtin::BI__builtin_abs:
4067 case Builtin::BI__builtin_labs:
4068 case Builtin::BI__builtin_llabs:
4069 return interp__builtin_abs(S, OpPC, Frame, Call);
4070
4071 case Builtin::BI__builtin_popcount:
4072 case Builtin::BI__builtin_popcountl:
4073 case Builtin::BI__builtin_popcountll:
4074 case Builtin::BI__builtin_popcountg:
4075 case Builtin::BI__popcnt16: / Microsoft variants of popcount
4076 case Builtin::BI__popcnt:
4077 case Builtin::BI__popcnt64:
4078 return interp__builtin_popcount(S, OpPC, Frame, Call);
4079
4080 case Builtin::BI__builtin_parity:
4081 case Builtin::BI__builtin_parityl:
4082 case Builtin::BI__builtin_parityll:
4084 S, OpPC, Call, [](const APSInt &Val) {
4085 return APInt(Val.getBitWidth(), Val.popcount() % 2);
4086 });
4087 case Builtin::BI__builtin_clrsb:
4088 case Builtin::BI__builtin_clrsbl:
4089 case Builtin::BI__builtin_clrsbll:
4091 S, OpPC, Call, [](const APSInt &Val) {
4092 return APInt(Val.getBitWidth(),
4093 Val.getBitWidth() - Val.getSignificantBits());
4094 });
4095 case Builtin::BI__builtin_bitreverse8:
4096 case Builtin::BI__builtin_bitreverse16:
4097 case Builtin::BI__builtin_bitreverse32:
4098 case Builtin::BI__builtin_bitreverse64:
4100 S, OpPC, Call, [](const APSInt &Val) { return Val.reverseBits(); });
4101
4102 case Builtin::BI__builtin_classify_type:
4103 return interp__builtin_classify_type(S, OpPC, Frame, Call);
4104
4105 case Builtin::BI__builtin_expect:
4106 case Builtin::BI__builtin_expect_with_probability:
4107 return interp__builtin_expect(S, OpPC, Frame, Call);
4108
4109 case Builtin::BI__builtin_rotateleft8:
4110 case Builtin::BI__builtin_rotateleft16:
4111 case Builtin::BI__builtin_rotateleft32:
4112 case Builtin::BI__builtin_rotateleft64:
4113 case Builtin::BI_rotl8: / Microsoft variants of rotate left
4114 case Builtin::BI_rotl16:
4115 case Builtin::BI_rotl:
4116 case Builtin::BI_lrotl:
4117 case Builtin::BI_rotl64:
4119 S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) {
4120 return Value.rotl(Amount);
4121 });
4122
4123 case Builtin::BI__builtin_rotateright8:
4124 case Builtin::BI__builtin_rotateright16:
4125 case Builtin::BI__builtin_rotateright32:
4126 case Builtin::BI__builtin_rotateright64:
4127 case Builtin::BI_rotr8: / Microsoft variants of rotate right
4128 case Builtin::BI_rotr16:
4129 case Builtin::BI_rotr:
4130 case Builtin::BI_lrotr:
4131 case Builtin::BI_rotr64:
4133 S, OpPC, Call, [](const APSInt &Value, const APSInt &Amount) {
4134 return Value.rotr(Amount);
4135 });
4136
4137 case Builtin::BI__builtin_ffs:
4138 case Builtin::BI__builtin_ffsl:
4139 case Builtin::BI__builtin_ffsll:
4141 S, OpPC, Call, [](const APSInt &Val) {
4142 return APInt(Val.getBitWidth(),
4143 Val.isZero() ? 0u : Val.countTrailingZeros() + 1u);
4144 });
4145
4146 case Builtin::BIaddressof:
4147 case Builtin::BI__addressof:
4148 case Builtin::BI__builtin_addressof:
4149 assert(isNoopBuiltin(BuiltinID));
4150 return interp__builtin_addressof(S, OpPC, Frame, Call);
4151
4152 case Builtin::BIas_const:
4153 case Builtin::BIforward:
4154 case Builtin::BIforward_like:
4155 case Builtin::BImove:
4156 case Builtin::BImove_if_noexcept:
4157 assert(isNoopBuiltin(BuiltinID));
4158 return interp__builtin_move(S, OpPC, Frame, Call);
4159
4160 case Builtin::BI__builtin_eh_return_data_regno:
4162
4163 case Builtin::BI__builtin_launder:
4164 assert(isNoopBuiltin(BuiltinID));
4165 return true;
4166
4167 case Builtin::BI__builtin_add_overflow:
4168 case Builtin::BI__builtin_sub_overflow:
4169 case Builtin::BI__builtin_mul_overflow:
4170 case Builtin::BI__builtin_sadd_overflow:
4171 case Builtin::BI__builtin_uadd_overflow:
4172 case Builtin::BI__builtin_uaddl_overflow:
4173 case Builtin::BI__builtin_uaddll_overflow:
4174 case Builtin::BI__builtin_usub_overflow:
4175 case Builtin::BI__builtin_usubl_overflow:
4176 case Builtin::BI__builtin_usubll_overflow:
4177 case Builtin::BI__builtin_umul_overflow:
4178 case Builtin::BI__builtin_umull_overflow:
4179 case Builtin::BI__builtin_umulll_overflow:
4180 case Builtin::BI__builtin_saddl_overflow:
4181 case Builtin::BI__builtin_saddll_overflow:
4182 case Builtin::BI__builtin_ssub_overflow:
4183 case Builtin::BI__builtin_ssubl_overflow:
4184 case Builtin::BI__builtin_ssubll_overflow:
4185 case Builtin::BI__builtin_smul_overflow:
4186 case Builtin::BI__builtin_smull_overflow:
4187 case Builtin::BI__builtin_smulll_overflow:
4188 return interp__builtin_overflowop(S, OpPC, Call, BuiltinID);
4189
4190 case Builtin::BI__builtin_addcb:
4191 case Builtin::BI__builtin_addcs:
4192 case Builtin::BI__builtin_addc:
4193 case Builtin::BI__builtin_addcl:
4194 case Builtin::BI__builtin_addcll:
4195 case Builtin::BI__builtin_subcb:
4196 case Builtin::BI__builtin_subcs:
4197 case Builtin::BI__builtin_subc:
4198 case Builtin::BI__builtin_subcl:
4199 case Builtin::BI__builtin_subcll:
4200 return interp__builtin_carryop(S, OpPC, Frame, Call, BuiltinID);
4201
4202 case Builtin::BI__builtin_clz:
4203 case Builtin::BI__builtin_clzl:
4204 case Builtin::BI__builtin_clzll:
4205 case Builtin::BI__builtin_clzs:
4206 case Builtin::BI__builtin_clzg:
4207 case Builtin::BI__lzcnt16: / Microsoft variants of count leading-zeroes
4208 case Builtin::BI__lzcnt:
4209 case Builtin::BI__lzcnt64:
4210 return interp__builtin_clz(S, OpPC, Frame, Call, BuiltinID);
4211
4212 case Builtin::BI__builtin_ctz:
4213 case Builtin::BI__builtin_ctzl:
4214 case Builtin::BI__builtin_ctzll:
4215 case Builtin::BI__builtin_ctzs:
4216 case Builtin::BI__builtin_ctzg:
4217 return interp__builtin_ctz(S, OpPC, Frame, Call, BuiltinID);
4218
4219 case Builtin::BI__builtin_elementwise_clzg:
4220 case Builtin::BI__builtin_elementwise_ctzg:
4222 BuiltinID);
4223 case Builtin::BI__builtin_bswapg:
4224 case Builtin::BI__builtin_bswap16:
4225 case Builtin::BI__builtin_bswap32:
4226 case Builtin::BI__builtin_bswap64:
4227 return interp__builtin_bswap(S, OpPC, Frame, Call);
4228
4229 case Builtin::BI__atomic_always_lock_free:
4230 case Builtin::BI__atomic_is_lock_free:
4231 return interp__builtin_atomic_lock_free(S, OpPC, Frame, Call, BuiltinID);
4232
4233 case Builtin::BI__c11_atomic_is_lock_free:
4235
4236 case Builtin::BI__builtin_complex:
4237 return interp__builtin_complex(S, OpPC, Frame, Call);
4238
4239 case Builtin::BI__builtin_is_aligned:
4240 case Builtin::BI__builtin_align_up:
4241 case Builtin::BI__builtin_align_down:
4242 return interp__builtin_is_aligned_up_down(S, OpPC, Frame, Call, BuiltinID);
4243
4244 case Builtin::BI__builtin_assume_aligned:
4245 return interp__builtin_assume_aligned(S, OpPC, Frame, Call);
4246
4247 case clang::X86::BI__builtin_ia32_bextr_u32:
4248 case clang::X86::BI__builtin_ia32_bextr_u64:
4249 case clang::X86::BI__builtin_ia32_bextri_u32:
4250 case clang::X86::BI__builtin_ia32_bextri_u64:
4252 S, OpPC, Call, [](const APSInt &Val, const APSInt &Idx) {
4253 unsigned BitWidth = Val.getBitWidth();
4254 uint64_t Shift = Idx.extractBitsAsZExtValue(8, 0);
4255 uint64_t Length = Idx.extractBitsAsZExtValue(8, 8);
4256 if (Length > BitWidth) {
4257 Length = BitWidth;
4258 }
4259
4260 / Handle out of bounds cases.
4261 if (Length == 0 || Shift >= BitWidth)
4262 return APInt(BitWidth, 0);
4263
4264 uint64_t Result = Val.getZExtValue() >> Shift;
4265 Result &= llvm::maskTrailingOnes<uint64_t>(Length);
4266 return APInt(BitWidth, Result);
4267 });
4268
4269 case clang::X86::BI__builtin_ia32_bzhi_si:
4270 case clang::X86::BI__builtin_ia32_bzhi_di:
4272 S, OpPC, Call, [](const APSInt &Val, const APSInt &Idx) {
4273 unsigned BitWidth = Val.getBitWidth();
4274 uint64_t Index = Idx.extractBitsAsZExtValue(8, 0);
4275 APSInt Result = Val;
4276
4277 if (Index < BitWidth)
4278 Result.clearHighBits(BitWidth - Index);
4279
4280 return Result;
4281 });
4282
4283 case clang::X86::BI__builtin_ia32_ktestcqi:
4284 case clang::X86::BI__builtin_ia32_ktestchi:
4285 case clang::X86::BI__builtin_ia32_ktestcsi:
4286 case clang::X86::BI__builtin_ia32_ktestcdi:
4288 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
4289 return APInt(sizeof(unsigned char) * 8, (~A & B) == 0);
4290 });
4291
4292 case clang::X86::BI__builtin_ia32_ktestzqi:
4293 case clang::X86::BI__builtin_ia32_ktestzhi:
4294 case clang::X86::BI__builtin_ia32_ktestzsi:
4295 case clang::X86::BI__builtin_ia32_ktestzdi:
4297 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
4298 return APInt(sizeof(unsigned char) * 8, (A & B) == 0);
4299 });
4300
4301 case clang::X86::BI__builtin_ia32_kortestcqi:
4302 case clang::X86::BI__builtin_ia32_kortestchi:
4303 case clang::X86::BI__builtin_ia32_kortestcsi:
4304 case clang::X86::BI__builtin_ia32_kortestcdi:
4306 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
4307 return APInt(sizeof(unsigned char) * 8, ~(A | B) == 0);
4308 });
4309
4310 case clang::X86::BI__builtin_ia32_kortestzqi:
4311 case clang::X86::BI__builtin_ia32_kortestzhi:
4312 case clang::X86::BI__builtin_ia32_kortestzsi:
4313 case clang::X86::BI__builtin_ia32_kortestzdi:
4315 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
4316 return APInt(sizeof(unsigned char) * 8, (A | B) == 0);
4317 });
4318
4319 case clang::X86::BI__builtin_ia32_kshiftliqi:
4320 case clang::X86::BI__builtin_ia32_kshiftlihi:
4321 case clang::X86::BI__builtin_ia32_kshiftlisi:
4322 case clang::X86::BI__builtin_ia32_kshiftlidi:
4324 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4325 unsigned Amt = RHS.getZExtValue() & 0xFF;
4326 if (Amt >= LHS.getBitWidth())
4327 return APInt::getZero(LHS.getBitWidth());
4328 return LHS.shl(Amt);
4329 });
4330
4331 case clang::X86::BI__builtin_ia32_kshiftriqi:
4332 case clang::X86::BI__builtin_ia32_kshiftrihi:
4333 case clang::X86::BI__builtin_ia32_kshiftrisi:
4334 case clang::X86::BI__builtin_ia32_kshiftridi:
4336 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4337 unsigned Amt = RHS.getZExtValue() & 0xFF;
4338 if (Amt >= LHS.getBitWidth())
4339 return APInt::getZero(LHS.getBitWidth());
4340 return LHS.lshr(Amt);
4341 });
4342
4343 case clang::X86::BI__builtin_ia32_lzcnt_u16:
4344 case clang::X86::BI__builtin_ia32_lzcnt_u32:
4345 case clang::X86::BI__builtin_ia32_lzcnt_u64:
4347 S, OpPC, Call, [](const APSInt &Src) {
4348 return APInt(Src.getBitWidth(), Src.countLeadingZeros());
4349 });
4350
4351 case clang::X86::BI__builtin_ia32_tzcnt_u16:
4352 case clang::X86::BI__builtin_ia32_tzcnt_u32:
4353 case clang::X86::BI__builtin_ia32_tzcnt_u64:
4355 S, OpPC, Call, [](const APSInt &Src) {
4356 return APInt(Src.getBitWidth(), Src.countTrailingZeros());
4357 });
4358
4359 case clang::X86::BI__builtin_ia32_pdep_si:
4360 case clang::X86::BI__builtin_ia32_pdep_di:
4362 S, OpPC, Call, [](const APSInt &Val, const APSInt &Mask) {
4363 unsigned BitWidth = Val.getBitWidth();
4364 APInt Result = APInt::getZero(BitWidth);
4365
4366 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
4367 if (Mask[I])
4368 Result.setBitVal(I, Val[P++]);
4369 }
4370
4371 return Result;
4372 });
4373
4374 case clang::X86::BI__builtin_ia32_pext_si:
4375 case clang::X86::BI__builtin_ia32_pext_di:
4377 S, OpPC, Call, [](const APSInt &Val, const APSInt &Mask) {
4378 unsigned BitWidth = Val.getBitWidth();
4379 APInt Result = APInt::getZero(BitWidth);
4380
4381 for (unsigned I = 0, P = 0; I != BitWidth; ++I) {
4382 if (Mask[I])
4383 Result.setBitVal(P++, Val[I]);
4384 }
4385
4386 return Result;
4387 });
4388
4389 case clang::X86::BI__builtin_ia32_addcarryx_u32:
4390 case clang::X86::BI__builtin_ia32_addcarryx_u64:
4391 case clang::X86::BI__builtin_ia32_subborrow_u32:
4392 case clang::X86::BI__builtin_ia32_subborrow_u64:
4394 BuiltinID);
4395
4396 case Builtin::BI__builtin_os_log_format_buffer_size:
4398
4399 case Builtin::BI__builtin_ptrauth_string_discriminator:
4401
4402 case Builtin::BI__builtin_infer_alloc_token:
4404
4405 case Builtin::BI__noop:
4406 pushInteger(S, 0, Call->getType());
4407 return true;
4408
4409 case Builtin::BI__builtin_operator_new:
4410 return interp__builtin_operator_new(S, OpPC, Frame, Call);
4411
4412 case Builtin::BI__builtin_operator_delete:
4413 return interp__builtin_operator_delete(S, OpPC, Frame, Call);
4414
4415 case Builtin::BI__arithmetic_fence:
4417
4418 case Builtin::BI__builtin_reduce_add:
4419 case Builtin::BI__builtin_reduce_mul:
4420 case Builtin::BI__builtin_reduce_and:
4421 case Builtin::BI__builtin_reduce_or:
4422 case Builtin::BI__builtin_reduce_xor:
4423 case Builtin::BI__builtin_reduce_min:
4424 case Builtin::BI__builtin_reduce_max:
4425 return interp__builtin_vector_reduce(S, OpPC, Call, BuiltinID);
4426
4427 case Builtin::BI__builtin_elementwise_popcount:
4429 S, OpPC, Call, [](const APSInt &Src) {
4430 return APInt(Src.getBitWidth(), Src.popcount());
4431 });
4432 case Builtin::BI__builtin_elementwise_bitreverse:
4434 S, OpPC, Call, [](const APSInt &Src) { return Src.reverseBits(); });
4435
4436 case Builtin::BI__builtin_elementwise_abs:
4437 return interp__builtin_elementwise_abs(S, OpPC, Frame, Call, BuiltinID);
4438
4439 case Builtin::BI__builtin_memcpy:
4440 case Builtin::BImemcpy:
4441 case Builtin::BI__builtin_wmemcpy:
4442 case Builtin::BIwmemcpy:
4443 case Builtin::BI__builtin_memmove:
4444 case Builtin::BImemmove:
4445 case Builtin::BI__builtin_wmemmove:
4446 case Builtin::BIwmemmove:
4447 return interp__builtin_memcpy(S, OpPC, Frame, Call, BuiltinID);
4448
4449 case Builtin::BI__builtin_memcmp:
4450 case Builtin::BImemcmp:
4451 case Builtin::BI__builtin_bcmp:
4452 case Builtin::BIbcmp:
4453 case Builtin::BI__builtin_wmemcmp:
4454 case Builtin::BIwmemcmp:
4455 return interp__builtin_memcmp(S, OpPC, Frame, Call, BuiltinID);
4456
4457 case Builtin::BImemchr:
4458 case Builtin::BI__builtin_memchr:
4459 case Builtin::BIstrchr:
4460 case Builtin::BI__builtin_strchr:
4461 case Builtin::BIwmemchr:
4462 case Builtin::BI__builtin_wmemchr:
4463 case Builtin::BIwcschr:
4464 case Builtin::BI__builtin_wcschr:
4465 case Builtin::BI__builtin_char_memchr:
4466 return interp__builtin_memchr(S, OpPC, Call, BuiltinID);
4467
4468 case Builtin::BI__builtin_object_size:
4469 case Builtin::BI__builtin_dynamic_object_size:
4470 return interp__builtin_object_size(S, OpPC, Frame, Call);
4471
4472 case Builtin::BI__builtin_is_within_lifetime:
4474
4475 case Builtin::BI__builtin_elementwise_add_sat:
4477 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4478 return LHS.isSigned() ? LHS.sadd_sat(RHS) : LHS.uadd_sat(RHS);
4479 });
4480
4481 case Builtin::BI__builtin_elementwise_sub_sat:
4483 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4484 return LHS.isSigned() ? LHS.ssub_sat(RHS) : LHS.usub_sat(RHS);
4485 });
4486 case X86::BI__builtin_ia32_extract128i256:
4487 case X86::BI__builtin_ia32_vextractf128_pd256:
4488 case X86::BI__builtin_ia32_vextractf128_ps256:
4489 case X86::BI__builtin_ia32_vextractf128_si256:
4490 return interp__builtin_x86_extract_vector(S, OpPC, Call, BuiltinID);
4491
4492 case X86::BI__builtin_ia32_extractf32x4_256_mask:
4493 case X86::BI__builtin_ia32_extractf32x4_mask:
4494 case X86::BI__builtin_ia32_extractf32x8_mask:
4495 case X86::BI__builtin_ia32_extractf64x2_256_mask:
4496 case X86::BI__builtin_ia32_extractf64x2_512_mask:
4497 case X86::BI__builtin_ia32_extractf64x4_mask:
4498 case X86::BI__builtin_ia32_extracti32x4_256_mask:
4499 case X86::BI__builtin_ia32_extracti32x4_mask:
4500 case X86::BI__builtin_ia32_extracti32x8_mask:
4501 case X86::BI__builtin_ia32_extracti64x2_256_mask:
4502 case X86::BI__builtin_ia32_extracti64x2_512_mask:
4503 case X86::BI__builtin_ia32_extracti64x4_mask:
4504 return interp__builtin_x86_extract_vector_masked(S, OpPC, Call, BuiltinID);
4505
4506 case clang::X86::BI__builtin_ia32_pmulhrsw128:
4507 case clang::X86::BI__builtin_ia32_pmulhrsw256:
4508 case clang::X86::BI__builtin_ia32_pmulhrsw512:
4510 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4511 return (llvm::APIntOps::mulsExtended(LHS, RHS).ashr(14) + 1)
4512 .extractBits(16, 1);
4513 });
4514
4515 case clang::X86::BI__builtin_ia32_movmskps:
4516 case clang::X86::BI__builtin_ia32_movmskpd:
4517 case clang::X86::BI__builtin_ia32_pmovmskb128:
4518 case clang::X86::BI__builtin_ia32_pmovmskb256:
4519 case clang::X86::BI__builtin_ia32_movmskps256:
4520 case clang::X86::BI__builtin_ia32_movmskpd256: {
4521 return interp__builtin_ia32_movmsk_op(S, OpPC, Call);
4522 }
4523
4524 case X86::BI__builtin_ia32_psignb128:
4525 case X86::BI__builtin_ia32_psignb256:
4526 case X86::BI__builtin_ia32_psignw128:
4527 case X86::BI__builtin_ia32_psignw256:
4528 case X86::BI__builtin_ia32_psignd128:
4529 case X86::BI__builtin_ia32_psignd256:
4531 S, OpPC, Call, [](const APInt &AElem, const APInt &BElem) {
4532 if (BElem.isZero())
4533 return APInt::getZero(AElem.getBitWidth());
4534 if (BElem.isNegative())
4535 return -AElem;
4536 return AElem;
4537 });
4538
4539 case clang::X86::BI__builtin_ia32_pavgb128:
4540 case clang::X86::BI__builtin_ia32_pavgw128:
4541 case clang::X86::BI__builtin_ia32_pavgb256:
4542 case clang::X86::BI__builtin_ia32_pavgw256:
4543 case clang::X86::BI__builtin_ia32_pavgb512:
4544 case clang::X86::BI__builtin_ia32_pavgw512:
4546 llvm::APIntOps::avgCeilU);
4547
4548 case clang::X86::BI__builtin_ia32_pmaddubsw128:
4549 case clang::X86::BI__builtin_ia32_pmaddubsw256:
4550 case clang::X86::BI__builtin_ia32_pmaddubsw512:
4552 S, OpPC, Call,
4553 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
4554 const APSInt &HiRHS) {
4555 unsigned BitWidth = 2 * LoLHS.getBitWidth();
4556 return (LoLHS.zext(BitWidth) * LoRHS.sext(BitWidth))
4557 .sadd_sat((HiLHS.zext(BitWidth) * HiRHS.sext(BitWidth)));
4558 });
4559
4560 case clang::X86::BI__builtin_ia32_pmaddwd128:
4561 case clang::X86::BI__builtin_ia32_pmaddwd256:
4562 case clang::X86::BI__builtin_ia32_pmaddwd512:
4564 S, OpPC, Call,
4565 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
4566 const APSInt &HiRHS) {
4567 unsigned BitWidth = 2 * LoLHS.getBitWidth();
4568 return (LoLHS.sext(BitWidth) * LoRHS.sext(BitWidth)) +
4569 (HiLHS.sext(BitWidth) * HiRHS.sext(BitWidth));
4570 });
4571
4572 case clang::X86::BI__builtin_ia32_pmulhuw128:
4573 case clang::X86::BI__builtin_ia32_pmulhuw256:
4574 case clang::X86::BI__builtin_ia32_pmulhuw512:
4576 llvm::APIntOps::mulhu);
4577
4578 case clang::X86::BI__builtin_ia32_pmulhw128:
4579 case clang::X86::BI__builtin_ia32_pmulhw256:
4580 case clang::X86::BI__builtin_ia32_pmulhw512:
4582 llvm::APIntOps::mulhs);
4583
4584 case clang::X86::BI__builtin_ia32_psllv2di:
4585 case clang::X86::BI__builtin_ia32_psllv4di:
4586 case clang::X86::BI__builtin_ia32_psllv4si:
4587 case clang::X86::BI__builtin_ia32_psllv8di:
4588 case clang::X86::BI__builtin_ia32_psllv8hi:
4589 case clang::X86::BI__builtin_ia32_psllv8si:
4590 case clang::X86::BI__builtin_ia32_psllv16hi:
4591 case clang::X86::BI__builtin_ia32_psllv16si:
4592 case clang::X86::BI__builtin_ia32_psllv32hi:
4593 case clang::X86::BI__builtin_ia32_psllwi128:
4594 case clang::X86::BI__builtin_ia32_psllwi256:
4595 case clang::X86::BI__builtin_ia32_psllwi512:
4596 case clang::X86::BI__builtin_ia32_pslldi128:
4597 case clang::X86::BI__builtin_ia32_pslldi256:
4598 case clang::X86::BI__builtin_ia32_pslldi512:
4599 case clang::X86::BI__builtin_ia32_psllqi128:
4600 case clang::X86::BI__builtin_ia32_psllqi256:
4601 case clang::X86::BI__builtin_ia32_psllqi512:
4603 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4604 if (RHS.uge(LHS.getBitWidth())) {
4605 return APInt::getZero(LHS.getBitWidth());
4606 }
4607 return LHS.shl(RHS.getZExtValue());
4608 });
4609
4610 case clang::X86::BI__builtin_ia32_psrav4si:
4611 case clang::X86::BI__builtin_ia32_psrav8di:
4612 case clang::X86::BI__builtin_ia32_psrav8hi:
4613 case clang::X86::BI__builtin_ia32_psrav8si:
4614 case clang::X86::BI__builtin_ia32_psrav16hi:
4615 case clang::X86::BI__builtin_ia32_psrav16si:
4616 case clang::X86::BI__builtin_ia32_psrav32hi:
4617 case clang::X86::BI__builtin_ia32_psravq128:
4618 case clang::X86::BI__builtin_ia32_psravq256:
4619 case clang::X86::BI__builtin_ia32_psrawi128:
4620 case clang::X86::BI__builtin_ia32_psrawi256:
4621 case clang::X86::BI__builtin_ia32_psrawi512:
4622 case clang::X86::BI__builtin_ia32_psradi128:
4623 case clang::X86::BI__builtin_ia32_psradi256:
4624 case clang::X86::BI__builtin_ia32_psradi512:
4625 case clang::X86::BI__builtin_ia32_psraqi128:
4626 case clang::X86::BI__builtin_ia32_psraqi256:
4627 case clang::X86::BI__builtin_ia32_psraqi512:
4629 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4630 if (RHS.uge(LHS.getBitWidth())) {
4631 return LHS.ashr(LHS.getBitWidth() - 1);
4632 }
4633 return LHS.ashr(RHS.getZExtValue());
4634 });
4635
4636 case clang::X86::BI__builtin_ia32_psrlv2di:
4637 case clang::X86::BI__builtin_ia32_psrlv4di:
4638 case clang::X86::BI__builtin_ia32_psrlv4si:
4639 case clang::X86::BI__builtin_ia32_psrlv8di:
4640 case clang::X86::BI__builtin_ia32_psrlv8hi:
4641 case clang::X86::BI__builtin_ia32_psrlv8si:
4642 case clang::X86::BI__builtin_ia32_psrlv16hi:
4643 case clang::X86::BI__builtin_ia32_psrlv16si:
4644 case clang::X86::BI__builtin_ia32_psrlv32hi:
4645 case clang::X86::BI__builtin_ia32_psrlwi128:
4646 case clang::X86::BI__builtin_ia32_psrlwi256:
4647 case clang::X86::BI__builtin_ia32_psrlwi512:
4648 case clang::X86::BI__builtin_ia32_psrldi128:
4649 case clang::X86::BI__builtin_ia32_psrldi256:
4650 case clang::X86::BI__builtin_ia32_psrldi512:
4651 case clang::X86::BI__builtin_ia32_psrlqi128:
4652 case clang::X86::BI__builtin_ia32_psrlqi256:
4653 case clang::X86::BI__builtin_ia32_psrlqi512:
4655 S, OpPC, Call, [](const APSInt &LHS, const APSInt &RHS) {
4656 if (RHS.uge(LHS.getBitWidth())) {
4657 return APInt::getZero(LHS.getBitWidth());
4658 }
4659 return LHS.lshr(RHS.getZExtValue());
4660 });
4661 case clang::X86::BI__builtin_ia32_packsswb128:
4662 case clang::X86::BI__builtin_ia32_packsswb256:
4663 case clang::X86::BI__builtin_ia32_packsswb512:
4664 case clang::X86::BI__builtin_ia32_packssdw128:
4665 case clang::X86::BI__builtin_ia32_packssdw256:
4666 case clang::X86::BI__builtin_ia32_packssdw512:
4667 return interp__builtin_x86_pack(S, OpPC, Call, [](const APSInt &Src) {
4668 return APInt(Src).truncSSat(Src.getBitWidth() / 2);
4669 });
4670 case clang::X86::BI__builtin_ia32_packusdw128:
4671 case clang::X86::BI__builtin_ia32_packusdw256:
4672 case clang::X86::BI__builtin_ia32_packusdw512:
4673 case clang::X86::BI__builtin_ia32_packuswb128:
4674 case clang::X86::BI__builtin_ia32_packuswb256:
4675 case clang::X86::BI__builtin_ia32_packuswb512:
4676 return interp__builtin_x86_pack(S, OpPC, Call, [](const APSInt &Src) {
4677 unsigned DstBits = Src.getBitWidth() / 2;
4678 if (Src.isNegative())
4679 return APInt::getZero(DstBits);
4680 if (Src.isIntN(DstBits))
4681 return APInt(Src).trunc(DstBits);
4682 return APInt::getAllOnes(DstBits);
4683 });
4684
4685 case clang::X86::BI__builtin_ia32_selectss_128:
4686 case clang::X86::BI__builtin_ia32_selectsd_128:
4687 case clang::X86::BI__builtin_ia32_selectsh_128:
4688 case clang::X86::BI__builtin_ia32_selectsbf_128:
4690 case clang::X86::BI__builtin_ia32_vprotbi:
4691 case clang::X86::BI__builtin_ia32_vprotdi:
4692 case clang::X86::BI__builtin_ia32_vprotqi:
4693 case clang::X86::BI__builtin_ia32_vprotwi:
4694 case clang::X86::BI__builtin_ia32_prold128:
4695 case clang::X86::BI__builtin_ia32_prold256:
4696 case clang::X86::BI__builtin_ia32_prold512:
4697 case clang::X86::BI__builtin_ia32_prolq128:
4698 case clang::X86::BI__builtin_ia32_prolq256:
4699 case clang::X86::BI__builtin_ia32_prolq512:
4701 S, OpPC, Call,
4702 [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotl(RHS); });
4703
4704 case clang::X86::BI__builtin_ia32_prord128:
4705 case clang::X86::BI__builtin_ia32_prord256:
4706 case clang::X86::BI__builtin_ia32_prord512:
4707 case clang::X86::BI__builtin_ia32_prorq128:
4708 case clang::X86::BI__builtin_ia32_prorq256:
4709 case clang::X86::BI__builtin_ia32_prorq512:
4711 S, OpPC, Call,
4712 [](const APSInt &LHS, const APSInt &RHS) { return LHS.rotr(RHS); });
4713
4714 case Builtin::BI__builtin_elementwise_max:
4715 case Builtin::BI__builtin_elementwise_min:
4716 return interp__builtin_elementwise_maxmin(S, OpPC, Call, BuiltinID);
4717
4718 case clang::X86::BI__builtin_ia32_phaddw128:
4719 case clang::X86::BI__builtin_ia32_phaddw256:
4720 case clang::X86::BI__builtin_ia32_phaddd128:
4721 case clang::X86::BI__builtin_ia32_phaddd256:
4723 S, OpPC, Call,
4724 [](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
4725 case clang::X86::BI__builtin_ia32_phaddsw128:
4726 case clang::X86::BI__builtin_ia32_phaddsw256:
4728 S, OpPC, Call,
4729 [](const APSInt &LHS, const APSInt &RHS) { return LHS.sadd_sat(RHS); });
4730 case clang::X86::BI__builtin_ia32_phsubw128:
4731 case clang::X86::BI__builtin_ia32_phsubw256:
4732 case clang::X86::BI__builtin_ia32_phsubd128:
4733 case clang::X86::BI__builtin_ia32_phsubd256:
4735 S, OpPC, Call,
4736 [](const APSInt &LHS, const APSInt &RHS) { return LHS - RHS; });
4737 case clang::X86::BI__builtin_ia32_phsubsw128:
4738 case clang::X86::BI__builtin_ia32_phsubsw256:
4740 S, OpPC, Call,
4741 [](const APSInt &LHS, const APSInt &RHS) { return LHS.ssub_sat(RHS); });
4742 case clang::X86::BI__builtin_ia32_haddpd:
4743 case clang::X86::BI__builtin_ia32_haddps:
4744 case clang::X86::BI__builtin_ia32_haddpd256:
4745 case clang::X86::BI__builtin_ia32_haddps256:
4747 S, OpPC, Call,
4748 [](const APFloat &LHS, const APFloat &RHS, llvm::RoundingMode RM) {
4749 APFloat F = LHS;
4750 F.add(RHS, RM);
4751 return F;
4752 });
4753 case clang::X86::BI__builtin_ia32_hsubpd:
4754 case clang::X86::BI__builtin_ia32_hsubps:
4755 case clang::X86::BI__builtin_ia32_hsubpd256:
4756 case clang::X86::BI__builtin_ia32_hsubps256:
4758 S, OpPC, Call,
4759 [](const APFloat &LHS, const APFloat &RHS, llvm::RoundingMode RM) {
4760 APFloat F = LHS;
4761 F.subtract(RHS, RM);
4762 return F;
4763 });
4764 case clang::X86::BI__builtin_ia32_addsubpd:
4765 case clang::X86::BI__builtin_ia32_addsubps:
4766 case clang::X86::BI__builtin_ia32_addsubpd256:
4767 case clang::X86::BI__builtin_ia32_addsubps256:
4768 return interp__builtin_ia32_addsub(S, OpPC, Call);
4769
4770 case clang::X86::BI__builtin_ia32_pmuldq128:
4771 case clang::X86::BI__builtin_ia32_pmuldq256:
4772 case clang::X86::BI__builtin_ia32_pmuldq512:
4774 S, OpPC, Call,
4775 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
4776 const APSInt &HiRHS) {
4777 return llvm::APIntOps::mulsExtended(LoLHS, LoRHS);
4778 });
4779
4780 case clang::X86::BI__builtin_ia32_pmuludq128:
4781 case clang::X86::BI__builtin_ia32_pmuludq256:
4782 case clang::X86::BI__builtin_ia32_pmuludq512:
4784 S, OpPC, Call,
4785 [](const APSInt &LoLHS, const APSInt &HiLHS, const APSInt &LoRHS,
4786 const APSInt &HiRHS) {
4787 return llvm::APIntOps::muluExtended(LoLHS, LoRHS);
4788 });
4789
4790 case Builtin::BI__builtin_elementwise_fma:
4792 S, OpPC, Call,
4793 [](const APFloat &X, const APFloat &Y, const APFloat &Z,
4794 llvm::RoundingMode RM) {
4795 APFloat F = X;
4796 F.fusedMultiplyAdd(Y, Z, RM);
4797 return F;
4798 });
4799
4800 case X86::BI__builtin_ia32_vpmadd52luq128:
4801 case X86::BI__builtin_ia32_vpmadd52luq256:
4802 case X86::BI__builtin_ia32_vpmadd52luq512:
4804 S, OpPC, Call, [](const APSInt &A, const APSInt &B, const APSInt &C) {
4805 return A + (B.trunc(52) * C.trunc(52)).zext(64);
4806 });
4807 case X86::BI__builtin_ia32_vpmadd52huq128:
4808 case X86::BI__builtin_ia32_vpmadd52huq256:
4809 case X86::BI__builtin_ia32_vpmadd52huq512:
4811 S, OpPC, Call, [](const APSInt &A, const APSInt &B, const APSInt &C) {
4812 return A + llvm::APIntOps::mulhu(B.trunc(52), C.trunc(52)).zext(64);
4813 });
4814
4815 case X86::BI__builtin_ia32_vpshldd128:
4816 case X86::BI__builtin_ia32_vpshldd256:
4817 case X86::BI__builtin_ia32_vpshldd512:
4818 case X86::BI__builtin_ia32_vpshldq128:
4819 case X86::BI__builtin_ia32_vpshldq256:
4820 case X86::BI__builtin_ia32_vpshldq512:
4821 case X86::BI__builtin_ia32_vpshldw128:
4822 case X86::BI__builtin_ia32_vpshldw256:
4823 case X86::BI__builtin_ia32_vpshldw512:
4825 S, OpPC, Call,
4826 [](const APSInt &Hi, const APSInt &Lo, const APSInt &Amt) {
4827 return llvm::APIntOps::fshl(Hi, Lo, Amt);
4828 });
4829
4830 case X86::BI__builtin_ia32_vpshrdd128:
4831 case X86::BI__builtin_ia32_vpshrdd256:
4832 case X86::BI__builtin_ia32_vpshrdd512:
4833 case X86::BI__builtin_ia32_vpshrdq128:
4834 case X86::BI__builtin_ia32_vpshrdq256:
4835 case X86::BI__builtin_ia32_vpshrdq512:
4836 case X86::BI__builtin_ia32_vpshrdw128:
4837 case X86::BI__builtin_ia32_vpshrdw256:
4838 case X86::BI__builtin_ia32_vpshrdw512:
4839 / NOTE: Reversed Hi/Lo operands.
4841 S, OpPC, Call,
4842 [](const APSInt &Lo, const APSInt &Hi, const APSInt &Amt) {
4843 return llvm::APIntOps::fshr(Hi, Lo, Amt);
4844 });
4845 case X86::BI__builtin_ia32_vpconflictsi_128:
4846 case X86::BI__builtin_ia32_vpconflictsi_256:
4847 case X86::BI__builtin_ia32_vpconflictsi_512:
4848 case X86::BI__builtin_ia32_vpconflictdi_128:
4849 case X86::BI__builtin_ia32_vpconflictdi_256:
4850 case X86::BI__builtin_ia32_vpconflictdi_512:
4851 return interp__builtin_ia32_vpconflict(S, OpPC, Call);
4852 case clang::X86::BI__builtin_ia32_blendpd:
4853 case clang::X86::BI__builtin_ia32_blendpd256:
4854 case clang::X86::BI__builtin_ia32_blendps:
4855 case clang::X86::BI__builtin_ia32_blendps256:
4856 case clang::X86::BI__builtin_ia32_pblendw128:
4857 case clang::X86::BI__builtin_ia32_pblendw256:
4858 case clang::X86::BI__builtin_ia32_pblendd128:
4859 case clang::X86::BI__builtin_ia32_pblendd256:
4861 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4862 / Bit index for mask.
4863 unsigned MaskBit = (ShuffleMask >> (DstIdx % 8)) & 0x1;
4864 unsigned SrcVecIdx = MaskBit ? 1 : 0; / 1 = TrueVec, 0 = FalseVec
4865 return std::pair<unsigned, int>{SrcVecIdx, static_cast<int>(DstIdx)};
4866 });
4867
4868
4869
4870 case clang::X86::BI__builtin_ia32_blendvpd:
4871 case clang::X86::BI__builtin_ia32_blendvpd256:
4872 case clang::X86::BI__builtin_ia32_blendvps:
4873 case clang::X86::BI__builtin_ia32_blendvps256:
4875 S, OpPC, Call,
4876 [](const APFloat &F, const APFloat &T, const APFloat &C,
4877 llvm::RoundingMode) { return C.isNegative() ? T : F; });
4878
4879 case clang::X86::BI__builtin_ia32_pblendvb128:
4880 case clang::X86::BI__builtin_ia32_pblendvb256:
4882 S, OpPC, Call, [](const APSInt &F, const APSInt &T, const APSInt &C) {
4883 return ((APInt)C).isNegative() ? T : F;
4884 });
4885 case X86::BI__builtin_ia32_ptestz128:
4886 case X86::BI__builtin_ia32_ptestz256:
4887 case X86::BI__builtin_ia32_vtestzps:
4888 case X86::BI__builtin_ia32_vtestzps256:
4889 case X86::BI__builtin_ia32_vtestzpd:
4890 case X86::BI__builtin_ia32_vtestzpd256:
4892 S, OpPC, Call,
4893 [](const APInt &A, const APInt &B) { return (A & B) == 0; });
4894 case X86::BI__builtin_ia32_ptestc128:
4895 case X86::BI__builtin_ia32_ptestc256:
4896 case X86::BI__builtin_ia32_vtestcps:
4897 case X86::BI__builtin_ia32_vtestcps256:
4898 case X86::BI__builtin_ia32_vtestcpd:
4899 case X86::BI__builtin_ia32_vtestcpd256:
4901 S, OpPC, Call,
4902 [](const APInt &A, const APInt &B) { return (~A & B) == 0; });
4903 case X86::BI__builtin_ia32_ptestnzc128:
4904 case X86::BI__builtin_ia32_ptestnzc256:
4905 case X86::BI__builtin_ia32_vtestnzcps:
4906 case X86::BI__builtin_ia32_vtestnzcps256:
4907 case X86::BI__builtin_ia32_vtestnzcpd:
4908 case X86::BI__builtin_ia32_vtestnzcpd256:
4910 S, OpPC, Call, [](const APInt &A, const APInt &B) {
4911 return ((A & B) != 0) && ((~A & B) != 0);
4912 });
4913 case X86::BI__builtin_ia32_selectb_128:
4914 case X86::BI__builtin_ia32_selectb_256:
4915 case X86::BI__builtin_ia32_selectb_512:
4916 case X86::BI__builtin_ia32_selectw_128:
4917 case X86::BI__builtin_ia32_selectw_256:
4918 case X86::BI__builtin_ia32_selectw_512:
4919 case X86::BI__builtin_ia32_selectd_128:
4920 case X86::BI__builtin_ia32_selectd_256:
4921 case X86::BI__builtin_ia32_selectd_512:
4922 case X86::BI__builtin_ia32_selectq_128:
4923 case X86::BI__builtin_ia32_selectq_256:
4924 case X86::BI__builtin_ia32_selectq_512:
4925 case X86::BI__builtin_ia32_selectph_128:
4926 case X86::BI__builtin_ia32_selectph_256:
4927 case X86::BI__builtin_ia32_selectph_512:
4928 case X86::BI__builtin_ia32_selectpbf_128:
4929 case X86::BI__builtin_ia32_selectpbf_256:
4930 case X86::BI__builtin_ia32_selectpbf_512:
4931 case X86::BI__builtin_ia32_selectps_128:
4932 case X86::BI__builtin_ia32_selectps_256:
4933 case X86::BI__builtin_ia32_selectps_512:
4934 case X86::BI__builtin_ia32_selectpd_128:
4935 case X86::BI__builtin_ia32_selectpd_256:
4936 case X86::BI__builtin_ia32_selectpd_512:
4937 return interp__builtin_select(S, OpPC, Call);
4938
4939 case X86::BI__builtin_ia32_shufps:
4940 case X86::BI__builtin_ia32_shufps256:
4941 case X86::BI__builtin_ia32_shufps512:
4943 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4944 unsigned NumElemPerLane = 4;
4945 unsigned NumSelectableElems = NumElemPerLane / 2;
4946 unsigned BitsPerElem = 2;
4947 unsigned IndexMask = 0x3;
4948 unsigned MaskBits = 8;
4949 unsigned Lane = DstIdx / NumElemPerLane;
4950 unsigned ElemInLane = DstIdx % NumElemPerLane;
4951 unsigned LaneOffset = Lane * NumElemPerLane;
4952 unsigned SrcIdx = ElemInLane >= NumSelectableElems ? 1 : 0;
4953 unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
4954 unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
4955 return std::pair<unsigned, int>{SrcIdx,
4956 static_cast<int>(LaneOffset + Index)};
4957 });
4958 case X86::BI__builtin_ia32_shufpd:
4959 case X86::BI__builtin_ia32_shufpd256:
4960 case X86::BI__builtin_ia32_shufpd512:
4962 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
4963 unsigned NumElemPerLane = 2;
4964 unsigned NumSelectableElems = NumElemPerLane / 2;
4965 unsigned BitsPerElem = 1;
4966 unsigned IndexMask = 0x1;
4967 unsigned MaskBits = 8;
4968 unsigned Lane = DstIdx / NumElemPerLane;
4969 unsigned ElemInLane = DstIdx % NumElemPerLane;
4970 unsigned LaneOffset = Lane * NumElemPerLane;
4971 unsigned SrcIdx = ElemInLane >= NumSelectableElems ? 1 : 0;
4972 unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
4973 unsigned Index = (ShuffleMask >> BitIndex) & IndexMask;
4974 return std::pair<unsigned, int>{SrcIdx,
4975 static_cast<int>(LaneOffset + Index)};
4976 });
4977
4978 case X86::BI__builtin_ia32_vgf2p8affineinvqb_v16qi:
4979 case X86::BI__builtin_ia32_vgf2p8affineinvqb_v32qi:
4980 case X86::BI__builtin_ia32_vgf2p8affineinvqb_v64qi:
4981 return interp_builtin_ia32_gfni_affine(S, OpPC, Call, true);
4982 case X86::BI__builtin_ia32_vgf2p8affineqb_v16qi:
4983 case X86::BI__builtin_ia32_vgf2p8affineqb_v32qi:
4984 case X86::BI__builtin_ia32_vgf2p8affineqb_v64qi:
4985 return interp_builtin_ia32_gfni_affine(S, OpPC, Call, false);
4986
4987 case X86::BI__builtin_ia32_vgf2p8mulb_v16qi:
4988 case X86::BI__builtin_ia32_vgf2p8mulb_v32qi:
4989 case X86::BI__builtin_ia32_vgf2p8mulb_v64qi:
4990 return interp__builtin_ia32_gfni_mul(S, OpPC, Call);
4991
4992 case X86::BI__builtin_ia32_insertps128:
4994 S, OpPC, Call, [](unsigned DstIdx, unsigned Mask) {
4995 / Bits [3:0]: zero mask - if bit is set, zero this element
4996 if ((Mask & (1 << DstIdx)) != 0) {
4997 return std::pair<unsigned, int>{0, -1};
4998 }
4999 / Bits [7:6]: select element from source vector Y (0-3)
5000 / Bits [5:4]: select destination position (0-3)
5001 unsigned SrcElem = (Mask >> 6) & 0x3;
5002 unsigned DstElem = (Mask >> 4) & 0x3;
5003 if (DstIdx == DstElem) {
5004 / Insert element from source vector (B) at this position
5005 return std::pair<unsigned, int>{1, static_cast<int>(SrcElem)};
5006 } else {
5007 / Copy from destination vector (A)
5008 return std::pair<unsigned, int>{0, static_cast<int>(DstIdx)};
5009 }
5010 });
5011 case X86::BI__builtin_ia32_permvarsi256:
5012 case X86::BI__builtin_ia32_permvarsf256:
5013 case X86::BI__builtin_ia32_permvardf512:
5014 case X86::BI__builtin_ia32_permvardi512:
5015 case X86::BI__builtin_ia32_permvarhi128:
5017 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5018 int Offset = ShuffleMask & 0x7;
5019 return std::pair<unsigned, int>{0, Offset};
5020 });
5021 case X86::BI__builtin_ia32_permvarqi128:
5022 case X86::BI__builtin_ia32_permvarhi256:
5023 case X86::BI__builtin_ia32_permvarsi512:
5024 case X86::BI__builtin_ia32_permvarsf512:
5026 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5027 int Offset = ShuffleMask & 0xF;
5028 return std::pair<unsigned, int>{0, Offset};
5029 });
5030 case X86::BI__builtin_ia32_permvardi256:
5031 case X86::BI__builtin_ia32_permvardf256:
5033 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5034 int Offset = ShuffleMask & 0x3;
5035 return std::pair<unsigned, int>{0, Offset};
5036 });
5037 case X86::BI__builtin_ia32_permvarqi256:
5038 case X86::BI__builtin_ia32_permvarhi512:
5040 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5041 int Offset = ShuffleMask & 0x1F;
5042 return std::pair<unsigned, int>{0, Offset};
5043 });
5044 case X86::BI__builtin_ia32_permvarqi512:
5046 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5047 int Offset = ShuffleMask & 0x3F;
5048 return std::pair<unsigned, int>{0, Offset};
5049 });
5050 case X86::BI__builtin_ia32_vpermi2varq128:
5051 case X86::BI__builtin_ia32_vpermi2varpd128:
5053 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5054 int Offset = ShuffleMask & 0x1;
5055 unsigned SrcIdx = (ShuffleMask >> 1) & 0x1;
5056 return std::pair<unsigned, int>{SrcIdx, Offset};
5057 });
5058 case X86::BI__builtin_ia32_vpermi2vard128:
5059 case X86::BI__builtin_ia32_vpermi2varps128:
5060 case X86::BI__builtin_ia32_vpermi2varq256:
5061 case X86::BI__builtin_ia32_vpermi2varpd256:
5063 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5064 int Offset = ShuffleMask & 0x3;
5065 unsigned SrcIdx = (ShuffleMask >> 2) & 0x1;
5066 return std::pair<unsigned, int>{SrcIdx, Offset};
5067 });
5068 case X86::BI__builtin_ia32_vpermi2varhi128:
5069 case X86::BI__builtin_ia32_vpermi2vard256:
5070 case X86::BI__builtin_ia32_vpermi2varps256:
5071 case X86::BI__builtin_ia32_vpermi2varq512:
5072 case X86::BI__builtin_ia32_vpermi2varpd512:
5074 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5075 int Offset = ShuffleMask & 0x7;
5076 unsigned SrcIdx = (ShuffleMask >> 3) & 0x1;
5077 return std::pair<unsigned, int>{SrcIdx, Offset};
5078 });
5079 case X86::BI__builtin_ia32_vpermi2varqi128:
5080 case X86::BI__builtin_ia32_vpermi2varhi256:
5081 case X86::BI__builtin_ia32_vpermi2vard512:
5082 case X86::BI__builtin_ia32_vpermi2varps512:
5084 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5085 int Offset = ShuffleMask & 0xF;
5086 unsigned SrcIdx = (ShuffleMask >> 4) & 0x1;
5087 return std::pair<unsigned, int>{SrcIdx, Offset};
5088 });
5089 case X86::BI__builtin_ia32_vpermi2varqi256:
5090 case X86::BI__builtin_ia32_vpermi2varhi512:
5092 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5093 int Offset = ShuffleMask & 0x1F;
5094 unsigned SrcIdx = (ShuffleMask >> 5) & 0x1;
5095 return std::pair<unsigned, int>{SrcIdx, Offset};
5096 });
5097 case X86::BI__builtin_ia32_vpermi2varqi512:
5099 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5100 int Offset = ShuffleMask & 0x3F;
5101 unsigned SrcIdx = (ShuffleMask >> 6) & 0x1;
5102 return std::pair<unsigned, int>{SrcIdx, Offset};
5103 });
5104 case X86::BI__builtin_ia32_pshufb128:
5105 case X86::BI__builtin_ia32_pshufb256:
5106 case X86::BI__builtin_ia32_pshufb512:
5108 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5109 uint8_t Ctlb = static_cast<uint8_t>(ShuffleMask);
5110 if (Ctlb & 0x80)
5111 return std::make_pair(0, -1);
5112
5113 unsigned LaneBase = (DstIdx / 16) * 16;
5114 unsigned SrcOffset = Ctlb & 0x0F;
5115 unsigned SrcIdx = LaneBase + SrcOffset;
5116 return std::make_pair(0, static_cast<int>(SrcIdx));
5117 });
5118
5119 case X86::BI__builtin_ia32_pshuflw:
5120 case X86::BI__builtin_ia32_pshuflw256:
5121 case X86::BI__builtin_ia32_pshuflw512:
5123 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5124 unsigned LaneBase = (DstIdx / 8) * 8;
5125 unsigned LaneIdx = DstIdx % 8;
5126 if (LaneIdx < 4) {
5127 unsigned Sel = (ShuffleMask >> (2 * LaneIdx)) & 0x3;
5128 return std::make_pair(0, static_cast<int>(LaneBase + Sel));
5129 }
5130
5131 return std::make_pair(0, static_cast<int>(DstIdx));
5132 });
5133
5134 case X86::BI__builtin_ia32_pshufhw:
5135 case X86::BI__builtin_ia32_pshufhw256:
5136 case X86::BI__builtin_ia32_pshufhw512:
5138 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5139 unsigned LaneBase = (DstIdx / 8) * 8;
5140 unsigned LaneIdx = DstIdx % 8;
5141 if (LaneIdx >= 4) {
5142 unsigned Sel = (ShuffleMask >> (2 * (LaneIdx - 4))) & 0x3;
5143 return std::make_pair(0, static_cast<int>(LaneBase + 4 + Sel));
5144 }
5145
5146 return std::make_pair(0, static_cast<int>(DstIdx));
5147 });
5148
5149 case X86::BI__builtin_ia32_pshufd:
5150 case X86::BI__builtin_ia32_pshufd256:
5151 case X86::BI__builtin_ia32_pshufd512:
5152 case X86::BI__builtin_ia32_vpermilps:
5153 case X86::BI__builtin_ia32_vpermilps256:
5154 case X86::BI__builtin_ia32_vpermilps512:
5156 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5157 unsigned LaneBase = (DstIdx / 4) * 4;
5158 unsigned LaneIdx = DstIdx % 4;
5159 unsigned Sel = (ShuffleMask >> (2 * LaneIdx)) & 0x3;
5160 return std::make_pair(0, static_cast<int>(LaneBase + Sel));
5161 });
5162
5163 case X86::BI__builtin_ia32_vpermilvarpd:
5164 case X86::BI__builtin_ia32_vpermilvarpd256:
5165 case X86::BI__builtin_ia32_vpermilvarpd512:
5167 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5168 unsigned NumElemPerLane = 2;
5169 unsigned Lane = DstIdx / NumElemPerLane;
5170 unsigned Offset = ShuffleMask & 0b10 ? 1 : 0;
5171 return std::make_pair(
5172 0, static_cast<int>(Lane * NumElemPerLane + Offset));
5173 });
5174
5175 case X86::BI__builtin_ia32_vpermilvarps:
5176 case X86::BI__builtin_ia32_vpermilvarps256:
5177 case X86::BI__builtin_ia32_vpermilvarps512:
5179 S, OpPC, Call, [](unsigned DstIdx, unsigned ShuffleMask) {
5180 unsigned NumElemPerLane = 4;
5181 unsigned Lane = DstIdx / NumElemPerLane;
5182 unsigned Offset = ShuffleMask & 0b11;
5183 return std::make_pair(
5184 0, static_cast<int>(Lane * NumElemPerLane + Offset));
5185 });
5186
5187 case X86::BI__builtin_ia32_vpermilpd:
5188 case X86::BI__builtin_ia32_vpermilpd256:
5189 case X86::BI__builtin_ia32_vpermilpd512:
5191 S, OpPC, Call, [](unsigned DstIdx, unsigned Control) {
5192 unsigned NumElemPerLane = 2;
5193 unsigned BitsPerElem = 1;
5194 unsigned MaskBits = 8;
5195 unsigned IndexMask = 0x1;
5196 unsigned Lane = DstIdx / NumElemPerLane;
5197 unsigned LaneOffset = Lane * NumElemPerLane;
5198 unsigned BitIndex = (DstIdx * BitsPerElem) % MaskBits;
5199 unsigned Index = (Control >> BitIndex) & IndexMask;
5200 return std::make_pair(0, static_cast<int>(LaneOffset + Index));
5201 });
5202
5203 case X86::BI__builtin_ia32_permdf256:
5204 case X86::BI__builtin_ia32_permdi256:
5206 S, OpPC, Call, [](unsigned DstIdx, unsigned Control) {
5207 / permute4x64 operates on 4 64-bit elements
5208 / For element i (0-3), extract bits [2*i+1:2*i] from Control
5209 unsigned Index = (Control >> (2 * DstIdx)) & 0x3;
5210 return std::make_pair(0, static_cast<int>(Index));
5211 });
5212
5213 case X86::BI__builtin_ia32_vpmultishiftqb128:
5214 case X86::BI__builtin_ia32_vpmultishiftqb256:
5215 case X86::BI__builtin_ia32_vpmultishiftqb512:
5216 return interp__builtin_ia32_multishiftqb(S, OpPC, Call);
5217 case X86::BI__builtin_ia32_kandqi:
5218 case X86::BI__builtin_ia32_kandhi:
5219 case X86::BI__builtin_ia32_kandsi:
5220 case X86::BI__builtin_ia32_kanddi:
5222 S, OpPC, Call,
5223 [](const APSInt &LHS, const APSInt &RHS) { return LHS & RHS; });
5224
5225 case X86::BI__builtin_ia32_kandnqi:
5226 case X86::BI__builtin_ia32_kandnhi:
5227 case X86::BI__builtin_ia32_kandnsi:
5228 case X86::BI__builtin_ia32_kandndi:
5230 S, OpPC, Call,
5231 [](const APSInt &LHS, const APSInt &RHS) { return ~LHS & RHS; });
5232
5233 case X86::BI__builtin_ia32_korqi:
5234 case X86::BI__builtin_ia32_korhi:
5235 case X86::BI__builtin_ia32_korsi:
5236 case X86::BI__builtin_ia32_kordi:
5238 S, OpPC, Call,
5239 [](const APSInt &LHS, const APSInt &RHS) { return LHS | RHS; });
5240
5241 case X86::BI__builtin_ia32_kxnorqi:
5242 case X86::BI__builtin_ia32_kxnorhi:
5243 case X86::BI__builtin_ia32_kxnorsi:
5244 case X86::BI__builtin_ia32_kxnordi:
5246 S, OpPC, Call,
5247 [](const APSInt &LHS, const APSInt &RHS) { return ~(LHS ^ RHS); });
5248
5249 case X86::BI__builtin_ia32_kxorqi:
5250 case X86::BI__builtin_ia32_kxorhi:
5251 case X86::BI__builtin_ia32_kxorsi:
5252 case X86::BI__builtin_ia32_kxordi:
5254 S, OpPC, Call,
5255 [](const APSInt &LHS, const APSInt &RHS) { return LHS ^ RHS; });
5256
5257 case X86::BI__builtin_ia32_knotqi:
5258 case X86::BI__builtin_ia32_knothi:
5259 case X86::BI__builtin_ia32_knotsi:
5260 case X86::BI__builtin_ia32_knotdi:
5262 S, OpPC, Call, [](const APSInt &Src) { return ~Src; });
5263
5264 case X86::BI__builtin_ia32_kaddqi:
5265 case X86::BI__builtin_ia32_kaddhi:
5266 case X86::BI__builtin_ia32_kaddsi:
5267 case X86::BI__builtin_ia32_kadddi:
5269 S, OpPC, Call,
5270 [](const APSInt &LHS, const APSInt &RHS) { return LHS + RHS; });
5271
5272 case X86::BI__builtin_ia32_kmovb:
5273 case X86::BI__builtin_ia32_kmovw:
5274 case X86::BI__builtin_ia32_kmovd:
5275 case X86::BI__builtin_ia32_kmovq:
5277 S, OpPC, Call, [](const APSInt &Src) { return Src; });
5278
5279 case X86::BI__builtin_ia32_kunpckhi:
5280 case X86::BI__builtin_ia32_kunpckdi:
5281 case X86::BI__builtin_ia32_kunpcksi:
5283 S, OpPC, Call, [](const APSInt &A, const APSInt &B) {
5284 / Generic kunpack: extract lower half of each operand and concatenate
5285 / Result = A[HalfWidth-1:0] concat B[HalfWidth-1:0]
5286 unsigned BW = A.getBitWidth();
5287 return APSInt(A.trunc(BW / 2).concat(B.trunc(BW / 2)),
5288 A.isUnsigned());
5289 });
5290
5291 case X86::BI__builtin_ia32_phminposuw128:
5292 return interp__builtin_ia32_phminposuw(S, OpPC, Call);
5293
5294 case X86::BI__builtin_ia32_psraq128:
5295 case X86::BI__builtin_ia32_psraq256:
5296 case X86::BI__builtin_ia32_psraq512:
5297 case X86::BI__builtin_ia32_psrad128:
5298 case X86::BI__builtin_ia32_psrad256:
5299 case X86::BI__builtin_ia32_psrad512:
5300 case X86::BI__builtin_ia32_psraw128:
5301 case X86::BI__builtin_ia32_psraw256:
5302 case X86::BI__builtin_ia32_psraw512:
5304 S, OpPC, Call,
5305 [](const APInt &Elt, uint64_t Count) { return Elt.ashr(Count); },
5306 [](const APInt &Elt, unsigned Width) { return Elt.ashr(Width - 1); });
5307
5308 case X86::BI__builtin_ia32_psllq128:
5309 case X86::BI__builtin_ia32_psllq256:
5310 case X86::BI__builtin_ia32_psllq512:
5311 case X86::BI__builtin_ia32_pslld128:
5312 case X86::BI__builtin_ia32_pslld256:
5313 case X86::BI__builtin_ia32_pslld512:
5314 case X86::BI__builtin_ia32_psllw128:
5315 case X86::BI__builtin_ia32_psllw256:
5316 case X86::BI__builtin_ia32_psllw512:
5318 S, OpPC, Call,
5319 [](const APInt &Elt, uint64_t Count) { return Elt.shl(Count); },
5320 [](const APInt &Elt, unsigned Width) { return APInt::getZero(Width); });
5321
5322 case X86::BI__builtin_ia32_psrlq128:
5323 case X86::BI__builtin_ia32_psrlq256:
5324 case X86::BI__builtin_ia32_psrlq512:
5325 case X86::BI__builtin_ia32_psrld128:
5326 case X86::BI__builtin_ia32_psrld256:
5327 case X86::BI__builtin_ia32_psrld512:
5328 case X86::BI__builtin_ia32_psrlw128:
5329 case X86::BI__builtin_ia32_psrlw256:
5330 case X86::BI__builtin_ia32_psrlw512:
5332 S, OpPC, Call,
5333 [](const APInt &Elt, uint64_t Count) { return Elt.lshr(Count); },
5334 [](const APInt &Elt, unsigned Width) { return APInt::getZero(Width); });
5335
5336 case X86::BI__builtin_ia32_pternlogd128_mask:
5337 case X86::BI__builtin_ia32_pternlogd256_mask:
5338 case X86::BI__builtin_ia32_pternlogd512_mask:
5339 case X86::BI__builtin_ia32_pternlogq128_mask:
5340 case X86::BI__builtin_ia32_pternlogq256_mask:
5341 case X86::BI__builtin_ia32_pternlogq512_mask:
5342 return interp__builtin_ia32_pternlog(S, OpPC, Call, /*MaskZ=*/false);
5343 case X86::BI__builtin_ia32_pternlogd128_maskz:
5344 case X86::BI__builtin_ia32_pternlogd256_maskz:
5345 case X86::BI__builtin_ia32_pternlogd512_maskz:
5346 case X86::BI__builtin_ia32_pternlogq128_maskz:
5347 case X86::BI__builtin_ia32_pternlogq256_maskz:
5348 case X86::BI__builtin_ia32_pternlogq512_maskz:
5349 return interp__builtin_ia32_pternlog(S, OpPC, Call, /*MaskZ=*/true);
5350 case Builtin::BI__builtin_elementwise_fshl:
5352 llvm::APIntOps::fshl);
5353 case Builtin::BI__builtin_elementwise_fshr:
5355 llvm::APIntOps::fshr);
5356
5357 case X86::BI__builtin_ia32_shuf_f32x4_256:
5358 case X86::BI__builtin_ia32_shuf_i32x4_256:
5359 case X86::BI__builtin_ia32_shuf_f64x2_256:
5360 case X86::BI__builtin_ia32_shuf_i64x2_256:
5361 case X86::BI__builtin_ia32_shuf_f32x4:
5362 case X86::BI__builtin_ia32_shuf_i32x4:
5363 case X86::BI__builtin_ia32_shuf_f64x2:
5364 case X86::BI__builtin_ia32_shuf_i64x2: {
5365 / Destination and sources A, B all have the same type.
5366 QualType VecQT = Call->getArg(0)->getType();
5367 const auto *VecT = VecQT->castAs<VectorType>();
5368 unsigned NumElems = VecT->getNumElements();
5369 unsigned ElemBits = S.getASTContext().getTypeSize(VecT->getElementType());
5370 unsigned LaneBits = 128u;
5371 unsigned NumLanes = (NumElems * ElemBits) / LaneBits;
5372 unsigned NumElemsPerLane = LaneBits / ElemBits;
5373
5375 S, OpPC, Call,
5376 [NumLanes, NumElemsPerLane](unsigned DstIdx, unsigned ShuffleMask) {
5377 / DstIdx determines source. ShuffleMask selects lane in source.
5378 unsigned BitsPerElem = NumLanes / 2;
5379 unsigned IndexMask = (1u << BitsPerElem) - 1;
5380 unsigned Lane = DstIdx / NumElemsPerLane;
5381 unsigned SrcIdx = (Lane < NumLanes / 2) ? 0 : 1;
5382 unsigned BitIdx = BitsPerElem * Lane;
5383 unsigned SrcLaneIdx = (ShuffleMask >> BitIdx) & IndexMask;
5384 unsigned ElemInLane = DstIdx % NumElemsPerLane;
5385 unsigned IdxToPick = SrcLaneIdx * NumElemsPerLane + ElemInLane;
5386 return std::pair<unsigned, int>{SrcIdx, IdxToPick};
5387 });
5388 }
5389
5390 case X86::BI__builtin_ia32_insertf32x4_256:
5391 case X86::BI__builtin_ia32_inserti32x4_256:
5392 case X86::BI__builtin_ia32_insertf64x2_256:
5393 case X86::BI__builtin_ia32_inserti64x2_256:
5394 case X86::BI__builtin_ia32_insertf32x4:
5395 case X86::BI__builtin_ia32_inserti32x4:
5396 case X86::BI__builtin_ia32_insertf64x2_512:
5397 case X86::BI__builtin_ia32_inserti64x2_512:
5398 case X86::BI__builtin_ia32_insertf32x8:
5399 case X86::BI__builtin_ia32_inserti32x8:
5400 case X86::BI__builtin_ia32_insertf64x4:
5401 case X86::BI__builtin_ia32_inserti64x4:
5402 case X86::BI__builtin_ia32_vinsertf128_ps256:
5403 case X86::BI__builtin_ia32_vinsertf128_pd256:
5404 case X86::BI__builtin_ia32_vinsertf128_si256:
5405 case X86::BI__builtin_ia32_insert128i256:
5406 return interp__builtin_x86_insert_subvector(S, OpPC, Call, BuiltinID);
5407
5408 case clang::X86::BI__builtin_ia32_vcvtps2ph:
5409 case clang::X86::BI__builtin_ia32_vcvtps2ph256:
5410 return interp__builtin_ia32_vcvtps2ph(S, OpPC, Call);
5411
5412 case X86::BI__builtin_ia32_vec_ext_v4hi:
5413 case X86::BI__builtin_ia32_vec_ext_v16qi:
5414 case X86::BI__builtin_ia32_vec_ext_v8hi:
5415 case X86::BI__builtin_ia32_vec_ext_v4si:
5416 case X86::BI__builtin_ia32_vec_ext_v2di:
5417 case X86::BI__builtin_ia32_vec_ext_v32qi:
5418 case X86::BI__builtin_ia32_vec_ext_v16hi:
5419 case X86::BI__builtin_ia32_vec_ext_v8si:
5420 case X86::BI__builtin_ia32_vec_ext_v4di:
5421 case X86::BI__builtin_ia32_vec_ext_v4sf:
5422 return interp__builtin_vec_ext(S, OpPC, Call, BuiltinID);
5423
5424 case X86::BI__builtin_ia32_vec_set_v4hi:
5425 case X86::BI__builtin_ia32_vec_set_v16qi:
5426 case X86::BI__builtin_ia32_vec_set_v8hi:
5427 case X86::BI__builtin_ia32_vec_set_v4si:
5428 case X86::BI__builtin_ia32_vec_set_v2di:
5429 case X86::BI__builtin_ia32_vec_set_v32qi:
5430 case X86::BI__builtin_ia32_vec_set_v16hi:
5431 case X86::BI__builtin_ia32_vec_set_v8si:
5432 case X86::BI__builtin_ia32_vec_set_v4di:
5433 return interp__builtin_vec_set(S, OpPC, Call, BuiltinID);
5434
5435 case X86::BI__builtin_ia32_cvtb2mask128:
5436 case X86::BI__builtin_ia32_cvtb2mask256:
5437 case X86::BI__builtin_ia32_cvtb2mask512:
5438 case X86::BI__builtin_ia32_cvtw2mask128:
5439 case X86::BI__builtin_ia32_cvtw2mask256:
5440 case X86::BI__builtin_ia32_cvtw2mask512:
5441 case X86::BI__builtin_ia32_cvtd2mask128:
5442 case X86::BI__builtin_ia32_cvtd2mask256:
5443 case X86::BI__builtin_ia32_cvtd2mask512:
5444 case X86::BI__builtin_ia32_cvtq2mask128:
5445 case X86::BI__builtin_ia32_cvtq2mask256:
5446 case X86::BI__builtin_ia32_cvtq2mask512:
5447 return interp__builtin_ia32_cvt_vec2mask(S, OpPC, Call, BuiltinID);
5448
5449 case X86::BI__builtin_ia32_cvtsd2ss:
5450 return interp__builtin_ia32_cvtsd2ss(S, OpPC, Call, false);
5451
5452 case X86::BI__builtin_ia32_cvtsd2ss_round_mask:
5453 return interp__builtin_ia32_cvtsd2ss(S, OpPC, Call, true);
5454
5455 case X86::BI__builtin_ia32_cvtpd2ps:
5456 case X86::BI__builtin_ia32_cvtpd2ps256:
5457 return interp__builtin_ia32_cvtpd2ps(S, OpPC, Call, false, false);
5458 case X86::BI__builtin_ia32_cvtpd2ps_mask:
5459 return interp__builtin_ia32_cvtpd2ps(S, OpPC, Call, true, false);
5460 case X86::BI__builtin_ia32_cvtpd2ps512_mask:
5461 return interp__builtin_ia32_cvtpd2ps(S, OpPC, Call, true, true);
5462
5463 case X86::BI__builtin_ia32_cmpb128_mask:
5464 case X86::BI__builtin_ia32_cmpw128_mask:
5465 case X86::BI__builtin_ia32_cmpd128_mask:
5466 case X86::BI__builtin_ia32_cmpq128_mask:
5467 case X86::BI__builtin_ia32_cmpb256_mask:
5468 case X86::BI__builtin_ia32_cmpw256_mask:
5469 case X86::BI__builtin_ia32_cmpd256_mask:
5470 case X86::BI__builtin_ia32_cmpq256_mask:
5471 case X86::BI__builtin_ia32_cmpb512_mask:
5472 case X86::BI__builtin_ia32_cmpw512_mask:
5473 case X86::BI__builtin_ia32_cmpd512_mask:
5474 case X86::BI__builtin_ia32_cmpq512_mask:
5475 return interp__builtin_ia32_cmp_mask(S, OpPC, Call, BuiltinID,
5476 /*IsUnsigned=*/false);
5477
5478 case X86::BI__builtin_ia32_ucmpb128_mask:
5479 case X86::BI__builtin_ia32_ucmpw128_mask:
5480 case X86::BI__builtin_ia32_ucmpd128_mask:
5481 case X86::BI__builtin_ia32_ucmpq128_mask:
5482 case X86::BI__builtin_ia32_ucmpb256_mask:
5483 case X86::BI__builtin_ia32_ucmpw256_mask:
5484 case X86::BI__builtin_ia32_ucmpd256_mask:
5485 case X86::BI__builtin_ia32_ucmpq256_mask:
5486 case X86::BI__builtin_ia32_ucmpb512_mask:
5487 case X86::BI__builtin_ia32_ucmpw512_mask:
5488 case X86::BI__builtin_ia32_ucmpd512_mask:
5489 case X86::BI__builtin_ia32_ucmpq512_mask:
5490 return interp__builtin_ia32_cmp_mask(S, OpPC, Call, BuiltinID,
5491 /*IsUnsigned=*/true);
5492
5493 case X86::BI__builtin_ia32_vpshufbitqmb128_mask:
5494 case X86::BI__builtin_ia32_vpshufbitqmb256_mask:
5495 case X86::BI__builtin_ia32_vpshufbitqmb512_mask:
5497
5498 case X86::BI__builtin_ia32_pslldqi128_byteshift:
5499 case X86::BI__builtin_ia32_pslldqi256_byteshift:
5500 case X86::BI__builtin_ia32_pslldqi512_byteshift:
5501 / These SLLDQ intrinsics always operate on byte elements (8 bits).
5502 / The lane width is hardcoded to 16 to match the SIMD register size,
5503 / but the algorithm processes one byte per iteration,
5504 / so APInt(8, ...) is correct and intentional.
5506 S, OpPC, Call,
5507 [](unsigned DstIdx, unsigned Shift) -> std::pair<unsigned, int> {
5508 unsigned LaneBase = (DstIdx / 16) * 16;
5509 unsigned LaneIdx = DstIdx % 16;
5510 if (LaneIdx < Shift)
5511 return std::make_pair(0, -1);
5512
5513 return std::make_pair(0,
5514 static_cast<int>(LaneBase + LaneIdx - Shift));
5515 });
5516
5517 case X86::BI__builtin_ia32_psrldqi128_byteshift:
5518 case X86::BI__builtin_ia32_psrldqi256_byteshift:
5519 case X86::BI__builtin_ia32_psrldqi512_byteshift:
5520 / These SRLDQ intrinsics always operate on byte elements (8 bits).
5521 / The lane width is hardcoded to 16 to match the SIMD register size,
5522 / but the algorithm processes one byte per iteration,
5523 / so APInt(8, ...) is correct and intentional.
5525 S, OpPC, Call,
5526 [](unsigned DstIdx, unsigned Shift) -> std::pair<unsigned, int> {
5527 unsigned LaneBase = (DstIdx / 16) * 16;
5528 unsigned LaneIdx = DstIdx % 16;
5529 if (LaneIdx + Shift < 16)
5530 return std::make_pair(0,
5531 static_cast<int>(LaneBase + LaneIdx + Shift));
5532
5533 return std::make_pair(0, -1);
5534 });
5535
5536 case X86::BI__builtin_ia32_palignr128:
5537 case X86::BI__builtin_ia32_palignr256:
5538 case X86::BI__builtin_ia32_palignr512:
5540 S, OpPC, Call, [](unsigned DstIdx, unsigned Shift) {
5541 / Default to -1 → zero-fill this destination element
5542 unsigned VecIdx = 1;
5543 int ElemIdx = -1;
5544
5545 int Lane = DstIdx / 16;
5546 int Offset = DstIdx % 16;
5547
5548 / Elements come from VecB first, then VecA after the shift boundary
5549 unsigned ShiftedIdx = Offset + (Shift & 0xFF);
5550 if (ShiftedIdx < 16) { / from VecB
5551 ElemIdx = ShiftedIdx + (Lane * 16);
5552 } else if (ShiftedIdx < 32) { / from VecA
5553 VecIdx = 0;
5554 ElemIdx = (ShiftedIdx - 16) + (Lane * 16);
5555 }
5556
5557 return std::pair<unsigned, int>{VecIdx, ElemIdx};
5558 });
5559
5560 case X86::BI__builtin_ia32_alignd128:
5561 case X86::BI__builtin_ia32_alignd256:
5562 case X86::BI__builtin_ia32_alignd512:
5563 case X86::BI__builtin_ia32_alignq128:
5564 case X86::BI__builtin_ia32_alignq256:
5565 case X86::BI__builtin_ia32_alignq512: {
5566 unsigned NumElems = Call->getType()->castAs<VectorType>()->getNumElements();
5568 S, OpPC, Call, [NumElems](unsigned DstIdx, unsigned Shift) {
5569 unsigned Imm = Shift & 0xFF;
5570 unsigned EffectiveShift = Imm & (NumElems - 1);
5571 unsigned SourcePos = DstIdx + EffectiveShift;
5572 unsigned VecIdx = SourcePos < NumElems ? 1u : 0u;
5573 unsigned ElemIdx = SourcePos & (NumElems - 1);
5574 return std::pair<unsigned, int>{VecIdx, static_cast<int>(ElemIdx)};
5575 });
5576 }
5577
5578 default:
5579 S.FFDiag(S.Current->getLocation(OpPC),
5580 diag::note_invalid_subexpr_in_const_expr)
5581 << S.Current->getRange(OpPC);
5582
5583 return false;
5584 }
5585
5586 llvm_unreachable("Unhandled builtin ID");
5587}
5588
// Evaluates an OffsetOfExpr (__builtin_offsetof) at compile time.
// Walks the expression's component list (fields, array subscripts, base
// classes), accumulating a byte offset; the array index values were pushed
// on the interpreter stack as Sint64 and arrive here via \p ArrayIndices.
// On success the total offset in bytes is stored into \p IntResult.
//
// NOTE(review): this extracted listing is missing several lines -- the
// function signature (presumably `bool InterpretOffsetOf(InterpState &S,
// CodePtr OpPC, const OffsetOfExpr *E, ...)`), the declaration of the
// `Result` accumulator, the `ASTRecordLayout` lookups that define `RL`,
// the right-hand side of the field-offset addition, and the
// `OffsetOfNode::Identifier` case label. Compare against the original
// source before relying on this listing.
                         ArrayRef<int64_t> ArrayIndices, int64_t &IntResult) {
  unsigned N = E->getNumComponents();
  assert(N > 0);

  unsigned ArrayIndex = 0;
  QualType CurrentType = E->getTypeSourceInfo()->getType();
  for (unsigned I = 0; I != N; ++I) {
    const OffsetOfNode &Node = E->getComponent(I);
    switch (Node.getKind()) {
    case OffsetOfNode::Field: {
      // Named member: add its offset within the current record's layout,
      // then descend into the member's type.
      const FieldDecl *MemberDecl = Node.getField();
      const auto *RD = CurrentType->getAsRecordDecl();
      if (!RD || RD->isInvalidDecl())
        return false;
      unsigned FieldIndex = MemberDecl->getFieldIndex();
      assert(FieldIndex < RL.getFieldCount() && "offsetof field in wrong type");
      // NOTE(review): the RHS of this addition is missing from the listing.
      Result +=
      CurrentType = MemberDecl->getType().getNonReferenceType();
      break;
    }
    case OffsetOfNode::Array: {
      // When generating bytecode, we put all the index expressions as Sint64 on
      // the stack.
      int64_t Index = ArrayIndices[ArrayIndex];
      const ArrayType *AT = S.getASTContext().getAsArrayType(CurrentType);
      if (!AT)
        return false;
      CurrentType = AT->getElementType();
      CharUnits ElementSize = S.getASTContext().getTypeSizeInChars(CurrentType);
      Result += Index * ElementSize;
      ++ArrayIndex;
      break;
    }
    case OffsetOfNode::Base: {
      const CXXBaseSpecifier *BaseSpec = Node.getBase();
      // Virtual base offsets are not known statically here.
      if (BaseSpec->isVirtual())
        return false;

      // Find the layout of the class whose base we are looking into.
      const auto *RD = CurrentType->getAsCXXRecordDecl();
      if (!RD || RD->isInvalidDecl())
        return false;

      // Find the base class itself.
      CurrentType = BaseSpec->getType();
      const auto *BaseRD = CurrentType->getAsCXXRecordDecl();
      if (!BaseRD)
        return false;

      // Add the offset to the base.
      Result += RL.getBaseClassOffset(BaseRD);
      break;
    }
      // NOTE(review): the `case OffsetOfNode::Identifier:` label is missing
      // from this listing; it falls through to the unreachable below.
      llvm_unreachable("Dependent OffsetOfExpr?");
    }
  }

  IntResult = Result.getQuantity();

  return true;
}
5656
// Writes \p IntValue into the single integral member of the record that
// \p Ptr points to, and marks that member initialized.
// NOTE(review): the signature line is missing from this extracted listing;
// presumably this is `bool SetThreeWayComparisonField(InterpState &S,
// CodePtr OpPC, ...)`, used to materialize std::strong_ordering-style
// results whose wrapper records hold exactly one integer field -- confirm
// against the original source.
                               const Pointer &Ptr, const APSInt &IntValue) {

  const Record *R = Ptr.getRecord();
  assert(R);
  assert(R->getNumFields() == 1);

  unsigned FieldOffset = R->getField(0u)->Offset;
  const Pointer &FieldPtr = Ptr.atField(FieldOffset);
  PrimType FieldT = *S.getContext().classify(FieldPtr.getType());

  // Store the sign-extended value using the field's primitive type, then
  // flag the field as initialized.
  INT_TYPE_SWITCH(FieldT,
                  FieldPtr.deref<T>() = T::from(IntValue.getSExtValue()));
  FieldPtr.initialize();
  return true;
}
5673
// Recursively value-initializes ("zeroes") the object \p Dest points to.
// Handles primitives, records (field by field), primitive arrays, and
// composite arrays; other descriptor kinds are silently ignored.
static void zeroAll(Pointer &Dest) {
  const Descriptor *Desc = Dest.getFieldDesc();

  if (Desc->isPrimitive()) {
    // Destroy the existing primitive and placement-new a
    // value-initialized one in its place.
    TYPE_SWITCH(Desc->getPrimType(), {
      Dest.deref<T>().~T();
      new (&Dest.deref<T>()) T();
    });
    return;
  }

  if (Desc->isRecord()) {
    // Zero every field of the record recursively.
    const Record *R = Desc->ElemRecord;
    for (const Record::Field &F : R->fields()) {
      Pointer FieldPtr = Dest.atField(F.Offset);
      zeroAll(FieldPtr);
    }
    return;
  }

  if (Desc->isPrimitiveArray()) {
    // NOTE(review): the loop body never uses the index I --
    // Dest.deref<T>() is re-evaluated identically on every iteration.
    // If the intent is to zero each array element, this looks like it
    // should index into the array (e.g. via atIndex(I)); confirm against
    // the original source before changing.
    for (unsigned I = 0, N = Desc->getNumElems(); I != N; ++I) {
      TYPE_SWITCH(Desc->getPrimType(), {
        Dest.deref<T>().~T();
        new (&Dest.deref<T>()) T();
      });
    }
    return;
  }

  if (Desc->isCompositeArray()) {
    // narrow() scopes the pointer to the single element before recursing.
    for (unsigned I = 0, N = Desc->getNumElems(); I != N; ++I) {
      Pointer ElemPtr = Dest.atIndex(I).narrow();
      zeroAll(ElemPtr);
    }
    return;
  }
}
5712
5713static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
5714 Pointer &Dest, bool Activate);
5715static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src,
5716 Pointer &Dest, bool Activate = false) {
5717 [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
5718 const Descriptor *DestDesc = Dest.getFieldDesc();
5719
5720 auto copyField = [&](const Record::Field &F, bool Activate) -> bool {
5721 Pointer DestField = Dest.atField(F.Offset);
5722 if (OptPrimType FT = S.Ctx.classify(F.Decl->getType())) {
5723 TYPE_SWITCH(*FT, {
5724 DestField.deref<T>() = Src.atField(F.Offset).deref<T>();
5725 if (Src.atField(F.Offset).isInitialized())
5726 DestField.initialize();
5727 if (Activate)
5728 DestField.activate();
5729 });
5730 return true;
5731 }
5732 / Composite field.
5733 return copyComposite(S, OpPC, Src.atField(F.Offset), DestField, Activate);
5734 };
5735
5736 assert(SrcDesc->isRecord());
5737 assert(SrcDesc->ElemRecord == DestDesc->ElemRecord);
5738 const Record *R = DestDesc->ElemRecord;
5739 for (const Record::Field &F : R->fields()) {
5740 if (R->isUnion()) {
5741 / For unions, only copy the active field. Zero all others.
5742 const Pointer &SrcField = Src.atField(F.Offset);
5743 if (SrcField.isActive()) {
5744 if (!copyField(F, /*Activate=*/true))
5745 return false;
5746 } else {
5747 if (!CheckMutable(S, OpPC, Src.atField(F.Offset)))
5748 return false;
5749 Pointer DestField = Dest.atField(F.Offset);
5750 zeroAll(DestField);
5751 }
5752 } else {
5753 if (!copyField(F, Activate))
5754 return false;
5755 }
5756 }
5757
5758 for (const Record::Base &B : R->bases()) {
5759 Pointer DestBase = Dest.atField(B.Offset);
5760 if (!copyRecord(S, OpPC, Src.atField(B.Offset), DestBase, Activate))
5761 return false;
5762 }
5763
5764 Dest.initialize();
5765 return true;
5766}
5767
5768static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src,
5769 Pointer &Dest, bool Activate = false) {
5770 assert(Src.isLive() && Dest.isLive());
5771
5772 [[maybe_unused]] const Descriptor *SrcDesc = Src.getFieldDesc();
5773 const Descriptor *DestDesc = Dest.getFieldDesc();
5774
5775 assert(!DestDesc->isPrimitive() && !SrcDesc->isPrimitive());
5776
5777 if (DestDesc->isPrimitiveArray()) {
5778 assert(SrcDesc->isPrimitiveArray());
5779 assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
5780 PrimType ET = DestDesc->getPrimType();
5781 for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
5782 Pointer DestElem = Dest.atIndex(I);
5783 TYPE_SWITCH(ET, {
5784 DestElem.deref<T>() = Src.elem<T>(I);
5785 DestElem.initialize();
5786 });
5787 }
5788 return true;
5789 }
5790
5791 if (DestDesc->isCompositeArray()) {
5792 assert(SrcDesc->isCompositeArray());
5793 assert(SrcDesc->getNumElems() == DestDesc->getNumElems());
5794 for (unsigned I = 0, N = DestDesc->getNumElems(); I != N; ++I) {
5795 const Pointer &SrcElem = Src.atIndex(I).narrow();
5796 Pointer DestElem = Dest.atIndex(I).narrow();
5797 if (!copyComposite(S, OpPC, SrcElem, DestElem, Activate))
5798 return false;
5799 }
5800 return true;
5801 }
5802
5803 if (DestDesc->isRecord())
5804 return copyRecord(S, OpPC, Src, Dest, Activate);
5805 return Invalid(S, OpPC);
5806}
5807
/// Copies the composite object \p Src points to into \p Dest.
/// Thin public entry point over copyComposite() with activation disabled
/// (the default).
bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest) {
  return copyComposite(S, OpPC, Src, Dest);
}
5811
5812} / namespace interp
5813} / namespace clang
#define V(N, I)
Defines enum values for all the target-independent builtin functions.
llvm::APSInt APSInt
Definition Compiler.cpp:24
GCCTypeClass
Values returned by __builtin_classify_type, chosen to match the values produced by GCC's builtin.
CharUnits GetAlignOfExpr(const ASTContext &Ctx, const Expr *E, UnaryExprOrTypeTrait ExprKind)
GCCTypeClass EvaluateBuiltinClassifyType(QualType T, const LangOptions &LangOpts)
EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way as GCC.
static bool isOneByteCharacterType(QualType T)
static bool isUserWritingOffTheEnd(const ASTContext &Ctx, const LValue &LVal)
Attempts to detect a user writing into a piece of memory that's impossible to figure out the size of ...
uint8_t GFNIMul(uint8_t AByte, uint8_t BByte)
uint8_t GFNIAffine(uint8_t XByte, const APInt &AQword, const APSInt &Imm, bool Inverse)
TokenType getType() const
Returns the token's type, e.g.
#define X(type, name)
Definition Value.h:97
static DiagnosticBuilder Diag(DiagnosticsEngine *Diags, const LangOptions &Features, FullSourceLoc TokLoc, const char *TokBegin, const char *TokRangeBegin, const char *TokRangeEnd, unsigned DiagID)
Produce a diagnostic highlighting some portion of a literal.
#define INT_TYPE_SWITCH_NO_BOOL(Expr, B)
Definition PrimType.h:251
#define INT_TYPE_SWITCH(Expr, B)
Definition PrimType.h:232
#define TYPE_SWITCH(Expr, B)
Definition PrimType.h:211
static std::string toString(const clang::SanitizerSet &Sanitizers)
Produce a string containing comma-separated names of sanitizers in Sanitizers set.
static QualType getPointeeType(const MemRegion *R)
Enumerates target-specific builtins in their own namespaces within namespace clang.
APValue - This class implements a discriminated union of [uninitialized] [APSInt] [APFloat],...
Definition APValue.h:122
CharUnits & getLValueOffset()
Definition APValue.cpp:993
Holds long-lived AST nodes (such as types and decls) that can be referred to throughout the semantic ...
Definition ASTContext.h:220
CharUnits getTypeAlignInChars(QualType T) const
Return the ABI-specified alignment of a (complete) type T, in characters.
unsigned getIntWidth(QualType T) const
const llvm::fltSemantics & getFloatTypeSemantics(QualType T) const
Return the APFloat 'semantics' for the specified scalar floating point type.
CanQualType FloatTy
const ASTRecordLayout & getASTRecordLayout(const RecordDecl *D) const
Get or compute information about the layout of the specified record (struct/union/class) D,...
Builtin::Context & BuiltinInfo
Definition ASTContext.h:778
QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize, const Expr *SizeExpr, ArraySizeModifier ASM, unsigned IndexTypeQuals) const
Return the unique reference to the type for a constant array of the specified element type.
const LangOptions & getLangOpts() const
Definition ASTContext.h:930
CanQualType CharTy
CharUnits getDeclAlign(const Decl *D, bool ForAlignof=false) const
Return a conservative estimate of the alignment of the specified decl D.
QualType getWCharType() const
Return the unique wchar_t type available in C++ (and available as __wchar_t as a Microsoft extension)...
const ArrayType * getAsArrayType(QualType T) const
Type Query functions.
uint64_t getTypeSize(QualType T) const
Return the size of the specified (complete) type T, in bits.
CharUnits getTypeSizeInChars(QualType T) const
Return the size of the specified (complete) type T, in characters.
QualType getSizeType() const
Return the unique type for "size_t" (C99 7.17), defined in <stddef.h>.
const TargetInfo & getTargetInfo() const
Definition ASTContext.h:895
CharUnits toCharUnitsFromBits(int64_t BitSize) const
Convert a size in bits to a size in characters.
CanQualType getCanonicalTagType(const TagDecl *TD) const
static bool hasSameUnqualifiedType(QualType T1, QualType T2)
Determine whether the given types are equivalent after cvr-qualifiers have been removed.
CanQualType HalfTy
uint64_t getCharWidth() const
Return the size of the character type, in bits.
ASTRecordLayout - This class contains layout information for one RecordDecl, which is a struct/union/...
unsigned getFieldCount() const
getFieldCount - Get the number of fields in the layout.
uint64_t getFieldOffset(unsigned FieldNo) const
getFieldOffset - Get the offset of the given field index, in bits.
CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const
getBaseClassOffset - Get the offset, in chars, for the given base class.
CharUnits getVBaseClassOffset(const CXXRecordDecl *VBase) const
getVBaseClassOffset - Get the offset, in chars, for the given base class.
Represents an array type, per C99 6.7.5.2 - Array Declarators.
Definition TypeBase.h:3722
QualType getElementType() const
Definition TypeBase.h:3734
std::string getQuotedName(unsigned ID) const
Return the identifier name for the specified builtin inside single quotes for a diagnostic,...
Definition Builtins.cpp:85
bool isConstantEvaluated(unsigned ID) const
Return true if this function can be constant evaluated by Clang frontend.
Definition Builtins.h:459
Represents a base class of a C++ class.
Definition DeclCXX.h:146
bool isVirtual() const
Determines whether the base class is a virtual base class (or not).
Definition DeclCXX.h:203
QualType getType() const
Retrieves the type of the base class.
Definition DeclCXX.h:249
CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
Definition Expr.h:2877
Expr * getArg(unsigned Arg)
getArg - Return the specified argument.
Definition Expr.h:3081
CharUnits - This is an opaque type for sizes expressed in character units.
Definition CharUnits.h:38
CharUnits alignmentAtOffset(CharUnits offset) const
Given that this is a non-zero alignment value, what is the alignment at the given offset?
Definition CharUnits.h:207
bool isZero() const
isZero - Test whether the quantity equals zero.
Definition CharUnits.h:122
QuantityType getQuantity() const
getQuantity - Get the raw integer representation of this quantity.
Definition CharUnits.h:185
static CharUnits One()
One - Construct a CharUnits quantity of one.
Definition CharUnits.h:58
static CharUnits fromQuantity(QuantityType Quantity)
fromQuantity - Construct a CharUnits quantity from a raw integer type.
Definition CharUnits.h:63
CharUnits alignTo(const CharUnits &Align) const
alignTo - Returns the next integer (mod 2**64) that is greater than or equal to this quantity and is ...
Definition CharUnits.h:201
static unsigned getMaxSizeBits(const ASTContext &Context)
Determine the maximum number of active bits that an array's size can require, which limits the maximu...
Definition Type.cpp:254
This represents one expression.
Definition Expr.h:112
SourceLocation getExprLoc() const LLVM_READONLY
getExprLoc - Return the preferred location for the arrow when diagnosing a problem with a generic exp...
Definition Expr.cpp:273
QualType getType() const
Definition Expr.h:144
Represents a member of a struct/union/class.
Definition Decl.h:3160
unsigned getFieldIndex() const
Returns the index of this field within its record, as appropriate for passing to ASTRecordLayout::get...
Definition Decl.h:3245
const RecordDecl * getParent() const
Returns the parent of this field declaration, which is the struct in which this field is defined.
Definition Decl.h:3396
Represents a function declaration or definition.
Definition Decl.h:2000
One of these records is kept for each identifier that is lexed.
bool isStr(const char(&Str)[StrLen]) const
Return true if this is the identifier for the specified string.
std::optional< llvm::AllocTokenMode > AllocTokenMode
The allocation token mode.
std::optional< uint64_t > AllocTokenMax
Maximum number of allocation tokens (0 = target SIZE_MAX), nullopt if none set (use target SIZE_MAX).
OffsetOfExpr - [C99 7.17] - This represents an expression of the form offsetof(record-type,...
Definition Expr.h:2527
const OffsetOfNode & getComponent(unsigned Idx) const
Definition Expr.h:2574
TypeSourceInfo * getTypeSourceInfo() const
Definition Expr.h:2567
unsigned getNumComponents() const
Definition Expr.h:2582
Helper class for OffsetOfExpr.
Definition Expr.h:2421
FieldDecl * getField() const
For a field offsetof node, returns the field.
Definition Expr.h:2485
@ Array
An index into an array.
Definition Expr.h:2426
@ Identifier
A field in a dependent type, known only by its name.
Definition Expr.h:2430
@ Field
A field.
Definition Expr.h:2428
@ Base
An implicit indirection through a C++ base class, when the field found is in a base class.
Definition Expr.h:2433
Kind getKind() const
Determine what kind of offsetof node this is.
Definition Expr.h:2475
CXXBaseSpecifier * getBase() const
For a base class node, returns the base specifier.
Definition Expr.h:2495
PointerType - C99 6.7.5.1 - Pointer Declarators.
Definition TypeBase.h:3328
A (possibly-)qualified type.
Definition TypeBase.h:937
bool isTriviallyCopyableType(const ASTContext &Context) const
Return true if this is a trivially copyable type (C++0x [basic.types]p9)
Definition Type.cpp:2866
bool isNull() const
Return true if this QualType doesn't point to a type yet.
Definition TypeBase.h:1004
const Type * getTypePtr() const
Retrieves a pointer to the underlying (unqualified) type.
Definition TypeBase.h:8278
QualType getNonReferenceType() const
If Type is a reference type (e.g., const int&), returns the type that the reference refers to ("const...
Definition TypeBase.h:8463
SourceRange getSourceRange() const LLVM_READONLY
SourceLocation tokens are not useful in isolation - they are low level value objects created/interpre...
Definition Stmt.cpp:338
unsigned getMaxAtomicInlineWidth() const
Return the maximum width lock-free atomic operation which can be inlined given the supported features...
Definition TargetInfo.h:858
bool isBigEndian() const
virtual int getEHDataRegisterNumber(unsigned RegNo) const
Return the register number that __builtin_eh_return_regno would return with the specified argument.
virtual bool isNan2008() const
Returns true if NaN encoding is IEEE 754-2008.
QualType getType() const
Return the type wrapped by this type source info.
Definition TypeBase.h:8260
bool isBooleanType() const
Definition TypeBase.h:9001
bool isSignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is signed or an enumeration types whose underlying ty...
Definition Type.cpp:2225
bool isUnsignedIntegerOrEnumerationType() const
Determines whether this is an integer type that is unsigned or an enumeration types whose underlying ...
Definition Type.cpp:2273
CXXRecordDecl * getAsCXXRecordDecl() const
Retrieves the CXXRecordDecl that this type refers to, either because the type is a RecordType or beca...
Definition Type.h:26
RecordDecl * getAsRecordDecl() const
Retrieves the RecordDecl this type refers to.
Definition Type.h:41
bool isPointerType() const
Definition TypeBase.h:8515
bool isIntegerType() const
isIntegerType() does not include complex integers (a GCC extension).
Definition TypeBase.h:8915
const T * castAs() const
Member-template castAs<specific type>.
Definition TypeBase.h:9158
QualType getPointeeType() const
If this is a pointer, ObjC object pointer, or block pointer, this returns the respective pointee.
Definition Type.cpp:752
bool isIncompleteType(NamedDecl **Def=nullptr) const
Types are partitioned into 3 broad categories (C99 6.2.5p1): object types, function types,...
Definition Type.cpp:2435
bool isVectorType() const
Definition TypeBase.h:8654
bool isRealFloatingType() const
Floating point categories.
Definition Type.cpp:2320
bool isFloatingType() const
Definition Type.cpp:2304
const T * getAs() const
Member-template getAs<specific type>'.
Definition TypeBase.h:9091
QualType getType() const
Definition Decl.h:723
Represents a GCC generic vector type.
Definition TypeBase.h:4175
unsigned getNumElements() const
Definition TypeBase.h:4190
QualType getElementType() const
Definition TypeBase.h:4189
A memory block, either on the stack or in the heap.
Definition InterpBlock.h:44
const Descriptor * getDescriptor() const
Returns the block's descriptor.
Definition InterpBlock.h:73
bool isDynamic() const
Definition InterpBlock.h:83
Wrapper around boolean types.
Definition Boolean.h:25
static Boolean from(T Value)
Definition Boolean.h:97
Pointer into the code segment.
Definition Source.h:30
const LangOptions & getLangOpts() const
Returns the language options.
Definition Context.cpp:328
OptPrimType classify(QualType T) const
Classifies a type.
Definition Context.cpp:362
unsigned getEvalID() const
Definition Context.h:147
Manages dynamic memory allocations done during bytecode interpretation.
bool deallocate(const Expr *Source, const Block *BlockToDelete, InterpState &S)
Deallocate the given source+block combination.
std::optional< Form > getAllocationForm(const Expr *Source) const
Checks whether the allocation done at the given source is an array allocation.
Block * allocate(const Descriptor *D, unsigned EvalID, Form AllocForm)
Allocate ONE element of the given descriptor.
If a Floating is constructed from Memory, it DOES NOT OWN THAT MEMORY.
Definition Floating.h:35
void copy(const APFloat &F)
Definition Floating.h:123
llvm::FPClassTest classify() const
Definition Floating.h:154
bool isSignaling() const
Definition Floating.h:149
bool isNormal() const
Definition Floating.h:152
ComparisonCategoryResult compare(const Floating &RHS) const
Definition Floating.h:157
bool isZero() const
Definition Floating.h:144
bool isNegative() const
Definition Floating.h:143
bool isFinite() const
Definition Floating.h:151
bool isDenormal() const
Definition Floating.h:153
APFloat::fltCategory getCategory() const
Definition Floating.h:155
APFloat getAPFloat() const
Definition Floating.h:64
Base class for stack frames, shared between VM and walker.
Definition Frame.h:25
virtual const FunctionDecl * getCallee() const =0
Returns the called function's declaration.
If an IntegralAP is constructed from Memory, it DOES NOT OWN THAT MEMORY.
Definition IntegralAP.h:36
Frame storing local variables.
Definition InterpFrame.h:27
const Expr * getExpr(CodePtr PC) const
InterpFrame * Caller
The frame of the previous function.
Definition InterpFrame.h:30
SourceInfo getSource(CodePtr PC) const
Map a location to a source.
CodePtr getRetPC() const
Returns the return address of the frame.
SourceLocation getLocation(CodePtr PC) const
SourceRange getRange(CodePtr PC) const
unsigned getDepth() const
const FunctionDecl * getCallee() const override
Returns the caller.
Stack frame storing temporaries and parameters.
Definition InterpStack.h:25
T pop()
Returns the value from the top of the stack and removes it.
Definition InterpStack.h:39
void push(Tys &&...Args)
Constructs a value in place on the top of the stack.
Definition InterpStack.h:33
void discard()
Discards the top value from the stack.
Definition InterpStack.h:50
T & peek() const
Returns a reference to the value on the top of the stack.
Definition InterpStack.h:62
Interpreter context.
Definition InterpState.h:43
Expr::EvalStatus & getEvalStatus() const override
Definition InterpState.h:67
Context & getContext() const
DynamicAllocator & getAllocator()
Context & Ctx
Interpreter Context.
Floating allocFloat(const llvm::fltSemantics &Sem)
llvm::SmallVector< const Block * > InitializingBlocks
List of blocks we're currently running either constructors or destructors for.
ASTContext & getASTContext() const override
Definition InterpState.h:70
InterpStack & Stk
Temporary stack.
const VarDecl * EvaluatingDecl
Declaration we're initializing/evaluating, if any.
InterpFrame * Current
The current frame.
T allocAP(unsigned BitWidth)
const LangOptions & getLangOpts() const
Definition InterpState.h:71
StdAllocatorCaller getStdAllocatorCaller(StringRef Name) const
Program & P
Reference to the module containing all bytecode.
PrimType value_or(PrimType PT) const
Definition PrimType.h:68
A pointer to a memory block, live or dead.
Definition Pointer.h:92
Pointer narrow() const
Restricts the scope of an array element pointer.
Definition Pointer.h:189
bool isInitialized() const
Checks if an object was initialized.
Definition Pointer.cpp:441
Pointer atIndex(uint64_t Idx) const
Offsets a pointer inside an array.
Definition Pointer.h:157
bool isDummy() const
Checks if the pointer points to a dummy value.
Definition Pointer.h:552
int64_t getIndex() const
Returns the index into an array.
Definition Pointer.h:617
bool isActive() const
Checks if the object is active.
Definition Pointer.h:541
Pointer atField(unsigned Off) const
Creates a pointer to a field.
Definition Pointer.h:174
T & deref() const
Dereferences the pointer, if it's live.
Definition Pointer.h:668
unsigned getNumElems() const
Returns the number of elements.
Definition Pointer.h:601
Pointer getArray() const
Returns the parent array.
Definition Pointer.h:321
bool isUnknownSizeArray() const
Checks if the structure is an array of unknown size.
Definition Pointer.h:420
void activate() const
Activates a field.
Definition Pointer.cpp:577
bool isIntegralPointer() const
Definition Pointer.h:474
QualType getType() const
Returns the type of the innermost field.
Definition Pointer.h:341
bool isArrayElement() const
Checks if the pointer points to an array.
Definition Pointer.h:426
void initializeAllElements() const
Initialize all elements of a primitive array at once.
Definition Pointer.cpp:546
bool isLive() const
Checks if the pointer is live.
Definition Pointer.h:273
bool inArray() const
Checks if the innermost field is an array.
Definition Pointer.h:402
T & elem(unsigned I) const
Dereferences the element at index I.
Definition Pointer.h:684
Pointer getBase() const
Returns a pointer to the object of which this pointer is a field.
Definition Pointer.h:312
std::string toDiagnosticString(const ASTContext &Ctx) const
Converts the pointer to a string usable in diagnostics.
Definition Pointer.cpp:428
bool isZero() const
Checks if the pointer is null.
Definition Pointer.h:259
bool isRoot() const
Pointer points directly to a block.
Definition Pointer.h:442
const Descriptor * getDeclDesc() const
Accessor for information about the declaration site.
Definition Pointer.h:287
static bool pointToSameBlock(const Pointer &A, const Pointer &B)
Checks if both given pointers point to the same block.
Definition Pointer.cpp:653
APValue toAPValue(const ASTContext &ASTCtx) const
Converts the pointer to an APValue.
Definition Pointer.cpp:172
bool isOnePastEnd() const
Checks if the index is one past end.
Definition Pointer.h:634
uint64_t getIntegerRepresentation() const
Definition Pointer.h:144
const FieldDecl * getField() const
Returns the field information.
Definition Pointer.h:486
Pointer expand() const
Expands a pointer to the containing array, undoing narrowing.
Definition Pointer.h:224
bool isBlockPointer() const
Definition Pointer.h:473
const Block * block() const
Definition Pointer.h:607
const Descriptor * getFieldDesc() const
Accessors for information about the innermost field.
Definition Pointer.h:331
bool isVirtualBaseClass() const
Definition Pointer.h:548
bool isBaseClass() const
Checks if a structure is a base class.
Definition Pointer.h:547
size_t elemSize() const
Returns the element size of the innermost field.
Definition Pointer.h:363
bool canBeInitialized() const
If this pointer has an InlineDescriptor we can use to initialize.
Definition Pointer.h:449
Lifetime getLifetime() const
Definition Pointer.h:729
void initialize() const
Initializes a field.
Definition Pointer.cpp:494
bool isField() const
Checks if the item is a field in an object.
Definition Pointer.h:279
const Record * getRecord() const
Returns the record descriptor of a class.
Definition Pointer.h:479
Descriptor * createDescriptor(const DeclTy &D, PrimType T, const Type *SourceTy=nullptr, Descriptor::MetadataSize MDSize=std::nullopt, bool IsConst=false, bool IsTemporary=false, bool IsMutable=false, bool IsVolatile=false)
Creates a descriptor for a primitive type.
Definition Program.h:119
Structure/Class descriptor.
Definition Record.h:25
const RecordDecl * getDecl() const
Returns the underlying declaration.
Definition Record.h:53
bool isUnion() const
Checks if the record is a union.
Definition Record.h:57
const Field * getField(const FieldDecl *FD) const
Returns a field.
Definition Record.cpp:47
llvm::iterator_range< const_base_iter > bases() const
Definition Record.h:92
unsigned getNumFields() const
Definition Record.h:88
llvm::iterator_range< const_field_iter > fields() const
Definition Record.h:84
Describes the statement/declaration an opcode was generated from.
Definition Source.h:74
OptionalDiagnostic Note(SourceLocation Loc, diag::kind DiagId)
Add a note to a prior diagnostic.
Definition State.cpp:63
DiagnosticBuilder report(SourceLocation Loc, diag::kind DiagId)
Directly reports a diagnostic message.
Definition State.cpp:74
OptionalDiagnostic FFDiag(SourceLocation Loc, diag::kind DiagId=diag::note_invalid_subexpr_in_const_expr, unsigned ExtraNotes=0)
Diagnose that the evaluation could not be folded (FF => FoldFailure)
Definition State.cpp:21
OptionalDiagnostic CCEDiag(SourceLocation Loc, diag::kind DiagId=diag::note_invalid_subexpr_in_const_expr, unsigned ExtraNotes=0)
Diagnose that the evaluation does not produce a C++11 core constant expression.
Definition State.cpp:42
bool checkingPotentialConstantExpression() const
Are we checking whether the expression is a potential constant expression?
Definition State.h:99
Defines the clang::TargetInfo interface.
bool computeOSLogBufferLayout(clang::ASTContext &Ctx, const clang::CallExpr *E, OSLogBufferLayout &layout)
Definition OSLog.cpp:192
std::optional< llvm::AllocTokenMetadata > getAllocTokenMetadata(QualType T, const ASTContext &Ctx)
Get the information required for construction of an allocation token ID.
QualType inferPossibleType(const CallExpr *E, const ASTContext &Ctx, const CastExpr *CastE)
Infer the possible allocated type from an allocation call expression.
static bool isNoopBuiltin(unsigned ID)
static bool interp__builtin_is_within_lifetime(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_ia32_shuffle_generic(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< std::pair< unsigned, int >(unsigned, unsigned)> GetSourceIndex)
static bool interp__builtin_ia32_phminposuw(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static void assignInteger(InterpState &S, const Pointer &Dest, PrimType ValueT, const APSInt &Value)
static bool interp_builtin_ia32_gfni_affine(InterpState &S, CodePtr OpPC, const CallExpr *Call, bool Inverse)
bool readPointerToBuffer(const Context &Ctx, const Pointer &FromPtr, BitcastBuffer &Buffer, bool ReturnOnUninit)
static Floating abs(InterpState &S, const Floating &In)
static bool interp__builtin_x86_extract_vector(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_fmax(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, bool IsNumBuiltin)
static bool interp__builtin_elementwise_maxmin(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned BuiltinID)
static bool interp__builtin_bswap(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_elementwise_triop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_assume(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool CheckNewDeleteForms(InterpState &S, CodePtr OpPC, DynamicAllocator::Form AllocForm, DynamicAllocator::Form DeleteForm, const Descriptor *D, const Expr *NewExpr)
Diagnose mismatched new[]/delete or new/delete[] pairs.
Definition Interp.cpp:1117
static bool interp__builtin_ia32_shift_with_count(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APInt &, uint64_t)> ShiftOp, llvm::function_ref< APInt(const APInt &, unsigned)> OverflowOp)
static bool interp__builtin_isnan(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
Defined as __builtin_isnan(...), to accommodate the fact that it can take a float,...
static llvm::RoundingMode getRoundingMode(FPOptions FPO)
static bool interp__builtin_elementwise_countzeroes(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
Can be called with an integer or vector as the first and only parameter.
bool Call(InterpState &S, CodePtr OpPC, const Function *Func, uint32_t VarArgSize)
Definition Interp.cpp:1588
static bool interp__builtin_classify_type(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_fmin(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, bool IsNumBuiltin)
bool SetThreeWayComparisonField(InterpState &S, CodePtr OpPC, const Pointer &Ptr, const APSInt &IntValue)
Sets the given integral value to the pointer, which is of a std::{weak,partial,strong}...
static bool interp__builtin_operator_delete(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_fabs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame)
static uint64_t popToUInt64(const InterpState &S, const Expr *E)
static bool interp__builtin_ia32_vpconflict(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_memcmp(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
static bool interp__builtin_atomic_lock_free(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
bool __atomic_always_lock_free(size_t, void const volatile*) bool __atomic_is_lock_free(size_t,...
static llvm::APSInt convertBoolVectorToInt(const Pointer &Val)
static bool interp__builtin_move(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_clz(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
bool CheckMutable(InterpState &S, CodePtr OpPC, const Pointer &Ptr)
Checks if a pointer points to a mutable field.
Definition Interp.cpp:594
static bool interp__builtin_is_aligned_up_down(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
__builtin_is_aligned() __builtin_align_up() __builtin_align_down() The first parameter is either an i...
static bool interp__builtin_ia32_addsub(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool Activate(InterpState &S, CodePtr OpPC)
Definition Interp.h:1964
static bool interp__builtin_ia32_addcarry_subborrow(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
(CarryIn, LHS, RHS, Result)
static bool isOneByteCharacterType(QualType T)
Determine if T is a character type for which we guarantee that sizeof(T) == 1.
static bool convertDoubleToFloatStrict(APFloat Src, Floating &Dst, InterpState &S, const Expr *DiagExpr)
static unsigned computePointerOffset(const ASTContext &ASTCtx, const Pointer &Ptr)
Compute the byte offset of Ptr in the full declaration.
static bool interp__builtin_strcmp(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
bool CheckLoad(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a value can be loaded from a block.
Definition Interp.cpp:793
static bool interp__builtin_ia32_cmp_mask(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID, bool IsUnsigned)
static bool interp__builtin_overflowop(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned BuiltinOp)
static bool interp__builtin_inf(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_vec_ext(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ia32_test_op(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< bool(const APInt &A, const APInt &B)> Fn)
static bool interp__builtin_isinf(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, bool CheckSign, const CallExpr *Call)
static bool interp__builtin_os_log_format_buffer_size(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool InterpretOffsetOf(InterpState &S, CodePtr OpPC, const OffsetOfExpr *E, ArrayRef< int64_t > ArrayIndices, int64_t &IntResult)
Interpret an offsetof operation.
static bool interp__builtin_x86_insert_subvector(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
bool CheckRange(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is in range.
Definition Interp.cpp:519
static bool pointsToLastObject(const Pointer &Ptr)
Does Ptr point to the last subobject?
static bool interp__builtin_select(InterpState &S, CodePtr OpPC, const CallExpr *Call)
AVX512 predicated move: "Result = Mask[] ? LHS[] : RHS[]".
llvm::APFloat APFloat
Definition Floating.h:27
static void discard(InterpStack &Stk, PrimType T)
static bool interp__builtin_select_scalar(InterpState &S, const CallExpr *Call)
Scalar variant of AVX512 predicated select: Result[i] = (Mask bit 0) ?
bool CheckLive(InterpState &S, CodePtr OpPC, const Pointer &Ptr, AccessKinds AK)
Checks if a pointer is live and accessible.
Definition Interp.cpp:414
static bool interp__builtin_fpclassify(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
Five int values followed by one floating value.
static bool interp__builtin_abs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp_floating_comparison(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool handleOverflow(InterpState &S, CodePtr OpPC, const T &SrcValue)
llvm::APInt APInt
Definition FixedPoint.h:19
static bool copyComposite(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest, bool Activate)
static bool copyRecord(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest, bool Activate=false)
static bool interp__builtin_c11_atomic_is_lock_free(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool __c11_atomic_is_lock_free(size_t)
static void zeroAll(Pointer &Dest)
static bool interp__builtin_elementwise_int_binop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_issubnormal(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_arithmetic_fence(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_x86_extract_vector_masked(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
PrimType
Enumeration of the primitive types of the VM.
Definition PrimType.h:34
static bool interp__builtin_isfinite(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool InterpretBuiltin(InterpState &S, CodePtr OpPC, const CallExpr *Call, uint32_t BuiltinID)
Interpret a builtin function.
static bool interp__builtin_expect(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_vec_set(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_complex(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
__builtin_complex(Float A, float B);
static bool evalICmpImm(uint8_t Imm, const APSInt &A, const APSInt &B, bool IsUnsigned)
bool CheckDummy(InterpState &S, CodePtr OpPC, const Block *B, AccessKinds AK)
Checks if a pointer is a dummy pointer.
Definition Interp.cpp:1168
static bool interp__builtin_assume_aligned(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
__builtin_assume_aligned(Ptr, Alignment[, ExtraOffset])
static bool interp__builtin_ia32_cvt_vec2mask(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ptrauth_string_discriminator(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_memchr(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ia32_pmul(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &, const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_x86_pack(InterpState &S, CodePtr, const CallExpr *E, llvm::function_ref< APInt(const APSInt &)> PackFn)
static void pushInteger(InterpState &S, const APSInt &Val, QualType QT)
Pushes Val on the stack as the type given by QT.
static bool interp__builtin_operator_new(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_strlen(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
bool CheckArray(InterpState &S, CodePtr OpPC, const Pointer &Ptr)
Checks if the array is offsetable.
Definition Interp.cpp:406
static bool interp__builtin_elementwise_abs(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
static bool interp__builtin_copysign(InterpState &S, CodePtr OpPC, const InterpFrame *Frame)
static bool interp__builtin_iszero(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_addressof(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_signbit(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
bool Error(InterpState &S, CodePtr OpPC)
Do nothing and just abort execution.
Definition Interp.h:3291
static bool interp__builtin_vector_reduce(InterpState &S, CodePtr OpPC, const CallExpr *Call, unsigned ID)
static bool interp__builtin_ia32_movmsk_op(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_memcpy(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned ID)
static bool interp_builtin_horizontal_fp_binop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APFloat(const APFloat &, const APFloat &, llvm::RoundingMode)> Fn)
static bool interp__builtin_elementwise_triop_fp(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APFloat(const APFloat &, const APFloat &, const APFloat &, llvm::RoundingMode)> Fn)
static bool interp__builtin_popcount(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_object_size(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_carryop(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinOp)
Three integral values followed by a pointer (lhs, rhs, carry, carryOut).
bool CheckArraySize(InterpState &S, CodePtr OpPC, uint64_t NumElems)
static bool interp__builtin_ctz(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, unsigned BuiltinID)
static bool interp__builtin_is_constant_evaluated(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static APSInt popToAPSInt(InterpStack &Stk, PrimType T)
static std::optional< unsigned > computeFullDescSize(const ASTContext &ASTCtx, const Descriptor *Desc)
static bool interp__builtin_isfpclass(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
First parameter to __builtin_isfpclass is the floating value, the second one is an integral value.
static bool interp__builtin_ia32_vcvtps2ph(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_issignaling(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_ia32_multishiftqb(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_ia32_shufbitqmb_mask(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static bool interp__builtin_nan(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call, bool Signaling)
bool DoMemcpy(InterpState &S, CodePtr OpPC, const Pointer &Src, Pointer &Dest)
Copy the contents of Src into Dest.
static bool interp__builtin_elementwise_int_unaryop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &)> Fn)
constexpr bool isIntegralType(PrimType T)
Definition PrimType.h:128
static bool interp__builtin_eh_return_data_regno(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp__builtin_infer_alloc_token(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static bool interp_builtin_horizontal_int_binop(InterpState &S, CodePtr OpPC, const CallExpr *Call, llvm::function_ref< APInt(const APSInt &, const APSInt &)> Fn)
static bool interp__builtin_ia32_cvtsd2ss(InterpState &S, CodePtr OpPC, const CallExpr *Call, bool HasRoundingMask)
static void diagnoseNonConstexprBuiltin(InterpState &S, CodePtr OpPC, unsigned ID)
llvm::APSInt APSInt
Definition FixedPoint.h:20
static bool interp__builtin_ia32_gfni_mul(InterpState &S, CodePtr OpPC, const CallExpr *Call)
static QualType getElemType(const Pointer &P)
static bool interp__builtin_ia32_pternlog(InterpState &S, CodePtr OpPC, const CallExpr *Call, bool MaskZ)
static bool interp__builtin_isnormal(InterpState &S, CodePtr OpPC, const InterpFrame *Frame, const CallExpr *Call)
static void swapBytes(std::byte *M, size_t N)
static bool interp__builtin_ia32_cvtpd2ps(InterpState &S, CodePtr OpPC, const CallExpr *Call, bool IsMasked, bool HasRounding)
The JSON file list parser is used to communicate input to InstallAPI.
if(T->getSizeExpr()) TRY_TO(TraverseStmt(const_cast< Expr * >(T -> getSizeExpr())))
ComparisonCategoryResult
An enumeration representing the possible results of a three-way comparison.
@ Result
The result type of a method or function.
Definition TypeBase.h:905
@ AK_Read
Definition State.h:27
const FunctionProtoType * T
U cast(CodeGen::Address addr)
Definition Address.h:327
SmallVectorImpl< PartialDiagnosticAt > * Diag
Diag - If this is non-null, it will be filled in with a stack of notes indicating why evaluation fail...
Definition Expr.h:633
Track what bits have been initialized to known values and which ones have indeterminate value.
T deref(Bytes Offset) const
Dereferences the value at the given offset.
std::unique_ptr< std::byte[]> Data
A quantity in bits.
A quantity in bytes.
size_t getQuantity() const
Describes a memory block created by an allocation site.
Definition Descriptor.h:122
unsigned getNumElems() const
Returns the number of elements stored in the block.
Definition Descriptor.h:249
bool isPrimitive() const
Checks if the descriptor is of a primitive.
Definition Descriptor.h:263
QualType getElemQualType() const
bool isCompositeArray() const
Checks if the descriptor is of an array of composites.
Definition Descriptor.h:256
const ValueDecl * asValueDecl() const
Definition Descriptor.h:214
static constexpr unsigned MaxArrayElemBytes
Maximum number of bytes to be used for array elements.
Definition Descriptor.h:148
QualType getType() const
const Decl * asDecl() const
Definition Descriptor.h:210
static constexpr MetadataSize InlineDescMD
Definition Descriptor.h:144
unsigned getElemSize() const
returns the size of an element when the structure is viewed as an array.
Definition Descriptor.h:244
bool isPrimitiveArray() const
Checks if the descriptor is of an array of primitives.
Definition Descriptor.h:254
const VarDecl * asVarDecl() const
Definition Descriptor.h:218
PrimType getPrimType() const
Definition Descriptor.h:236
bool isRecord() const
Checks if the descriptor is of a record.
Definition Descriptor.h:268
const Record *const ElemRecord
Pointer to the record, if block contains records.
Definition Descriptor.h:153
const Expr * asExpr() const
Definition Descriptor.h:211
bool isArray() const
Checks if the descriptor is of an array.
Definition Descriptor.h:266
Mapping from primitive types to their representation.
Definition PrimType.h:138
