@@ -172,28 +190,21 @@ RValue CIRGenFunction::emitBuiltinExpr(const GlobalDecl &gd, unsigned builtinID,
builder.getUInt8Ty(), "bi_alloca", suitableAlignmentInBytes, size);
// Initialize the allocated buffer if required.
- if (builtinID != Builtin::BI__builtin_alloca_uninitialized) {
- // Initialize the alloca with the given size and alignment according to
- // the lang opts. Only the trivial non-initialization is supported for
- // now.
-
- switch (getLangOpts().getTrivialAutoVarInit()) {
- case LangOptions::TrivialAutoVarInitKind::Uninitialized:
- // Nothing to initialize.
- break;
- case LangOptions::TrivialAutoVarInitKind::Zero:
- case LangOptions::TrivialAutoVarInitKind::Pattern:
- cgm.errorNYI("trivial auto var init");
- break;
- }
- }
+ if (builtinID != Builtin::BI__builtin_alloca_uninitialized)
+ initializeAlloca(*this, allocaAddr, size, suitableAlignmentInBytes);
// An alloca will always return a pointer to the alloca (stack) address
// space. This address space need not be the same as the AST / Language
// default (e.g. in C / C++ auto vars are in the generic address space). At
// the AST level this is handled within CreateTempAlloca et al., but for the
// builtin / dynamic alloca we have to handle it here.
assert(!cir::MissingFeatures::addressSpace());
+ cir::AddressSpace aas = getCIRAllocaAddressSpace();
+ cir::AddressSpace eas = cir::toCIRAddressSpace(
+ e->getType()->getPointeeType().getAddressSpace());
+ if (eas != aas) {
+ assert(false && "Non-default address space for alloca NYI");
----------------
RiverDave wrote:
I'd prefer we defer this to a different PR. The reason is that we cannot simply perform an address space cast in a case like this, where the pointee types differ (src differs from allocaDest).
see:
```cpp
//cpp
void test_builtin_alloca_addrspace() {
  // Alloca happens in default address space (0), then we cast to address space 1
  void *raw_ptr = __builtin_alloca(sizeof(int));
  int __attribute__((address_space(1))) *as1_ptr =
      (int __attribute__((address_space(1))) *)raw_ptr;
}
//cir
"cir.func"() <{dso_local, function_type = !cir.func<()>, global_visibility = #cir<visibility default>, linkage = 0 : i32, sym_name = "_Z29test_builtin_alloca_addrspacev"}> ({
  %0 = "cir.alloca"() <{alignment = 8 : i64, allocaType = !cir.ptr<!void>, init, name = "raw_ptr"}> : () -> !cir.ptr<!cir.ptr<!void>> loc(#loc11)
  %1 = "cir.const"() <{value = #cir.int<4> : !u64i}> : () -> !u64i loc(#loc12)
  %2 = "cir.alloca"(%1) <{alignment = 16 : i64, allocaType = !u8i, name = "bi_alloca"}> : (!u64i) -> !cir.ptr<!u8i> loc(#loc13)
  %3 = "cir.alloca"() <{alignment = 8 : i64, allocaType = !cir.ptr<!s32i, target_address_space(1)>, init, name = "as1_ptr"}> : () -> !cir.ptr<!cir.ptr<!s32i, target_address_space(1)>> loc(#loc14)
  %4 = "cir.cast"(%2) <{kind = 1 : i32}> : (!cir.ptr<!u8i>) -> !cir.ptr<!void> loc(#loc13)
  "cir.store"(%4, %0) <{alignment = 8 : i64}> : (!cir.ptr<!void>, !cir.ptr<!cir.ptr<!void>>) -> () loc(#loc11)
  %5 = "cir.load"(%0) <{alignment = 8 : i64}> : (!cir.ptr<!cir.ptr<!void>>) -> !cir.ptr<!void> loc(#loc9)
  /* CAST IS INVALID HERE =>*/%6 = "cir.cast"(%5) <{kind = 63 : i32}> : (!cir.ptr<!void>) -> !cir.ptr<!s32i, target_address_space(1)> loc(#loc9)
  "cir.store"(%6, %3) <{alignment = 8 : i64}> : (!cir.ptr<!s32i, target_address_space(1)>, !cir.ptr<!cir.ptr<!s32i, target_address_space(1)>>) -> () loc(#loc14)
  "cir.return"() : () -> () loc(#loc2)
}) : () -> () loc(#loc10)
// OG:
```
As you can see, we'd hit the verifier (address space casts are only valid when both pointee types are the same). I assume this is not an issue in OG since it moved to opaque/generic pointers.
I assume the solution is to introduce an intermediate bitcast so the pointee types agree before the address space cast. (I experimented with this on my own and it worked fine; the OG IR generated seemed to be equivalent.) But again, if you want, we can dive more into that in the future.
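For reference, the two-step lowering I have in mind would look roughly like this. This is a hand-written, pretty-printed CIR sketch rather than compiler output, and it assumes the `bitcast` and `address_space` cast kinds as they exist in the incubator; SSA names are just for illustration. The idea is to first bitcast within the original address space so the pointee types match, and only then perform the address space cast.
```mlir
// Sketch only; %5 is the loaded !cir.ptr<!void> from the dump above.
// Step 1: bitcast in address space 0, only the pointee type changes.
%tmp = cir.cast(bitcast, %5 : !cir.ptr<!void>), !cir.ptr<!s32i>
// Step 2: address space cast, only the address space changes.
%as1 = cir.cast(address_space, %tmp : !cir.ptr<!s32i>), !cir.ptr<!s32i, target_address_space(1)>
```
That way each cast stays individually verifiable: the bitcast never touches the address space, and the address space cast never touches the pointee type.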
https://github.com/llvm/llvm-project/pull/161212