Author: abataev Date: Fri Dec 14 13:00:58 2018 New Revision: 349192 URL: http://llvm.org/viewvc/llvm-project?rev=349192&view=rev Log: [OPENMP][NVPTX]Improved interwarp copy function.
Inlined runtime with the current implementation of the interwarp copy function leads to undefined behavior because of the not-quite-correct implementation of the barriers. Start using the generic __kmpc_barrier function instead of the custom-made barriers. Modified: cfe/trunk/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp cfe/trunk/test/OpenMP/nvptx_target_parallel_reduction_codegen.cpp cfe/trunk/test/OpenMP/nvptx_teams_reduction_codegen.cpp Modified: cfe/trunk/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp?rev=349192&r1=349191&r2=349192&view=diff ============================================================================== --- cfe/trunk/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp (original) +++ cfe/trunk/lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp Fri Dec 14 13:00:58 2018 @@ -189,13 +189,6 @@ enum MachineConfiguration : unsigned { SharedMemorySize = 128, }; -enum NamedBarrier : unsigned { - /// Synchronize on this barrier #ID using a named barrier primitive. - /// Only the subset of active threads in a parallel region arrive at the - /// barrier. - NB_Parallel = 1, -}; - static const ValueDecl *getPrivateItem(const Expr *RefExpr) { RefExpr = RefExpr->IgnoreParens(); if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(RefExpr)) { @@ -655,26 +648,9 @@ static void getNVPTXCTABarrier(CodeGenFu CGF.EmitRuntimeCall(F); } -/// Get barrier #ID to synchronize selected (multiple of warp size) threads in -/// a CTA. -static void getNVPTXBarrier(CodeGenFunction &CGF, int ID, -                            llvm::Value *NumThreads) { -  CGBuilderTy &Bld = CGF.Builder; -  llvm::Value *Args[] = {Bld.getInt32(ID), NumThreads}; -  llvm::Function *F = llvm::Intrinsic::getDeclaration( -      &CGF.CGM.getModule(), llvm::Intrinsic::nvvm_barrier); -  F->addFnAttr(llvm::Attribute::Convergent); -  CGF.EmitRuntimeCall(F, Args); -} - /// Synchronize all GPU threads in a block. 
static void syncCTAThreads(CodeGenFunction &CGF) { getNVPTXCTABarrier(CGF); } -/// Synchronize worker threads in a parallel region. -static void syncParallelThreads(CodeGenFunction &CGF, llvm::Value *NumThreads) { - return getNVPTXBarrier(CGF, NB_Parallel, NumThreads); -} - /// Get the value of the thread_limit clause in the teams directive. /// For the 'generic' execution mode, the runtime encodes thread_limit in /// the launch parameters, always starting thread_limit+warpSize threads per @@ -3272,14 +3248,10 @@ static llvm::Value *emitInterWarpCopyFun CGF.EmitBlock(MergeBB); - Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg); - llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar( - AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc); - - llvm::Value *NumActiveThreads = Bld.CreateNSWMul( - NumWarpsVal, getNVPTXWarpSize(CGF), "num_active_threads"); - // named_barrier_sync(ParallelBarrierID, num_active_threads) - syncParallelThreads(CGF, NumActiveThreads); + // kmpc_barrier. + CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown, + /*EmitChecks=*/false, + /*ForceSimpleCall=*/true); // // Warp 0 copies reduce element from transfer medium. @@ -3288,6 +3260,10 @@ static llvm::Value *emitInterWarpCopyFun llvm::BasicBlock *W0ElseBB = CGF.createBasicBlock("else"); llvm::BasicBlock *W0MergeBB = CGF.createBasicBlock("ifcont"); + Address AddrNumWarpsArg = CGF.GetAddrOfLocalVar(&NumWarpsArg); + llvm::Value *NumWarpsVal = CGF.EmitLoadOfScalar( + AddrNumWarpsArg, /*Volatile=*/false, C.IntTy, Loc); + // Up to 32 threads in warp 0 are active. llvm::Value *IsActiveThread = Bld.CreateICmpULT(ThreadID, NumWarpsVal, "is_active_thread"); @@ -3329,7 +3305,10 @@ static llvm::Value *emitInterWarpCopyFun // While warp 0 copies values from transfer medium, all other warps must // wait. - syncParallelThreads(CGF, NumActiveThreads); + // kmpc_barrier. 
+ CGM.getOpenMPRuntime().emitBarrierCall(CGF, Loc, OMPD_unknown, + /*EmitChecks=*/false, + /*ForceSimpleCall=*/true); if (NumIters > 1) { Cnt = Bld.CreateNSWAdd(Cnt, llvm::ConstantInt::get(CGM.IntTy, /*V=*/1)); CGF.EmitStoreOfScalar(Cnt, CntAddr, /*Volatile=*/false, C.IntTy); Modified: cfe/trunk/test/OpenMP/nvptx_target_parallel_reduction_codegen.cpp URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/OpenMP/nvptx_target_parallel_reduction_codegen.cpp?rev=349192&r1=349191&r2=349192&view=diff ============================================================================== --- cfe/trunk/test/OpenMP/nvptx_target_parallel_reduction_codegen.cpp (original) +++ cfe/trunk/test/OpenMP/nvptx_target_parallel_reduction_codegen.cpp Fri Dec 14 13:00:58 2018 @@ -209,9 +209,8 @@ int bar(int n){ // // Barrier after copy to shared memory storage medium. // CHECK: [[COPY_CONT]] - // CHECK: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() - // CHECK: [[ACTIVE_THREADS:%.+]] = mul nsw i32 [[ACTIVE_WARPS:%.+]], [[WS]] - // CHECK: call void @llvm.nvvm.barrier(i32 1, i32 [[ACTIVE_THREADS]]) + // CHECK: call void @__kmpc_barrier(%struct.ident_t* @ + // CHECK: [[ACTIVE_WARPS:%.+]] = load i32, i32* // // Read into warp 0. // CHECK: [[IS_W0_ACTIVE_THREAD:%.+]] = icmp ult i32 [[TID:%.+]], [[ACTIVE_WARPS]] @@ -231,7 +230,7 @@ int bar(int n){ // CHECK: br label {{%?}}[[READ_CONT]] // // CHECK: [[READ_CONT]] - // CHECK: call void @llvm.nvvm.barrier(i32 1, i32 [[ACTIVE_THREADS]]) + // CHECK: call void @__kmpc_barrier(%struct.ident_t* @ // CHECK: [[NEXT:%.+]] = add nsw i32 [[CNT]], 1 // CHECK: store i32 [[NEXT]], i32* [[CNT_ADDR]], // CHECK: br label @@ -446,9 +445,8 @@ int bar(int n){ // // Barrier after copy to shared memory storage medium. 
// CHECK: [[COPY_CONT]] - // CHECK: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() - // CHECK: [[ACTIVE_THREADS:%.+]] = mul nsw i32 [[ACTIVE_WARPS:%.+]], [[WS]] - // CHECK: call void @llvm.nvvm.barrier(i32 1, i32 [[ACTIVE_THREADS]]) + // CHECK: call void @__kmpc_barrier(%struct.ident_t* @ + // CHECK: [[ACTIVE_WARPS:%.+]] = load i32, i32* // // Read into warp 0. // CHECK: [[IS_W0_ACTIVE_THREAD:%.+]] = icmp ult i32 [[TID:%.+]], [[ACTIVE_WARPS]] @@ -467,7 +465,7 @@ int bar(int n){ // CHECK: br label {{%?}}[[READ_CONT]] // // CHECK: [[READ_CONT]] - // CHECK: call void @llvm.nvvm.barrier(i32 1, i32 [[ACTIVE_THREADS]]) + // CHECK: call void @__kmpc_barrier(%struct.ident_t* @ // CHECK: [[IS_WARP_MASTER:%.+]] = icmp eq i32 [[LANEID]], 0 // CHECK: br i1 [[IS_WARP_MASTER]], label {{%?}}[[DO_COPY:.+]], label {{%?}}[[COPY_ELSE:.+]] // @@ -486,9 +484,8 @@ int bar(int n){ // // Barrier after copy to shared memory storage medium. // CHECK: [[COPY_CONT]] - // CHECK: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() - // CHECK: [[ACTIVE_THREADS:%.+]] = mul nsw i32 [[ACTIVE_WARPS:%.+]], [[WS]] - // CHECK: call void @llvm.nvvm.barrier(i32 1, i32 [[ACTIVE_THREADS]]) + // CHECK: call void @__kmpc_barrier(%struct.ident_t* @ + // CHECK: [[ACTIVE_WARPS:%.+]] = load i32, i32* // // Read into warp 0. // CHECK: [[IS_W0_ACTIVE_THREAD:%.+]] = icmp ult i32 [[TID:%.+]], [[ACTIVE_WARPS]] @@ -507,7 +504,7 @@ int bar(int n){ // CHECK: br label {{%?}}[[READ_CONT]] // // CHECK: [[READ_CONT]] - // CHECK: call void @llvm.nvvm.barrier(i32 1, i32 [[ACTIVE_THREADS]]) + // CHECK: call void @__kmpc_barrier(%struct.ident_t* @ // CHECK: ret @@ -761,9 +758,8 @@ int bar(int n){ // // Barrier after copy to shared memory storage medium. 
// CHECK: [[COPY_CONT]] - // CHECK: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() - // CHECK: [[ACTIVE_THREADS:%.+]] = mul nsw i32 [[ACTIVE_WARPS:%.+]], [[WS]] - // CHECK: call void @llvm.nvvm.barrier(i32 1, i32 [[ACTIVE_THREADS]]) + // CHECK: call void @__kmpc_barrier(%struct.ident_t* @ + // CHECK: [[ACTIVE_WARPS:%.+]] = load i32, i32* // // Read into warp 0. // CHECK: [[IS_W0_ACTIVE_THREAD:%.+]] = icmp ult i32 [[TID:%.+]], [[ACTIVE_WARPS]] @@ -782,7 +778,7 @@ int bar(int n){ // CHECK: br label {{%?}}[[READ_CONT]] // // CHECK: [[READ_CONT]] - // CHECK: call void @llvm.nvvm.barrier(i32 1, i32 [[ACTIVE_THREADS]]) + // CHECK: call void @__kmpc_barrier(%struct.ident_t* @ // CHECK: [[IS_WARP_MASTER:%.+]] = icmp eq i32 [[LANEID]], 0 // CHECK: br i1 [[IS_WARP_MASTER]], label {{%?}}[[DO_COPY:.+]], label {{%?}}[[COPY_ELSE:.+]] // @@ -802,9 +798,8 @@ int bar(int n){ // // Barrier after copy to shared memory storage medium. // CHECK: [[COPY_CONT]] - // CHECK: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() - // CHECK: [[ACTIVE_THREADS:%.+]] = mul nsw i32 [[ACTIVE_WARPS:%.+]], [[WS]] - // CHECK: call void @llvm.nvvm.barrier(i32 1, i32 [[ACTIVE_THREADS]]) + // CHECK: call void @__kmpc_barrier(%struct.ident_t* @ + // CHECK: [[ACTIVE_WARPS:%.+]] = load i32, i32* // // Read into warp 0. 
// CHECK: [[IS_W0_ACTIVE_THREAD:%.+]] = icmp ult i32 [[TID:%.+]], [[ACTIVE_WARPS]] @@ -824,7 +819,7 @@ int bar(int n){ // CHECK: br label {{%?}}[[READ_CONT]] // // CHECK: [[READ_CONT]] - // CHECK: call void @llvm.nvvm.barrier(i32 1, i32 [[ACTIVE_THREADS]]) + // CHECK: call void @__kmpc_barrier(%struct.ident_t* @ // CHECK: ret #endif Modified: cfe/trunk/test/OpenMP/nvptx_teams_reduction_codegen.cpp URL: http://llvm.org/viewvc/llvm-project/cfe/trunk/test/OpenMP/nvptx_teams_reduction_codegen.cpp?rev=349192&r1=349191&r2=349192&view=diff ============================================================================== --- cfe/trunk/test/OpenMP/nvptx_teams_reduction_codegen.cpp (original) +++ cfe/trunk/test/OpenMP/nvptx_teams_reduction_codegen.cpp Fri Dec 14 13:00:58 2018 @@ -412,9 +412,8 @@ int bar(int n){ // // Barrier after copy to shared memory storage medium. // CHECK: [[COPY_CONT]] - // CHECK: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() - // CHECK: [[ACTIVE_THREADS:%.+]] = mul nsw i32 [[ACTIVE_WARPS:%.+]], [[WS]] - // CHECK: call void @llvm.nvvm.barrier(i32 1, i32 [[ACTIVE_THREADS]]) + // CHECK: call void @__kmpc_barrier(%struct.ident_t* @ + // CHECK: [[ACTIVE_WARPS:%.+]] = load i32, i32* // // Read into warp 0. // CHECK: [[IS_W0_ACTIVE_THREAD:%.+]] = icmp ult i32 [[TID:%.+]], [[ACTIVE_WARPS]] @@ -433,7 +432,7 @@ int bar(int n){ // CHECK: br label {{%?}}[[READ_CONT]] // // CHECK: [[READ_CONT]] - // CHECK: call void @llvm.nvvm.barrier(i32 1, i32 [[ACTIVE_THREADS]]) + // CHECK: call void @__kmpc_barrier(%struct.ident_t* @ // CHECK: [[IS_WARP_MASTER:%.+]] = icmp eq i32 [[LANEID]], 0 // CHECK: br i1 [[IS_WARP_MASTER]], label {{%?}}[[DO_COPY:.+]], label {{%?}}[[COPY_ELSE:.+]] // @@ -453,9 +452,8 @@ int bar(int n){ // // Barrier after copy to shared memory storage medium. 
// CHECK: [[COPY_CONT]] - // CHECK: [[WS:%.+]] = call i32 @llvm.nvvm.read.ptx.sreg.warpsize() - // CHECK: [[ACTIVE_THREADS:%.+]] = mul nsw i32 [[ACTIVE_WARPS:%.+]], [[WS]] - // CHECK: call void @llvm.nvvm.barrier(i32 1, i32 [[ACTIVE_THREADS]]) + // CHECK: call void @__kmpc_barrier(%struct.ident_t* @ + // CHECK: [[ACTIVE_WARPS:%.+]] = load i32, i32* // // Read into warp 0. // CHECK: [[IS_W0_ACTIVE_THREAD:%.+]] = icmp ult i32 [[TID:%.+]], [[ACTIVE_WARPS]] @@ -475,7 +473,7 @@ int bar(int n){ // CHECK: br label {{%?}}[[READ_CONT]] // // CHECK: [[READ_CONT]] - // CHECK: call void @llvm.nvvm.barrier(i32 1, i32 [[ACTIVE_THREADS]]) + // CHECK: call void @__kmpc_barrier(%struct.ident_t* @ // CHECK: ret #endif _______________________________________________ cfe-commits mailing list cfe-commits@lists.llvm.org http://lists.llvm.org/cgi-bin/mailman/listinfo/cfe-commits