Author: Frederik Gossen Date: 2020-12-14T10:45:28+01:00 New Revision: 75d9a46090249ed8abfafea090e686b4fecf182f
URL: https://github.com/llvm/llvm-project/commit/75d9a46090249ed8abfafea090e686b4fecf182f DIFF: https://github.com/llvm/llvm-project/commit/75d9a46090249ed8abfafea090e686b4fecf182f.diff LOG: [MLIR] Add atan and atan2 lowerings to CUDA intrinsics Differential Revision: https://reviews.llvm.org/D93124 Added: Modified: mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir Removed: ################################################################################ diff --git a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp index 3e90894e2fe9..a8b4d074a08e 100644 --- a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp +++ b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp @@ -173,6 +173,10 @@ void mlir::populateGpuToNVVMConversionPatterns( GPUFuncOpLowering<0>>(converter); patterns.insert<OpToFuncCallLowering<AbsFOp>>(converter, "__nv_fabsf", "__nv_fabs"); + patterns.insert<OpToFuncCallLowering<AtanOp>>(converter, "__nv_atanf", + "__nv_atan"); + patterns.insert<OpToFuncCallLowering<Atan2Op>>(converter, "__nv_atan2f", + "__nv_atan2"); patterns.insert<OpToFuncCallLowering<CeilFOp>>(converter, "__nv_ceilf", "__nv_ceil"); patterns.insert<OpToFuncCallLowering<CosOp>>(converter, "__nv_cosf", diff --git a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir index 1c0257de0d55..d8c4d7e064e5 100644 --- a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir +++ b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir @@ -335,6 +335,47 @@ gpu.module @test_module { // ----- +gpu.module @test_module { + // CHECK: llvm.func @__nv_atanf(!llvm.float) -> !llvm.float + // CHECK: llvm.func @__nv_atan(!llvm.double) -> !llvm.double + // CHECK-LABEL: func @gpu_atan + func @gpu_atan(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) + -> (f16, f32, f64) { + %result16 = std.atan %arg_f16 : f16 + // CHECK: llvm.fpext %{{.*}} : !llvm.half to !llvm.float + // CHECK-NEXT: llvm.call @__nv_atanf(%{{.*}}) : (!llvm.float) -> !llvm.float + // CHECK-NEXT: llvm.fptrunc %{{.*}} : !llvm.float to !llvm.half + %result32 = std.atan %arg_f32 : f32 + // CHECK: llvm.call @__nv_atanf(%{{.*}}) : (!llvm.float) -> !llvm.float + %result64 = std.atan %arg_f64 : f64 + // CHECK: llvm.call @__nv_atan(%{{.*}}) : (!llvm.double) -> !llvm.double + std.return %result16, %result32, %result64 : f16, f32, f64 + } +} + +// ----- + +gpu.module @test_module { + // CHECK: llvm.func @__nv_atan2f(!llvm.float, !llvm.float) -> !llvm.float + // CHECK: llvm.func @__nv_atan2(!llvm.double, !llvm.double) -> !llvm.double + // CHECK-LABEL: func @gpu_atan2 + func @gpu_atan2(%arg_f16 : f16, %arg_f32 : f32, %arg_f64 : f64) + -> (f16, f32, f64) { + %result16 = std.atan2 %arg_f16, %arg_f16 : f16 + // CHECK: llvm.fpext %{{.*}} : !llvm.half to !llvm.float + // CHECK: llvm.fpext %{{.*}} : !llvm.half to !llvm.float + // CHECK-NEXT: llvm.call @__nv_atan2f(%{{.*}}) : (!llvm.float, !llvm.float) -> !llvm.float + // CHECK-NEXT: llvm.fptrunc %{{.*}} : !llvm.float to !llvm.half + %result32 = std.atan2 %arg_f32, %arg_f32 : f32 + // CHECK: llvm.call @__nv_atan2f(%{{.*}}) : (!llvm.float, !llvm.float) -> !llvm.float + %result64 = std.atan2 %arg_f64, %arg_f64 : f64 + // CHECK: llvm.call @__nv_atan2(%{{.*}}) : (!llvm.double, !llvm.double) -> !llvm.double + std.return %result16, %result32, %result64 : f16, f32, f64 + } +} + +// ----- + // Test that we handled properly operation with SymbolTable other than module op gpu.module @test_module { "test.symbol_scope"() ({ _______________________________________________ llvm-branch-commits mailing list llvm-branch-commits@lists.llvm.org https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-branch-commits