commit:     f0352be064f33d986811b7e806e2051979e0209c
Author:     Sv. Lockal <lockalsash <AT> gmail <DOT> com>
AuthorDate: Sun Mar 23 15:44:28 2025 +0000
Commit:     Sam James <sam <AT> gentoo <DOT> org>
CommitDate: Sun Jun 15 16:10:23 2025 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=f0352be0

sci-libs/composable-kernel: fix compilation with AMDGPU_TARGETS="gfx1012"

This adds a few patches from 
https://github.com/ROCm/composable_kernel/issues/775#issuecomment-2726315348 
that allow compiling for RDNA1 GPUs.
It also limits the number of build threads (jobs), expecting ~2 GB of RAM usage 
per thread (as suggested in the composable_kernel README).

Closes: https://bugs.gentoo.org/947583
Signed-off-by: Sv. Lockal <lockalsash <AT> gmail.com>
Part-of: https://github.com/gentoo/gentoo/pull/41240
Closes: https://github.com/gentoo/gentoo/pull/41240
Signed-off-by: Sam James <sam <AT> gentoo.org>

 .../composable-kernel-6.3.0.ebuild                 |  31 ++++-
 .../files/composable-kernel-6.3.0-expand-isa.patch | 140 +++++++++++++++++++++
 2 files changed, 168 insertions(+), 3 deletions(-)

diff --git a/sci-libs/composable-kernel/composable-kernel-6.3.0.ebuild 
b/sci-libs/composable-kernel/composable-kernel-6.3.0.ebuild
index bbd6854cb1e8..44062a2f9eef 100644
--- a/sci-libs/composable-kernel/composable-kernel-6.3.0.ebuild
+++ b/sci-libs/composable-kernel/composable-kernel-6.3.0.ebuild
@@ -1,4 +1,4 @@
-# Copyright 1999-2024 Gentoo Authors
+# Copyright 1999-2025 Gentoo Authors
 # Distributed under the terms of the GNU General Public License v2
 
 # shellcheck disable=SC2317
@@ -7,7 +7,7 @@ EAPI=8
 ROCM_VERSION=${PV}
 PYTHON_COMPAT=( python3_{10..13} python3_13t )
 
-inherit cmake flag-o-matic python-r1 rocm
+inherit check-reqs cmake flag-o-matic multiprocessing python-r1 rocm
 
 GTEST_COMMIT="b85864c64758dec007208e56af933fc3f52044ee"
 GTEST_FILE="gtest-1.14.0_p20220421.tar.gz"
@@ -43,15 +43,40 @@ PATCHES=(
        "${FILESDIR}"/${PN}-6.3.0-no-inline-all.patch
        "${FILESDIR}"/${PN}-6.3.0-conditional-kernels.patch
        "${FILESDIR}"/${PN}-6.3.0-conditional-ckprofiler.patch
+       "${FILESDIR}"/${PN}-6.3.0-expand-isa.patch
 )
 
-pkg_pretend() {
+ck_check-reqs() {
+       [[ ${MERGE_TYPE} == binary ]] && return
+
        targets=($AMDGPU_TARGETS)
        if [[ ${#targets[@]} -gt 1 ]]; then
                ewarn "composable-kernel will be compiled for multiple GPU 
architectures,"
                ewarn "which will take a significant amount of time."
                ewarn "Please consider setting AMDGPU_TARGETS USE_EXPAND 
variable to a single architecture."
        fi
+
+       # It takes ~2Gb of RAM per build thread
+       local user_jobs=$(makeopts_jobs)
+       local free_memory_mb=$(free -m | awk '/Mem:/ {print $4}')
+       local max_jobs=$(( free_memory_mb / 2048 ))
+       max_jobs=$(( max_jobs < 1 ? 1 : max_jobs ))
+       local limited_jobs=$(( user_jobs < max_jobs ? user_jobs : max_jobs ))
+       if [[ "${max_jobs}" -lt "${user_jobs}" ]]; then
+               ewarn "${free_memory_mb} MB of free RAM is not enough for 
${user_jobs} parallel build jobs (~2Gb per job)."
+               ewarn "Please consider setting MAKEOPTS=\"-j${limited_jobs}\" 
for this package."
+       fi
+
+       local CHECKREQS_MEMORY=$((user_jobs*2048))M
+       check-reqs_${EBUILD_PHASE_FUNC}
+}
+
+pkg_pretend() {
+       ck_check-reqs
+}
+
+pkg_setup() {
+       ck_check-reqs
 }
 
 src_prepare() {

diff --git 
a/sci-libs/composable-kernel/files/composable-kernel-6.3.0-expand-isa.patch 
b/sci-libs/composable-kernel/files/composable-kernel-6.3.0-expand-isa.patch
new file mode 100644
index 000000000000..2593e8c5e25e
--- /dev/null
+++ b/sci-libs/composable-kernel/files/composable-kernel-6.3.0-expand-isa.patch
@@ -0,0 +1,140 @@
+Fix for "undeclared identifier 'CK_BUFFER_RESOURCE_3RD_DWORD'" for 
AMDGPU_TARGETS="gfx1012".
+Combines 3 patches from 
https://github.com/ROCm/composable_kernel/issues/775#issuecomment-2726315348
+
+Bug: https://bugs.gentoo.org/947583
+--- a/include/ck/ck.hpp
++++ b/include/ck/ck.hpp
+@@ -78,7 +78,7 @@ CK_DECLARE_ENV_VAR_BOOL(CK_LOGGING)
+ #define CK_BUFFER_RESOURCE_3RD_DWORD -1
+ #elif defined(__gfx803__) || defined(__gfx900__) || defined(__gfx906__) || 
defined(__gfx9__)
+ #define CK_BUFFER_RESOURCE_3RD_DWORD 0x00020000
+-#elif defined(__gfx103__)
++#elif defined(__gfx101__) || defined(__gfx103__)
+ #define CK_BUFFER_RESOURCE_3RD_DWORD 0x31014000
+ #elif defined(__gfx11__) || defined(__gfx12__)
+ #define CK_BUFFER_RESOURCE_3RD_DWORD 0x31004000
+@@ -86,12 +86,12 @@ CK_DECLARE_ENV_VAR_BOOL(CK_LOGGING)
+ 
+ // FMA instruction
+ #ifndef __HIP_DEVICE_COMPILE__                   // for host code, define 
nothing
+-#elif defined(__gfx803__) || defined(__gfx900__) // for GPU code
+-#define CK_USE_AMD_V_MAC_F32
+-#elif defined(__gfx906__) || defined(__gfx9__) || defined(__gfx103__) // for 
GPU code
++#elif defined(__gfx906__) || defined(__gfx9__) || defined(__gfx103__) || 
defined(__gfx1011__) || defined(__gfx1012__) // for GPU code
+ #define CK_USE_AMD_V_FMAC_F32
+ #define CK_USE_AMD_V_DOT2_F32_F16
+ #define CK_USE_AMD_V_DOT4_I32_I8
++#elif defined(__gfx803__) || defined(__gfx900__) || defined(__gfx101__) // 
for GPU code
++#define CK_USE_AMD_V_MAC_F32
+ #elif defined(__gfx11__) || defined(__gfx12__)
+ #define CK_USE_AMD_V_FMAC_F32
+ #define CK_USE_AMD_V_DOT2_F32_F16
+--- 
a/include/ck/tensor_operation/gpu/device/impl/device_batched_gemm_multiple_d_dl.hpp
++++ 
b/include/ck/tensor_operation/gpu/device/impl/device_batched_gemm_multiple_d_dl.hpp
+@@ -71,7 +71,7 @@ __global__ void
+             const Block2CTileMap block_2_ctile_map)
+ {
+ #if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx906__) || 
defined(__gfx908__) ||         \
+-    defined(__gfx90a__) || defined(__gfx94__) || defined(__gfx103__) || 
defined(__gfx11__) || \
++    defined(__gfx90a__) || defined(__gfx94__) || defined(__gfx101__) || 
defined(__gfx103__) || defined(__gfx11__) || \
+     defined(__gfx12__))
+ 
+     const index_t num_blocks_per_batch =
+--- a/include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_dl.hpp
++++ b/include/ck/tensor_operation/gpu/device/impl/device_gemm_multiple_d_dl.hpp
+@@ -51,7 +51,7 @@ __global__ void
+             const Block2CTileMap block_2_ctile_map)
+ {
+ #if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx906__) || 
defined(__gfx908__) ||         \
+-    defined(__gfx90a__) || defined(__gfx94__) || defined(__gfx103__) || 
defined(__gfx11__) || \
++    defined(__gfx90a__) || defined(__gfx94__) || defined(__gfx101__) || 
defined(__gfx103__) || defined(__gfx11__) || \
+     defined(__gfx12__))
+ 
+     constexpr index_t shared_block_size =
+--- 
a/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_bwd_weight_dl.hpp
++++ 
b/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_bwd_weight_dl.hpp
+@@ -48,7 +48,7 @@ __global__ void
+             const Block2CTileMap block_2_ctile_map,
+             const ComputePtrOffsetOfBatch compute_ptr_offset_of_batch)
+ {
+-#if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx906__) || 
defined(__gfx103__) ||         \
++#if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx906__) || 
defined(__gfx101__) || defined(__gfx103__) ||         \
+     defined(__gfx90a__) || defined(__gfx908__) || defined(__gfx94__) || 
defined(__gfx11__) || \
+     defined(__gfx12__))
+     const index_t num_blocks_per_batch =
+--- 
a/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_dl_multiple_d_nhwc_kyxc_nhwk.hpp
++++ 
b/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_dl_multiple_d_nhwc_kyxc_nhwk.hpp
+@@ -90,7 +90,7 @@ __global__ void
+             const Block2CTileMap block_2_ctile_map,
+             const ComputePtrOffsetOfBatch compute_ptr_offset_of_batch)
+ {
+-#if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx906__) || 
defined(__gfx103__) ||         \
++#if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx906__) || 
defined(__gfx101__) || defined(__gfx103__) ||         \
+     defined(__gfx90a__) || defined(__gfx908__) || defined(__gfx94__) || 
defined(__gfx11__) || \
+     defined(__gfx12__))
+     // offset base pointer for each work-group
+--- 
a/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_dl_nhwc_kyxc_nhwk.hpp
++++ 
b/include/ck/tensor_operation/gpu/device/impl/device_grouped_conv_fwd_dl_nhwc_kyxc_nhwk.hpp
+@@ -106,7 +106,7 @@ __global__ void
+             const Block2CTileMap block_2_ctile_map,
+             const ComputePtrOffsetOfBatch compute_ptr_offset_of_batch)
+ {
+-#if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx906__) || 
defined(__gfx103__) || \
++#if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx906__) || 
defined(__gfx101__) || defined(__gfx103__) || \
+     defined(__gfx11__) || defined(__gfx12__))
+     // offset base pointer for each work-group
+     const index_t num_blocks_per_batch =
+--- 
a/include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_multiple_d_dl.hpp
++++ 
b/include/ck/tensor_operation/gpu/device/impl/device_grouped_gemm_multiple_d_dl.hpp
+@@ -40,7 +40,7 @@ __global__ void
+                                           const CDEElementwiseOperation 
cde_element_op)
+ {
+ #if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx906__) || 
defined(__gfx908__) ||         \
+-    defined(__gfx90a__) || defined(__gfx103__) || defined(__gfx11__) || 
defined(__gfx94__) || \
++    defined(__gfx90a__) || defined(__gfx101__) || defined(__gfx103__) || 
defined(__gfx11__) || defined(__gfx94__) || \
+     defined(__gfx12__))
+     __shared__ char p_shared[GridwiseGemm::GetSharedMemoryNumberOfByte()];
+ 
+--- a/include/ck/tensor_operation/gpu/grid/gridwise_gemm_dpp.hpp
++++ b/include/ck/tensor_operation/gpu/grid/gridwise_gemm_dpp.hpp
+@@ -28,7 +28,7 @@ __global__ void
+ #endif
+         kernel_gemm_dpp(const typename GridwiseGemm::Argument karg)
+ {
+-#if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx103__) || 
defined(__gfx11__))
++#if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx101__) || 
defined(__gfx103__) || defined(__gfx11__))
+     __shared__ char p_shared[GridwiseGemm::GetSharedMemoryNumberOfByte()];
+ 
+     const auto a_grid_desc_ak0_m_ak1 = amd_wave_read_first_lane(
+--- a/include/ck/tensor_operation/gpu/grid/gridwise_tensor_rearrange.hpp
++++ b/include/ck/tensor_operation/gpu/grid/gridwise_tensor_rearrange.hpp
+@@ -36,7 +36,7 @@ __global__ void
+                                 const ComputePtrOffsetOfStridedBatch 
compute_ptr_offset_of_batch)
+ {
+ #if(!defined(__HIP_DEVICE_COMPILE__) || defined(__gfx906__) || 
defined(__gfx908__) ||         \
+-    defined(__gfx90a__) || defined(__gfx94__) || defined(__gfx103__) || 
defined(__gfx11__) || \
++    defined(__gfx90a__) || defined(__gfx94__) || defined(__gfx101__) || 
defined(__gfx103__) || defined(__gfx11__) || \
+     defined(__gfx12__))
+     GridwiseTensorRearrangeKernel::Run(in_grid_desc,
+                                        p_in_global,
+--- a/include/ck_tile/core/config.hpp
++++ b/include/ck_tile/core/config.hpp
+@@ -10,6 +10,9 @@
+ #if defined(__gfx940__) || defined(__gfx941__) || defined(__gfx942__)
+ #define __gfx94__
+ #endif
++#if defined(__gfx1010__) || defined(__gfx1011__) || defined(__gfx1012__)
++#define __gfx101__
++#endif
+ #if defined(__gfx1030__) || defined(__gfx1031__) || defined(__gfx1032__) || \
+     defined(__gfx1034__) || defined(__gfx1035__) || defined(__gfx1036__)
+ #define __gfx103__
+@@ -177,7 +180,7 @@
+ #elif defined(__gfx803__) || defined(__gfx900__) || defined(__gfx906__) || \
+     defined(__gfx9__) // for GPU code
+ #define CK_TILE_BUFFER_RESOURCE_3RD_DWORD 0x00020000
+-#elif defined(__gfx103__) // for GPU code
++#elif defined(__gfx101__) || defined(__gfx103__) // for GPU code
+ #define CK_TILE_BUFFER_RESOURCE_3RD_DWORD 0x31014000
+ #elif defined(__gfx11__) || defined(__gfx12__) // for GPU code
+ #define CK_TILE_BUFFER_RESOURCE_3RD_DWORD 0x31004000

Reply via email to