commit:     e3b7cde9d825c40eba0c1ee43a71d9455aa77c2c
Author:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
AuthorDate: Fri Aug  9 16:12:10 2024 +0000
Commit:     Alfredo Tupone <tupone <AT> gentoo <DOT> org>
CommitDate: Fri Aug  9 16:13:52 2024 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=e3b7cde9

sci-libs/caffe2: update SRC_URI

Signed-off-by: Alfredo Tupone <tupone <AT> gentoo.org>

 sci-libs/caffe2/Manifest                           |   2 +-
 sci-libs/caffe2/caffe2-2.2.2-r1.ebuild             |  23 +-
 sci-libs/caffe2/caffe2-2.3.0-r3.ebuild             |  33 +--
 sci-libs/caffe2/caffe2-2.3.1.ebuild                |  33 +--
 sci-libs/caffe2/caffe2-2.4.0.ebuild                |   2 +-
 .../caffe2/files/caffe2-1.12.0-glog-0.6.0.patch    |  29 ---
 .../caffe2/files/caffe2-1.13.0-install-dirs.patch  | 121 -----------
 .../caffe2/files/caffe2-1.13.1-tensorpipe.patch    |  10 -
 .../files/caffe2-2.0.0-cudnn_include_fix.patch     |  12 --
 sci-libs/caffe2/files/caffe2-2.0.0-gcc13.patch     |  41 ----
 .../files/caffe2-2.1.2-fix-openmp-link.patch       |  15 --
 sci-libs/caffe2/files/caffe2-2.1.2-fix-rpath.patch |  12 --
 .../files/caffe2-2.1.2-rocm-fix-std-cpp17.patch    |  68 ------
 sci-libs/caffe2/files/caffe2-2.2.1-gentoo.patch    | 195 -----------------
 sci-libs/caffe2/files/caffe2-2.2.2-musl.patch      |  13 --
 sci-libs/caffe2/files/caffe2-2.3.0-CMakeFix.patch  |  11 -
 .../files/caffe2-2.3.0-cudnn_include_fix.patch     |  11 -
 .../files/caffe2-2.3.0-exclude-aotriton.patch      |  35 ---
 .../caffe2-2.3.0-fix-gcc-clang-abi-compat.patch    |  17 --
 .../caffe2/files/caffe2-2.3.0-fix-libcpp.patch     |  24 ---
 .../files/caffe2-2.3.0-fix-rocm-gcc14-clamp.patch  |  18 --
 .../files/caffe2-2.3.0-optional-hipblaslt.patch    | 235 ---------------------
 .../files/caffe2-2.3.0-rocm-fix-std-cpp17.patch    |  68 ------
 23 files changed, 48 insertions(+), 980 deletions(-)

diff --git a/sci-libs/caffe2/Manifest b/sci-libs/caffe2/Manifest
index 0ab8e6d1b824..d190ba229493 100644
--- a/sci-libs/caffe2/Manifest
+++ b/sci-libs/caffe2/Manifest
@@ -1,4 +1,4 @@
-DIST caffe2-20240809.tar.gz 15242 BLAKE2B 77503c61487e7d85cca5afcab9a6e638f9833a70861845638cf1b62bc492d7b6650e6db81d53ebb2f39c6313509250d339f725f04d03ec6dd23dd0cf70843d8c SHA512 74b3b0b6671b655ecac93f7436c4ed7cb0157a83aafbf6afcc0811e11cef341cd8f638db1a111bcbb01e1a6dd4daf3a36b96d7a8ce90f04c2fa091bd6e3a142b
+DIST caffe2-patches-20240809.tar.gz 15242 BLAKE2B 77503c61487e7d85cca5afcab9a6e638f9833a70861845638cf1b62bc492d7b6650e6db81d53ebb2f39c6313509250d339f725f04d03ec6dd23dd0cf70843d8c SHA512 74b3b0b6671b655ecac93f7436c4ed7cb0157a83aafbf6afcc0811e11cef341cd8f638db1a111bcbb01e1a6dd4daf3a36b96d7a8ce90f04c2fa091bd6e3a142b
 DIST pytorch-2.2.2.tar.gz 116367503 BLAKE2B 0be22f2ec4b9aac6f5e976664cae01facf07929a32565cd57d7cc5b2d9888e9ae71ca301853752fe8f31d174d04c9974eb9ed2f3d452360a50ccf024f200726a SHA512 7990e0f9484038c3458c0bda2c863bf2b19e56edab81fc5938c6e0f08b17558287f853bb67350e8cca8f42bec0f1d4ba0e94e50a145db8da44bdd4bd703d91d0
 DIST pytorch-2.3.0.tar.gz 117029829 BLAKE2B 8f9c0d71ee0a9219b495eddccdcc65107f7ad537c43c68100b229f3d27b0e6c01ccb1659c7fffc356a48d80f2adc0a10361305dc8f1df20446de837d380f89f6 SHA512 67f7e9a096c3ffb952206ebf9105bedebb68c24ad82456083adf1d1d210437fcaa9dd52b68484cfc97d408c9eebc9541c76868c34a7c9982494dc3f424cfb07c
 DIST pytorch-2.3.1.tar.gz 117035696 BLAKE2B d419d7fa1342f1fb317ffce09ec9dc1447414627cc83d36578fe60f68c283c620b2b4d49f414cd206d537b90b16432a06cd1941662720db05d5e2b6c493325f5 SHA512 e1bcae44f9939fc7ccb1360a9b1970d92426f25e5de73e36964df3dd15ad5d8d9f5bd2f9a7dda6b8f64e2bba3674005bd869f542489cc442ad0125a02676f587

diff --git a/sci-libs/caffe2/caffe2-2.2.2-r1.ebuild b/sci-libs/caffe2/caffe2-2.2.2-r1.ebuild
index 6649975ddf2d..773808bc4f76 100644
--- a/sci-libs/caffe2/caffe2-2.2.2-r1.ebuild
+++ b/sci-libs/caffe2/caffe2-2.2.2-r1.ebuild
@@ -13,7 +13,8 @@ MYP=${MYPN}-${PV}
 DESCRIPTION="A deep learning framework"
 HOMEPAGE="https://pytorch.org/"
 SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
-       -> ${MYP}.tar.gz"
+       -> ${MYP}.tar.gz
+       https://dev.gentoo.org/~tupone/distfiles/${PN}-patches-20240809.tar.gz"
 
 S="${WORKDIR}"/${MYP}
 
@@ -97,16 +98,16 @@ DEPEND="
 "
 
 PATCHES=(
-       "${FILESDIR}"/${PN}-2.2.1-gentoo.patch
-       "${FILESDIR}"/${PN}-1.13.0-install-dirs.patch
-       "${FILESDIR}"/${PN}-1.12.0-glog-0.6.0.patch
-       "${FILESDIR}"/${PN}-1.13.1-tensorpipe.patch
-       "${FILESDIR}"/${PN}-2.0.0-gcc13.patch
-       "${FILESDIR}"/${PN}-2.0.0-cudnn_include_fix.patch
-       "${FILESDIR}"/${PN}-2.1.2-fix-rpath.patch
-       "${FILESDIR}"/${PN}-2.1.2-fix-openmp-link.patch
-       "${FILESDIR}"/${PN}-2.1.2-rocm-fix-std-cpp17.patch
-       "${FILESDIR}"/${P}-musl.patch
+       ../patches/${PN}-2.2.1-gentoo.patch
+       ../patches/${PN}-1.13.0-install-dirs.patch
+       ../patches/${PN}-1.12.0-glog-0.6.0.patch
+       ../patches/${PN}-1.13.1-tensorpipe.patch
+       ../patches/${PN}-2.0.0-gcc13.patch
+       ../patches/${PN}-2.0.0-cudnn_include_fix.patch
+       ../patches/${PN}-2.1.2-fix-rpath.patch
+       ../patches/${PN}-2.1.2-fix-openmp-link.patch
+       ../patches/${PN}-2.1.2-rocm-fix-std-cpp17.patch
+       ../patches/${P}-musl.patch
 )
 
 src_prepare() {

diff --git a/sci-libs/caffe2/caffe2-2.3.0-r3.ebuild b/sci-libs/caffe2/caffe2-2.3.0-r3.ebuild
index 666800d8f4b6..7fe4818311cb 100644
--- a/sci-libs/caffe2/caffe2-2.3.0-r3.ebuild
+++ b/sci-libs/caffe2/caffe2-2.3.0-r3.ebuild
@@ -13,7 +13,8 @@ MYP=${MYPN}-${PV}
 DESCRIPTION="A deep learning framework"
 HOMEPAGE="https://pytorch.org/"
 SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
-       -> ${MYP}.tar.gz"
+       -> ${MYP}.tar.gz
+       https://dev.gentoo.org/~tupone/distfiles/${PN}-patches-20240809.tar.gz"
 
 S="${WORKDIR}"/${MYP}
 
@@ -106,21 +107,21 @@ DEPEND="
 "
 
 PATCHES=(
-       "${FILESDIR}"/${PN}-2.2.1-gentoo.patch
-       "${FILESDIR}"/${PN}-1.13.0-install-dirs.patch
-       "${FILESDIR}"/${PN}-1.12.0-glog-0.6.0.patch
-       "${FILESDIR}"/${PN}-1.13.1-tensorpipe.patch
-       "${FILESDIR}"/${P}-cudnn_include_fix.patch
-       "${FILESDIR}"/${PN}-2.1.2-fix-rpath.patch
-       "${FILESDIR}"/${PN}-2.1.2-fix-openmp-link.patch
-       "${FILESDIR}"/${P}-rocm-fix-std-cpp17.patch
-       "${FILESDIR}"/${PN}-2.2.2-musl.patch
-       "${FILESDIR}"/${P}-CMakeFix.patch
-       "${FILESDIR}"/${PN}-2.3.0-exclude-aotriton.patch
-       "${FILESDIR}"/${PN}-2.3.0-fix-rocm-gcc14-clamp.patch
-       "${FILESDIR}"/${PN}-2.3.0-optional-hipblaslt.patch
-       "${FILESDIR}"/${PN}-2.3.0-fix-libcpp.patch
-       "${FILESDIR}"/${PN}-2.3.0-fix-gcc-clang-abi-compat.patch
+       ../patches/${PN}-2.2.1-gentoo.patch
+       ../patches/${PN}-1.13.0-install-dirs.patch
+       ../patches/${PN}-1.12.0-glog-0.6.0.patch
+       ../patches/${PN}-1.13.1-tensorpipe.patch
+       ../patches/${P}-cudnn_include_fix.patch
+       ../patches/${PN}-2.1.2-fix-rpath.patch
+       ../patches/${PN}-2.1.2-fix-openmp-link.patch
+       ../patches/${P}-rocm-fix-std-cpp17.patch
+       ../patches/${PN}-2.2.2-musl.patch
+       ../patches/${P}-CMakeFix.patch
+       ../patches/${PN}-2.3.0-exclude-aotriton.patch
+       ../patches/${PN}-2.3.0-fix-rocm-gcc14-clamp.patch
+       ../patches/${PN}-2.3.0-optional-hipblaslt.patch
+       ../patches/${PN}-2.3.0-fix-libcpp.patch
+       ../patches/${PN}-2.3.0-fix-gcc-clang-abi-compat.patch
 )
 
 src_prepare() {

diff --git a/sci-libs/caffe2/caffe2-2.3.1.ebuild b/sci-libs/caffe2/caffe2-2.3.1.ebuild
index ee1da28aa12f..ff2a9caebd59 100644
--- a/sci-libs/caffe2/caffe2-2.3.1.ebuild
+++ b/sci-libs/caffe2/caffe2-2.3.1.ebuild
@@ -13,7 +13,8 @@ MYP=${MYPN}-${PV}
 DESCRIPTION="A deep learning framework"
 HOMEPAGE="https://pytorch.org/"
 SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
-       -> ${MYP}.tar.gz"
+       -> ${MYP}.tar.gz
+       https://dev.gentoo.org/~tupone/distfiles/${PN}-patches-20240809.tar.gz"
 
 S="${WORKDIR}"/${MYP}
 
@@ -106,21 +107,21 @@ DEPEND="
 "
 
 PATCHES=(
-       "${FILESDIR}"/${PN}-2.2.1-gentoo.patch
-       "${FILESDIR}"/${PN}-1.13.0-install-dirs.patch
-       "${FILESDIR}"/${PN}-1.12.0-glog-0.6.0.patch
-       "${FILESDIR}"/${PN}-1.13.1-tensorpipe.patch
-       "${FILESDIR}"/${PN}-2.3.0-cudnn_include_fix.patch
-       "${FILESDIR}"/${PN}-2.1.2-fix-rpath.patch
-       "${FILESDIR}"/${PN}-2.1.2-fix-openmp-link.patch
-       "${FILESDIR}"/${PN}-2.3.0-rocm-fix-std-cpp17.patch
-       "${FILESDIR}"/${PN}-2.2.2-musl.patch
-       "${FILESDIR}"/${PN}-2.3.0-CMakeFix.patch
-       "${FILESDIR}"/${PN}-2.3.0-exclude-aotriton.patch
-       "${FILESDIR}"/${PN}-2.3.0-fix-rocm-gcc14-clamp.patch
-       "${FILESDIR}"/${PN}-2.3.0-optional-hipblaslt.patch
-       "${FILESDIR}"/${PN}-2.3.0-fix-libcpp.patch
-       "${FILESDIR}"/${PN}-2.3.0-fix-gcc-clang-abi-compat.patch
+       ../patches/${PN}-2.2.1-gentoo.patch
+       ../patches/${PN}-1.13.0-install-dirs.patch
+       ../patches/${PN}-1.12.0-glog-0.6.0.patch
+       ../patches/${PN}-1.13.1-tensorpipe.patch
+       ../patches/${PN}-2.3.0-cudnn_include_fix.patch
+       ../patches/${PN}-2.1.2-fix-rpath.patch
+       ../patches/${PN}-2.1.2-fix-openmp-link.patch
+       ../patches/${PN}-2.3.0-rocm-fix-std-cpp17.patch
+       ../patches/${PN}-2.2.2-musl.patch
+       ../patches/${PN}-2.3.0-CMakeFix.patch
+       ../patches/${PN}-2.3.0-exclude-aotriton.patch
+       ../patches/${PN}-2.3.0-fix-rocm-gcc14-clamp.patch
+       ../patches/${PN}-2.3.0-optional-hipblaslt.patch
+       ../patches/${PN}-2.3.0-fix-libcpp.patch
+       ../patches/${PN}-2.3.0-fix-gcc-clang-abi-compat.patch
 )
 
 src_prepare() {

diff --git a/sci-libs/caffe2/caffe2-2.4.0.ebuild b/sci-libs/caffe2/caffe2-2.4.0.ebuild
index 730ea4d365f2..524dafcaacef 100644
--- a/sci-libs/caffe2/caffe2-2.4.0.ebuild
+++ b/sci-libs/caffe2/caffe2-2.4.0.ebuild
@@ -14,7 +14,7 @@ DESCRIPTION="A deep learning framework"
 HOMEPAGE="https://pytorch.org/"
 SRC_URI="https://github.com/pytorch/${MYPN}/archive/refs/tags/v${PV}.tar.gz
        -> ${MYP}.tar.gz
-       https://dev.gentoo.org/~tupone/distfiles/caffe2-20240809.tar.gz"
+       https://dev.gentoo.org/~tupone/distfiles/${PN}-patches-20240809.tar.gz"
 
 S="${WORKDIR}"/${MYP}
 

diff --git a/sci-libs/caffe2/files/caffe2-1.12.0-glog-0.6.0.patch b/sci-libs/caffe2/files/caffe2-1.12.0-glog-0.6.0.patch
deleted file mode 100644
index 6c06d2cca654..000000000000
--- a/sci-libs/caffe2/files/caffe2-1.12.0-glog-0.6.0.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-https://github.com/pytorch/pytorch/issues/58054
-
---- a/c10/util/Logging.cpp
-+++ b/c10/util/Logging.cpp
-@@ -192,23 +192,13 @@
-     google::GLOG_WARNING,
-     "The minimum log level that caffe2 will output.");
- 
--// Google glog's api does not have an external function that allows one to check
--// if glog is initialized or not. It does have an internal function - so we are
--// declaring it here. This is a hack but has been used by a bunch of others too
--// (e.g. Torch).
--namespace google {
--namespace glog_internal_namespace_ {
--bool IsGoogleLoggingInitialized();
--} // namespace glog_internal_namespace_
--} // namespace google
--
- namespace c10 {
- namespace {
- 
- void initGoogleLogging(char const* name) {
- #if !defined(_MSC_VER)
-   // This trick can only be used on UNIX platforms
--  if (!::google::glog_internal_namespace_::IsGoogleLoggingInitialized())
-+  if (!::google::IsGoogleLoggingInitialized())
- #endif
-   {
-     ::google::InitGoogleLogging(name);

diff --git a/sci-libs/caffe2/files/caffe2-1.13.0-install-dirs.patch b/sci-libs/caffe2/files/caffe2-1.13.0-install-dirs.patch
deleted file mode 100644
index 299c9f88a173..000000000000
--- a/sci-libs/caffe2/files/caffe2-1.13.0-install-dirs.patch
+++ /dev/null
@@ -1,121 +0,0 @@
---- a/c10/CMakeLists.txt
-+++ b/c10/CMakeLists.txt
-@@ -112,7 +112,7 @@
- # Note: for now, we will put all export path into one single Caffe2Targets 
group
- # to deal with the cmake deployment need. Inside the Caffe2Targets set, the
- # individual libraries like libc10.so and libcaffe2.so are still 
self-contained.
--install(TARGETS c10 EXPORT Caffe2Targets DESTINATION lib)
-+install(TARGETS c10 EXPORT Caffe2Targets DESTINATION ${CMAKE_INSTALL_LIBDIR})
- install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
-         DESTINATION include
-         FILES_MATCHING PATTERN "*.h")
---- a/c10/cuda/CMakeLists.txt
-+++ b/c10/cuda/CMakeLists.txt
-@@ -64,7 +64,7 @@ add_subdirectory(test)
- # Note: for now, we will put all export path into one single Caffe2Targets 
group
- # to deal with the cmake deployment need. Inside the Caffe2Targets set, the
- # individual libraries like libc10.so and libcaffe2.so are still 
self-contained.
--install(TARGETS c10_cuda EXPORT Caffe2Targets DESTINATION lib)
-+install(TARGETS c10_cuda EXPORT Caffe2Targets DESTINATION 
${CMAKE_INSTALL_LIBDIR})
- foreach(file ${C10_CUDA_HEADERS})
-   get_filename_component( dir ${file} DIRECTORY )
-   install( FILES ${file} DESTINATION include/c10/cuda/${dir} )
---- a/c10/hip/CMakeLists.txt
-+++ b/c10/hip/CMakeLists.txt
-@@ -55,7 +55,7 @@ target_include_directories(
- add_subdirectory(test)
- 
- # ---[ Installation
--install(TARGETS c10_hip EXPORT Caffe2Targets DESTINATION lib)
-+install(TARGETS c10_hip EXPORT Caffe2Targets DESTINATION 
${CMAKE_INSTALL_LIBDIR})
- install(DIRECTORY ${CMAKE_CURRENT_LIST_DIR}
-         DESTINATION include
-         FILES_MATCHING PATTERN "*.h")
---- a/modules/detectron/CMakeLists.txt
-+++ b/modules/detectron/CMakeLists.txt
-@@ -20,7 +20,7 @@
-     if(USE_MKLDNN)
-       target_link_libraries(caffe2_detectron_ops_gpu PRIVATE caffe2::mkldnn)
-     endif()
--    install(TARGETS caffe2_detectron_ops_gpu DESTINATION lib)
-+    install(TARGETS caffe2_detectron_ops_gpu DESTINATION 
${CMAKE_INSTALL_LIBDIR})
-     if(MSVC)
-       install(FILES $<TARGET_PDB_FILE:caffe2_detectron_ops_gpu> DESTINATION 
lib OPTIONAL)
-     endif()
-@@ -37,7 +37,7 @@
-       target_link_libraries(caffe2_detectron_ops_hip PRIVATE caffe2::mkldnn)
-     endif()
-     target_link_libraries(caffe2_detectron_ops_hip PRIVATE torch)
--    install(TARGETS caffe2_detectron_ops_hip DESTINATION lib)
-+    install(TARGETS caffe2_detectron_ops_hip DESTINATION 
${CMAKE_INSTALL_LIBDIR})
-   elseif(NOT IOS_PLATFORM)
-     add_library(caffe2_detectron_ops SHARED ${Detectron_CPU_SRCS})
-     if(HAVE_SOVERSION)
-@@ -49,7 +49,7 @@
-     if(USE_MKLDNN)
-       target_link_libraries(caffe2_detectron_ops PRIVATE caffe2::mkldnn)
-     endif()
--    install(TARGETS caffe2_detectron_ops DESTINATION lib)
-+    install(TARGETS caffe2_detectron_ops DESTINATION ${CMAKE_INSTALL_LIBDIR})
-     if(MSVC)
-       install(FILES $<TARGET_PDB_FILE:caffe2_detectron_ops> DESTINATION lib 
OPTIONAL)
-     endif()
---- a/modules/module_test/CMakeLists.txt
-+++ b/modules/module_test/CMakeLists.txt
-@@ -16,7 +16,7 @@ if(BUILD_TEST AND NOT BUILD_LITE_INTERPRETER)
-         VERSION ${TORCH_VERSION} SOVERSION ${TORCH_SOVERSION})
-   endif()
-   target_link_libraries(caffe2_module_test_dynamic torch_library)
--  install(TARGETS caffe2_module_test_dynamic DESTINATION lib)
-+  install(TARGETS caffe2_module_test_dynamic DESTINATION 
${CMAKE_INSTALL_LIBDIR})
-   if(MSVC AND BUILD_SHARED_LIBS)
-     install(FILES $<TARGET_PDB_FILE:caffe2_module_test_dynamic> DESTINATION 
lib OPTIONAL)
-   endif()
---- a/modules/observers/CMakeLists.txt
-+++ b/modules/observers/CMakeLists.txt
-@@ -21,7 +21,7 @@ endif()
- target_link_libraries(caffe2_observers PUBLIC torch_library)
- target_include_directories(caffe2_observers PUBLIC 
${CMAKE_CURRENT_SOURCE_DIR}/..)
- target_compile_options(caffe2_observers PRIVATE "-DCAFFE2_BUILD_OBSERVER_LIB")
--install(TARGETS caffe2_observers DESTINATION lib)
-+install(TARGETS caffe2_observers DESTINATION ${CMAKE_INSTALL_LIBDIR})
- caffe2_interface_library(caffe2_observers caffe2_observers_library)
- if(MSVC AND BUILD_SHARED_LIBS)
-   install(FILES $<TARGET_PDB_FILE:caffe2_observers> DESTINATION lib OPTIONAL)
---- a/modules/rocksdb/CMakeLists.txt
-+++ b/modules/rocksdb/CMakeLists.txt
-@@ -63,7 +63,7 @@ add_library(caffe2_rocksdb 
${CMAKE_CURRENT_SOURCE_DIR}/rocksdb.cc)
- target_link_libraries(caffe2_rocksdb PUBLIC torch_library)
- target_link_libraries(caffe2_rocksdb PRIVATE ${RocksDB_LIBRARIES})
- target_include_directories(caffe2_rocksdb PRIVATE ${RocksDB_INCLUDE_DIR})
--install(TARGETS caffe2_rocksdb DESTINATION lib)
-+install(TARGETS caffe2_rocksdb DESTINATION ${CMAKE_INSTALL_LIBDIR})
- 
- # ---[ Last, Append the library to Caffe2_MODULES, if we are building with
- # the main repo.
---- a/test/cpp/c10d/CMakeLists.txt
-+++ b/test/cpp/c10d/CMakeLists.txt
-@@ -51,7 +51,7 @@ if(USE_CUDA)
-     if(INSTALL_TEST)
-       install(TARGETS ProcessGroupNCCLTest DESTINATION bin)
-       install(TARGETS ProcessGroupNCCLErrorsTest DESTINATION bin)
--      install(TARGETS c10d_cuda_test DESTINATION lib)
-+      install(TARGETS c10d_cuda_test DESTINATION ${CMAKE_INSTALL_LIBDIR})
-     endif()
-   endif()
-   if(USE_UCC AND USE_C10D_UCC)
---- a/test/cpp/jit/CMakeLists.txt
-+++ b/test/cpp/jit/CMakeLists.txt
-@@ -32,9 +32,9 @@ endif()
- target_link_libraries(backend_with_compiler torch)
- 
- if(INSTALL_TEST)
--  install(TARGETS torchbind_test DESTINATION lib)
--  install(TARGETS jitbackend_test DESTINATION lib)
--  install(TARGETS backend_with_compiler DESTINATION lib)
-+  install(TARGETS torchbind_test DESTINATION ${CMAKE_INSTALL_LIBDIR})
-+  install(TARGETS jitbackend_test DESTINATION ${CMAKE_INSTALL_LIBDIR})
-+  install(TARGETS backend_with_compiler DESTINATION ${CMAKE_INSTALL_LIBDIR})
- endif()
- 
- # Build the cpp gtest binary containing the cpp-only tests.

diff --git a/sci-libs/caffe2/files/caffe2-1.13.1-tensorpipe.patch b/sci-libs/caffe2/files/caffe2-1.13.1-tensorpipe.patch
deleted file mode 100644
index ae0cac9fb947..000000000000
--- a/sci-libs/caffe2/files/caffe2-1.13.1-tensorpipe.patch
+++ /dev/null
@@ -1,10 +0,0 @@
---- a/cmake/Dependencies.cmake 2023-02-28 14:14:49.099057348 +0100
-+++ b/cmake/Dependencies.cmake 2023-02-28 14:15:05.326790806 +0100
-@@ -1404,7 +1404,6 @@
- 
-     # Tensorpipe uses cuda_add_library
-     torch_update_find_cuda_flags()
--    add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/tensorpipe)
- 
-     list(APPEND Caffe2_DEPENDENCY_LIBS tensorpipe)
-     if(USE_CUDA)

diff --git a/sci-libs/caffe2/files/caffe2-2.0.0-cudnn_include_fix.patch b/sci-libs/caffe2/files/caffe2-2.0.0-cudnn_include_fix.patch
deleted file mode 100644
index ff64e4108087..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.0.0-cudnn_include_fix.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-diff -uar pytorch-2.0.0/cmake/Dependencies.cmake 
pytorch-2.0.0orig/cmake/Dependencies.cmake
---- a/cmake/Dependencies.cmake 2023-04-23 09:43:20.767566047 -0400
-+++ b/cmake/Dependencies.cmake 2023-03-09 17:42:00.000000000 -0500
-@@ -1235,7 +1235,7 @@
- 
- # ---[ cuDNN
- if(USE_CUDNN)
--  set(CUDNN_FRONTEND_INCLUDE_DIR 
${CMAKE_CURRENT_LIST_DIR}/../third_party/cudnn_frontend/include)
-+  set(CUDNN_FRONTEND_INCLUDE_DIR /opt/cuda/include)
-   target_include_directories(torch::cudnn INTERFACE 
${CUDNN_FRONTEND_INCLUDE_DIR})
- endif()
- 

diff --git a/sci-libs/caffe2/files/caffe2-2.0.0-gcc13.patch b/sci-libs/caffe2/files/caffe2-2.0.0-gcc13.patch
deleted file mode 100644
index acbcebad0a5d..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.0.0-gcc13.patch
+++ /dev/null
@@ -1,41 +0,0 @@
---- a/c10/util/Registry.h   2023-03-09 17:42:00.000000000 -0500
-+++ b/c10/util/Registry.h       2023-04-09 20:38:33.108135511 -0400
-@@ -16,6 +16,7 @@
- #include <memory>
- #include <mutex>
- #include <string>
-+#include <stdexcept>
- #include <unordered_map>
- #include <vector>
- 
---- a/torch/csrc/jit/passes/quantization/quantization_type.h    2023-03-09 
17:42:00.000000000 -0500
-+++ b/torch/csrc/jit/passes/quantization/quantization_type.h    2023-04-09 
20:43:43.124806308 -0400
-@@ -1,5 +1,6 @@
- #pragma once
- #include <ostream>
-+#include <cstdint>
- 
- namespace torch {
- namespace jit {
-
---- a/torch/csrc/jit/runtime/logging.cpp    2023-03-09 17:42:00.000000000 -0500
-+++ b/torch/csrc/jit/runtime/logging.cpp    2023-04-09 20:47:49.758142941 -0400
-@@ -1,6 +1,7 @@
- #include <torch/csrc/jit/runtime/logging.h>
- 
- #include <atomic>
-+#include <stdexcept>
- #include <mutex>
- #include <unordered_map>
- 
-
---- a/torch/csrc/lazy/core/multi_wait.cpp   2023-03-09 17:42:00.000000000 -0500
-+++ b/torch/csrc/lazy/core/multi_wait.cpp   2023-04-09 20:50:36.608145172 -0400
-@@ -1,6 +1,7 @@
- #include <torch/csrc/lazy/core/multi_wait.h>
- 
- #include <chrono>
-+#include <stdexcept>
- #include <exception>
- 
- namespace torch {

diff --git a/sci-libs/caffe2/files/caffe2-2.1.2-fix-openmp-link.patch b/sci-libs/caffe2/files/caffe2-2.1.2-fix-openmp-link.patch
deleted file mode 100644
index 3f2d0ae3c30a..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.1.2-fix-openmp-link.patch
+++ /dev/null
@@ -1,15 +0,0 @@
-Fix "undefined symbol: omp_get_max_active_levels" in mkl + <nothing else> builds
-https://github.com/pytorch/pytorch/issues/116576
---- a/caffe2/CMakeLists.txt
-+++ b/caffe2/CMakeLists.txt
-@@ -1575,6 +1575,10 @@ if(BUILD_SHARED_LIBS)
-     target_link_libraries(torch_global_deps TBB::tbb)
-   endif()
- 
-+  if(USE_OPENMP)
-+    target_link_libraries(torch_global_deps OpenMP::OpenMP_CXX)
-+  endif()
-+
-   install(TARGETS torch_global_deps DESTINATION "${TORCH_INSTALL_LIB_DIR}")
- endif()
- 

diff --git a/sci-libs/caffe2/files/caffe2-2.1.2-fix-rpath.patch b/sci-libs/caffe2/files/caffe2-2.1.2-fix-rpath.patch
deleted file mode 100644
index 731227fa25ee..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.1.2-fix-rpath.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-Unset rpath to support blas-lapack-switch
-Bug: https://bugs.gentoo.org/921129
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -10,7 +10,6 @@ endif(APPLE)
- set(CMAKE_SKIP_BUILD_RPATH  FALSE)
- # Don't use the install-rpath during the build phase
- set(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)
--set(CMAKE_INSTALL_RPATH "${_rpath_portable_origin}")
- # Automatically add all linked folders that are NOT in the build directory to
- # the rpath (per library?)
- set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)

diff --git a/sci-libs/caffe2/files/caffe2-2.1.2-rocm-fix-std-cpp17.patch b/sci-libs/caffe2/files/caffe2-2.1.2-rocm-fix-std-cpp17.patch
deleted file mode 100644
index cb0fa0c48e80..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.1.2-rocm-fix-std-cpp17.patch
+++ /dev/null
@@ -1,68 +0,0 @@
-Fix for error: invalid argument '-std=c++17' not allowed with 'C'
-https://github.com/pytorch/pytorch/issues/103222
---- a/c10/hip/CMakeLists.txt
-+++ b/c10/hip/CMakeLists.txt
-@@ -30,6 +30,7 @@ hip_add_library(c10_hip ${C10_HIP_SRCS} ${C10_HIP_HEADERS})
- 
- # Propagate HIP_CXX_FLAGS that were set from Dependencies.cmake
- target_compile_options(c10_hip PRIVATE ${HIP_CXX_FLAGS})
-+set_target_properties(c10_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
- 
- # caffe2_hip adds a bunch of dependencies like rocsparse, but c10/hip is 
supposed to be
- # minimal.  I'm not sure if we need hip_hcc or not; for now leave it out
---- a/caffe2/CMakeLists.txt
-+++ b/caffe2/CMakeLists.txt
-@@ -1598,6 +1598,7 @@ if(USE_ROCM)
- 
-   # Since PyTorch files contain HIP headers, these flags are required for the 
necessary definitions to be added.
-   target_compile_options(torch_hip PUBLIC ${HIP_CXX_FLAGS})  # experiment
-+  set_target_properties(torch_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS 
OFF)
-   target_link_libraries(torch_hip PUBLIC c10_hip)
- 
-   if(NOT INTERN_BUILD_MOBILE)
-@@ -1774,6 +1775,7 @@ if(BUILD_TEST)
-       target_include_directories(${test_name} PRIVATE 
$<INSTALL_INTERFACE:include>)
-       target_include_directories(${test_name} PRIVATE ${Caffe2_CPU_INCLUDE} 
${Caffe2_HIP_INCLUDE})
-       target_compile_options(${test_name} PRIVATE ${HIP_CXX_FLAGS})
-+      set_target_properties(${test_name} PROPERTIES CXX_STANDARD 17 
CXX_EXTENSIONS OFF)
-       add_test(NAME ${test_name} COMMAND $<TARGET_FILE:${test_name}>)
-       if(INSTALL_TEST)
-         install(TARGETS ${test_name} DESTINATION test)
-@@ -1955,6 +1957,7 @@ if(BUILD_PYTHON)
-     endif()
-     if(NOT MSVC)
-       target_compile_options(caffe2_pybind11_state_hip PRIVATE 
${HIP_CXX_FLAGS} -fvisibility=hidden)
-+      set_target_properties(caffe2_pybind11_state_hip PROPERTIES CXX_STANDARD 
17 CXX_EXTENSIONS OFF)
-     endif()
-     set_target_properties(caffe2_pybind11_state_hip PROPERTIES PREFIX "")
-     set_target_properties(caffe2_pybind11_state_hip PROPERTIES SUFFIX 
${PY_EXT_SUFFIX})
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -1287,7 +1287,6 @@ if(USE_ROCM)
-     list(APPEND HIP_CXX_FLAGS -Wno-duplicate-decl-specifier)
-     list(APPEND HIP_CXX_FLAGS -DCAFFE2_USE_MIOPEN)
-     list(APPEND HIP_CXX_FLAGS -DTHRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_HIP)
--    list(APPEND HIP_CXX_FLAGS -std=c++17)
-     add_definitions(-DROCM_VERSION=${ROCM_VERSION_DEV_INT})
-     add_definitions(-DTORCH_HIP_VERSION=${TORCH_HIP_VERSION})
-     message("TORCH_HIP_VERSION=${TORCH_HIP_VERSION} is added as a compiler 
defines")
---- a/cmake/public/utils.cmake
-+++ b/cmake/public/utils.cmake
-@@ -335,6 +335,7 @@ function(caffe2_hip_binary_target target_name_or_src)
-   caffe2_binary_target(${target_name_or_src})
- 
-   target_compile_options(${__target} PRIVATE ${HIP_CXX_FLAGS})
-+  set_target_properties(${__target} PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS 
OFF)
-   target_include_directories(${__target} PRIVATE ${Caffe2_HIP_INCLUDE})
- endfunction()
- 
---- a/modules/detectron/CMakeLists.txt
-+++ b/modules/detectron/CMakeLists.txt
-@@ -31,6 +31,7 @@ if(BUILD_CAFFE2_OPS)
-         ${Detectron_CPU_SRCS}
-         ${Detectron_HIP_SRCS})
-     target_compile_options(caffe2_detectron_ops_hip PRIVATE ${HIP_CXX_FLAGS})
-+    set_target_properties(caffe2_detectron_ops_hip PROPERTIES CXX_STANDARD 17 
CXX_EXTENSIONS OFF)
-     if(USE_MKLDNN)
-       target_link_libraries(caffe2_detectron_ops_hip PRIVATE caffe2::mkldnn)
-     endif()

diff --git a/sci-libs/caffe2/files/caffe2-2.2.1-gentoo.patch b/sci-libs/caffe2/files/caffe2-2.2.1-gentoo.patch
deleted file mode 100644
index 5472a2c41836..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.2.1-gentoo.patch
+++ /dev/null
@@ -1,195 +0,0 @@
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -474,7 +474,7 @@
- endif()
- 
- # ---[ QNNPACK
--if(USE_QNNPACK)
-+if(FALSE)
-   set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
- 
-   if(NOT DEFINED QNNPACK_SOURCE_DIR)
-@@ -530,7 +530,7 @@
- endif()
- 
- # ---[ Caffe2 Int8 operators (enabled by USE_QNNPACK) depend on gemmlowp and 
neon2sse headers
--if(USE_QNNPACK)
-+if(FALSE)
-   set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
-   include_directories(SYSTEM "${CAFFE2_THIRD_PARTY_ROOT}/gemmlowp")
-   include_directories(SYSTEM "${CAFFE2_THIRD_PARTY_ROOT}/neon2sse")
-@@ -780,7 +780,7 @@
- endif()
- 
- # ---[ FBGEMM
--if(USE_FBGEMM)
-+if(FALSE)
-   set(CAFFE2_THIRD_PARTY_ROOT "${PROJECT_SOURCE_DIR}/third_party")
-   if(NOT DEFINED FBGEMM_SOURCE_DIR)
-     set(FBGEMM_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/fbgemm" CACHE STRING 
"FBGEMM source directory")
-@@ -828,6 +828,7 @@
- endif()
- 
- if(USE_FBGEMM)
-+  list(APPEND Caffe2_DEPENDENCY_LIBS fbgemm)
-   caffe2_update_option(USE_FBGEMM ON)
- else()
-   caffe2_update_option(USE_FBGEMM OFF)
-@@ -1529,7 +1530,6 @@
-       set_target_properties(onnx_proto PROPERTIES CXX_STANDARD 17)
-     endif()
-   endif()
--  add_subdirectory(${CMAKE_CURRENT_LIST_DIR}/../third_party/foxi 
EXCLUDE_FROM_ALL)
- 
-   add_definitions(-DONNX_NAMESPACE=${ONNX_NAMESPACE})
-   if(NOT USE_SYSTEM_ONNX)
-@@ -1796,7 +1796,6 @@
- #
- set(TEMP_BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS})
- set(BUILD_SHARED_LIBS OFF CACHE BOOL "Build shared libs" FORCE)
--add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/fmt)
- 
- # Disable compiler feature checks for `fmt`.
- #
-@@ -1805,9 +1804,7 @@
- # CMAKE_CXX_FLAGS in ways that break feature checks. Since we already know
- # `fmt` is compatible with a superset of the compilers that PyTorch is, it
- # shouldn't be too bad to just disable the checks.
--set_target_properties(fmt-header-only PROPERTIES INTERFACE_COMPILE_FEATURES 
"")
- 
--list(APPEND Caffe2_DEPENDENCY_LIBS fmt::fmt-header-only)
- set(BUILD_SHARED_LIBS ${TEMP_BUILD_SHARED_LIBS} CACHE BOOL "Build shared 
libs" FORCE)
- 
- # ---[ Kineto
---- a/c10/CMakeLists.txt
-+++ b/c10/CMakeLists.txt
-@@ -89,7 +89,7 @@
- if(C10_USE_GLOG)
-     target_link_libraries(c10 PUBLIC glog::glog)
- endif()
--target_link_libraries(c10 PRIVATE fmt::fmt-header-only)
-+target_link_libraries(c10 PRIVATE fmt)
- 
- if(C10_USE_NUMA)
-   target_include_directories(c10 PRIVATE ${Numa_INCLUDE_DIR})
---- a/torch/CMakeLists.txt
-+++ b/torch/CMakeLists.txt
-@@ -59,15 +59,9 @@
-     ${CMAKE_BINARY_DIR}
-     ${CMAKE_BINARY_DIR}/aten/src
-     ${CMAKE_BINARY_DIR}/caffe2/aten/src
--    ${CMAKE_BINARY_DIR}/third_party
--    ${CMAKE_BINARY_DIR}/third_party/onnx
- 
--    ${TORCH_ROOT}/third_party/valgrind-headers
- 
--    ${TORCH_ROOT}/third_party/gloo
--    ${TORCH_ROOT}/third_party/onnx
--    ${TORCH_ROOT}/third_party/flatbuffers/include
--    ${TORCH_ROOT}/third_party/kineto/libkineto/include
-+    /usr/include/kineto
- 
-     ${TORCH_SRC_DIR}/csrc
-     ${TORCH_SRC_DIR}/csrc/api/include
-@@ -80,7 +74,6 @@
-     python::python
-     pybind::pybind11
-     shm
--    fmt::fmt-header-only
-     ATEN_CPU_FILES_GEN_LIB)
- 
- if(USE_ASAN AND TARGET Sanitizer::address)
---- a/CMakeLists.txt
-+++ b/CMakeLists.txt
-@@ -835,12 +835,11 @@
- # Re-include to override append_cxx_flag_if_supported from third_party/FBGEMM
- include(cmake/public/utils.cmake)
- if(NOT MSVC)
--  string(APPEND CMAKE_CXX_FLAGS " -O2 -fPIC")
-+  string(APPEND CMAKE_CXX_FLAGS " -O2")
-   # Eigen fails to build with some versions, so convert this to a warning
-   # Details at http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1459
-   string(APPEND CMAKE_CXX_FLAGS " -Wall")
-   string(APPEND CMAKE_CXX_FLAGS " -Wextra")
--  append_cxx_flag_if_supported("-Werror=return-type" CMAKE_CXX_FLAGS)
-   append_cxx_flag_if_supported("-Werror=non-virtual-dtor" CMAKE_CXX_FLAGS)
-   append_cxx_flag_if_supported("-Werror=braced-scalar-init" CMAKE_CXX_FLAGS)
-   append_cxx_flag_if_supported("-Werror=range-loop-construct" CMAKE_CXX_FLAGS)
-@@ -930,7 +930,6 @@
-   string(APPEND CMAKE_LINKER_FLAGS_DEBUG " -fno-omit-frame-pointer -O0")
-   append_cxx_flag_if_supported("-fno-math-errno" CMAKE_CXX_FLAGS)
-   append_cxx_flag_if_supported("-fno-trapping-math" CMAKE_CXX_FLAGS)
--  append_cxx_flag_if_supported("-Werror=format" CMAKE_CXX_FLAGS)
- else()
-   # skip unwanted includes from windows.h
-   add_compile_definitions(WIN32_LEAN_AND_MEAN)
---- a/cmake/public/utils.cmake
-+++ b/cmake/public/utils.cmake
-@@ -486,8 +486,6 @@
-   endif()
- 
-   # Use -O2 for release builds (-O3 doesn't improve perf, and -Os results in 
perf regression)
--  target_compile_options(${libname} PRIVATE
--      
$<$<AND:$<COMPILE_LANGUAGE:CXX>,$<OR:$<CONFIG:Release>,$<CONFIG:RelWithDebInfo>>>:-O2>)
- 
- endfunction()
- 
---- a/cmake/Codegen.cmake
-+++ b/cmake/Codegen.cmake
-@@ -57,7 +57,7 @@
-   if(MSVC)
-     set(OPT_FLAG "/fp:strict ")
-   else(MSVC)
--    set(OPT_FLAG "-O3 ")
-+    set(OPT_FLAG " ")
-     if("${CMAKE_BUILD_TYPE}" MATCHES "Debug")
-       set(OPT_FLAG " ")
-     endif()
---- a/caffe2/CMakeLists.txt
-+++ b/caffe2/CMakeLists.txt
-@@ -107,7 +107,7 @@
- # Note: the folders that are being commented out have not been properly
- # addressed yet.
- 
--if(NOT MSVC AND USE_XNNPACK)
-+if(FALSE)
-   if(NOT TARGET fxdiv)
-     set(FXDIV_BUILD_TESTS OFF CACHE BOOL "")
-     set(FXDIV_BUILD_BENCHMARKS OFF CACHE BOOL "")
-@@ -1055,7 +1055,6 @@
- endif()
- 
- if(NOT MSVC AND USE_XNNPACK)
--  TARGET_LINK_LIBRARIES(torch_cpu PRIVATE fxdiv)
- endif()
- 
- # ==========================================================
-@@ -1175,8 +1174,7 @@
- target_include_directories(torch_cpu PRIVATE
-   ${TORCH_ROOT}/third_party/miniz-2.1.0)
- 
--target_include_directories(torch_cpu PRIVATE
--  ${TORCH_ROOT}/third_party/kineto/libkineto/include)
-+target_include_directories(torch_cpu PRIVATE /usr/include/kineto)
- 
- if(USE_KINETO)
-   target_include_directories(torch_cpu PRIVATE
---- a/cmake/External/nnpack.cmake
-+++ b/cmake/External/nnpack.cmake
-@@ -56,7 +56,7 @@
-   set(PTHREADPOOL_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/pthreadpool" CACHE 
STRING "pthreadpool source directory")
-   set(GOOGLETEST_SOURCE_DIR "${CAFFE2_THIRD_PARTY_ROOT}/googletest" CACHE 
STRING "Google Test source directory")
- 
--  if(NOT TARGET nnpack)
-+  if(FALSE)
-     if(NOT USE_SYSTEM_PTHREADPOOL AND USE_INTERNAL_PTHREADPOOL_IMPL)
-       set(NNPACK_CUSTOM_THREADPOOL ON CACHE BOOL "")
-     endif()
---- a/functorch/CMakeLists.txt 2023-11-30 20:30:45.805209036 +0100
-+++ b/functorch/CMakeLists.txt 2023-11-30 20:31:13.284766157 +0100
-@@ -35,4 +35,4 @@
- if(NOT ${TORCH_PYTHON_LINK_FLAGS} STREQUAL "")
-   set_target_properties(${PROJECT_NAME} PROPERTIES LINK_FLAGS 
${TORCH_PYTHON_LINK_FLAGS})
- endif()
--install(TARGETS ${PROJECT_NAME} DESTINATION "${CMAKE_CURRENT_SOURCE_DIR}")
-+install(TARGETS ${PROJECT_NAME} DESTINATION "${CMAKE_INSTALL_LIBDIR}")

diff --git a/sci-libs/caffe2/files/caffe2-2.2.2-musl.patch b/sci-libs/caffe2/files/caffe2-2.2.2-musl.patch
deleted file mode 100644
index f63e9f1df332..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.2.2-musl.patch
+++ /dev/null
@@ -1,13 +0,0 @@
---- a/torch/csrc/profiler/unwind/unwind.cpp    2024-04-29 12:05:40.895667482 
+0200
-+++ b/torch/csrc/profiler/unwind/unwind.cpp    2024-04-29 12:05:53.099524760 
+0200
-@@ -112,8 +112,8 @@
- }
- 
- struct Version {
--  uint64_t adds_ = LONG_LONG_MAX;
--  uint64_t subs_ = LONG_LONG_MAX;
-+  uint64_t adds_ = LLONG_MAX;
-+  uint64_t subs_ = LLONG_MAX;
- };
- 
- struct UnwindCache {

diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-CMakeFix.patch b/sci-libs/caffe2/files/caffe2-2.3.0-CMakeFix.patch
deleted file mode 100644
index eba37d933cac..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.3.0-CMakeFix.patch
+++ /dev/null
@@ -1,11 +0,0 @@
---- a/CMakeLists.txt   2024-04-29 20:32:26.259716769 +0200
-+++ b/CMakeLists.txt   2024-04-29 20:32:35.886384618 +0200
-@@ -50,7 +50,7 @@
- 
- # This define is needed to preserve behavior given anticpated changes to 
cccl/thrust
- # 
https://nvidia.github.io/libcudacxx/standard_api/numerics_library/complex.html
--string(APPEND CMAKE_CUDA_FLAGS 
"-DLIBCUDACXX_ENABLE_SIMPLIFIED_COMPLEX_OPERATIONS")
-+string(APPEND CMAKE_CUDA_FLAGS " 
-DLIBCUDACXX_ENABLE_SIMPLIFIED_COMPLEX_OPERATIONS")
- 
- if(LINUX)
-   include(cmake/CheckAbi.cmake)

diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-cudnn_include_fix.patch b/sci-libs/caffe2/files/caffe2-2.3.0-cudnn_include_fix.patch
deleted file mode 100644
index 77905dbd1ac8..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.3.0-cudnn_include_fix.patch
+++ /dev/null
@@ -1,11 +0,0 @@
---- a/cmake/Dependencies.cmake 2024-04-29 18:37:34.005639858 +0200
-+++ b/cmake/Dependencies.cmake 2024-04-29 18:39:29.126587738 +0200
-@@ -1235,7 +1235,7 @@
-   if(CUDNN_VERSION VERSION_LESS 8.5)
-     message(FATAL_ERROR "PyTorch needs CuDNN-8.5 or above, but found 
${CUDNN_VERSION}. Builds are still possible with `USE_CUDNN=0`")
-   endif()
--  set(CUDNN_FRONTEND_INCLUDE_DIR 
${CMAKE_CURRENT_LIST_DIR}/../third_party/cudnn_frontend/include)
-+  set(CUDNN_FRONTEND_INCLUDE_DIR /opt/cuda/include)
-   target_include_directories(torch::cudnn INTERFACE 
${CUDNN_FRONTEND_INCLUDE_DIR})
- endif()
- 

diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-exclude-aotriton.patch b/sci-libs/caffe2/files/caffe2-2.3.0-exclude-aotriton.patch
deleted file mode 100644
index 2c65987acd85..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.3.0-exclude-aotriton.patch
+++ /dev/null
@@ -1,35 +0,0 @@
-Disables aotriton download when both USE_FLASH_ATTENTION and USE_MEM_EFF_ATTENTION cmake flags are OFF
-Backports upstream PR to 2.3.0: https://github.com/pytorch/pytorch/pull/130197
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -1334,7 +1334,9 @@ if(USE_ROCM)
-       message(STATUS "Disabling Kernel Assert for ROCm")
-     endif()
- 
--    include(${CMAKE_CURRENT_LIST_DIR}/External/aotriton.cmake)
-+    if(USE_FLASH_ATTENTION)
-+      include(${CMAKE_CURRENT_LIST_DIR}/External/aotriton.cmake)
-+    endif()
-     if(USE_CUDA)
-       caffe2_update_option(USE_MEM_EFF_ATTENTION OFF)
-     endif()
---- a/aten/src/ATen/native/transformers/cuda/sdp_utils.cpp
-+++ b/aten/src/ATen/native/transformers/cuda/sdp_utils.cpp
-@@ -21,7 +21,7 @@
- #include <cmath>
- #include <functional>
- 
--#if USE_ROCM
-+#if defined(USE_ROCM) && defined(USE_FLASH_ATTENTION)
- #include <aotriton/flash.h>
- #endif
- 
-@@ -186,7 +186,7 @@ bool check_flash_attention_hardware_support(sdp_params 
const& params, bool debug
-   // Check that the gpu is capable of running flash attention
-   using sm80 = SMVersion<8, 0>;
-   using sm90 = SMVersion<9, 0>;
--#if USE_ROCM
-+#if defined(USE_ROCM) && defined(USE_FLASH_ATTENTION)
-   auto stream = at::cuda::getCurrentCUDAStream().stream();
-   if (hipSuccess != aotriton::v2::flash::check_gpu(stream)) {
-       auto dprops = at::cuda::getCurrentDeviceProperties();

diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-fix-gcc-clang-abi-compat.patch b/sci-libs/caffe2/files/caffe2-2.3.0-fix-gcc-clang-abi-compat.patch
deleted file mode 100644
index a6f981b7e054..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.3.0-fix-gcc-clang-abi-compat.patch
+++ /dev/null
@@ -1,17 +0,0 @@
-
-When gcc builds libtorch_cpu.so and hipcc (clang-18) build libtorch_hip.so,
-resulting binary fails in runtime due to different mangling.
-Related issue in LLVM: https://github.com/llvm/llvm-project/issues/85656
-Fixed in pytorch-2.4.0 in https://github.com/pytorch/pytorch/commit/a89f442f0b103fa6f38103784a2dfedbd147f863
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -1314,6 +1314,9 @@ if(USE_ROCM)
-        list(APPEND HIP_HIPCC_FLAGS -fdebug-info-for-profiling)
-     endif(CMAKE_BUILD_TYPE MATCHES Debug)
- 
-+    # needed for compat with newer versions of hip-clang that introduced C++20 mangling rules
-+    list(APPEND HIP_HIPCC_FLAGS -fclang-abi-compat=17)
-+
-     set(HIP_CLANG_FLAGS ${HIP_CXX_FLAGS})
-     # Ask hcc to generate device code during compilation so we can use
-     # host linker to link.

diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-fix-libcpp.patch b/sci-libs/caffe2/files/caffe2-2.3.0-fix-libcpp.patch
deleted file mode 100644
index 75808fd7ec50..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.3.0-fix-libcpp.patch
+++ /dev/null
@@ -1,24 +0,0 @@
-Workaround for libc++ issue https://github.com/llvm/llvm-project/issues/100802
-"reference to __host__ function 'memcpy' in __device__ function"
---- a/c10/util/Half.h
-+++ b/c10/util/Half.h
-@@ -227,7 +227,7 @@ C10_HOST_DEVICE inline float 
fp16_ieee_to_fp32_value(uint16_t h) {
-   // const float exp_scale = 0x1.0p-112f;
-   constexpr uint32_t scale_bits = (uint32_t)15 << 23;
-   float exp_scale_val = 0;
--  std::memcpy(&exp_scale_val, &scale_bits, sizeof(exp_scale_val));
-+  memcpy(&exp_scale_val, &scale_bits, sizeof(exp_scale_val));
-   const float exp_scale = exp_scale_val;
-   const float normalized_value =
-       fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;
-@@ -298,8 +298,8 @@ inline uint16_t fp16_ieee_from_fp32_value(float f) {
-   constexpr uint32_t scale_to_inf_bits = (uint32_t)239 << 23;
-   constexpr uint32_t scale_to_zero_bits = (uint32_t)17 << 23;
-   float scale_to_inf_val = 0, scale_to_zero_val = 0;
--  std::memcpy(&scale_to_inf_val, &scale_to_inf_bits, 
sizeof(scale_to_inf_val));
--  std::memcpy(
-+  memcpy(&scale_to_inf_val, &scale_to_inf_bits, sizeof(scale_to_inf_val));
-+  memcpy(
-       &scale_to_zero_val, &scale_to_zero_bits, sizeof(scale_to_zero_val));
-   const float scale_to_inf = scale_to_inf_val;
-   const float scale_to_zero = scale_to_zero_val;

diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-fix-rocm-gcc14-clamp.patch b/sci-libs/caffe2/files/caffe2-2.3.0-fix-rocm-gcc14-clamp.patch
deleted file mode 100644
index 81ae075c67cc..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.3.0-fix-rocm-gcc14-clamp.patch
+++ /dev/null
@@ -1,18 +0,0 @@
-Fix hip compilation with gcc-14
-Upstream commit: https://github.com/pytorch/pytorch/commit/8c2c3a03fb87c3568a22362d83b00d82b9fb3db2
---- a/aten/src/ATen/native/cuda/IndexKernel.cu
-+++ b/aten/src/ATen/native/cuda/IndexKernel.cu
-@@ -259,7 +259,13 @@ void index_put_kernel_quantized_cuda(TensorIterator& 
iter, const IntArrayRef ind
- 
-     gpu_index_kernel(iter, index_size, index_stride, [inv_scale, zero_point, 
qmin, qmax]C10_DEVICE(char* const out_data, const char* const in_data, const 
int64_t offset) {
-       int64_t qvalue = static_cast<int64_t>(zero_point + 
nearbyintf(*(float*)in_data * inv_scale));
-+      // See https://github.com/pytorch/pytorch/issues/127666
-+      // hip-clang std::clamp __glibcxx_assert_fail host function when building on Fedora40/gcc14
-+#ifndef USE_ROCM
-       qvalue = std::clamp(qvalue, qmin, qmax);
-+#else
-+      qvalue = (qvalue < qmin) ? qmin : (qmax < qvalue) ? qmax : qvalue;
-+#endif
-       *(scalar_t*)(out_data + offset) = static_cast<scalar_t>(qvalue);
-     });
-   });

diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-optional-hipblaslt.patch b/sci-libs/caffe2/files/caffe2-2.3.0-optional-hipblaslt.patch
deleted file mode 100644
index dc544255c2bd..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.3.0-optional-hipblaslt.patch
+++ /dev/null
@@ -1,235 +0,0 @@
-Makes hipblaslt optional to simplify build for non-datacenter GPUs.
-Based on https://github.com/pytorch/pytorch/pull/120551 with added USE_HIPBLASLT cmake option.
---- a/CMakeLists.txt
-+++ b/CMakeLists.txt
-@@ -225,6 +225,9 @@ option(USE_FAKELOWP "Use FakeLowp operators" OFF)
- option(USE_FFMPEG "Use ffmpeg" OFF)
- option(USE_GFLAGS "Use GFLAGS" OFF)
- option(USE_GLOG "Use GLOG" OFF)
-+cmake_dependent_option(
-+    USE_HIPBLASLT "Use hipBLASLt" ON
-+    "USE_ROCM" OFF)
- option(USE_LEVELDB "Use LEVELDB" OFF)
- option(USE_LITE_PROTO "Use lite protobuf instead of full." OFF)
- option(USE_LMDB "Use LMDB" OFF)
---- a/aten/src/ATen/cuda/CUDABlas.cpp
-+++ b/aten/src/ATen/cuda/CUDABlas.cpp
-@@ -14,7 +14,7 @@
- #include <c10/util/irange.h>
- 
- #ifdef USE_ROCM
--#if ROCM_VERSION >= 60000
-+#ifdef USE_HIPBLASLT
- #include <hipblaslt/hipblaslt-ext.hpp>
- #endif
- // until hipblas has an API to accept flags, we must use rocblas here
-@@ -781,7 +781,7 @@ void 
gemm<at::BFloat16>(CUDABLAS_GEMM_ARGTYPES(at::BFloat16)) {
-   }
- }
- 
--#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && 
ROCM_VERSION >= 50700)
-+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && 
defined(USE_HIPBLASLT))
- 
- #if defined(USE_ROCM) && ROCM_VERSION >= 50700 && ROCM_VERSION < 60000
- // only for rocm 5.7 where we first supported hipblaslt, it was difficult
-@@ -912,6 +912,7 @@ class CuBlasLtMatmulPreference : public CuBlasLtDescriptor<
- };
- } // namespace
- 
-+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && 
defined(USE_HIPBLASLT))
- template <typename Dtype>
- void gemm_and_bias(
-     bool transpose_mat1,
-@@ -1124,7 +1125,7 @@ template void gemm_and_bias(
-     at::BFloat16* result_ptr,
-     int64_t result_ld,
-     GEMMAndBiasActivationEpilogue activation);
--
-+#endif
- void scaled_gemm(
-     char transa,
-     char transb,
---- a/aten/src/ATen/cuda/CUDABlas.h
-+++ b/aten/src/ATen/cuda/CUDABlas.h
-@@ -82,7 +82,7 @@ void 
gemm_internal<at::Half>(CUDABLAS_GEMM_ARGTYPES(at::Half));
- template <>
- void gemm_internal<at::BFloat16>(CUDABLAS_GEMM_ARGTYPES(at::BFloat16));
- 
--#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && 
ROCM_VERSION >= 50700)
-+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && 
defined(USE_HIPBLASLT))
- enum GEMMAndBiasActivationEpilogue {
-   None,
-   RELU,
---- a/aten/src/ATen/cuda/CUDAContextLight.h
-+++ b/aten/src/ATen/cuda/CUDAContextLight.h
-@@ -9,7 +9,7 @@
- 
- // cublasLT was introduced in CUDA 10.1 but we enable only for 11.1 that also
- // added bf16 support
--#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && 
ROCM_VERSION >= 50700)
-+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && 
defined(USE_HIPBLASLT))
- #include <cublasLt.h>
- #endif
- 
-@@ -82,7 +82,7 @@ TORCH_CUDA_CPP_API c10::Allocator* getCUDADeviceAllocator();
- /* Handles */
- TORCH_CUDA_CPP_API cusparseHandle_t getCurrentCUDASparseHandle();
- TORCH_CUDA_CPP_API cublasHandle_t getCurrentCUDABlasHandle();
--#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && 
ROCM_VERSION >= 50700)
-+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && 
defined(USE_HIPBLASLT))
- TORCH_CUDA_CPP_API cublasLtHandle_t getCurrentCUDABlasLtHandle();
- #endif
- 
---- a/aten/src/ATen/cuda/CublasHandlePool.cpp
-+++ b/aten/src/ATen/cuda/CublasHandlePool.cpp
-@@ -29,7 +29,7 @@ namespace at::cuda {
- 
- namespace {
- 
--#if defined(USE_ROCM) && ROCM_VERSION >= 50700
-+#if defined(USE_ROCM) && defined(USE_HIPBLASLT)
- void createCublasLtHandle(cublasLtHandle_t *handle) {
-   TORCH_CUDABLAS_CHECK(cublasLtCreate(handle));
- }
-@@ -190,7 +190,7 @@ cublasHandle_t getCurrentCUDABlasHandle() {
-   return handle;
- }
- 
--#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && 
ROCM_VERSION >= 50700)
-+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && 
defined(USE_HIPBLASLT))
- cublasLtHandle_t getCurrentCUDABlasLtHandle() {
- #ifdef USE_ROCM
-   c10::DeviceIndex device = 0;
---- a/aten/src/ATen/cuda/tunable/TunableGemm.h
-+++ b/aten/src/ATen/cuda/tunable/TunableGemm.h
-@@ -11,7 +11,7 @@
- 
- #include <ATen/cuda/tunable/GemmCommon.h>
- #ifdef USE_ROCM
--#if ROCM_VERSION >= 50700
-+#ifdef USE_HIPBLASLT
- #include <ATen/cuda/tunable/GemmHipblaslt.h>
- #endif
- #include <ATen/cuda/tunable/GemmRocblas.h>
-@@ -166,7 +166,7 @@ class GemmTunableOp : public TunableOp<GemmParams<T>, 
StreamTimer> {
-     }
- #endif
- 
--#if defined(USE_ROCM) && ROCM_VERSION >= 50700
-+#if defined(USE_ROCM) && defined(USE_HIPBLASLT)
-     static const char *env = 
std::getenv("PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED");
-     if (env == nullptr || strcmp(env, "1") == 0) {
-       // disallow tuning of hipblaslt with c10::complex
-@@ -240,7 +240,7 @@ class GemmStridedBatchedTunableOp : public 
TunableOp<GemmStridedBatchedParams<T>
-     }
- #endif
- 
--#if defined(USE_ROCM) && ROCM_VERSION >= 50700
-+#if defined(USE_ROCM) && defined(USE_HIPBLASLT)
-     static const char *env = 
std::getenv("PYTORCH_TUNABLEOP_HIPBLASLT_ENABLED");
-     if (env == nullptr || strcmp(env, "1") == 0) {
-       // disallow tuning of hipblaslt with c10::complex
---- a/aten/src/ATen/native/cuda/Blas.cpp
-+++ b/aten/src/ATen/native/cuda/Blas.cpp
-@@ -155,7 +155,7 @@ enum class Activation {
-   GELU,
- };
- 
--#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && 
ROCM_VERSION >= 50700)
-+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && 
defined(USE_HIPBLASLT))
- cuda::blas::GEMMAndBiasActivationEpilogue 
activation_to_gemm_and_blas_arg(Activation a) {
-   switch (a) {
-     case Activation::None:
-@@ -193,6 +193,7 @@ static bool getDisableAddmmCudaLt() {
- 
- #ifdef USE_ROCM
- static bool isSupportedHipLtROCmArch(int index) {
-+#if defined(USE_HIPBLASLT)
-     hipDeviceProp_t* prop = at::cuda::getDeviceProperties(index);
-     std::string device_arch = prop->gcnArchName;
-     static const std::vector<std::string> archs = {"gfx90a", "gfx940", 
"gfx941", "gfx942"};
-@@ -203,6 +204,7 @@ static bool isSupportedHipLtROCmArch(int index) {
-         }
-     }
-     TORCH_CHECK(false, "Attempting to use hipBLASLt on a unsupported 
architecture!");
-+#endif
-     return false;
- }
- #endif
-@@ -228,7 +230,7 @@ Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& 
self, const Tensor& ma
-   at::ScalarType scalar_type = self.scalar_type();
-   c10::MaybeOwned<Tensor> self_;
-   if (&result != &self) {
--#if (defined(CUDA_VERSION) && CUDA_VERSION >= 11040 && !defined(_MSC_VER)) || 
defined(USE_ROCM) && ROCM_VERSION >= 50700
-+#if (defined(CUDA_VERSION) && CUDA_VERSION >= 11040 && !defined(_MSC_VER)) || 
defined(USE_ROCM) && defined(USE_HIPBLASLT)
-     // Strangely, if mat2 has only 1 row or column, we get
-     // CUBLAS_STATUS_INVALID_VALUE error from cublasLtMatmulAlgoGetHeuristic.
-     // self.dim() == 1 && result.dim() == 2 && self.sizes()[0] == 
mat2_sizes[1]
-@@ -271,7 +273,7 @@ Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& 
self, const Tensor& ma
-     }
-     self__sizes = self_->sizes();
-   } else {
--#if defined(USE_ROCM) && ROCM_VERSION >= 50700
-+#if defined(USE_ROCM) && defined(USE_HIPBLASLT)
-     useLtInterface = !disable_addmm_cuda_lt &&
-         result.dim() == 2 && result.is_contiguous() &&
-         isSupportedHipLtROCmArch(self.device().index()) &&
-@@ -322,7 +324,7 @@ Tensor& addmm_out_cuda_impl(Tensor& result, const Tensor& 
self, const Tensor& ma
- 
-   TORCH_INTERNAL_ASSERT_DEBUG_ONLY(!args.result->is_conj());
- 
--#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && 
ROCM_VERSION >= 50700)
-+#if (!defined(USE_ROCM) && !defined(_MSC_VER)) || (defined(USE_ROCM) && 
defined(USE_HIPBLASLT))
-   if (useLtInterface) {
-     AT_DISPATCH_FLOATING_TYPES_AND2(
-         at::ScalarType::Half,
-@@ -876,7 +878,7 @@ _scaled_mm_out_cuda(const Tensor& mat1, const Tensor& mat2,
-   at::native::resize_output(out, {mat1_sizes[0], mat2_sizes[1]});
-   at::native::resize_output(amax, {});
- 
--#if !defined(USE_ROCM) && !defined(_MSC_VER) || (defined(USE_ROCM) && 
ROCM_VERSION >= 60000)
-+#if !defined(USE_ROCM) && !defined(_MSC_VER) || (defined(USE_ROCM) && 
defined(USE_HIPBLASLT))
-   cublasCommonArgs args(mat1, mat2, out);
-   const auto out_dtype_ = args.result->scalar_type();
-   TORCH_CHECK(args.transa == 't' && args.transb == 'n', "Only multiplication 
of row-major and column-major matrices is supported by cuBLASLt");
-@@ -906,7 +908,7 @@ _scaled_mm_out_cuda(const Tensor& mat1, const Tensor& mat2,
-   TORCH_CHECK(false, "_scaled_mm_out_cuda is not compiled for this 
platform.");
- #endif
- 
--#if defined(USE_ROCM) && ROCM_VERSION >= 60000
-+#if defined(USE_ROCM) && defined(USE_HIPBLASLT)
-   // rocm's hipblaslt does not yet support amax, so calculate separately
-   auto out_float32 = out.to(kFloat);
-   out_float32.abs_();
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -1282,6 +1282,9 @@ if(USE_ROCM)
-     if(ROCM_VERSION_DEV VERSION_GREATER_EQUAL "6.0.0")
-       list(APPEND HIP_CXX_FLAGS -DHIPBLAS_V2)
-     endif()
-+    if(hipblast_FOUND)
-+      list(APPEND HIP_CXX_FLAGS -DHIPBLASLT)
-+    endif()
-     if(HIPBLASLT_CUSTOM_DATA_TYPE)
-       list(APPEND HIP_CXX_FLAGS -DHIPBLASLT_CUSTOM_DATA_TYPE)
-     endif()
---- a/cmake/public/LoadHIP.cmake
-+++ b/cmake/public/LoadHIP.cmake
-@@ -155,7 +155,7 @@ if(HIP_FOUND)
-   find_package_and_print_version(hiprand REQUIRED)
-   find_package_and_print_version(rocblas REQUIRED)
-   find_package_and_print_version(hipblas REQUIRED)
--  if(ROCM_VERSION_DEV VERSION_GREATER_EQUAL "5.7.0")
-+  if(ROCM_VERSION_DEV VERSION_GREATER_EQUAL "5.7.0" AND USE_HIPBLASLT)
-     find_package_and_print_version(hipblaslt REQUIRED)
-   endif()
-   find_package_and_print_version(miopen REQUIRED)
-@@ -191,7 +191,7 @@ if(HIP_FOUND)
-   # roctx is part of roctracer
-   find_library(ROCM_ROCTX_LIB roctx64 HINTS ${ROCM_PATH}/lib)
- 
--  if(ROCM_VERSION_DEV VERSION_GREATER_EQUAL "5.7.0")
-+  if(hipblastlt_FOUND)
-     # check whether hipblaslt is using its own datatype
-     set(file "${PROJECT_BINARY_DIR}/hipblaslt_test_data_type.cc")
-     file(WRITE ${file} ""

diff --git a/sci-libs/caffe2/files/caffe2-2.3.0-rocm-fix-std-cpp17.patch b/sci-libs/caffe2/files/caffe2-2.3.0-rocm-fix-std-cpp17.patch
deleted file mode 100644
index 127a31e4b225..000000000000
--- a/sci-libs/caffe2/files/caffe2-2.3.0-rocm-fix-std-cpp17.patch
+++ /dev/null
@@ -1,68 +0,0 @@
-Fix for error: invalid argument '-std=c++17' not allowed with 'C'
-https://github.com/pytorch/pytorch/issues/103222
---- a/c10/hip/CMakeLists.txt
-+++ b/c10/hip/CMakeLists.txt
-@@ -30,6 +30,7 @@ hip_add_library(c10_hip ${C10_HIP_SRCS} ${C10_HIP_HEADERS})
- 
- # Propagate HIP_CXX_FLAGS that were set from Dependencies.cmake
- target_compile_options(c10_hip PRIVATE ${HIP_CXX_FLAGS})
-+set_target_properties(c10_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS OFF)
- 
- # caffe2_hip adds a bunch of dependencies like rocsparse, but c10/hip is 
supposed to be
- # minimal.  I'm not sure if we need hip_hcc or not; for now leave it out
---- a/caffe2/CMakeLists.txt
-+++ b/caffe2/CMakeLists.txt
-@@ -1712,6 +1712,7 @@ if(USE_ROCM)
- 
-   # Since PyTorch files contain HIP headers, these flags are required for the 
necessary definitions to be added.
-   target_compile_options(torch_hip PUBLIC ${HIP_CXX_FLAGS})  # experiment
-+  set_target_properties(torch_hip PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS 
OFF)
-   target_link_libraries(torch_hip PUBLIC c10_hip)
- 
-   if(NOT INTERN_BUILD_MOBILE)
-@@ -1908,6 +1909,7 @@ if(BUILD_TEST)
-       target_include_directories(${test_name} PRIVATE 
$<INSTALL_INTERFACE:include>)
-       target_include_directories(${test_name} PRIVATE ${Caffe2_CPU_INCLUDE} 
${Caffe2_HIP_INCLUDE})
-       target_compile_options(${test_name} PRIVATE ${HIP_CXX_FLAGS})
-+      set_target_properties(${test_name} PROPERTIES CXX_STANDARD 17 
CXX_EXTENSIONS OFF)
-       add_test(NAME ${test_name} COMMAND $<TARGET_FILE:${test_name}>)
-       if(INSTALL_TEST)
-         install(TARGETS ${test_name} DESTINATION test)
-@@ -2092,6 +2094,7 @@ if(BUILD_PYTHON)
-     endif()
-     if(NOT MSVC)
-       target_compile_options(caffe2_pybind11_state_hip PRIVATE 
${HIP_CXX_FLAGS} -fvisibility=hidden)
-+      set_target_properties(caffe2_pybind11_state_hip PROPERTIES CXX_STANDARD 
17 CXX_EXTENSIONS OFF)
-     endif()
-     set_target_properties(caffe2_pybind11_state_hip PROPERTIES PREFIX "")
-     set_target_properties(caffe2_pybind11_state_hip PROPERTIES SUFFIX 
${PY_EXT_SUFFIX})
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -1278,7 +1278,6 @@
-     list(APPEND HIP_CXX_FLAGS -Wno-duplicate-decl-specifier)
-     list(APPEND HIP_CXX_FLAGS -DCAFFE2_USE_MIOPEN)
-     list(APPEND HIP_CXX_FLAGS -DTHRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_HIP)
--    list(APPEND HIP_CXX_FLAGS -std=c++17)
-     if(ROCM_VERSION_DEV VERSION_GREATER_EQUAL "6.0.0")
-       list(APPEND HIP_CXX_FLAGS -DHIPBLAS_V2)
-     endif()
---- a/cmake/public/utils.cmake
-+++ b/cmake/public/utils.cmake
-@@ -335,6 +335,7 @@ function(caffe2_hip_binary_target target_name_or_src)
-   caffe2_binary_target(${target_name_or_src})
- 
-   target_compile_options(${__target} PRIVATE ${HIP_CXX_FLAGS})
-+  set_target_properties(${__target} PROPERTIES CXX_STANDARD 17 CXX_EXTENSIONS 
OFF)
-   target_include_directories(${__target} PRIVATE ${Caffe2_HIP_INCLUDE})
- endfunction()
- 
---- a/modules/detectron/CMakeLists.txt
-+++ b/modules/detectron/CMakeLists.txt
-@@ -31,6 +31,7 @@ if(BUILD_CAFFE2_OPS)
-         ${Detectron_CPU_SRCS}
-         ${Detectron_HIP_SRCS})
-     target_compile_options(caffe2_detectron_ops_hip PRIVATE ${HIP_CXX_FLAGS})
-+    set_target_properties(caffe2_detectron_ops_hip PROPERTIES CXX_STANDARD 17 
CXX_EXTENSIONS OFF)
-     if(USE_MKLDNN)
-       target_link_libraries(caffe2_detectron_ops_hip PRIVATE caffe2::mkldnn)
-     endif()
