Date: Monday, April 10, 2023 @ 16:11:07
  Author: felixonmars
Revision: 1444461

archrelease: copy trunk to community-staging-x86_64

Added:
  python-pytorch/repos/community-staging-x86_64/87773.patch
    (from rev 1444460, python-pytorch/trunk/87773.patch)
  python-pytorch/repos/community-staging-x86_64/PKGBUILD
    (from rev 1444460, python-pytorch/trunk/PKGBUILD)
  python-pytorch/repos/community-staging-x86_64/disable-werror1.patch
    (from rev 1444460, python-pytorch/trunk/disable-werror1.patch)
  python-pytorch/repos/community-staging-x86_64/disable-werror2.patch
    (from rev 1444460, python-pytorch/trunk/disable-werror2.patch)
  python-pytorch/repos/community-staging-x86_64/disable-werror3.patch
    (from rev 1444460, python-pytorch/trunk/disable-werror3.patch)
  python-pytorch/repos/community-staging-x86_64/disable-werror4.patch
    (from rev 1444460, python-pytorch/trunk/disable-werror4.patch)
  python-pytorch/repos/community-staging-x86_64/ffmpeg4.4.patch
    (from rev 1444460, python-pytorch/trunk/ffmpeg4.4.patch)
  python-pytorch/repos/community-staging-x86_64/fix-building-for-torchvision.patch
    (from rev 1444460, python-pytorch/trunk/fix-building-for-torchvision.patch)
  python-pytorch/repos/community-staging-x86_64/fix_include_system.patch
    (from rev 1444460, python-pytorch/trunk/fix_include_system.patch)
  python-pytorch/repos/community-staging-x86_64/rocblas-batched.patch
    (from rev 1444460, python-pytorch/trunk/rocblas-batched.patch)
  python-pytorch/repos/community-staging-x86_64/test.py
    (from rev 1444460, python-pytorch/trunk/test.py)
  python-pytorch/repos/community-staging-x86_64/use-system-libuv.patch
    (from rev 1444460, python-pytorch/trunk/use-system-libuv.patch)
Deleted:
  python-pytorch/repos/community-staging-x86_64/87773.patch
  python-pytorch/repos/community-staging-x86_64/PKGBUILD
  python-pytorch/repos/community-staging-x86_64/disable-werror1.patch
  python-pytorch/repos/community-staging-x86_64/disable-werror2.patch
  python-pytorch/repos/community-staging-x86_64/disable-werror3.patch
  python-pytorch/repos/community-staging-x86_64/disable-werror4.patch
  python-pytorch/repos/community-staging-x86_64/ffmpeg4.4.patch
  python-pytorch/repos/community-staging-x86_64/fix-building-for-torchvision.patch
  python-pytorch/repos/community-staging-x86_64/fix_include_system.patch
  python-pytorch/repos/community-staging-x86_64/rocblas-batched.patch
  python-pytorch/repos/community-staging-x86_64/test.py
  python-pytorch/repos/community-staging-x86_64/use-system-libuv.patch

------------------------------------+
 87773.patch                        |   78 +--
 PKGBUILD                           |  876 +++++++++++++++++------------------
 disable-werror1.patch              |   26 -
 disable-werror2.patch              |   26 -
 disable-werror3.patch              |   26 -
 disable-werror4.patch              |   30 -
 ffmpeg4.4.patch                    |  102 ++--
 fix-building-for-torchvision.patch |   50 -
 fix_include_system.patch           |   26 -
 rocblas-batched.patch              |   72 +-
 test.py                            |   14 
 use-system-libuv.patch             |   26 -
 12 files changed, 676 insertions(+), 676 deletions(-)

Deleted: 87773.patch
===================================================================
--- 87773.patch 2023-04-10 16:10:27 UTC (rev 1444460)
+++ 87773.patch 2023-04-10 16:11:07 UTC (rev 1444461)
@@ -1,39 +0,0 @@
-From 94465d6287e8f54c99f4b0b891a6c618bb80d7ce Mon Sep 17 00:00:00 2001
-From: Arfrever Frehtes Taifersar Arahesis <[email protected]>
-Date: Wed, 26 Oct 2022 07:32:59 +0200
-Subject: [PATCH] Support only GLog >=0.6.0
-
-Fixes https://github.com/pytorch/pytorch/issues/58054
----
- c10/util/Logging.cpp | 12 +-----------
- 1 file changed, 1 insertion(+), 11 deletions(-)
-
-diff --git a/c10/util/Logging.cpp b/c10/util/Logging.cpp
-index fe74e49548646..d8fce12ce4840 100644
---- a/c10/util/Logging.cpp
-+++ b/c10/util/Logging.cpp
-@@ -192,23 +192,13 @@ C10_DEFINE_int(
-     google::GLOG_WARNING,
-     "The minimum log level that caffe2 will output.");
- 
--// Google glog's api does not have an external function that allows one to check
--// if glog is initialized or not. It does have an internal function - so we are
--// declaring it here. This is a hack but has been used by a bunch of others too
--// (e.g. Torch).
--namespace google {
--namespace glog_internal_namespace_ {
--bool IsGoogleLoggingInitialized();
--} // namespace glog_internal_namespace_
--} // namespace google
--
- namespace c10 {
- namespace {
- 
- void initGoogleLogging(char const* name) {
- #if !defined(_MSC_VER)
-   // This trick can only be used on UNIX platforms
--  if (!::google::glog_internal_namespace_::IsGoogleLoggingInitialized())
-+  if (!::google::IsGoogleLoggingInitialized())
- #endif
-   {
-     ::google::InitGoogleLogging(name);

Copied: python-pytorch/repos/community-staging-x86_64/87773.patch (from rev 1444460, python-pytorch/trunk/87773.patch)
===================================================================
--- 87773.patch                         (rev 0)
+++ 87773.patch 2023-04-10 16:11:07 UTC (rev 1444461)
@@ -0,0 +1,39 @@
+From 94465d6287e8f54c99f4b0b891a6c618bb80d7ce Mon Sep 17 00:00:00 2001
+From: Arfrever Frehtes Taifersar Arahesis <[email protected]>
+Date: Wed, 26 Oct 2022 07:32:59 +0200
+Subject: [PATCH] Support only GLog >=0.6.0
+
+Fixes https://github.com/pytorch/pytorch/issues/58054
+---
+ c10/util/Logging.cpp | 12 +-----------
+ 1 file changed, 1 insertion(+), 11 deletions(-)
+
+diff --git a/c10/util/Logging.cpp b/c10/util/Logging.cpp
+index fe74e49548646..d8fce12ce4840 100644
+--- a/c10/util/Logging.cpp
++++ b/c10/util/Logging.cpp
+@@ -192,23 +192,13 @@ C10_DEFINE_int(
+     google::GLOG_WARNING,
+     "The minimum log level that caffe2 will output.");
+ 
+-// Google glog's api does not have an external function that allows one to check
+-// if glog is initialized or not. It does have an internal function - so we are
+-// declaring it here. This is a hack but has been used by a bunch of others too
+-// (e.g. Torch).
+-namespace google {
+-namespace glog_internal_namespace_ {
+-bool IsGoogleLoggingInitialized();
+-} // namespace glog_internal_namespace_
+-} // namespace google
+-
+ namespace c10 {
+ namespace {
+ 
+ void initGoogleLogging(char const* name) {
+ #if !defined(_MSC_VER)
+   // This trick can only be used on UNIX platforms
+-  if (!::google::glog_internal_namespace_::IsGoogleLoggingInitialized())
++  if (!::google::IsGoogleLoggingInitialized())
+ #endif
+   {
+     ::google::InitGoogleLogging(name);

Deleted: PKGBUILD
===================================================================
--- PKGBUILD    2023-04-10 16:10:27 UTC (rev 1444460)
+++ PKGBUILD    2023-04-10 16:11:07 UTC (rev 1444461)
@@ -1,438 +0,0 @@
-# Maintainer: Sven-Hendrik Haase <[email protected]>
-# Maintainer: Torsten Keßler <[email protected]>
-# Contributor: Stephen Zhang <zsrkmyn at gmail dot com>
-
-_pkgname=pytorch
-pkgbase="python-${_pkgname}"
-pkgname=("${pkgbase}" "${pkgbase}-opt" "${pkgbase}-cuda" "${pkgbase}-opt-cuda" "${pkgbase}-rocm" "${pkgbase}-opt-rocm")
-pkgver=2.0.0
-_pkgver=2.0.0
-pkgrel=3
-_pkgdesc='Tensors and Dynamic neural networks in Python with strong GPU acceleration'
-pkgdesc="${_pkgdesc}"
-arch=('x86_64')
-url="https://pytorch.org";
-license=('BSD')
-depends=('google-glog' 'gflags' 'opencv' 'openmp' 'nccl' 'pybind11' 'python' 'python-yaml' 'libuv'
-         'python-numpy' 'python-sympy' 'protobuf' 'ffmpeg4.4' 'python-future' 'qt5-base'
-         'intel-oneapi-mkl' 'python-typing_extensions')
-# Exclude the magma package here and add the corresponding {cuda, rocm/hip} version
-# to makedepends of the split packages.
-# The magma package does not allow to build the cuda and rocm/hip code at the same time,
-# so we need to work with the split packages magma-{cuda,hip}.
-makedepends=('python' 'python-setuptools' 'python-yaml' 'python-numpy' 'cmake' 'cuda'
-             'cudnn' 'git' 'rocm-hip-sdk' 'roctracer' 'miopen'
-             'ninja' 'pkgconfig' 'doxygen' 'vulkan-headers' 'shaderc')
-source=("${_pkgname}-${pkgver}::git+https://github.com/pytorch/pytorch.git#tag=v$_pkgver";
-        # generated using parse-submodules
-        
"${pkgname}-ARM_NEON_2_x86_SSE::git+https://github.com/intel/ARM_NEON_2_x86_SSE.git";
-        "${pkgname}-FP16::git+https://github.com/Maratyszcza/FP16.git";
-        "${pkgname}-FXdiv::git+https://github.com/Maratyszcza/FXdiv.git";
-        "${pkgname}-NNPACK::git+https://github.com/Maratyszcza/NNPACK.git";
-        "${pkgname}-PeachPy::git+https://github.com/malfet/PeachPy.git";
-        "${pkgname}-QNNPACK::git+https://github.com/pytorch/QNNPACK";
-        
"${pkgname}-VulkanMemoryAllocator::git+https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git";
-        "${pkgname}-XNNPACK::git+https://github.com/google/XNNPACK.git";
-        "${pkgname}-benchmark::git+https://github.com/google/benchmark.git";
-        "${pkgname}-cpuinfo::git+https://github.com/pytorch/cpuinfo.git";
-        "${pkgname}-cub::git+https://github.com/NVlabs/cub.git";
-        
"${pkgname}-cudnn-frontend::git+https://github.com/NVIDIA/cudnn-frontend.git";
-        "${pkgname}-cutlass::git+https://github.com/NVIDIA/cutlass.git";
-        "${pkgname}-eigen::git+https://gitlab.com/libeigen/eigen.git";
-        "${pkgname}-enum34::git+https://github.com/PeachPy/enum34.git";
-        "${pkgname}-fbgemm::git+https://github.com/pytorch/fbgemm";
-        "${pkgname}-fbjni::git+https://github.com/facebookincubator/fbjni.git";
-        "${pkgname}-flatbuffers::git+https://github.com/google/flatbuffers.git";
-        "${pkgname}-fmt::git+https://github.com/fmtlib/fmt.git";
-        "${pkgname}-foxi::git+https://github.com/houseroad/foxi.git";
-        "${pkgname}-gemmlowp::git+https://github.com/google/gemmlowp.git";
-        "${pkgname}-gloo::git+https://github.com/facebookincubator/gloo";
-        "${pkgname}-googletest::git+https://github.com/google/googletest.git";
-        "${pkgname}-ideep::git+https://github.com/intel/ideep";
-        "${pkgname}-ios-cmake::git+https://github.com/Yangqing/ios-cmake.git";
-        "${pkgname}-ittapi::git+https://github.com/intel/ittapi.git";
-        "${pkgname}-json::git+https://github.com/nlohmann/json.git";
-        "${pkgname}-kineto::git+https://github.com/pytorch/kineto";
-        "${pkgname}-nccl::git+https://github.com/NVIDIA/nccl";
-        "${pkgname}-onnx-tensorrt::git+https://github.com/onnx/onnx-tensorrt";
-        "${pkgname}-onnx::git+https://github.com/onnx/onnx.git";
-        "${pkgname}-pocketfft::git+https://github.com/mreineck/pocketfft";
-        
"${pkgname}-protobuf::git+https://github.com/protocolbuffers/protobuf.git";
-        "${pkgname}-psimd::git+https://github.com/Maratyszcza/psimd.git";
-        
"${pkgname}-pthreadpool::git+https://github.com/Maratyszcza/pthreadpool.git";
-        "${pkgname}-pybind11::git+https://github.com/pybind/pybind11.git";
-        "${pkgname}-six::git+https://github.com/benjaminp/six.git";
-        "${pkgname}-sleef::git+https://github.com/shibatch/sleef";
-        "${pkgname}-tbb::git+https://github.com/01org/tbb";
-        "${pkgname}-tensorpipe::git+https://github.com/pytorch/tensorpipe.git";
-        "${pkgname}-zstd::git+https://github.com/facebook/zstd.git";
-        fix_include_system.patch
-        use-system-libuv.patch
-        fix-building-for-torchvision.patch
-        87773.patch
-        disable-werror1.patch
-        disable-werror2.patch
-        disable-werror3.patch
-        disable-werror4.patch
-        ffmpeg4.4.patch
-        rocblas-batched.patch)
-b2sums=('SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        
'77f85808e480bd37dfb5f072d565466ae30a8f827f49ef97591fc2fc03bea54944eb1adeaa4a1e3466518a5640f575eda88d15b4c4d549a6f41f0bf4f2cfb086'
-        
'1f7ce593fa9fc62535ca1c3d85c996a73006cc614c7b7258160c3fc53cd52a1cfddcb18baf897f2e1223ecdfee52ca1471b91c9f845368ed6ac51b66f6e0e676'
-        
'fdea0b815d7750a4233c1d4668593020da017aea43cf4cb63b4c00d0852c7d34f0333e618fcf98b8df2185313a2089b8c2e9fe8ec3cfb0bf693598f9c61461a8'
-        
'0a8fc110a306e81beeb9ddfb3a1ddfd26aeda5e3f7adfb0f7c9bc3fd999c2dde62e0b407d3eca573097a53fd97329214e30e8767fb38d770197c7ec2b53daf18'
-        
'844d0b7b39777492a6d456fa845d5399f673b4bb37b62473393449c9ad0c29dca3c33276dc3980f2e766680100335c0acfb69d51781b79575f4da112d9c4018c'
-        
'985e331b2025e1ca5a4fba5188af0900f1f38bd0fd32c9173deb8bed7358af01e387d4654c7e0389e5f98b6f7cbed053226934d180b8b3b1270bdbbb36fc89b2'
-        
'96de2729b29c7ce3e4fdd8008f575d24c2c3ef9f85d6217e607902d7b870ac71b9290fde71e87a68d75bb75ef28eacbf5ce04e071146809ccf1e76a03f97b479'
-        
'eea86bbed0a37e1661035913536456f90e0cd1e687c7e4103011f0688bc8347b6fc2ff82019909c41e7c89ddbc3b80dde641e88abf406f4faebc71b0bb693d25'
-        
'6286b05d5b5143f117363e3ce3c7d693910f53845aeb6f501b3eea64aa71778cb2d7dcd4ac945d5321ef23b4da02446e86dedc6a9b6a998df4a7f3b1ce50550a'
-        
'232d2aca7cae8da511d1451890f8696d47da72276929ac5731a1a1a481d2a515fa7288bf33730d8ea2c892616551a74ca2439b53de6b1dfee156c30919120741')
-options=('!lto' '!debug')
-
-get_pyver () {
-  python -c 'import sys; print(str(sys.version_info[0]) + "." + str(sys.version_info[1]))'
-}
-
-prepare() {
-  cd "${srcdir}/${_pkgname}-${pkgver}"
-
-  # generated using parse-submodules
-  git submodule init
-
-  git config submodule."android/libs/fbjni".url "${srcdir}/${pkgname}"-fbjni
-  git config submodule."third_party/NNPACK".url "${srcdir}/${pkgname}"-NNPACK
-  git config submodule."third_party/NNPACK_deps/FP16".url 
"${srcdir}/${pkgname}"-FP16
-  git config submodule."third_party/NNPACK_deps/FXdiv".url 
"${srcdir}/${pkgname}"-FXdiv
-  git config submodule."third_party/NNPACK_deps/psimd".url 
"${srcdir}/${pkgname}"-psimd
-  git config submodule."third_party/NNPACK_deps/pthreadpool".url 
"${srcdir}/${pkgname}"-pthreadpool
-  git config submodule."third_party/QNNPACK".url "${srcdir}/${pkgname}"-QNNPACK
-  git config submodule."third_party/VulkanMemoryAllocator".url 
"${srcdir}/${pkgname}"-VulkanMemoryAllocator
-  git config submodule."third_party/XNNPACK".url "${srcdir}/${pkgname}"-XNNPACK
-  git config submodule."third_party/benchmark".url 
"${srcdir}/${pkgname}"-benchmark
-  git config submodule."third_party/cpuinfo".url "${srcdir}/${pkgname}"-cpuinfo
-  git config submodule."third_party/cub".url "${srcdir}/${pkgname}"-cub
-  git config submodule."third_party/cudnn_frontend".url 
"${srcdir}/${pkgname}"-cudnn-frontend
-  git config submodule."third_party/cutlass".url "${srcdir}/${pkgname}"-cutlass
-  git config submodule."third_party/eigen".url "${srcdir}/${pkgname}"-eigen
-  git config submodule."third_party/fbgemm".url "${srcdir}/${pkgname}"-fbgemm
-  git config submodule."third_party/flatbuffers".url 
"${srcdir}/${pkgname}"-flatbuffers
-  git config submodule."third_party/fmt".url "${srcdir}/${pkgname}"-fmt
-  git config submodule."third_party/foxi".url "${srcdir}/${pkgname}"-foxi
-  git config submodule."third_party/gemmlowp/gemmlowp".url 
"${srcdir}/${pkgname}"-gemmlowp
-  git config submodule."third_party/gloo".url "${srcdir}/${pkgname}"-gloo
-  git config submodule."third_party/googletest".url 
"${srcdir}/${pkgname}"-googletest
-  git config submodule."third_party/ideep".url "${srcdir}/${pkgname}"-ideep
-  git config submodule."third_party/ios-cmake".url 
"${srcdir}/${pkgname}"-ios-cmake
-  git config submodule."third_party/ittapi".url "${srcdir}/${pkgname}"-ittapi
-  git config submodule."third_party/kineto".url "${srcdir}/${pkgname}"-kineto
-  git config submodule."third_party/nccl/nccl".url "${srcdir}/${pkgname}"-nccl
-  git config submodule."third_party/neon2sse".url 
"${srcdir}/${pkgname}"-ARM_NEON_2_x86_SSE
-  git config submodule."third_party/nlohmann".url "${srcdir}/${pkgname}"-json
-  git config submodule."third_party/onnx".url "${srcdir}/${pkgname}"-onnx
-  git config submodule."third_party/onnx-tensorrt".url 
"${srcdir}/${pkgname}"-onnx-tensorrt
-  git config submodule."third_party/pocketfft".url 
"${srcdir}/${pkgname}"-pocketfft
-  git config submodule."third_party/protobuf".url 
"${srcdir}/${pkgname}"-protobuf
-  git config submodule."third_party/pybind11".url 
"${srcdir}/${pkgname}"-pybind11
-  git config submodule."third_party/python-enum".url 
"${srcdir}/${pkgname}"-enum34
-  git config submodule."third_party/python-peachpy".url 
"${srcdir}/${pkgname}"-PeachPy
-  git config submodule."third_party/python-six".url "${srcdir}/${pkgname}"-six
-  git config submodule."third_party/sleef".url "${srcdir}/${pkgname}"-sleef
-  git config submodule."third_party/tbb".url "${srcdir}/${pkgname}"-tbb
-  git config submodule."third_party/tensorpipe".url 
"${srcdir}/${pkgname}"-tensorpipe
-  git config submodule."third_party/zstd".url "${srcdir}/${pkgname}"-zstd
-
-  git -c protocol.file.allow=always submodule update --init --recursive
-
-  # Fix include with GCC 12
-  sed "1i#include <mutex>" -i third_party/kineto/libkineto/src/RoctracerActivityApi.h
-
-  # https://bugs.archlinux.org/task/64981
-  patch -N torch/utils/cpp_extension.py "${srcdir}"/fix_include_system.patch
-
-  # Use system libuv
-  patch -Np1 -i "${srcdir}"/use-system-libuv.patch
-
-  # fix https://github.com/pytorch/vision/issues/3695
-  patch -Np1 -i "${srcdir}/fix-building-for-torchvision.patch"
-
-  # Fix building against glog 0.6
-  patch -Np1 -i "${srcdir}/87773.patch"
-
-  # Disable -Werror
-  patch -Np1 -d third_party/fbgemm -i "${srcdir}/disable-werror1.patch"
-  patch -Np1 -d third_party/benchmark -i "${srcdir}/disable-werror2.patch"
-  patch -Np1 -d third_party/ideep/mkl-dnn -i "${srcdir}/disable-werror3.patch"
-  patch -Np1 -i "${srcdir}/disable-werror4.patch"
-
-  # build against ffmpeg4.4
-  patch -Np1 -i "${srcdir}/ffmpeg4.4.patch"
-
-  # fix https://github.com/pytorch/pytorch/issues/97640
-  patch -Np1 -i "${srcdir}/rocblas-batched.patch"
-
-  cd "${srcdir}"
-
-  cp -r "${_pkgname}-${pkgver}" "${_pkgname}-${pkgver}-opt"
-  cp -r "${_pkgname}-${pkgver}" "${_pkgname}-${pkgver}-cuda"
-  cp -r "${_pkgname}-${pkgver}" "${_pkgname}-${pkgver}-opt-cuda"
-  cp -r "${_pkgname}-${pkgver}" "${_pkgname}-${pkgver}-rocm"
-  cp -r "${_pkgname}-${pkgver}" "${_pkgname}-${pkgver}-opt-rocm"
-}
-
-# Common build configuration, called in all package() functions.
-_prepare() {
-  export VERBOSE=1
-  export PYTORCH_BUILD_VERSION="${pkgver}"
-  export PYTORCH_BUILD_NUMBER=1
-
-  # Check tools/setup_helpers/cmake.py, setup.py and CMakeLists.txt for a list of flags that can be set via env vars.
-  export ATEN_NO_TEST=ON  # do not build ATen tests
-  export USE_MKLDNN=ON
-  export BUILD_CUSTOM_PROTOBUF=OFF
-  export BUILD_CAFFE2=ON
-  export BUILD_CAFFE2_OPS=ON
-  # export BUILD_SHARED_LIBS=OFF
-  export USE_FFMPEG=ON
-  export USE_GFLAGS=ON
-  export USE_GLOG=ON
-  export USE_VULKAN=ON
-  export BUILD_BINARY=ON
-  export USE_OBSERVERS=ON
-  export USE_OPENCV=ON
-  # export USE_SYSTEM_LIBS=ON  # experimental, not all libs present in repos
-  export USE_SYSTEM_NCCL=ON
-  export NCCL_VERSION=$(pkg-config nccl --modversion)
-  export NCCL_VER_CODE=$(sed -n 's/^#define NCCL_VERSION_CODE\s*\(.*\).*/\1/p' /usr/include/nccl.h)
-  # export BUILD_SPLIT_CUDA=ON  # modern preferred build, but splits libs and symbols, ABI break
-  # export USE_FAST_NVCC=ON  # parallel build with nvcc, spawns too many processes
-  export USE_CUPTI_SO=ON  # make sure cupti.so is used as shared lib
-  export CC=/usr/bin/gcc
-  export CXX=/usr/bin/g++
-  export CUDAHOSTCXX=/opt/cuda/bin/g++
-  export CUDA_HOST_COMPILER="${CUDAHOSTCXX}"
-  export CUDA_HOME=/opt/cuda
-  # hide build-time CUDA devices
-  export CUDA_VISIBLE_DEVICES=""
-  export CUDNN_LIB_DIR=/usr/lib
-  export CUDNN_INCLUDE_DIR=/usr/include
-  export TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
-  # CUDA arch 8.7 is not supported (needed by Jetson boards, etc.)
-  export TORCH_CUDA_ARCH_LIST="5.2;5.3;6.0;6.1;6.2;7.0;7.2;7.5;8.0;8.6;8.9;9.0;9.0+PTX"  #include latest PTX for future compat
-  export OVERRIDE_TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST}"
-  export PYTORCH_ROCM_ARCH="gfx803;gfx900;gfx906;gfx908;gfx90a;gfx1030"
-
-  # Hack to make sure that the generated dnnl_config.h from onednn can be included.
-  export CXXFLAGS="${CXXFLAGS} -I third_party/ideep/mkl-dnn/third_party/oneDNN/include/"
-}
-
-#
-# Important note on the missing build() function
-#
-# We build the pytorch packages for the different backends directly in
-# corresponding package() functions. This change became necessary when
-# merging the two different GPU backends (CUDA and ROCm) into one package.
-# Both share a dependency on the magma package but compiled against
-# different GPU backends. This leads to two incompatible magma-{cuda,hip}
-# packages that cannot be installed side-by-side.
-# Therefore, we need to separately add magma-{cuda,hip} as (make-) dependencies
-# of pytorch-{cuda,rocm}.
-
-_package() {
-  # Prevent setup.py from re-running CMake and rebuilding
-  sed -e 's/RUN_BUILD_DEPS = True/RUN_BUILD_DEPS = False/g' -i setup.py
-
-  python setup.py install --root="${pkgdir}"/ --optimize=1 --skip-build
-
-  install -Dm644 LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
-
-  pytorchpath="usr/lib/python$(get_pyver)/site-packages/torch"
-  install -d "${pkgdir}/usr/lib"
-
-  # put CMake files in correct place
-  mv "${pkgdir}/${pytorchpath}/share/cmake" "${pkgdir}/usr/lib/cmake"
-
-  # put C++ API in correct place
-  mv "${pkgdir}/${pytorchpath}/include" "${pkgdir}/usr/include"
-  find "${pkgdir}/${pytorchpath}"/lib/ -type f,l \( -iname '*.so' -or -iname '*.so*' \) -print0 | while read -rd $'\0' _lib; do
-    mv "${_lib}" "${pkgdir}"/usr/lib/
-  done
-
-  # clean up duplicates
-  rm -r "${pkgdir}/usr/include/pybind11"
-  rm "${pkgdir}"/usr/include/*.h
-
-  # Python module is hardcoded so look there at runtime
-  ln -s /usr/include "${pkgdir}/${pytorchpath}/include"
-  find "${pkgdir}"/usr/lib -maxdepth 1 -type f,l \( -iname '*.so' -or -iname '*.so*' \) -print0 | while read -rd $'\0' _lib; do
-    ln -s ${_lib#"$pkgdir"} "${pkgdir}/${pytorchpath}/lib/"
-  done
-}
-
-package_python-pytorch() {
-  pkgdesc="${_pkgdesc}"
-
-  cd "${srcdir}/${_pkgname}-${pkgver}"
-  echo "Building without cuda or rocm and without non-x86-64 optimizations"
-  _prepare
-  export USE_CUDA=0
-  export USE_CUDNN=0
-  export USE_ROCM=0
-  echo "add_definitions(-march=x86-64)" >> cmake/MiscCheck.cmake
-  # this horrible hack is necessary because the current release
-  # ships inconsistent CMake which tries to build objects before
-  # their dependencies, build twice when dependencies are available
-  python setup.py build || python setup.py build
-
-  _package
-}
-
-package_python-pytorch-opt() {
-  pkgdesc="${_pkgdesc} (with AVX2 CPU optimizations)"
-  conflicts=(python-pytorch)
-  provides=(python-pytorch)
-
-  cd "${srcdir}/${_pkgname}-${pkgver}-opt"
-  echo "Building without cuda or rocm and with non-x86-64 optimizations"
-  _prepare
-  export USE_CUDA=0
-  export USE_CUDNN=0
-  export USE_ROCM=0
-  echo "add_definitions(-march=haswell)" >> cmake/MiscCheck.cmake
-  # same horrible hack as above
-  python setup.py build || python setup.py build
-
-  _package
-}
-
-package_python-pytorch-cuda() {
-  pkgdesc="${_pkgdesc} (with CUDA)"
-  depends+=(cuda cudnn magma-cuda)
-  conflicts=(python-pytorch)
-  provides=(python-pytorch)
-
-  cd "${srcdir}/${_pkgname}-${pkgver}-cuda"
-  echo "Building with cuda and without non-x86-64 optimizations"
-  _prepare
-  export USE_CUDA=1
-  export USE_CUDNN=1
-  export USE_ROCM=0
-  cd "${srcdir}/${_pkgname}-${pkgver}-cuda"
-  echo "add_definitions(-march=x86-64)" >> cmake/MiscCheck.cmake
-  # same horrible hack as above
-  python setup.py build || python setup.py build
-
-  _package
-  # oneDNN from the repos conflicts with the version in the ideep submodule,
-  # so we have to add the dependency after building the package.
-  depends+=(onednn)
-}
-
-package_python-pytorch-opt-cuda() {
-  pkgdesc="${_pkgdesc} (with CUDA and AVX2 CPU optimizations)"
-  depends+=(cuda cudnn magma-cuda)
-  conflicts=(python-pytorch)
-  provides=(python-pytorch python-pytorch-cuda)
-
-  cd "${srcdir}/${_pkgname}-${pkgver}-opt-cuda"
-  echo "Building with cuda and with non-x86-64 optimizations"
-  export USE_CUDA=1
-  export USE_CUDNN=1
-  export USE_ROCM=0
-  _prepare
-  echo "add_definitions(-march=haswell)" >> cmake/MiscCheck.cmake
-  # same horrible hack as above
-  python setup.py build || python setup.py build
-
-  _package
-  # see above
-  depends+=(onednn)
-}
-
-package_python-pytorch-rocm() {
-  pkgdesc="${_pkgdesc} (with ROCm)"
-  depends+=(rocm-hip-sdk roctracer miopen magma-hip)
-  conflicts=(python-pytorch)
-  provides=(python-pytorch)
-
-  cd "${srcdir}/${_pkgname}-${pkgver}-rocm"
-  echo "Building with rocm and without non-x86-64 optimizations"
-  _prepare
-  export USE_CUDA=0
-  export USE_CUDNN=0
-  export USE_ROCM=1
-  echo "add_definitions(-march=x86-64)" >> cmake/MiscCheck.cmake
-  # Conversion of CUDA to ROCm source files
-  python tools/amd_build/build_amd.py
-  # same horrible hack as above
-  python setup.py build || python setup.py build
-
-  _package
-  # see above
-  depends+=(onednn)
-}
-
-package_python-pytorch-opt-rocm() {
-  pkgdesc="${_pkgdesc} (with ROCm and AVX2 CPU optimizations)"
-  depends+=(rocm-hip-sdk roctracer miopen magma-hip)
-  conflicts=(python-pytorch)
-  provides=(python-pytorch python-pytorch-rocm)
-
-  cd "${srcdir}/${_pkgname}-${pkgver}-opt-rocm"
-  echo "Building with rocm and with non-x86-64 optimizations"
-  _prepare
-  export USE_CUDA=0
-  export USE_CUDNN=0
-  export USE_ROCM=1
-  echo "add_definitions(-march=haswell)" >> cmake/MiscCheck.cmake
-  # Conversion of CUDA to ROCm source files
-  python tools/amd_build/build_amd.py
-  # same horrible hack as above
-  python setup.py build || python setup.py build
-
-  _package
-  # see above
-  depends+=(onednn)
-}
-
-# vim:set ts=2 sw=2 et:

Copied: python-pytorch/repos/community-staging-x86_64/PKGBUILD (from rev 1444460, python-pytorch/trunk/PKGBUILD)
===================================================================
--- PKGBUILD                            (rev 0)
+++ PKGBUILD    2023-04-10 16:11:07 UTC (rev 1444461)
@@ -0,0 +1,438 @@
+# Maintainer: Sven-Hendrik Haase <[email protected]>
+# Maintainer: Torsten Keßler <[email protected]>
+# Contributor: Stephen Zhang <zsrkmyn at gmail dot com>
+
+_pkgname=pytorch
+pkgbase="python-${_pkgname}"
+pkgname=("${pkgbase}" "${pkgbase}-opt" "${pkgbase}-cuda" "${pkgbase}-opt-cuda" "${pkgbase}-rocm" "${pkgbase}-opt-rocm")
+pkgver=2.0.0
+_pkgver=2.0.0
+pkgrel=4
+_pkgdesc='Tensors and Dynamic neural networks in Python with strong GPU acceleration'
+pkgdesc="${_pkgdesc}"
+arch=('x86_64')
+url="https://pytorch.org";
+license=('BSD')
+depends=('google-glog' 'gflags' 'opencv' 'openmp' 'nccl' 'pybind11' 'python' 'python-yaml' 'libuv'
+         'python-numpy' 'python-sympy' 'protobuf' 'ffmpeg4.4' 'python-future' 'qt5-base'
+         'intel-oneapi-mkl' 'python-typing_extensions')
+# Exclude the magma package here and add the corresponding {cuda, rocm/hip} version
+# to makedepends of the split packages.
+# The magma package does not allow to build the cuda and rocm/hip code at the same time,
+# so we need to work with the split packages magma-{cuda,hip}.
+makedepends=('python' 'python-setuptools' 'python-yaml' 'python-numpy' 'cmake' 'cuda'
+             'cudnn' 'git' 'rocm-hip-sdk' 'roctracer' 'miopen'
+             'ninja' 'pkgconfig' 'doxygen' 'vulkan-headers' 'shaderc')
+source=("${_pkgname}-${pkgver}::git+https://github.com/pytorch/pytorch.git#tag=v$_pkgver";
+        # generated using parse-submodules
+        
"${pkgname}-ARM_NEON_2_x86_SSE::git+https://github.com/intel/ARM_NEON_2_x86_SSE.git";
+        "${pkgname}-FP16::git+https://github.com/Maratyszcza/FP16.git";
+        "${pkgname}-FXdiv::git+https://github.com/Maratyszcza/FXdiv.git";
+        "${pkgname}-NNPACK::git+https://github.com/Maratyszcza/NNPACK.git";
+        "${pkgname}-PeachPy::git+https://github.com/malfet/PeachPy.git";
+        "${pkgname}-QNNPACK::git+https://github.com/pytorch/QNNPACK";
+        
"${pkgname}-VulkanMemoryAllocator::git+https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git";
+        "${pkgname}-XNNPACK::git+https://github.com/google/XNNPACK.git";
+        "${pkgname}-benchmark::git+https://github.com/google/benchmark.git";
+        "${pkgname}-cpuinfo::git+https://github.com/pytorch/cpuinfo.git";
+        "${pkgname}-cub::git+https://github.com/NVlabs/cub.git";
+        
"${pkgname}-cudnn-frontend::git+https://github.com/NVIDIA/cudnn-frontend.git";
+        "${pkgname}-cutlass::git+https://github.com/NVIDIA/cutlass.git";
+        "${pkgname}-eigen::git+https://gitlab.com/libeigen/eigen.git";
+        "${pkgname}-enum34::git+https://github.com/PeachPy/enum34.git";
+        "${pkgname}-fbgemm::git+https://github.com/pytorch/fbgemm";
+        "${pkgname}-fbjni::git+https://github.com/facebookincubator/fbjni.git";
+        "${pkgname}-flatbuffers::git+https://github.com/google/flatbuffers.git";
+        "${pkgname}-fmt::git+https://github.com/fmtlib/fmt.git";
+        "${pkgname}-foxi::git+https://github.com/houseroad/foxi.git";
+        "${pkgname}-gemmlowp::git+https://github.com/google/gemmlowp.git";
+        "${pkgname}-gloo::git+https://github.com/facebookincubator/gloo";
+        "${pkgname}-googletest::git+https://github.com/google/googletest.git";
+        "${pkgname}-ideep::git+https://github.com/intel/ideep";
+        "${pkgname}-ios-cmake::git+https://github.com/Yangqing/ios-cmake.git";
+        "${pkgname}-ittapi::git+https://github.com/intel/ittapi.git";
+        "${pkgname}-json::git+https://github.com/nlohmann/json.git";
+        "${pkgname}-kineto::git+https://github.com/pytorch/kineto";
+        "${pkgname}-nccl::git+https://github.com/NVIDIA/nccl";
+        "${pkgname}-onnx-tensorrt::git+https://github.com/onnx/onnx-tensorrt";
+        "${pkgname}-onnx::git+https://github.com/onnx/onnx.git";
+        "${pkgname}-pocketfft::git+https://github.com/mreineck/pocketfft";
+        
"${pkgname}-protobuf::git+https://github.com/protocolbuffers/protobuf.git";
+        "${pkgname}-psimd::git+https://github.com/Maratyszcza/psimd.git";
+        
"${pkgname}-pthreadpool::git+https://github.com/Maratyszcza/pthreadpool.git";
+        "${pkgname}-pybind11::git+https://github.com/pybind/pybind11.git";
+        "${pkgname}-six::git+https://github.com/benjaminp/six.git";
+        "${pkgname}-sleef::git+https://github.com/shibatch/sleef";
+        "${pkgname}-tbb::git+https://github.com/01org/tbb";
+        "${pkgname}-tensorpipe::git+https://github.com/pytorch/tensorpipe.git";
+        "${pkgname}-zstd::git+https://github.com/facebook/zstd.git";
+        fix_include_system.patch
+        use-system-libuv.patch
+        fix-building-for-torchvision.patch
+        87773.patch
+        disable-werror1.patch
+        disable-werror2.patch
+        disable-werror3.patch
+        disable-werror4.patch
+        ffmpeg4.4.patch
+        rocblas-batched.patch)
+b2sums=('SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        
'77f85808e480bd37dfb5f072d565466ae30a8f827f49ef97591fc2fc03bea54944eb1adeaa4a1e3466518a5640f575eda88d15b4c4d549a6f41f0bf4f2cfb086'
+        
'1f7ce593fa9fc62535ca1c3d85c996a73006cc614c7b7258160c3fc53cd52a1cfddcb18baf897f2e1223ecdfee52ca1471b91c9f845368ed6ac51b66f6e0e676'
+        
'fdea0b815d7750a4233c1d4668593020da017aea43cf4cb63b4c00d0852c7d34f0333e618fcf98b8df2185313a2089b8c2e9fe8ec3cfb0bf693598f9c61461a8'
+        
'0a8fc110a306e81beeb9ddfb3a1ddfd26aeda5e3f7adfb0f7c9bc3fd999c2dde62e0b407d3eca573097a53fd97329214e30e8767fb38d770197c7ec2b53daf18'
+        
'844d0b7b39777492a6d456fa845d5399f673b4bb37b62473393449c9ad0c29dca3c33276dc3980f2e766680100335c0acfb69d51781b79575f4da112d9c4018c'
+        
'985e331b2025e1ca5a4fba5188af0900f1f38bd0fd32c9173deb8bed7358af01e387d4654c7e0389e5f98b6f7cbed053226934d180b8b3b1270bdbbb36fc89b2'
+        
'96de2729b29c7ce3e4fdd8008f575d24c2c3ef9f85d6217e607902d7b870ac71b9290fde71e87a68d75bb75ef28eacbf5ce04e071146809ccf1e76a03f97b479'
+        
'eea86bbed0a37e1661035913536456f90e0cd1e687c7e4103011f0688bc8347b6fc2ff82019909c41e7c89ddbc3b80dde641e88abf406f4faebc71b0bb693d25'
+        
'6286b05d5b5143f117363e3ce3c7d693910f53845aeb6f501b3eea64aa71778cb2d7dcd4ac945d5321ef23b4da02446e86dedc6a9b6a998df4a7f3b1ce50550a'
+        
'232d2aca7cae8da511d1451890f8696d47da72276929ac5731a1a1a481d2a515fa7288bf33730d8ea2c892616551a74ca2439b53de6b1dfee156c30919120741')
+options=('!lto' '!debug')
+
+get_pyver () {
+  python -c 'import sys; print(str(sys.version_info[0]) + "." + str(sys.version_info[1]))'
+}
+
+prepare() {
+  cd "${srcdir}/${_pkgname}-${pkgver}"
+
+  # generated using parse-submodules
+  git submodule init
+
+  git config submodule."android/libs/fbjni".url "${srcdir}/${pkgname}"-fbjni
+  git config submodule."third_party/NNPACK".url "${srcdir}/${pkgname}"-NNPACK
+  git config submodule."third_party/NNPACK_deps/FP16".url 
"${srcdir}/${pkgname}"-FP16
+  git config submodule."third_party/NNPACK_deps/FXdiv".url 
"${srcdir}/${pkgname}"-FXdiv
+  git config submodule."third_party/NNPACK_deps/psimd".url 
"${srcdir}/${pkgname}"-psimd
+  git config submodule."third_party/NNPACK_deps/pthreadpool".url 
"${srcdir}/${pkgname}"-pthreadpool
+  git config submodule."third_party/QNNPACK".url "${srcdir}/${pkgname}"-QNNPACK
+  git config submodule."third_party/VulkanMemoryAllocator".url 
"${srcdir}/${pkgname}"-VulkanMemoryAllocator
+  git config submodule."third_party/XNNPACK".url "${srcdir}/${pkgname}"-XNNPACK
+  git config submodule."third_party/benchmark".url 
"${srcdir}/${pkgname}"-benchmark
+  git config submodule."third_party/cpuinfo".url "${srcdir}/${pkgname}"-cpuinfo
+  git config submodule."third_party/cub".url "${srcdir}/${pkgname}"-cub
+  git config submodule."third_party/cudnn_frontend".url 
"${srcdir}/${pkgname}"-cudnn-frontend
+  git config submodule."third_party/cutlass".url "${srcdir}/${pkgname}"-cutlass
+  git config submodule."third_party/eigen".url "${srcdir}/${pkgname}"-eigen
+  git config submodule."third_party/fbgemm".url "${srcdir}/${pkgname}"-fbgemm
+  git config submodule."third_party/flatbuffers".url 
"${srcdir}/${pkgname}"-flatbuffers
+  git config submodule."third_party/fmt".url "${srcdir}/${pkgname}"-fmt
+  git config submodule."third_party/foxi".url "${srcdir}/${pkgname}"-foxi
+  git config submodule."third_party/gemmlowp/gemmlowp".url 
"${srcdir}/${pkgname}"-gemmlowp
+  git config submodule."third_party/gloo".url "${srcdir}/${pkgname}"-gloo
+  git config submodule."third_party/googletest".url 
"${srcdir}/${pkgname}"-googletest
+  git config submodule."third_party/ideep".url "${srcdir}/${pkgname}"-ideep
+  git config submodule."third_party/ios-cmake".url 
"${srcdir}/${pkgname}"-ios-cmake
+  git config submodule."third_party/ittapi".url "${srcdir}/${pkgname}"-ittapi
+  git config submodule."third_party/kineto".url "${srcdir}/${pkgname}"-kineto
+  git config submodule."third_party/nccl/nccl".url "${srcdir}/${pkgname}"-nccl
+  git config submodule."third_party/neon2sse".url 
"${srcdir}/${pkgname}"-ARM_NEON_2_x86_SSE
+  git config submodule."third_party/nlohmann".url "${srcdir}/${pkgname}"-json
+  git config submodule."third_party/onnx".url "${srcdir}/${pkgname}"-onnx
+  git config submodule."third_party/onnx-tensorrt".url 
"${srcdir}/${pkgname}"-onnx-tensorrt
+  git config submodule."third_party/pocketfft".url 
"${srcdir}/${pkgname}"-pocketfft
+  git config submodule."third_party/protobuf".url 
"${srcdir}/${pkgname}"-protobuf
+  git config submodule."third_party/pybind11".url 
"${srcdir}/${pkgname}"-pybind11
+  git config submodule."third_party/python-enum".url 
"${srcdir}/${pkgname}"-enum34
+  git config submodule."third_party/python-peachpy".url 
"${srcdir}/${pkgname}"-PeachPy
+  git config submodule."third_party/python-six".url "${srcdir}/${pkgname}"-six
+  git config submodule."third_party/sleef".url "${srcdir}/${pkgname}"-sleef
+  git config submodule."third_party/tbb".url "${srcdir}/${pkgname}"-tbb
+  git config submodule."third_party/tensorpipe".url 
"${srcdir}/${pkgname}"-tensorpipe
+  git config submodule."third_party/zstd".url "${srcdir}/${pkgname}"-zstd
+
+  git -c protocol.file.allow=always submodule update --init --recursive
+
+  # Fix include with GCC 12
+  sed "1i#include <mutex>" -i third_party/kineto/libkineto/src/RoctracerActivityApi.h
+
+  # https://bugs.archlinux.org/task/64981
+  patch -N torch/utils/cpp_extension.py "${srcdir}"/fix_include_system.patch
+
+  # Use system libuv
+  patch -Np1 -i "${srcdir}"/use-system-libuv.patch
+
+  # fix https://github.com/pytorch/vision/issues/3695
+  patch -Np1 -i "${srcdir}/fix-building-for-torchvision.patch"
+
+  # Fix building against glog 0.6
+  patch -Np1 -i "${srcdir}/87773.patch"
+
+  # Disable -Werror
+  patch -Np1 -d third_party/fbgemm -i "${srcdir}/disable-werror1.patch"
+  patch -Np1 -d third_party/benchmark -i "${srcdir}/disable-werror2.patch"
+  patch -Np1 -d third_party/ideep/mkl-dnn -i "${srcdir}/disable-werror3.patch"
+  patch -Np1 -i "${srcdir}/disable-werror4.patch"
+
+  # build against ffmpeg4.4
+  patch -Np1 -i "${srcdir}/ffmpeg4.4.patch"
+
+  # fix https://github.com/pytorch/pytorch/issues/97640
+  patch -Np1 -i "${srcdir}/rocblas-batched.patch"
+
+  cd "${srcdir}"
+
+  cp -r "${_pkgname}-${pkgver}" "${_pkgname}-${pkgver}-opt"
+  cp -r "${_pkgname}-${pkgver}" "${_pkgname}-${pkgver}-cuda"
+  cp -r "${_pkgname}-${pkgver}" "${_pkgname}-${pkgver}-opt-cuda"
+  cp -r "${_pkgname}-${pkgver}" "${_pkgname}-${pkgver}-rocm"
+  cp -r "${_pkgname}-${pkgver}" "${_pkgname}-${pkgver}-opt-rocm"
+}
+
+# Common build configuration, called in all package() functions.
+_prepare() {
+  export VERBOSE=1
+  export PYTORCH_BUILD_VERSION="${pkgver}"
+  export PYTORCH_BUILD_NUMBER=1
+
+  # Check tools/setup_helpers/cmake.py, setup.py and CMakeLists.txt for a list of flags that can be set via env vars.
+  export ATEN_NO_TEST=ON  # do not build ATen tests
+  export USE_MKLDNN=ON
+  export BUILD_CUSTOM_PROTOBUF=OFF
+  export BUILD_CAFFE2=ON
+  export BUILD_CAFFE2_OPS=ON
+  # export BUILD_SHARED_LIBS=OFF
+  export USE_FFMPEG=ON
+  export USE_GFLAGS=ON
+  export USE_GLOG=ON
+  export USE_VULKAN=ON
+  export BUILD_BINARY=ON
+  export USE_OBSERVERS=ON
+  export USE_OPENCV=ON
+  # export USE_SYSTEM_LIBS=ON  # experimental, not all libs present in repos
+  export USE_SYSTEM_NCCL=ON
+  export NCCL_VERSION=$(pkg-config nccl --modversion)
+  export NCCL_VER_CODE=$(sed -n 's/^#define NCCL_VERSION_CODE\s*\(.*\).*/\1/p' /usr/include/nccl.h)
+  # export BUILD_SPLIT_CUDA=ON  # modern preferred build, but splits libs and symbols, ABI break
+  # export USE_FAST_NVCC=ON  # parallel build with nvcc, spawns too many processes
+  export USE_CUPTI_SO=ON  # make sure cupti.so is used as shared lib
+  export CC=/usr/bin/gcc
+  export CXX=/usr/bin/g++
+  export CUDAHOSTCXX=/opt/cuda/bin/g++
+  export CUDA_HOST_COMPILER="${CUDAHOSTCXX}"
+  export CUDA_HOME=/opt/cuda
+  # hide build-time CUDA devices
+  export CUDA_VISIBLE_DEVICES=""
+  export CUDNN_LIB_DIR=/usr/lib
+  export CUDNN_INCLUDE_DIR=/usr/include
+  export TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
+  # CUDA arch 8.7 is not supported (needed by Jetson boards, etc.)
+  export TORCH_CUDA_ARCH_LIST="5.2;5.3;6.0;6.1;6.2;7.0;7.2;7.5;8.0;8.6;8.9;9.0;9.0+PTX"  #include latest PTX for future compat
+  export OVERRIDE_TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST}"
+  export PYTORCH_ROCM_ARCH="gfx803;gfx900;gfx906;gfx908;gfx90a;gfx1030"
+
+  # Hack to make sure that the generated dnnl_config.h from onednn can be included.
+  export CXXFLAGS="${CXXFLAGS} -I third_party/ideep/mkl-dnn/third_party/oneDNN/include/"
+}
+
+#
+# Important note on the missing build() function
+#
+# We build the pytorch packages for the different backends directly in
+# corresponding package() functions. This change became necessary when
+# merging the two different GPU backends (CUDA and ROCm) into one package.
+# Both share a dependency on the magma package but compiled against
+# different GPU backends. This leads to two incompatible magma-{cuda,hip}
+# packages that cannot be installed side-by-side.
+# Therefore, we need to separately add magma-{cuda,hip} as (make-) dependencies
+# of pytorch-{cuda,rocm}.
+
+_package() {
+  # Prevent setup.py from re-running CMake and rebuilding
+  sed -e 's/RUN_BUILD_DEPS = True/RUN_BUILD_DEPS = False/g' -i setup.py
+
+  python setup.py install --root="${pkgdir}"/ --optimize=1 --skip-build
+
+  install -Dm644 LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
+
+  pytorchpath="usr/lib/python$(get_pyver)/site-packages/torch"
+  install -d "${pkgdir}/usr/lib"
+
+  # put CMake files in correct place
+  mv "${pkgdir}/${pytorchpath}/share/cmake" "${pkgdir}/usr/lib/cmake"
+
+  # put C++ API in correct place
+  mv "${pkgdir}/${pytorchpath}/include" "${pkgdir}/usr/include"
+  find "${pkgdir}/${pytorchpath}"/lib/ -type f,l \( -iname '*.so' -or -iname '*.so*' \) -print0 | while read -rd $'\0' _lib; do
+    mv "${_lib}" "${pkgdir}"/usr/lib/
+  done
+
+  # clean up duplicates
+  rm -r "${pkgdir}/usr/include/pybind11"
+  rm "${pkgdir}"/usr/include/*.h
+
+  # Python module is hardcoded so look there at runtime
+  ln -s /usr/include "${pkgdir}/${pytorchpath}/include"
+  find "${pkgdir}"/usr/lib -maxdepth 1 -type f,l \( -iname '*.so' -or -iname '*.so*' \) -print0 | while read -rd $'\0' _lib; do
+    ln -s ${_lib#"$pkgdir"} "${pkgdir}/${pytorchpath}/lib/"
+  done
+}
+
+package_python-pytorch() {
+  pkgdesc="${_pkgdesc}"
+
+  cd "${srcdir}/${_pkgname}-${pkgver}"
+  echo "Building without cuda or rocm and without non-x86-64 optimizations"
+  _prepare
+  export USE_CUDA=0
+  export USE_CUDNN=0
+  export USE_ROCM=0
+  echo "add_definitions(-march=x86-64)" >> cmake/MiscCheck.cmake
+  # this horrible hack is necessary because the current release
+  # ships inconsistent CMake which tries to build objects before
+  # their dependencies, build twice when dependencies are available
+  python setup.py build || python setup.py build
+
+  _package
+}
+
+package_python-pytorch-opt() {
+  pkgdesc="${_pkgdesc} (with AVX2 CPU optimizations)"
+  conflicts=(python-pytorch)
+  provides=(python-pytorch)
+
+  cd "${srcdir}/${_pkgname}-${pkgver}-opt"
+  echo "Building without cuda or rocm and with non-x86-64 optimizations"
+  _prepare
+  export USE_CUDA=0
+  export USE_CUDNN=0
+  export USE_ROCM=0
+  echo "add_definitions(-march=haswell)" >> cmake/MiscCheck.cmake
+  # same horrible hack as above
+  python setup.py build || python setup.py build
+
+  _package
+}
+
+package_python-pytorch-cuda() {
+  pkgdesc="${_pkgdesc} (with CUDA)"
+  depends+=(cuda cudnn magma-cuda)
+  conflicts=(python-pytorch)
+  provides=(python-pytorch)
+
+  cd "${srcdir}/${_pkgname}-${pkgver}-cuda"
+  echo "Building with cuda and without non-x86-64 optimizations"
+  _prepare
+  export USE_CUDA=1
+  export USE_CUDNN=1
+  export USE_ROCM=0
+  cd "${srcdir}/${_pkgname}-${pkgver}-cuda"
+  echo "add_definitions(-march=x86-64)" >> cmake/MiscCheck.cmake
+  # same horrible hack as above
+  python setup.py build || python setup.py build
+
+  _package
+  # oneDNN from the repos conflicts with the version in the ideep submodule,
+  # so we have to add the dependency after building the package.
+  depends+=(onednn)
+}
+
+package_python-pytorch-opt-cuda() {
+  pkgdesc="${_pkgdesc} (with CUDA and AVX2 CPU optimizations)"
+  depends+=(cuda cudnn magma-cuda)
+  conflicts=(python-pytorch)
+  provides=(python-pytorch python-pytorch-cuda)
+
+  cd "${srcdir}/${_pkgname}-${pkgver}-opt-cuda"
+  echo "Building with cuda and with non-x86-64 optimizations"
+  export USE_CUDA=1
+  export USE_CUDNN=1
+  export USE_ROCM=0
+  _prepare
+  echo "add_definitions(-march=haswell)" >> cmake/MiscCheck.cmake
+  # same horrible hack as above
+  python setup.py build || python setup.py build
+
+  _package
+  # see above
+  depends+=(onednn)
+}
+
+package_python-pytorch-rocm() {
+  pkgdesc="${_pkgdesc} (with ROCm)"
+  depends+=(rocm-hip-sdk roctracer miopen magma-hip)
+  conflicts=(python-pytorch)
+  provides=(python-pytorch)
+
+  cd "${srcdir}/${_pkgname}-${pkgver}-rocm"
+  echo "Building with rocm and without non-x86-64 optimizations"
+  _prepare
+  export USE_CUDA=0
+  export USE_CUDNN=0
+  export USE_ROCM=1
+  echo "add_definitions(-march=x86-64)" >> cmake/MiscCheck.cmake
+  # Conversion of CUDA to ROCm source files
+  python tools/amd_build/build_amd.py
+  # same horrible hack as above
+  python setup.py build || python setup.py build
+
+  _package
+  # see above
+  depends+=(onednn)
+}
+
+package_python-pytorch-opt-rocm() {
+  pkgdesc="${_pkgdesc} (with ROCm and AVX2 CPU optimizations)"
+  depends+=(rocm-hip-sdk roctracer miopen magma-hip)
+  conflicts=(python-pytorch)
+  provides=(python-pytorch python-pytorch-rocm)
+
+  cd "${srcdir}/${_pkgname}-${pkgver}-opt-rocm"
+  echo "Building with rocm and with non-x86-64 optimizations"
+  _prepare
+  export USE_CUDA=0
+  export USE_CUDNN=0
+  export USE_ROCM=1
+  echo "add_definitions(-march=haswell)" >> cmake/MiscCheck.cmake
+  # Conversion of CUDA to ROCm source files
+  python tools/amd_build/build_amd.py
+  # same horrible hack as above
+  python setup.py build || python setup.py build
+
+  _package
+  # see above
+  depends+=(onednn)
+}
+
+# vim:set ts=2 sw=2 et:

Deleted: disable-werror1.patch
===================================================================
--- disable-werror1.patch       2023-04-10 16:10:27 UTC (rev 1444460)
+++ disable-werror1.patch       2023-04-10 16:11:07 UTC (rev 1444461)
@@ -1,13 +0,0 @@
-diff --git a/CMakeLists.txt b/CMakeLists.txt
-index 58dcb9a..269a7a6 100644
---- a/CMakeLists.txt
-+++ b/CMakeLists.txt
-@@ -144,7 +144,7 @@ if(MSVC)
- else(MSVC)
-   string(APPEND CMAKE_CXX_FLAGS " -Wall")
-   string(APPEND CMAKE_CXX_FLAGS " -Wextra")
--  string(APPEND CMAKE_CXX_FLAGS " -Werror")
-+  # string(APPEND CMAKE_CXX_FLAGS " -Werror")
-   string(APPEND CMAKE_CXX_FLAGS " -Wno-deprecated-declarations")
-   target_compile_options(fbgemm_avx2 PRIVATE
-     "-m64" "-mavx2" "-mf16c" "-mfma")

Copied: python-pytorch/repos/community-staging-x86_64/disable-werror1.patch (from rev 1444460, python-pytorch/trunk/disable-werror1.patch)
===================================================================
--- disable-werror1.patch                               (rev 0)
+++ disable-werror1.patch       2023-04-10 16:11:07 UTC (rev 1444461)
@@ -0,0 +1,13 @@
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index 58dcb9a..269a7a6 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -144,7 +144,7 @@ if(MSVC)
+ else(MSVC)
+   string(APPEND CMAKE_CXX_FLAGS " -Wall")
+   string(APPEND CMAKE_CXX_FLAGS " -Wextra")
+-  string(APPEND CMAKE_CXX_FLAGS " -Werror")
++  # string(APPEND CMAKE_CXX_FLAGS " -Werror")
+   string(APPEND CMAKE_CXX_FLAGS " -Wno-deprecated-declarations")
+   target_compile_options(fbgemm_avx2 PRIVATE
+     "-m64" "-mavx2" "-mf16c" "-mfma")

Deleted: disable-werror2.patch
===================================================================
--- disable-werror2.patch       2023-04-10 16:10:27 UTC (rev 1444460)
+++ disable-werror2.patch       2023-04-10 16:11:07 UTC (rev 1444461)
@@ -1,13 +0,0 @@
-diff --git a/CMakeLists.txt b/CMakeLists.txt
-index b8852e4..cf1d447 100644
---- a/CMakeLists.txt
-+++ b/CMakeLists.txt
-@@ -19,7 +19,7 @@ option(BENCHMARK_ENABLE_TESTING "Enable testing of the benchmark library." ON)
- option(BENCHMARK_ENABLE_EXCEPTIONS "Enable the use of exceptions in the benchmark library." ON)
- option(BENCHMARK_ENABLE_LTO "Enable link time optimisation of the benchmark library." OFF)
- option(BENCHMARK_USE_LIBCXX "Build and test using libc++ as the standard library." OFF)
--option(BENCHMARK_ENABLE_WERROR "Build Release candidates with -Werror." ON)
-+option(BENCHMARK_ENABLE_WERROR "Build Release candidates with -Werror." OFF)
- option(BENCHMARK_FORCE_WERROR "Build Release candidates with -Werror regardless of compiler issues." OFF)
- 
- if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "PGI")

Copied: python-pytorch/repos/community-staging-x86_64/disable-werror2.patch (from rev 1444460, python-pytorch/trunk/disable-werror2.patch)
===================================================================
--- disable-werror2.patch                               (rev 0)
+++ disable-werror2.patch       2023-04-10 16:11:07 UTC (rev 1444461)
@@ -0,0 +1,13 @@
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index b8852e4..cf1d447 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -19,7 +19,7 @@ option(BENCHMARK_ENABLE_TESTING "Enable testing of the benchmark library." ON)
+ option(BENCHMARK_ENABLE_EXCEPTIONS "Enable the use of exceptions in the benchmark library." ON)
+ option(BENCHMARK_ENABLE_LTO "Enable link time optimisation of the benchmark library." OFF)
+ option(BENCHMARK_USE_LIBCXX "Build and test using libc++ as the standard library." OFF)
+-option(BENCHMARK_ENABLE_WERROR "Build Release candidates with -Werror." ON)
++option(BENCHMARK_ENABLE_WERROR "Build Release candidates with -Werror." OFF)
+ option(BENCHMARK_FORCE_WERROR "Build Release candidates with -Werror regardless of compiler issues." OFF)
+ 
+ if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "PGI")

Deleted: disable-werror3.patch
===================================================================
--- disable-werror3.patch       2023-04-10 16:10:27 UTC (rev 1444460)
+++ disable-werror3.patch       2023-04-10 16:11:07 UTC (rev 1444461)
@@ -1,13 +0,0 @@
-diff --git a/cmake/platform.cmake b/cmake/platform.cmake
-index b588bcc23..81283ab70 100644
---- a/cmake/platform.cmake
-+++ b/cmake/platform.cmake
-@@ -110,7 +110,7 @@ if(MSVC)
-         append(CMAKE_CCXX_FLAGS "/fp:precise")
-     endif()
- elseif(UNIX OR MINGW)
--    append(CMAKE_CCXX_FLAGS "-Wall -Werror -Wno-unknown-pragmas")
-+    append(CMAKE_CCXX_FLAGS "-Wall -Wno-unknown-pragmas")
-     if(DNNL_GRAPH_WITH_SYCL OR CMAKE_BASE_NAME STREQUAL "icx" OR CMAKE_BASE_NAME STREQUAL "icpx")
-         # When using Debug build mode CMake adds "-g" option without "-O0"
-         # causing the warning. This probably happens because clang/gcc compilers

Copied: python-pytorch/repos/community-staging-x86_64/disable-werror3.patch (from rev 1444460, python-pytorch/trunk/disable-werror3.patch)
===================================================================
--- disable-werror3.patch                               (rev 0)
+++ disable-werror3.patch       2023-04-10 16:11:07 UTC (rev 1444461)
@@ -0,0 +1,13 @@
+diff --git a/cmake/platform.cmake b/cmake/platform.cmake
+index b588bcc23..81283ab70 100644
+--- a/cmake/platform.cmake
++++ b/cmake/platform.cmake
+@@ -110,7 +110,7 @@ if(MSVC)
+         append(CMAKE_CCXX_FLAGS "/fp:precise")
+     endif()
+ elseif(UNIX OR MINGW)
+-    append(CMAKE_CCXX_FLAGS "-Wall -Werror -Wno-unknown-pragmas")
++    append(CMAKE_CCXX_FLAGS "-Wall -Wno-unknown-pragmas")
+     if(DNNL_GRAPH_WITH_SYCL OR CMAKE_BASE_NAME STREQUAL "icx" OR CMAKE_BASE_NAME STREQUAL "icpx")
+         # When using Debug build mode CMake adds "-g" option without "-O0"
+         # causing the warning. This probably happens because clang/gcc compilers

Deleted: disable-werror4.patch
===================================================================
--- disable-werror4.patch       2023-04-10 16:10:27 UTC (rev 1444460)
+++ disable-werror4.patch       2023-04-10 16:11:07 UTC (rev 1444461)
@@ -1,15 +0,0 @@
-diff --git a/CMakeLists.txt b/CMakeLists.txt
-index b9addcf005b..27d2c761f4e 100644
---- a/CMakeLists.txt
-+++ b/CMakeLists.txt
-@@ -797,10 +797,8 @@ if(NOT MSVC)
-   # Details at http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1459
-   string(APPEND CMAKE_CXX_FLAGS " -Wall")
-   string(APPEND CMAKE_CXX_FLAGS " -Wextra")
--  append_cxx_flag_if_supported("-Werror=return-type" CMAKE_CXX_FLAGS)
-   append_cxx_flag_if_supported("-Werror=non-virtual-dtor" CMAKE_CXX_FLAGS)
-   append_cxx_flag_if_supported("-Werror=braced-scalar-init" CMAKE_CXX_FLAGS)
--  append_cxx_flag_if_supported("-Werror=range-loop-construct" CMAKE_CXX_FLAGS)
-   append_cxx_flag_if_supported("-Werror=bool-operation" CMAKE_CXX_FLAGS)
-   append_cxx_flag_if_supported("-Wnarrowing" CMAKE_CXX_FLAGS)
-   append_cxx_flag_if_supported("-Wno-missing-field-initializers" CMAKE_CXX_FLAGS)

Copied: python-pytorch/repos/community-staging-x86_64/disable-werror4.patch (from rev 1444460, python-pytorch/trunk/disable-werror4.patch)
===================================================================
--- disable-werror4.patch                               (rev 0)
+++ disable-werror4.patch       2023-04-10 16:11:07 UTC (rev 1444461)
@@ -0,0 +1,15 @@
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index b9addcf005b..27d2c761f4e 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -797,10 +797,8 @@ if(NOT MSVC)
+   # Details at http://eigen.tuxfamily.org/bz/show_bug.cgi?id=1459
+   string(APPEND CMAKE_CXX_FLAGS " -Wall")
+   string(APPEND CMAKE_CXX_FLAGS " -Wextra")
+-  append_cxx_flag_if_supported("-Werror=return-type" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Werror=non-virtual-dtor" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Werror=braced-scalar-init" CMAKE_CXX_FLAGS)
+-  append_cxx_flag_if_supported("-Werror=range-loop-construct" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Werror=bool-operation" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Wnarrowing" CMAKE_CXX_FLAGS)
+   append_cxx_flag_if_supported("-Wno-missing-field-initializers" CMAKE_CXX_FLAGS)

Deleted: ffmpeg4.4.patch
===================================================================
--- ffmpeg4.4.patch     2023-04-10 16:10:27 UTC (rev 1444460)
+++ ffmpeg4.4.patch     2023-04-10 16:11:07 UTC (rev 1444461)
@@ -1,51 +0,0 @@
-diff --git a/cmake/Modules/FindFFmpeg.cmake b/cmake/Modules/FindFFmpeg.cmake
-index 04437562ee..dc8fe4aa5c 100644
---- a/cmake/Modules/FindFFmpeg.cmake
-+++ b/cmake/Modules/FindFFmpeg.cmake
-@@ -14,34 +14,40 @@ else (FFMPEG_LIBRARIES AND FFMPEG_INCLUDE_DIR)
- 
-   find_path(FFMPEG_AVCODEC_INCLUDE_DIR
-     NAMES libavcodec/avcodec.h
--    PATHS ${_FFMPEG_AVCODEC_INCLUDE_DIRS} /usr/include /usr/local/include /opt/local/include /sw/include
-+    PATHS /usr/include/ffmpeg4.4 /usr/local/include /opt/local/include /sw/include
-     PATH_SUFFIXES ffmpeg libav
-+    NO_DEFAULT_PATH
-   )
- 
-   find_library(FFMPEG_LIBAVCODEC
-     NAMES avcodec
--    PATHS ${_FFMPEG_AVCODEC_LIBRARY_DIRS} /usr/lib /usr/local/lib /opt/local/lib /sw/lib
-+    PATHS /usr/lib/ffmpeg4.4 /usr/local/lib /opt/local/lib /sw/lib
-+    NO_DEFAULT_PATH
-   )
- 
-   find_library(FFMPEG_LIBAVFORMAT
-     NAMES avformat
--    PATHS ${_FFMPEG_AVFORMAT_LIBRARY_DIRS} /usr/lib /usr/local/lib /opt/local/lib /sw/lib
-+    PATHS /usr/lib/ffmpeg4.4 /usr/local/lib /opt/local/lib /sw/lib
-+    NO_DEFAULT_PATH
-   )
- 
-   find_library(FFMPEG_LIBAVUTIL
-     NAMES avutil
--    PATHS ${_FFMPEG_AVUTIL_LIBRARY_DIRS} /usr/lib /usr/local/lib /opt/local/lib /sw/lib
-+    PATHS /usr/lib/ffmpeg4.4 /usr/local/lib /opt/local/lib /sw/lib
-+    NO_DEFAULT_PATH
-   )
- 
- 
-   find_library(FFMPEG_LIBSWSCALE
-     NAMES swscale
--    PATHS ${_FFMPEG_SWSCALE_LIBRARY_DIRS} /usr/lib /usr/local/lib /opt/local/lib /sw/lib
-+    PATHS /usr/lib/ffmpeg4.4 /usr/local/lib /opt/local/lib /sw/lib
-+    NO_DEFAULT_PATH
-   )
- 
-   find_library(FFMPEG_LIBSWRESAMPLE
-     NAMES swresample
--    PATHS ${_FFMPEG_SWSCALE_LIBRARY_DIRS} /usr/lib /usr/local/lib /opt/local/lib /sw/lib
-+    PATHS /usr/lib/ffmpeg4.4 /usr/local/lib /opt/local/lib /sw/lib
-+    NO_DEFAULT_PATH
-   )
- 
-   if (FFMPEG_LIBAVCODEC AND FFMPEG_LIBAVFORMAT)

Copied: python-pytorch/repos/community-staging-x86_64/ffmpeg4.4.patch (from rev 1444460, python-pytorch/trunk/ffmpeg4.4.patch)
===================================================================
--- ffmpeg4.4.patch                             (rev 0)
+++ ffmpeg4.4.patch     2023-04-10 16:11:07 UTC (rev 1444461)
@@ -0,0 +1,51 @@
+diff --git a/cmake/Modules/FindFFmpeg.cmake b/cmake/Modules/FindFFmpeg.cmake
+index 04437562ee..dc8fe4aa5c 100644
+--- a/cmake/Modules/FindFFmpeg.cmake
++++ b/cmake/Modules/FindFFmpeg.cmake
+@@ -14,34 +14,40 @@ else (FFMPEG_LIBRARIES AND FFMPEG_INCLUDE_DIR)
+ 
+   find_path(FFMPEG_AVCODEC_INCLUDE_DIR
+     NAMES libavcodec/avcodec.h
+-    PATHS ${_FFMPEG_AVCODEC_INCLUDE_DIRS} /usr/include /usr/local/include /opt/local/include /sw/include
++    PATHS /usr/include/ffmpeg4.4 /usr/local/include /opt/local/include /sw/include
+     PATH_SUFFIXES ffmpeg libav
++    NO_DEFAULT_PATH
+   )
+ 
+   find_library(FFMPEG_LIBAVCODEC
+     NAMES avcodec
+-    PATHS ${_FFMPEG_AVCODEC_LIBRARY_DIRS} /usr/lib /usr/local/lib /opt/local/lib /sw/lib
++    PATHS /usr/lib/ffmpeg4.4 /usr/local/lib /opt/local/lib /sw/lib
++    NO_DEFAULT_PATH
+   )
+ 
+   find_library(FFMPEG_LIBAVFORMAT
+     NAMES avformat
+-    PATHS ${_FFMPEG_AVFORMAT_LIBRARY_DIRS} /usr/lib /usr/local/lib /opt/local/lib /sw/lib
++    PATHS /usr/lib/ffmpeg4.4 /usr/local/lib /opt/local/lib /sw/lib
++    NO_DEFAULT_PATH
+   )
+ 
+   find_library(FFMPEG_LIBAVUTIL
+     NAMES avutil
+-    PATHS ${_FFMPEG_AVUTIL_LIBRARY_DIRS} /usr/lib /usr/local/lib /opt/local/lib /sw/lib
++    PATHS /usr/lib/ffmpeg4.4 /usr/local/lib /opt/local/lib /sw/lib
++    NO_DEFAULT_PATH
+   )
+ 
+ 
+   find_library(FFMPEG_LIBSWSCALE
+     NAMES swscale
+-    PATHS ${_FFMPEG_SWSCALE_LIBRARY_DIRS} /usr/lib /usr/local/lib /opt/local/lib /sw/lib
++    PATHS /usr/lib/ffmpeg4.4 /usr/local/lib /opt/local/lib /sw/lib
++    NO_DEFAULT_PATH
+   )
+ 
+   find_library(FFMPEG_LIBSWRESAMPLE
+     NAMES swresample
+-    PATHS ${_FFMPEG_SWSCALE_LIBRARY_DIRS} /usr/lib /usr/local/lib /opt/local/lib /sw/lib
++    PATHS /usr/lib/ffmpeg4.4 /usr/local/lib /opt/local/lib /sw/lib
++    NO_DEFAULT_PATH
+   )
+ 
+   if (FFMPEG_LIBAVCODEC AND FFMPEG_LIBAVFORMAT)
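ffmpeg4.4.patch pins FFmpeg discovery to the ffmpeg4.4 compat package (headers under /usr/include/ffmpeg4.4, libraries under /usr/lib/ffmpeg4.4) and adds NO_DEFAULT_PATH so CMake cannot fall back to the newer system FFmpeg. A minimal sketch for checking that those compat paths exist before building; the paths are taken from the patch, the check itself is only illustrative and not part of the package:

import os

for path in ('/usr/include/ffmpeg4.4/libavcodec/avcodec.h', '/usr/lib/ffmpeg4.4'):
    print(path, '->', 'found' if os.path.exists(path) else 'missing')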

Deleted: fix-building-for-torchvision.patch
===================================================================
--- fix-building-for-torchvision.patch  2023-04-10 16:10:27 UTC (rev 1444460)
+++ fix-building-for-torchvision.patch  2023-04-10 16:11:07 UTC (rev 1444461)
@@ -1,25 +0,0 @@
-From 011495d8045c44527fbd7796ce860618120ae127 Mon Sep 17 00:00:00 2001
-From: Butui Hu <[email protected]>
-Date: Fri, 30 Apr 2021 11:36:30 +0800
-Subject: [PATCH] fix building torchvision
-
----
- aten/src/ATen/core/op_registration/op_allowlist.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/aten/src/ATen/core/op_registration/op_allowlist.h b/aten/src/ATen/core/op_registration/op_allowlist.h
-index f93462bb2cf..12903d1cc09 100644
---- a/aten/src/ATen/core/op_registration/op_allowlist.h
-+++ b/aten/src/ATen/core/op_registration/op_allowlist.h
-@@ -59,7 +59,7 @@ constexpr bool op_allowlist_contains(string_view allowlist, string_view item) {
- // Returns true iff the given op name is on the allowlist
- // and should be registered
- constexpr bool op_allowlist_check(string_view op_name) {
--  assert(op_name.find("::") != string_view::npos);
-+//  assert(op_name.find("::") != string_view::npos);
-   // Use assert() instead of throw() due to a gcc bug. See:
-   // https://stackoverflow.com/questions/34280729/throw-in-constexpr-function
-   // https://github.com/fmtlib/fmt/issues/682
--- 
-2.31.1
-

Copied: python-pytorch/repos/community-staging-x86_64/fix-building-for-torchvision.patch (from rev 1444460, python-pytorch/trunk/fix-building-for-torchvision.patch)
===================================================================
--- fix-building-for-torchvision.patch                          (rev 0)
+++ fix-building-for-torchvision.patch  2023-04-10 16:11:07 UTC (rev 1444461)
@@ -0,0 +1,25 @@
+From 011495d8045c44527fbd7796ce860618120ae127 Mon Sep 17 00:00:00 2001
+From: Butui Hu <[email protected]>
+Date: Fri, 30 Apr 2021 11:36:30 +0800
+Subject: [PATCH] fix building torchvision
+
+---
+ aten/src/ATen/core/op_registration/op_allowlist.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/aten/src/ATen/core/op_registration/op_allowlist.h b/aten/src/ATen/core/op_registration/op_allowlist.h
+index f93462bb2cf..12903d1cc09 100644
+--- a/aten/src/ATen/core/op_registration/op_allowlist.h
++++ b/aten/src/ATen/core/op_registration/op_allowlist.h
+@@ -59,7 +59,7 @@ constexpr bool op_allowlist_contains(string_view allowlist, string_view item) {
+ // Returns true iff the given op name is on the allowlist
+ // and should be registered
+ constexpr bool op_allowlist_check(string_view op_name) {
+-  assert(op_name.find("::") != string_view::npos);
++//  assert(op_name.find("::") != string_view::npos);
+   // Use assert() instead of throw() due to a gcc bug. See:
+   // https://stackoverflow.com/questions/34280729/throw-in-constexpr-function
+   // https://github.com/fmtlib/fmt/issues/682
+-- 
+2.31.1
+

Deleted: fix_include_system.patch
===================================================================
--- fix_include_system.patch    2023-04-10 16:10:27 UTC (rev 1444460)
+++ fix_include_system.patch    2023-04-10 16:11:07 UTC (rev 1444461)
@@ -1,13 +0,0 @@
-diff --git a/torch/utils/cpp_extension.py b/torch/utils/cpp_extension.py
-index ca673033e1..c79ce8d37b 100644
---- a/torch/utils/cpp_extension.py
-+++ b/torch/utils/cpp_extension.py
-@@ -1760,7 +1760,7 @@ def _write_ninja_file_to_build_library(path,
-             common_cflags.append(f'-DPYBIND11_{pname}=\\"{pval}\\"')
- 
-     common_cflags += [f'-I{include}' for include in user_includes]
--    common_cflags += [f'-isystem {include}' for include in system_includes]
-+    common_cflags += [f'-I{include}' for include in system_includes]
- 
-     common_cflags += ['-D_GLIBCXX_USE_CXX11_ABI=' + str(int(torch._C._GLIBCXX_USE_CXX11_ABI))]
- 

Copied: python-pytorch/repos/community-staging-x86_64/fix_include_system.patch (from rev 1444460, python-pytorch/trunk/fix_include_system.patch)
===================================================================
--- fix_include_system.patch                            (rev 0)
+++ fix_include_system.patch    2023-04-10 16:11:07 UTC (rev 1444461)
@@ -0,0 +1,13 @@
+diff --git a/torch/utils/cpp_extension.py b/torch/utils/cpp_extension.py
+index ca673033e1..c79ce8d37b 100644
+--- a/torch/utils/cpp_extension.py
++++ b/torch/utils/cpp_extension.py
+@@ -1760,7 +1760,7 @@ def _write_ninja_file_to_build_library(path,
+             common_cflags.append(f'-DPYBIND11_{pname}=\\"{pval}\\"')
+ 
+     common_cflags += [f'-I{include}' for include in user_includes]
+-    common_cflags += [f'-isystem {include}' for include in system_includes]
++    common_cflags += [f'-I{include}' for include in system_includes]
+ 
+     common_cflags += ['-D_GLIBCXX_USE_CXX11_ABI=' + str(int(torch._C._GLIBCXX_USE_CXX11_ABI))]
+ 
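
fix_include_system.patch makes torch.utils.cpp_extension emit plain -I flags for system include directories instead of -isystem, presumably because -isystem can interfere with the compiler's own header search order when building downstream extensions. A minimal sketch of the before/after flag construction; the include paths below are hypothetical examples, not taken from the package:

system_includes = ['/usr/lib/python3.10/site-packages/torch/include',
                   '/usr/include/python3.10']
flags_before = [f'-isystem {inc}' for inc in system_includes]  # upstream behaviour
flags_after = [f'-I{inc}' for inc in system_includes]          # patched behaviour
print(flags_before)
print(flags_after)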

Deleted: rocblas-batched.patch
===================================================================
--- rocblas-batched.patch       2023-04-10 16:10:27 UTC (rev 1444460)
+++ rocblas-batched.patch       2023-04-10 16:11:07 UTC (rev 1444461)
@@ -1,36 +0,0 @@
-diff --git a/caffe2/utils/math_gpu.cu b/caffe2/utils/math_gpu.cu
-index 2906d0acd9..33610c65f7 100644
---- a/caffe2/utils/math_gpu.cu
-+++ b/caffe2/utils/math_gpu.cu
-@@ -838,6 +838,24 @@ CAFFE2_CUDA_EXPORT void GemmBatched<at::Half, CUDAContext>(
-     at::Half** C,
-     CUDAContext* context,
-     TensorProto::DataType math_type) {
-+#if defined(USE_ROCM)
-+  // loop over matrices in the batch
-+  for (int i = 0; i < batch_size; ++i) {
-+   Gemm<at::Half, CUDAContext>(
-+        trans_A,
-+        trans_B,
-+        M,
-+        N,
-+        K,
-+        alpha,
-+        A[i],
-+        B[i],
-+        beta,
-+        C[i],
-+        context,
-+        math_type);
-+  }
-+#else
-   // Note that cublas follows fortran order, so the order is different from
-   // the cblas convention.
-   const int lda = (trans_A == CblasNoTrans) ? K : M;
-@@ -912,6 +930,7 @@ CAFFE2_CUDA_EXPORT void GemmBatched<at::Half, CUDAContext>(
-   } else {
-     CAFFE_THROW("Unsupported math type");
-   }
-+#endif
- }
-

Copied: python-pytorch/repos/community-staging-x86_64/rocblas-batched.patch (from rev 1444460, python-pytorch/trunk/rocblas-batched.patch)
===================================================================
--- rocblas-batched.patch                               (rev 0)
+++ rocblas-batched.patch       2023-04-10 16:11:07 UTC (rev 1444461)
@@ -0,0 +1,36 @@
+diff --git a/caffe2/utils/math_gpu.cu b/caffe2/utils/math_gpu.cu
+index 2906d0acd9..33610c65f7 100644
+--- a/caffe2/utils/math_gpu.cu
++++ b/caffe2/utils/math_gpu.cu
+@@ -838,6 +838,24 @@ CAFFE2_CUDA_EXPORT void GemmBatched<at::Half, CUDAContext>(
+     at::Half** C,
+     CUDAContext* context,
+     TensorProto::DataType math_type) {
++#if defined(USE_ROCM)
++  // loop over matrices in the batch
++  for (int i = 0; i < batch_size; ++i) {
++   Gemm<at::Half, CUDAContext>(
++        trans_A,
++        trans_B,
++        M,
++        N,
++        K,
++        alpha,
++        A[i],
++        B[i],
++        beta,
++        C[i],
++        context,
++        math_type);
++  }
++#else
+   // Note that cublas follows fortran order, so the order is different from
+   // the cblas convention.
+   const int lda = (trans_A == CblasNoTrans) ? K : M;
+@@ -912,6 +930,7 @@ CAFFE2_CUDA_EXPORT void GemmBatched<at::Half, CUDAContext>(
+   } else {
+     CAFFE_THROW("Unsupported math type");
+   }
++#endif
+ }
+
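
rocblas-batched.patch makes the Caffe2 half-precision GemmBatched fall back, under ROCm only, to issuing one Gemm per matrix pair instead of a single batched rocBLAS call. A minimal PyTorch sketch of the same idea, showing that a per-matrix loop matches the batched result; this is illustrative only, since the patch itself operates on the Caffe2 CUDA/HIP kernels rather than these Python APIs:

import torch

A = torch.rand(4, 8, 16)
B = torch.rand(4, 16, 32)
batched = torch.bmm(A, B)                                        # batched GEMM
looped = torch.stack([torch.mm(A[i], B[i]) for i in range(4)])   # per-matrix loop
print(torch.allclose(batched, looped))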

Deleted: test.py
===================================================================
--- test.py     2023-04-10 16:10:27 UTC (rev 1444460)
+++ test.py     2023-04-10 16:11:07 UTC (rev 1444461)
@@ -1,7 +0,0 @@
-#!/usr/bin/env python
-
-import torch
-
-d = torch.device('cuda')
-a = torch.rand(1, 2).to(d)
-print(a + 0)

Copied: python-pytorch/repos/community-staging-x86_64/test.py (from rev 1444460, python-pytorch/trunk/test.py)
===================================================================
--- test.py                             (rev 0)
+++ test.py     2023-04-10 16:11:07 UTC (rev 1444461)
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+
+import torch
+
+d = torch.device('cuda')
+a = torch.rand(1, 2).to(d)
+print(a + 0)
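
test.py is a minimal CUDA smoke test: it moves a small random tensor to the GPU and performs an addition, which fails immediately if the CUDA runtime or the packaged libraries are broken. It is run as "python test.py" on a machine with a working GPU. A hedged variant that skips cleanly when no GPU is present (not part of the package):

import torch

if torch.cuda.is_available():
    d = torch.device('cuda')
    a = torch.rand(1, 2).to(d)
    print(a + 0)
else:
    print('CUDA not available; skipping GPU smoke test')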

Deleted: use-system-libuv.patch
===================================================================
--- use-system-libuv.patch      2023-04-10 16:10:27 UTC (rev 1444460)
+++ use-system-libuv.patch      2023-04-10 16:11:07 UTC (rev 1444461)
@@ -1,13 +0,0 @@
-diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake
-index 06464e799a..93410bc210 100644
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -1346,7 +1346,7 @@ if(USE_DISTRIBUTED AND USE_TENSORPIPE)
-       set(TP_USE_CUDA ON CACHE BOOL "" FORCE)
-       set(TP_ENABLE_CUDA_IPC ON CACHE BOOL "" FORCE)
-     endif()
--    set(TP_BUILD_LIBUV ON CACHE BOOL "" FORCE)
-+    set(TP_BUILD_LIBUV OFF CACHE BOOL "" FORCE)
-     set(TP_STATIC_OR_SHARED STATIC CACHE STRING "" FORCE)
- 
-     add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/tensorpipe)

Copied: python-pytorch/repos/community-staging-x86_64/use-system-libuv.patch (from rev 1444460, python-pytorch/trunk/use-system-libuv.patch)
===================================================================
--- use-system-libuv.patch                              (rev 0)
+++ use-system-libuv.patch      2023-04-10 16:11:07 UTC (rev 1444461)
@@ -0,0 +1,13 @@
+diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake
+index 06464e799a..93410bc210 100644
+--- a/cmake/Dependencies.cmake
++++ b/cmake/Dependencies.cmake
+@@ -1346,7 +1346,7 @@ if(USE_DISTRIBUTED AND USE_TENSORPIPE)
+       set(TP_USE_CUDA ON CACHE BOOL "" FORCE)
+       set(TP_ENABLE_CUDA_IPC ON CACHE BOOL "" FORCE)
+     endif()
+-    set(TP_BUILD_LIBUV ON CACHE BOOL "" FORCE)
++    set(TP_BUILD_LIBUV OFF CACHE BOOL "" FORCE)
+     set(TP_STATIC_OR_SHARED STATIC CACHE STRING "" FORCE)
+ 
+     add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/tensorpipe)
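
use-system-libuv.patch turns TP_BUILD_LIBUV off so TensorPipe links against the distribution's libuv instead of building its bundled copy. One way to sanity-check the result is to look for libuv in the dynamic dependencies of the installed torch libraries; the library name below is an assumption about where TensorPipe ends up linked, so adjust it if the symbols live elsewhere:

import os
import subprocess
import torch

# assumed location of the TensorPipe-containing library; verify on the target system
lib = os.path.join(os.path.dirname(torch.__file__), 'lib', 'libtorch_cpu.so')
deps = subprocess.run(['ldd', lib], capture_output=True, text=True).stdout
print([line.strip() for line in deps.splitlines() if 'libuv' in line])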
