Date: Monday, December 19, 2022 @ 09:34:52
  Author: svenstaro
Revision: 1361515

archrelease: copy trunk to community-x86_64

Added:
  python-pytorch/repos/community-x86_64/87773.patch
    (from rev 1361514, python-pytorch/trunk/87773.patch)
  python-pytorch/repos/community-x86_64/PKGBUILD
    (from rev 1361514, python-pytorch/trunk/PKGBUILD)
  python-pytorch/repos/community-x86_64/cuda_arch_update.patch
    (from rev 1361514, python-pytorch/trunk/cuda_arch_update.patch)
  python-pytorch/repos/community-x86_64/ffmpeg4.4.patch
    (from rev 1361514, python-pytorch/trunk/ffmpeg4.4.patch)
  python-pytorch/repos/community-x86_64/fix-building-for-torchvision.patch
    (from rev 1361514, python-pytorch/trunk/fix-building-for-torchvision.patch)
  python-pytorch/repos/community-x86_64/fix_include_system.patch
    (from rev 1361514, python-pytorch/trunk/fix_include_system.patch)
  python-pytorch/repos/community-x86_64/test.py
    (from rev 1361514, python-pytorch/trunk/test.py)
  python-pytorch/repos/community-x86_64/use-system-libuv.patch
    (from rev 1361514, python-pytorch/trunk/use-system-libuv.patch)
Deleted:
  python-pytorch/repos/community-x86_64/87773.patch
  python-pytorch/repos/community-x86_64/PKGBUILD
  python-pytorch/repos/community-x86_64/cuda_arch_update.patch
  python-pytorch/repos/community-x86_64/ffmpeg4.4.patch
  python-pytorch/repos/community-x86_64/fix-building-for-torchvision.patch
  python-pytorch/repos/community-x86_64/fix_include_system.patch
  python-pytorch/repos/community-x86_64/test.py
  python-pytorch/repos/community-x86_64/use-system-libuv.patch

------------------------------------+
 87773.patch                        |   78 ++--
 PKGBUILD                           |  682 +++++++++++++++++------------------
 cuda_arch_update.patch             |  116 ++---
 ffmpeg4.4.patch                    |  102 ++---
 fix-building-for-torchvision.patch |   50 +-
 fix_include_system.patch           |   26 -
 test.py                            |   14 
 use-system-libuv.patch             |   26 -
 8 files changed, 547 insertions(+), 547 deletions(-)

Deleted: 87773.patch
===================================================================
--- 87773.patch 2022-12-19 09:34:21 UTC (rev 1361514)
+++ 87773.patch 2022-12-19 09:34:52 UTC (rev 1361515)
@@ -1,39 +0,0 @@
-From 94465d6287e8f54c99f4b0b891a6c618bb80d7ce Mon Sep 17 00:00:00 2001
-From: Arfrever Frehtes Taifersar Arahesis <[email protected]>
-Date: Wed, 26 Oct 2022 07:32:59 +0200
-Subject: [PATCH] Support only GLog >=0.6.0
-
-Fixes https://github.com/pytorch/pytorch/issues/58054
----
- c10/util/Logging.cpp | 12 +-----------
- 1 file changed, 1 insertion(+), 11 deletions(-)
-
-diff --git a/c10/util/Logging.cpp b/c10/util/Logging.cpp
-index fe74e49548646..d8fce12ce4840 100644
---- a/c10/util/Logging.cpp
-+++ b/c10/util/Logging.cpp
-@@ -192,23 +192,13 @@ C10_DEFINE_int(
-     google::GLOG_WARNING,
-     "The minimum log level that caffe2 will output.");
- 
--// Google glog's api does not have an external function that allows one to 
check
--// if glog is initialized or not. It does have an internal function - so we 
are
--// declaring it here. This is a hack but has been used by a bunch of others 
too
--// (e.g. Torch).
--namespace google {
--namespace glog_internal_namespace_ {
--bool IsGoogleLoggingInitialized();
--} // namespace glog_internal_namespace_
--} // namespace google
--
- namespace c10 {
- namespace {
- 
- void initGoogleLogging(char const* name) {
- #if !defined(_MSC_VER)
-   // This trick can only be used on UNIX platforms
--  if (!::google::glog_internal_namespace_::IsGoogleLoggingInitialized())
-+  if (!::google::IsGoogleLoggingInitialized())
- #endif
-   {
-     ::google::InitGoogleLogging(name);

Copied: python-pytorch/repos/community-x86_64/87773.patch (from rev 1361514, 
python-pytorch/trunk/87773.patch)
===================================================================
--- 87773.patch                         (rev 0)
+++ 87773.patch 2022-12-19 09:34:52 UTC (rev 1361515)
@@ -0,0 +1,39 @@
+From 94465d6287e8f54c99f4b0b891a6c618bb80d7ce Mon Sep 17 00:00:00 2001
+From: Arfrever Frehtes Taifersar Arahesis <[email protected]>
+Date: Wed, 26 Oct 2022 07:32:59 +0200
+Subject: [PATCH] Support only GLog >=0.6.0
+
+Fixes https://github.com/pytorch/pytorch/issues/58054
+---
+ c10/util/Logging.cpp | 12 +-----------
+ 1 file changed, 1 insertion(+), 11 deletions(-)
+
+diff --git a/c10/util/Logging.cpp b/c10/util/Logging.cpp
+index fe74e49548646..d8fce12ce4840 100644
+--- a/c10/util/Logging.cpp
++++ b/c10/util/Logging.cpp
+@@ -192,23 +192,13 @@ C10_DEFINE_int(
+     google::GLOG_WARNING,
+     "The minimum log level that caffe2 will output.");
+ 
+-// Google glog's api does not have an external function that allows one to 
check
+-// if glog is initialized or not. It does have an internal function - so we 
are
+-// declaring it here. This is a hack but has been used by a bunch of others 
too
+-// (e.g. Torch).
+-namespace google {
+-namespace glog_internal_namespace_ {
+-bool IsGoogleLoggingInitialized();
+-} // namespace glog_internal_namespace_
+-} // namespace google
+-
+ namespace c10 {
+ namespace {
+ 
+ void initGoogleLogging(char const* name) {
+ #if !defined(_MSC_VER)
+   // This trick can only be used on UNIX platforms
+-  if (!::google::glog_internal_namespace_::IsGoogleLoggingInitialized())
++  if (!::google::IsGoogleLoggingInitialized())
+ #endif
+   {
+     ::google::InitGoogleLogging(name);

Deleted: PKGBUILD
===================================================================
--- PKGBUILD    2022-12-19 09:34:21 UTC (rev 1361514)
+++ PKGBUILD    2022-12-19 09:34:52 UTC (rev 1361515)
@@ -1,341 +0,0 @@
-# Maintainer: Sven-Hendrik Haase <[email protected]>
-# Contributor: Stephen Zhang <zsrkmyn at gmail dot com>
-
-_pkgname=pytorch
-pkgbase="python-${_pkgname}"
-pkgname=("${pkgbase}" "${pkgbase}-opt" "${pkgbase}-cuda" "${pkgbase}-opt-cuda")
-pkgver=1.13.0
-_pkgver=1.13.0
-pkgrel=3
-_pkgdesc='Tensors and Dynamic neural networks in Python with strong GPU 
acceleration'
-pkgdesc="${_pkgdesc}"
-arch=('x86_64')
-url="https://pytorch.org";
-license=('BSD')
-depends=('google-glog' 'gflags' 'opencv' 'openmp' 'nccl' 'pybind11' 'python' 
'python-yaml' 'libuv'
-         'python-numpy' 'protobuf' 'ffmpeg4.4' 'python-future' 'qt5-base' 
'intel-oneapi-mkl'
-         'python-typing_extensions')
-makedepends=('python' 'python-setuptools' 'python-yaml' 'python-numpy' 'cmake' 
'cuda'
-             'cudnn' 'git' 'magma' 'ninja' 'pkgconfig' 'doxygen' 'gcc11' 
'onednn')
-source=("${_pkgname}-${pkgver}::git+https://github.com/pytorch/pytorch.git#tag=v$_pkgver";
-        # generated using parse-submodules
-        
"${pkgname}-ARM_NEON_2_x86_SSE::git+https://github.com/intel/ARM_NEON_2_x86_SSE.git";
-        "${pkgname}-FP16::git+https://github.com/Maratyszcza/FP16.git";
-        "${pkgname}-FXdiv::git+https://github.com/Maratyszcza/FXdiv.git";
-        "${pkgname}-NNPACK::git+https://github.com/Maratyszcza/NNPACK.git";
-        "${pkgname}-PeachPy::git+https://github.com/malfet/PeachPy.git";
-        "${pkgname}-QNNPACK::git+https://github.com/pytorch/QNNPACK";
-        
"${pkgname}-VulkanMemoryAllocator::git+https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git";
-        "${pkgname}-XNNPACK::git+https://github.com/google/XNNPACK.git";
-        "${pkgname}-benchmark::git+https://github.com/google/benchmark.git";
-        "${pkgname}-cpuinfo::git+https://github.com/pytorch/cpuinfo.git";
-        "${pkgname}-cub::git+https://github.com/NVlabs/cub.git";
-        
"${pkgname}-cudnn-frontend::git+https://github.com/NVIDIA/cudnn-frontend.git";
-        "${pkgname}-cutlass::git+https://github.com/NVIDIA/cutlass.git";
-        "${pkgname}-eigen::git+https://gitlab.com/libeigen/eigen.git";
-        "${pkgname}-enum34::git+https://github.com/PeachPy/enum34.git";
-        "${pkgname}-fbgemm::git+https://github.com/pytorch/fbgemm";
-        "${pkgname}-fbjni::git+https://github.com/facebookincubator/fbjni.git";
-        "${pkgname}-flatbuffers::git+https://github.com/google/flatbuffers.git";
-        "${pkgname}-fmt::git+https://github.com/fmtlib/fmt.git";
-        "${pkgname}-foxi::git+https://github.com/houseroad/foxi.git";
-        "${pkgname}-gemmlowp::git+https://github.com/google/gemmlowp.git";
-        "${pkgname}-gloo::git+https://github.com/facebookincubator/gloo";
-        "${pkgname}-googletest::git+https://github.com/google/googletest.git";
-        "${pkgname}-ideep::git+https://github.com/intel/ideep";
-        "${pkgname}-ios-cmake::git+https://github.com/Yangqing/ios-cmake.git";
-        "${pkgname}-ittapi::git+https://github.com/intel/ittapi.git";
-        "${pkgname}-json::git+https://github.com/nlohmann/json.git";
-        "${pkgname}-kineto::git+https://github.com/pytorch/kineto";
-        "${pkgname}-nccl::git+https://github.com/NVIDIA/nccl";
-        "${pkgname}-onnx-tensorrt::git+https://github.com/onnx/onnx-tensorrt";
-        "${pkgname}-onnx::git+https://github.com/onnx/onnx.git";
-        "${pkgname}-pocketfft::git+https://github.com/mreineck/pocketfft";
-        
"${pkgname}-protobuf::git+https://github.com/protocolbuffers/protobuf.git";
-        "${pkgname}-psimd::git+https://github.com/Maratyszcza/psimd.git";
-        
"${pkgname}-pthreadpool::git+https://github.com/Maratyszcza/pthreadpool.git";
-        "${pkgname}-pybind11::git+https://github.com/pybind/pybind11.git";
-        "${pkgname}-six::git+https://github.com/benjaminp/six.git";
-        "${pkgname}-sleef::git+https://github.com/shibatch/sleef";
-        "${pkgname}-tbb::git+https://github.com/01org/tbb";
-        "${pkgname}-tensorpipe::git+https://github.com/pytorch/tensorpipe.git";
-        "${pkgname}-zstd::git+https://github.com/facebook/zstd.git";
-        fix_include_system.patch
-        use-system-libuv.patch
-        fix-building-for-torchvision.patch
-        87773.patch
-        cuda_arch_update.patch
-        ffmpeg4.4.patch)
-b2sums=('SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        'SKIP'
-        
'77f85808e480bd37dfb5f072d565466ae30a8f827f49ef97591fc2fc03bea54944eb1adeaa4a1e3466518a5640f575eda88d15b4c4d549a6f41f0bf4f2cfb086'
-        
'1f7ce593fa9fc62535ca1c3d85c996a73006cc614c7b7258160c3fc53cd52a1cfddcb18baf897f2e1223ecdfee52ca1471b91c9f845368ed6ac51b66f6e0e676'
-        
'fdea0b815d7750a4233c1d4668593020da017aea43cf4cb63b4c00d0852c7d34f0333e618fcf98b8df2185313a2089b8c2e9fe8ec3cfb0bf693598f9c61461a8'
-        
'0a8fc110a306e81beeb9ddfb3a1ddfd26aeda5e3f7adfb0f7c9bc3fd999c2dde62e0b407d3eca573097a53fd97329214e30e8767fb38d770197c7ec2b53daf18'
-        
'2a540c5beb978bcda1e3375d82526fb088409cd9ba0be3aa8f411477dd935b75bab2b4a4a79cecffeee91e8c6a3a716884508d17b9a558979dbb5059458bd0d3'
-        
'6286b05d5b5143f117363e3ce3c7d693910f53845aeb6f501b3eea64aa71778cb2d7dcd4ac945d5321ef23b4da02446e86dedc6a9b6a998df4a7f3b1ce50550a')
-options=('!lto')
-
-get_pyver () {
-  python -c 'import sys; print(str(sys.version_info[0]) + "." + 
str(sys.version_info[1]))'
-}
-
-prepare() {
-  cd "${srcdir}/${_pkgname}-${pkgver}"
-
-  # generated using parse-submodules
-  git submodule init
-
-  git config submodule."android/libs/fbjni".url "${srcdir}/${pkgname}"-fbjni
-  git config submodule."third_party/NNPACK".url "${srcdir}/${pkgname}"-NNPACK
-  git config submodule."third_party/NNPACK_deps/FP16".url 
"${srcdir}/${pkgname}"-FP16
-  git config submodule."third_party/NNPACK_deps/FXdiv".url 
"${srcdir}/${pkgname}"-FXdiv
-  git config submodule."third_party/NNPACK_deps/psimd".url 
"${srcdir}/${pkgname}"-psimd
-  git config submodule."third_party/NNPACK_deps/pthreadpool".url 
"${srcdir}/${pkgname}"-pthreadpool
-  git config submodule."third_party/QNNPACK".url "${srcdir}/${pkgname}"-QNNPACK
-  git config submodule."third_party/VulkanMemoryAllocator".url 
"${srcdir}/${pkgname}"-VulkanMemoryAllocator
-  git config submodule."third_party/XNNPACK".url "${srcdir}/${pkgname}"-XNNPACK
-  git config submodule."third_party/benchmark".url 
"${srcdir}/${pkgname}"-benchmark
-  git config submodule."third_party/cpuinfo".url "${srcdir}/${pkgname}"-cpuinfo
-  git config submodule."third_party/cub".url "${srcdir}/${pkgname}"-cub
-  git config submodule."third_party/cudnn_frontend".url 
"${srcdir}/${pkgname}"-cudnn-frontend
-  git config submodule."third_party/cutlass".url "${srcdir}/${pkgname}"-cutlass
-  git config submodule."third_party/eigen".url "${srcdir}/${pkgname}"-eigen
-  git config submodule."third_party/fbgemm".url "${srcdir}/${pkgname}"-fbgemm
-  git config submodule."third_party/flatbuffers".url 
"${srcdir}/${pkgname}"-flatbuffers
-  git config submodule."third_party/fmt".url "${srcdir}/${pkgname}"-fmt
-  git config submodule."third_party/foxi".url "${srcdir}/${pkgname}"-foxi
-  git config submodule."third_party/gemmlowp/gemmlowp".url 
"${srcdir}/${pkgname}"-gemmlowp
-  git config submodule."third_party/gloo".url "${srcdir}/${pkgname}"-gloo
-  git config submodule."third_party/googletest".url 
"${srcdir}/${pkgname}"-googletest
-  git config submodule."third_party/ideep".url "${srcdir}/${pkgname}"-ideep
-  git config submodule."third_party/ios-cmake".url 
"${srcdir}/${pkgname}"-ios-cmake
-  git config submodule."third_party/ittapi".url "${srcdir}/${pkgname}"-ittapi
-  git config submodule."third_party/kineto".url "${srcdir}/${pkgname}"-kineto
-  git config submodule."third_party/nccl/nccl".url "${srcdir}/${pkgname}"-nccl
-  git config submodule."third_party/neon2sse".url 
"${srcdir}/${pkgname}"-ARM_NEON_2_x86_SSE
-  git config submodule."third_party/nlohmann".url "${srcdir}/${pkgname}"-json
-  git config submodule."third_party/onnx".url "${srcdir}/${pkgname}"-onnx
-  git config submodule."third_party/onnx-tensorrt".url 
"${srcdir}/${pkgname}"-onnx-tensorrt
-  git config submodule."third_party/pocketfft".url 
"${srcdir}/${pkgname}"-pocketfft
-  git config submodule."third_party/protobuf".url 
"${srcdir}/${pkgname}"-protobuf
-  git config submodule."third_party/pybind11".url 
"${srcdir}/${pkgname}"-pybind11
-  git config submodule."third_party/python-enum".url 
"${srcdir}/${pkgname}"-enum34
-  git config submodule."third_party/python-peachpy".url 
"${srcdir}/${pkgname}"-PeachPy
-  git config submodule."third_party/python-six".url "${srcdir}/${pkgname}"-six
-  git config submodule."third_party/sleef".url "${srcdir}/${pkgname}"-sleef
-  git config submodule."third_party/tbb".url "${srcdir}/${pkgname}"-tbb
-  git config submodule."third_party/tensorpipe".url 
"${srcdir}/${pkgname}"-tensorpipe
-  git config submodule."third_party/zstd".url "${srcdir}/${pkgname}"-zstd
-
-  git -c protocol.file.allow=always submodule update --init --recursive
-
-  # https://bugs.archlinux.org/task/64981
-  patch -N torch/utils/cpp_extension.py "${srcdir}"/fix_include_system.patch
-
-  # Use system libuv
-  patch -Np1 -i "${srcdir}"/use-system-libuv.patch
-
-  # fix https://github.com/pytorch/vision/issues/3695
-  patch -Np1 -i "${srcdir}/fix-building-for-torchvision.patch"
-
-  # Fix building against glog 0.6
-  patch -Np1 -i "${srcdir}/87773.patch"
-
-  # Update supported CUDA compute architectures
-  patch -Np1 -i "${srcdir}/cuda_arch_update.patch"
-
-  # build against ffmpeg4.4
-  patch -Np1 -i "${srcdir}/ffmpeg4.4.patch"
-
-  cd "${srcdir}"
-
-  cp -r "${_pkgname}-${pkgver}" "${_pkgname}-${pkgver}-opt"
-  cp -r "${_pkgname}-${pkgver}" "${_pkgname}-${pkgver}-cuda"
-  cp -r "${_pkgname}-${pkgver}" "${_pkgname}-${pkgver}-opt-cuda"
-
-  export VERBOSE=1
-  export PYTORCH_BUILD_VERSION="${pkgver}"
-  export PYTORCH_BUILD_NUMBER=1
-
-  # Check tools/setup_helpers/cmake.py, setup.py and CMakeLists.txt for a list 
of flags that can be set via env vars.
-  export ATEN_NO_TEST=ON  # do not build ATen tests
-  export USE_MKLDNN=ON
-  export BUILD_CUSTOM_PROTOBUF=OFF
-  export BUILD_CAFFE2=ON
-  export BUILD_CAFFE2_OPS=ON
-  # export BUILD_SHARED_LIBS=OFF
-  export USE_FFMPEG=ON
-  export USE_GFLAGS=ON
-  export USE_GLOG=ON
-  export BUILD_BINARY=ON
-  export USE_OBSERVERS=ON
-  export USE_OPENCV=ON
-  # export USE_SYSTEM_LIBS=ON  # experimental, not all libs present in repos
-  export USE_SYSTEM_NCCL=ON
-  export NCCL_VERSION=$(pkg-config nccl --modversion)
-  export NCCL_VER_CODE=$(sed -n 's/^#define NCCL_VERSION_CODE\s*\(.*\).*/\1/p' 
/usr/include/nccl.h)
-  # export BUILD_SPLIT_CUDA=ON  # modern preferred build, but splits libs and 
symbols, ABI break
-  # export USE_FAST_NVCC=ON  # parallel build with nvcc, spawns too many 
processes
-  export USE_CUPTI_SO=ON  # make sure cupti.so is used as shared lib
-  export CC=/usr/bin/gcc-11
-  export CXX=/usr/bin/g++-11
-  export CUDAHOSTCXX=/usr/bin/g++-11
-  export CUDA_HOST_COMPILER="${CUDAHOSTCXX}"
-  export CUDA_HOME=/opt/cuda
-  # hide buildt-time CUDA devices
-  export CUDA_VISIBLE_DEVICES=""
-  export CUDNN_LIB_DIR=/usr/lib
-  export CUDNN_INCLUDE_DIR=/usr/include
-  export TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
-  # CUDA arch 8.7 is not supported (needed by Jetson boards, etc.)
-  export 
TORCH_CUDA_ARCH_LIST="5.2;5.3;6.0;6.1;6.2;7.0;7.2;7.5;8.0;8.6;8.9;9.0;9.0+PTX"  
#include latest PTX for future compat
-  export OVERRIDE_TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST}"
-}
-
-build() {
-  echo "Building without cuda and without non-x86-64 optimizations"
-  export USE_CUDA=0
-  export USE_CUDNN=0
-  cd "${srcdir}/${_pkgname}-${pkgver}"
-  echo "add_definitions(-march=x86-64)" >> cmake/MiscCheck.cmake
-  # this horrible hack is necessary because the current release
-  # ships inconsistent CMake which tries to build objects before
-  # thier dependencies, build twice when dependencies are available
-  python setup.py build || python setup.py build
-
-  echo "Building without cuda and with non-x86-64 optimizations"
-  export USE_CUDA=0
-  export USE_CUDNN=0
-  cd "${srcdir}/${_pkgname}-${pkgver}-opt"
-  echo "add_definitions(-march=haswell)" >> cmake/MiscCheck.cmake
-  # same horrible hack as above
-  python setup.py build || python setup.py build
-
-  echo "Building with cuda and without non-x86-64 optimizations"
-  export USE_CUDA=1
-  export USE_CUDNN=1
-  cd "${srcdir}/${_pkgname}-${pkgver}-cuda"
-  echo "add_definitions(-march=x86-64)" >> cmake/MiscCheck.cmake
-  # same horrible hack as above
-  python setup.py build || python setup.py build
-
-  echo "Building with cuda and with non-x86-64 optimizations"
-  export USE_CUDA=1
-  export USE_CUDNN=1
-  cd "${srcdir}/${_pkgname}-${pkgver}-opt-cuda"
-  echo "add_definitions(-march=haswell)" >> cmake/MiscCheck.cmake
-  # same horrible hack as above
-  python setup.py build || python setup.py build
-}
-
-_package() {
-  # Prevent setup.py from re-running CMake and rebuilding
-  sed -e 's/RUN_BUILD_DEPS = True/RUN_BUILD_DEPS = False/g' -i setup.py
-
-  python setup.py install --root="${pkgdir}"/ --optimize=1 --skip-build
-
-  install -Dm644 LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
-
-  pytorchpath="usr/lib/python$(get_pyver)/site-packages/torch"
-  install -d "${pkgdir}/usr/lib"
-
-  # put CMake files in correct place
-  mv "${pkgdir}/${pytorchpath}/share/cmake" "${pkgdir}/usr/lib/cmake"
-
-  # put C++ API in correct place
-  mv "${pkgdir}/${pytorchpath}/include" "${pkgdir}/usr/include"
-  find "${pkgdir}/${pytorchpath}"/lib/ -type f,l \( -iname '*.so' -or -iname 
'*.so*' \) -print0 | while read -rd $'\0' _lib; do
-    mv "${_lib}" "${pkgdir}"/usr/lib/
-  done
-
-  # clean up duplicates
-  # TODO: move towards direct shared library dependecy of:
-  #   c10, caffe2, libcpuinfo, CUDA RT, gloo, GTest, Intel MKL,
-  #   NVRTC, ONNX, protobuf, libthreadpool, QNNPACK
-  rm -rf "${pkgdir}/usr/include/pybind11"
-
-  # python module is hardcoded to look there at runtime
-  ln -s /usr/include "${pkgdir}/${pytorchpath}/include"
-  find "${pkgdir}"/usr/lib -maxdepth 1 -type f,l \( -iname '*.so' -or -iname 
'*.so*' \) -print0 | while read -rd $'\0' _lib; do
-    ln -s ${_lib#"$pkgdir"} "${pkgdir}/${pytorchpath}/lib/"
-  done
-}
-
-package_python-pytorch() {
-  pkgdesc="${_pkgdesc}"
-
-  cd "${srcdir}/${_pkgname}-${pkgver}"
-  _package
-}
-
-package_python-pytorch-opt() {
-  pkgdesc="${_pkgdesc} (with AVX2 CPU optimizations)"
-  conflicts=(python-pytorch)
-  provides=(python-pytorch)
-
-  cd "${srcdir}/${_pkgname}-${pkgver}-opt"
-  _package
-}
-
-package_python-pytorch-cuda() {
-  pkgdesc="${_pkgdesc} (with CUDA)"
-  depends+=(cuda cudnn magma onednn)
-  conflicts=(python-pytorch)
-  provides=(python-pytorch)
-
-  cd "${srcdir}/${_pkgname}-${pkgver}-cuda"
-  _package
-}
-
-package_python-pytorch-opt-cuda() {
-  pkgdesc="${_pkgdesc} (with CUDA and AVX2 CPU optimizations)"
-  depends+=(cuda cudnn magma onednn)
-  conflicts=(python-pytorch)
-  provides=(python-pytorch python-pytorch-cuda)
-
-  cd "${srcdir}/${_pkgname}-${pkgver}-opt-cuda"
-  _package
-}
-
-# vim:set ts=2 sw=2 et:

Copied: python-pytorch/repos/community-x86_64/PKGBUILD (from rev 1361514, 
python-pytorch/trunk/PKGBUILD)
===================================================================
--- PKGBUILD                            (rev 0)
+++ PKGBUILD    2022-12-19 09:34:52 UTC (rev 1361515)
@@ -0,0 +1,341 @@
+# Maintainer: Sven-Hendrik Haase <[email protected]>
+# Contributor: Stephen Zhang <zsrkmyn at gmail dot com>
+
+_pkgname=pytorch
+pkgbase="python-${_pkgname}"
+pkgname=("${pkgbase}" "${pkgbase}-opt" "${pkgbase}-cuda" "${pkgbase}-opt-cuda")
+pkgver=1.13.1
+_pkgver=1.13.1
+pkgrel=1
+_pkgdesc='Tensors and Dynamic neural networks in Python with strong GPU 
acceleration'
+pkgdesc="${_pkgdesc}"
+arch=('x86_64')
+url="https://pytorch.org";
+license=('BSD')
+depends=('google-glog' 'gflags' 'opencv' 'openmp' 'nccl' 'pybind11' 'python' 
'python-yaml' 'libuv'
+         'python-numpy' 'protobuf' 'ffmpeg4.4' 'python-future' 'qt5-base' 
'intel-oneapi-mkl'
+         'python-typing_extensions')
+makedepends=('python' 'python-setuptools' 'python-yaml' 'python-numpy' 'cmake' 
'cuda'
+             'cudnn' 'git' 'magma' 'ninja' 'pkgconfig' 'doxygen' 'gcc11' 
'onednn')
+source=("${_pkgname}-${pkgver}::git+https://github.com/pytorch/pytorch.git#tag=v$_pkgver";
+        # generated using parse-submodules
+        
"${pkgname}-ARM_NEON_2_x86_SSE::git+https://github.com/intel/ARM_NEON_2_x86_SSE.git";
+        "${pkgname}-FP16::git+https://github.com/Maratyszcza/FP16.git";
+        "${pkgname}-FXdiv::git+https://github.com/Maratyszcza/FXdiv.git";
+        "${pkgname}-NNPACK::git+https://github.com/Maratyszcza/NNPACK.git";
+        "${pkgname}-PeachPy::git+https://github.com/malfet/PeachPy.git";
+        "${pkgname}-QNNPACK::git+https://github.com/pytorch/QNNPACK";
+        
"${pkgname}-VulkanMemoryAllocator::git+https://github.com/GPUOpen-LibrariesAndSDKs/VulkanMemoryAllocator.git";
+        "${pkgname}-XNNPACK::git+https://github.com/google/XNNPACK.git";
+        "${pkgname}-benchmark::git+https://github.com/google/benchmark.git";
+        "${pkgname}-cpuinfo::git+https://github.com/pytorch/cpuinfo.git";
+        "${pkgname}-cub::git+https://github.com/NVlabs/cub.git";
+        
"${pkgname}-cudnn-frontend::git+https://github.com/NVIDIA/cudnn-frontend.git";
+        "${pkgname}-cutlass::git+https://github.com/NVIDIA/cutlass.git";
+        "${pkgname}-eigen::git+https://gitlab.com/libeigen/eigen.git";
+        "${pkgname}-enum34::git+https://github.com/PeachPy/enum34.git";
+        "${pkgname}-fbgemm::git+https://github.com/pytorch/fbgemm";
+        "${pkgname}-fbjni::git+https://github.com/facebookincubator/fbjni.git";
+        "${pkgname}-flatbuffers::git+https://github.com/google/flatbuffers.git";
+        "${pkgname}-fmt::git+https://github.com/fmtlib/fmt.git";
+        "${pkgname}-foxi::git+https://github.com/houseroad/foxi.git";
+        "${pkgname}-gemmlowp::git+https://github.com/google/gemmlowp.git";
+        "${pkgname}-gloo::git+https://github.com/facebookincubator/gloo";
+        "${pkgname}-googletest::git+https://github.com/google/googletest.git";
+        "${pkgname}-ideep::git+https://github.com/intel/ideep";
+        "${pkgname}-ios-cmake::git+https://github.com/Yangqing/ios-cmake.git";
+        "${pkgname}-ittapi::git+https://github.com/intel/ittapi.git";
+        "${pkgname}-json::git+https://github.com/nlohmann/json.git";
+        "${pkgname}-kineto::git+https://github.com/pytorch/kineto";
+        "${pkgname}-nccl::git+https://github.com/NVIDIA/nccl";
+        "${pkgname}-onnx-tensorrt::git+https://github.com/onnx/onnx-tensorrt";
+        "${pkgname}-onnx::git+https://github.com/onnx/onnx.git";
+        "${pkgname}-pocketfft::git+https://github.com/mreineck/pocketfft";
+        
"${pkgname}-protobuf::git+https://github.com/protocolbuffers/protobuf.git";
+        "${pkgname}-psimd::git+https://github.com/Maratyszcza/psimd.git";
+        
"${pkgname}-pthreadpool::git+https://github.com/Maratyszcza/pthreadpool.git";
+        "${pkgname}-pybind11::git+https://github.com/pybind/pybind11.git";
+        "${pkgname}-six::git+https://github.com/benjaminp/six.git";
+        "${pkgname}-sleef::git+https://github.com/shibatch/sleef";
+        "${pkgname}-tbb::git+https://github.com/01org/tbb";
+        "${pkgname}-tensorpipe::git+https://github.com/pytorch/tensorpipe.git";
+        "${pkgname}-zstd::git+https://github.com/facebook/zstd.git";
+        fix_include_system.patch
+        use-system-libuv.patch
+        fix-building-for-torchvision.patch
+        87773.patch
+        cuda_arch_update.patch
+        ffmpeg4.4.patch)
+b2sums=('SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        'SKIP'
+        
'77f85808e480bd37dfb5f072d565466ae30a8f827f49ef97591fc2fc03bea54944eb1adeaa4a1e3466518a5640f575eda88d15b4c4d549a6f41f0bf4f2cfb086'
+        
'1f7ce593fa9fc62535ca1c3d85c996a73006cc614c7b7258160c3fc53cd52a1cfddcb18baf897f2e1223ecdfee52ca1471b91c9f845368ed6ac51b66f6e0e676'
+        
'fdea0b815d7750a4233c1d4668593020da017aea43cf4cb63b4c00d0852c7d34f0333e618fcf98b8df2185313a2089b8c2e9fe8ec3cfb0bf693598f9c61461a8'
+        
'0a8fc110a306e81beeb9ddfb3a1ddfd26aeda5e3f7adfb0f7c9bc3fd999c2dde62e0b407d3eca573097a53fd97329214e30e8767fb38d770197c7ec2b53daf18'
+        
'2a540c5beb978bcda1e3375d82526fb088409cd9ba0be3aa8f411477dd935b75bab2b4a4a79cecffeee91e8c6a3a716884508d17b9a558979dbb5059458bd0d3'
+        
'6286b05d5b5143f117363e3ce3c7d693910f53845aeb6f501b3eea64aa71778cb2d7dcd4ac945d5321ef23b4da02446e86dedc6a9b6a998df4a7f3b1ce50550a')
+options=('!lto')
+
+get_pyver () {
+  python -c 'import sys; print(str(sys.version_info[0]) + "." + 
str(sys.version_info[1]))'
+}
+
+prepare() {
+  cd "${srcdir}/${_pkgname}-${pkgver}"
+
+  # generated using parse-submodules
+  git submodule init
+
+  git config submodule."android/libs/fbjni".url "${srcdir}/${pkgname}"-fbjni
+  git config submodule."third_party/NNPACK".url "${srcdir}/${pkgname}"-NNPACK
+  git config submodule."third_party/NNPACK_deps/FP16".url 
"${srcdir}/${pkgname}"-FP16
+  git config submodule."third_party/NNPACK_deps/FXdiv".url 
"${srcdir}/${pkgname}"-FXdiv
+  git config submodule."third_party/NNPACK_deps/psimd".url 
"${srcdir}/${pkgname}"-psimd
+  git config submodule."third_party/NNPACK_deps/pthreadpool".url 
"${srcdir}/${pkgname}"-pthreadpool
+  git config submodule."third_party/QNNPACK".url "${srcdir}/${pkgname}"-QNNPACK
+  git config submodule."third_party/VulkanMemoryAllocator".url 
"${srcdir}/${pkgname}"-VulkanMemoryAllocator
+  git config submodule."third_party/XNNPACK".url "${srcdir}/${pkgname}"-XNNPACK
+  git config submodule."third_party/benchmark".url 
"${srcdir}/${pkgname}"-benchmark
+  git config submodule."third_party/cpuinfo".url "${srcdir}/${pkgname}"-cpuinfo
+  git config submodule."third_party/cub".url "${srcdir}/${pkgname}"-cub
+  git config submodule."third_party/cudnn_frontend".url 
"${srcdir}/${pkgname}"-cudnn-frontend
+  git config submodule."third_party/cutlass".url "${srcdir}/${pkgname}"-cutlass
+  git config submodule."third_party/eigen".url "${srcdir}/${pkgname}"-eigen
+  git config submodule."third_party/fbgemm".url "${srcdir}/${pkgname}"-fbgemm
+  git config submodule."third_party/flatbuffers".url 
"${srcdir}/${pkgname}"-flatbuffers
+  git config submodule."third_party/fmt".url "${srcdir}/${pkgname}"-fmt
+  git config submodule."third_party/foxi".url "${srcdir}/${pkgname}"-foxi
+  git config submodule."third_party/gemmlowp/gemmlowp".url 
"${srcdir}/${pkgname}"-gemmlowp
+  git config submodule."third_party/gloo".url "${srcdir}/${pkgname}"-gloo
+  git config submodule."third_party/googletest".url 
"${srcdir}/${pkgname}"-googletest
+  git config submodule."third_party/ideep".url "${srcdir}/${pkgname}"-ideep
+  git config submodule."third_party/ios-cmake".url 
"${srcdir}/${pkgname}"-ios-cmake
+  git config submodule."third_party/ittapi".url "${srcdir}/${pkgname}"-ittapi
+  git config submodule."third_party/kineto".url "${srcdir}/${pkgname}"-kineto
+  git config submodule."third_party/nccl/nccl".url "${srcdir}/${pkgname}"-nccl
+  git config submodule."third_party/neon2sse".url 
"${srcdir}/${pkgname}"-ARM_NEON_2_x86_SSE
+  git config submodule."third_party/nlohmann".url "${srcdir}/${pkgname}"-json
+  git config submodule."third_party/onnx".url "${srcdir}/${pkgname}"-onnx
+  git config submodule."third_party/onnx-tensorrt".url 
"${srcdir}/${pkgname}"-onnx-tensorrt
+  git config submodule."third_party/pocketfft".url 
"${srcdir}/${pkgname}"-pocketfft
+  git config submodule."third_party/protobuf".url 
"${srcdir}/${pkgname}"-protobuf
+  git config submodule."third_party/pybind11".url 
"${srcdir}/${pkgname}"-pybind11
+  git config submodule."third_party/python-enum".url 
"${srcdir}/${pkgname}"-enum34
+  git config submodule."third_party/python-peachpy".url 
"${srcdir}/${pkgname}"-PeachPy
+  git config submodule."third_party/python-six".url "${srcdir}/${pkgname}"-six
+  git config submodule."third_party/sleef".url "${srcdir}/${pkgname}"-sleef
+  git config submodule."third_party/tbb".url "${srcdir}/${pkgname}"-tbb
+  git config submodule."third_party/tensorpipe".url 
"${srcdir}/${pkgname}"-tensorpipe
+  git config submodule."third_party/zstd".url "${srcdir}/${pkgname}"-zstd
+
+  git -c protocol.file.allow=always submodule update --init --recursive
+
+  # https://bugs.archlinux.org/task/64981
+  patch -N torch/utils/cpp_extension.py "${srcdir}"/fix_include_system.patch
+
+  # Use system libuv
+  patch -Np1 -i "${srcdir}"/use-system-libuv.patch
+
+  # fix https://github.com/pytorch/vision/issues/3695
+  patch -Np1 -i "${srcdir}/fix-building-for-torchvision.patch"
+
+  # Fix building against glog 0.6
+  patch -Np1 -i "${srcdir}/87773.patch"
+
+  # Update supported CUDA compute architectures
+  patch -Np1 -i "${srcdir}/cuda_arch_update.patch"
+
+  # build against ffmpeg4.4
+  patch -Np1 -i "${srcdir}/ffmpeg4.4.patch"
+
+  cd "${srcdir}"
+
+  cp -r "${_pkgname}-${pkgver}" "${_pkgname}-${pkgver}-opt"
+  cp -r "${_pkgname}-${pkgver}" "${_pkgname}-${pkgver}-cuda"
+  cp -r "${_pkgname}-${pkgver}" "${_pkgname}-${pkgver}-opt-cuda"
+
+  export VERBOSE=1
+  export PYTORCH_BUILD_VERSION="${pkgver}"
+  export PYTORCH_BUILD_NUMBER=1
+
+  # Check tools/setup_helpers/cmake.py, setup.py and CMakeLists.txt for a list 
of flags that can be set via env vars.
+  export ATEN_NO_TEST=ON  # do not build ATen tests
+  export USE_MKLDNN=ON
+  export BUILD_CUSTOM_PROTOBUF=OFF
+  export BUILD_CAFFE2=ON
+  export BUILD_CAFFE2_OPS=ON
+  # export BUILD_SHARED_LIBS=OFF
+  export USE_FFMPEG=ON
+  export USE_GFLAGS=ON
+  export USE_GLOG=ON
+  export BUILD_BINARY=ON
+  export USE_OBSERVERS=ON
+  export USE_OPENCV=ON
+  # export USE_SYSTEM_LIBS=ON  # experimental, not all libs present in repos
+  export USE_SYSTEM_NCCL=ON
+  export NCCL_VERSION=$(pkg-config nccl --modversion)
+  export NCCL_VER_CODE=$(sed -n 's/^#define NCCL_VERSION_CODE\s*\(.*\).*/\1/p' 
/usr/include/nccl.h)
+  # export BUILD_SPLIT_CUDA=ON  # modern preferred build, but splits libs and 
symbols, ABI break
+  # export USE_FAST_NVCC=ON  # parallel build with nvcc, spawns too many 
processes
+  export USE_CUPTI_SO=ON  # make sure cupti.so is used as shared lib
+  export CC=/usr/bin/gcc-11
+  export CXX=/usr/bin/g++-11
+  export CUDAHOSTCXX=/usr/bin/g++-11
+  export CUDA_HOST_COMPILER="${CUDAHOSTCXX}"
+  export CUDA_HOME=/opt/cuda
+  # hide buildt-time CUDA devices
+  export CUDA_VISIBLE_DEVICES=""
+  export CUDNN_LIB_DIR=/usr/lib
+  export CUDNN_INCLUDE_DIR=/usr/include
+  export TORCH_NVCC_FLAGS="-Xfatbin -compress-all"
+  # CUDA arch 8.7 is not supported (needed by Jetson boards, etc.)
+  export 
TORCH_CUDA_ARCH_LIST="5.2;5.3;6.0;6.1;6.2;7.0;7.2;7.5;8.0;8.6;8.9;9.0;9.0+PTX"  
#include latest PTX for future compat
+  export OVERRIDE_TORCH_CUDA_ARCH_LIST="${TORCH_CUDA_ARCH_LIST}"
+}
+
+build() {
+  echo "Building without cuda and without non-x86-64 optimizations"
+  export USE_CUDA=0
+  export USE_CUDNN=0
+  cd "${srcdir}/${_pkgname}-${pkgver}"
+  echo "add_definitions(-march=x86-64)" >> cmake/MiscCheck.cmake
+  # this horrible hack is necessary because the current release
+  # ships inconsistent CMake which tries to build objects before
+  # their dependencies, build twice when dependencies are available
+  python setup.py build || python setup.py build
+
+  echo "Building without cuda and with non-x86-64 optimizations"
+  export USE_CUDA=0
+  export USE_CUDNN=0
+  cd "${srcdir}/${_pkgname}-${pkgver}-opt"
+  echo "add_definitions(-march=haswell)" >> cmake/MiscCheck.cmake
+  # same horrible hack as above
+  python setup.py build || python setup.py build
+
+  echo "Building with cuda and without non-x86-64 optimizations"
+  export USE_CUDA=1
+  export USE_CUDNN=1
+  cd "${srcdir}/${_pkgname}-${pkgver}-cuda"
+  echo "add_definitions(-march=x86-64)" >> cmake/MiscCheck.cmake
+  # same horrible hack as above
+  python setup.py build || python setup.py build
+
+  echo "Building with cuda and with non-x86-64 optimizations"
+  export USE_CUDA=1
+  export USE_CUDNN=1
+  cd "${srcdir}/${_pkgname}-${pkgver}-opt-cuda"
+  echo "add_definitions(-march=haswell)" >> cmake/MiscCheck.cmake
+  # same horrible hack as above
+  python setup.py build || python setup.py build
+}
+
+_package() {
+  # Prevent setup.py from re-running CMake and rebuilding
+  sed -e 's/RUN_BUILD_DEPS = True/RUN_BUILD_DEPS = False/g' -i setup.py
+
+  python setup.py install --root="${pkgdir}"/ --optimize=1 --skip-build
+
+  install -Dm644 LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
+
+  pytorchpath="usr/lib/python$(get_pyver)/site-packages/torch"
+  install -d "${pkgdir}/usr/lib"
+
+  # put CMake files in correct place
+  mv "${pkgdir}/${pytorchpath}/share/cmake" "${pkgdir}/usr/lib/cmake"
+
+  # put C++ API in correct place
+  mv "${pkgdir}/${pytorchpath}/include" "${pkgdir}/usr/include"
+  find "${pkgdir}/${pytorchpath}"/lib/ -type f,l \( -iname '*.so' -or -iname 
'*.so*' \) -print0 | while read -rd $'\0' _lib; do
+    mv "${_lib}" "${pkgdir}"/usr/lib/
+  done
+
+  # clean up duplicates
+  # TODO: move towards direct shared library dependency of:
+  #   c10, caffe2, libcpuinfo, CUDA RT, gloo, GTest, Intel MKL,
+  #   NVRTC, ONNX, protobuf, libthreadpool, QNNPACK
+  rm -rf "${pkgdir}/usr/include/pybind11"
+
+  # python module is hardcoded to look there at runtime
+  ln -s /usr/include "${pkgdir}/${pytorchpath}/include"
+  find "${pkgdir}"/usr/lib -maxdepth 1 -type f,l \( -iname '*.so' -or -iname 
'*.so*' \) -print0 | while read -rd $'\0' _lib; do
+    ln -s ${_lib#"$pkgdir"} "${pkgdir}/${pytorchpath}/lib/"
+  done
+}
+
+package_python-pytorch() {
+  pkgdesc="${_pkgdesc}"
+
+  cd "${srcdir}/${_pkgname}-${pkgver}"
+  _package
+}
+
+package_python-pytorch-opt() {
+  pkgdesc="${_pkgdesc} (with AVX2 CPU optimizations)"
+  conflicts=(python-pytorch)
+  provides=(python-pytorch)
+
+  cd "${srcdir}/${_pkgname}-${pkgver}-opt"
+  _package
+}
+
+package_python-pytorch-cuda() {
+  pkgdesc="${_pkgdesc} (with CUDA)"
+  depends+=(cuda cudnn magma onednn)
+  conflicts=(python-pytorch)
+  provides=(python-pytorch)
+
+  cd "${srcdir}/${_pkgname}-${pkgver}-cuda"
+  _package
+}
+
+package_python-pytorch-opt-cuda() {
+  pkgdesc="${_pkgdesc} (with CUDA and AVX2 CPU optimizations)"
+  depends+=(cuda cudnn magma onednn)
+  conflicts=(python-pytorch)
+  provides=(python-pytorch python-pytorch-cuda)
+
+  cd "${srcdir}/${_pkgname}-${pkgver}-opt-cuda"
+  _package
+}
+
+# vim:set ts=2 sw=2 et:

Deleted: cuda_arch_update.patch
===================================================================
--- cuda_arch_update.patch      2022-12-19 09:34:21 UTC (rev 1361514)
+++ cuda_arch_update.patch      2022-12-19 09:34:52 UTC (rev 1361515)
@@ -1,58 +0,0 @@
-From 71fe069d985e97b5947d133f2f2bde9adea01ed7 Mon Sep 17 00:00:00 2001
-From: Greg Hogan <[email protected]>
-Date: Mon, 24 Oct 2022 21:25:36 +0000
-Subject: [PATCH] ada lovelace (arch 8.9) support (#87436)
-
-changes required to be able to compile https://github.com/pytorch/vision and 
https://github.com/nvidia/apex for `sm_89` architecture
-Pull Request resolved: https://github.com/pytorch/pytorch/pull/87436
-Approved by: https://github.com/ngimel
----
- .../upstream/FindCUDA/select_compute_arch.cmake       | 11 +++++++++++
- torch/utils/cpp_extension.py                          |  3 ++-
- 2 files changed, 13 insertions(+), 1 deletion(-)
-
-diff --git 
a/cmake/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake 
b/cmake/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake
-index 7f22d476d2fbe..822c041ee5268 100644
---- a/cmake/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake
-+++ b/cmake/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake
-@@ -98,8 +98,22 @@ if(NOT CUDA_VERSION VERSION_LESS "11.1")
-   list(APPEND CUDA_ALL_GPU_ARCHITECTURES "8.6")
-   set(CUDA_LIMIT_GPU_ARCHITECUTRE "8.6")
- 
-+  if(CUDA_VERSION VERSION_LESS "11.8")
-+    set(CUDA_LIMIT_GPU_ARCHITECTURE "8.9")
-+  endif()
-+endif()
-+
-+if(NOT CUDA_VERSION VERSION_LESS "11.8")
-+  list(APPEND CUDA_KNOWN_GPU_ARCHITECTURES "Ada")
-+  list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.9")
-+  list(APPEND CUDA_ALL_GPU_ARCHITECTURES "8.9")
-+  list(APPEND CUDA_KNOWN_GPU_ARCHITECTURES "Hopper")
-+  list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "9.0")
-+  list(APPEND CUDA_ALL_GPU_ARCHITECTURES "9.0")
-+
-   if(CUDA_VERSION VERSION_LESS "12.0")
-     set(CUDA_LIMIT_GPU_ARCHITECTURE "9.0")
-+    list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.9+PTX")
-   endif()
- endif()
- 
-diff --git a/torch/utils/cpp_extension.py b/torch/utils/cpp_extension.py
-index 36811bf22dedc..612ae9fdf0785 100644
---- a/torch/utils/cpp_extension.py
-+++ b/torch/utils/cpp_extension.py
-@@ -1729,10 +1729,12 @@ def _get_cuda_arch_flags(cflags: Optional[List[str]] = 
None) -> List[str]:
-         ('Volta', '7.0+PTX'),
-         ('Turing', '7.5+PTX'),
-         ('Ampere', '8.0;8.6+PTX'),
-+        ('Ada', '8.9+PTX'),
-+        ('Hopper', '9.0+PTX'),
-     ])
- 
-     supported_arches = ['3.5', '3.7', '5.0', '5.2', '5.3', '6.0', '6.1', 
'6.2',
--                        '7.0', '7.2', '7.5', '8.0', '8.6']
-+                        '7.0', '7.2', '7.5', '8.0', '8.6', '8.9', '9.0']
-     valid_arch_strings = supported_arches + [s + "+PTX" for s in 
supported_arches]
- 
-     # The default is sm_30 for CUDA 9.x and 10.x

Copied: python-pytorch/repos/community-x86_64/cuda_arch_update.patch (from rev 
1361514, python-pytorch/trunk/cuda_arch_update.patch)
===================================================================
--- cuda_arch_update.patch                              (rev 0)
+++ cuda_arch_update.patch      2022-12-19 09:34:52 UTC (rev 1361515)
@@ -0,0 +1,58 @@
+From 71fe069d985e97b5947d133f2f2bde9adea01ed7 Mon Sep 17 00:00:00 2001
+From: Greg Hogan <[email protected]>
+Date: Mon, 24 Oct 2022 21:25:36 +0000
+Subject: [PATCH] ada lovelace (arch 8.9) support (#87436)
+
+changes required to be able to compile https://github.com/pytorch/vision and 
https://github.com/nvidia/apex for `sm_89` architecture
+Pull Request resolved: https://github.com/pytorch/pytorch/pull/87436
+Approved by: https://github.com/ngimel
+---
+ .../upstream/FindCUDA/select_compute_arch.cmake       | 11 +++++++++++
+ torch/utils/cpp_extension.py                          |  3 ++-
+ 2 files changed, 13 insertions(+), 1 deletion(-)
+
+diff --git 
a/cmake/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake 
b/cmake/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake
+index 7f22d476d2fbe..822c041ee5268 100644
+--- a/cmake/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake
++++ b/cmake/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake
+@@ -98,8 +98,22 @@ if(NOT CUDA_VERSION VERSION_LESS "11.1")
+   list(APPEND CUDA_ALL_GPU_ARCHITECTURES "8.6")
+   set(CUDA_LIMIT_GPU_ARCHITECUTRE "8.6")
+ 
++  if(CUDA_VERSION VERSION_LESS "11.8")
++    set(CUDA_LIMIT_GPU_ARCHITECTURE "8.9")
++  endif()
++endif()
++
++if(NOT CUDA_VERSION VERSION_LESS "11.8")
++  list(APPEND CUDA_KNOWN_GPU_ARCHITECTURES "Ada")
++  list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.9")
++  list(APPEND CUDA_ALL_GPU_ARCHITECTURES "8.9")
++  list(APPEND CUDA_KNOWN_GPU_ARCHITECTURES "Hopper")
++  list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "9.0")
++  list(APPEND CUDA_ALL_GPU_ARCHITECTURES "9.0")
++
+   if(CUDA_VERSION VERSION_LESS "12.0")
+     set(CUDA_LIMIT_GPU_ARCHITECTURE "9.0")
++    list(APPEND CUDA_COMMON_GPU_ARCHITECTURES "8.9+PTX")
+   endif()
+ endif()
+ 
+diff --git a/torch/utils/cpp_extension.py b/torch/utils/cpp_extension.py
+index 36811bf22dedc..612ae9fdf0785 100644
+--- a/torch/utils/cpp_extension.py
++++ b/torch/utils/cpp_extension.py
+@@ -1729,10 +1729,12 @@ def _get_cuda_arch_flags(cflags: Optional[List[str]] = 
None) -> List[str]:
+         ('Volta', '7.0+PTX'),
+         ('Turing', '7.5+PTX'),
+         ('Ampere', '8.0;8.6+PTX'),
++        ('Ada', '8.9+PTX'),
++        ('Hopper', '9.0+PTX'),
+     ])
+ 
+     supported_arches = ['3.5', '3.7', '5.0', '5.2', '5.3', '6.0', '6.1', 
'6.2',
+-                        '7.0', '7.2', '7.5', '8.0', '8.6']
++                        '7.0', '7.2', '7.5', '8.0', '8.6', '8.9', '9.0']
+     valid_arch_strings = supported_arches + [s + "+PTX" for s in 
supported_arches]
+ 
+     # The default is sm_30 for CUDA 9.x and 10.x

Deleted: ffmpeg4.4.patch
===================================================================
--- ffmpeg4.4.patch     2022-12-19 09:34:21 UTC (rev 1361514)
+++ ffmpeg4.4.patch     2022-12-19 09:34:52 UTC (rev 1361515)
@@ -1,51 +0,0 @@
-diff --git a/cmake/Modules/FindFFmpeg.cmake b/cmake/Modules/FindFFmpeg.cmake
-index 04437562ee..dc8fe4aa5c 100644
---- a/cmake/Modules/FindFFmpeg.cmake
-+++ b/cmake/Modules/FindFFmpeg.cmake
-@@ -14,34 +14,40 @@ else (FFMPEG_LIBRARIES AND FFMPEG_INCLUDE_DIR)
- 
-   find_path(FFMPEG_AVCODEC_INCLUDE_DIR
-     NAMES libavcodec/avcodec.h
--    PATHS ${_FFMPEG_AVCODEC_INCLUDE_DIRS} /usr/include /usr/local/include 
/opt/local/include /sw/include
-+    PATHS /usr/include/ffmpeg4.4 /usr/local/include /opt/local/include 
/sw/include
-     PATH_SUFFIXES ffmpeg libav
-+    NO_DEFAULT_PATH
-   )
- 
-   find_library(FFMPEG_LIBAVCODEC
-     NAMES avcodec
--    PATHS ${_FFMPEG_AVCODEC_LIBRARY_DIRS} /usr/lib /usr/local/lib 
/opt/local/lib /sw/lib
-+    PATHS /usr/lib/ffmpeg4.4 /usr/local/lib /opt/local/lib /sw/lib
-+    NO_DEFAULT_PATH
-   )
- 
-   find_library(FFMPEG_LIBAVFORMAT
-     NAMES avformat
--    PATHS ${_FFMPEG_AVFORMAT_LIBRARY_DIRS} /usr/lib /usr/local/lib 
/opt/local/lib /sw/lib
-+    PATHS /usr/lib/ffmpeg4.4 /usr/local/lib /opt/local/lib /sw/lib
-+    NO_DEFAULT_PATH
-   )
- 
-   find_library(FFMPEG_LIBAVUTIL
-     NAMES avutil
--    PATHS ${_FFMPEG_AVUTIL_LIBRARY_DIRS} /usr/lib /usr/local/lib 
/opt/local/lib /sw/lib
-+    PATHS /usr/lib/ffmpeg4.4 /usr/local/lib /opt/local/lib /sw/lib
-+    NO_DEFAULT_PATH
-   )
- 
- 
-   find_library(FFMPEG_LIBSWSCALE
-     NAMES swscale
--    PATHS ${_FFMPEG_SWSCALE_LIBRARY_DIRS} /usr/lib /usr/local/lib 
/opt/local/lib /sw/lib
-+    PATHS /usr/lib/ffmpeg4.4 /usr/local/lib /opt/local/lib /sw/lib
-+    NO_DEFAULT_PATH
-   )
- 
-   find_library(FFMPEG_LIBSWRESAMPLE
-     NAMES swresample
--    PATHS ${_FFMPEG_SWSCALE_LIBRARY_DIRS} /usr/lib /usr/local/lib 
/opt/local/lib /sw/lib
-+    PATHS /usr/lib/ffmpeg4.4 /usr/local/lib /opt/local/lib /sw/lib
-+    NO_DEFAULT_PATH
-   )
- 
-   if (FFMPEG_LIBAVCODEC AND FFMPEG_LIBAVFORMAT)

Copied: python-pytorch/repos/community-x86_64/ffmpeg4.4.patch (from rev 
1361514, python-pytorch/trunk/ffmpeg4.4.patch)
===================================================================
--- ffmpeg4.4.patch                             (rev 0)
+++ ffmpeg4.4.patch     2022-12-19 09:34:52 UTC (rev 1361515)
@@ -0,0 +1,51 @@
+diff --git a/cmake/Modules/FindFFmpeg.cmake b/cmake/Modules/FindFFmpeg.cmake
+index 04437562ee..dc8fe4aa5c 100644
+--- a/cmake/Modules/FindFFmpeg.cmake
++++ b/cmake/Modules/FindFFmpeg.cmake
+@@ -14,34 +14,40 @@ else (FFMPEG_LIBRARIES AND FFMPEG_INCLUDE_DIR)
+ 
+   find_path(FFMPEG_AVCODEC_INCLUDE_DIR
+     NAMES libavcodec/avcodec.h
+-    PATHS ${_FFMPEG_AVCODEC_INCLUDE_DIRS} /usr/include /usr/local/include 
/opt/local/include /sw/include
++    PATHS /usr/include/ffmpeg4.4 /usr/local/include /opt/local/include 
/sw/include
+     PATH_SUFFIXES ffmpeg libav
++    NO_DEFAULT_PATH
+   )
+ 
+   find_library(FFMPEG_LIBAVCODEC
+     NAMES avcodec
+-    PATHS ${_FFMPEG_AVCODEC_LIBRARY_DIRS} /usr/lib /usr/local/lib 
/opt/local/lib /sw/lib
++    PATHS /usr/lib/ffmpeg4.4 /usr/local/lib /opt/local/lib /sw/lib
++    NO_DEFAULT_PATH
+   )
+ 
+   find_library(FFMPEG_LIBAVFORMAT
+     NAMES avformat
+-    PATHS ${_FFMPEG_AVFORMAT_LIBRARY_DIRS} /usr/lib /usr/local/lib 
/opt/local/lib /sw/lib
++    PATHS /usr/lib/ffmpeg4.4 /usr/local/lib /opt/local/lib /sw/lib
++    NO_DEFAULT_PATH
+   )
+ 
+   find_library(FFMPEG_LIBAVUTIL
+     NAMES avutil
+-    PATHS ${_FFMPEG_AVUTIL_LIBRARY_DIRS} /usr/lib /usr/local/lib 
/opt/local/lib /sw/lib
++    PATHS /usr/lib/ffmpeg4.4 /usr/local/lib /opt/local/lib /sw/lib
++    NO_DEFAULT_PATH
+   )
+ 
+ 
+   find_library(FFMPEG_LIBSWSCALE
+     NAMES swscale
+-    PATHS ${_FFMPEG_SWSCALE_LIBRARY_DIRS} /usr/lib /usr/local/lib 
/opt/local/lib /sw/lib
++    PATHS /usr/lib/ffmpeg4.4 /usr/local/lib /opt/local/lib /sw/lib
++    NO_DEFAULT_PATH
+   )
+ 
+   find_library(FFMPEG_LIBSWRESAMPLE
+     NAMES swresample
+-    PATHS ${_FFMPEG_SWSCALE_LIBRARY_DIRS} /usr/lib /usr/local/lib 
/opt/local/lib /sw/lib
++    PATHS /usr/lib/ffmpeg4.4 /usr/local/lib /opt/local/lib /sw/lib
++    NO_DEFAULT_PATH
+   )
+ 
+   if (FFMPEG_LIBAVCODEC AND FFMPEG_LIBAVFORMAT)

Deleted: fix-building-for-torchvision.patch
===================================================================
--- fix-building-for-torchvision.patch  2022-12-19 09:34:21 UTC (rev 1361514)
+++ fix-building-for-torchvision.patch  2022-12-19 09:34:52 UTC (rev 1361515)
@@ -1,25 +0,0 @@
-From 011495d8045c44527fbd7796ce860618120ae127 Mon Sep 17 00:00:00 2001
-From: Butui Hu <[email protected]>
-Date: Fri, 30 Apr 2021 11:36:30 +0800
-Subject: [PATCH] fix building torchvision
-
----
- aten/src/ATen/core/op_registration/op_allowlist.h | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/aten/src/ATen/core/op_registration/op_allowlist.h 
b/aten/src/ATen/core/op_registration/op_allowlist.h
-index f93462bb2cf..12903d1cc09 100644
---- a/aten/src/ATen/core/op_registration/op_allowlist.h
-+++ b/aten/src/ATen/core/op_registration/op_allowlist.h
-@@ -59,7 +59,7 @@ constexpr bool op_allowlist_contains(string_view allowlist, 
string_view item) {
- // Returns true iff the given op name is on the allowlist
- // and should be registered
- constexpr bool op_allowlist_check(string_view op_name) {
--  assert(op_name.find("::") != string_view::npos);
-+//  assert(op_name.find("::") != string_view::npos);
-   // Use assert() instead of throw() due to a gcc bug. See:
-   // https://stackoverflow.com/questions/34280729/throw-in-constexpr-function
-   // https://github.com/fmtlib/fmt/issues/682
--- 
-2.31.1
-

Copied: 
python-pytorch/repos/community-x86_64/fix-building-for-torchvision.patch (from 
rev 1361514, python-pytorch/trunk/fix-building-for-torchvision.patch)
===================================================================
--- fix-building-for-torchvision.patch                          (rev 0)
+++ fix-building-for-torchvision.patch  2022-12-19 09:34:52 UTC (rev 1361515)
@@ -0,0 +1,25 @@
+From 011495d8045c44527fbd7796ce860618120ae127 Mon Sep 17 00:00:00 2001
+From: Butui Hu <[email protected]>
+Date: Fri, 30 Apr 2021 11:36:30 +0800
+Subject: [PATCH] fix building torchvision
+
+---
+ aten/src/ATen/core/op_registration/op_allowlist.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/aten/src/ATen/core/op_registration/op_allowlist.h 
b/aten/src/ATen/core/op_registration/op_allowlist.h
+index f93462bb2cf..12903d1cc09 100644
+--- a/aten/src/ATen/core/op_registration/op_allowlist.h
++++ b/aten/src/ATen/core/op_registration/op_allowlist.h
+@@ -59,7 +59,7 @@ constexpr bool op_allowlist_contains(string_view allowlist, 
string_view item) {
+ // Returns true iff the given op name is on the allowlist
+ // and should be registered
+ constexpr bool op_allowlist_check(string_view op_name) {
+-  assert(op_name.find("::") != string_view::npos);
++//  assert(op_name.find("::") != string_view::npos);
+   // Use assert() instead of throw() due to a gcc bug. See:
+   // https://stackoverflow.com/questions/34280729/throw-in-constexpr-function
+   // https://github.com/fmtlib/fmt/issues/682
+-- 
+2.31.1
+

Deleted: fix_include_system.patch
===================================================================
--- fix_include_system.patch    2022-12-19 09:34:21 UTC (rev 1361514)
+++ fix_include_system.patch    2022-12-19 09:34:52 UTC (rev 1361515)
@@ -1,13 +0,0 @@
-diff --git a/torch/utils/cpp_extension.py b/torch/utils/cpp_extension.py
-index ca673033e1..c79ce8d37b 100644
---- a/torch/utils/cpp_extension.py
-+++ b/torch/utils/cpp_extension.py
-@@ -1760,7 +1760,7 @@ def _write_ninja_file_to_build_library(path,
-             common_cflags.append(f'-DPYBIND11_{pname}=\\"{pval}\\"')
- 
-     common_cflags += [f'-I{include}' for include in user_includes]
--    common_cflags += [f'-isystem {include}' for include in system_includes]
-+    common_cflags += [f'-I{include}' for include in system_includes]
- 
-     common_cflags += ['-D_GLIBCXX_USE_CXX11_ABI=' + 
str(int(torch._C._GLIBCXX_USE_CXX11_ABI))]
- 

Copied: python-pytorch/repos/community-x86_64/fix_include_system.patch (from 
rev 1361514, python-pytorch/trunk/fix_include_system.patch)
===================================================================
--- fix_include_system.patch                            (rev 0)
+++ fix_include_system.patch    2022-12-19 09:34:52 UTC (rev 1361515)
@@ -0,0 +1,13 @@
+diff --git a/torch/utils/cpp_extension.py b/torch/utils/cpp_extension.py
+index ca673033e1..c79ce8d37b 100644
+--- a/torch/utils/cpp_extension.py
++++ b/torch/utils/cpp_extension.py
+@@ -1760,7 +1760,7 @@ def _write_ninja_file_to_build_library(path,
+             common_cflags.append(f'-DPYBIND11_{pname}=\\"{pval}\\"')
+ 
+     common_cflags += [f'-I{include}' for include in user_includes]
+-    common_cflags += [f'-isystem {include}' for include in system_includes]
++    common_cflags += [f'-I{include}' for include in system_includes]
+ 
+     common_cflags += ['-D_GLIBCXX_USE_CXX11_ABI=' + 
str(int(torch._C._GLIBCXX_USE_CXX11_ABI))]
+ 

Deleted: test.py
===================================================================
--- test.py     2022-12-19 09:34:21 UTC (rev 1361514)
+++ test.py     2022-12-19 09:34:52 UTC (rev 1361515)
@@ -1,7 +0,0 @@
-#!/usr/bin/env python
-
-import torch
-
-d = torch.device('cuda')
-a = torch.rand(1, 2).to(d)
-print(a + 0)

Copied: python-pytorch/repos/community-x86_64/test.py (from rev 1361514, 
python-pytorch/trunk/test.py)
===================================================================
--- test.py                             (rev 0)
+++ test.py     2022-12-19 09:34:52 UTC (rev 1361515)
@@ -0,0 +1,7 @@
+#!/usr/bin/env python
+
+import torch
+
+d = torch.device('cuda')
+a = torch.rand(1, 2).to(d)
+print(a + 0)

Deleted: use-system-libuv.patch
===================================================================
--- use-system-libuv.patch      2022-12-19 09:34:21 UTC (rev 1361514)
+++ use-system-libuv.patch      2022-12-19 09:34:52 UTC (rev 1361515)
@@ -1,13 +0,0 @@
-diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake
-index 06464e799a..93410bc210 100644
---- a/cmake/Dependencies.cmake
-+++ b/cmake/Dependencies.cmake
-@@ -1346,7 +1346,7 @@ if(USE_DISTRIBUTED AND USE_TENSORPIPE)
-       set(TP_USE_CUDA ON CACHE BOOL "" FORCE)
-       set(TP_ENABLE_CUDA_IPC ON CACHE BOOL "" FORCE)
-     endif()
--    set(TP_BUILD_LIBUV ON CACHE BOOL "" FORCE)
-+    set(TP_BUILD_LIBUV OFF CACHE BOOL "" FORCE)
-     set(TP_STATIC_OR_SHARED STATIC CACHE STRING "" FORCE)
- 
-     add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/tensorpipe)

Copied: python-pytorch/repos/community-x86_64/use-system-libuv.patch (from rev 
1361514, python-pytorch/trunk/use-system-libuv.patch)
===================================================================
--- use-system-libuv.patch                              (rev 0)
+++ use-system-libuv.patch      2022-12-19 09:34:52 UTC (rev 1361515)
@@ -0,0 +1,13 @@
+diff --git a/cmake/Dependencies.cmake b/cmake/Dependencies.cmake
+index 06464e799a..93410bc210 100644
+--- a/cmake/Dependencies.cmake
++++ b/cmake/Dependencies.cmake
+@@ -1346,7 +1346,7 @@ if(USE_DISTRIBUTED AND USE_TENSORPIPE)
+       set(TP_USE_CUDA ON CACHE BOOL "" FORCE)
+       set(TP_ENABLE_CUDA_IPC ON CACHE BOOL "" FORCE)
+     endif()
+-    set(TP_BUILD_LIBUV ON CACHE BOOL "" FORCE)
++    set(TP_BUILD_LIBUV OFF CACHE BOOL "" FORCE)
+     set(TP_STATIC_OR_SHARED STATIC CACHE STRING "" FORCE)
+ 
+     add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/tensorpipe)

Reply via email to