You are receiving this mail because a port that you maintain
is failing to build on the FreeBSD package build server.
Please investigate the failure and submit a PR to fix
the build.

Maintainer:     y...@freebsd.org
Log URL:        
https://pkg-status.freebsd.org/beefy2/data/124amd64-quarterly/1c331580481c/logs/pytorch-1.13.1_1.log
Build URL:      
https://pkg-status.freebsd.org/beefy2/build.html?mastername=124amd64-quarterly&build=1c331580481c
Log:

=>> Building misc/pytorch
build started at Thu Jul 27 19:00:05 UTC 2023
port directory: /usr/ports/misc/pytorch
package name: pytorch-1.13.1_1
building for: FreeBSD 124amd64-quarterly-job-03 12.4-RELEASE-p3 FreeBSD 
12.4-RELEASE-p3 amd64
maintained by: y...@freebsd.org
Makefile ident: 
Poudriere version: 3.2.8-23-ga7f8d188
Host OSVERSION: 1400088
Jail OSVERSION: 1204000
Job Id: 03

---Begin Environment---
SHELL=/bin/csh
OSVERSION=1204000
UNAME_v=FreeBSD 12.4-RELEASE-p3
UNAME_r=12.4-RELEASE-p3
BLOCKSIZE=K
MAIL=/var/mail/root
STATUS=1
HOME=/root
PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/root/bin
LOCALBASE=/usr/local
USER=root
LIBEXECPREFIX=/usr/local/libexec/poudriere
POUDRIERE_VERSION=3.2.8-23-ga7f8d188
MASTERMNT=/usr/local/poudriere/data/.m/124amd64-quarterly/ref
POUDRIERE_BUILD_TYPE=bulk
PACKAGE_BUILDING=yes
SAVED_TERM=
PWD=/usr/local/poudriere/data/.m/124amd64-quarterly/ref/.p/pool
P_PORTS_FEATURES=FLAVORS SELECTED_OPTIONS
MASTERNAME=124amd64-quarterly
SCRIPTPREFIX=/usr/local/share/poudriere
OLDPWD=/usr/local/poudriere/data/.m/124amd64-quarterly/ref/.p
SCRIPTPATH=/usr/local/share/poudriere/bulk.sh
POUDRIEREPATH=/usr/local/bin/poudriere
---End Environment---

---Begin Poudriere Port Flags/Env---
PORT_FLAGS=
PKGENV=
FLAVOR=
DEPENDS_ARGS=
MAKE_ARGS=
---End Poudriere Port Flags/Env---

---Begin OPTIONS List---
===> The following configuration options are available for pytorch-1.13.1_1:
     PYTHON=on: Python bindings or support
===> Use 'make config' to modify these settings
---End OPTIONS List---

--MAINTAINER--
y...@freebsd.org
--End MAINTAINER--

--CONFIGURE_ARGS--

--End CONFIGURE_ARGS--

--CONFIGURE_ENV--
PYTHON="/usr/local/bin/python3.9" 
XDG_DATA_HOME=/wrkdirs/usr/ports/misc/pytorch/work  
XDG_CONFIG_HOME=/wrkdirs/usr/ports/misc/pytorch/work  
XDG_CACHE_HOME=/wrkdirs/usr/ports/misc/pytorch/work/.cache  
HOME=/wrkdirs/usr/ports/misc/pytorch/work TMPDIR="/tmp" 
PATH=/wrkdirs/usr/ports/misc/pytorch/work/.bin:/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/root/bin
 
PKG_CONFIG_LIBDIR=/wrkdirs/usr/ports/misc/pytorch/work/.pkgconfig:/usr/local/libdata/pkgconfig:/usr/local/share/pkgconfig:/usr/libdata/pkgconfig
 SHELL=/bin/sh CONFIG_SHELL=/bin/sh CMAKE_PREFIX_PATH="/usr/local"
--End CONFIGURE_ENV--

--MAKE_ENV--
USE_NINJA=no NINJA_STATUS="[%p %s/%t] " 
XDG_DATA_HOME=/wrkdirs/usr/ports/misc/pytorch/work  
XDG_CONFIG_HOME=/wrkdirs/usr/ports/misc/pytorch/work  
XDG_CACHE_HOME=/wrkdirs/usr/ports/misc/pytorch/work/.cache  
HOME=/wrkdirs/usr/ports/misc/pytorch/work TMPDIR="/tmp" 
PATH=/wrkdirs/usr/ports/misc/pytorch/work/.bin:/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin:/root/bin
 
PKG_CONFIG_LIBDIR=/wrkdirs/usr/ports/misc/pytorch/work/.pkgconfig:/usr/local/libdata/pkgconfig:/usr/local/share/pkgconfig:/usr/libdata/pkgconfig
 MK_DEBUG_FILES=no MK_KERNEL_SYMBOLS=no SHELL=/bin/sh NO_LINT=YES 
DESTDIR=/wrkdirs/usr/ports/misc/pytorch/work/stage PREFIX=/usr/local  
LOCALBASE=/usr/local  CC="cc" CFLAGS="-O2 -pipe  -fstack-protector-strong 
-isystem /usr/local/include -fno-strict-aliasing "  CPP="cpp" 
CPPFLAGS="-isystem /usr/local/include"  LDFLAGS=" -lexecinfo 
-fstack-protector-strong -L/usr/local/lib " LIBS=""  CXX="c++" CXXFLAGS="-O2 
-pipe -fstack-protector-strong -isystem /usr/local/include -fno-strict-aliasing  -isystem /usr/local/include "  MANPREFIX="/usr/local" 
BSD_INSTALL_PROGRAM="install  -s -m 555"  BSD_INSTALL_LIB="install  -s -m 0644" 
 BSD_INSTALL_SCRIPT="install  -m 555"  BSD_INSTALL_DATA="install  -m 0644"  
BSD_INSTALL_MAN="install  -m 444"
--End MAKE_ENV--

--PLIST_SUB--
PYTHON="" NO_PYTHON="@comment " CMAKE_BUILD_TYPE="release" 
PYTHON_INCLUDEDIR=include/python3.9  PYTHON_LIBDIR=lib/python3.9  
PYTHON_PLATFORM=freebsd12  PYTHON_SITELIBDIR=lib/python3.9/site-packages  
PYTHON_SUFFIX=39  PYTHON_EXT_SUFFIX=.cpython-39  PYTHON_VER=3.9  
PYTHON_VERSION=python3.9 PYTHON2="@comment " PYTHON3="" AMD64="" OSREL=12.4 
PREFIX=%D LOCALBASE=/usr/local  RESETPREFIX=/usr/local LIB32DIR=lib 
DOCSDIR="share/doc/pytorch"  EXAMPLESDIR="share/examples/pytorch"  
DATADIR="share/pytorch"  WWWDIR="www/pytorch"  ETCDIR="etc/pytorch"
--End PLIST_SUB--

--SUB_LIST--
PYTHON="" NO_PYTHON="@comment " PYTHON_INCLUDEDIR=/usr/local/include/python3.9  
PYTHON_LIBDIR=/usr/local/lib/python3.9  PYTHON_PLATFORM=freebsd12  
PYTHON_SITELIBDIR=/usr/local/lib/python3.9/site-packages  PYTHON_SUFFIX=39  
PYTHON_EXT_SUFFIX=.cpython-39  PYTHON_VER=3.9  PYTHON_VERSION=python3.9 
PYTHON2="@comment " PYTHON3="" PREFIX=/usr/local LOCALBASE=/usr/local  
DATADIR=/usr/local/share/pytorch DOCSDIR=/usr/local/share/doc/pytorch 
EXAMPLESDIR=/usr/local/share/examples/pytorch  WWWDIR=/usr/local/www/pytorch 
ETCDIR=/usr/local/etc/pytorch
--End SUB_LIST--

---Begin make.conf---
USE_PACKAGE_DEPENDS=yes
BATCH=yes
WRKDIRPREFIX=/wrkdirs
PORTSDIR=/usr/ports
PACKAGES=/packages
DISTDIR=/distfiles
PACKAGE_BUILDING=yes
PACKAGE_BUILDING_FLAVORS=yes
#### /usr/local/etc/poudriere.d/make.conf ####
# XXX: We really need this but cannot use it while 'make checksum' does not
# try the next mirror on checksum failure.  It currently retries the same
# failed mirror and then fails rather then trying another.  It *does*
# try the next if the size is mismatched though.
#MASTER_SITE_FREEBSD=yes
# Build ALLOW_MAKE_JOBS_PACKAGES with 2 jobs
MAKE_JOBS_NUMBER=2
#### /usr/ports/Mk/Scripts/ports_env.sh ####
_CCVERSION_921dbbb2=FreeBSD clang version 13.0.0 
(g...@github.com:llvm/llvm-project.git llvmorg-13.0.0-0-gd7b669b3a303) Target: 
x86_64-unknown-freebsd12.4 Thread model: posix InstalledDir: /usr/bin
_ALTCCVERSION_921dbbb2=none
_CXXINTERNAL_acaad9ca=FreeBSD clang version 13.0.0 
(g...@github.com:llvm/llvm-project.git llvmorg-13.0.0-0-gd7b669b3a303) Target: 
x86_64-unknown-freebsd12.4 Thread model: posix InstalledDir: /usr/bin 
"/usr/bin/ld" "--eh-frame-hdr" "-dynamic-linker" "/libexec/ld-elf.so.1" 
"--hash-style=both" "--enable-new-dtags" "-o" "a.out" "/usr/lib/crt1.o" 
"/usr/lib/crti.o" "/usr/lib/crtbegin.o" "-L/usr/lib" "/dev/null" "-lc++" "-lm" 
"-lgcc" "--as-needed" "-lgcc_s" "--no-as-needed" "-lc" "-lgcc" "--as-needed" 
"-lgcc_s" "--no-as-needed" "/usr/lib/crtend.o" "/usr/lib/crtn.o"
CC_OUTPUT_921dbbb2_58173849=yes
CC_OUTPUT_921dbbb2_9bdba57c=yes
CC_OUTPUT_921dbbb2_6a4fe7f5=yes
CC_OUTPUT_921dbbb2_6bcac02b=yes
CC_OUTPUT_921dbbb2_67d20829=yes
CC_OUTPUT_921dbbb2_bfa62e83=yes
CC_OUTPUT_921dbbb2_f0b4d593=yes
CC_OUTPUT_921dbbb2_308abb44=yes
CC_OUTPUT_921dbbb2_f00456e5=yes
CC_OUTPUT_921dbbb2_65ad290d=yes
CC_OUTPUT_921dbbb2_f2776b26=yes
CC_OUTPUT_921dbbb2_53255a77=yes
CC_OUTPUT_921dbbb2_911cfe02=yes
CC_OUTPUT_921dbbb2_b2657cc3=yes
CC_OUTPUT_921dbbb2_380987f7=yes
CC_OUTPUT_921dbbb2_160933ec=yes
CC_OUTPUT_921dbbb2_fb62803b=yes
CC_OUTPUT_921dbbb2_af59ad06=yes
CC_OUTPUT_921dbbb2_a15f3fcf=yes
_OBJC_CCVERSION_921dbbb2=FreeBSD clang version 13.0.0 
(g...@github.com:llvm/llvm-project.git llvmorg-13.0.0-0-gd7b669b3a303) Target: 
x86_64-unknown-freebsd12.4 Thread model: posix InstalledDir: /usr/bin
_OBJC_ALTCCVERSION_921dbbb2=none
ARCH=amd64
OPSYS=FreeBSD
_OSRELEASE=12.4-RELEASE-p3
OSREL=12.4
OSVERSION=1204000
PYTHONBASE=/usr/local
HAVE_COMPAT_IA32_KERN=YES
CONFIGURE_MAX_CMD_LEN=524288
HAVE_PORTS_ENV=1
#### Misc Poudriere ####
GID=0
UID=0
---End make.conf---
--Resource limits--
cpu time               (seconds, -t)  unlimited
file size           (512-blocks, -f)  unlimited
data seg size           (kbytes, -d)  33554432
stack size              (kbytes, -s)  524288
core file size      (512-blocks, -c)  unlimited
max memory size         (kbytes, -m)  unlimited
locked memory           (kbytes, -l)  unlimited
max user processes              (-u)  89999
open files                      (-n)  1024
virtual mem size        (kbytes, -v)  unlimited
swap limit              (kbytes, -w)  unlimited
socket buffer size       (bytes, -b)  unlimited
pseudo-terminals                (-p)  unlimited
kqueues                         (-k)  unlimited
umtx shared locks               (-o)  unlimited
--End resource limits--
=======================<phase: check-sanity   >============================
===>  License BSD3CLAUSE accepted by the user
===========================================================================
=======================<phase: pkg-depends    >============================
===>   pytorch-1.13.1_1 depends on file: /usr/local/sbin/pkg - not found
===>   Installing existing package /packages/All/pkg-1.19.2.pkg
[124amd64-quarterly-job-03] Installing pkg-1.19.2...
[124amd64-quarterly-job-03] Extracting pkg-1.19.2: .......... done
===>   pytorch-1.13.1_1 depends on file: /usr/local/sbin/pkg - found
===>   Returning to build of pytorch-1.13.1_1
===========================================================================
=======================<phase: fetch-depends  >============================
===========================================================================
=======================<phase: fetch          >============================
===>  License BSD3CLAUSE accepted by the user
===> Fetching all distfiles required by pytorch-1.13.1_1 for building
===========================================================================
=======================<phase: checksum       >============================
===>  License BSD3CLAUSE accepted by the user
===> Fetching all distfiles required by pytorch-1.13.1_1 for building
=> SHA256 Checksum OK for pytorch/pytorch-v1.13.1.tar.gz.
===========================================================================
=======================<phase: extract-depends>============================
===========================================================================
=======================<phase: extract        >============================
===>  License BSD3CLAUSE accepted by the user
===> Fetching all distfiles required by pytorch-1.13.1_1 for building
===>  Extracting for pytorch-1.13.1_1
=> SHA256 Checksum OK for pytorch/pytorch-v1.13.1.tar.gz.
===========================================================================
=======================<phase: patch-depends  >============================
===========================================================================
=======================<phase: patch          >============================
===>  Patching for pytorch-1.13.1_1
===>  Applying FreeBSD patches for pytorch-1.13.1_1 from 
/usr/ports/misc/pytorch/files
grep: warning: third_party/ittapi/rust/ittapi-sys/c-library: recursive 
directory loop
===========================================================================
=======================<phase: build-depends  >============================
===>   pytorch-1.13.1_1 depends on executable: gmake - not found
===>   Installing existing package /packages/All/gmake-4.3_2.pkg
[124amd64-quarterly-job-03] Installing gmake-4.3_2...
[124amd64-quarterly-job-03] `-- Installing gettext-runtime-0.21.1...
[124amd64-quarterly-job-03] |   `-- Installing indexinfo-0.3.1...
[124amd64-quarterly-job-03] |   `-- Extracting indexinfo-0.3.1: .... done
[124amd64-quarterly-job-03] `-- Extracting gettext-runtime-0.21.1: .......... 
done
[124amd64-quarterly-job-03] Extracting gmake-4.3_2: .......... done
===>   pytorch-1.13.1_1 depends on executable: gmake - found
===>   Returning to build of pytorch-1.13.1_1
===>   pytorch-1.13.1_1 depends on file: /usr/local/include/fxdiv.h - not found
===>   Installing existing package /packages/All/fxdiv-g20181121.pkg
<snip>
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:303:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_expm1f8_u10);
               ^~~~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:393:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_logf8_u10);
               ^~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:396:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_log2f8_u10);
               ^~~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:399:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_log10f8_u10);
               ^~~~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:402:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_log1pf8_u10);
               ^~~~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:406:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_sinf8_u10);
               ^~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:409:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_sinhf8_u10);
               ^~~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:412:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_cosf8_u10);
               ^~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:415:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_coshf8_u10);
               ^~~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:447:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_tanf8_u10);
               ^~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:450:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_tanhf8_u10);
               ^~~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:460:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_lgammaf8_u10);
               ^~~~~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
18 errors generated.
[ 80% 1027/1283] /usr/bin/c++ -DAT_PER_OPERATOR_HEADERS 
-DCPUINFO_SUPPORTED_PLATFORM=0 -DFMT_HEADER_ONLY=1 -DHAVE_MALLOC_USABLE_SIZE=1 
-DHAVE_MMAP=1 -DHAVE_SHM_OPEN=1 -DHAVE_SHM_UNLINK=1 
-DMINIZ_DISABLE_ZIP_READER_CRC32_CHECKS -DONNXIFI_ENABLE_EXT=1 -DONNX_ML=1 
-DONNX_NAMESPACE=onnx -DUSE_EXTERNAL_MZCRC -D_FILE_OFFSET_BITS=64 
-Dtorch_cpu_EXPORTS -I/wrkdirs/usr/ports/misc/pytorch/work/.build/aten/src 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src 
-I/wrkdirs/usr/ports/misc/pytorch/work/.build 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/third_party/foxi 
-I/wrkdirs/usr/ports/misc/pytorch/work/.build/third_party/foxi 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/torch/csrc/api 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/torch/csrc/api/include 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/caffe2/aten/src/TH 
-I/wrkdirs/usr/ports/misc/pytorch/work/.build/caffe2/aten/src/TH
 -I/wrkdirs/usr/ports/misc/pytorch/work/.build/caffe2/aten/src 
-I/wrkdirs/usr/ports/misc/pytorch/work/.build/caffe2/../aten/src 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/torch/csrc 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/third_party/miniz-2.1.0 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/third_party/kineto/libkineto/include
 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/third_party/kineto/libkineto/src
 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/../third_party/catch/single_include
 -I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/.. 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/c10/.. 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/third_party/cpuinfo/include
 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/third_party/FP16/include 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/third_party/fmt/include 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/third_party/flatbuffers/include -isystem 
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/cmake/../third_party/eigen 
-isystem /wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/caffe2 -O2 -pipe 
-fstack-protector-strong -isystem /usr/local/include -fno-strict-aliasing  
-isystem /usr/local/include -Wno-deprecated -fvisibility-inlines-hidden 
-fopenmp=libomp -DNDEBUG -DUSE_KINETO -DLIBKINETO_NOCUPTI 
-DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC 
-Wno-narrowing -Wall -Wextra -Werror=return-type -Werror=non-virtual-dtor 
-Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds 
-Wno-unknown-pragmas -Wunused-local-typedefs -Wno-unused-parameter 
-Wno-unused-function -Wno-unused-result -Wno-strict-overflow 
-Wno-strict-aliasing -Wno-error=deprecated-declarations -Wvla-extension 
-Wno-range-loop-analysis -Wno-pass-failed -Wno-error=pedantic 
-Wno-error=redundant-decls -Wno-error=old-style-cast -Wconstant-conversion 
-Wno-invalid-partial-specialization -Wno-typedef-redefinition -Wno-unused-private-field -Wno-inconsistent-missing-override 
-Wno-c++14-extensions -Wno-constexpr-not-const -Wno-missing-braces 
-Wunused-lambda-capture -Wunused-local-typedef -Qunused-arguments 
-fcolor-diagnostics -fdiagnostics-color=always -Wno-unused-but-set-variable 
-fno-math-errno -fno-trapping-math -Werror=format -Werror=cast-function-type 
-DHAVE_AVX512_CPU_DEFINITION -DHAVE_AVX2_CPU_DEFINITION -O2 -pipe 
-fstack-protector-strong -isystem /usr/local/include -fno-strict-aliasing  
-isystem /usr/local/include  -DNDEBUG -DNDEBUG -std=gnu++14 -fPIC 
-DTH_HAVE_THREAD -Wall -Wextra -Wno-unused-parameter -Wno-unused-function 
-Wno-unused-result -Wno-missing-field-initializers -Wno-write-strings 
-Wno-unknown-pragmas -Wno-type-limits -Wno-array-bounds -Wno-sign-compare 
-Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations 
-Wno-missing-braces -Wno-range-loop-analysis -fvisibility=hidden -O2 
-fopenmp=libomp -DCAFFE2_BUILD_MAIN_LIB -pthread -O3 
 -mavx2 -mfma  -DCPU_CAPABILITY=AVX2 -DCPU_CAPABILITY_AVX2 -MD -MT 
caffe2/CMakeFiles/torch_cpu.dir/__/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp.AVX2.cpp.o
 -MF 
caffe2/CMakeFiles/torch_cpu.dir/__/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp.AVX2.cpp.o.d
 -o 
caffe2/CMakeFiles/torch_cpu.dir/__/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp.AVX2.cpp.o
 -c 
/wrkdirs/usr/ports/misc/pytorch/work/.build/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp.AVX2.cpp
FAILED: 
caffe2/CMakeFiles/torch_cpu.dir/__/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp.AVX2.cpp.o
 
/usr/bin/c++ -DAT_PER_OPERATOR_HEADERS -DCPUINFO_SUPPORTED_PLATFORM=0 
-DFMT_HEADER_ONLY=1 -DHAVE_MALLOC_USABLE_SIZE=1 -DHAVE_MMAP=1 -DHAVE_SHM_OPEN=1 
-DHAVE_SHM_UNLINK=1 -DMINIZ_DISABLE_ZIP_READER_CRC32_CHECKS 
-DONNXIFI_ENABLE_EXT=1 -DONNX_ML=1 -DONNX_NAMESPACE=onnx -DUSE_EXTERNAL_MZCRC 
-D_FILE_OFFSET_BITS=64 -Dtorch_cpu_EXPORTS 
-I/wrkdirs/usr/ports/misc/pytorch/work/.build/aten/src 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src 
-I/wrkdirs/usr/ports/misc/pytorch/work/.build 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/third_party/foxi 
-I/wrkdirs/usr/ports/misc/pytorch/work/.build/third_party/foxi 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/torch/csrc/api 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/torch/csrc/api/include 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/caffe2/aten/src/TH 
-I/wrkdirs/usr/ports/misc/pytorch/work/.build/caffe2/aten/src/TH 
-I/wrkdirs/usr/ports/misc/pytorch/work/.build/caffe2/aten/src 
-I/wrkdirs/usr/ports/misc/pytorch/work/.build/caffe2/../aten/src 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/torch/csrc 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/third_party/miniz-2.1.0 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/third_party/kineto/libkineto/include
 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/third_party/kineto/libkineto/src
 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/../third_party/catch/single_include
 -I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/.. 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/c10/.. 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/third_party/cpuinfo/include
 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/third_party/FP16/include 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/third_party/fmt/include 
-I/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/third_party/flatbuffers/include -isystem 
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/cmake/../third_party/eigen 
-isystem /wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/caffe2 -O2 -pipe 
-fstack-protector-strong -isystem /usr/local/include -fno-strict-aliasing  
-isystem /usr/local/include -Wno-deprecated -fvisibility-inlines-hidden 
-fopenmp=libomp -DNDEBUG -DUSE_KINETO -DLIBKINETO_NOCUPTI 
-DSYMBOLICATE_MOBILE_DEBUG_HANDLE -DEDGE_PROFILER_USE_KINETO -O2 -fPIC 
-Wno-narrowing -Wall -Wextra -Werror=return-type -Werror=non-virtual-dtor 
-Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds 
-Wno-unknown-pragmas -Wunused-local-typedefs -Wno-unused-parameter 
-Wno-unused-function -Wno-unused-result -Wno-strict-overflow 
-Wno-strict-aliasing -Wno-error=deprecated-declarations -Wvla-extension 
-Wno-range-loop-analysis -Wno-pass-failed -Wno-error=pedantic 
-Wno-error=redundant-decls -Wno-error=old-style-cast -Wconstant-conversion 
-Wno-invalid-partial-specialization -Wno-typedef-redefinition
 -Wno-unused-private-field -Wno-inconsistent-missing-override 
-Wno-c++14-extensions -Wno-constexpr-not-const -Wno-missing-braces 
-Wunused-lambda-capture -Wunused-local-typedef -Qunused-arguments 
-fcolor-diagnostics -fdiagnostics-color=always -Wno-unused-but-set-variable 
-fno-math-errno -fno-trapping-math -Werror=format -Werror=cast-function-type 
-DHAVE_AVX512_CPU_DEFINITION -DHAVE_AVX2_CPU_DEFINITION -O2 -pipe 
-fstack-protector-strong -isystem /usr/local/include -fno-strict-aliasing  
-isystem /usr/local/include  -DNDEBUG -DNDEBUG -std=gnu++14 -fPIC 
-DTH_HAVE_THREAD -Wall -Wextra -Wno-unused-parameter -Wno-unused-function 
-Wno-unused-result -Wno-missing-field-initializers -Wno-write-strings 
-Wno-unknown-pragmas -Wno-type-limits -Wno-array-bounds -Wno-sign-compare 
-Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations 
-Wno-missing-braces -Wno-range-loop-analysis -fvisibility=hidden -O2 
-fopenmp=libomp -DCAFFE2_BUILD_MAIN_LIB -pthread -O3  -mavx2 -mfma -DCPU_CAPABILITY=AVX2 -DCPU_CAPABILITY_AVX2 -MD -MT 
caffe2/CMakeFiles/torch_cpu.dir/__/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp.AVX2.cpp.o
 -MF 
caffe2/CMakeFiles/torch_cpu.dir/__/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp.AVX2.cpp.o.d
 -o 
caffe2/CMakeFiles/torch_cpu.dir/__/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp.AVX2.cpp.o
 -c 
/wrkdirs/usr/ports/misc/pytorch/work/.build/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp.AVX2.cpp
In file included from 
/wrkdirs/usr/ports/misc/pytorch/work/.build/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp.AVX2.cpp:1:
In file included from 
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/native/quantized/cpu/kernels/QuantizedOpKernels.cpp:9:
In file included from 
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/native/cpu/Loops.h:37:
In file included from 
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec.h:6:
In file included from 
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256.h:12:
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:253:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_acosf8_u10);
               ^~~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:256:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_asinf8_u10);
               ^~~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:259:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_atanf8_u10);
               ^~~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:280:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_erff8_u10);
               ^~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:283:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_erfcf8_u15);
               ^~~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:300:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_expf8_u10);
               ^~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:303:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_expm1f8_u10);
               ^~~~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:393:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_logf8_u10);
               ^~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:396:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_log2f8_u10);
               ^~~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:399:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_log10f8_u10);
               ^~~~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:402:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_log1pf8_u10);
               ^~~~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:406:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_sinf8_u10);
               ^~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:409:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_sinhf8_u10);
               ^~~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:412:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_cosf8_u10);
               ^~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:415:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_coshf8_u10);
               ^~~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:447:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_tanf8_u10);
               ^~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:450:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_tanhf8_u10);
               ^~~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:460:16:
 error: cannot initialize a parameter of type 'const __m256 (*)(__m256)' with 
an lvalue of type '__m256 (__m256)': different return type ('const __m256' 
(vector of 8 'float' values) vs '__m256' (vector of 8 'float' values))
    return map(Sleef_lgammaf8_u10);
               ^~~~~~~~~~~~~~~~~~
/wrkdirs/usr/ports/misc/pytorch/work/pytorch-v1.13.1/aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h:209:49:
 note: passing argument to parameter 'vop' here
  Vectorized<BFloat16> map(const __m256 (*const vop)(__m256)) const {
                                                ^
18 errors generated.
ninja: build stopped: subcommand failed.
===> Compilation failed unexpectedly.
Try to set MAKE_JOBS_UNSAFE=yes and rebuild before reporting the failure to
the maintainer.
*** Error code 1

Stop.
make: stopped in /usr/ports/misc/pytorch
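
All 18 errors are the same complaint from clang 13: the vop parameter of
Vectorized<BFloat16>::map() at vec256_bfloat16.h:209 is declared as a pointer
to a function returning const __m256, while the SLEEF helpers passed to it
(Sleef_expf8_u10, Sleef_logf8_u10, and so on) return a plain __m256, so the
two function-pointer types differ and the call is rejected. The stand-alone
sketch below reproduces the mismatch and shows one possible local fix,
dropping the const from the pointed-to return type; VecSketch and
sleef_standin are placeholder names for illustration only, not the port's
actual patch, and the stand-in body is not the real SLEEF implementation.

// Minimal sketch of the diagnostic above; compile stand-alone with -mavx.
#include <immintrin.h>

// Placeholder that mimics the signature of the real Sleef_*f8_u10 helpers
// (plain __m256 return).  The body is a dummy, not SLEEF.
static __m256 sleef_standin(__m256 x) { return x; }

struct VecSketch {
  __m256 values;

  // Shape of the parameter at vec256_bfloat16.h:209.  With the const-qualified
  // return type, clang 13 rejects binding sleef_standin to vop:
  //   cannot initialize a parameter of type 'const __m256 (*)(__m256)' with an
  //   lvalue of type '__m256 (__m256)'
  // VecSketch map(const __m256 (*const vop)(__m256)) const { return {vop(values)}; }

  // One possible local fix: drop the const from the pointed-to return type so
  // the function-pointer types match exactly.
  VecSketch map(__m256 (*const vop)(__m256)) const { return {vop(values)}; }
};

int main() {
  VecSketch v{_mm256_set1_ps(1.0f)};
  VecSketch w = v.map(sleef_standin);  // well-formed once the const is gone
  (void)w;
  return 0;
}

A patch along those lines would live under /usr/ports/misc/pytorch/files as a
patch against aten/src/ATen/cpu/vec/vec256/vec256_bfloat16.h; whether to fix
the parameter type or the call sites is a maintainer decision.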

