aboutsummaryrefslogtreecommitdiff
path: root/misc
diff options
context:
space:
mode:
authorYuri Victorovich <yuri@FreeBSD.org>2023-05-10 04:15:26 +0000
committerYuri Victorovich <yuri@FreeBSD.org>2023-05-10 04:21:00 +0000
commit85aebbb57bed45347a7c13009a2cf10abff229ec (patch)
tree240ec522890ff1efa34772e328af9f35f7137382 /misc
parent167ba266d99226410632fb4d895cc0fc44ca9c65 (diff)
downloadports-85aebbb57bed45347a7c13009a2cf10abff229ec.tar.gz
ports-85aebbb57bed45347a7c13009a2cf10abff229ec.zip
misc/py-pytorch: Disable mkldnn, CUDA, and add a patch
Diffstat (limited to 'misc')
-rw-r--r--misc/py-pytorch/Makefile3
-rw-r--r--misc/py-pytorch/files/patch-c10_core_DynamicCast.h21
2 files changed, 24 insertions, 0 deletions
diff --git a/misc/py-pytorch/Makefile b/misc/py-pytorch/Makefile
index a71d2a5ebb29..e9f1fc05ffdd 100644
--- a/misc/py-pytorch/Makefile
+++ b/misc/py-pytorch/Makefile
@@ -1,6 +1,7 @@
PORTNAME= pytorch
DISTVERSIONPREFIX= v
DISTVERSION= 2.0.0
+PORTREVISION= 1
CATEGORIES= misc # machine-learning
MASTER_SITES= https://github.com/pytorch/pytorch/releases/download/v${DISTVERSION}/
PKGNAMEPREFIX= ${PYTHON_PKGNAMEPREFIX}
@@ -39,6 +40,8 @@ USE_PYTHON= distutils autoplist
MAKE_ENV= USE_NINJA=no # ninja breaks for some reason
MAKE_ENV+= BUILD_TEST=0 # ninja breaks for some reason
+MAKE_ENV+= USE_MKLDNN=0 # disable MKLDNN that doesn't exist, see https://github.com/pytorch/pytorch/issues/100957
+MAKE_ENV+= USE_CUDNN=0
LDFLAGS+= -lexecinfo
BINARY_ALIAS= make=${GMAKE}
diff --git a/misc/py-pytorch/files/patch-c10_core_DynamicCast.h b/misc/py-pytorch/files/patch-c10_core_DynamicCast.h
new file mode 100644
index 000000000000..517085e8bdee
--- /dev/null
+++ b/misc/py-pytorch/files/patch-c10_core_DynamicCast.h
@@ -0,0 +1,21 @@
+- workaround for the failure during math/dgl build:
+- /usr/local/lib/python3.9/site-packages/torch/include/c10/core/DynamicCast.h:112:22: error: use of undeclared identifier '__assert_fail'
+
+--- c10/core/DynamicCast.h.orig 2023-05-10 02:37:18 UTC
++++ c10/core/DynamicCast.h
+@@ -99,13 +99,13 @@ C10_HOST_DEVICE inline void cast_and_store(
+ template <> \
+ C10_HOST_DEVICE inline T fetch_and_cast<T>( \
+ const ScalarType src_type, const void* ptr) { \
+- CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == src_type); \
++ assert(ScalarType::scalartype_ == src_type); \
+ return c10::load<T>(ptr); \
+ } \
+ template <> \
+ C10_HOST_DEVICE inline void cast_and_store<T>( \
+ const ScalarType dest_type, void* ptr, T value) { \
+- CUDA_KERNEL_ASSERT(ScalarType::scalartype_ == dest_type); \
++ assert(ScalarType::scalartype_ == dest_type); \
+ *(T*)ptr = value; \
+ }
+