Group various conditionals together
Group options for things like toolchain, cuda, rocm together to make things a little easier to follow.
This commit is contained in:
parent
b56f1511ce
commit
7a717695c8
1 changed files with 28 additions and 25 deletions
|
|
@@ -21,6 +21,14 @@
|
|||
# /usr/lib64/python3.12/site-packages/torch/bin/test_api, test_lazy
|
||||
%bcond_with test
|
||||
|
||||
%global toolchain gcc
|
||||
%global _lto_cflags %nil
|
||||
|
||||
# For testing compat-gcc
|
||||
%global compat_gcc_major 13
|
||||
%bcond_with compat_gcc
|
||||
|
||||
# ROCM
|
||||
%ifarch x86_64
|
||||
%bcond_without rocm
|
||||
%endif
|
||||
|
|
@@ -30,6 +38,21 @@
|
|||
%global rocm_default_gpu default
|
||||
%global rocm_gpu_list gfx9
|
||||
|
||||
%ifarch x86_64
|
||||
%if %{with rocm}
|
||||
%bcond_with fbgemm
|
||||
%else
|
||||
%bcond_without fbgemm
|
||||
%endif
|
||||
%else
|
||||
%bcond_with fbgemm
|
||||
%endif
|
||||
|
||||
# Disable dwz with rocm because memory can be exhausted
|
||||
%if %{with rocm}
|
||||
%define _find_debuginfo_dwz_opts %{nil}
|
||||
%endif
|
||||
|
||||
# Caffe2 support came in F41
|
||||
%if 0%{?fedora} > 40
|
||||
%bcond_without caffe2
|
||||
|
|
@@ -59,16 +82,7 @@
|
|||
%bcond_without pthreadpool
|
||||
%bcond_without pocketfft
|
||||
|
||||
%ifarch x86_64
|
||||
%if %{with rocm}
|
||||
%bcond_with fbgemm
|
||||
%else
|
||||
%bcond_without fbgemm
|
||||
%endif
|
||||
%else
|
||||
%bcond_with fbgemm
|
||||
%endif
|
||||
|
||||
# CUDA
|
||||
# For testing cuda
|
||||
%ifarch x86_64
|
||||
%bcond_with cuda
|
||||
|
|
@@ -86,15 +100,8 @@
|
|||
%else
|
||||
%global cuda_ver 12.5
|
||||
%endif
|
||||
%endif
|
||||
|
||||
# For testing compat-gcc
|
||||
%global compat_gcc_major 13
|
||||
%bcond_with compat_gcc
|
||||
|
||||
# Disable dwz with rocm because memory can be exhausted
|
||||
%if %{with rocm}
|
||||
%define _find_debuginfo_dwz_opts %{nil}
|
||||
%global cuf_ver 1.1.2
|
||||
%global cul_ver 3.4.1
|
||||
%endif
|
||||
|
||||
%if %{with cuda}
|
||||
|
|
@@ -120,6 +127,8 @@ Summary: PyTorch AI/ML framework
|
|||
License: BSD-3-Clause AND BSD-2-Clause AND 0BSD AND Apache-2.0 AND MIT AND BSL-1.0 AND GPL-3.0-or-later AND Zlib
|
||||
|
||||
URL: https://pytorch.org/
|
||||
ExclusiveArch: x86_64 aarch64
|
||||
|
||||
%if %{with gitcommit}
|
||||
Source0: %{forgeurl}/archive/%{commit0}/pytorch-%{shortcommit0}.tar.gz
|
||||
Source1000: pyproject.toml
|
||||
|
|
@@ -130,9 +139,7 @@ Source1: https://github.com/google/flatbuffers/archive/refs/tags/v23.3.3.
|
|||
Source2: https://github.com/pybind/pybind11/archive/refs/tags/v2.11.1.tar.gz
|
||||
|
||||
%if %{with cuda}
|
||||
%global cuf_ver 1.1.2
|
||||
Source10: https://github.com/NVIDIA/cudnn-frontend/archive/refs/tags/v%{cuf_ver}.tar.gz
|
||||
%global cul_ver 3.4.1
|
||||
Source11: https://github.com/NVIDIA/cutlass/archive/refs/tags/v%{cul_ver}.tar.gz
|
||||
%endif
|
||||
|
||||
|
|
@@ -227,10 +234,6 @@ Patch105: 0001-disable-use-of-aotriton.patch
|
|||
Patch106: 0001-include-fmt-ranges.h-for-using-fmt-join.patch
|
||||
%endif
|
||||
|
||||
ExclusiveArch: x86_64 aarch64
|
||||
%global toolchain gcc
|
||||
%global _lto_cflags %nil
|
||||
|
||||
BuildRequires: cmake
|
||||
BuildRequires: eigen3-devel
|
||||
%if %{with fbgemm}
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue