PyTorch: build flash attention by default, except in CI (#48521)

* PyTorch: build flash attention by default, except in CI

* Variant is boolean, only available when +cuda/+rocm

* desc -> _desc
Author: Adam J. Stewart
Date:   2025-02-11 22:20:10 +01:00
Committed by: GitHub
Parent: 18cd922aab
Commit: 46f5b192ef

4 changed files with 28 additions and 5 deletions
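The package.py hunk (the fourth changed file) is not shown on this page; the three spack.yaml hunks below only opt the CI stacks out of the new variant. As a rough sketch of what a boolean variant that is only available with +cuda or +rocm can look like in a Spack recipe — illustrative structure, not the actual py-torch change in this commit — Spack's variant() directive accepts when= constraints:

    # Illustrative sketch only, not the exact py-torch recipe from this commit.
    from spack.package import *

    class PyTorch(PythonPackage, CudaPackage, ROCmPackage):
        # Boolean variant, on by default, but only defined for GPU builds.
        variant("flash_attention", default=True, when="+cuda",
                description="Build the flash attention kernel")
        variant("flash_attention", default=True, when="+rocm",
                description="Build the flash attention kernel")

        def setup_build_environment(self, env):
            # PyTorch's CMake build reads USE_FLASH_ATTENTION; mapping the
            # variant to the environment like this is a plausible sketch,
            # not necessarily the commit's exact code.
            if self.spec.satisfies("+flash_attention"):
                env.set("USE_FLASH_ATTENTION", "ON")
            elif self.spec.satisfies("~flash_attention"):
                env.set("USE_FLASH_ATTENTION", "OFF")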


@@ -12,6 +12,13 @@ spack:
       require: ~cuda
     mpi:
       require: openmpi
+    py-torch:
+      require:
+      - target=aarch64
+      - ~rocm
+      - +cuda
+      - cuda_arch=80
+      - ~flash_attention
   specs:
   # Horovod

@@ -12,6 +12,13 @@ spack:
       require: ~cuda
     mpi:
       require: openmpi
+    py-torch:
+      require:
+      - target=x86_64_v3
+      - ~rocm
+      - +cuda
+      - cuda_arch=80
+      - ~flash_attention
   specs:
   # Horovod


@@ -11,6 +11,13 @@ spack:
       require: "osmesa"
     mpi:
       require: openmpi
+    py-torch:
+      require:
+      - target=x86_64_v3
+      - ~cuda
+      - +rocm
+      - amdgpu_target=gfx90a
+      - ~flash_attention
   specs:
   # Horovod
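With these packages: requirements in place, any py-torch that concretizes inside these CI environments picks up ~flash_attention alongside the GPU settings, so the CI stacks skip the flash-attention build that the package now enables by default for +cuda and +rocm.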