mirror of
https://github.com/ml-explore/mlx.git
synced 2025-12-16 01:49:05 +08:00
Compare commits
92 Commits
sdpav-back
...
v0.29.2
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
7a6adda1e6 | ||
|
|
1a9f820af6 | ||
|
|
d4f4ff3c5e | ||
|
|
7c7e48dbd1 | ||
|
|
fbbf3b9b3e | ||
|
|
bf01ad9367 | ||
|
|
ae438d05fa | ||
|
|
711a645807 | ||
|
|
aa9d44b3d4 | ||
|
|
ec2ab42888 | ||
|
|
787c0d90cd | ||
|
|
e8b604a6a3 | ||
|
|
50cc09887f | ||
|
|
3f730e77aa | ||
|
|
caecbe876a | ||
|
|
8afb6d62f2 | ||
|
|
6ccfa603cd | ||
|
|
36cad99a11 | ||
|
|
ee18e1cbf0 | ||
|
|
af120c2bc0 | ||
|
|
6a3acf2301 | ||
|
|
d6977f2a57 | ||
|
|
db5443e831 | ||
|
|
52b8384d10 | ||
|
|
44cc5da4bc | ||
|
|
dde3682b69 | ||
|
|
17310d91a6 | ||
|
|
b194d65a6a | ||
|
|
a44b27f5f8 | ||
|
|
e5a33f2223 | ||
|
|
c1e3340b23 | ||
|
|
8f163a367d | ||
|
|
89a3df9014 | ||
|
|
c5d2937aa5 | ||
|
|
b61a65e313 | ||
|
|
04cbb4191c | ||
|
|
c5460762e7 | ||
|
|
8ce49cd39e | ||
|
|
9c68b50853 | ||
|
|
111f1e71af | ||
|
|
827003d568 | ||
|
|
d363a76aa4 | ||
|
|
70560b6bd5 | ||
|
|
7ef8a6f2d5 | ||
|
|
31c6f6e33f | ||
|
|
584d48458e | ||
|
|
5cf984ca87 | ||
|
|
a9bac3d9e5 | ||
|
|
5458d43247 | ||
|
|
a4dba65220 | ||
|
|
3dcb286baf | ||
|
|
4822c3dbe9 | ||
|
|
2ca75bb529 | ||
|
|
db14e29a0b | ||
|
|
d2f540f4e0 | ||
|
|
333ffea273 | ||
|
|
f55b6f1f2f | ||
|
|
30561229c7 | ||
|
|
068a4612e9 | ||
|
|
5722c147de | ||
|
|
f6819a1f26 | ||
|
|
f93f87c802 | ||
|
|
9392fc3f88 | ||
|
|
e843c4d8d5 | ||
|
|
0c5fc63a36 | ||
|
|
e397177f6e | ||
|
|
f4c8888cbe | ||
|
|
25c1e03205 | ||
|
|
512281781c | ||
|
|
ac85ddfdb7 | ||
|
|
65d0d40232 | ||
|
|
cea9369610 | ||
|
|
e7c6e1db82 | ||
|
|
c5fcd5b61b | ||
|
|
1df9887998 | ||
|
|
73f22d6226 | ||
|
|
c422050ca7 | ||
|
|
1ba18ff7d9 | ||
|
|
37b440faa8 | ||
|
|
888b13ed63 | ||
|
|
4abb218d21 | ||
|
|
6441c21a94 | ||
|
|
dfb5022eab | ||
|
|
ac207ce7aa | ||
|
|
fce53b61d6 | ||
|
|
8ae4a76308 | ||
|
|
7fde1b6a1e | ||
|
|
aa7b47481a | ||
|
|
56be773610 | ||
|
|
a9bdd67baa | ||
|
|
f2adb5638d | ||
|
|
728d4db582 |
@@ -18,13 +18,14 @@ jobs:
|
|||||||
type: boolean
|
type: boolean
|
||||||
default: false
|
default: false
|
||||||
macos:
|
macos:
|
||||||
xcode: "16.2.0"
|
xcode: "26.0.0"
|
||||||
resource_class: m2pro.medium
|
resource_class: m4pro.medium
|
||||||
steps:
|
steps:
|
||||||
- checkout
|
- checkout
|
||||||
- run:
|
- run:
|
||||||
name: Install
|
name: Install
|
||||||
command: |
|
command: |
|
||||||
|
xcodebuild -downloadComponent MetalToolchain
|
||||||
brew install python@3.9
|
brew install python@3.9
|
||||||
brew install doxygen
|
brew install doxygen
|
||||||
python3.9 -m venv env
|
python3.9 -m venv env
|
||||||
@@ -89,6 +90,7 @@ jobs:
|
|||||||
command: |
|
command: |
|
||||||
uv venv
|
uv venv
|
||||||
uv pip install cmake
|
uv pip install cmake
|
||||||
|
DEBUG=1 CMAKE_ARGS="-DCMAKE_COMPILE_WARNING_AS_ERROR=ON" \
|
||||||
uv pip install -e ".[dev]" -v
|
uv pip install -e ".[dev]" -v
|
||||||
- run:
|
- run:
|
||||||
name: Generate package stubs
|
name: Generate package stubs
|
||||||
@@ -118,7 +120,7 @@ jobs:
|
|||||||
parameters:
|
parameters:
|
||||||
xcode_version:
|
xcode_version:
|
||||||
type: string
|
type: string
|
||||||
default: "16.2.0"
|
default: "26.0.0"
|
||||||
macosx_deployment_target:
|
macosx_deployment_target:
|
||||||
type: string
|
type: string
|
||||||
default: ""
|
default: ""
|
||||||
@@ -126,12 +128,13 @@ jobs:
|
|||||||
xcode: << parameters.xcode_version >>
|
xcode: << parameters.xcode_version >>
|
||||||
environment:
|
environment:
|
||||||
MACOSX_DEPLOYMENT_TARGET: << parameters.macosx_deployment_target >>
|
MACOSX_DEPLOYMENT_TARGET: << parameters.macosx_deployment_target >>
|
||||||
resource_class: m2pro.medium
|
resource_class: m4pro.medium
|
||||||
steps:
|
steps:
|
||||||
- checkout
|
- checkout
|
||||||
- run:
|
- run:
|
||||||
name: Install dependencies
|
name: Install dependencies
|
||||||
command: |
|
command: |
|
||||||
|
xcodebuild -downloadComponent MetalToolchain
|
||||||
HOMEBREW_NO_AUTO_UPDATE=1 HOMEBREW_NO_INSTALL_CLEANUP=1 \
|
HOMEBREW_NO_AUTO_UPDATE=1 HOMEBREW_NO_INSTALL_CLEANUP=1 \
|
||||||
brew install openmpi uv
|
brew install openmpi uv
|
||||||
- run:
|
- run:
|
||||||
@@ -196,7 +199,7 @@ jobs:
|
|||||||
name: Run Python tests with JIT
|
name: Run Python tests with JIT
|
||||||
command: |
|
command: |
|
||||||
CMAKE_ARGS="-DMLX_METAL_JIT=ON" \
|
CMAKE_ARGS="-DMLX_METAL_JIT=ON" \
|
||||||
uv pip install -e .
|
uv pip install -e . -v
|
||||||
LOW_MEMORY=1 DEVICE=gpu METAL_DEVICE_WRAPPER_TYPE=1 \
|
LOW_MEMORY=1 DEVICE=gpu METAL_DEVICE_WRAPPER_TYPE=1 \
|
||||||
METAL_DEBUG_ERROR_MODE=0 \
|
METAL_DEBUG_ERROR_MODE=0 \
|
||||||
uv run --no-project python -m xmlrunner discover \
|
uv run --no-project python -m xmlrunner discover \
|
||||||
@@ -222,15 +225,20 @@ jobs:
|
|||||||
sudo apt-get update
|
sudo apt-get update
|
||||||
sudo apt-get install libcudnn9-dev-cuda-12
|
sudo apt-get install libcudnn9-dev-cuda-12
|
||||||
sudo apt-get install libblas-dev liblapack-dev liblapacke-dev
|
sudo apt-get install libblas-dev liblapack-dev liblapacke-dev
|
||||||
|
sudo apt-get install libnccl2 libnccl-dev
|
||||||
curl -sL https://github.com/ccache/ccache/releases/download/v4.11.3/ccache-4.11.3-linux-x86_64.tar.xz | tar xJf -
|
curl -sL https://github.com/ccache/ccache/releases/download/v4.11.3/ccache-4.11.3-linux-x86_64.tar.xz | tar xJf -
|
||||||
sudo mv ccache-4.11.3-linux-x86_64/ccache /usr/bin/ccache
|
sudo mv ccache-4.11.3-linux-x86_64/ccache /usr/bin/ccache
|
||||||
rm -rf ccache-4.11.3-linux-x86_64
|
rm -rf ccache-4.11.3-linux-x86_64
|
||||||
curl -LsSf https://astral.sh/uv/install.sh | sh
|
curl -LsSf https://astral.sh/uv/install.sh | sh
|
||||||
|
- run:
|
||||||
|
name: Set CCache size
|
||||||
|
command: ccache --max-size 1G
|
||||||
- run:
|
- run:
|
||||||
name: Install Python package
|
name: Install Python package
|
||||||
command: |
|
command: |
|
||||||
uv venv
|
uv venv
|
||||||
CMAKE_ARGS="-DMLX_BUILD_CUDA=ON -DCMAKE_CUDA_COMPILER=`which nvcc`" \
|
uv pip install cmake
|
||||||
|
DEBUG=1 CMAKE_ARGS="-DMLX_BUILD_CUDA=ON -DCMAKE_COMPILE_WARNING_AS_ERROR=ON -DCMAKE_CUDA_COMPILER=`which nvcc`" \
|
||||||
uv pip install -e ".[dev]" -v
|
uv pip install -e ".[dev]" -v
|
||||||
- run:
|
- run:
|
||||||
name: Run Python tests
|
name: Run Python tests
|
||||||
@@ -238,12 +246,23 @@ jobs:
|
|||||||
source .venv/bin/activate
|
source .venv/bin/activate
|
||||||
LOW_MEMORY=1 DEVICE=cpu python -m unittest discover python/tests -v
|
LOW_MEMORY=1 DEVICE=cpu python -m unittest discover python/tests -v
|
||||||
LOW_MEMORY=1 DEVICE=gpu python -m tests discover python/tests -v
|
LOW_MEMORY=1 DEVICE=gpu python -m tests discover python/tests -v
|
||||||
|
- run:
|
||||||
|
name: Build CPP only
|
||||||
|
command: |
|
||||||
|
source .venv/bin/activate
|
||||||
|
cmake . -B build \
|
||||||
|
-DMLX_BUILD_CUDA=ON \
|
||||||
|
-DCMAKE_CUDA_COMPILER=`which nvcc` \
|
||||||
|
-DCMAKE_BUILD_TYPE=DEBUG
|
||||||
|
cmake --build build -j `nproc`
|
||||||
|
- run:
|
||||||
|
name: Run CPP tests
|
||||||
|
command: ./build/tests/tests -sfe="*fft_tests.cpp,*linalg_tests.cpp"
|
||||||
- run:
|
- run:
|
||||||
name: CCache report
|
name: CCache report
|
||||||
command: |
|
command: |
|
||||||
ccache --show-stats
|
ccache --show-stats
|
||||||
ccache --zero-stats
|
ccache --zero-stats
|
||||||
ccache --max-size 400MB
|
|
||||||
ccache --cleanup
|
ccache --cleanup
|
||||||
- save_cache:
|
- save_cache:
|
||||||
key: cuda-<< parameters.image_date >>-{{ arch }}-{{ epoch }}
|
key: cuda-<< parameters.image_date >>-{{ arch }}-{{ epoch }}
|
||||||
@@ -257,7 +276,7 @@ jobs:
|
|||||||
default: "3.9"
|
default: "3.9"
|
||||||
xcode_version:
|
xcode_version:
|
||||||
type: string
|
type: string
|
||||||
default: "16.2.0"
|
default: "26.0.0"
|
||||||
build_env:
|
build_env:
|
||||||
type: string
|
type: string
|
||||||
default: ""
|
default: ""
|
||||||
@@ -266,7 +285,7 @@ jobs:
|
|||||||
default: ""
|
default: ""
|
||||||
macos:
|
macos:
|
||||||
xcode: << parameters.xcode_version >>
|
xcode: << parameters.xcode_version >>
|
||||||
resource_class: m2pro.medium
|
resource_class: m4pro.medium
|
||||||
environment:
|
environment:
|
||||||
MACOSX_DEPLOYMENT_TARGET: << parameters.macosx_deployment_target >>
|
MACOSX_DEPLOYMENT_TARGET: << parameters.macosx_deployment_target >>
|
||||||
steps:
|
steps:
|
||||||
@@ -274,11 +293,15 @@ jobs:
|
|||||||
- run:
|
- run:
|
||||||
name: Install dependencies
|
name: Install dependencies
|
||||||
command: |
|
command: |
|
||||||
brew install python@<< parameters.python_version >>
|
xcodebuild -downloadComponent MetalToolchain
|
||||||
brew install openmpi
|
mkdir -p ~/miniconda3
|
||||||
python<< parameters.python_version >> -m venv env
|
curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh -o ~/miniconda3/miniconda.sh
|
||||||
source env/bin/activate
|
bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
|
||||||
pip install --upgrade pip
|
rm ~/miniconda3/miniconda.sh
|
||||||
|
source ~/miniconda3/bin/activate
|
||||||
|
conda init --all
|
||||||
|
conda create -n env python=<< parameters.python_version >> -y
|
||||||
|
conda activate env
|
||||||
pip install --upgrade cmake
|
pip install --upgrade cmake
|
||||||
pip install nanobind==2.4.0
|
pip install nanobind==2.4.0
|
||||||
pip install --upgrade setuptools
|
pip install --upgrade setuptools
|
||||||
@@ -288,19 +311,19 @@ jobs:
|
|||||||
- run:
|
- run:
|
||||||
name: Install Python package
|
name: Install Python package
|
||||||
command: |
|
command: |
|
||||||
source env/bin/activate
|
conda activate env
|
||||||
env -u MACOSX_DEPLOYMENT_TARGET DEV_RELEASE=1 \
|
env -u MACOSX_DEPLOYMENT_TARGET DEV_RELEASE=1 \
|
||||||
pip install . -v
|
pip install . -v
|
||||||
- run:
|
- run:
|
||||||
name: Generate package stubs
|
name: Generate package stubs
|
||||||
command: |
|
command: |
|
||||||
source env/bin/activate
|
conda activate env
|
||||||
pip install typing_extensions
|
pip install typing_extensions
|
||||||
python setup.py generate_stubs
|
python setup.py generate_stubs
|
||||||
- run:
|
- run:
|
||||||
name: Build Python package
|
name: Build Python package
|
||||||
command: |
|
command: |
|
||||||
source env/bin/activate
|
conda activate env
|
||||||
python setup.py clean --all
|
python setup.py clean --all
|
||||||
<< parameters.build_env >> MLX_BUILD_STAGE=1 python -m build -w
|
<< parameters.build_env >> MLX_BUILD_STAGE=1 python -m build -w
|
||||||
- when:
|
- when:
|
||||||
@@ -310,7 +333,7 @@ jobs:
|
|||||||
- run:
|
- run:
|
||||||
name: Build common package
|
name: Build common package
|
||||||
command: |
|
command: |
|
||||||
source env/bin/activate
|
conda activate env
|
||||||
python setup.py clean --all
|
python setup.py clean --all
|
||||||
<< parameters.build_env >> MLX_BUILD_STAGE=2 python -m build -w
|
<< parameters.build_env >> MLX_BUILD_STAGE=2 python -m build -w
|
||||||
- when:
|
- when:
|
||||||
@@ -319,7 +342,7 @@ jobs:
|
|||||||
- run:
|
- run:
|
||||||
name: Upload package
|
name: Upload package
|
||||||
command: |
|
command: |
|
||||||
source env/bin/activate
|
conda activate env
|
||||||
twine upload dist/*
|
twine upload dist/*
|
||||||
- store_artifacts:
|
- store_artifacts:
|
||||||
path: dist/
|
path: dist/
|
||||||
@@ -392,7 +415,7 @@ jobs:
|
|||||||
default: ""
|
default: ""
|
||||||
machine:
|
machine:
|
||||||
image: ubuntu-2204:current
|
image: ubuntu-2204:current
|
||||||
resource_class: large
|
resource_class: xlarge
|
||||||
steps:
|
steps:
|
||||||
- checkout
|
- checkout
|
||||||
- run:
|
- run:
|
||||||
@@ -439,7 +462,7 @@ workflows:
|
|||||||
- mac_build_and_test:
|
- mac_build_and_test:
|
||||||
matrix:
|
matrix:
|
||||||
parameters:
|
parameters:
|
||||||
macosx_deployment_target: ["13.5", "14.0"]
|
macosx_deployment_target: ["13.5", "15.0"]
|
||||||
- linux_build_and_test
|
- linux_build_and_test
|
||||||
- cuda_build_and_test:
|
- cuda_build_and_test:
|
||||||
matrix:
|
matrix:
|
||||||
@@ -464,68 +487,7 @@ workflows:
|
|||||||
python_version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
|
python_version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
|
||||||
macosx_deployment_target: ["13.5", "14.0", "15.0"]
|
macosx_deployment_target: ["13.5", "14.0", "15.0"]
|
||||||
build_env: ["PYPI_RELEASE=1"]
|
build_env: ["PYPI_RELEASE=1"]
|
||||||
xcode_version: ["16.2.0", "15.0.0"]
|
xcode_version: ["26.0.0"]
|
||||||
exclude:
|
|
||||||
- macosx_deployment_target: "13.5"
|
|
||||||
xcode_version: "16.2.0"
|
|
||||||
python_version: "3.9"
|
|
||||||
build_env: "PYPI_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "13.5"
|
|
||||||
xcode_version: "16.2.0"
|
|
||||||
python_version: "3.10"
|
|
||||||
build_env: "PYPI_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "13.5"
|
|
||||||
xcode_version: "16.2.0"
|
|
||||||
python_version: "3.11"
|
|
||||||
build_env: "PYPI_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "13.5"
|
|
||||||
xcode_version: "16.2.0"
|
|
||||||
python_version: "3.12"
|
|
||||||
build_env: "PYPI_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "13.5"
|
|
||||||
xcode_version: "16.2.0"
|
|
||||||
python_version: "3.13"
|
|
||||||
build_env: "PYPI_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "14.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.9"
|
|
||||||
build_env: "PYPI_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "14.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.10"
|
|
||||||
build_env: "PYPI_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "14.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.11"
|
|
||||||
build_env: "PYPI_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "14.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.12"
|
|
||||||
build_env: "PYPI_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "14.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.13"
|
|
||||||
build_env: "PYPI_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "15.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.9"
|
|
||||||
build_env: "PYPI_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "15.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.10"
|
|
||||||
build_env: "PYPI_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "15.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.11"
|
|
||||||
build_env: "PYPI_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "15.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.12"
|
|
||||||
build_env: "PYPI_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "15.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.13"
|
|
||||||
build_env: "PYPI_RELEASE=1"
|
|
||||||
- build_documentation:
|
- build_documentation:
|
||||||
filters:
|
filters:
|
||||||
tags:
|
tags:
|
||||||
@@ -567,7 +529,7 @@ workflows:
|
|||||||
requires: [ hold ]
|
requires: [ hold ]
|
||||||
matrix:
|
matrix:
|
||||||
parameters:
|
parameters:
|
||||||
macosx_deployment_target: ["13.5", "14.0"]
|
macosx_deployment_target: ["13.5", "15.0"]
|
||||||
- linux_build_and_test:
|
- linux_build_and_test:
|
||||||
requires: [ hold ]
|
requires: [ hold ]
|
||||||
- cuda_build_and_test:
|
- cuda_build_and_test:
|
||||||
@@ -586,53 +548,7 @@ workflows:
|
|||||||
parameters:
|
parameters:
|
||||||
python_version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
|
python_version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
|
||||||
macosx_deployment_target: ["13.5", "14.0", "15.0"]
|
macosx_deployment_target: ["13.5", "14.0", "15.0"]
|
||||||
xcode_version: ["16.2.0", "15.0.0"]
|
xcode_version: ["26.0.0"]
|
||||||
exclude:
|
|
||||||
- macosx_deployment_target: "13.5"
|
|
||||||
xcode_version: "16.2.0"
|
|
||||||
python_version: "3.9"
|
|
||||||
- macosx_deployment_target: "13.5"
|
|
||||||
xcode_version: "16.2.0"
|
|
||||||
python_version: "3.10"
|
|
||||||
- macosx_deployment_target: "13.5"
|
|
||||||
xcode_version: "16.2.0"
|
|
||||||
python_version: "3.11"
|
|
||||||
- macosx_deployment_target: "13.5"
|
|
||||||
xcode_version: "16.2.0"
|
|
||||||
python_version: "3.12"
|
|
||||||
- macosx_deployment_target: "13.5"
|
|
||||||
xcode_version: "16.2.0"
|
|
||||||
python_version: "3.13"
|
|
||||||
- macosx_deployment_target: "14.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.9"
|
|
||||||
- macosx_deployment_target: "14.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.10"
|
|
||||||
- macosx_deployment_target: "14.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.11"
|
|
||||||
- macosx_deployment_target: "14.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.12"
|
|
||||||
- macosx_deployment_target: "14.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.13"
|
|
||||||
- macosx_deployment_target: "15.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.9"
|
|
||||||
- macosx_deployment_target: "15.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.10"
|
|
||||||
- macosx_deployment_target: "15.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.11"
|
|
||||||
- macosx_deployment_target: "15.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.12"
|
|
||||||
- macosx_deployment_target: "15.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.13"
|
|
||||||
- build_linux_release:
|
- build_linux_release:
|
||||||
matrix:
|
matrix:
|
||||||
parameters:
|
parameters:
|
||||||
@@ -651,68 +567,7 @@ workflows:
|
|||||||
python_version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
|
python_version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
|
||||||
macosx_deployment_target: ["13.5", "14.0", "15.0"]
|
macosx_deployment_target: ["13.5", "14.0", "15.0"]
|
||||||
build_env: ["DEV_RELEASE=1"]
|
build_env: ["DEV_RELEASE=1"]
|
||||||
xcode_version: ["16.2.0", "15.0.0"]
|
xcode_version: ["26.0.0"]
|
||||||
exclude:
|
|
||||||
- macosx_deployment_target: "13.5"
|
|
||||||
xcode_version: "16.2.0"
|
|
||||||
python_version: "3.9"
|
|
||||||
build_env: "DEV_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "13.5"
|
|
||||||
xcode_version: "16.2.0"
|
|
||||||
python_version: "3.10"
|
|
||||||
build_env: "DEV_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "13.5"
|
|
||||||
xcode_version: "16.2.0"
|
|
||||||
python_version: "3.11"
|
|
||||||
build_env: "DEV_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "13.5"
|
|
||||||
xcode_version: "16.2.0"
|
|
||||||
python_version: "3.12"
|
|
||||||
build_env: "DEV_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "13.5"
|
|
||||||
xcode_version: "16.2.0"
|
|
||||||
python_version: "3.13"
|
|
||||||
build_env: "DEV_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "14.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.9"
|
|
||||||
build_env: "DEV_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "14.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.10"
|
|
||||||
build_env: "DEV_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "14.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.11"
|
|
||||||
build_env: "DEV_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "14.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.12"
|
|
||||||
build_env: "DEV_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "14.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.13"
|
|
||||||
build_env: "DEV_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "15.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.9"
|
|
||||||
build_env: "DEV_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "15.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.10"
|
|
||||||
build_env: "DEV_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "15.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.11"
|
|
||||||
build_env: "DEV_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "15.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.12"
|
|
||||||
build_env: "DEV_RELEASE=1"
|
|
||||||
- macosx_deployment_target: "15.0"
|
|
||||||
xcode_version: "15.0.0"
|
|
||||||
python_version: "3.13"
|
|
||||||
build_env: "DEV_RELEASE=1"
|
|
||||||
- build_linux_release:
|
- build_linux_release:
|
||||||
matrix:
|
matrix:
|
||||||
parameters:
|
parameters:
|
||||||
|
|||||||
@@ -19,12 +19,17 @@ MLX was developed with contributions from the following individuals:
|
|||||||
- Gleb Pobudzey: Added the `where` primitive, and groups in 1D and 2D convolutions.
|
- Gleb Pobudzey: Added the `where` primitive, and groups in 1D and 2D convolutions.
|
||||||
- Paul Paczuski: Improved stability of BCE loss calculation
|
- Paul Paczuski: Improved stability of BCE loss calculation
|
||||||
- Max-Heinrich Laves: Added `conv_transpose1d`, `conv_transpose2d`, and `conv_transpose3d` ops.
|
- Max-Heinrich Laves: Added `conv_transpose1d`, `conv_transpose2d`, and `conv_transpose3d` ops.
|
||||||
- Gökdeniz Gülmez: Added the `Muon (MomentUm Orthogonalized by Newton-schulz)` optimizer.
|
- Gökdeniz Gülmez: Added the `Muon (MomentUm Orthogonalized by Newton-schulz)` optimizer, and the `ReLU²` activation function.
|
||||||
|
|
||||||
<a href="https://github.com/ml-explore/mlx/graphs/contributors">
|
<a href="https://github.com/ml-explore/mlx/graphs/contributors">
|
||||||
<img class="dark-light" src="https://contrib.rocks/image?repo=ml-explore/mlx&anon=0&columns=20&max=100&r=true" />
|
<img class="dark-light" src="https://contrib.rocks/image?repo=ml-explore/mlx&anon=0&columns=20&max=100&r=true" />
|
||||||
</a>
|
</a>
|
||||||
|
|
||||||
|
# Organizations
|
||||||
|
|
||||||
|
MLX has received contributions from the following companies:
|
||||||
|
- NVIDIA Corporation & Affiliates
|
||||||
|
|
||||||
# Third-Party Software
|
# Third-Party Software
|
||||||
|
|
||||||
MLX leverages several third-party software, listed here together with
|
MLX leverages several third-party software, listed here together with
|
||||||
|
|||||||
@@ -87,22 +87,21 @@ cmake_policy(SET CMP0135 NEW)
|
|||||||
|
|
||||||
add_library(mlx)
|
add_library(mlx)
|
||||||
|
|
||||||
if(MLX_BUILD_METAL)
|
|
||||||
set(METAL_LIB "-framework Metal")
|
|
||||||
set(FOUNDATION_LIB "-framework Foundation")
|
|
||||||
set(QUARTZ_LIB "-framework QuartzCore")
|
|
||||||
endif()
|
|
||||||
|
|
||||||
if(MLX_BUILD_CUDA)
|
if(MLX_BUILD_CUDA)
|
||||||
enable_language(CUDA)
|
enable_language(CUDA)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
if(MLX_BUILD_METAL AND NOT METAL_LIB)
|
if(MLX_BUILD_METAL)
|
||||||
message(STATUS "Metal not found. Unable to build GPU")
|
find_library(METAL_LIB Metal)
|
||||||
set(MLX_BUILD_METAL OFF)
|
find_library(FOUNDATION_LIB Foundation)
|
||||||
set(MLX_METAL_DEBUG OFF)
|
find_library(QUARTZ_LIB QuartzCore)
|
||||||
elseif(MLX_BUILD_METAL)
|
if(METAL_LIB)
|
||||||
message(STATUS "Building METAL sources")
|
message(STATUS "Metal found ${METAL_LIB}")
|
||||||
|
else()
|
||||||
|
message(
|
||||||
|
FATAL_ERROR
|
||||||
|
"Metal not found. Set MLX_BUILD_METAL=OFF to build without GPU")
|
||||||
|
endif()
|
||||||
|
|
||||||
if(MLX_METAL_DEBUG)
|
if(MLX_METAL_DEBUG)
|
||||||
add_compile_definitions(MLX_METAL_DEBUG)
|
add_compile_definitions(MLX_METAL_DEBUG)
|
||||||
@@ -111,7 +110,8 @@ elseif(MLX_BUILD_METAL)
|
|||||||
# Throw an error if xcrun not found
|
# Throw an error if xcrun not found
|
||||||
execute_process(
|
execute_process(
|
||||||
COMMAND zsh "-c" "/usr/bin/xcrun -sdk macosx --show-sdk-version"
|
COMMAND zsh "-c" "/usr/bin/xcrun -sdk macosx --show-sdk-version"
|
||||||
OUTPUT_VARIABLE MACOS_SDK_VERSION COMMAND_ERROR_IS_FATAL ANY)
|
OUTPUT_VARIABLE MACOS_SDK_VERSION
|
||||||
|
OUTPUT_STRIP_TRAILING_WHITESPACE COMMAND_ERROR_IS_FATAL ANY)
|
||||||
|
|
||||||
if(${MACOS_SDK_VERSION} LESS 14.0)
|
if(${MACOS_SDK_VERSION} LESS 14.0)
|
||||||
message(
|
message(
|
||||||
@@ -140,6 +140,12 @@ elseif(MLX_BUILD_METAL)
|
|||||||
target_link_libraries(mlx PUBLIC ${METAL_LIB} ${FOUNDATION_LIB} ${QUARTZ_LIB})
|
target_link_libraries(mlx PUBLIC ${METAL_LIB} ${FOUNDATION_LIB} ${QUARTZ_LIB})
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
|
if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
|
||||||
|
# With newer clang/gcc versions following libs are implicitly linked, but when
|
||||||
|
# building on old distributions they need to be explicitly listed.
|
||||||
|
target_link_libraries(mlx PRIVATE dl pthread)
|
||||||
|
endif()
|
||||||
|
|
||||||
if(WIN32)
|
if(WIN32)
|
||||||
if(MSVC)
|
if(MSVC)
|
||||||
# GGUF does not build with MSVC.
|
# GGUF does not build with MSVC.
|
||||||
|
|||||||
54
cmake/FindNCCL.cmake
Normal file
54
cmake/FindNCCL.cmake
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
# FindNCCL.cmake This module finds the NVIDIA NCCL library and its include
|
||||||
|
# directories.
|
||||||
|
|
||||||
|
set(NCCL_ROOT_DIR
|
||||||
|
$ENV{NCCL_ROOT_DIR}
|
||||||
|
CACHE PATH "Folder contains NVIDIA NCCL")
|
||||||
|
|
||||||
|
find_path(
|
||||||
|
NCCL_INCLUDE_DIRS
|
||||||
|
NAMES nccl.h
|
||||||
|
HINTS ${NCCL_INCLUDE_DIR} ${NCCL_ROOT_DIR} ${NCCL_ROOT_DIR}/include
|
||||||
|
${CUDA_TOOLKIT_ROOT_DIR}/include)
|
||||||
|
|
||||||
|
if($ENV{USE_STATIC_NCCL})
|
||||||
|
message(
|
||||||
|
STATUS "USE_STATIC_NCCL detected. Linking against static NCCL library")
|
||||||
|
set(NCCL_LIBNAME "libnccl_static.a")
|
||||||
|
else()
|
||||||
|
set(NCCL_LIBNAME "nccl")
|
||||||
|
endif()
|
||||||
|
|
||||||
|
find_library(
|
||||||
|
NCCL_LIBRARIES
|
||||||
|
NAMES ${NCCL_LIBNAME}
|
||||||
|
HINTS ${NCCL_LIB_DIR}
|
||||||
|
${NCCL_ROOT_DIR}
|
||||||
|
${NCCL_ROOT_DIR}/lib
|
||||||
|
${NCCL_ROOT_DIR}/lib/x86_64-linux-gnu
|
||||||
|
${NCCL_ROOT_DIR}/lib64
|
||||||
|
${CUDA_TOOLKIT_ROOT_DIR}/lib
|
||||||
|
${CUDA_TOOLKIT_ROOT_DIR}/lib64)
|
||||||
|
|
||||||
|
include(FindPackageHandleStandardArgs)
|
||||||
|
find_package_handle_standard_args(NCCL DEFAULT_MSG NCCL_INCLUDE_DIRS
|
||||||
|
NCCL_LIBRARIES)
|
||||||
|
|
||||||
|
if(NCCL_FOUND)
|
||||||
|
set(NCCL_HEADER_FILE "${NCCL_INCLUDE_DIRS}/nccl.h")
|
||||||
|
message(
|
||||||
|
STATUS "Determining NCCL version from the header file: ${NCCL_HEADER_FILE}")
|
||||||
|
file(
|
||||||
|
STRINGS ${NCCL_HEADER_FILE} NCCL_MAJOR_VERSION_DEFINED
|
||||||
|
REGEX "^[ \t]*#define[ \t]+NCCL_MAJOR[ \t]+[0-9]+.*$"
|
||||||
|
LIMIT_COUNT 1)
|
||||||
|
if(NCCL_MAJOR_VERSION_DEFINED)
|
||||||
|
string(REGEX REPLACE "^[ \t]*#define[ \t]+NCCL_MAJOR[ \t]+" ""
|
||||||
|
NCCL_MAJOR_VERSION ${NCCL_MAJOR_VERSION_DEFINED})
|
||||||
|
message(STATUS "NCCL_MAJOR_VERSION: ${NCCL_MAJOR_VERSION}")
|
||||||
|
endif()
|
||||||
|
message(
|
||||||
|
STATUS
|
||||||
|
"Found NCCL (include: ${NCCL_INCLUDE_DIRS}, library: ${NCCL_LIBRARIES})")
|
||||||
|
mark_as_advanced(NCCL_ROOT_DIR NCCL_INCLUDE_DIRS NCCL_LIBRARIES)
|
||||||
|
endif()
|
||||||
@@ -1,4 +1,5 @@
|
|||||||
sphinx
|
sphinx
|
||||||
breathe
|
breathe
|
||||||
sphinx-book-theme
|
sphinx-book-theme
|
||||||
|
sphinx-copybutton
|
||||||
mlx
|
mlx
|
||||||
|
|||||||
@@ -18,6 +18,7 @@ release = version
|
|||||||
# -- General configuration ---------------------------------------------------
|
# -- General configuration ---------------------------------------------------
|
||||||
|
|
||||||
extensions = [
|
extensions = [
|
||||||
|
"sphinx_copybutton",
|
||||||
"sphinx.ext.autodoc",
|
"sphinx.ext.autodoc",
|
||||||
"sphinx.ext.autosummary",
|
"sphinx.ext.autosummary",
|
||||||
"sphinx.ext.intersphinx",
|
"sphinx.ext.intersphinx",
|
||||||
|
|||||||
@@ -127,7 +127,8 @@ relying on a copy from ``ensure_row_contiguous``:
|
|||||||
name="myexp_strided",
|
name="myexp_strided",
|
||||||
input_names=["inp"],
|
input_names=["inp"],
|
||||||
output_names=["out"],
|
output_names=["out"],
|
||||||
source=source
|
source=source,
|
||||||
|
ensure_row_contiguous=False,
|
||||||
)
|
)
|
||||||
|
|
||||||
def exp_elementwise(a: mx.array):
|
def exp_elementwise(a: mx.array):
|
||||||
@@ -138,7 +139,6 @@ relying on a copy from ``ensure_row_contiguous``:
|
|||||||
threadgroup=(256, 1, 1),
|
threadgroup=(256, 1, 1),
|
||||||
output_shapes=[a.shape],
|
output_shapes=[a.shape],
|
||||||
output_dtypes=[a.dtype],
|
output_dtypes=[a.dtype],
|
||||||
ensure_row_contiguous=False,
|
|
||||||
)
|
)
|
||||||
return outputs[0]
|
return outputs[0]
|
||||||
|
|
||||||
|
|||||||
@@ -70,6 +70,7 @@ are the CPU and GPU.
|
|||||||
python/fft
|
python/fft
|
||||||
python/linalg
|
python/linalg
|
||||||
python/metal
|
python/metal
|
||||||
|
python/cuda
|
||||||
python/memory_management
|
python/memory_management
|
||||||
python/nn
|
python/nn
|
||||||
python/optimizers
|
python/optimizers
|
||||||
|
|||||||
@@ -271,7 +271,7 @@ and the CUDA toolkit. For example on Ubuntu, run the following:
|
|||||||
dpkg -i cuda-keyring_1.1-1_all.deb
|
dpkg -i cuda-keyring_1.1-1_all.deb
|
||||||
apt-get update -y
|
apt-get update -y
|
||||||
apt-get -y install cuda-toolkit-12-9
|
apt-get -y install cuda-toolkit-12-9
|
||||||
apt-get install libblas-dev liblapack-dev liblapacke-dev -y
|
apt-get install libblas-dev liblapack-dev liblapacke-dev libcudnn9-dev-cuda-12 -y
|
||||||
|
|
||||||
|
|
||||||
When building either the Python or C++ APIs make sure to pass the cmake flag
|
When building either the Python or C++ APIs make sure to pass the cmake flag
|
||||||
|
|||||||
9
docs/src/python/cuda.rst
Normal file
9
docs/src/python/cuda.rst
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
CUDA
|
||||||
|
=====
|
||||||
|
|
||||||
|
.. currentmodule:: mlx.core.cuda
|
||||||
|
|
||||||
|
.. autosummary::
|
||||||
|
:toctree: _autosummary
|
||||||
|
|
||||||
|
is_available
|
||||||
@@ -13,3 +13,4 @@ Fast
|
|||||||
rope
|
rope
|
||||||
scaled_dot_product_attention
|
scaled_dot_product_attention
|
||||||
metal_kernel
|
metal_kernel
|
||||||
|
cuda_kernel
|
||||||
|
|||||||
@@ -27,6 +27,7 @@ simple functions.
|
|||||||
mish
|
mish
|
||||||
prelu
|
prelu
|
||||||
relu
|
relu
|
||||||
|
relu2
|
||||||
relu6
|
relu6
|
||||||
selu
|
selu
|
||||||
sigmoid
|
sigmoid
|
||||||
|
|||||||
@@ -50,6 +50,7 @@ Layers
|
|||||||
QuantizedLinear
|
QuantizedLinear
|
||||||
RMSNorm
|
RMSNorm
|
||||||
ReLU
|
ReLU
|
||||||
|
ReLU2
|
||||||
ReLU6
|
ReLU6
|
||||||
RNN
|
RNN
|
||||||
RoPE
|
RoPE
|
||||||
|
|||||||
@@ -51,14 +51,14 @@ the saved state. Here's a simple example:
|
|||||||
optimizer.update(model, grads)
|
optimizer.update(model, grads)
|
||||||
|
|
||||||
# Save the state
|
# Save the state
|
||||||
state = tree_flatten(optimizer.state)
|
state = tree_flatten(optimizer.state, destination={})
|
||||||
mx.save_safetensors("optimizer.safetensors", dict(state))
|
mx.save_safetensors("optimizer.safetensors", state)
|
||||||
|
|
||||||
# Later on, for example when loading from a checkpoint,
|
# Later on, for example when loading from a checkpoint,
|
||||||
# recreate the optimizer and load the state
|
# recreate the optimizer and load the state
|
||||||
optimizer = optim.Adam(learning_rate=1e-2)
|
optimizer = optim.Adam(learning_rate=1e-2)
|
||||||
|
|
||||||
state = tree_unflatten(list(mx.load("optimizer.safetensors").items()))
|
state = tree_unflatten(mx.load("optimizer.safetensors"))
|
||||||
optimizer.state = state
|
optimizer.state = state
|
||||||
|
|
||||||
Note, not every optimizer configuation parameter is saved in the state. For
|
Note, not every optimizer configuation parameter is saved in the state. For
|
||||||
|
|||||||
@@ -130,8 +130,8 @@ Now make an array, and benchmark both functions:
|
|||||||
.. code-block:: python
|
.. code-block:: python
|
||||||
|
|
||||||
x = mx.random.uniform(shape=(32, 1000, 4096))
|
x = mx.random.uniform(shape=(32, 1000, 4096))
|
||||||
timeit(nn.gelu, x)
|
timeit(gelu, x)
|
||||||
timeit(mx.compile(nn.gelu), x)
|
timeit(mx.compile(gelu), x)
|
||||||
|
|
||||||
On an M1 Max the times are 15.5 and 3.1 milliseconds. The compiled ``gelu`` is
|
On an M1 Max the times are 15.5 and 3.1 milliseconds. The compiled ``gelu`` is
|
||||||
five times faster.
|
five times faster.
|
||||||
@@ -225,7 +225,7 @@ In some cases returning updated state can be pretty inconvenient. Hence,
|
|||||||
def fun(x, y):
|
def fun(x, y):
|
||||||
z = x + y
|
z = x + y
|
||||||
state.append(z)
|
state.append(z)
|
||||||
return mx.exp(z), state
|
return mx.exp(z)
|
||||||
|
|
||||||
fun(mx.array(1.0), mx.array(2.0))
|
fun(mx.array(1.0), mx.array(2.0))
|
||||||
# Prints [array(3, dtype=float32)]
|
# Prints [array(3, dtype=float32)]
|
||||||
|
|||||||
@@ -184,7 +184,7 @@ almost identical to the example above:
|
|||||||
|
|
||||||
def step(model, x, y):
|
def step(model, x, y):
|
||||||
loss, grads = loss_grad_fn(model, x, y)
|
loss, grads = loss_grad_fn(model, x, y)
|
||||||
grads = mlx.nn.average_gradients(grads) # <---- This line was added
|
grads = mx.nn.average_gradients(grads) # <---- This line was added
|
||||||
optimizer.update(model, grads)
|
optimizer.update(model, grads)
|
||||||
return loss
|
return loss
|
||||||
|
|
||||||
|
|||||||
@@ -151,7 +151,7 @@ parameters, pass them as inputs to the ``call`` wrapper:
|
|||||||
model.update(tree_unflatten(list(params.items())))
|
model.update(tree_unflatten(list(params.items())))
|
||||||
return model(x)
|
return model(x)
|
||||||
|
|
||||||
params = dict(tree_flatten(model.parameters()))
|
params = tree_flatten(model.parameters(), destination={})
|
||||||
mx.export_function("model.mlxfn", call, (mx.zeros(4),), params)
|
mx.export_function("model.mlxfn", call, (mx.zeros(4),), params)
|
||||||
|
|
||||||
|
|
||||||
@@ -164,11 +164,11 @@ to export a function which can be used for inputs with variable shapes:
|
|||||||
|
|
||||||
.. code-block:: python
|
.. code-block:: python
|
||||||
|
|
||||||
mx.export_function("fun.mlxfn", mx.abs, mx.array(0.0), shapeless=True)
|
mx.export_function("fun.mlxfn", mx.abs, mx.array([0.0]), shapeless=True)
|
||||||
imported_abs = mx.import_function("fun.mlxfn")
|
imported_abs = mx.import_function("fun.mlxfn")
|
||||||
|
|
||||||
# Ok
|
# Ok
|
||||||
out, = imported_abs(mx.array(-1.0))
|
out, = imported_abs(mx.array([-1.0]))
|
||||||
|
|
||||||
# Also ok
|
# Also ok
|
||||||
out, = imported_abs(mx.array([-1.0, -2.0]))
|
out, = imported_abs(mx.array([-1.0, -2.0]))
|
||||||
|
|||||||
@@ -107,8 +107,20 @@ same array:
|
|||||||
>>> a
|
>>> a
|
||||||
array([1, 2, 0], dtype=int32)
|
array([1, 2, 0], dtype=int32)
|
||||||
|
|
||||||
|
Note that unlike NumPy, slicing an array creates a copy, not a view. So
|
||||||
|
mutating it does not mutate the original array:
|
||||||
|
|
||||||
Note, unlike NumPy, updates to the same location are nondeterministic:
|
.. code-block:: shell
|
||||||
|
|
||||||
|
>>> a = mx.array([1, 2, 3])
|
||||||
|
>>> b = a[:]
|
||||||
|
>>> b[2] = 0
|
||||||
|
>>> b
|
||||||
|
array([1, 2, 0], dtype=int32)
|
||||||
|
>>> a
|
||||||
|
array([1, 2, 3], dtype=int32)
|
||||||
|
|
||||||
|
Also unlike NumPy, updates to the same location are nondeterministic:
|
||||||
|
|
||||||
.. code-block:: shell
|
.. code-block:: shell
|
||||||
|
|
||||||
|
|||||||
@@ -228,31 +228,4 @@ std::pair<Dims, Dims> get_grid_and_block_common(int dim0, int dim1, int dim2) {
|
|||||||
std::make_tuple(gx, gy, gz), std::make_tuple(bx, by, bz));
|
std::make_tuple(gx, gy, gz), std::make_tuple(bx, by, bz));
|
||||||
}
|
}
|
||||||
|
|
||||||
array swapaxes_in_eval(const array& x, int axis1, int axis2) {
|
|
||||||
int ndim = x.ndim();
|
|
||||||
if (axis1 < 0) {
|
|
||||||
axis1 += ndim;
|
|
||||||
}
|
|
||||||
if (axis2 < 0) {
|
|
||||||
axis2 += ndim;
|
|
||||||
}
|
|
||||||
|
|
||||||
auto shape = x.shape();
|
|
||||||
std::swap(shape[axis1], shape[axis2]);
|
|
||||||
auto strides = x.strides();
|
|
||||||
std::swap(strides[axis1], strides[axis2]);
|
|
||||||
|
|
||||||
auto [data_size, row_contiguous, col_contiguous] =
|
|
||||||
check_contiguity(shape, strides);
|
|
||||||
bool contiguous = data_size == x.data_size();
|
|
||||||
|
|
||||||
array out(std::move(shape), x.dtype(), nullptr, {});
|
|
||||||
out.copy_shared_buffer(
|
|
||||||
x,
|
|
||||||
std::move(strides),
|
|
||||||
{contiguous, row_contiguous, col_contiguous},
|
|
||||||
x.data_size());
|
|
||||||
return out;
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace mlx::core
|
} // namespace mlx::core
|
||||||
|
|||||||
@@ -196,9 +196,6 @@ void shared_buffer_reshape(
|
|||||||
const Strides& out_strides,
|
const Strides& out_strides,
|
||||||
array& out);
|
array& out);
|
||||||
|
|
||||||
// Like the swapaxes op but safe to call in eval_gpu.
|
|
||||||
array swapaxes_in_eval(const array& x, int axis1, int axis2);
|
|
||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
inline SmallVector<T> remove_index(SmallVector<T> vec, size_t index) {
|
inline SmallVector<T> remove_index(SmallVector<T> vec, size_t index) {
|
||||||
vec.erase(std::next(vec.begin(), index));
|
vec.erase(std::next(vec.begin(), index));
|
||||||
|
|||||||
@@ -15,6 +15,7 @@
|
|||||||
#include "mlx/backend/cpu/jit_compiler.h"
|
#include "mlx/backend/cpu/jit_compiler.h"
|
||||||
#include "mlx/device.h"
|
#include "mlx/device.h"
|
||||||
#include "mlx/graph_utils.h"
|
#include "mlx/graph_utils.h"
|
||||||
|
#include "mlx/version.h"
|
||||||
|
|
||||||
namespace mlx::core {
|
namespace mlx::core {
|
||||||
|
|
||||||
@@ -94,7 +95,11 @@ void* compile(
|
|||||||
kernel_file_name = kernel_name;
|
kernel_file_name = kernel_name;
|
||||||
}
|
}
|
||||||
|
|
||||||
auto output_dir = std::filesystem::temp_directory_path();
|
auto output_dir =
|
||||||
|
std::filesystem::temp_directory_path() / "mlx" / version() / "cpu";
|
||||||
|
if (!std::filesystem::exists(output_dir)) {
|
||||||
|
std::filesystem::create_directories(output_dir);
|
||||||
|
}
|
||||||
|
|
||||||
std::string shared_lib_name = "lib" + kernel_file_name + ".so";
|
std::string shared_lib_name = "lib" + kernel_file_name + ".so";
|
||||||
auto shared_lib_path = (output_dir / shared_lib_name).string();
|
auto shared_lib_path = (output_dir / shared_lib_name).string();
|
||||||
@@ -157,10 +162,12 @@ inline void build_kernel(
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
// Start the kernel
|
// Start the kernel
|
||||||
os << "void " << kernel_name << "(void** args) {" << std::endl;
|
os << "void " << kernel_name
|
||||||
|
<< "(int* shape, int64_t** strides, void** args) {" << std::endl;
|
||||||
|
|
||||||
// Add the input arguments
|
// Add the input arguments
|
||||||
int cnt = 0;
|
int cnt = 0;
|
||||||
|
int strides_index = 1;
|
||||||
for (size_t i = 0; i < inputs.size(); ++i) {
|
for (size_t i = 0; i < inputs.size(); ++i) {
|
||||||
// Skip constants from the input list
|
// Skip constants from the input list
|
||||||
if (is_constant(i)) {
|
if (is_constant(i)) {
|
||||||
@@ -175,8 +182,8 @@ inline void build_kernel(
|
|||||||
<< "];" << std::endl;
|
<< "];" << std::endl;
|
||||||
// Scalars and contiguous need no strides
|
// Scalars and contiguous need no strides
|
||||||
if (!is_scalar(x) && !contiguous) {
|
if (!is_scalar(x) && !contiguous) {
|
||||||
os << " const size_t* " << xname << "_strides = (size_t*)args[" << cnt++
|
os << " const int64_t* " << xname << "_strides = strides["
|
||||||
<< "];" << std::endl;
|
<< strides_index++ << "];" << std::endl;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -186,10 +193,8 @@ inline void build_kernel(
|
|||||||
os << " " << tstr << "* " << namer.get_name(x) << " = (" << tstr
|
os << " " << tstr << "* " << namer.get_name(x) << " = (" << tstr
|
||||||
<< "*)args[" << cnt++ << "];" << std::endl;
|
<< "*)args[" << cnt++ << "];" << std::endl;
|
||||||
}
|
}
|
||||||
// Add output strides and shape to extract the indices.
|
// Add output size
|
||||||
if (!contiguous) {
|
if (contiguous) {
|
||||||
os << " const int* shape = (int*)args[" << cnt++ << "];" << std::endl;
|
|
||||||
} else {
|
|
||||||
os << " const size_t size = (size_t)args[" << cnt++ << "];" << std::endl;
|
os << " const size_t size = (size_t)args[" << cnt++ << "];" << std::endl;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -288,17 +293,8 @@ void Compiled::eval_cpu(
|
|||||||
auto [contiguous, shape, strides] =
|
auto [contiguous, shape, strides] =
|
||||||
compiled_collapse_contiguous_dims(inputs, outputs[0], is_constant_);
|
compiled_collapse_contiguous_dims(inputs, outputs[0], is_constant_);
|
||||||
|
|
||||||
// Force allocating shape/strides on heap so we can take their data() first
|
|
||||||
// and then std::move them.
|
|
||||||
// TODO: Refactor code to avoid heap allocation.
|
|
||||||
shape.grow();
|
|
||||||
for (auto& s : strides) {
|
|
||||||
s.grow();
|
|
||||||
}
|
|
||||||
|
|
||||||
// Collect function input arguments.
|
// Collect function input arguments.
|
||||||
std::vector<void*> args;
|
std::vector<void*> args;
|
||||||
int strides_index = 1;
|
|
||||||
for (size_t i = 0; i < inputs.size(); ++i) {
|
for (size_t i = 0; i < inputs.size(); ++i) {
|
||||||
if (is_constant_(i)) {
|
if (is_constant_(i)) {
|
||||||
continue;
|
continue;
|
||||||
@@ -306,9 +302,6 @@ void Compiled::eval_cpu(
|
|||||||
const auto& x = inputs[i];
|
const auto& x = inputs[i];
|
||||||
encoder.set_input_array(x);
|
encoder.set_input_array(x);
|
||||||
args.push_back((void*)x.data<void>());
|
args.push_back((void*)x.data<void>());
|
||||||
if (!contiguous && !is_scalar(x)) {
|
|
||||||
args.push_back(strides[strides_index++].data());
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get the kernel name from the lib
|
// Get the kernel name from the lib
|
||||||
@@ -343,16 +336,20 @@ void Compiled::eval_cpu(
|
|||||||
args.push_back(x.data<void>());
|
args.push_back(x.data<void>());
|
||||||
encoder.set_output_array(x);
|
encoder.set_output_array(x);
|
||||||
}
|
}
|
||||||
if (!contiguous) {
|
if (contiguous) {
|
||||||
args.push_back((void*)shape.data());
|
|
||||||
} else {
|
|
||||||
args.push_back((void*)outputs[0].data_size());
|
args.push_back((void*)outputs[0].data_size());
|
||||||
}
|
}
|
||||||
auto fun = (void (*)(void**))fn_ptr;
|
auto fun = reinterpret_cast<void (*)(int*, int64_t**, void**)>(fn_ptr);
|
||||||
encoder.dispatch([fun,
|
encoder.dispatch([fun,
|
||||||
args = std::move(args),
|
args = std::move(args),
|
||||||
strides = std::move(strides),
|
strides = std::move(strides),
|
||||||
shape = std::move(shape)]() mutable { fun(args.data()); });
|
shape = std::move(shape)]() mutable {
|
||||||
|
SmallVector<int64_t*> strides_ptrs;
|
||||||
|
for (auto& s : strides) {
|
||||||
|
strides_ptrs.push_back(s.data());
|
||||||
|
}
|
||||||
|
fun(shape.data(), strides_ptrs.data(), args.data());
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace mlx::core
|
} // namespace mlx::core
|
||||||
|
|||||||
@@ -47,7 +47,7 @@ INSTANTIATE_LAPACK_REAL(orgqr)
|
|||||||
INSTANTIATE_LAPACK_REAL(syevd)
|
INSTANTIATE_LAPACK_REAL(syevd)
|
||||||
INSTANTIATE_LAPACK_REAL(geev)
|
INSTANTIATE_LAPACK_REAL(geev)
|
||||||
INSTANTIATE_LAPACK_REAL(potrf)
|
INSTANTIATE_LAPACK_REAL(potrf)
|
||||||
INSTANTIATE_LAPACK_REAL(gesvdx)
|
INSTANTIATE_LAPACK_REAL(gesdd)
|
||||||
INSTANTIATE_LAPACK_REAL(getrf)
|
INSTANTIATE_LAPACK_REAL(getrf)
|
||||||
INSTANTIATE_LAPACK_REAL(getri)
|
INSTANTIATE_LAPACK_REAL(getri)
|
||||||
INSTANTIATE_LAPACK_REAL(trtri)
|
INSTANTIATE_LAPACK_REAL(trtri)
|
||||||
|
|||||||
@@ -1,7 +1,5 @@
|
|||||||
// Copyright © 2023 Apple Inc.
|
// Copyright © 2023 Apple Inc.
|
||||||
|
|
||||||
#include <cassert>
|
|
||||||
|
|
||||||
#include "mlx/backend/cpu/copy.h"
|
#include "mlx/backend/cpu/copy.h"
|
||||||
#include "mlx/backend/cpu/encoder.h"
|
#include "mlx/backend/cpu/encoder.h"
|
||||||
#include "mlx/backend/cpu/simd/simd.h"
|
#include "mlx/backend/cpu/simd/simd.h"
|
||||||
@@ -13,6 +11,35 @@ namespace mlx::core {
|
|||||||
|
|
||||||
namespace {
|
namespace {
|
||||||
|
|
||||||
|
const static float MXFP4_LUT[16] = {
|
||||||
|
+0.0f,
|
||||||
|
+0.5f,
|
||||||
|
+1.0f,
|
||||||
|
+1.5f,
|
||||||
|
+2.0f,
|
||||||
|
+3.0f,
|
||||||
|
+4.0f,
|
||||||
|
+6.0f,
|
||||||
|
-0.0f,
|
||||||
|
-0.5f,
|
||||||
|
-1.0f,
|
||||||
|
-1.5f,
|
||||||
|
-2.0f,
|
||||||
|
-3.0f,
|
||||||
|
-4.0f,
|
||||||
|
-6.0f};
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
static inline T dequantize_scale(uint8_t s) {
|
||||||
|
using FOrI = union {
|
||||||
|
bfloat16_t f;
|
||||||
|
uint16_t i;
|
||||||
|
};
|
||||||
|
FOrI out;
|
||||||
|
out.i = (s == 0 ? 0x40 : (static_cast<uint16_t>(s) << 7));
|
||||||
|
return static_cast<T>(out.f);
|
||||||
|
}
|
||||||
|
|
||||||
inline constexpr short get_pack_factor(int bits, int wsize = 8) {
|
inline constexpr short get_pack_factor(int bits, int wsize = 8) {
|
||||||
return (bits == 3 || bits == 5) ? 8 : (bits == 6 ? 4 : wsize / bits);
|
return (bits == 3 || bits == 5) ? 8 : (bits == 6 ? 4 : wsize / bits);
|
||||||
}
|
}
|
||||||
@@ -407,6 +434,231 @@ void _qmm_dispatch(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
void mxfp4_qmm(
|
||||||
|
T* result,
|
||||||
|
const T* x,
|
||||||
|
const uint32_t* w,
|
||||||
|
const uint8_t* scales,
|
||||||
|
int M,
|
||||||
|
int N,
|
||||||
|
int K) {
|
||||||
|
constexpr int group_size = 32;
|
||||||
|
constexpr int pack_factor = get_pack_factor(4, 8);
|
||||||
|
constexpr int bytes_per_pack = get_bytes_per_pack(4);
|
||||||
|
constexpr int packs_in_group = group_size / pack_factor;
|
||||||
|
|
||||||
|
for (int m = 0; m < M; m++) {
|
||||||
|
const uint8_t* w_local = (const uint8_t*)w;
|
||||||
|
const uint8_t* scales_local = scales;
|
||||||
|
|
||||||
|
std::fill(result, result + N, 0);
|
||||||
|
|
||||||
|
for (int k = 0; k < K; k++) {
|
||||||
|
T* result_local = result;
|
||||||
|
T xi = *x++;
|
||||||
|
|
||||||
|
for (int n = 0; n < N; n += group_size) {
|
||||||
|
T scale = dequantize_scale<T>(*scales_local++);
|
||||||
|
for (int ng = 0; ng < packs_in_group; ng++) {
|
||||||
|
uint8_t wi = *w_local++;
|
||||||
|
#pragma clang loop unroll(full)
|
||||||
|
for (int p = 0; p < pack_factor; p++) {
|
||||||
|
(*result_local++) +=
|
||||||
|
xi * scale * static_cast<T>(MXFP4_LUT[wi & 0xf]);
|
||||||
|
wi >>= 4;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
result += N;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
void mxfp4_qmm_t(
|
||||||
|
T* result,
|
||||||
|
const T* x,
|
||||||
|
const uint32_t* w,
|
||||||
|
const uint8_t* scales,
|
||||||
|
int M,
|
||||||
|
int N,
|
||||||
|
int K) {
|
||||||
|
constexpr int group_size = 32;
|
||||||
|
constexpr int pack_factor = get_pack_factor(4, 8);
|
||||||
|
constexpr int bytes_per_pack = get_bytes_per_pack(4);
|
||||||
|
constexpr int packs_in_group = group_size / pack_factor;
|
||||||
|
|
||||||
|
for (int m = 0; m < M; m++) {
|
||||||
|
const uint8_t* w_local = (const uint8_t*)w;
|
||||||
|
const uint8_t* scales_local = scales;
|
||||||
|
|
||||||
|
for (int n = 0; n < N; n++) {
|
||||||
|
const T* x_local = x;
|
||||||
|
T sum = 0;
|
||||||
|
for (int k = 0; k < K; k += group_size) {
|
||||||
|
T scale = dequantize_scale<T>(*scales_local++);
|
||||||
|
|
||||||
|
T gsum = 0;
|
||||||
|
for (int kw = 0; kw < packs_in_group; kw++) {
|
||||||
|
uint8_t wi = *w_local++;
|
||||||
|
#pragma clang loop unroll(full)
|
||||||
|
for (int p = 0; p < pack_factor; p++) {
|
||||||
|
gsum += (*x_local++) * static_cast<T>(MXFP4_LUT[wi & 0xf]);
|
||||||
|
wi >>= 4;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sum += scale * gsum;
|
||||||
|
}
|
||||||
|
*result = sum;
|
||||||
|
result++;
|
||||||
|
}
|
||||||
|
|
||||||
|
x += K;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
template <int S>
|
||||||
|
simd::Simd<float, S> mxfp4_extract_bits_simd(const uint32_t* w) {
|
||||||
|
if constexpr (S == 8) {
|
||||||
|
constexpr std::array<uint32_t, 8> shifts_ = {{0, 4, 8, 12, 16, 20, 24, 28}};
|
||||||
|
auto shifts(*(simd::Simd<uint32_t, S>*)&shifts_);
|
||||||
|
auto wi = simd::Simd<uint32_t, S>(*w);
|
||||||
|
wi = wi >> shifts;
|
||||||
|
wi = wi & 0xf;
|
||||||
|
simd::Simd<float, S> w_out;
|
||||||
|
for (int i = 0; i < S; ++i) {
|
||||||
|
w_out[i] = MXFP4_LUT[wi[i]];
|
||||||
|
}
|
||||||
|
return w_out;
|
||||||
|
} else {
|
||||||
|
// Appease compiler.. but should never get here
|
||||||
|
throw std::runtime_error("Unsupported combination for simd qmm.");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
void mxfp4_qmm_t_simd(
|
||||||
|
T* result,
|
||||||
|
const T* x,
|
||||||
|
const uint32_t* w,
|
||||||
|
const uint8_t* scales,
|
||||||
|
int M,
|
||||||
|
int N,
|
||||||
|
int K) {
|
||||||
|
constexpr int group_size = 32;
|
||||||
|
constexpr int pack_factor = 32 / 4;
|
||||||
|
constexpr int packs_in_group = group_size / pack_factor;
|
||||||
|
constexpr int S = simd::max_size<T>;
|
||||||
|
static_assert(
|
||||||
|
S % pack_factor == 0, "SIMD size must be divisible by pack factor");
|
||||||
|
constexpr int packs_per_simd = S / pack_factor;
|
||||||
|
|
||||||
|
for (int m = 0; m < M; m++) {
|
||||||
|
const uint32_t* w_local = w;
|
||||||
|
const uint8_t* scales_local = scales;
|
||||||
|
|
||||||
|
for (int n = 0; n < N; n++) {
|
||||||
|
simd::Simd<float, S> acc(0);
|
||||||
|
auto x_local = x;
|
||||||
|
for (int k = 0; k < K; k += group_size) {
|
||||||
|
T scale = dequantize_scale<T>(*scales_local++);
|
||||||
|
|
||||||
|
simd::Simd<float, S> g_acc(0);
|
||||||
|
for (int kw = 0; kw < packs_in_group; kw += packs_per_simd) {
|
||||||
|
// Extract bits
|
||||||
|
auto wf = mxfp4_extract_bits_simd<S>(w_local);
|
||||||
|
w_local += packs_per_simd;
|
||||||
|
simd::Simd<float, S> x_simd = simd::load<T, S>(x_local);
|
||||||
|
g_acc = g_acc + x_simd * wf;
|
||||||
|
x_local += S;
|
||||||
|
}
|
||||||
|
acc = acc + scale * g_acc;
|
||||||
|
}
|
||||||
|
|
||||||
|
*result = T(simd::sum(acc));
|
||||||
|
result++;
|
||||||
|
}
|
||||||
|
x += K;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
void mxfp4_qmm_dispatch_transpose(
|
||||||
|
T* result,
|
||||||
|
const T* x,
|
||||||
|
const uint32_t* w,
|
||||||
|
const uint8_t* scales,
|
||||||
|
int M,
|
||||||
|
int N,
|
||||||
|
int K,
|
||||||
|
bool transposed_w) {
|
||||||
|
if (transposed_w) {
|
||||||
|
// the simd size must be a multiple of the number of elements per word
|
||||||
|
if constexpr (simd::max_size<T> % 8 == 0) {
|
||||||
|
mxfp4_qmm_t_simd<T>(result, x, w, scales, M, N, K);
|
||||||
|
} else {
|
||||||
|
mxfp4_qmm_t<T>(result, x, w, scales, M, N, K);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
mxfp4_qmm<T>(result, x, w, scales, M, N, K);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
void mxfp4_qmm_dispatch_typed(
|
||||||
|
array& out,
|
||||||
|
const array& x,
|
||||||
|
const array& w,
|
||||||
|
const array& scales,
|
||||||
|
bool transposed_w) {
|
||||||
|
int K = x.shape(-1);
|
||||||
|
int M = x.ndim() > 1 ? x.shape(-2) : 1;
|
||||||
|
int N = out.shape(-1);
|
||||||
|
int w_els = w.ndim() > 2 ? w.shape(-1) * w.shape(-2) : 0;
|
||||||
|
int g_els = w.ndim() > 2 ? scales.shape(-1) * scales.shape(-2) : 0;
|
||||||
|
int batch_size = x.size() / (K * M);
|
||||||
|
|
||||||
|
auto out_ptr = out.data<T>();
|
||||||
|
auto x_ptr = x.data<T>();
|
||||||
|
auto w_ptr = w.data<uint32_t>();
|
||||||
|
auto scales_ptr = scales.data<uint8_t>();
|
||||||
|
for (int i = 0; i < batch_size; i++) {
|
||||||
|
mxfp4_qmm_dispatch_transpose<T>(
|
||||||
|
out_ptr + i * M * N,
|
||||||
|
x_ptr + elem_to_loc(i * M * K, x.shape(), x.strides()),
|
||||||
|
w_ptr + elem_to_loc(i * w_els, w.shape(), w.strides()),
|
||||||
|
scales_ptr + elem_to_loc(i * g_els, scales.shape(), scales.strides()),
|
||||||
|
M,
|
||||||
|
N,
|
||||||
|
K,
|
||||||
|
transposed_w);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void mxfp4_qmm_dispatch(
|
||||||
|
array& out,
|
||||||
|
const array& x,
|
||||||
|
const array& w,
|
||||||
|
const array& scales,
|
||||||
|
bool transposed_w) {
|
||||||
|
switch (x.dtype()) {
|
||||||
|
case bfloat16:
|
||||||
|
mxfp4_qmm_dispatch_typed<bfloat16_t>(out, x, w, scales, transposed_w);
|
||||||
|
break;
|
||||||
|
case float16:
|
||||||
|
mxfp4_qmm_dispatch_typed<float16_t>(out, x, w, scales, transposed_w);
|
||||||
|
break;
|
||||||
|
case float32:
|
||||||
|
mxfp4_qmm_dispatch_typed<float>(out, x, w, scales, transposed_w);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
throw std::invalid_argument(
|
||||||
|
"[quantized_matmul] only floating types are supported");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
void _bs_qmm_dispatch_typed(
|
void _bs_qmm_dispatch_typed(
|
||||||
array& out,
|
array& out,
|
||||||
@@ -513,41 +765,106 @@ void _bs_qmm_dispatch(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
void mxfp4_bs_qmm_dispatch_typed(
|
||||||
|
array& out,
|
||||||
|
const array& x,
|
||||||
|
const array& w,
|
||||||
|
const array& scales,
|
||||||
|
const array& lhs_indices,
|
||||||
|
const array& rhs_indices,
|
||||||
|
bool transposed_w) {
|
||||||
|
int K = x.shape(-1);
|
||||||
|
int M = x.shape(-2);
|
||||||
|
int N = out.shape(-1);
|
||||||
|
|
||||||
|
int w_els = w.shape(-1) * w.shape(-2);
|
||||||
|
int g_els = scales.shape(-1) * scales.shape(-2);
|
||||||
|
|
||||||
|
auto out_ptr = out.data<T>();
|
||||||
|
auto x_ptr = x.data<T>();
|
||||||
|
auto w_ptr = w.data<uint32_t>();
|
||||||
|
auto scales_ptr = scales.data<uint8_t>();
|
||||||
|
auto lhs_indices_ptr = lhs_indices.data<uint32_t>();
|
||||||
|
auto rhs_indices_ptr = rhs_indices.data<uint32_t>();
|
||||||
|
|
||||||
|
for (int i = 0; i < lhs_indices.size(); i++) {
|
||||||
|
int x_idx = lhs_indices_ptr[elem_to_loc(
|
||||||
|
i, lhs_indices.shape(), lhs_indices.strides())];
|
||||||
|
int w_idx = rhs_indices_ptr[elem_to_loc(
|
||||||
|
i, rhs_indices.shape(), rhs_indices.strides())];
|
||||||
|
mxfp4_qmm_dispatch_transpose<T>(
|
||||||
|
out_ptr + i * M * N,
|
||||||
|
x_ptr + elem_to_loc(x_idx * M * K, x.shape(), x.strides()),
|
||||||
|
w_ptr + elem_to_loc(w_idx * w_els, w.shape(), w.strides()),
|
||||||
|
scales_ptr +
|
||||||
|
elem_to_loc(w_idx * g_els, scales.shape(), scales.strides()),
|
||||||
|
M,
|
||||||
|
N,
|
||||||
|
K,
|
||||||
|
transposed_w);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void mxfp4_bs_qmm_dispatch(
|
||||||
|
array& out,
|
||||||
|
const array& x,
|
||||||
|
const array& w,
|
||||||
|
const array& scales,
|
||||||
|
const array& lhs_indices,
|
||||||
|
const array& rhs_indices,
|
||||||
|
bool transposed_w) {
|
||||||
|
switch (x.dtype()) {
|
||||||
|
case float32:
|
||||||
|
mxfp4_bs_qmm_dispatch_typed<float>(
|
||||||
|
out, x, w, scales, lhs_indices, rhs_indices, transposed_w);
|
||||||
|
break;
|
||||||
|
case float16:
|
||||||
|
mxfp4_bs_qmm_dispatch_typed<float16_t>(
|
||||||
|
out, x, w, scales, lhs_indices, rhs_indices, transposed_w);
|
||||||
|
break;
|
||||||
|
case bfloat16:
|
||||||
|
mxfp4_bs_qmm_dispatch_typed<bfloat16_t>(
|
||||||
|
out, x, w, scales, lhs_indices, rhs_indices, transposed_w);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
throw std::invalid_argument(
|
||||||
|
"[quantized_matmul] only floating types are supported");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
} // namespace
|
} // namespace
|
||||||
|
|
||||||
void QuantizedMatmul::eval_cpu(const std::vector<array>& inputs, array& out) {
|
void QuantizedMatmul::eval_cpu(const std::vector<array>& inputs, array& out) {
|
||||||
assert(inputs.size() == 4);
|
|
||||||
|
|
||||||
auto& x_pre = inputs[0];
|
auto& x_pre = inputs[0];
|
||||||
auto& w_pre = inputs[1];
|
auto& w_pre = inputs[1];
|
||||||
auto& scales_pre = inputs[2];
|
auto& scales_pre = inputs[2];
|
||||||
auto& biases_pre = inputs[3];
|
|
||||||
|
|
||||||
std::vector<array> temps;
|
auto& encoder = cpu::get_command_encoder(stream());
|
||||||
auto ensure_row_contiguous = [s = stream(), &temps](const array& arr) {
|
auto ensure_row_contiguous = [s = stream(), &encoder](const array& arr) {
|
||||||
if (arr.flags().row_contiguous) {
|
if (arr.flags().row_contiguous) {
|
||||||
return arr;
|
return arr;
|
||||||
} else {
|
} else {
|
||||||
temps.push_back(array(arr.shape(), arr.dtype(), nullptr, {}));
|
auto arr_cpy = array(arr.shape(), arr.dtype(), nullptr, {});
|
||||||
copy_cpu(arr, temps.back(), CopyType::General, s);
|
copy_cpu(arr, arr_cpy, CopyType::General, s);
|
||||||
return temps.back();
|
encoder.add_temporary(arr_cpy);
|
||||||
|
return arr_cpy;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
auto x = ensure_row_contiguous(x_pre);
|
auto x = ensure_row_contiguous(x_pre);
|
||||||
auto w = ensure_row_contiguous(w_pre);
|
auto w = ensure_row_contiguous(w_pre);
|
||||||
auto scales = ensure_row_contiguous(scales_pre);
|
auto scales = ensure_row_contiguous(scales_pre);
|
||||||
auto biases = ensure_row_contiguous(biases_pre);
|
|
||||||
|
|
||||||
out.set_data(allocator::malloc(out.nbytes()));
|
out.set_data(allocator::malloc(out.nbytes()));
|
||||||
|
|
||||||
auto& encoder = cpu::get_command_encoder(stream());
|
|
||||||
encoder.add_temporaries(std::move(temps));
|
|
||||||
encoder.set_input_array(x);
|
encoder.set_input_array(x);
|
||||||
encoder.set_input_array(w);
|
encoder.set_input_array(w);
|
||||||
encoder.set_input_array(scales);
|
encoder.set_input_array(scales);
|
||||||
encoder.set_input_array(biases);
|
|
||||||
encoder.set_output_array(out);
|
encoder.set_output_array(out);
|
||||||
|
if (mode_ == QuantizationMode::Affine) {
|
||||||
|
auto biases = ensure_row_contiguous(inputs[3]);
|
||||||
|
encoder.set_input_array(biases);
|
||||||
encoder.dispatch([out = array::unsafe_weak_copy(out),
|
encoder.dispatch([out = array::unsafe_weak_copy(out),
|
||||||
x = array::unsafe_weak_copy(x),
|
x = array::unsafe_weak_copy(x),
|
||||||
w = array::unsafe_weak_copy(w),
|
w = array::unsafe_weak_copy(w),
|
||||||
@@ -558,48 +875,54 @@ void QuantizedMatmul::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|||||||
transpose_ = transpose_]() mutable {
|
transpose_ = transpose_]() mutable {
|
||||||
_qmm_dispatch(out, x, w, scales, biases, group_size_, bits_, transpose_);
|
_qmm_dispatch(out, x, w, scales, biases, group_size_, bits_, transpose_);
|
||||||
});
|
});
|
||||||
|
} else {
|
||||||
|
encoder.dispatch([out = array::unsafe_weak_copy(out),
|
||||||
|
x = array::unsafe_weak_copy(x),
|
||||||
|
w = array::unsafe_weak_copy(w),
|
||||||
|
scales = array::unsafe_weak_copy(scales),
|
||||||
|
transpose_ = transpose_]() mutable {
|
||||||
|
mxfp4_qmm_dispatch(out, x, w, scales, transpose_);
|
||||||
|
});
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void GatherQMM::eval_cpu(const std::vector<array>& inputs, array& out) {
|
void GatherQMM::eval_cpu(const std::vector<array>& inputs, array& out) {
|
||||||
assert(inputs.size() == 6);
|
|
||||||
|
|
||||||
auto& x_pre = inputs[0];
|
auto& x_pre = inputs[0];
|
||||||
auto& w_pre = inputs[1];
|
auto& w_pre = inputs[1];
|
||||||
auto& scales_pre = inputs[2];
|
auto& scales_pre = inputs[2];
|
||||||
auto& biases_pre = inputs[3];
|
auto& lhs_indices = inputs[inputs.size() - 2];
|
||||||
auto& lhs_indices = inputs[4];
|
auto& rhs_indices = inputs[inputs.size() - 1];
|
||||||
auto& rhs_indices = inputs[5];
|
|
||||||
|
|
||||||
std::vector<array> temps;
|
auto& encoder = cpu::get_command_encoder(stream());
|
||||||
auto ensure_row_contiguous_last_dims = [s = stream(),
|
auto ensure_row_contiguous_last_dims = [s = stream(),
|
||||||
&temps](const array& arr) {
|
&encoder](const array& arr) {
|
||||||
auto stride_0 = arr.strides()[arr.ndim() - 2];
|
auto stride_0 = arr.strides()[arr.ndim() - 2];
|
||||||
auto stride_1 = arr.strides()[arr.ndim() - 1];
|
auto stride_1 = arr.strides()[arr.ndim() - 1];
|
||||||
if (stride_0 == arr.shape(-1) && stride_1 == 1) {
|
if (stride_0 == arr.shape(-1) && stride_1 == 1) {
|
||||||
return arr;
|
return arr;
|
||||||
} else {
|
} else {
|
||||||
temps.push_back(array(arr.shape(), arr.dtype(), nullptr, {}));
|
auto arr_cpy = array(arr.shape(), arr.dtype(), nullptr, {});
|
||||||
copy_cpu(arr, temps.back(), CopyType::General, s);
|
copy_cpu(arr, arr_cpy, CopyType::General, s);
|
||||||
return temps.back();
|
encoder.add_temporary(arr_cpy);
|
||||||
|
return arr_cpy;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
auto x = ensure_row_contiguous_last_dims(x_pre);
|
auto x = ensure_row_contiguous_last_dims(x_pre);
|
||||||
auto w = ensure_row_contiguous_last_dims(w_pre);
|
auto w = ensure_row_contiguous_last_dims(w_pre);
|
||||||
auto scales = ensure_row_contiguous_last_dims(scales_pre);
|
auto scales = ensure_row_contiguous_last_dims(scales_pre);
|
||||||
auto biases = ensure_row_contiguous_last_dims(biases_pre);
|
|
||||||
|
|
||||||
out.set_data(allocator::malloc(out.nbytes()));
|
out.set_data(allocator::malloc(out.nbytes()));
|
||||||
|
|
||||||
auto& encoder = cpu::get_command_encoder(stream());
|
|
||||||
encoder.add_temporaries(std::move(temps));
|
|
||||||
encoder.set_input_array(x);
|
encoder.set_input_array(x);
|
||||||
encoder.set_input_array(w);
|
encoder.set_input_array(w);
|
||||||
encoder.set_input_array(scales);
|
encoder.set_input_array(scales);
|
||||||
encoder.set_input_array(biases);
|
|
||||||
encoder.set_input_array(lhs_indices);
|
encoder.set_input_array(lhs_indices);
|
||||||
encoder.set_input_array(rhs_indices);
|
encoder.set_input_array(rhs_indices);
|
||||||
encoder.set_output_array(out);
|
encoder.set_output_array(out);
|
||||||
|
if (mode_ == QuantizationMode::Affine) {
|
||||||
|
auto biases = ensure_row_contiguous_last_dims(inputs[3]);
|
||||||
|
encoder.set_input_array(biases);
|
||||||
encoder.dispatch([out = array::unsafe_weak_copy(out),
|
encoder.dispatch([out = array::unsafe_weak_copy(out),
|
||||||
x = array::unsafe_weak_copy(x),
|
x = array::unsafe_weak_copy(x),
|
||||||
w = array::unsafe_weak_copy(w),
|
w = array::unsafe_weak_copy(w),
|
||||||
@@ -622,6 +945,18 @@ void GatherQMM::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|||||||
bits_,
|
bits_,
|
||||||
transpose_);
|
transpose_);
|
||||||
});
|
});
|
||||||
|
} else {
|
||||||
|
encoder.dispatch([out = array::unsafe_weak_copy(out),
|
||||||
|
x = array::unsafe_weak_copy(x),
|
||||||
|
w = array::unsafe_weak_copy(w),
|
||||||
|
scales = array::unsafe_weak_copy(scales),
|
||||||
|
lhs_indices = array::unsafe_weak_copy(lhs_indices),
|
||||||
|
rhs_indices = array::unsafe_weak_copy(rhs_indices),
|
||||||
|
transpose_ = transpose_]() mutable {
|
||||||
|
mxfp4_bs_qmm_dispatch(
|
||||||
|
out, x, w, scales, lhs_indices, rhs_indices, transpose_);
|
||||||
|
});
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename T, typename U>
|
template <typename T, typename U>
|
||||||
@@ -705,7 +1040,7 @@ void dispatch_quantize(
|
|||||||
w_ptr, out_ptr, scales_ptr, biases_ptr, bits, group_size, w.size());
|
w_ptr, out_ptr, scales_ptr, biases_ptr, bits, group_size, w.size());
|
||||||
}
|
}
|
||||||
|
|
||||||
void fast::AffineQuantize::eval_cpu(
|
void fast::Quantize::eval_cpu(
|
||||||
const std::vector<array>& inputs,
|
const std::vector<array>& inputs,
|
||||||
std::vector<array>& outputs) {
|
std::vector<array>& outputs) {
|
||||||
auto ensure_row_contiguous = [s = stream()](const array& arr) {
|
auto ensure_row_contiguous = [s = stream()](const array& arr) {
|
||||||
@@ -764,7 +1099,7 @@ void fast::AffineQuantize::eval_cpu(
|
|||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
throw std::runtime_error(
|
throw std::runtime_error(
|
||||||
"[fast::AffineQuantize::eval_cpu] Only supports floating point inputs");
|
"[fast::Quantize::eval_cpu] Only supports floating point inputs");
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -491,19 +491,27 @@ void Reduce::eval_cpu(const std::vector<array>& inputs, array& out) {
|
|||||||
switch (in.dtype()) {
|
switch (in.dtype()) {
|
||||||
case bool_:
|
case bool_:
|
||||||
case uint8:
|
case uint8:
|
||||||
|
reduce_dispatch_sum_prod<uint8_t>(in, out, reduce_type_, axes_);
|
||||||
|
break;
|
||||||
|
case uint16:
|
||||||
|
reduce_dispatch_sum_prod<uint16_t>(in, out, reduce_type_, axes_);
|
||||||
|
break;
|
||||||
|
case uint32:
|
||||||
|
reduce_dispatch_sum_prod<uint32_t>(in, out, reduce_type_, axes_);
|
||||||
|
break;
|
||||||
|
case uint64:
|
||||||
|
reduce_dispatch_sum_prod<uint64_t>(in, out, reduce_type_, axes_);
|
||||||
|
break;
|
||||||
case int8:
|
case int8:
|
||||||
reduce_dispatch_sum_prod<int8_t>(in, out, reduce_type_, axes_);
|
reduce_dispatch_sum_prod<int8_t>(in, out, reduce_type_, axes_);
|
||||||
break;
|
break;
|
||||||
case int16:
|
case int16:
|
||||||
case uint16:
|
|
||||||
reduce_dispatch_sum_prod<int16_t>(in, out, reduce_type_, axes_);
|
reduce_dispatch_sum_prod<int16_t>(in, out, reduce_type_, axes_);
|
||||||
break;
|
break;
|
||||||
case int32:
|
case int32:
|
||||||
case uint32:
|
|
||||||
reduce_dispatch_sum_prod<int32_t>(in, out, reduce_type_, axes_);
|
reduce_dispatch_sum_prod<int32_t>(in, out, reduce_type_, axes_);
|
||||||
break;
|
break;
|
||||||
case int64:
|
case int64:
|
||||||
case uint64:
|
|
||||||
reduce_dispatch_sum_prod<int64_t>(in, out, reduce_type_, axes_);
|
reduce_dispatch_sum_prod<int64_t>(in, out, reduce_type_, axes_);
|
||||||
break;
|
break;
|
||||||
case float16:
|
case float16:
|
||||||
|
|||||||
@@ -234,6 +234,7 @@ Simd<T, N> remainder(Simd<T, N> a, Simd<T, N> b) {
|
|||||||
|
|
||||||
template <typename MaskT, typename T1, typename T2, int N>
|
template <typename MaskT, typename T1, typename T2, int N>
|
||||||
Simd<T1, N> select(Simd<MaskT, N> mask, Simd<T1, N> x, Simd<T2, N> y) {
|
Simd<T1, N> select(Simd<MaskT, N> mask, Simd<T1, N> x, Simd<T2, N> y) {
|
||||||
|
static_assert(std::is_same_v<MaskT, bool>);
|
||||||
if constexpr (sizeof(T1) == 1) {
|
if constexpr (sizeof(T1) == 1) {
|
||||||
return asd::bitselect(y.value, x.value, asd::convert<char>(mask.value));
|
return asd::bitselect(y.value, x.value, asd::convert<char>(mask.value));
|
||||||
} else if constexpr (sizeof(T1) == 2) {
|
} else if constexpr (sizeof(T1) == 2) {
|
||||||
@@ -251,9 +252,13 @@ Simd<T, N> pow(Simd<T, N> base, Simd<T, N> exp) {
|
|||||||
return asd::pow(base.value, exp.value);
|
return asd::pow(base.value, exp.value);
|
||||||
} else {
|
} else {
|
||||||
Simd<T, N> res = 1;
|
Simd<T, N> res = 1;
|
||||||
while (any(exp)) {
|
// Raising an integer to a negative power is undefined
|
||||||
res = select(exp & 1, res * base, res);
|
if (any(exp < 0)) {
|
||||||
base = select(exp, base * base, base);
|
return 0;
|
||||||
|
}
|
||||||
|
while (any(exp > 0)) {
|
||||||
|
res = select((exp & 1) != 0, res * base, res);
|
||||||
|
base = select(exp > 0, base * base, base);
|
||||||
exp = exp >> 1;
|
exp = exp >> 1;
|
||||||
}
|
}
|
||||||
return res;
|
return res;
|
||||||
|
|||||||
@@ -81,9 +81,7 @@ void svd_impl(
|
|||||||
// Vᵀ of shape N x N. (M x M in lapack).
|
// Vᵀ of shape N x N. (M x M in lapack).
|
||||||
const int ldvt = M;
|
const int ldvt = M;
|
||||||
|
|
||||||
auto job_u = (u_ptr) ? "V" : "N";
|
auto jobz = (u_ptr) ? "A" : "N";
|
||||||
auto job_vt = (u_ptr) ? "V" : "N";
|
|
||||||
static constexpr auto range = "A";
|
|
||||||
|
|
||||||
// Will contain the number of singular values after the call has returned.
|
// Will contain the number of singular values after the call has returned.
|
||||||
int ns = 0;
|
int ns = 0;
|
||||||
@@ -91,30 +89,20 @@ void svd_impl(
|
|||||||
|
|
||||||
// Will contain the indices of eigenvectors that failed to converge (not
|
// Will contain the indices of eigenvectors that failed to converge (not
|
||||||
// used here but required by lapack).
|
// used here but required by lapack).
|
||||||
auto iwork = array::Data{allocator::malloc(sizeof(int) * 12 * K)};
|
auto iwork = array::Data{allocator::malloc(sizeof(int) * 8 * K)};
|
||||||
|
|
||||||
static const int lwork_query = -1;
|
static const int lwork_query = -1;
|
||||||
|
|
||||||
static const int ignored_int = 0;
|
|
||||||
static const T ignored_float = 0;
|
|
||||||
|
|
||||||
int info;
|
int info;
|
||||||
|
|
||||||
// Compute workspace size.
|
// Compute workspace size.
|
||||||
gesvdx<T>(
|
gesdd<T>(
|
||||||
/* jobu = */ job_u,
|
/* jobz = */ jobz,
|
||||||
/* jobvt = */ job_vt,
|
|
||||||
/* range = */ range,
|
|
||||||
// M and N are swapped since lapack expects column-major.
|
// M and N are swapped since lapack expects column-major.
|
||||||
/* m = */ &N,
|
/* m = */ &N,
|
||||||
/* n = */ &M,
|
/* n = */ &M,
|
||||||
/* a = */ nullptr,
|
/* a = */ nullptr,
|
||||||
/* lda = */ &lda,
|
/* lda = */ &lda,
|
||||||
/* vl = */ &ignored_float,
|
|
||||||
/* vu = */ &ignored_float,
|
|
||||||
/* il = */ &ignored_int,
|
|
||||||
/* iu = */ &ignored_int,
|
|
||||||
/* ns = */ &ns,
|
|
||||||
/* s = */ nullptr,
|
/* s = */ nullptr,
|
||||||
/* u = */ nullptr,
|
/* u = */ nullptr,
|
||||||
/* ldu = */ &ldu,
|
/* ldu = */ &ldu,
|
||||||
@@ -136,20 +124,13 @@ void svd_impl(
|
|||||||
|
|
||||||
// Loop over matrices.
|
// Loop over matrices.
|
||||||
for (int i = 0; i < num_matrices; i++) {
|
for (int i = 0; i < num_matrices; i++) {
|
||||||
gesvdx<T>(
|
gesdd<T>(
|
||||||
/* jobu = */ job_u,
|
/* jobz = */ jobz,
|
||||||
/* jobvt = */ job_vt,
|
|
||||||
/* range = */ range,
|
|
||||||
// M and N are swapped since lapack expects column-major.
|
// M and N are swapped since lapack expects column-major.
|
||||||
/* m = */ &N,
|
/* m = */ &N,
|
||||||
/* n = */ &M,
|
/* n = */ &M,
|
||||||
/* a = */ in_ptr + M * N * i,
|
/* a = */ in_ptr + M * N * i,
|
||||||
/* lda = */ &lda,
|
/* lda = */ &lda,
|
||||||
/* vl = */ &ignored_float,
|
|
||||||
/* vu = */ &ignored_float,
|
|
||||||
/* il = */ &ignored_int,
|
|
||||||
/* iu = */ &ignored_int,
|
|
||||||
/* ns = */ &ns,
|
|
||||||
/* s = */ s_ptr + K * i,
|
/* s = */ s_ptr + K * i,
|
||||||
// According to the identity above, lapack will write Vᵀᵀ as U.
|
// According to the identity above, lapack will write Vᵀᵀ as U.
|
||||||
/* u = */ vt_ptr ? vt_ptr + N * N * i : nullptr,
|
/* u = */ vt_ptr ? vt_ptr + N * N * i : nullptr,
|
||||||
@@ -167,13 +148,6 @@ void svd_impl(
|
|||||||
ss << "svd_impl: sgesvdx_ failed with code " << info;
|
ss << "svd_impl: sgesvdx_ failed with code " << info;
|
||||||
throw std::runtime_error(ss.str());
|
throw std::runtime_error(ss.str());
|
||||||
}
|
}
|
||||||
|
|
||||||
if (ns != K) {
|
|
||||||
std::stringstream ss;
|
|
||||||
ss << "svd_impl: expected " << K << " singular values, but " << ns
|
|
||||||
<< " were computed.";
|
|
||||||
throw std::runtime_error(ss.str());
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
encoder.add_temporary(in);
|
encoder.add_temporary(in);
|
||||||
|
|||||||
@@ -8,7 +8,6 @@ target_sources(
|
|||||||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/allocator.cpp
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/allocator.cpp
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/arange.cu
|
${CMAKE_CURRENT_SOURCE_DIR}/arange.cu
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/arg_reduce.cu
|
${CMAKE_CURRENT_SOURCE_DIR}/arg_reduce.cu
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/binary.cu
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/binary_two.cu
|
${CMAKE_CURRENT_SOURCE_DIR}/binary_two.cu
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/compiled.cpp
|
${CMAKE_CURRENT_SOURCE_DIR}/compiled.cpp
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/copy.cu
|
${CMAKE_CURRENT_SOURCE_DIR}/copy.cu
|
||||||
@@ -17,8 +16,13 @@ target_sources(
|
|||||||
${CMAKE_CURRENT_SOURCE_DIR}/copy/copy_general_dynamic.cu
|
${CMAKE_CURRENT_SOURCE_DIR}/copy/copy_general_dynamic.cu
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/copy/copy_general_input.cu
|
${CMAKE_CURRENT_SOURCE_DIR}/copy/copy_general_input.cu
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/conv.cpp
|
${CMAKE_CURRENT_SOURCE_DIR}/conv.cpp
|
||||||
|
${CMAKE_CURRENT_SOURCE_DIR}/conv/gemm_conv.cu
|
||||||
|
${CMAKE_CURRENT_SOURCE_DIR}/conv/gemm_grouped_conv.cu
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/cuda.cpp
|
${CMAKE_CURRENT_SOURCE_DIR}/cuda.cpp
|
||||||
|
${CMAKE_CURRENT_SOURCE_DIR}/cudnn_utils.cpp
|
||||||
|
${CMAKE_CURRENT_SOURCE_DIR}/custom_kernel.cpp
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/device.cpp
|
${CMAKE_CURRENT_SOURCE_DIR}/device.cpp
|
||||||
|
${CMAKE_CURRENT_SOURCE_DIR}/distributed.cu
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/eval.cpp
|
${CMAKE_CURRENT_SOURCE_DIR}/eval.cpp
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/event.cu
|
${CMAKE_CURRENT_SOURCE_DIR}/event.cu
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/fence.cpp
|
${CMAKE_CURRENT_SOURCE_DIR}/fence.cpp
|
||||||
@@ -45,18 +49,20 @@ target_sources(
|
|||||||
${CMAKE_CURRENT_SOURCE_DIR}/softmax.cu
|
${CMAKE_CURRENT_SOURCE_DIR}/softmax.cu
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/sort.cu
|
${CMAKE_CURRENT_SOURCE_DIR}/sort.cu
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/ternary.cu
|
${CMAKE_CURRENT_SOURCE_DIR}/ternary.cu
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/unary.cu
|
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/utils.cpp
|
${CMAKE_CURRENT_SOURCE_DIR}/utils.cpp
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/quantized/affine_quantize.cu
|
${CMAKE_CURRENT_SOURCE_DIR}/quantized/affine_quantize.cu
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/quantized/quantized.cpp
|
${CMAKE_CURRENT_SOURCE_DIR}/quantized/quantized.cpp
|
||||||
${CMAKE_CURRENT_SOURCE_DIR}/worker.cpp)
|
${CMAKE_CURRENT_SOURCE_DIR}/worker.cpp)
|
||||||
|
|
||||||
|
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/binary)
|
||||||
|
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/unary)
|
||||||
|
|
||||||
if(CMAKE_CUDA_COMPILER_VERSION VERSION_GREATER_EQUAL 12.9.0)
|
if(CMAKE_CUDA_COMPILER_VERSION VERSION_GREATER_EQUAL 12.9.0)
|
||||||
target_sources(
|
target_sources(
|
||||||
mlx PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/gemms/cublas_batched_gemm_12_9.cu)
|
mlx PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/gemms/cublas_gemm_batched_12_9.cu)
|
||||||
else()
|
else()
|
||||||
target_sources(
|
target_sources(
|
||||||
mlx PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/gemms/cublas_batched_gemm_12_0.cpp)
|
mlx PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/gemms/cublas_gemm_batched_12_0.cpp)
|
||||||
endif()
|
endif()
|
||||||
|
|
||||||
target_compile_definitions(mlx PRIVATE MLX_USE_CUDA)
|
target_compile_definitions(mlx PRIVATE MLX_USE_CUDA)
|
||||||
@@ -148,7 +154,7 @@ target_link_libraries(mlx PRIVATE CUDA::nvrtc CUDA::cuda_driver)
|
|||||||
FetchContent_Declare(
|
FetchContent_Declare(
|
||||||
cudnn
|
cudnn
|
||||||
GIT_REPOSITORY https://github.com/NVIDIA/cudnn-frontend.git
|
GIT_REPOSITORY https://github.com/NVIDIA/cudnn-frontend.git
|
||||||
GIT_TAG v1.12.1
|
GIT_TAG v1.14.0
|
||||||
GIT_SHALLOW TRUE
|
GIT_SHALLOW TRUE
|
||||||
EXCLUDE_FROM_ALL)
|
EXCLUDE_FROM_ALL)
|
||||||
set(CUDNN_FRONTEND_SKIP_JSON_LIB ON)
|
set(CUDNN_FRONTEND_SKIP_JSON_LIB ON)
|
||||||
|
|||||||
@@ -30,8 +30,15 @@ SmallSizePool::SmallSizePool() {
|
|||||||
next_free_ = buffer_;
|
next_free_ = buffer_;
|
||||||
|
|
||||||
CHECK_CUDA_ERROR(cudaMallocManaged(&data_, small_pool_size));
|
CHECK_CUDA_ERROR(cudaMallocManaged(&data_, small_pool_size));
|
||||||
|
#if CUDART_VERSION >= 13000
|
||||||
|
cudaMemLocation loc;
|
||||||
|
loc.type = cudaMemLocationTypeDevice;
|
||||||
|
loc.id = 0;
|
||||||
|
#else
|
||||||
|
int loc = 0;
|
||||||
|
#endif // CUDART_VERSION >= 13000
|
||||||
CHECK_CUDA_ERROR(
|
CHECK_CUDA_ERROR(
|
||||||
cudaMemAdvise(data_, small_pool_size, cudaMemAdviseSetReadMostly, 0));
|
cudaMemAdvise(data_, small_pool_size, cudaMemAdviseSetReadMostly, loc));
|
||||||
|
|
||||||
auto curr = next_free_;
|
auto curr = next_free_;
|
||||||
for (size_t i = 1; i < num_blocks; ++i) {
|
for (size_t i = 1; i < num_blocks; ++i) {
|
||||||
|
|||||||
@@ -6,23 +6,33 @@
|
|||||||
#include "mlx/dtype_utils.h"
|
#include "mlx/dtype_utils.h"
|
||||||
#include "mlx/primitives.h"
|
#include "mlx/primitives.h"
|
||||||
|
|
||||||
|
#include <cooperative_groups.h>
|
||||||
#include <nvtx3/nvtx3.hpp>
|
#include <nvtx3/nvtx3.hpp>
|
||||||
#include <thrust/device_ptr.h>
|
|
||||||
#include <thrust/transform.h>
|
|
||||||
|
|
||||||
namespace mlx::core {
|
namespace mlx::core {
|
||||||
|
|
||||||
namespace cu {
|
namespace cu {
|
||||||
|
|
||||||
template <typename T>
|
namespace cg = cooperative_groups;
|
||||||
struct Arange {
|
|
||||||
const T start;
|
|
||||||
const T step;
|
|
||||||
|
|
||||||
__device__ T operator()(uint32_t i) const {
|
template <typename T, typename IdxT, int N_WRITES>
|
||||||
return start + i * step;
|
__global__ void arange(T* out, IdxT size, T start, T step) {
|
||||||
|
IdxT index = cg::this_grid().thread_rank();
|
||||||
|
|
||||||
|
if ((index + 1) * N_WRITES > size) {
|
||||||
|
for (IdxT i = index * N_WRITES; i < size; ++i) {
|
||||||
|
out[i] = start + i * step;
|
||||||
}
|
}
|
||||||
};
|
} else {
|
||||||
|
AlignedVector<T, N_WRITES> out_vec;
|
||||||
|
#pragma unroll
|
||||||
|
for (int i = 0; i < N_WRITES; ++i) {
|
||||||
|
out_vec[i] = start + (index * N_WRITES + i) * step;
|
||||||
|
}
|
||||||
|
|
||||||
|
store_vector<N_WRITES>(out, index, out_vec);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
} // namespace cu
|
} // namespace cu
|
||||||
|
|
||||||
@@ -36,19 +46,23 @@ void Arange::eval_gpu(const std::vector<array>& inputs, array& out) {
|
|||||||
auto& encoder = cu::get_command_encoder(stream());
|
auto& encoder = cu::get_command_encoder(stream());
|
||||||
encoder.set_output_array(out);
|
encoder.set_output_array(out);
|
||||||
|
|
||||||
auto capture = encoder.capture_context();
|
|
||||||
dispatch_int_float_types(out.dtype(), "Arange", [&](auto type_tag) {
|
dispatch_int_float_types(out.dtype(), "Arange", [&](auto type_tag) {
|
||||||
using CTYPE = MLX_GET_TYPE(type_tag);
|
using CTYPE = MLX_GET_TYPE(type_tag);
|
||||||
using OutType = cuda_type_t<CTYPE>;
|
using OutType = cuda_type_t<CTYPE>;
|
||||||
CTYPE step =
|
constexpr int N_WRITES = 16 / sizeof(OutType);
|
||||||
static_cast<CTYPE>(start_ + step_) - static_cast<CTYPE>(start_);
|
dispatch_bool(out.data_size() > INT32_MAX, [&](auto large) {
|
||||||
thrust::transform(
|
using IdxT = std::conditional_t<large(), int64_t, int32_t>;
|
||||||
cu::thrust_policy(encoder.stream()),
|
auto [num_blocks, block_dims] = get_launch_args(out, large(), N_WRITES);
|
||||||
thrust::counting_iterator<uint32_t>(0),
|
encoder.add_kernel_node(
|
||||||
thrust::counting_iterator<uint32_t>(out.data_size()),
|
cu::arange<OutType, IdxT, N_WRITES>,
|
||||||
thrust::device_pointer_cast(out.data<OutType>()),
|
num_blocks,
|
||||||
cu::Arange<OutType>{
|
block_dims,
|
||||||
static_cast<OutType>(start_), static_cast<OutType>(step)});
|
0,
|
||||||
|
out.data<OutType>(),
|
||||||
|
out.data_size(),
|
||||||
|
static_cast<CTYPE>(start_),
|
||||||
|
static_cast<CTYPE>(start_ + step_) - static_cast<CTYPE>(start_));
|
||||||
|
});
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
21
mlx/backend/cuda/binary/CMakeLists.txt
Normal file
21
mlx/backend/cuda/binary/CMakeLists.txt
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
target_sources(
|
||||||
|
mlx
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/add.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/arctan2.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/bitwise_binary.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/divide.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/equal.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/greater.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/greater_equal.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/less.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/less_equal.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/logical_and.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/logical_or.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/log_add_exp.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/minimum.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/maximum.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/multiply.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/power.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/remainder.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/not_equal.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/subtract.cu)
|
||||||
7
mlx/backend/cuda/binary/add.cu
Normal file
7
mlx/backend/cuda/binary/add.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/binary/binary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
BINARY_GPU(Add)
|
||||||
|
} // namespace mlx::core
|
||||||
7
mlx/backend/cuda/binary/arctan2.cu
Normal file
7
mlx/backend/cuda/binary/arctan2.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/binary/binary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
BINARY_GPU(ArcTan2)
|
||||||
|
} // namespace mlx::core
|
||||||
@@ -99,39 +99,89 @@ __global__ void binary_vv(const In* a, const In* b, Out* out, IdxT size) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename Op, typename In, typename Out, typename IdxT, int NDIM>
|
template <
|
||||||
|
typename Op,
|
||||||
|
typename In,
|
||||||
|
typename Out,
|
||||||
|
typename IdxT,
|
||||||
|
int NDIM,
|
||||||
|
int N_READS>
|
||||||
__global__ void binary_g_nd(
|
__global__ void binary_g_nd(
|
||||||
const In* a,
|
const In* a,
|
||||||
const In* b,
|
const In* b,
|
||||||
Out* out,
|
Out* out,
|
||||||
IdxT size,
|
IdxT size_rest,
|
||||||
const __grid_constant__ cuda::std::array<int32_t, NDIM> shape,
|
const __grid_constant__ cuda::std::array<int32_t, NDIM> shape,
|
||||||
const __grid_constant__ cuda::std::array<int64_t, NDIM> a_strides,
|
const __grid_constant__ cuda::std::array<int64_t, NDIM> a_strides,
|
||||||
const __grid_constant__ cuda::std::array<int64_t, NDIM> b_strides) {
|
const __grid_constant__ cuda::std::array<int64_t, NDIM> b_strides) {
|
||||||
IdxT index = cg::this_grid().thread_rank();
|
auto block = cg::this_thread_block();
|
||||||
if (index < size) {
|
auto grid = cg::this_grid();
|
||||||
auto [a_idx, b_idx] = elem_to_loc_nd<NDIM>(
|
IdxT index_rest =
|
||||||
index, shape.data(), a_strides.data(), b_strides.data());
|
grid.block_index().y * block.dim_threads().y + block.thread_index().y;
|
||||||
out[index] = Op{}(a[a_idx], b[b_idx]);
|
if (index_rest >= size_rest) {
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
auto shape_x = shape[NDIM - 1];
|
||||||
|
auto a_stride_x = a_strides[NDIM - 1];
|
||||||
|
auto b_stride_x = b_strides[NDIM - 1];
|
||||||
|
IdxT index_x =
|
||||||
|
grid.block_index().x * block.dim_threads().x + block.thread_index().x;
|
||||||
|
auto [a_idx, b_idx] = elem_to_loc_nd<NDIM>(
|
||||||
|
index_rest * shape_x, shape.data(), a_strides.data(), b_strides.data());
|
||||||
|
auto a_vec =
|
||||||
|
load_vector<N_READS>(a + a_idx, index_x, shape_x, a_stride_x, In(0));
|
||||||
|
auto b_vec =
|
||||||
|
load_vector<N_READS>(b + b_idx, index_x, shape_x, b_stride_x, In(0));
|
||||||
|
|
||||||
|
AlignedVector<Out, N_READS> out_vec;
|
||||||
|
#pragma unroll
|
||||||
|
for (int i = 0; i < N_READS; ++i) {
|
||||||
|
out_vec[i] = Op{}(a_vec[i], b_vec[i]);
|
||||||
|
}
|
||||||
|
store_vector(out + shape_x * index_rest, index_x, out_vec, shape_x);
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename Op, typename In, typename Out, typename IdxT>
|
template <typename Op, typename In, typename Out, typename IdxT, int N_READS>
|
||||||
__global__ void binary_g(
|
__global__ void binary_g(
|
||||||
const In* a,
|
const In* a,
|
||||||
const In* b,
|
const In* b,
|
||||||
Out* out,
|
Out* out,
|
||||||
IdxT size,
|
IdxT size_rest,
|
||||||
const __grid_constant__ Shape shape,
|
const __grid_constant__ Shape shape,
|
||||||
const __grid_constant__ Strides a_strides,
|
const __grid_constant__ Strides a_strides,
|
||||||
const __grid_constant__ Strides b_strides,
|
const __grid_constant__ Strides b_strides,
|
||||||
int ndim) {
|
int ndim) {
|
||||||
IdxT index = cg::this_grid().thread_rank();
|
auto block = cg::this_thread_block();
|
||||||
if (index < size) {
|
auto grid = cg::this_grid();
|
||||||
auto [a_idx, b_idx] = elem_to_loc(
|
IdxT index_rest =
|
||||||
index, shape.data(), a_strides.data(), b_strides.data(), ndim);
|
grid.block_index().y * block.dim_threads().y + block.thread_index().y;
|
||||||
out[index] = Op{}(a[a_idx], b[b_idx]);
|
if (index_rest >= size_rest) {
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
auto shape_x = shape[ndim - 1];
|
||||||
|
auto a_stride_x = a_strides[ndim - 1];
|
||||||
|
auto b_stride_x = b_strides[ndim - 1];
|
||||||
|
IdxT index_x =
|
||||||
|
grid.block_index().x * block.dim_threads().x + block.thread_index().x;
|
||||||
|
auto [a_idx, b_idx] = elem_to_loc(
|
||||||
|
index_rest * shape_x,
|
||||||
|
shape.data(),
|
||||||
|
a_strides.data(),
|
||||||
|
b_strides.data(),
|
||||||
|
ndim);
|
||||||
|
auto a_vec =
|
||||||
|
load_vector<N_READS>(a + a_idx, index_x, shape_x, a_stride_x, In(0));
|
||||||
|
auto b_vec =
|
||||||
|
load_vector<N_READS>(b + b_idx, index_x, shape_x, b_stride_x, In(0));
|
||||||
|
|
||||||
|
AlignedVector<Out, N_READS> out_vec;
|
||||||
|
#pragma unroll
|
||||||
|
for (int i = 0; i < N_READS; ++i) {
|
||||||
|
out_vec[i] = Op{}(a_vec[i], b_vec[i]);
|
||||||
|
}
|
||||||
|
store_vector(out + shape_x * index_rest, index_x, out_vec, shape_x);
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename Op, typename In, typename Out>
|
template <typename Op, typename In, typename Out>
|
||||||
@@ -209,39 +259,61 @@ void binary_op_gpu_inplace(
|
|||||||
auto& a_strides = strides[0];
|
auto& a_strides = strides[0];
|
||||||
auto& b_strides = strides[1];
|
auto& b_strides = strides[1];
|
||||||
int ndim = shape.size();
|
int ndim = shape.size();
|
||||||
|
int work_per_thread = 1;
|
||||||
|
auto dim0 = ndim > 0 ? shape.back() : 1;
|
||||||
|
auto rest = out.size() / dim0;
|
||||||
|
if (dim0 >= 4) {
|
||||||
|
work_per_thread = 4;
|
||||||
|
}
|
||||||
|
dim0 = (dim0 + work_per_thread - 1) / work_per_thread;
|
||||||
|
auto block_dims = get_block_dims(dim0, rest, 1);
|
||||||
|
uint32_t num_blocks_x = cuda::ceil_div(dim0, block_dims.x);
|
||||||
|
uint32_t num_blocks_y = cuda::ceil_div(rest, block_dims.y);
|
||||||
if (ndim <= 3) {
|
if (ndim <= 3) {
|
||||||
dispatch_1_2_3(ndim, [&](auto dims_constant) {
|
dispatch_1_2_3(ndim, [&](auto dims_constant) {
|
||||||
auto [num_blocks, block_dims] =
|
auto kernel = cu::binary_g_nd<
|
||||||
get_launch_args(out, large());
|
|
||||||
encoder.add_kernel_node(
|
|
||||||
cu::binary_g_nd<
|
|
||||||
Op,
|
Op,
|
||||||
InType,
|
InType,
|
||||||
OutType,
|
OutType,
|
||||||
IdxT,
|
IdxT,
|
||||||
dims_constant()>,
|
dims_constant(),
|
||||||
num_blocks,
|
1>;
|
||||||
|
if (work_per_thread == 4) {
|
||||||
|
kernel = cu::binary_g_nd<
|
||||||
|
Op,
|
||||||
|
InType,
|
||||||
|
OutType,
|
||||||
|
IdxT,
|
||||||
|
dims_constant(),
|
||||||
|
4>;
|
||||||
|
}
|
||||||
|
encoder.add_kernel_node(
|
||||||
|
kernel,
|
||||||
|
{num_blocks_x, num_blocks_y},
|
||||||
block_dims,
|
block_dims,
|
||||||
0,
|
0,
|
||||||
a.data<InType>(),
|
a.data<InType>(),
|
||||||
b.data<InType>(),
|
b.data<InType>(),
|
||||||
out.data<OutType>(),
|
out.data<OutType>(),
|
||||||
out.size(),
|
rest,
|
||||||
const_param<dims_constant()>(shape),
|
const_param<dims_constant()>(shape),
|
||||||
const_param<dims_constant()>(a_strides),
|
const_param<dims_constant()>(a_strides),
|
||||||
const_param<dims_constant()>(b_strides));
|
const_param<dims_constant()>(b_strides));
|
||||||
});
|
});
|
||||||
} else {
|
} else {
|
||||||
auto [num_blocks, block_dims] = get_launch_args(out, large());
|
auto kernel = cu::binary_g<Op, InType, OutType, IdxT, 1>;
|
||||||
|
if (work_per_thread == 4) {
|
||||||
|
kernel = cu::binary_g<Op, InType, OutType, IdxT, 4>;
|
||||||
|
}
|
||||||
encoder.add_kernel_node(
|
encoder.add_kernel_node(
|
||||||
cu::binary_g<Op, InType, OutType, IdxT>,
|
kernel,
|
||||||
num_blocks,
|
{num_blocks_x, num_blocks_y},
|
||||||
block_dims,
|
block_dims,
|
||||||
0,
|
0,
|
||||||
a.data<InType>(),
|
a.data<InType>(),
|
||||||
b.data<InType>(),
|
b.data<InType>(),
|
||||||
out.data<OutType>(),
|
out.data<OutType>(),
|
||||||
out.size(),
|
rest,
|
||||||
const_param(shape),
|
const_param(shape),
|
||||||
const_param(a_strides),
|
const_param(a_strides),
|
||||||
const_param(b_strides),
|
const_param(b_strides),
|
||||||
@@ -304,54 +376,4 @@ void binary_op_gpu(
|
|||||||
binary_op_gpu<cu::func>(inputs, out, name(), s); \
|
binary_op_gpu<cu::func>(inputs, out, name(), s); \
|
||||||
}
|
}
|
||||||
|
|
||||||
BINARY_GPU(Add)
|
|
||||||
BINARY_GPU(ArcTan2)
|
|
||||||
BINARY_GPU(Divide)
|
|
||||||
BINARY_GPU(Remainder)
|
|
||||||
BINARY_GPU(Greater)
|
|
||||||
BINARY_GPU(GreaterEqual)
|
|
||||||
BINARY_GPU(Less)
|
|
||||||
BINARY_GPU(LessEqual)
|
|
||||||
BINARY_GPU(LogicalAnd)
|
|
||||||
BINARY_GPU(LogicalOr)
|
|
||||||
BINARY_GPU(LogAddExp)
|
|
||||||
BINARY_GPU(Maximum)
|
|
||||||
BINARY_GPU(Minimum)
|
|
||||||
BINARY_GPU(Multiply)
|
|
||||||
BINARY_GPU(NotEqual)
|
|
||||||
BINARY_GPU(Power)
|
|
||||||
BINARY_GPU(Subtract)
|
|
||||||
|
|
||||||
void Equal::eval_gpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
nvtx3::scoped_range r("Equal::eval_gpu");
|
|
||||||
auto& s = out.primitive().stream();
|
|
||||||
if (equal_nan_) {
|
|
||||||
binary_op_gpu<cu::NaNEqual>(inputs, out, name(), s);
|
|
||||||
} else {
|
|
||||||
binary_op_gpu<cu::Equal>(inputs, out, name(), s);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void BitwiseBinary::eval_gpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
nvtx3::scoped_range r("BitwiseBinary::eval_gpu");
|
|
||||||
auto& s = out.primitive().stream();
|
|
||||||
switch (op_) {
|
|
||||||
case BitwiseBinary::And:
|
|
||||||
binary_op_gpu<cu::BitwiseAnd>(inputs, out, name(), s);
|
|
||||||
break;
|
|
||||||
case BitwiseBinary::Or:
|
|
||||||
binary_op_gpu<cu::BitwiseOr>(inputs, out, name(), s);
|
|
||||||
break;
|
|
||||||
case BitwiseBinary::Xor:
|
|
||||||
binary_op_gpu<cu::BitwiseXor>(inputs, out, name(), s);
|
|
||||||
break;
|
|
||||||
case BitwiseBinary::LeftShift:
|
|
||||||
binary_op_gpu<cu::LeftShift>(inputs, out, name(), s);
|
|
||||||
break;
|
|
||||||
case BitwiseBinary::RightShift:
|
|
||||||
binary_op_gpu<cu::RightShift>(inputs, out, name(), s);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace mlx::core
|
} // namespace mlx::core
|
||||||
27
mlx/backend/cuda/binary/bitwise_binary.cu
Normal file
27
mlx/backend/cuda/binary/bitwise_binary.cu
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/binary/binary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
void BitwiseBinary::eval_gpu(const std::vector<array>& inputs, array& out) {
|
||||||
|
nvtx3::scoped_range r("BitwiseBinary::eval_gpu");
|
||||||
|
auto& s = out.primitive().stream();
|
||||||
|
switch (op_) {
|
||||||
|
case BitwiseBinary::And:
|
||||||
|
binary_op_gpu<cu::BitwiseAnd>(inputs, out, name(), s);
|
||||||
|
break;
|
||||||
|
case BitwiseBinary::Or:
|
||||||
|
binary_op_gpu<cu::BitwiseOr>(inputs, out, name(), s);
|
||||||
|
break;
|
||||||
|
case BitwiseBinary::Xor:
|
||||||
|
binary_op_gpu<cu::BitwiseXor>(inputs, out, name(), s);
|
||||||
|
break;
|
||||||
|
case BitwiseBinary::LeftShift:
|
||||||
|
binary_op_gpu<cu::LeftShift>(inputs, out, name(), s);
|
||||||
|
break;
|
||||||
|
case BitwiseBinary::RightShift:
|
||||||
|
binary_op_gpu<cu::RightShift>(inputs, out, name(), s);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} // namespace mlx::core
|
||||||
7
mlx/backend/cuda/binary/divide.cu
Normal file
7
mlx/backend/cuda/binary/divide.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/binary/binary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
BINARY_GPU(Divide)
|
||||||
|
} // namespace mlx::core
|
||||||
15
mlx/backend/cuda/binary/equal.cu
Normal file
15
mlx/backend/cuda/binary/equal.cu
Normal file
@@ -0,0 +1,15 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/binary/binary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
void Equal::eval_gpu(const std::vector<array>& inputs, array& out) {
|
||||||
|
nvtx3::scoped_range r("Equal::eval_gpu");
|
||||||
|
auto& s = out.primitive().stream();
|
||||||
|
if (equal_nan_) {
|
||||||
|
binary_op_gpu<cu::NaNEqual>(inputs, out, name(), s);
|
||||||
|
} else {
|
||||||
|
binary_op_gpu<cu::Equal>(inputs, out, name(), s);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} // namespace mlx::core
|
||||||
7
mlx/backend/cuda/binary/greater.cu
Normal file
7
mlx/backend/cuda/binary/greater.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/binary/binary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
BINARY_GPU(Greater)
|
||||||
|
} // namespace mlx::core
|
||||||
7
mlx/backend/cuda/binary/greater_equal.cu
Normal file
7
mlx/backend/cuda/binary/greater_equal.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/binary/binary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
BINARY_GPU(GreaterEqual)
|
||||||
|
} // namespace mlx::core
|
||||||
7
mlx/backend/cuda/binary/less.cu
Normal file
7
mlx/backend/cuda/binary/less.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/binary/binary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
BINARY_GPU(Less)
|
||||||
|
} // namespace mlx::core
|
||||||
7
mlx/backend/cuda/binary/less_equal.cu
Normal file
7
mlx/backend/cuda/binary/less_equal.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/binary/binary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
BINARY_GPU(LessEqual)
|
||||||
|
} // namespace mlx::core
|
||||||
7
mlx/backend/cuda/binary/log_add_exp.cu
Normal file
7
mlx/backend/cuda/binary/log_add_exp.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/binary/binary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
BINARY_GPU(LogAddExp)
|
||||||
|
} // namespace mlx::core
|
||||||
7
mlx/backend/cuda/binary/logical_and.cu
Normal file
7
mlx/backend/cuda/binary/logical_and.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/binary/binary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
BINARY_GPU(LogicalAnd)
|
||||||
|
} // namespace mlx::core
|
||||||
7
mlx/backend/cuda/binary/logical_or.cu
Normal file
7
mlx/backend/cuda/binary/logical_or.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/binary/binary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
BINARY_GPU(LogicalOr)
|
||||||
|
} // namespace mlx::core
|
||||||
7
mlx/backend/cuda/binary/maximum.cu
Normal file
7
mlx/backend/cuda/binary/maximum.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/binary/binary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
BINARY_GPU(Maximum)
|
||||||
|
} // namespace mlx::core
|
||||||
7
mlx/backend/cuda/binary/minimum.cu
Normal file
7
mlx/backend/cuda/binary/minimum.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/binary/binary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
BINARY_GPU(Minimum)
|
||||||
|
} // namespace mlx::core
|
||||||
7
mlx/backend/cuda/binary/multiply.cu
Normal file
7
mlx/backend/cuda/binary/multiply.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/binary/binary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
BINARY_GPU(Multiply)
|
||||||
|
} // namespace mlx::core
|
||||||
7
mlx/backend/cuda/binary/not_equal.cu
Normal file
7
mlx/backend/cuda/binary/not_equal.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/binary/binary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
BINARY_GPU(NotEqual)
|
||||||
|
} // namespace mlx::core
|
||||||
7
mlx/backend/cuda/binary/power.cu
Normal file
7
mlx/backend/cuda/binary/power.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/binary/binary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
BINARY_GPU(Power)
|
||||||
|
} // namespace mlx::core
|
||||||
7
mlx/backend/cuda/binary/remainder.cu
Normal file
7
mlx/backend/cuda/binary/remainder.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/binary/binary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
BINARY_GPU(Remainder)
|
||||||
|
} // namespace mlx::core
|
||||||
7
mlx/backend/cuda/binary/subtract.cu
Normal file
7
mlx/backend/cuda/binary/subtract.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/binary/binary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
BINARY_GPU(Subtract)
|
||||||
|
} // namespace mlx::core
|
||||||
@@ -127,45 +127,99 @@ binary_two_vv(const In* a, const In* b, Out* out_a, Out* out_b, IdxT size) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename Op, typename In, typename Out, typename IdxT, int NDIM>
|
template <
|
||||||
|
typename Op,
|
||||||
|
typename In,
|
||||||
|
typename Out,
|
||||||
|
typename IdxT,
|
||||||
|
int NDIM,
|
||||||
|
int N_READS>
|
||||||
__global__ void binary_two_g_nd(
|
__global__ void binary_two_g_nd(
|
||||||
const In* a,
|
const In* a,
|
||||||
const In* b,
|
const In* b,
|
||||||
Out* out_a,
|
Out* out_a,
|
||||||
Out* out_b,
|
Out* out_b,
|
||||||
IdxT size,
|
IdxT size_rest,
|
||||||
const __grid_constant__ cuda::std::array<int32_t, NDIM> shape,
|
const __grid_constant__ cuda::std::array<int32_t, NDIM> shape,
|
||||||
const __grid_constant__ cuda::std::array<int64_t, NDIM> a_strides,
|
const __grid_constant__ cuda::std::array<int64_t, NDIM> a_strides,
|
||||||
const __grid_constant__ cuda::std::array<int64_t, NDIM> b_strides) {
|
const __grid_constant__ cuda::std::array<int64_t, NDIM> b_strides) {
|
||||||
IdxT index = cg::this_grid().thread_rank();
|
auto block = cg::this_thread_block();
|
||||||
if (index < size) {
|
auto grid = cg::this_grid();
|
||||||
auto [a_idx, b_idx] = elem_to_loc_nd<NDIM>(
|
IdxT index_rest =
|
||||||
index, shape.data(), a_strides.data(), b_strides.data());
|
grid.block_index().y * block.dim_threads().y + block.thread_index().y;
|
||||||
auto out = Op{}(a[a_idx], b[b_idx]);
|
if (index_rest >= size_rest) {
|
||||||
out_a[index] = out[0];
|
return;
|
||||||
out_b[index] = out[1];
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
auto shape_x = shape[NDIM - 1];
|
||||||
|
auto a_stride_x = a_strides[NDIM - 1];
|
||||||
|
auto b_stride_x = b_strides[NDIM - 1];
|
||||||
|
IdxT index_x =
|
||||||
|
grid.block_index().x * block.dim_threads().x + block.thread_index().x;
|
||||||
|
auto [a_idx, b_idx] = elem_to_loc_nd<NDIM>(
|
||||||
|
index_rest * shape_x, shape.data(), a_strides.data(), b_strides.data());
|
||||||
|
auto a_vec =
|
||||||
|
load_vector<N_READS>(a + a_idx, index_x, shape_x, a_stride_x, In(0));
|
||||||
|
auto b_vec =
|
||||||
|
load_vector<N_READS>(b + b_idx, index_x, shape_x, b_stride_x, In(0));
|
||||||
|
|
||||||
|
AlignedVector<Out, N_READS> out_vec_a;
|
||||||
|
AlignedVector<Out, N_READS> out_vec_b;
|
||||||
|
#pragma unroll
|
||||||
|
for (int i = 0; i < N_READS; ++i) {
|
||||||
|
auto out = Op{}(a_vec[i], b_vec[i]);
|
||||||
|
out_vec_a[i] = out[0];
|
||||||
|
out_vec_b[i] = out[1];
|
||||||
|
}
|
||||||
|
store_vector(out_a + shape_x * index_rest, index_x, out_vec_a, shape_x);
|
||||||
|
store_vector(out_b + shape_x * index_rest, index_x, out_vec_b, shape_x);
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename Op, typename In, typename Out, typename IdxT>
|
template <typename Op, typename In, typename Out, typename IdxT, int N_READS>
|
||||||
__global__ void binary_two_g(
|
__global__ void binary_two_g(
|
||||||
const In* a,
|
const In* a,
|
||||||
const In* b,
|
const In* b,
|
||||||
Out* out_a,
|
Out* out_a,
|
||||||
Out* out_b,
|
Out* out_b,
|
||||||
IdxT size,
|
IdxT size_rest,
|
||||||
const __grid_constant__ Shape shape,
|
const __grid_constant__ Shape shape,
|
||||||
const __grid_constant__ Strides a_strides,
|
const __grid_constant__ Strides a_strides,
|
||||||
const __grid_constant__ Strides b_strides,
|
const __grid_constant__ Strides b_strides,
|
||||||
int ndim) {
|
int ndim) {
|
||||||
IdxT index = cg::this_grid().thread_rank();
|
auto block = cg::this_thread_block();
|
||||||
if (index < size) {
|
auto grid = cg::this_grid();
|
||||||
auto [a_idx, b_idx] = elem_to_loc(
|
IdxT index_rest =
|
||||||
index, shape.data(), a_strides.data(), b_strides.data(), ndim);
|
grid.block_index().y * block.dim_threads().y + block.thread_index().y;
|
||||||
auto out = Op{}(a[a_idx], b[b_idx]);
|
if (index_rest >= size_rest) {
|
||||||
out_a[index] = out[0];
|
return;
|
||||||
out_b[index] = out[1];
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
auto shape_x = shape[ndim - 1];
|
||||||
|
auto a_stride_x = a_strides[ndim - 1];
|
||||||
|
auto b_stride_x = b_strides[ndim - 1];
|
||||||
|
IdxT index_x =
|
||||||
|
grid.block_index().x * block.dim_threads().x + block.thread_index().x;
|
||||||
|
auto [a_idx, b_idx] = elem_to_loc(
|
||||||
|
index_rest * shape_x,
|
||||||
|
shape.data(),
|
||||||
|
a_strides.data(),
|
||||||
|
b_strides.data(),
|
||||||
|
ndim);
|
||||||
|
auto a_vec =
|
||||||
|
load_vector<N_READS>(a + a_idx, index_x, shape_x, a_stride_x, In(0));
|
||||||
|
auto b_vec =
|
||||||
|
load_vector<N_READS>(b + b_idx, index_x, shape_x, b_stride_x, In(0));
|
||||||
|
|
||||||
|
AlignedVector<Out, N_READS> out_vec_a;
|
||||||
|
AlignedVector<Out, N_READS> out_vec_b;
|
||||||
|
#pragma unroll
|
||||||
|
for (int i = 0; i < N_READS; ++i) {
|
||||||
|
auto out = Op{}(a_vec[i], b_vec[i]);
|
||||||
|
out_vec_a[i] = out[0];
|
||||||
|
out_vec_b[i] = out[1];
|
||||||
|
}
|
||||||
|
store_vector(out_a + shape_x * index_rest, index_x, out_vec_a, shape_x);
|
||||||
|
store_vector(out_b + shape_x * index_rest, index_x, out_vec_b, shape_x);
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename Op, typename In, typename Out>
|
template <typename Op, typename In, typename Out>
|
||||||
@@ -225,42 +279,64 @@ void binary_two_op_gpu_inplace(
|
|||||||
auto& a_strides = strides[0];
|
auto& a_strides = strides[0];
|
||||||
auto& b_strides = strides[1];
|
auto& b_strides = strides[1];
|
||||||
int ndim = shape.size();
|
int ndim = shape.size();
|
||||||
|
int work_per_thread = 1;
|
||||||
|
auto dim0 = ndim > 0 ? shape.back() : 1;
|
||||||
|
auto rest = out_a.size() / dim0;
|
||||||
|
if (dim0 >= 4) {
|
||||||
|
work_per_thread = 4;
|
||||||
|
}
|
||||||
|
dim0 = (dim0 + work_per_thread - 1) / work_per_thread;
|
||||||
|
auto block_dims = get_block_dims(dim0, rest, 1);
|
||||||
|
uint32_t num_blocks_x = cuda::ceil_div(dim0, block_dims.x);
|
||||||
|
uint32_t num_blocks_y = cuda::ceil_div(rest, block_dims.y);
|
||||||
|
|
||||||
if (ndim <= 3) {
|
if (ndim <= 3) {
|
||||||
dispatch_1_2_3(ndim, [&](auto dims_constant) {
|
dispatch_1_2_3(ndim, [&](auto dims_constant) {
|
||||||
auto [num_blocks, block_dims] =
|
auto kernel = cu::binary_two_g_nd<
|
||||||
get_launch_args(out_a, large());
|
|
||||||
encoder.add_kernel_node(
|
|
||||||
cu::binary_two_g_nd<
|
|
||||||
Op,
|
Op,
|
||||||
InType,
|
InType,
|
||||||
OutType,
|
OutType,
|
||||||
IdxT,
|
IdxT,
|
||||||
dims_constant()>,
|
dims_constant(),
|
||||||
num_blocks,
|
1>;
|
||||||
|
if (work_per_thread == 4) {
|
||||||
|
kernel = cu::binary_two_g_nd<
|
||||||
|
Op,
|
||||||
|
InType,
|
||||||
|
OutType,
|
||||||
|
IdxT,
|
||||||
|
dims_constant(),
|
||||||
|
4>;
|
||||||
|
}
|
||||||
|
encoder.add_kernel_node(
|
||||||
|
kernel,
|
||||||
|
{num_blocks_x, num_blocks_y},
|
||||||
block_dims,
|
block_dims,
|
||||||
0,
|
0,
|
||||||
a.data<InType>(),
|
a.data<InType>(),
|
||||||
b.data<InType>(),
|
b.data<InType>(),
|
||||||
out_a.data<OutType>(),
|
out_a.data<OutType>(),
|
||||||
out_b.data<OutType>(),
|
out_b.data<OutType>(),
|
||||||
out_a.size(),
|
rest,
|
||||||
const_param<dims_constant()>(shape),
|
const_param<dims_constant()>(shape),
|
||||||
const_param<dims_constant()>(a_strides),
|
const_param<dims_constant()>(a_strides),
|
||||||
const_param<dims_constant()>(b_strides));
|
const_param<dims_constant()>(b_strides));
|
||||||
});
|
});
|
||||||
} else {
|
} else {
|
||||||
auto [num_blocks, block_dims] =
|
auto kernel = cu::binary_two_g<Op, InType, OutType, IdxT, 1>;
|
||||||
get_launch_args(out_a, large());
|
if (work_per_thread == 4) {
|
||||||
|
kernel = cu::binary_two_g<Op, InType, OutType, IdxT, 4>;
|
||||||
|
}
|
||||||
encoder.add_kernel_node(
|
encoder.add_kernel_node(
|
||||||
cu::binary_two_g<Op, InType, OutType, IdxT>,
|
kernel,
|
||||||
num_blocks,
|
{num_blocks_x, num_blocks_y},
|
||||||
block_dims,
|
block_dims,
|
||||||
0,
|
0,
|
||||||
a.data<InType>(),
|
a.data<InType>(),
|
||||||
b.data<InType>(),
|
b.data<InType>(),
|
||||||
out_a.data<OutType>(),
|
out_a.data<OutType>(),
|
||||||
out_b.data<OutType>(),
|
out_b.data<OutType>(),
|
||||||
out_a.size(),
|
rest,
|
||||||
const_param(shape),
|
const_param(shape),
|
||||||
const_param(a_strides),
|
const_param(a_strides),
|
||||||
const_param(b_strides),
|
const_param(b_strides),
|
||||||
|
|||||||
@@ -267,7 +267,8 @@ void Compiled::eval_gpu(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return std::make_pair(std::move(builder.os), std::move(kernel_names));
|
return std::make_tuple(
|
||||||
|
false, std::move(builder.os), std::move(kernel_names));
|
||||||
});
|
});
|
||||||
|
|
||||||
// Collapse contiguous dims to route to a faster kernel if possible. Also
|
// Collapse contiguous dims to route to a faster kernel if possible. Also
|
||||||
|
|||||||
@@ -1,18 +1,12 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/conv/conv.h"
|
||||||
|
#include "mlx/backend/cuda/cudnn_utils.h"
|
||||||
#include "mlx/backend/cuda/device.h"
|
#include "mlx/backend/cuda/device.h"
|
||||||
#include "mlx/backend/cuda/device/config.h"
|
|
||||||
#include "mlx/backend/cuda/lru_cache.h"
|
#include "mlx/backend/cuda/lru_cache.h"
|
||||||
#include "mlx/backend/gpu/copy.h"
|
#include "mlx/backend/gpu/copy.h"
|
||||||
#include "mlx/dtype_utils.h"
|
|
||||||
#include "mlx/primitives.h"
|
#include "mlx/primitives.h"
|
||||||
|
|
||||||
// cudnn_frontend.h redefines this macro.
|
|
||||||
#undef CHECK_CUDA_ERROR
|
|
||||||
|
|
||||||
#include <cudnn_frontend.h>
|
|
||||||
#include <cudnn_frontend_find_plan.h>
|
|
||||||
#include <fmt/format.h>
|
|
||||||
#include <nvtx3/nvtx3.hpp>
|
#include <nvtx3/nvtx3.hpp>
|
||||||
|
|
||||||
#include <cassert>
|
#include <cassert>
|
||||||
@@ -21,9 +15,6 @@ namespace mlx::core {
|
|||||||
|
|
||||||
namespace {
|
namespace {
|
||||||
|
|
||||||
// Not all engines support it so can not use this API now.
|
|
||||||
#define MLX_USE_CUDNN_NATIVE_CUDA_GRAPH_API 0
|
|
||||||
|
|
||||||
// Alias for better readability.
|
// Alias for better readability.
|
||||||
#define CONV_FORWARD CUDNN_BACKEND_OPERATION_CONVOLUTION_FORWARD_DESCRIPTOR
|
#define CONV_FORWARD CUDNN_BACKEND_OPERATION_CONVOLUTION_FORWARD_DESCRIPTOR
|
||||||
#define CONV_BACKWARD_INPUT \
|
#define CONV_BACKWARD_INPUT \
|
||||||
@@ -31,6 +22,9 @@ namespace {
|
|||||||
#define CONV_BACKWARD_WEIGHT \
|
#define CONV_BACKWARD_WEIGHT \
|
||||||
CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_FILTER_DESCRIPTOR
|
CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_FILTER_DESCRIPTOR
|
||||||
|
|
||||||
|
// Custom placeholder representing fallback kernel.
|
||||||
|
#define CONV_FALLBACK static_cast<cudnnBackendDescriptorType_t>(-1)
|
||||||
|
|
||||||
struct ConvCacheKey {
|
struct ConvCacheKey {
|
||||||
int device_id;
|
int device_id;
|
||||||
cudnnDataType_t cudnn_dtype;
|
cudnnDataType_t cudnn_dtype;
|
||||||
@@ -50,203 +44,13 @@ struct ConvCacheKey {
|
|||||||
auto& conv_cache() {
|
auto& conv_cache() {
|
||||||
static LRUBytesKeyCache<
|
static LRUBytesKeyCache<
|
||||||
ConvCacheKey,
|
ConvCacheKey,
|
||||||
std::pair<cudnnBackendDescriptorType_t, cudnn_frontend::ExecutionPlan>>
|
std::pair<
|
||||||
cache(/* capacity */ 128);
|
cudnnBackendDescriptorType_t,
|
||||||
|
std::optional<cudnn_frontend::ExecutionPlan>>>
|
||||||
|
cache("MLX_CUDA_CONV_CACHE_SIZE", /* default_capacity */ 128);
|
||||||
return cache;
|
return cache;
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename T, typename Vec>
|
|
||||||
inline SmallVector<T> convert_vector(const Vec& vec) {
|
|
||||||
return SmallVector<T>(vec.begin(), vec.end());
|
|
||||||
}
|
|
||||||
|
|
||||||
template <typename T, template <typename U> class Vec>
|
|
||||||
inline std::array<T, MAX_NDIM> fixed_vector(const Vec<T>& vec) {
|
|
||||||
if (vec.size() > MAX_NDIM) {
|
|
||||||
throw std::runtime_error(
|
|
||||||
fmt::format("ndim can not be larger than {}.", MAX_NDIM));
|
|
||||||
}
|
|
||||||
std::array<T, MAX_NDIM> result = {};
|
|
||||||
std::copy_n(vec.begin(), vec.size(), result.begin());
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
|
|
||||||
auto nhwc_to_nchw(const array& x) {
|
|
||||||
auto shape = convert_vector<int64_t>(x.shape());
|
|
||||||
shape.insert(shape.begin() + 1, shape.back());
|
|
||||||
shape.erase(shape.end() - 1);
|
|
||||||
auto strides = convert_vector<int64_t>(x.strides());
|
|
||||||
strides.insert(strides.begin() + 1, strides.back());
|
|
||||||
strides.erase(strides.end() - 1);
|
|
||||||
return std::make_tuple(std::move(shape), std::move(strides));
|
|
||||||
}
|
|
||||||
|
|
||||||
inline cudnnDataType_t dtype_to_cudnn_type(Dtype dtype) {
|
|
||||||
switch (dtype) {
|
|
||||||
case int8:
|
|
||||||
return CUDNN_DATA_INT8;
|
|
||||||
case int32:
|
|
||||||
return CUDNN_DATA_INT32;
|
|
||||||
case uint8:
|
|
||||||
return CUDNN_DATA_UINT8;
|
|
||||||
case float16:
|
|
||||||
return CUDNN_DATA_HALF;
|
|
||||||
case bfloat16:
|
|
||||||
return CUDNN_DATA_BFLOAT16;
|
|
||||||
case float32:
|
|
||||||
return CUDNN_DATA_FLOAT;
|
|
||||||
case float64:
|
|
||||||
return CUDNN_DATA_DOUBLE;
|
|
||||||
default:
|
|
||||||
throw std::runtime_error(fmt::format(
|
|
||||||
"Unsupported dtype in Convolution: {}.", dtype_to_string(dtype)));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
inline uint8_t get_alignment(const array& x) {
|
|
||||||
uint8_t alignment = 1;
|
|
||||||
uintptr_t address = reinterpret_cast<uintptr_t>(x.data<void>());
|
|
||||||
for (; alignment < 32; alignment *= 2) {
|
|
||||||
if (address % (alignment * 2)) {
|
|
||||||
return alignment;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return alignment;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline cudnn_frontend::Tensor build_tensor(int64_t id, const array& x) {
|
|
||||||
auto [shape, strides] = nhwc_to_nchw(x);
|
|
||||||
return cudnn_frontend::TensorBuilder()
|
|
||||||
.setDim(shape.size(), shape.data())
|
|
||||||
.setStrides(strides.size(), strides.data())
|
|
||||||
.setId(id)
|
|
||||||
.setAlignment(get_alignment(x))
|
|
||||||
.setDataType(dtype_to_cudnn_type(x.dtype()))
|
|
||||||
.build();
|
|
||||||
}
|
|
||||||
|
|
||||||
cudnn_frontend::EngineConfigList get_engine_configs(
|
|
||||||
cudnnBackendDescriptorType_t backend_type,
|
|
||||||
Dtype dtype,
|
|
||||||
cudnn_frontend::OperationGraph& op_graph,
|
|
||||||
bool use_fallback = false) {
|
|
||||||
cudnn_frontend::GeneratorSource source;
|
|
||||||
if (use_fallback) {
|
|
||||||
source = [&backend_type](cudnn_frontend::OperationGraph& op_graph) {
|
|
||||||
auto fallback = cudnn_frontend::EngineFallbackListBuilder()
|
|
||||||
.setOperationGraph(op_graph)
|
|
||||||
.setOperation(backend_type)
|
|
||||||
.build();
|
|
||||||
return fallback.getFallbackList();
|
|
||||||
};
|
|
||||||
} else {
|
|
||||||
source = [](cudnn_frontend::OperationGraph& op_graph) {
|
|
||||||
auto heuristics = cudnn_frontend::EngineHeuristicsBuilder()
|
|
||||||
.setOperationGraph(op_graph)
|
|
||||||
.setHeurMode(CUDNN_HEUR_MODE_A)
|
|
||||||
.build();
|
|
||||||
return heuristics.getEngineConfig(heuristics.getEngineConfigCount());
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
cudnn_frontend::EngineConfigGenerator generator(1, &source);
|
|
||||||
auto configs = generator.generate_engine_config(op_graph);
|
|
||||||
|
|
||||||
cudnn_frontend::EngineConfigList filtered_configs;
|
|
||||||
cudnn_frontend::filter(configs, filtered_configs, [dtype](auto c) {
|
|
||||||
if (cudnn_frontend::hasNumericalNote<
|
|
||||||
CUDNN_NUMERICAL_NOTE_DOWN_CONVERT_INPUTS>(c)) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
if (cudnn_frontend::hasNumericalNote<CUDNN_NUMERICAL_NOTE_TENSOR_CORE>(c) &&
|
|
||||||
dtype == float32 && !env::enable_tf32()) {
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
return false;
|
|
||||||
});
|
|
||||||
return filtered_configs;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool execute_plan(
|
|
||||||
cu::CommandEncoder& encoder,
|
|
||||||
cudnn_frontend::ExecutionPlan& plan,
|
|
||||||
array& x,
|
|
||||||
array& w,
|
|
||||||
array& y) {
|
|
||||||
int workspace_size = plan.getWorkspaceSize();
|
|
||||||
array workspace(allocator::malloc(workspace_size), {workspace_size}, uint8);
|
|
||||||
|
|
||||||
int64_t uids[3] = {'x', 'w', 'y'};
|
|
||||||
void* data_ptrs[3] = {
|
|
||||||
x.data<void>(),
|
|
||||||
w.data<void>(),
|
|
||||||
y.data<void>(),
|
|
||||||
};
|
|
||||||
|
|
||||||
auto variantPack = cudnn_frontend::VariantPackBuilder()
|
|
||||||
.setWorkspacePointer(workspace.data<void>())
|
|
||||||
.setDataPointers(3, data_ptrs)
|
|
||||||
.setUids(3, uids)
|
|
||||||
.build();
|
|
||||||
|
|
||||||
auto handle = encoder.device().cudnn_handle();
|
|
||||||
cudnnSetStream(handle, encoder.stream());
|
|
||||||
|
|
||||||
#if CUDNN_VERSION >= 90500 && MLX_USE_CUDNN_NATIVE_CUDA_GRAPH_API
|
|
||||||
cudaGraph_t graph;
|
|
||||||
cudaGraphCreate(&graph, 0);
|
|
||||||
std::unique_ptr<cudaGraph_t, void (*)(cudaGraph_t*)> graph_freer(
|
|
||||||
&graph, [](cudaGraph_t* p) { cudaGraphDestroy(*p); });
|
|
||||||
if (cudnnBackendPopulateCudaGraph(
|
|
||||||
handle, plan.get_raw_desc(), variantPack.get_raw_desc(), graph) !=
|
|
||||||
CUDNN_STATUS_SUCCESS) {
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
encoder.add_graph_node(graph);
|
|
||||||
#else
|
|
||||||
auto capture = encoder.capture_context();
|
|
||||||
if (cudnnBackendExecute(
|
|
||||||
handle, plan.get_raw_desc(), variantPack.get_raw_desc()) !=
|
|
||||||
CUDNN_STATUS_SUCCESS) {
|
|
||||||
// Discard the captured graph when failed.
|
|
||||||
capture.discard = true;
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
encoder.add_temporary(workspace);
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool try_engines(
|
|
||||||
cu::CommandEncoder& encoder,
|
|
||||||
const ConvCacheKey& cache_key,
|
|
||||||
cudnnBackendDescriptorType_t backend_type,
|
|
||||||
cudnn_frontend::EngineConfigList& configs,
|
|
||||||
const std::string& op_graph_tag,
|
|
||||||
array& x,
|
|
||||||
array& w,
|
|
||||||
array& y) {
|
|
||||||
for (auto& config : configs) {
|
|
||||||
try {
|
|
||||||
auto plan = cudnn_frontend::ExecutionPlanBuilder()
|
|
||||||
.setHandle(encoder.device().cudnn_handle())
|
|
||||||
.setEngineConfig(config, op_graph_tag)
|
|
||||||
.build();
|
|
||||||
if (execute_plan(encoder, plan, x, w, y)) {
|
|
||||||
conv_cache().emplace(
|
|
||||||
cache_key, std::make_pair(backend_type, std::move(plan)));
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
} catch (cudnn_frontend::cudnnException& error) {
|
|
||||||
if (error.getCudnnStatus() != CUDNN_STATUS_NOT_SUPPORTED) {
|
|
||||||
throw;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
auto get_conv_op_settings(
|
auto get_conv_op_settings(
|
||||||
cudnnBackendDescriptorType_t backend_type,
|
cudnnBackendDescriptorType_t backend_type,
|
||||||
array& x,
|
array& x,
|
||||||
@@ -291,7 +95,7 @@ auto get_conv_op_settings(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
std::optional<cudnn_frontend::OperationGraph> build_op_graph(
|
std::optional<cudnn_frontend::OperationGraph> build_conv_op_graph(
|
||||||
cu::CommandEncoder& encoder,
|
cu::CommandEncoder& encoder,
|
||||||
cudnnBackendDescriptorType_t backend_type,
|
cudnnBackendDescriptorType_t backend_type,
|
||||||
Dtype dtype,
|
Dtype dtype,
|
||||||
@@ -317,9 +121,9 @@ std::optional<cudnn_frontend::OperationGraph> build_op_graph(
|
|||||||
.build();
|
.build();
|
||||||
|
|
||||||
auto op = cudnn_frontend::OperationBuilder(backend_type)
|
auto op = cudnn_frontend::OperationBuilder(backend_type)
|
||||||
.setxDesc(build_tensor('x', x))
|
.setxDesc(build_cudnn_tensor_nchw('x', x))
|
||||||
.setwDesc(build_tensor('w', w))
|
.setwDesc(build_cudnn_tensor_nchw('w', w))
|
||||||
.setyDesc(build_tensor('y', y))
|
.setyDesc(build_cudnn_tensor_nchw('y', y))
|
||||||
.setcDesc(conv_desc)
|
.setcDesc(conv_desc)
|
||||||
.build();
|
.build();
|
||||||
|
|
||||||
@@ -336,6 +140,42 @@ std::optional<cudnn_frontend::OperationGraph> build_op_graph(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Transpose from (C_out, H, W, C_in / groups) to (C_in, H, W, C_out / groups).
|
||||||
|
array group_transpose(
|
||||||
|
const array& x,
|
||||||
|
int groups,
|
||||||
|
int group_dim,
|
||||||
|
int axis1,
|
||||||
|
int axis2,
|
||||||
|
Stream s) {
|
||||||
|
if (groups == 1) {
|
||||||
|
return swapaxes_in_eval(x, axis1, axis2);
|
||||||
|
}
|
||||||
|
int ndim = x.ndim();
|
||||||
|
if (group_dim < 0) {
|
||||||
|
group_dim += ndim;
|
||||||
|
}
|
||||||
|
if (axis1 < 0) {
|
||||||
|
axis1 += ndim;
|
||||||
|
}
|
||||||
|
if (axis2 < 0) {
|
||||||
|
axis2 += ndim;
|
||||||
|
}
|
||||||
|
if (group_dim <= axis1) {
|
||||||
|
axis1 += 1;
|
||||||
|
}
|
||||||
|
if (group_dim <= axis2) {
|
||||||
|
axis2 += 1;
|
||||||
|
}
|
||||||
|
auto shape = x.shape();
|
||||||
|
shape.insert(shape.begin() + group_dim, groups);
|
||||||
|
shape[group_dim + 1] = shape[group_dim + 1] / groups;
|
||||||
|
array x_trans = reshape_in_eval(x, std::move(shape), s);
|
||||||
|
x_trans = swapaxes_in_eval(x_trans, axis1, axis2);
|
||||||
|
x_trans = flatten_in_eval(x_trans, group_dim, group_dim + 1, s);
|
||||||
|
return x_trans;
|
||||||
|
}
|
||||||
|
|
||||||
// Do necessary transposes and copies to prepare the inputs and outputs for
|
// Do necessary transposes and copies to prepare the inputs and outputs for
|
||||||
// building the cuDNN conv op. It is safe to be called multiple times in one
|
// building the cuDNN conv op. It is safe to be called multiple times in one
|
||||||
// eval_gpu, with cost of possible redundant copies.
|
// eval_gpu, with cost of possible redundant copies.
|
||||||
@@ -345,13 +185,14 @@ std::tuple<array, array, array> prepare_args(
|
|||||||
array in,
|
array in,
|
||||||
array wt,
|
array wt,
|
||||||
array out,
|
array out,
|
||||||
|
int groups,
|
||||||
Stream s) {
|
Stream s) {
|
||||||
// Transpose the args depending on the backend type.
|
// Transpose the args depending on the backend type.
|
||||||
// TODO: Handle groups.
|
// TODO: Handle groups.
|
||||||
if (backend_type == CONV_BACKWARD_INPUT) {
|
if (backend_type == CONV_BACKWARD_INPUT) {
|
||||||
wt = swapaxes_in_eval(wt, 0, -1);
|
wt = group_transpose(wt, groups, 0, 0, -1, s);
|
||||||
} else if (backend_type == CONV_BACKWARD_WEIGHT) {
|
} else if (backend_type == CONV_BACKWARD_WEIGHT) {
|
||||||
in = swapaxes_in_eval(in, 0, -1);
|
in = group_transpose(in, groups, -1, 0, -1, s);
|
||||||
wt = swapaxes_in_eval(wt, 0, -1);
|
wt = swapaxes_in_eval(wt, 0, -1);
|
||||||
// Create a contiguous array that shares the data with |out|, but with dim
|
// Create a contiguous array that shares the data with |out|, but with dim
|
||||||
// C_in and C_out swapped.
|
// C_in and C_out swapped.
|
||||||
@@ -444,12 +285,12 @@ void Convolution::eval_gpu(const std::vector<array>& inputs, array& out_) {
|
|||||||
ConvCacheKey cache_key{
|
ConvCacheKey cache_key{
|
||||||
encoder.device().cuda_device(),
|
encoder.device().cuda_device(),
|
||||||
dtype_to_cudnn_type(dtype),
|
dtype_to_cudnn_type(dtype),
|
||||||
fixed_vector(in.shape()),
|
vector_key(in.shape()),
|
||||||
fixed_vector(wt.shape()),
|
vector_key(wt.shape()),
|
||||||
fixed_vector(kernel_strides_),
|
vector_key(kernel_strides_),
|
||||||
fixed_vector(padding_lo_),
|
vector_key(padding_lo_),
|
||||||
fixed_vector(padding_hi_),
|
vector_key(padding_hi_),
|
||||||
fixed_vector(kernel_dilation_),
|
vector_key(kernel_dilation_),
|
||||||
groups_,
|
groups_,
|
||||||
flip_,
|
flip_,
|
||||||
get_alignment(in),
|
get_alignment(in),
|
||||||
@@ -457,12 +298,30 @@ void Convolution::eval_gpu(const std::vector<array>& inputs, array& out_) {
|
|||||||
get_alignment(out)};
|
get_alignment(out)};
|
||||||
if (auto it = conv_cache().find(cache_key); it != conv_cache().end()) {
|
if (auto it = conv_cache().find(cache_key); it != conv_cache().end()) {
|
||||||
auto& [backend_type, plan] = it->second;
|
auto& [backend_type, plan] = it->second;
|
||||||
std::tie(in, wt, out) = prepare_args(encoder, backend_type, in, wt, out, s);
|
if (plan) {
|
||||||
|
// Run cached plan.
|
||||||
|
std::tie(in, wt, out) =
|
||||||
|
prepare_args(encoder, backend_type, in, wt, out, groups_, s);
|
||||||
register_args(encoder, backend_type, in, wt, out, out_);
|
register_args(encoder, backend_type, in, wt, out, out_);
|
||||||
auto [x, w, y] = dispatch_args(backend_type, in, wt, out);
|
auto [x, w, y] = dispatch_args(backend_type, in, wt, out);
|
||||||
if (!execute_plan(encoder, plan, x, w, y)) {
|
if (!encode_cudnn_plan(encoder, *plan, {'x', 'w', 'y'}, x, w, y)) {
|
||||||
throw std::runtime_error("[conv] Cached plan failed to execute.");
|
throw std::runtime_error("[conv] Cached plan failed to execute.");
|
||||||
}
|
}
|
||||||
|
} else {
|
||||||
|
// Run fallback kernel.
|
||||||
|
gemm_conv(
|
||||||
|
encoder,
|
||||||
|
in,
|
||||||
|
wt,
|
||||||
|
out,
|
||||||
|
kernel_strides_,
|
||||||
|
padding_lo_,
|
||||||
|
kernel_dilation_,
|
||||||
|
input_dilation_,
|
||||||
|
groups_,
|
||||||
|
flip_,
|
||||||
|
s);
|
||||||
|
}
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -490,7 +349,7 @@ void Convolution::eval_gpu(const std::vector<array>& inputs, array& out_) {
|
|||||||
std::optional<cudnn_frontend::OperationGraph> op_graph;
|
std::optional<cudnn_frontend::OperationGraph> op_graph;
|
||||||
for (auto try_backend : try_backends) {
|
for (auto try_backend : try_backends) {
|
||||||
auto [in_copy, wt_copy, out_copy] =
|
auto [in_copy, wt_copy, out_copy] =
|
||||||
prepare_args(encoder, try_backend, in, wt, out, s);
|
prepare_args(encoder, try_backend, in, wt, out, groups_, s);
|
||||||
auto [x, w, y] = dispatch_args(try_backend, in_copy, wt_copy, out_copy);
|
auto [x, w, y] = dispatch_args(try_backend, in_copy, wt_copy, out_copy);
|
||||||
auto [stride, padding_lo, padding_hi, dilation] = get_conv_op_settings(
|
auto [stride, padding_lo, padding_hi, dilation] = get_conv_op_settings(
|
||||||
try_backend,
|
try_backend,
|
||||||
@@ -502,7 +361,7 @@ void Convolution::eval_gpu(const std::vector<array>& inputs, array& out_) {
|
|||||||
padding_hi_,
|
padding_hi_,
|
||||||
kernel_dilation_,
|
kernel_dilation_,
|
||||||
input_dilation_);
|
input_dilation_);
|
||||||
op_graph = build_op_graph(
|
op_graph = build_conv_op_graph(
|
||||||
encoder,
|
encoder,
|
||||||
try_backend,
|
try_backend,
|
||||||
dtype,
|
dtype,
|
||||||
@@ -521,26 +380,39 @@ void Convolution::eval_gpu(const std::vector<array>& inputs, array& out_) {
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if (!op_graph) {
|
|
||||||
throw std::runtime_error("[conv] Can not build op graph.");
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get ready to execute the graph.
|
if (op_graph) {
|
||||||
|
// Setup inputs and outputs.
|
||||||
register_args(encoder, backend_type, in, wt, out, out_);
|
register_args(encoder, backend_type, in, wt, out, out_);
|
||||||
|
|
||||||
// Try to run plans based on heuristics.
|
// Find a plan for the graph and execute it.
|
||||||
auto configs = get_engine_configs(backend_type, dtype, *op_graph);
|
auto plan = find_cudnn_plan_from_op_graph(
|
||||||
auto tag = op_graph->getTag();
|
encoder.device().cudnn_handle(), backend_type, dtype, *op_graph);
|
||||||
|
if (!plan) {
|
||||||
|
throw std::runtime_error("[conv] Unable to find an execution plan.");
|
||||||
|
}
|
||||||
auto [x, w, y] = dispatch_args(backend_type, in, wt, out);
|
auto [x, w, y] = dispatch_args(backend_type, in, wt, out);
|
||||||
if (try_engines(encoder, cache_key, backend_type, configs, tag, x, w, y)) {
|
if (encode_cudnn_plan(encoder, *plan, {'x', 'w', 'y'}, x, w, y)) {
|
||||||
|
conv_cache().emplace(
|
||||||
|
cache_key, std::make_pair(backend_type, std::move(*plan)));
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
// Then try fallback plans.
|
|
||||||
configs = get_engine_configs(backend_type, dtype, *op_graph);
|
|
||||||
if (try_engines(encoder, cache_key, backend_type, configs, tag, x, w, y)) {
|
|
||||||
return;
|
|
||||||
}
|
}
|
||||||
throw std::runtime_error("[conv] Unable to find a working engine.");
|
|
||||||
|
// Use fallback kernel for settings not supported by cuDNN.
|
||||||
|
gemm_conv(
|
||||||
|
encoder,
|
||||||
|
in,
|
||||||
|
wt,
|
||||||
|
out,
|
||||||
|
kernel_strides_,
|
||||||
|
padding_lo_,
|
||||||
|
kernel_dilation_,
|
||||||
|
input_dilation_,
|
||||||
|
groups_,
|
||||||
|
flip_,
|
||||||
|
s);
|
||||||
|
conv_cache().emplace(cache_key, std::make_pair(CONV_FALLBACK, std::nullopt));
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace mlx::core
|
} // namespace mlx::core
|
||||||
|
|||||||
126
mlx/backend/cuda/conv/conv.h
Normal file
126
mlx/backend/cuda/conv/conv.h
Normal file
@@ -0,0 +1,126 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/device.h"
|
||||||
|
#include "mlx/backend/gpu/copy.h"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
|
||||||
|
template <int NDIM>
|
||||||
|
struct ConvParams {
|
||||||
|
int N; // Batch size
|
||||||
|
int C; // In channels
|
||||||
|
int O; // Out channels
|
||||||
|
int strides[NDIM];
|
||||||
|
int padding[NDIM];
|
||||||
|
int kernel_dilation[NDIM];
|
||||||
|
int input_dilation[NDIM];
|
||||||
|
int groups;
|
||||||
|
bool flip;
|
||||||
|
int in_spatial_dims[NDIM];
|
||||||
|
int wt_spatial_dims[NDIM];
|
||||||
|
int out_spatial_dims[NDIM];
|
||||||
|
int64_t in_strides[NDIM + 2];
|
||||||
|
|
||||||
|
ConvParams(
|
||||||
|
const array& in,
|
||||||
|
const array& wt,
|
||||||
|
const array& out,
|
||||||
|
const std::vector<int>& strides,
|
||||||
|
const std::vector<int>& padding,
|
||||||
|
const std::vector<int>& kernel_dilation,
|
||||||
|
const std::vector<int>& input_dilation,
|
||||||
|
int groups,
|
||||||
|
bool flip)
|
||||||
|
: N(in.shape(0)),
|
||||||
|
C(in.shape(-1)),
|
||||||
|
O(wt.shape(0)),
|
||||||
|
groups(groups),
|
||||||
|
flip(flip) {
|
||||||
|
std::copy_n(strides.begin(), NDIM, this->strides);
|
||||||
|
std::copy_n(padding.begin(), NDIM, this->padding);
|
||||||
|
std::copy_n(kernel_dilation.begin(), NDIM, this->kernel_dilation);
|
||||||
|
std::copy_n(input_dilation.begin(), NDIM, this->input_dilation);
|
||||||
|
std::copy_n(in.shape().begin() + 1, NDIM, this->in_spatial_dims);
|
||||||
|
std::copy_n(wt.shape().begin() + 1, NDIM, this->wt_spatial_dims);
|
||||||
|
std::copy_n(out.shape().begin() + 1, NDIM, this->out_spatial_dims);
|
||||||
|
std::copy_n(in.strides().begin(), NDIM + 2, this->in_strides);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
void gemm_grouped_conv(
|
||||||
|
cu::CommandEncoder& encoder,
|
||||||
|
const array& in,
|
||||||
|
const array& wt,
|
||||||
|
array& out,
|
||||||
|
const std::vector<int>& strides,
|
||||||
|
const std::vector<int>& padding,
|
||||||
|
const std::vector<int>& kernel_dilation,
|
||||||
|
const std::vector<int>& input_dilation,
|
||||||
|
int groups,
|
||||||
|
bool flip,
|
||||||
|
Stream s);
|
||||||
|
|
||||||
|
void gemm_conv(
|
||||||
|
cu::CommandEncoder& encoder,
|
||||||
|
const array& in,
|
||||||
|
const array& wt,
|
||||||
|
array& out,
|
||||||
|
const std::vector<int>& strides,
|
||||||
|
const std::vector<int>& padding,
|
||||||
|
const std::vector<int>& kernel_dilation,
|
||||||
|
const std::vector<int>& input_dilation,
|
||||||
|
bool flip,
|
||||||
|
Stream s);
|
||||||
|
|
||||||
|
inline void gemm_conv(
|
||||||
|
cu::CommandEncoder& encoder,
|
||||||
|
array in,
|
||||||
|
array wt,
|
||||||
|
array& out,
|
||||||
|
const std::vector<int>& strides,
|
||||||
|
const std::vector<int>& padding,
|
||||||
|
const std::vector<int>& kernel_dilation,
|
||||||
|
const std::vector<int>& input_dilation,
|
||||||
|
int groups,
|
||||||
|
bool flip,
|
||||||
|
Stream s) {
|
||||||
|
if (!in.flags().row_contiguous) {
|
||||||
|
in = contiguous_copy_gpu(in, s);
|
||||||
|
encoder.add_temporary(in);
|
||||||
|
}
|
||||||
|
if (!wt.flags().row_contiguous) {
|
||||||
|
wt = contiguous_copy_gpu(wt, s);
|
||||||
|
encoder.add_temporary(wt);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (groups == 1) {
|
||||||
|
gemm_conv(
|
||||||
|
encoder,
|
||||||
|
in,
|
||||||
|
wt,
|
||||||
|
out,
|
||||||
|
strides,
|
||||||
|
padding,
|
||||||
|
kernel_dilation,
|
||||||
|
input_dilation,
|
||||||
|
flip,
|
||||||
|
s);
|
||||||
|
} else {
|
||||||
|
gemm_grouped_conv(
|
||||||
|
encoder,
|
||||||
|
in,
|
||||||
|
wt,
|
||||||
|
out,
|
||||||
|
strides,
|
||||||
|
padding,
|
||||||
|
kernel_dilation,
|
||||||
|
input_dilation,
|
||||||
|
groups,
|
||||||
|
flip,
|
||||||
|
s);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace mlx::core
|
||||||
217
mlx/backend/cuda/conv/gemm_conv.cu
Normal file
217
mlx/backend/cuda/conv/gemm_conv.cu
Normal file
@@ -0,0 +1,217 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/conv/conv.h"
|
||||||
|
#include "mlx/backend/cuda/gemms/cublas_gemm.h"
|
||||||
|
#include "mlx/backend/cuda/kernel_utils.cuh"
|
||||||
|
#include "mlx/dtype_utils.h"
|
||||||
|
|
||||||
|
#include <cooperative_groups.h>
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
|
||||||
|
namespace cu {
|
||||||
|
|
||||||
|
namespace cg = cooperative_groups;
|
||||||
|
|
||||||
|
template <typename T, int NDIM>
|
||||||
|
__global__ void naive_unfold_nd(
|
||||||
|
const T* in,
|
||||||
|
T* out,
|
||||||
|
int filter_size,
|
||||||
|
int out_pixels,
|
||||||
|
const __grid_constant__ ConvParams<NDIM> params) {
|
||||||
|
auto block = cg::this_thread_block();
|
||||||
|
auto tid = block.group_index();
|
||||||
|
auto lid = block.thread_index();
|
||||||
|
|
||||||
|
int index_batch = tid.z / out_pixels; // [0, N)
|
||||||
|
int index_out_spatial = tid.z % out_pixels; // [0, H_out * W_out)
|
||||||
|
int index_wt_spatial =
|
||||||
|
tid.x * block.dim_threads().x + lid.x; // [0, H_wt * W_wt)
|
||||||
|
|
||||||
|
if (index_wt_spatial >= filter_size / params.C) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
in += tid.y; // [0, C)
|
||||||
|
out += tid.z * filter_size + index_wt_spatial * params.C + tid.y;
|
||||||
|
|
||||||
|
bool valid = index_batch < params.N;
|
||||||
|
|
||||||
|
// Get the coordinates in input.
|
||||||
|
int index_in[NDIM] = {};
|
||||||
|
#pragma unroll
|
||||||
|
for (int i = NDIM - 1; i >= 0; --i) {
|
||||||
|
int index_out = index_out_spatial % params.out_spatial_dims[i];
|
||||||
|
int index_wt = index_wt_spatial % params.wt_spatial_dims[i];
|
||||||
|
|
||||||
|
if (params.flip) {
|
||||||
|
index_wt = params.wt_spatial_dims[i] - index_wt - 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
int index = index_out * params.strides[i] - params.padding[i] +
|
||||||
|
index_wt * params.kernel_dilation[i];
|
||||||
|
int index_max =
|
||||||
|
1 + params.input_dilation[i] * (params.in_spatial_dims[i] - 1);
|
||||||
|
|
||||||
|
valid &= (index >= 0) && (index < index_max) &&
|
||||||
|
(index % params.input_dilation[i] == 0);
|
||||||
|
|
||||||
|
index_in[i] = index / params.input_dilation[i];
|
||||||
|
|
||||||
|
index_out_spatial /= params.out_spatial_dims[i];
|
||||||
|
index_wt_spatial /= params.wt_spatial_dims[i];
|
||||||
|
}
|
||||||
|
|
||||||
|
if (valid) {
|
||||||
|
int in_offset = index_batch * params.in_strides[0];
|
||||||
|
#pragma unroll
|
||||||
|
for (int i = 0; i < NDIM; ++i) {
|
||||||
|
in_offset += index_in[i] * params.in_strides[i + 1];
|
||||||
|
}
|
||||||
|
*out = in[in_offset];
|
||||||
|
} else {
|
||||||
|
*out = T{0};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace cu
|
||||||
|
|
||||||
|
template <int NDIM>
|
||||||
|
array unfold_inputs_nd(
|
||||||
|
cu::CommandEncoder& encoder,
|
||||||
|
const array& in,
|
||||||
|
int mat_M,
|
||||||
|
int mat_K,
|
||||||
|
int mat_N,
|
||||||
|
ConvParams<NDIM>& params) {
|
||||||
|
array unfolded({mat_M, mat_K}, in.dtype(), nullptr, {});
|
||||||
|
unfolded.set_data(allocator::malloc(unfolded.nbytes()));
|
||||||
|
encoder.add_temporary(unfolded);
|
||||||
|
|
||||||
|
int filter_size = params.C;
|
||||||
|
#pragma unroll
|
||||||
|
for (int i = 0; i < NDIM; ++i) {
|
||||||
|
filter_size *= params.wt_spatial_dims[i];
|
||||||
|
}
|
||||||
|
|
||||||
|
int out_pixels = 1;
|
||||||
|
#pragma unroll
|
||||||
|
for (int i = 0; i < NDIM; ++i) {
|
||||||
|
out_pixels *= params.out_spatial_dims[i];
|
||||||
|
}
|
||||||
|
|
||||||
|
int wt_spatial_size = mat_K / params.C;
|
||||||
|
dim3 block_dims;
|
||||||
|
block_dims.x = std::min(std::max(wt_spatial_size, 32), 1024);
|
||||||
|
dim3 num_blocks;
|
||||||
|
num_blocks.x = cuda::ceil_div(wt_spatial_size, block_dims.x);
|
||||||
|
num_blocks.y = params.C;
|
||||||
|
num_blocks.z = mat_M;
|
||||||
|
|
||||||
|
encoder.set_input_array(in);
|
||||||
|
encoder.set_output_array(unfolded);
|
||||||
|
dispatch_float_types(in.dtype(), "unfold", [&](auto type_tag) {
|
||||||
|
using DataType = cuda_type_t<MLX_GET_TYPE(type_tag)>;
|
||||||
|
encoder.add_kernel_node(
|
||||||
|
cu::naive_unfold_nd<DataType, NDIM>,
|
||||||
|
num_blocks,
|
||||||
|
block_dims,
|
||||||
|
0,
|
||||||
|
in.data<DataType>(),
|
||||||
|
unfolded.data<DataType>(),
|
||||||
|
filter_size,
|
||||||
|
out_pixels,
|
||||||
|
params);
|
||||||
|
});
|
||||||
|
|
||||||
|
return unfolded;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <int NDIM>
|
||||||
|
void gemm_conv_nd(
|
||||||
|
cu::CommandEncoder& encoder,
|
||||||
|
const array& in,
|
||||||
|
const array& wt,
|
||||||
|
array& out,
|
||||||
|
ConvParams<NDIM>& params,
|
||||||
|
Stream s) {
|
||||||
|
// Get gemm shapes.
|
||||||
|
int mat_M = out.size() / params.O; // N * H_out * W_out
|
||||||
|
int mat_K = wt.size() / params.O; // C * H_wt * W_wt
|
||||||
|
int mat_N = params.O; // O
|
||||||
|
|
||||||
|
// Unfold input to (N * H_out * W_out, C * H_wt * W_wt) for gemm.
|
||||||
|
array in_unfolded =
|
||||||
|
unfold_inputs_nd<NDIM>(encoder, in, mat_M, mat_K, mat_N, params);
|
||||||
|
|
||||||
|
// Reshape weight to (C * H_wt * W_wt, O) for gemm.
|
||||||
|
array wt_reshaped({mat_K, mat_N}, wt.dtype(), nullptr, {});
|
||||||
|
wt_reshaped.copy_shared_buffer(
|
||||||
|
wt,
|
||||||
|
{1, mat_K},
|
||||||
|
{false, false, /* col_contiguous */ true},
|
||||||
|
wt.data_size());
|
||||||
|
|
||||||
|
// Single batch.
|
||||||
|
Shape batch_shape{1};
|
||||||
|
Strides a_batch_strides{0};
|
||||||
|
Strides b_batch_strides{0};
|
||||||
|
|
||||||
|
// Run matmul.
|
||||||
|
CublasGemm gemm(
|
||||||
|
encoder.device(),
|
||||||
|
in.dtype(),
|
||||||
|
false, // a_transposed
|
||||||
|
mat_M, // a_rows
|
||||||
|
mat_K, // a_cols
|
||||||
|
mat_K, // lda
|
||||||
|
true, // b_transposed
|
||||||
|
mat_K, // b_rows
|
||||||
|
mat_N, // b_cols
|
||||||
|
mat_K, // ldb
|
||||||
|
batch_shape.back(),
|
||||||
|
a_batch_strides.back(),
|
||||||
|
b_batch_strides.back());
|
||||||
|
gemm.run(
|
||||||
|
encoder,
|
||||||
|
out,
|
||||||
|
in_unfolded,
|
||||||
|
wt_reshaped,
|
||||||
|
batch_shape,
|
||||||
|
a_batch_strides,
|
||||||
|
b_batch_strides);
|
||||||
|
}
|
||||||
|
|
||||||
|
void gemm_conv(
|
||||||
|
cu::CommandEncoder& encoder,
|
||||||
|
const array& in,
|
||||||
|
const array& wt,
|
||||||
|
array& out,
|
||||||
|
const std::vector<int>& strides,
|
||||||
|
const std::vector<int>& padding,
|
||||||
|
const std::vector<int>& kernel_dilation,
|
||||||
|
const std::vector<int>& input_dilation,
|
||||||
|
bool flip,
|
||||||
|
Stream s) {
|
||||||
|
int conv_ndim = in.ndim() - 2;
|
||||||
|
if (conv_ndim < 1 || conv_ndim > 3) {
|
||||||
|
throw std::runtime_error(
|
||||||
|
fmt::format("[conv] Unsupported gemm_conv for {}D conv.", conv_ndim));
|
||||||
|
}
|
||||||
|
dispatch_1_2_3(conv_ndim, [&](auto ndim_constant) {
|
||||||
|
ConvParams<ndim_constant()> params(
|
||||||
|
in,
|
||||||
|
wt,
|
||||||
|
out,
|
||||||
|
strides,
|
||||||
|
padding,
|
||||||
|
kernel_dilation,
|
||||||
|
input_dilation,
|
||||||
|
1, // groups
|
||||||
|
flip);
|
||||||
|
gemm_conv_nd<ndim_constant()>(encoder, in, wt, out, params, s);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace mlx::core
|
||||||
231
mlx/backend/cuda/conv/gemm_grouped_conv.cu
Normal file
231
mlx/backend/cuda/conv/gemm_grouped_conv.cu
Normal file
@@ -0,0 +1,231 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/conv/conv.h"
|
||||||
|
#include "mlx/backend/cuda/gemms/cublas_gemm.h"
|
||||||
|
#include "mlx/backend/cuda/kernel_utils.cuh"
|
||||||
|
#include "mlx/dtype_utils.h"
|
||||||
|
|
||||||
|
#include <cooperative_groups.h>
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
|
||||||
|
namespace cu {
|
||||||
|
|
||||||
|
namespace cg = cooperative_groups;
|
||||||
|
|
||||||
|
template <typename T, int NDIM>
|
||||||
|
__global__ void naive_grouped_unfold_transpose_nd(
|
||||||
|
const T* in,
|
||||||
|
T* out,
|
||||||
|
int filter_size,
|
||||||
|
int out_pixels,
|
||||||
|
const __grid_constant__ ConvParams<NDIM> params) {
|
||||||
|
auto block = cg::this_thread_block();
|
||||||
|
auto tid = block.group_index();
|
||||||
|
auto lid = block.thread_index();
|
||||||
|
|
||||||
|
int index_batch = tid.z / out_pixels; // [0, N)
|
||||||
|
int index_out_spatial = tid.z % out_pixels; // [0, H_out * W_out)
|
||||||
|
int index_wt_spatial =
|
||||||
|
tid.x * block.dim_threads().x + lid.x; // [0, H_wt * W_wt)
|
||||||
|
|
||||||
|
if (index_wt_spatial >= filter_size / params.C) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
in += tid.y; // [0, C)
|
||||||
|
out += tid.z * filter_size + tid.y * (filter_size / params.C);
|
||||||
|
|
||||||
|
bool valid = index_batch < params.N;
|
||||||
|
|
||||||
|
// Get the coordinates in input.
|
||||||
|
int index_in[NDIM] = {};
|
||||||
|
int wt_stride = 1;
|
||||||
|
#pragma unroll
|
||||||
|
for (int i = NDIM - 1; i >= 0; --i) {
|
||||||
|
int index_out = index_out_spatial % params.out_spatial_dims[i];
|
||||||
|
int index_wt = index_wt_spatial % params.wt_spatial_dims[i];
|
||||||
|
out += index_wt * wt_stride;
|
||||||
|
|
||||||
|
if (params.flip) {
|
||||||
|
index_wt = params.wt_spatial_dims[i] - index_wt - 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
int index = index_out * params.strides[i] - params.padding[i] +
|
||||||
|
index_wt * params.kernel_dilation[i];
|
||||||
|
int index_max =
|
||||||
|
1 + params.input_dilation[i] * (params.in_spatial_dims[i] - 1);
|
||||||
|
|
||||||
|
valid &= (index >= 0) && (index < index_max) &&
|
||||||
|
(index % params.input_dilation[i] == 0);
|
||||||
|
|
||||||
|
index_in[i] = index / params.input_dilation[i];
|
||||||
|
|
||||||
|
index_out_spatial /= params.out_spatial_dims[i];
|
||||||
|
index_wt_spatial /= params.wt_spatial_dims[i];
|
||||||
|
wt_stride *= params.wt_spatial_dims[i];
|
||||||
|
}
|
||||||
|
|
||||||
|
if (valid) {
|
||||||
|
int in_offset = index_batch * params.in_strides[0];
|
||||||
|
#pragma unroll
|
||||||
|
for (int i = 0; i < NDIM; ++i) {
|
||||||
|
in_offset += index_in[i] * params.in_strides[i + 1];
|
||||||
|
}
|
||||||
|
*out = in[in_offset];
|
||||||
|
} else {
|
||||||
|
*out = T{0};
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace cu
|
||||||
|
|
||||||
|
template <int NDIM>
|
||||||
|
array grouped_unfold_transpose_inputs_nd(
|
||||||
|
cu::CommandEncoder& encoder,
|
||||||
|
const array& in,
|
||||||
|
int mat_M,
|
||||||
|
int mat_K,
|
||||||
|
int mat_N,
|
||||||
|
ConvParams<NDIM>& params) {
|
||||||
|
array unfolded({mat_M, mat_K * params.groups}, in.dtype(), nullptr, {});
|
||||||
|
unfolded.set_data(allocator::malloc(unfolded.nbytes()));
|
||||||
|
encoder.add_temporary(unfolded);
|
||||||
|
|
||||||
|
int filter_size = params.C;
|
||||||
|
#pragma unroll
|
||||||
|
for (int i = 0; i < NDIM; ++i) {
|
||||||
|
filter_size *= params.wt_spatial_dims[i];
|
||||||
|
}
|
||||||
|
|
||||||
|
int out_pixels = 1;
|
||||||
|
#pragma unroll
|
||||||
|
for (int i = 0; i < NDIM; ++i) {
|
||||||
|
out_pixels *= params.out_spatial_dims[i];
|
||||||
|
}
|
||||||
|
|
||||||
|
int wt_spatial_size = (mat_K * params.groups) / params.C;
|
||||||
|
dim3 block_dims;
|
||||||
|
block_dims.x = std::min(std::max(wt_spatial_size, 32), 1024);
|
||||||
|
dim3 num_blocks;
|
||||||
|
num_blocks.x = cuda::ceil_div(wt_spatial_size, block_dims.x);
|
||||||
|
num_blocks.y = params.C;
|
||||||
|
num_blocks.z = mat_M;
|
||||||
|
|
||||||
|
encoder.set_input_array(in);
|
||||||
|
encoder.set_output_array(unfolded);
|
||||||
|
dispatch_float_types(in.dtype(), "unfold", [&](auto type_tag) {
|
||||||
|
using DataType = cuda_type_t<MLX_GET_TYPE(type_tag)>;
|
||||||
|
encoder.add_kernel_node(
|
||||||
|
cu::naive_grouped_unfold_transpose_nd<DataType, NDIM>,
|
||||||
|
num_blocks,
|
||||||
|
block_dims,
|
||||||
|
0,
|
||||||
|
in.data<DataType>(),
|
||||||
|
unfolded.data<DataType>(),
|
||||||
|
filter_size,
|
||||||
|
out_pixels,
|
||||||
|
params);
|
||||||
|
});
|
||||||
|
|
||||||
|
return unfolded;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <int NDIM>
|
||||||
|
void gemm_grouped_conv_nd(
|
||||||
|
cu::CommandEncoder& encoder,
|
||||||
|
const array& in,
|
||||||
|
const array& wt,
|
||||||
|
array& out,
|
||||||
|
ConvParams<NDIM>& params,
|
||||||
|
Stream s) {
|
||||||
|
// Get gemm shapes.
|
||||||
|
int C_per_group = params.C / params.groups;
|
||||||
|
int O_per_group = params.O / params.groups;
|
||||||
|
int mat_M = out.size() / params.O; // N * H_out * W_out
|
||||||
|
int mat_K = wt.size() / params.O; // C_per_group * H_wt * W_wt
|
||||||
|
int mat_N = O_per_group; // O_per_group
|
||||||
|
|
||||||
|
// Unfold input to (N * H_out * W_out, C * H_wt * W_wt) for gemm.
|
||||||
|
array in_unfolded = grouped_unfold_transpose_inputs_nd<NDIM>(
|
||||||
|
encoder, in, mat_M, mat_K, mat_N, params);
|
||||||
|
|
||||||
|
// Reshape weight to (O, C_per_group, H_wt * W_wt) for gemm.
|
||||||
|
int wt_spatial_size = (wt.size() / wt.shape(0)) / wt.shape(-1);
|
||||||
|
array wt_view(
|
||||||
|
{params.O, C_per_group, wt_spatial_size}, wt.dtype(), nullptr, {});
|
||||||
|
wt_view.copy_shared_buffer(
|
||||||
|
wt, {wt.strides(0), 1, C_per_group}, wt.flags(), wt.size());
|
||||||
|
array wt_reshaped = contiguous_copy_gpu(wt_view, s);
|
||||||
|
|
||||||
|
// Batch with size of groups.
|
||||||
|
Shape batch_shape{params.groups};
|
||||||
|
Strides a_batch_strides{mat_K};
|
||||||
|
Strides b_batch_strides{mat_N * mat_K};
|
||||||
|
|
||||||
|
// Run matmul.
|
||||||
|
CublasGemm gemm(
|
||||||
|
encoder.device(),
|
||||||
|
in.dtype(),
|
||||||
|
false, // a_transposed
|
||||||
|
mat_M, // a_rows
|
||||||
|
mat_K, // a_cols
|
||||||
|
mat_K * params.groups, // lda
|
||||||
|
true, // b_transposed
|
||||||
|
mat_K, // b_rows
|
||||||
|
mat_N, // b_cols
|
||||||
|
mat_K, // ldb
|
||||||
|
batch_shape.back(),
|
||||||
|
a_batch_strides.back(),
|
||||||
|
b_batch_strides.back());
|
||||||
|
gemm.set_out(
|
||||||
|
out.dtype(),
|
||||||
|
false, // out_transposed
|
||||||
|
mat_M, // out_rows
|
||||||
|
mat_N, // out_cols
|
||||||
|
mat_N * params.groups, // out_ld
|
||||||
|
params.groups, // batch_count
|
||||||
|
mat_N); // batch_stride
|
||||||
|
gemm.run(
|
||||||
|
encoder,
|
||||||
|
out,
|
||||||
|
in_unfolded,
|
||||||
|
wt_reshaped,
|
||||||
|
batch_shape,
|
||||||
|
a_batch_strides,
|
||||||
|
b_batch_strides);
|
||||||
|
}
|
||||||
|
|
||||||
|
void gemm_grouped_conv(
|
||||||
|
cu::CommandEncoder& encoder,
|
||||||
|
const array& in,
|
||||||
|
const array& wt,
|
||||||
|
array& out,
|
||||||
|
const std::vector<int>& strides,
|
||||||
|
const std::vector<int>& padding,
|
||||||
|
const std::vector<int>& kernel_dilation,
|
||||||
|
const std::vector<int>& input_dilation,
|
||||||
|
int groups,
|
||||||
|
bool flip,
|
||||||
|
Stream s) {
|
||||||
|
int conv_ndim = in.ndim() - 2;
|
||||||
|
if (conv_ndim < 1 || conv_ndim > 3) {
|
||||||
|
throw std::runtime_error(
|
||||||
|
fmt::format("[conv] Unsupported gemm_conv for {}D conv.", conv_ndim));
|
||||||
|
}
|
||||||
|
dispatch_1_2_3(conv_ndim, [&](auto ndim_constant) {
|
||||||
|
ConvParams<ndim_constant()> params(
|
||||||
|
in,
|
||||||
|
wt,
|
||||||
|
out,
|
||||||
|
strides,
|
||||||
|
padding,
|
||||||
|
kernel_dilation,
|
||||||
|
input_dilation,
|
||||||
|
groups,
|
||||||
|
flip);
|
||||||
|
gemm_grouped_conv_nd<ndim_constant()>(encoder, in, wt, out, params, s);
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace mlx::core
|
||||||
@@ -15,8 +15,8 @@ void copy_gpu_inplace(
|
|||||||
int64_t offset_out,
|
int64_t offset_out,
|
||||||
CopyType ctype,
|
CopyType ctype,
|
||||||
const Stream& s,
|
const Stream& s,
|
||||||
const std::optional<array>& dynamic_offset_in,
|
std::optional<array> dynamic_offset_in,
|
||||||
const std::optional<array>& dynamic_offset_out) {
|
std::optional<array> dynamic_offset_out) {
|
||||||
if (out.size() == 0) {
|
if (out.size() == 0) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@@ -44,6 +44,16 @@ void copy_gpu_inplace(
|
|||||||
strides_vec[0]);
|
strides_vec[0]);
|
||||||
} else {
|
} else {
|
||||||
if (dynamic_offset_in || dynamic_offset_out) {
|
if (dynamic_offset_in || dynamic_offset_out) {
|
||||||
|
if (!dynamic_offset_in) {
|
||||||
|
dynamic_offset_in = array(0, int64);
|
||||||
|
encoder.add_temporary(*dynamic_offset_in);
|
||||||
|
}
|
||||||
|
if (!dynamic_offset_out) {
|
||||||
|
dynamic_offset_out = array(0, int64);
|
||||||
|
encoder.add_temporary(*dynamic_offset_out);
|
||||||
|
}
|
||||||
|
encoder.set_input_array(*dynamic_offset_in);
|
||||||
|
encoder.set_input_array(*dynamic_offset_out);
|
||||||
copy_general_dynamic(
|
copy_general_dynamic(
|
||||||
encoder,
|
encoder,
|
||||||
ctype,
|
ctype,
|
||||||
@@ -54,8 +64,8 @@ void copy_gpu_inplace(
|
|||||||
shape_collapsed,
|
shape_collapsed,
|
||||||
strides_vec[0],
|
strides_vec[0],
|
||||||
strides_vec[1],
|
strides_vec[1],
|
||||||
dynamic_offset_in ? *dynamic_offset_in : array(0, int64),
|
*dynamic_offset_in,
|
||||||
dynamic_offset_out ? *dynamic_offset_out : array(0, int64));
|
*dynamic_offset_out);
|
||||||
} else {
|
} else {
|
||||||
copy_general(
|
copy_general(
|
||||||
encoder,
|
encoder,
|
||||||
|
|||||||
@@ -10,37 +10,80 @@ namespace cu {
|
|||||||
|
|
||||||
namespace cg = cooperative_groups;
|
namespace cg = cooperative_groups;
|
||||||
|
|
||||||
template <typename In, typename Out, typename IdxT, int NDIM>
|
template <typename In, typename Out, typename IdxT, int NDIM, int N_READS>
|
||||||
__global__ void copy_gg_nd(
|
__global__ void copy_gg_nd(
|
||||||
const In* in,
|
const In* in,
|
||||||
Out* out,
|
Out* out,
|
||||||
IdxT size,
|
IdxT size_rest,
|
||||||
const __grid_constant__ cuda::std::array<int32_t, NDIM> shape,
|
const __grid_constant__ cuda::std::array<int32_t, NDIM> shape,
|
||||||
const __grid_constant__ cuda::std::array<int64_t, NDIM> strides_in,
|
const __grid_constant__ cuda::std::array<int64_t, NDIM> strides_in,
|
||||||
const __grid_constant__ cuda::std::array<int64_t, NDIM> strides_out) {
|
const __grid_constant__ cuda::std::array<int64_t, NDIM> strides_out) {
|
||||||
IdxT index = cg::this_grid().thread_rank();
|
auto block = cg::this_thread_block();
|
||||||
if (index < size) {
|
auto grid = cg::this_grid();
|
||||||
auto [idx_in, idx_out] = elem_to_loc_nd<NDIM>(
|
IdxT index_rest =
|
||||||
index, shape.data(), strides_in.data(), strides_out.data());
|
grid.block_index().y * block.dim_threads().y + block.thread_index().y;
|
||||||
out[idx_out] = CastOp<In, Out>{}(in[idx_in]);
|
if (index_rest >= size_rest) {
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
auto shape_x = shape[NDIM - 1];
|
||||||
|
auto in_stride_x = strides_in[NDIM - 1];
|
||||||
|
auto out_stride_x = strides_out[NDIM - 1];
|
||||||
|
IdxT index_x =
|
||||||
|
grid.block_index().x * block.dim_threads().x + block.thread_index().x;
|
||||||
|
auto [idx_in, idx_out] = elem_to_loc_nd<NDIM>(
|
||||||
|
index_rest * shape_x,
|
||||||
|
shape.data(),
|
||||||
|
strides_in.data(),
|
||||||
|
strides_out.data());
|
||||||
|
|
||||||
|
auto in_vec =
|
||||||
|
load_vector<N_READS>(in + idx_in, index_x, shape_x, in_stride_x, In(0));
|
||||||
|
AlignedVector<Out, N_READS> out_vec;
|
||||||
|
#pragma unroll
|
||||||
|
for (int i = 0; i < N_READS; ++i) {
|
||||||
|
out_vec[i] = CastOp<In, Out>{}(in_vec[i]);
|
||||||
|
}
|
||||||
|
store_vector(out + idx_out, index_x, out_vec, shape_x, out_stride_x);
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename In, typename Out, typename IdxT>
|
template <typename In, typename Out, typename IdxT, int N_READS>
|
||||||
__global__ void copy_gg(
|
__global__ void copy_gg(
|
||||||
const In* in,
|
const In* in,
|
||||||
Out* out,
|
Out* out,
|
||||||
IdxT size,
|
IdxT size_rest,
|
||||||
const __grid_constant__ Shape shape,
|
const __grid_constant__ Shape shape,
|
||||||
const __grid_constant__ Strides strides_in,
|
const __grid_constant__ Strides strides_in,
|
||||||
const __grid_constant__ Strides strides_out,
|
const __grid_constant__ Strides strides_out,
|
||||||
int ndim) {
|
int ndim) {
|
||||||
IdxT index = cg::this_grid().thread_rank();
|
auto block = cg::this_thread_block();
|
||||||
if (index < size) {
|
auto grid = cg::this_grid();
|
||||||
auto [idx_in, idx_out] = elem_to_loc(
|
IdxT index_rest =
|
||||||
index, shape.data(), strides_in.data(), strides_out.data(), ndim);
|
grid.block_index().y * block.dim_threads().y + block.thread_index().y;
|
||||||
out[idx_out] = CastOp<In, Out>{}(in[idx_in]);
|
if (index_rest >= size_rest) {
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
auto shape_x = shape[ndim - 1];
|
||||||
|
auto in_stride_x = strides_in[ndim - 1];
|
||||||
|
auto out_stride_x = strides_out[ndim - 1];
|
||||||
|
IdxT index_x =
|
||||||
|
grid.block_index().x * block.dim_threads().x + block.thread_index().x;
|
||||||
|
auto [idx_in, idx_out] = elem_to_loc(
|
||||||
|
index_rest * shape_x,
|
||||||
|
shape.data(),
|
||||||
|
strides_in.data(),
|
||||||
|
strides_out.data(),
|
||||||
|
ndim);
|
||||||
|
|
||||||
|
auto in_vec =
|
||||||
|
load_vector<N_READS>(in + idx_in, index_x, shape_x, in_stride_x, In(0));
|
||||||
|
AlignedVector<Out, N_READS> out_vec;
|
||||||
|
#pragma unroll
|
||||||
|
for (int i = 0; i < N_READS; ++i) {
|
||||||
|
out_vec[i] = CastOp<In, Out>{}(in_vec[i]);
|
||||||
|
}
|
||||||
|
store_vector(out + idx_out, index_x, out_vec, shape_x, out_stride_x);
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace cu
|
} // namespace cu
|
||||||
@@ -69,33 +112,52 @@ void copy_general(
|
|||||||
size_t data_size = 1;
|
size_t data_size = 1;
|
||||||
for (auto& s : shape)
|
for (auto& s : shape)
|
||||||
data_size *= s;
|
data_size *= s;
|
||||||
|
|
||||||
|
int work_per_thread = 1;
|
||||||
|
auto dim0 = ndim > 0 ? shape.back() : 1;
|
||||||
|
auto rest = data_size / dim0;
|
||||||
|
if (dim0 >= 4) {
|
||||||
|
work_per_thread = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
dim0 = (dim0 + work_per_thread - 1) / work_per_thread;
|
||||||
|
auto block_dims = get_block_dims(dim0, rest, 1);
|
||||||
|
uint32_t num_blocks_x = cuda::ceil_div(dim0, block_dims.x);
|
||||||
|
uint32_t num_blocks_y = cuda::ceil_div(rest, block_dims.y);
|
||||||
|
|
||||||
if (ndim <= 3) {
|
if (ndim <= 3) {
|
||||||
dispatch_1_2_3(ndim, [&](auto ndim_constant) {
|
dispatch_1_2_3(ndim, [&](auto ndim_constant) {
|
||||||
auto [num_blocks, block_dims] =
|
auto kernel =
|
||||||
get_launch_args(data_size, shape, out.strides(), large());
|
cu::copy_gg_nd<InType, OutType, IdxT, ndim_constant(), 1>;
|
||||||
|
if (work_per_thread == 4) {
|
||||||
|
kernel =
|
||||||
|
cu::copy_gg_nd<InType, OutType, IdxT, ndim_constant(), 4>;
|
||||||
|
}
|
||||||
encoder.add_kernel_node(
|
encoder.add_kernel_node(
|
||||||
cu::copy_gg_nd<InType, OutType, IdxT, ndim_constant()>,
|
kernel,
|
||||||
num_blocks,
|
{num_blocks_x, num_blocks_y},
|
||||||
block_dims,
|
block_dims,
|
||||||
0,
|
0,
|
||||||
in_ptr,
|
in_ptr,
|
||||||
out_ptr,
|
out_ptr,
|
||||||
data_size,
|
rest,
|
||||||
const_param<ndim_constant()>(shape),
|
const_param<ndim_constant()>(shape),
|
||||||
const_param<ndim_constant()>(strides_in),
|
const_param<ndim_constant()>(strides_in),
|
||||||
const_param<ndim_constant()>(strides_out));
|
const_param<ndim_constant()>(strides_out));
|
||||||
});
|
});
|
||||||
} else { // ndim >= 4
|
} else { // ndim >= 4
|
||||||
auto [num_blocks, block_dims] =
|
auto kernel = cu::copy_gg<InType, OutType, IdxT, 1>;
|
||||||
get_launch_args(data_size, shape, out.strides(), large());
|
if (work_per_thread == 4) {
|
||||||
|
kernel = cu::copy_gg<InType, OutType, IdxT, 4>;
|
||||||
|
}
|
||||||
encoder.add_kernel_node(
|
encoder.add_kernel_node(
|
||||||
cu::copy_gg<InType, OutType, IdxT>,
|
kernel,
|
||||||
num_blocks,
|
{num_blocks_x, num_blocks_y},
|
||||||
block_dims,
|
block_dims,
|
||||||
0,
|
0,
|
||||||
in_ptr,
|
in_ptr,
|
||||||
out_ptr,
|
out_ptr,
|
||||||
data_size,
|
rest,
|
||||||
const_param(shape),
|
const_param(shape),
|
||||||
const_param(strides_in),
|
const_param(strides_in),
|
||||||
const_param(strides_out),
|
const_param(strides_out),
|
||||||
|
|||||||
@@ -10,33 +10,67 @@ namespace cu {
|
|||||||
|
|
||||||
namespace cg = cooperative_groups;
|
namespace cg = cooperative_groups;
|
||||||
|
|
||||||
template <typename In, typename Out, typename IdxT, int NDIM>
|
template <typename In, typename Out, typename IdxT, int NDIM, int N_READS>
|
||||||
__global__ void copy_g_nd(
|
__global__ void copy_g_nd(
|
||||||
const In* in,
|
const In* in,
|
||||||
Out* out,
|
Out* out,
|
||||||
IdxT size,
|
IdxT size_rest,
|
||||||
const __grid_constant__ cuda::std::array<int32_t, NDIM> shape,
|
const __grid_constant__ cuda::std::array<int32_t, NDIM> shape,
|
||||||
const __grid_constant__ cuda::std::array<int64_t, NDIM> strides_in) {
|
const __grid_constant__ cuda::std::array<int64_t, NDIM> strides) {
|
||||||
IdxT index = cg::this_grid().thread_rank();
|
auto block = cg::this_thread_block();
|
||||||
if (index < size) {
|
auto grid = cg::this_grid();
|
||||||
IdxT idx_in = elem_to_loc_nd<NDIM>(index, shape.data(), strides_in.data());
|
IdxT index_rest =
|
||||||
out[index] = CastOp<In, Out>{}(in[idx_in]);
|
grid.block_index().y * block.dim_threads().y + block.thread_index().y;
|
||||||
|
if (index_rest >= size_rest) {
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
auto shape_x = shape[NDIM - 1];
|
||||||
|
auto stride_x = strides[NDIM - 1];
|
||||||
|
IdxT index_x =
|
||||||
|
grid.block_index().x * block.dim_threads().x + block.thread_index().x;
|
||||||
|
auto idx =
|
||||||
|
elem_to_loc_nd<NDIM>(index_rest * shape_x, shape.data(), strides.data());
|
||||||
|
auto in_vec =
|
||||||
|
load_vector<N_READS>(in + idx, index_x, shape_x, stride_x, In(0));
|
||||||
|
AlignedVector<Out, N_READS> out_vec;
|
||||||
|
#pragma unroll
|
||||||
|
for (int i = 0; i < N_READS; ++i) {
|
||||||
|
out_vec[i] = CastOp<In, Out>{}(in_vec[i]);
|
||||||
|
}
|
||||||
|
store_vector(out + shape_x * index_rest, index_x, out_vec, shape_x);
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename In, typename Out, typename IdxT>
|
template <typename In, typename Out, typename IdxT, int N_READS>
|
||||||
__global__ void copy_g(
|
__global__ void copy_g(
|
||||||
const In* in,
|
const In* in,
|
||||||
Out* out,
|
Out* out,
|
||||||
IdxT size,
|
IdxT size_rest,
|
||||||
const __grid_constant__ Shape shape,
|
const __grid_constant__ Shape shape,
|
||||||
const __grid_constant__ Strides strides_in,
|
const __grid_constant__ Strides strides,
|
||||||
int ndim) {
|
int ndim) {
|
||||||
IdxT index = cg::this_grid().thread_rank();
|
auto block = cg::this_thread_block();
|
||||||
if (index < size) {
|
auto grid = cg::this_grid();
|
||||||
IdxT idx_in = elem_to_loc(index, shape.data(), strides_in.data(), ndim);
|
IdxT index_rest =
|
||||||
out[index] = CastOp<In, Out>{}(in[idx_in]);
|
grid.block_index().y * block.dim_threads().y + block.thread_index().y;
|
||||||
|
if (index_rest >= size_rest) {
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
auto shape_x = shape[ndim - 1];
|
||||||
|
auto stride_x = strides[ndim - 1];
|
||||||
|
IdxT index_x =
|
||||||
|
grid.block_index().x * block.dim_threads().x + block.thread_index().x;
|
||||||
|
auto idx =
|
||||||
|
elem_to_loc(index_rest * shape_x, shape.data(), strides.data(), ndim);
|
||||||
|
auto in_vec =
|
||||||
|
load_vector<N_READS>(in + idx, index_x, shape_x, stride_x, In(0));
|
||||||
|
AlignedVector<Out, N_READS> out_vec;
|
||||||
|
#pragma unroll
|
||||||
|
for (int i = 0; i < N_READS; ++i) {
|
||||||
|
out_vec[i] = CastOp<In, Out>{}(in_vec[i]);
|
||||||
|
}
|
||||||
|
store_vector(out + shape_x * index_rest, index_x, out_vec, shape_x);
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace cu
|
} // namespace cu
|
||||||
@@ -61,30 +95,49 @@ void copy_general_input(
|
|||||||
const InType* in_ptr = in.data<InType>() + offset_in;
|
const InType* in_ptr = in.data<InType>() + offset_in;
|
||||||
OutType* out_ptr = out.data<OutType>() + offset_out;
|
OutType* out_ptr = out.data<OutType>() + offset_out;
|
||||||
int ndim = shape.size();
|
int ndim = shape.size();
|
||||||
|
int work_per_thread = 1;
|
||||||
|
auto dim0 = ndim > 0 ? shape.back() : 1;
|
||||||
|
auto rest = out.size() / dim0;
|
||||||
|
if (dim0 >= 4) {
|
||||||
|
work_per_thread = 4;
|
||||||
|
}
|
||||||
|
dim0 = (dim0 + work_per_thread - 1) / work_per_thread;
|
||||||
|
auto block_dims = get_block_dims(dim0, rest, 1);
|
||||||
|
uint32_t num_blocks_x = cuda::ceil_div(dim0, block_dims.x);
|
||||||
|
uint32_t num_blocks_y = cuda::ceil_div(rest, block_dims.y);
|
||||||
|
|
||||||
if (ndim <= 3) {
|
if (ndim <= 3) {
|
||||||
dispatch_1_2_3(ndim, [&](auto dims_constant) {
|
dispatch_1_2_3(ndim, [&](auto dims_constant) {
|
||||||
auto [num_blocks, block_dims] = get_launch_args(out, large());
|
auto kernel =
|
||||||
|
cu::copy_g_nd<InType, OutType, IdxT, dims_constant(), 1>;
|
||||||
|
if (work_per_thread == 4) {
|
||||||
|
kernel =
|
||||||
|
cu::copy_g_nd<InType, OutType, IdxT, dims_constant(), 4>;
|
||||||
|
}
|
||||||
encoder.add_kernel_node(
|
encoder.add_kernel_node(
|
||||||
cu::copy_g_nd<InType, OutType, IdxT, dims_constant()>,
|
kernel,
|
||||||
num_blocks,
|
{num_blocks_x, num_blocks_y},
|
||||||
block_dims,
|
block_dims,
|
||||||
0,
|
0,
|
||||||
in_ptr,
|
in_ptr,
|
||||||
out_ptr,
|
out_ptr,
|
||||||
out.size(),
|
rest,
|
||||||
const_param<dims_constant()>(shape),
|
const_param<dims_constant()>(shape),
|
||||||
const_param<dims_constant()>(strides_in));
|
const_param<dims_constant()>(strides_in));
|
||||||
});
|
});
|
||||||
} else { // ndim >= 4
|
} else { // ndim >= 4
|
||||||
auto [num_blocks, block_dims] = get_launch_args(out, large());
|
auto kernel = cu::copy_g<InType, OutType, IdxT, 1>;
|
||||||
|
if (work_per_thread == 4) {
|
||||||
|
kernel = cu::copy_g<InType, OutType, IdxT, 4>;
|
||||||
|
}
|
||||||
encoder.add_kernel_node(
|
encoder.add_kernel_node(
|
||||||
cu::copy_g<InType, OutType, IdxT>,
|
kernel,
|
||||||
num_blocks,
|
{num_blocks_x, num_blocks_y},
|
||||||
block_dims,
|
block_dims,
|
||||||
0,
|
0,
|
||||||
in_ptr,
|
in_ptr,
|
||||||
out_ptr,
|
out_ptr,
|
||||||
out.size(),
|
rest,
|
||||||
const_param(shape),
|
const_param(shape),
|
||||||
const_param(strides_in),
|
const_param(strides_in),
|
||||||
ndim);
|
ndim);
|
||||||
|
|||||||
272
mlx/backend/cuda/cudnn_utils.cpp
Normal file
272
mlx/backend/cuda/cudnn_utils.cpp
Normal file
@@ -0,0 +1,272 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/cudnn_utils.h"
|
||||||
|
#include "mlx/backend/cuda/device.h"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
// Create a cudnn tensor descriptor.
|
||||||
|
template <typename Vec>
|
||||||
|
inline cudnn_frontend::Tensor build_cudnn_tensor(
|
||||||
|
int64_t id,
|
||||||
|
const array& x,
|
||||||
|
const Vec& shape,
|
||||||
|
const Vec& strides) {
|
||||||
|
return cudnn_frontend::TensorBuilder()
|
||||||
|
.setDim(shape.size(), shape.data())
|
||||||
|
.setStrides(strides.size(), strides.data())
|
||||||
|
.setId(id)
|
||||||
|
.setAlignment(get_alignment(x))
|
||||||
|
.setDataType(dtype_to_cudnn_type(x.dtype()))
|
||||||
|
.build();
|
||||||
|
}
|
||||||
|
|
||||||
|
// In MLX a singleton dim (shape[dim] == 1) can have any stride, but in cuDNN
|
||||||
|
// whether a tensor is contiguous is determined with:
|
||||||
|
// shape[dim] == shape[dim + 1] * strides[dim + 1]
|
||||||
|
// So a contiguous array with singleton dims in MLX may be mistakenly treated
|
||||||
|
// as strided in cuDNN, and we work around it by normalizing the strides.
|
||||||
|
Strides normalized_strides(const array& x) {
|
||||||
|
if (!x.flags().row_contiguous || x.ndim() < 2) {
|
||||||
|
return x.strides();
|
||||||
|
}
|
||||||
|
Strides strides = x.strides();
|
||||||
|
for (int i = x.ndim() - 2; i >= 0; --i) {
|
||||||
|
if (x.shape(i) == 1) {
|
||||||
|
strides[i] = x.shape(i + 1) * strides[i + 1];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return strides;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return the shape and strides after transposing from NHWC to NCHW.
|
||||||
|
auto nhwc_to_nchw(SmallVector<int64_t> shape, SmallVector<int64_t> strides) {
|
||||||
|
assert(shape.size() >= 3);
|
||||||
|
shape.insert(shape.begin() + 1, shape.back());
|
||||||
|
shape.erase(shape.end() - 1);
|
||||||
|
strides.insert(strides.begin() + 1, strides.back());
|
||||||
|
strides.erase(strides.end() - 1);
|
||||||
|
return std::make_tuple(std::move(shape), std::move(strides));
|
||||||
|
}
|
||||||
|
|
||||||
|
inline auto nhwc_to_nchw(const array& x) {
|
||||||
|
return nhwc_to_nchw(
|
||||||
|
convert_vector<int64_t>(x.shape()), normalized_strides(x));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return available engines for a |op_graph|.
|
||||||
|
cudnn_frontend::EngineConfigList get_cudnn_engine_configs(
|
||||||
|
cudnnBackendDescriptorType_t backend_type,
|
||||||
|
Dtype dtype,
|
||||||
|
cudnn_frontend::OperationGraph& op_graph,
|
||||||
|
bool use_fallback = true) {
|
||||||
|
SmallVector<cudnn_frontend::GeneratorSource, 2> sources;
|
||||||
|
sources.push_back([](auto& op_graph) {
|
||||||
|
auto heuristics = cudnn_frontend::EngineHeuristicsBuilder()
|
||||||
|
.setOperationGraph(op_graph)
|
||||||
|
.setHeurMode(CUDNN_HEUR_MODE_A)
|
||||||
|
.build();
|
||||||
|
return heuristics.getEngineConfig(heuristics.getEngineConfigCount());
|
||||||
|
});
|
||||||
|
if (use_fallback) {
|
||||||
|
sources.push_back([&backend_type](auto& op_graph) {
|
||||||
|
auto fallback = cudnn_frontend::EngineFallbackListBuilder()
|
||||||
|
.setOperationGraph(op_graph)
|
||||||
|
.setOperation(backend_type)
|
||||||
|
.build();
|
||||||
|
return fallback.getFallbackList();
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
auto configs =
|
||||||
|
cudnn_frontend::EngineConfigGenerator(sources.size(), sources.data())
|
||||||
|
.generate_engine_config(op_graph);
|
||||||
|
|
||||||
|
cudnn_frontend::EngineConfigList filtered_configs;
|
||||||
|
cudnn_frontend::filter(configs, filtered_configs, [dtype](auto c) {
|
||||||
|
if (cudnn_frontend::hasNumericalNote<
|
||||||
|
CUDNN_NUMERICAL_NOTE_DOWN_CONVERT_INPUTS>(c)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if (cudnn_frontend::hasNumericalNote<CUDNN_NUMERICAL_NOTE_TENSOR_CORE>(c) &&
|
||||||
|
dtype == float32 && !env::enable_tf32()) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
});
|
||||||
|
return filtered_configs;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Take |engine_configs| and |op_graph| and find a working execution plans
|
||||||
|
// from them.
|
||||||
|
std::optional<cudnn_frontend::ExecutionPlan>
|
||||||
|
find_cudnn_plan_from_engine_configs(
|
||||||
|
cudnnHandle_t handle,
|
||||||
|
const cudnn_frontend::EngineConfigList& engine_configs,
|
||||||
|
const cudnn_frontend::OperationGraph& op_graph) {
|
||||||
|
auto op_graph_tag = op_graph.getTag();
|
||||||
|
for (const auto& config : engine_configs) {
|
||||||
|
try {
|
||||||
|
return cudnn_frontend::ExecutionPlanBuilder()
|
||||||
|
.setHandle(handle)
|
||||||
|
.setEngineConfig(config, op_graph_tag)
|
||||||
|
.build();
|
||||||
|
} catch (cudnn_frontend::cudnnException& error) {
|
||||||
|
if (error.getCudnnStatus() != CUDNN_STATUS_NOT_SUPPORTED) {
|
||||||
|
throw;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return std::nullopt;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prepare workspace and args to execute plan.
|
||||||
|
template <typename F>
|
||||||
|
bool prepare_cudnn_plan(
|
||||||
|
cu::CommandEncoder& encoder,
|
||||||
|
cudnn_frontend::ExecutionPlan& plan,
|
||||||
|
int num_args,
|
||||||
|
const int64_t* uids,
|
||||||
|
void** data_ptrs,
|
||||||
|
F&& execute) {
|
||||||
|
int workspace_size = plan.getWorkspaceSize();
|
||||||
|
array workspace(
|
||||||
|
workspace_size > 0 ? allocator::malloc(workspace_size)
|
||||||
|
: allocator::Buffer(nullptr),
|
||||||
|
{workspace_size},
|
||||||
|
uint8);
|
||||||
|
|
||||||
|
auto args = cudnn_frontend::VariantPackBuilder()
|
||||||
|
.setWorkspacePointer(workspace.data<void>())
|
||||||
|
.setDataPointers(num_args, data_ptrs)
|
||||||
|
.setUids(num_args, uids)
|
||||||
|
.build();
|
||||||
|
|
||||||
|
auto handle = encoder.device().cudnn_handle();
|
||||||
|
cudnnSetStream(handle, encoder.stream());
|
||||||
|
|
||||||
|
if (!execute(handle, plan.get_raw_desc(), args.get_raw_desc())) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
encoder.add_temporary(workspace);
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
cudnn_frontend::Tensor build_cudnn_tensor(int64_t id, const array& x) {
|
||||||
|
auto shape = convert_vector<int64_t>(x.shape());
|
||||||
|
return build_cudnn_tensor(id, x, shape, normalized_strides(x));
|
||||||
|
}
|
||||||
|
|
||||||
|
cudnn_frontend::Tensor build_cudnn_tensor_nchw(int64_t id, const array& x) {
|
||||||
|
auto [shape, strides] = nhwc_to_nchw(x);
|
||||||
|
return build_cudnn_tensor(id, x, shape, strides);
|
||||||
|
}
|
||||||
|
|
||||||
|
cudnn_frontend::Tensor build_cudnn_tensor_4d_nchw(int64_t id, const array& x) {
|
||||||
|
if (x.ndim() == 0) {
|
||||||
|
SmallVector<int64_t, 4> scalar_dims = {1, 1, 1, 1};
|
||||||
|
return build_cudnn_tensor(id, x, scalar_dims, scalar_dims);
|
||||||
|
}
|
||||||
|
if (x.ndim() == 1) {
|
||||||
|
int64_t s = x.shape(0);
|
||||||
|
SmallVector<int64_t, 4> shape = {1, x.shape(0), 1, 1};
|
||||||
|
SmallVector<int64_t, 4> strides = {s, 1, s, s};
|
||||||
|
return build_cudnn_tensor(id, x, shape, strides);
|
||||||
|
}
|
||||||
|
if (x.ndim() == 2) {
|
||||||
|
int64_t s =
|
||||||
|
x.flags().row_contiguous ? x.shape(1) * x.strides(1) : x.strides(0);
|
||||||
|
SmallVector<int64_t, 4> shape = {x.shape(0), x.shape(1), 1, 1};
|
||||||
|
SmallVector<int64_t, 4> strides = {s, x.strides(1), s, s};
|
||||||
|
return build_cudnn_tensor(id, x, shape, strides);
|
||||||
|
}
|
||||||
|
if (x.ndim() == 3 || x.ndim() == 4) {
|
||||||
|
return build_cudnn_tensor_nchw(id, x);
|
||||||
|
}
|
||||||
|
throw std::runtime_error(
|
||||||
|
fmt::format("Unsupported array with {} dims.", x.ndim()));
|
||||||
|
}
|
||||||
|
|
||||||
|
cudnn_frontend::Tensor build_cudnn_scalar_4d(int64_t id, Dtype dtype) {
|
||||||
|
SmallVector<int64_t, 4> scalar_dims = {1, 1, 1, 1};
|
||||||
|
return cudnn_frontend::TensorBuilder()
|
||||||
|
.setDim(scalar_dims.size(), scalar_dims.data())
|
||||||
|
.setStrides(scalar_dims.size(), scalar_dims.data())
|
||||||
|
.setId(id)
|
||||||
|
.setAlignment(16)
|
||||||
|
.setDataType(dtype_to_cudnn_type(dtype))
|
||||||
|
.setByValue(true)
|
||||||
|
.build();
|
||||||
|
}
|
||||||
|
|
||||||
|
std::optional<cudnn_frontend::ExecutionPlan> find_cudnn_plan_from_op_graph(
|
||||||
|
cudnnHandle_t handle,
|
||||||
|
cudnnBackendDescriptorType_t backend_type,
|
||||||
|
Dtype dtype,
|
||||||
|
cudnn_frontend::OperationGraph& op_graph) {
|
||||||
|
auto engine_configs = get_cudnn_engine_configs(backend_type, dtype, op_graph);
|
||||||
|
return find_cudnn_plan_from_engine_configs(handle, engine_configs, op_graph);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool encode_cudnn_plan_with_capturing(
|
||||||
|
cu::CommandEncoder& encoder,
|
||||||
|
cudnn_frontend::ExecutionPlan& plan,
|
||||||
|
int num_args,
|
||||||
|
const int64_t* uids,
|
||||||
|
void** data_ptrs) {
|
||||||
|
return prepare_cudnn_plan(
|
||||||
|
encoder,
|
||||||
|
plan,
|
||||||
|
num_args,
|
||||||
|
uids,
|
||||||
|
data_ptrs,
|
||||||
|
[&](auto handle, auto plan, auto args) {
|
||||||
|
auto capture = encoder.capture_context();
|
||||||
|
if (cudnnBackendExecute(handle, plan, args) != CUDNN_STATUS_SUCCESS) {
|
||||||
|
// Discard the captured graph when failed.
|
||||||
|
capture.discard = true;
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
#if CUDNN_VERSION >= 90500
|
||||||
|
bool encode_cudnn_plan_with_graph_api(
|
||||||
|
cu::CommandEncoder& encoder,
|
||||||
|
cudnn_frontend::ExecutionPlan& plan,
|
||||||
|
CudaGraph& graph,
|
||||||
|
int num_args,
|
||||||
|
const int64_t* uids,
|
||||||
|
void** data_ptrs) {
|
||||||
|
return prepare_cudnn_plan(
|
||||||
|
encoder,
|
||||||
|
plan,
|
||||||
|
num_args,
|
||||||
|
uids,
|
||||||
|
data_ptrs,
|
||||||
|
[&](auto handle, auto plan, auto args) {
|
||||||
|
if (!graph) {
|
||||||
|
graph = CudaGraph(encoder.device());
|
||||||
|
if (cudnnBackendPopulateCudaGraph(handle, plan, args, graph) !=
|
||||||
|
CUDNN_STATUS_SUCCESS) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if (cudnnBackendUpdateCudaGraph(handle, plan, args, graph) !=
|
||||||
|
CUDNN_STATUS_SUCCESS) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
encoder.add_graph_node(graph);
|
||||||
|
return true;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
} // namespace mlx::core
|
||||||
164
mlx/backend/cuda/cudnn_utils.h
Normal file
164
mlx/backend/cuda/cudnn_utils.h
Normal file
@@ -0,0 +1,164 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include "mlx/array.h"
|
||||||
|
#include "mlx/backend/cuda/device/config.h"
|
||||||
|
#include "mlx/backend/cuda/utils.h"
|
||||||
|
#include "mlx/dtype_utils.h"
|
||||||
|
|
||||||
|
#include <cudnn_frontend.h>
|
||||||
|
#include <cudnn_frontend_find_plan.h>
|
||||||
|
#include <fmt/format.h>
|
||||||
|
|
||||||
|
#include <algorithm>
|
||||||
|
#include <array>
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
|
||||||
|
namespace cu {
|
||||||
|
class CommandEncoder;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return pointer alignment of |x|'s data.
|
||||||
|
inline uint8_t get_alignment(const array& x) {
|
||||||
|
uint8_t alignment = 1;
|
||||||
|
uintptr_t address = reinterpret_cast<uintptr_t>(x.data<void>());
|
||||||
|
for (; alignment < 32; alignment *= 2) {
|
||||||
|
if (address % (alignment * 2)) {
|
||||||
|
return alignment;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return alignment;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert the type of elements in |vec| to |T|.
|
||||||
|
template <typename T, typename Vec>
|
||||||
|
inline SmallVector<T> convert_vector(const Vec& vec) {
|
||||||
|
return SmallVector<T>(vec.begin(), vec.end());
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return an array that can be used as map key for |vec| with size <= MAX_NDIM.
|
||||||
|
//
|
||||||
|
// There are 2 differences from the const_param util from kernel_utils.cuh:
|
||||||
|
// 1. The rest of array is filled with 0.
|
||||||
|
// 2. This util can be used in .cpp files.
|
||||||
|
template <typename T, template <typename U> class Vec>
|
||||||
|
inline std::array<T, MAX_NDIM> vector_key(const Vec<T>& vec) {
|
||||||
|
if (vec.size() > MAX_NDIM) {
|
||||||
|
throw std::runtime_error(
|
||||||
|
fmt::format("ndim can not be larger than {}.", MAX_NDIM));
|
||||||
|
}
|
||||||
|
std::array<T, MAX_NDIM> result = {};
|
||||||
|
std::copy_n(vec.begin(), vec.size(), result.begin());
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Helpers used by get_data_ptrs to get pointers.
|
||||||
|
inline void* get_data_ptr(const array& arr) {
|
||||||
|
return const_cast<void*>(arr.data<void>());
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T, typename = std::enable_if_t<std::is_scalar_v<T>>>
|
||||||
|
inline void* get_data_ptr(T& scalar) {
|
||||||
|
return &scalar;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Return an array filled with data pointers of args.
|
||||||
|
template <typename... Args>
|
||||||
|
inline std::array<void*, sizeof...(Args)> get_data_ptrs(Args&... args) {
|
||||||
|
return {get_data_ptr(args)...};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Map dtype to cudnn data type.
|
||||||
|
inline cudnnDataType_t dtype_to_cudnn_type(Dtype dtype) {
|
||||||
|
switch (dtype) {
|
||||||
|
case int8:
|
||||||
|
return CUDNN_DATA_INT8;
|
||||||
|
case int32:
|
||||||
|
return CUDNN_DATA_INT32;
|
||||||
|
case uint8:
|
||||||
|
return CUDNN_DATA_UINT8;
|
||||||
|
case float16:
|
||||||
|
return CUDNN_DATA_HALF;
|
||||||
|
case bfloat16:
|
||||||
|
return CUDNN_DATA_BFLOAT16;
|
||||||
|
case float32:
|
||||||
|
return CUDNN_DATA_FLOAT;
|
||||||
|
case float64:
|
||||||
|
return CUDNN_DATA_DOUBLE;
|
||||||
|
default:
|
||||||
|
throw std::runtime_error(fmt::format(
|
||||||
|
"Unsupported dtype in Convolution: {}.", dtype_to_string(dtype)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a tensor descriptor from |x|.
|
||||||
|
cudnn_frontend::Tensor build_cudnn_tensor(int64_t id, const array& x);
|
||||||
|
|
||||||
|
// Create a tensor descriptor from |x|, and transpose from NHWC to NCHW.
|
||||||
|
cudnn_frontend::Tensor build_cudnn_tensor_nchw(int64_t id, const array& x);
|
||||||
|
|
||||||
|
// Create a tensor descriptor from |x|, make sure it is 4D, and transpose it
|
||||||
|
// from NHWC to NCHW.
|
||||||
|
cudnn_frontend::Tensor build_cudnn_tensor_4d_nchw(int64_t id, const array& x);
|
||||||
|
|
||||||
|
// Create a 4D scalar tensor descriptor, which is passed by value.
|
||||||
|
cudnn_frontend::Tensor build_cudnn_scalar_4d(int64_t id, Dtype dtype);
|
||||||
|
|
||||||
|
// Find a working plan for |op_graph|.
|
||||||
|
std::optional<cudnn_frontend::ExecutionPlan> find_cudnn_plan_from_op_graph(
|
||||||
|
cudnnHandle_t handle,
|
||||||
|
cudnnBackendDescriptorType_t backend_type,
|
||||||
|
Dtype dtype,
|
||||||
|
cudnn_frontend::OperationGraph& op_graph);
|
||||||
|
|
||||||
|
// Encode the plan to command buffer by capturing.
|
||||||
|
bool encode_cudnn_plan_with_capturing(
|
||||||
|
cu::CommandEncoder& encoder,
|
||||||
|
cudnn_frontend::ExecutionPlan& plan,
|
||||||
|
int num_args,
|
||||||
|
const int64_t* uids,
|
||||||
|
void** data_ptrs);
|
||||||
|
|
||||||
|
#if CUDNN_VERSION >= 90500
|
||||||
|
// Encode the plan to command buffer by using native graph api of cudnn. If the
|
||||||
|
// |graph| is empty it will be populated, otherwise it will be updated.
|
||||||
|
bool encode_cudnn_plan_with_graph_api(
|
||||||
|
cu::CommandEncoder& encoder,
|
||||||
|
cudnn_frontend::ExecutionPlan& plan,
|
||||||
|
CudaGraph& graph,
|
||||||
|
int num_args,
|
||||||
|
const int64_t* uids,
|
||||||
|
void** data_ptrs);
|
||||||
|
#endif
|
||||||
|
|
||||||
|
// Helpers to make calls like encode_cudnn_plan(..., {'x', 'y', 'z'}, x, y, z).
|
||||||
|
template <typename... Args>
|
||||||
|
bool encode_cudnn_plan(
|
||||||
|
cu::CommandEncoder& encoder,
|
||||||
|
cudnn_frontend::ExecutionPlan& plan,
|
||||||
|
std::initializer_list<int64_t> uids,
|
||||||
|
Args&... args) {
|
||||||
|
assert(uids.size() == sizeof...(args));
|
||||||
|
auto data_ptrs = get_data_ptrs(args...);
|
||||||
|
return encode_cudnn_plan_with_capturing(
|
||||||
|
encoder, plan, uids.size(), uids.begin(), data_ptrs.data());
|
||||||
|
}
|
||||||
|
|
||||||
|
#if CUDNN_VERSION >= 90500
|
||||||
|
template <typename... Args>
|
||||||
|
bool encode_cudnn_plan(
|
||||||
|
cu::CommandEncoder& encoder,
|
||||||
|
cudnn_frontend::ExecutionPlan& plan,
|
||||||
|
CudaGraph& graph,
|
||||||
|
std::initializer_list<int64_t> uids,
|
||||||
|
Args&... args) {
|
||||||
|
assert(uids.size() == sizeof...(args));
|
||||||
|
auto data_ptrs = get_data_ptrs(args...);
|
||||||
|
return encode_cudnn_plan_with_graph_api(
|
||||||
|
encoder, plan, graph, uids.size(), uids.begin(), data_ptrs.data());
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
} // namespace mlx::core
|
||||||
379
mlx/backend/cuda/custom_kernel.cpp
Normal file
379
mlx/backend/cuda/custom_kernel.cpp
Normal file
@@ -0,0 +1,379 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include <iostream>
|
||||||
|
|
||||||
|
#include "mlx/backend/common/compiled.h"
|
||||||
|
#include "mlx/backend/cuda/jit_module.h"
|
||||||
|
#include "mlx/backend/cuda/utils.h"
|
||||||
|
#include "mlx/backend/gpu/copy.h"
|
||||||
|
#include "mlx/fast.h"
|
||||||
|
#include "mlx/fast_primitives.h"
|
||||||
|
|
||||||
|
#include <fmt/format.h>
|
||||||
|
#include <nvtx3/nvtx3.hpp>
|
||||||
|
|
||||||
|
namespace mlx::core::fast {
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
constexpr const char* default_header = R"(
|
||||||
|
#include "mlx/backend/cuda/device/utils.cuh"
|
||||||
|
|
||||||
|
#include <cooperative_groups.h>
|
||||||
|
|
||||||
|
#define inf cuda::std::numeric_limits<float>::infinity()
|
||||||
|
|
||||||
|
)";
|
||||||
|
|
||||||
|
std::string template_arguments_hash(
|
||||||
|
const std::vector<std::pair<std::string, TemplateArg>>& template_args) {
|
||||||
|
if (template_args.empty()) {
|
||||||
|
return "";
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string hash;
|
||||||
|
hash.reserve(512);
|
||||||
|
|
||||||
|
for (const auto& [name, arg] : template_args) {
|
||||||
|
if (std::holds_alternative<int>(arg)) {
|
||||||
|
hash += fmt::format("_{}", std::get<int>(arg));
|
||||||
|
} else if (std::holds_alternative<bool>(arg)) {
|
||||||
|
hash += (std::get<bool>(arg)) ? "_t" : "_f";
|
||||||
|
} else if (std::holds_alternative<Dtype>(arg)) {
|
||||||
|
hash += "_";
|
||||||
|
hash += get_type_string(std::get<Dtype>(arg));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return hash;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string build_kernel(
|
||||||
|
const std::string& func_name,
|
||||||
|
const std::string& header,
|
||||||
|
const std::string& source,
|
||||||
|
const std::vector<std::string>& input_names,
|
||||||
|
const std::vector<array>& inputs,
|
||||||
|
const std::vector<std::string>& output_names,
|
||||||
|
const std::vector<Dtype>& output_dtypes,
|
||||||
|
const std::vector<std::pair<std::string, TemplateArg>>& template_args,
|
||||||
|
const std::vector<CustomKernelShapeInfo>& shape_infos) {
|
||||||
|
std::string kernel_source;
|
||||||
|
kernel_source.reserve(header.size() + source.size() + 8192);
|
||||||
|
kernel_source += default_header;
|
||||||
|
kernel_source += header;
|
||||||
|
kernel_source +=
|
||||||
|
"namespace mlx::core::cu {\n\n"
|
||||||
|
"namespace cg = cooperative_groups;\n\n";
|
||||||
|
|
||||||
|
kernel_source += "__global__ void ";
|
||||||
|
kernel_source += func_name;
|
||||||
|
kernel_source += "(\n";
|
||||||
|
|
||||||
|
// Add inputs
|
||||||
|
for (int i = 0; i < inputs.size(); ++i) {
|
||||||
|
const auto& name = input_names[i];
|
||||||
|
const auto& arr = inputs[i];
|
||||||
|
kernel_source += " const ";
|
||||||
|
kernel_source += dtype_to_cuda_type(arr.dtype());
|
||||||
|
kernel_source += "* ";
|
||||||
|
kernel_source += name;
|
||||||
|
kernel_source += ",\n";
|
||||||
|
// Add input shape, strides and ndim if present in the source
|
||||||
|
if (arr.ndim() > 0) {
|
||||||
|
if (shape_infos[i].shape) {
|
||||||
|
kernel_source += " const __grid_constant__ Shape ";
|
||||||
|
kernel_source += name;
|
||||||
|
kernel_source += "_shape,\n";
|
||||||
|
}
|
||||||
|
if (shape_infos[i].strides) {
|
||||||
|
kernel_source += " const __grid_constant__ Strides ";
|
||||||
|
kernel_source += name;
|
||||||
|
kernel_source += "_strides,\n";
|
||||||
|
}
|
||||||
|
if (shape_infos[i].ndim) {
|
||||||
|
kernel_source += " const __grid_constant__ int ";
|
||||||
|
kernel_source += name;
|
||||||
|
kernel_source += "_ndim,\n";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add outputs
|
||||||
|
for (int i = 0; i < output_names.size(); ++i) {
|
||||||
|
const auto& name = output_names[i];
|
||||||
|
const auto& dtype = output_dtypes[i];
|
||||||
|
kernel_source += " ";
|
||||||
|
kernel_source += dtype_to_cuda_type(dtype);
|
||||||
|
kernel_source += "* ";
|
||||||
|
kernel_source += name;
|
||||||
|
if (i < output_names.size() - 1) {
|
||||||
|
kernel_source += ",\n";
|
||||||
|
} else {
|
||||||
|
kernel_source += ") {\n";
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set compile time constants
|
||||||
|
if (!template_args.empty()) {
|
||||||
|
for (const auto& [name, arg] : template_args) {
|
||||||
|
if (std::holds_alternative<int>(arg)) {
|
||||||
|
kernel_source +=
|
||||||
|
fmt::format(" constexpr int {} = {};\n", name, std::get<int>(arg));
|
||||||
|
} else if (std::holds_alternative<bool>(arg)) {
|
||||||
|
kernel_source += fmt::format(
|
||||||
|
" constexpr bool {} = {};\n", name, std::get<bool>(arg));
|
||||||
|
} else {
|
||||||
|
kernel_source += fmt::format(
|
||||||
|
" using {} = {};\n",
|
||||||
|
name,
|
||||||
|
dtype_to_cuda_type(std::get<Dtype>(arg)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
kernel_source += "\n";
|
||||||
|
}
|
||||||
|
|
||||||
|
kernel_source += source;
|
||||||
|
kernel_source += "\n}\n\n} // namespace mlx::core::cu\n";
|
||||||
|
|
||||||
|
return kernel_source;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
CustomKernelFunction cuda_kernel(
|
||||||
|
const std::string& name,
|
||||||
|
const std::vector<std::string>& input_names,
|
||||||
|
const std::vector<std::string>& output_names,
|
||||||
|
const std::string& source,
|
||||||
|
const std::string& header,
|
||||||
|
bool ensure_row_contiguous,
|
||||||
|
int shared_memory) {
|
||||||
|
if (output_names.empty()) {
|
||||||
|
throw std::invalid_argument(
|
||||||
|
"[custom_kernel] Must specify at least one output.");
|
||||||
|
}
|
||||||
|
|
||||||
|
std::vector<CustomKernelShapeInfo> shape_infos;
|
||||||
|
for (auto& n : input_names) {
|
||||||
|
CustomKernelShapeInfo shape_info;
|
||||||
|
shape_info.shape = source.find(n + "_shape") != std::string::npos;
|
||||||
|
shape_info.strides = source.find(n + "_strides") != std::string::npos;
|
||||||
|
shape_info.ndim = source.find(n + "_ndim") != std::string::npos;
|
||||||
|
shape_infos.push_back(shape_info);
|
||||||
|
}
|
||||||
|
|
||||||
|
return [=, shape_infos = std::move(shape_infos)](
|
||||||
|
const std::vector<array>& inputs,
|
||||||
|
const std::vector<Shape>& output_shapes,
|
||||||
|
const std::vector<Dtype>& output_dtypes,
|
||||||
|
std::tuple<int, int, int> grid,
|
||||||
|
std::tuple<int, int, int> threadgroup,
|
||||||
|
const std::vector<std::pair<std::string, TemplateArg>>&
|
||||||
|
template_args = {},
|
||||||
|
std::optional<float> init_value = std::nullopt,
|
||||||
|
bool verbose = false,
|
||||||
|
StreamOrDevice s_ = {}) {
|
||||||
|
if (inputs.size() != input_names.size()) {
|
||||||
|
std::ostringstream msg;
|
||||||
|
msg << "[custom_kernel] Expected `inputs` to have size "
|
||||||
|
<< input_names.size() << " but got size " << inputs.size() << "."
|
||||||
|
<< std::endl;
|
||||||
|
throw std::invalid_argument(msg.str());
|
||||||
|
}
|
||||||
|
if (output_shapes.size() != output_names.size()) {
|
||||||
|
std::ostringstream msg;
|
||||||
|
msg << "[custom_kernel] Expected `output_shapes` to have size "
|
||||||
|
<< output_names.size() << " but got size " << output_shapes.size()
|
||||||
|
<< "." << std::endl;
|
||||||
|
throw std::invalid_argument(msg.str());
|
||||||
|
}
|
||||||
|
if (output_dtypes.size() != output_names.size()) {
|
||||||
|
std::ostringstream msg;
|
||||||
|
msg << "[custom_kernel] Expected `output_dtypes` to have size "
|
||||||
|
<< output_names.size() << " but got size " << output_dtypes.size()
|
||||||
|
<< "." << std::endl;
|
||||||
|
throw std::invalid_argument(msg.str());
|
||||||
|
}
|
||||||
|
|
||||||
|
auto s = to_stream(s_);
|
||||||
|
if (s.device != Device::gpu) {
|
||||||
|
throw std::invalid_argument("[custom_kernel] Only supports the GPU.");
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string kernel_name =
|
||||||
|
"custom_kernel_" + name + template_arguments_hash(template_args);
|
||||||
|
std::string kernel_source = build_kernel(
|
||||||
|
kernel_name,
|
||||||
|
header,
|
||||||
|
source,
|
||||||
|
input_names,
|
||||||
|
inputs,
|
||||||
|
output_names,
|
||||||
|
output_dtypes,
|
||||||
|
template_args,
|
||||||
|
shape_infos);
|
||||||
|
|
||||||
|
if (verbose) {
|
||||||
|
std::cout << "Generated source code for `" << kernel_name
|
||||||
|
<< "`:" << std::endl
|
||||||
|
<< "```" << std::endl
|
||||||
|
<< kernel_source << std::endl
|
||||||
|
<< "```" << std::endl;
|
||||||
|
}
|
||||||
|
|
||||||
|
return array::make_arrays(
|
||||||
|
std::move(output_shapes),
|
||||||
|
std::move(output_dtypes),
|
||||||
|
std::make_shared<CustomKernel>(
|
||||||
|
s,
|
||||||
|
std::move(kernel_name),
|
||||||
|
std::move(kernel_source),
|
||||||
|
grid,
|
||||||
|
threadgroup,
|
||||||
|
shape_infos,
|
||||||
|
ensure_row_contiguous,
|
||||||
|
init_value,
|
||||||
|
std::vector<ScalarArg>{},
|
||||||
|
false,
|
||||||
|
shared_memory),
|
||||||
|
std::move(inputs));
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
std::vector<array> precompiled_cuda_kernel(
|
||||||
|
const std::string& name,
|
||||||
|
const std::string& compiled_source,
|
||||||
|
const std::vector<array>& inputs,
|
||||||
|
const std::vector<Shape>& output_shapes,
|
||||||
|
const std::vector<Dtype>& output_dtypes,
|
||||||
|
const std::vector<ScalarArg>& scalars,
|
||||||
|
std::tuple<int, int, int> grid,
|
||||||
|
std::tuple<int, int, int> threadgroup,
|
||||||
|
int shared_memory,
|
||||||
|
std::optional<float> init_value,
|
||||||
|
bool ensure_row_contiguous,
|
||||||
|
StreamOrDevice s) {
|
||||||
|
std::vector<CustomKernelShapeInfo> shape_infos(
|
||||||
|
inputs.size(), CustomKernelShapeInfo{false, false, false});
|
||||||
|
return array::make_arrays(
|
||||||
|
output_shapes,
|
||||||
|
output_dtypes,
|
||||||
|
std::make_shared<CustomKernel>(
|
||||||
|
to_stream(s),
|
||||||
|
name,
|
||||||
|
compiled_source,
|
||||||
|
grid,
|
||||||
|
threadgroup,
|
||||||
|
shape_infos,
|
||||||
|
ensure_row_contiguous,
|
||||||
|
init_value,
|
||||||
|
scalars,
|
||||||
|
true,
|
||||||
|
shared_memory),
|
||||||
|
inputs);
|
||||||
|
}
|
||||||
|
|
||||||
|
void CustomKernel::eval_gpu(
|
||||||
|
const std::vector<array>& inputs,
|
||||||
|
std::vector<array>& outputs) {
|
||||||
|
nvtx3::scoped_range r("CustomKernel::eval_gpu");
|
||||||
|
auto& s = stream();
|
||||||
|
|
||||||
|
std::vector<array> copies;
|
||||||
|
|
||||||
|
// Allocate and initialize the output arrays
|
||||||
|
for (auto& out : outputs) {
|
||||||
|
if (init_value_) {
|
||||||
|
copies.emplace_back(init_value_.value(), out.dtype());
|
||||||
|
fill_gpu(copies.back(), out, s);
|
||||||
|
} else {
|
||||||
|
out.set_data(allocator::malloc(out.nbytes()));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create the input arrays and copy if needed
|
||||||
|
auto check_input = [&copies, &s, this](const array& x) -> const array {
|
||||||
|
bool no_copy = x.flags().row_contiguous;
|
||||||
|
if (!ensure_row_contiguous_ || no_copy) {
|
||||||
|
return x;
|
||||||
|
} else {
|
||||||
|
copies.push_back(array(x.shape(), x.dtype(), nullptr, {}));
|
||||||
|
copy_gpu(x, copies.back(), CopyType::General, s);
|
||||||
|
return copies.back();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
std::vector<array> checked_inputs;
|
||||||
|
for (const array& in : inputs) {
|
||||||
|
checked_inputs.push_back(check_input(in));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compile the custom kernel
|
||||||
|
std::string kernel_name =
|
||||||
|
(is_precompiled_) ? name_ : "mlx::core::cu::" + name_;
|
||||||
|
cu::JitModule& mod = cu::get_jit_module(
|
||||||
|
s.device,
|
||||||
|
name_,
|
||||||
|
[&]() {
|
||||||
|
return std::make_tuple(
|
||||||
|
is_precompiled_, source_, std::vector{kernel_name});
|
||||||
|
},
|
||||||
|
false);
|
||||||
|
|
||||||
|
// Make the arguments
|
||||||
|
cu::KernelArgs args;
|
||||||
|
for (int i = 0; i < checked_inputs.size(); i++) {
|
||||||
|
const array& in = checked_inputs[i];
|
||||||
|
auto& shape_info = shape_infos_[i];
|
||||||
|
args.append(in);
|
||||||
|
if (shape_info.shape) {
|
||||||
|
args.append_ndim(in.shape());
|
||||||
|
}
|
||||||
|
if (shape_info.strides) {
|
||||||
|
args.append_ndim(in.strides());
|
||||||
|
}
|
||||||
|
if (shape_info.ndim) {
|
||||||
|
args.append<int32_t>(in.ndim());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for (auto& out : outputs) {
|
||||||
|
args.append(out);
|
||||||
|
}
|
||||||
|
for (auto& s : scalar_arguments_) {
|
||||||
|
if (std::holds_alternative<bool>(s)) {
|
||||||
|
args.append(std::get<bool>(s));
|
||||||
|
} else if (std::holds_alternative<int>(s)) {
|
||||||
|
args.append(std::get<int>(s));
|
||||||
|
} else if (std::holds_alternative<float>(s)) {
|
||||||
|
args.append(std::get<float>(s));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make the grid
|
||||||
|
const auto [tx, ty, tz] = threadgroup_;
|
||||||
|
const auto [gx, gy, gz] = grid_;
|
||||||
|
dim3 block(std::min(tx, gx), std::min(ty, gy), std::min(tz, gz));
|
||||||
|
dim3 grid((gx + tx - 1) / tx, (gy + ty - 1) / ty, (gz + tz - 1) / tz);
|
||||||
|
|
||||||
|
// Call the kernel
|
||||||
|
auto& encoder = cu::get_command_encoder(s);
|
||||||
|
for (const auto& in : checked_inputs) {
|
||||||
|
encoder.set_input_array(in);
|
||||||
|
}
|
||||||
|
for (const auto& out : outputs) {
|
||||||
|
encoder.set_output_array(out);
|
||||||
|
}
|
||||||
|
for (const auto& t : copies) {
|
||||||
|
encoder.add_temporary(t);
|
||||||
|
}
|
||||||
|
auto kernel =
|
||||||
|
mod.get_kernel(kernel_name, [smem = shared_memory_](CUfunction kernel) {
|
||||||
|
if (smem > 0 && smem > 48000) {
|
||||||
|
cuFuncSetAttribute(
|
||||||
|
kernel, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, smem);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
encoder.add_kernel_node(kernel, grid, block, shared_memory_, args.args());
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace mlx::core::fast
|
||||||
@@ -27,11 +27,11 @@ void check_cudnn_error(const char* name, cudnnStatus_t err) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
int cuda_graph_cache_size() {
|
bool use_cuda_graphs() {
|
||||||
static int cache_size = []() {
|
static bool use_graphs = []() {
|
||||||
return env::get_var("MLX_CUDA_GRAPH_CACHE_SIZE", 100);
|
return env::get_var("MLX_USE_CUDA_GRAPHS", true);
|
||||||
}();
|
}();
|
||||||
return cache_size;
|
return use_graphs;
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace
|
} // namespace
|
||||||
@@ -86,14 +86,19 @@ CommandEncoder& Device::get_command_encoder(Stream s) {
|
|||||||
|
|
||||||
CommandEncoder::CaptureContext::CaptureContext(CommandEncoder& enc) : enc(enc) {
|
CommandEncoder::CaptureContext::CaptureContext(CommandEncoder& enc) : enc(enc) {
|
||||||
enc.device().make_current();
|
enc.device().make_current();
|
||||||
|
if (!use_cuda_graphs()) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
CHECK_CUDA_ERROR(
|
CHECK_CUDA_ERROR(
|
||||||
cudaStreamBeginCapture(enc.stream(), cudaStreamCaptureModeGlobal));
|
cudaStreamBeginCapture(enc.stream(), cudaStreamCaptureModeGlobal));
|
||||||
}
|
}
|
||||||
|
|
||||||
CommandEncoder::CaptureContext::~CaptureContext() {
|
CommandEncoder::CaptureContext::~CaptureContext() {
|
||||||
CHECK_CUDA_ERROR(cudaStreamEndCapture(enc.stream(), &graph));
|
if (!use_cuda_graphs()) {
|
||||||
std::unique_ptr<cudaGraph_t, void (*)(cudaGraph_t*)> graph_freer(
|
return;
|
||||||
&graph, [](cudaGraph_t* p) { CHECK_CUDA_ERROR(cudaGraphDestroy(*p)); });
|
}
|
||||||
|
|
||||||
|
graph.end_capture(enc.stream());
|
||||||
if (discard) {
|
if (discard) {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
@@ -107,6 +112,9 @@ CommandEncoder::ConcurrentContext::ConcurrentContext(CommandEncoder& enc)
|
|||||||
|
|
||||||
CommandEncoder::ConcurrentContext::~ConcurrentContext() {
|
CommandEncoder::ConcurrentContext::~ConcurrentContext() {
|
||||||
enc.in_concurrent_ = false;
|
enc.in_concurrent_ = false;
|
||||||
|
if (!use_cuda_graphs()) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
// Use an empty graph node for synchronization
|
// Use an empty graph node for synchronization
|
||||||
CommandEncoder::GraphNode empty{NULL, 'E', std::to_string(enc.node_count_++)};
|
CommandEncoder::GraphNode empty{NULL, 'E', std::to_string(enc.node_count_++)};
|
||||||
@@ -185,20 +193,28 @@ void CommandEncoder::insert_graph_dependencies(std::vector<GraphNode> nodes) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
CommandEncoder::CommandEncoder(Device& d)
|
CommandEncoder::CommandEncoder(Device& d)
|
||||||
: device_(d), stream_(d), graph_cache_(cuda_graph_cache_size()) {
|
: device_(d),
|
||||||
CHECK_CUDA_ERROR(cudaGraphCreate(&graph_, 0));
|
stream_(d),
|
||||||
}
|
graph_(d),
|
||||||
|
graph_cache_("MLX_CUDA_GRAPH_CACHE_SIZE", /* default_capacity */ 400) {}
|
||||||
|
|
||||||
void CommandEncoder::add_completed_handler(std::function<void()> task) {
|
void CommandEncoder::add_completed_handler(std::function<void()> task) {
|
||||||
worker_.add_task(std::move(task));
|
worker_.add_task(std::move(task));
|
||||||
}
|
}
|
||||||
|
|
||||||
void CommandEncoder::set_input_array(const array& arr) {
|
void CommandEncoder::set_input_array(const array& arr) {
|
||||||
|
if (!use_cuda_graphs()) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
auto id = reinterpret_cast<std::uintptr_t>(arr.buffer().ptr());
|
auto id = reinterpret_cast<std::uintptr_t>(arr.buffer().ptr());
|
||||||
active_deps_.push_back(id);
|
active_deps_.push_back(id);
|
||||||
}
|
}
|
||||||
|
|
||||||
void CommandEncoder::set_output_array(const array& arr) {
|
void CommandEncoder::set_output_array(const array& arr) {
|
||||||
|
if (!use_cuda_graphs()) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
auto id = reinterpret_cast<std::uintptr_t>(arr.buffer().ptr());
|
auto id = reinterpret_cast<std::uintptr_t>(arr.buffer().ptr());
|
||||||
active_deps_.push_back(id);
|
active_deps_.push_back(id);
|
||||||
active_outputs_.push_back(id);
|
active_outputs_.push_back(id);
|
||||||
@@ -216,6 +232,11 @@ void CommandEncoder::add_kernel_node(
|
|||||||
dim3 block_dim,
|
dim3 block_dim,
|
||||||
uint32_t smem_bytes,
|
uint32_t smem_bytes,
|
||||||
void** params) {
|
void** params) {
|
||||||
|
if (!use_cuda_graphs()) {
|
||||||
|
CHECK_CUDA_ERROR(cudaLaunchKernel(
|
||||||
|
func, grid_dim, block_dim, params, smem_bytes, stream()));
|
||||||
|
return;
|
||||||
|
}
|
||||||
cudaKernelNodeParams kernel_params = {0};
|
cudaKernelNodeParams kernel_params = {0};
|
||||||
kernel_params.func = func;
|
kernel_params.func = func;
|
||||||
kernel_params.gridDim = grid_dim;
|
kernel_params.gridDim = grid_dim;
|
||||||
@@ -231,6 +252,22 @@ void CommandEncoder::add_kernel_node(
|
|||||||
dim3 block_dim,
|
dim3 block_dim,
|
||||||
uint32_t smem_bytes,
|
uint32_t smem_bytes,
|
||||||
void** params) {
|
void** params) {
|
||||||
|
if (!use_cuda_graphs()) {
|
||||||
|
CHECK_CUDA_ERROR(cuLaunchKernel(
|
||||||
|
func,
|
||||||
|
grid_dim.x,
|
||||||
|
grid_dim.y,
|
||||||
|
grid_dim.z,
|
||||||
|
block_dim.x,
|
||||||
|
block_dim.y,
|
||||||
|
block_dim.z,
|
||||||
|
smem_bytes,
|
||||||
|
stream(),
|
||||||
|
params,
|
||||||
|
nullptr));
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
CUDA_KERNEL_NODE_PARAMS kernel_params = {0};
|
CUDA_KERNEL_NODE_PARAMS kernel_params = {0};
|
||||||
kernel_params.func = func;
|
kernel_params.func = func;
|
||||||
kernel_params.gridDimX = grid_dim.x;
|
kernel_params.gridDimX = grid_dim.x;
|
||||||
@@ -257,6 +294,13 @@ void CommandEncoder::add_kernel_node(const CUDA_KERNEL_NODE_PARAMS& params) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void CommandEncoder::add_graph_node(cudaGraph_t child) {
|
void CommandEncoder::add_graph_node(cudaGraph_t child) {
|
||||||
|
if (!use_cuda_graphs()) {
|
||||||
|
CudaGraphExec graph_exec;
|
||||||
|
graph_exec.instantiate(child);
|
||||||
|
device_.make_current();
|
||||||
|
CHECK_CUDA_ERROR(cudaGraphLaunch(graph_exec, stream()));
|
||||||
|
return;
|
||||||
|
}
|
||||||
cudaGraphNode_t node;
|
cudaGraphNode_t node;
|
||||||
CHECK_CUDA_ERROR(cudaGraphAddChildGraphNode(&node, graph_, NULL, 0, child));
|
CHECK_CUDA_ERROR(cudaGraphAddChildGraphNode(&node, graph_, NULL, 0, child));
|
||||||
insert_graph_dependencies(GraphNode{node, 'G'});
|
insert_graph_dependencies(GraphNode{node, 'G'});
|
||||||
@@ -270,7 +314,13 @@ void CommandEncoder::commit() {
|
|||||||
if (node_count_ > 0) {
|
if (node_count_ > 0) {
|
||||||
if (!from_nodes_.empty()) {
|
if (!from_nodes_.empty()) {
|
||||||
CHECK_CUDA_ERROR(cudaGraphAddDependencies(
|
CHECK_CUDA_ERROR(cudaGraphAddDependencies(
|
||||||
graph_, from_nodes_.data(), to_nodes_.data(), from_nodes_.size()));
|
graph_,
|
||||||
|
from_nodes_.data(),
|
||||||
|
to_nodes_.data(),
|
||||||
|
#if CUDART_VERSION >= 13000
|
||||||
|
nullptr, // edgeData
|
||||||
|
#endif // CUDART_VERSION >= 13000
|
||||||
|
from_nodes_.size()));
|
||||||
}
|
}
|
||||||
|
|
||||||
graph_key_ += ".";
|
graph_key_ += ".";
|
||||||
@@ -311,8 +361,7 @@ void CommandEncoder::commit() {
|
|||||||
to_nodes_.clear();
|
to_nodes_.clear();
|
||||||
graph_key_.clear();
|
graph_key_.clear();
|
||||||
node_map_.clear();
|
node_map_.clear();
|
||||||
CHECK_CUDA_ERROR(cudaGraphDestroy(graph_));
|
graph_ = CudaGraph(device_);
|
||||||
CHECK_CUDA_ERROR(cudaGraphCreate(&graph_, 0));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Put completion handlers in a batch.
|
// Put completion handlers in a batch.
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ class CommandEncoder {
|
|||||||
struct CaptureContext {
|
struct CaptureContext {
|
||||||
CaptureContext(CommandEncoder& enc);
|
CaptureContext(CommandEncoder& enc);
|
||||||
~CaptureContext();
|
~CaptureContext();
|
||||||
cudaGraph_t graph;
|
CudaGraph graph;
|
||||||
CommandEncoder& enc;
|
CommandEncoder& enc;
|
||||||
bool discard{false};
|
bool discard{false};
|
||||||
};
|
};
|
||||||
@@ -76,9 +76,6 @@ class CommandEncoder {
|
|||||||
uint32_t smem_bytes,
|
uint32_t smem_bytes,
|
||||||
void** params);
|
void** params);
|
||||||
|
|
||||||
// Low-level graph helpers.
|
|
||||||
void add_kernel_node(const cudaKernelNodeParams& params);
|
|
||||||
void add_kernel_node(const CUDA_KERNEL_NODE_PARAMS& params);
|
|
||||||
void add_graph_node(cudaGraph_t child);
|
void add_graph_node(cudaGraph_t child);
|
||||||
|
|
||||||
void add_temporary(const array& arr) {
|
void add_temporary(const array& arr) {
|
||||||
@@ -101,6 +98,9 @@ class CommandEncoder {
|
|||||||
void synchronize();
|
void synchronize();
|
||||||
|
|
||||||
private:
|
private:
|
||||||
|
void add_kernel_node(const cudaKernelNodeParams& params);
|
||||||
|
void add_kernel_node(const CUDA_KERNEL_NODE_PARAMS& params);
|
||||||
|
|
||||||
struct GraphNode {
|
struct GraphNode {
|
||||||
cudaGraphNode_t node;
|
cudaGraphNode_t node;
|
||||||
// K = kernel
|
// K = kernel
|
||||||
@@ -115,7 +115,7 @@ class CommandEncoder {
|
|||||||
|
|
||||||
Device& device_;
|
Device& device_;
|
||||||
CudaStream stream_;
|
CudaStream stream_;
|
||||||
cudaGraph_t graph_;
|
CudaGraph graph_;
|
||||||
Worker worker_;
|
Worker worker_;
|
||||||
char node_count_{0};
|
char node_count_{0};
|
||||||
char graph_node_count_{0};
|
char graph_node_count_{0};
|
||||||
|
|||||||
@@ -204,6 +204,12 @@ struct Power {
|
|||||||
__device__ T operator()(T base, T exp) {
|
__device__ T operator()(T base, T exp) {
|
||||||
if constexpr (cuda::std::is_integral_v<T>) {
|
if constexpr (cuda::std::is_integral_v<T>) {
|
||||||
T res = 1;
|
T res = 1;
|
||||||
|
// Raising an integer to a negative power is undefined
|
||||||
|
if constexpr (cuda::std::is_signed_v<T>) {
|
||||||
|
if (exp < 0) {
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
while (exp) {
|
while (exp) {
|
||||||
if (exp & 1) {
|
if (exp & 1) {
|
||||||
res *= base;
|
res *= base;
|
||||||
|
|||||||
@@ -6,7 +6,6 @@
|
|||||||
|
|
||||||
#include <cuda_bf16.h>
|
#include <cuda_bf16.h>
|
||||||
#include <cuda_fp16.h>
|
#include <cuda_fp16.h>
|
||||||
#include <thrust/iterator/transform_iterator.h>
|
|
||||||
|
|
||||||
namespace mlx::core::cu {
|
namespace mlx::core::cu {
|
||||||
|
|
||||||
@@ -116,15 +115,4 @@ inline __host__ __device__ auto cast_to(SrcT x) {
|
|||||||
return CastOp<SrcT, DstT>{}(x);
|
return CastOp<SrcT, DstT>{}(x);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Return an iterator that cast the value to DstT using CastOp.
|
|
||||||
template <typename DstT, typename Iterator>
|
|
||||||
inline __host__ __device__ auto make_cast_iterator(Iterator it) {
|
|
||||||
using SrcT = typename cuda::std::iterator_traits<Iterator>::value_type;
|
|
||||||
if constexpr (std::is_same_v<SrcT, DstT>) {
|
|
||||||
return it;
|
|
||||||
} else {
|
|
||||||
return thrust::make_transform_iterator(it, CastOp<SrcT, DstT>{});
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace mlx::core::cu
|
} // namespace mlx::core::cu
|
||||||
|
|||||||
@@ -146,6 +146,23 @@ inline __device__ void store_vector(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
template <int N, typename T, typename SizeT>
|
||||||
|
inline __device__ void store_vector(
|
||||||
|
T* ptr,
|
||||||
|
uint32_t offset,
|
||||||
|
const AlignedVector<T, N>& vec,
|
||||||
|
SizeT size,
|
||||||
|
int64_t stride) {
|
||||||
|
if (is_aligned<N>(ptr) && (offset + 1) * N <= size && stride == 1) {
|
||||||
|
auto* to = reinterpret_cast<AlignedVector<T, N>*>(ptr);
|
||||||
|
to[offset] = vec;
|
||||||
|
} else {
|
||||||
|
for (int i = 0; (offset * N + i) < size && i < N; ++i) {
|
||||||
|
ptr[stride * (offset * N + i)] = vec[i];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
///////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////
|
||||||
// Type limits utils
|
// Type limits utils
|
||||||
///////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////
|
||||||
|
|||||||
56
mlx/backend/cuda/distributed.cu
Normal file
56
mlx/backend/cuda/distributed.cu
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/device.h"
|
||||||
|
#include "mlx/backend/cuda/kernel_utils.cuh"
|
||||||
|
#include "mlx/backend/gpu/copy.h"
|
||||||
|
#include "mlx/distributed/primitives.h"
|
||||||
|
#include "mlx/primitives.h"
|
||||||
|
|
||||||
|
#include <cassert>
|
||||||
|
|
||||||
|
namespace mlx::core::distributed {
|
||||||
|
void AllReduce::eval_gpu(
|
||||||
|
const std::vector<array>& inputs,
|
||||||
|
std::vector<array>& outputs) {
|
||||||
|
assert(inputs.size() == 1);
|
||||||
|
assert(outputs.size() == 1);
|
||||||
|
|
||||||
|
auto set_input_output =
|
||||||
|
[s = stream()](const array& in, array& out) -> std::pair<array, array> {
|
||||||
|
if (!in.flags().row_contiguous) {
|
||||||
|
copy_gpu(in, out, CopyType::General, s);
|
||||||
|
return {out, out};
|
||||||
|
} else if (in.is_donatable()) {
|
||||||
|
out.copy_shared_buffer(in);
|
||||||
|
return {in, out};
|
||||||
|
} else {
|
||||||
|
out.set_data(allocator::malloc(out.nbytes()));
|
||||||
|
return {in, out};
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
auto [input, output] = set_input_output(inputs[0], outputs[0]);
|
||||||
|
|
||||||
|
auto& encoder = cu::get_command_encoder(stream());
|
||||||
|
encoder.set_input_array(input);
|
||||||
|
encoder.set_output_array(output);
|
||||||
|
|
||||||
|
auto capture = encoder.capture_context();
|
||||||
|
auto& s = stream();
|
||||||
|
|
||||||
|
switch (reduce_type_) {
|
||||||
|
case Sum:
|
||||||
|
distributed::detail::all_sum(group(), input, output, s);
|
||||||
|
break;
|
||||||
|
case Max:
|
||||||
|
distributed::detail::all_max(group(), input, output, s);
|
||||||
|
break;
|
||||||
|
case Min:
|
||||||
|
distributed::detail::all_min(group(), input, output, s);
|
||||||
|
break;
|
||||||
|
default:
|
||||||
|
throw std::runtime_error(
|
||||||
|
"Only all reduce sum, max, and min are supported.");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} // namespace mlx::core::distributed
|
||||||
@@ -15,8 +15,9 @@ bool is_available() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void new_stream(Stream s) {
|
void new_stream(Stream s) {
|
||||||
// Force initalization of cuda, so cuda runtime get destroyed at last.
|
// Force initalization of CUDA by creating an event, so the CUDA runtime and
|
||||||
cudaFree(nullptr);
|
// our CUDA event pool get destroyed last.
|
||||||
|
cu::CudaEvent(cudaEventDefault);
|
||||||
// Ensure the static stream objects get created.
|
// Ensure the static stream objects get created.
|
||||||
cu::get_command_encoder(s);
|
cu::get_command_encoder(s);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,10 +3,12 @@
|
|||||||
#include "mlx/backend/cuda/allocator.h"
|
#include "mlx/backend/cuda/allocator.h"
|
||||||
#include "mlx/backend/cuda/device.h"
|
#include "mlx/backend/cuda/device.h"
|
||||||
#include "mlx/backend/cuda/event.h"
|
#include "mlx/backend/cuda/event.h"
|
||||||
#include "mlx/backend/cuda/utils.h"
|
|
||||||
#include "mlx/event.h"
|
#include "mlx/event.h"
|
||||||
#include "mlx/scheduler.h"
|
#include "mlx/scheduler.h"
|
||||||
|
|
||||||
|
#include <map>
|
||||||
|
#include <vector>
|
||||||
|
|
||||||
#include <nvtx3/nvtx3.hpp>
|
#include <nvtx3/nvtx3.hpp>
|
||||||
|
|
||||||
namespace mlx::core {
|
namespace mlx::core {
|
||||||
@@ -17,104 +19,141 @@ namespace cu {
|
|||||||
// CudaEvent implementations
|
// CudaEvent implementations
|
||||||
///////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
// Cuda event managed with RAII.
|
namespace {
|
||||||
class CudaEventHandle {
|
|
||||||
public:
|
// Manage cached cudaEvent_t objects.
|
||||||
CudaEventHandle() {
|
struct CudaEventPool {
|
||||||
CHECK_CUDA_ERROR(cudaEventCreateWithFlags(
|
static CudaEventHandle create(int flags) {
|
||||||
&event_, cudaEventDisableTiming | cudaEventBlockingSync));
|
auto& cache = cache_for(flags);
|
||||||
|
if (cache.empty()) {
|
||||||
|
return CudaEventHandle(flags);
|
||||||
|
} else {
|
||||||
|
CudaEventHandle ret = std::move(cache.back());
|
||||||
|
cache.pop_back();
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
~CudaEventHandle() {
|
static void release(CudaEventHandle event) {
|
||||||
CHECK_CUDA_ERROR(cudaEventDestroy(event_));
|
cache_for(event.flags).push_back(std::move(event));
|
||||||
}
|
}
|
||||||
|
|
||||||
CudaEventHandle(const CudaEventHandle&) = delete;
|
static std::vector<CudaEventHandle>& cache_for(int flags) {
|
||||||
CudaEventHandle& operator=(const CudaEventHandle&) = delete;
|
static std::map<int, std::vector<CudaEventHandle>> cache;
|
||||||
|
return cache[flags];
|
||||||
operator cudaEvent_t() const {
|
|
||||||
return event_;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
private:
|
|
||||||
cudaEvent_t event_;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
CudaEvent::CudaEvent() : event_(std::make_shared<CudaEventHandle>()) {}
|
} // namespace
|
||||||
|
|
||||||
|
CudaEventHandle::CudaEventHandle(int flags) : flags(flags) {
|
||||||
|
CHECK_CUDA_ERROR(cudaEventCreateWithFlags(&handle_, flags));
|
||||||
|
assert(handle_ != nullptr);
|
||||||
|
}
|
||||||
|
|
||||||
|
CudaEvent::CudaEvent(int flags) : event_(CudaEventPool::create(flags)) {}
|
||||||
|
|
||||||
|
CudaEvent::~CudaEvent() {
|
||||||
|
CudaEventPool::release(std::move(event_));
|
||||||
|
}
|
||||||
|
|
||||||
void CudaEvent::wait() {
|
void CudaEvent::wait() {
|
||||||
nvtx3::scoped_range r("cu::CudaEvent::wait");
|
nvtx3::scoped_range r("cu::CudaEvent::wait");
|
||||||
if (!recorded_) {
|
cudaEventSynchronize(event_);
|
||||||
throw std::runtime_error("Should not wait on a CudaEvent before record.");
|
|
||||||
}
|
|
||||||
cudaEventSynchronize(*event_);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void CudaEvent::wait(cudaStream_t stream) {
|
void CudaEvent::wait(cudaStream_t stream) {
|
||||||
if (!recorded_) {
|
cudaStreamWaitEvent(stream, event_);
|
||||||
throw std::runtime_error("Should not wait on a CudaEvent before record.");
|
|
||||||
}
|
|
||||||
cudaStreamWaitEvent(stream, *event_);
|
|
||||||
}
|
|
||||||
|
|
||||||
void CudaEvent::wait(Stream s) {
|
|
||||||
if (s.device == mlx::core::Device::cpu) {
|
|
||||||
scheduler::enqueue(s, [*this]() mutable { wait(); });
|
|
||||||
} else {
|
|
||||||
auto& enc = cu::get_command_encoder(s);
|
|
||||||
enc.commit();
|
|
||||||
wait(enc.stream());
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void CudaEvent::record(cudaStream_t stream) {
|
void CudaEvent::record(cudaStream_t stream) {
|
||||||
cudaEventRecord(*event_, stream);
|
cudaEventRecord(event_, stream);
|
||||||
recorded_ = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
void CudaEvent::record(Stream s) {
|
|
||||||
if (s.device == mlx::core::Device::cpu) {
|
|
||||||
throw std::runtime_error("CudaEvent can not wait on cpu stream.");
|
|
||||||
} else {
|
|
||||||
auto& enc = cu::get_command_encoder(s);
|
|
||||||
enc.commit();
|
|
||||||
record(enc.stream());
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
bool CudaEvent::completed() const {
|
bool CudaEvent::completed() const {
|
||||||
return cudaEventQuery(*event_) == cudaSuccess;
|
return cudaEventQuery(event_) == cudaSuccess;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Wraps CudaEvent with a few features:
|
||||||
|
// 1. The class can be copied.
|
||||||
|
// 2. Make wait/record work with CPU streams.
|
||||||
|
// 3. Add checks for waiting on un-recorded event.
|
||||||
|
class CopyableCudaEvent {
|
||||||
|
public:
|
||||||
|
CopyableCudaEvent()
|
||||||
|
: event_(std::make_shared<CudaEvent>(
|
||||||
|
cudaEventDisableTiming | cudaEventBlockingSync)) {}
|
||||||
|
|
||||||
|
void wait() {
|
||||||
|
event_->wait();
|
||||||
|
}
|
||||||
|
|
||||||
|
void wait(Stream s) {
|
||||||
|
if (s.device == mlx::core::Device::cpu) {
|
||||||
|
scheduler::enqueue(s, [*this]() mutable {
|
||||||
|
check_recorded();
|
||||||
|
event_->wait();
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
check_recorded();
|
||||||
|
auto& encoder = cu::get_command_encoder(s);
|
||||||
|
encoder.commit();
|
||||||
|
event_->wait(encoder.stream());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void record(Stream s) {
|
||||||
|
if (s.device == mlx::core::Device::cpu) {
|
||||||
|
throw std::runtime_error("CudaEvent can not wait on CPU stream.");
|
||||||
|
} else {
|
||||||
|
auto& encoder = cu::get_command_encoder(s);
|
||||||
|
encoder.commit();
|
||||||
|
event_->record(encoder.stream());
|
||||||
|
recorded_ = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
bool is_signaled() const {
|
||||||
|
return recorded_ && event_->completed();
|
||||||
|
}
|
||||||
|
|
||||||
|
private:
|
||||||
|
void check_recorded() const {
|
||||||
|
if (!recorded_) {
|
||||||
|
throw std::runtime_error(
|
||||||
|
"Should not wait on a CudaEvent before recording.");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
std::shared_ptr<CudaEvent> event_;
|
||||||
|
bool recorded_{false};
|
||||||
|
};
|
||||||
|
|
||||||
///////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////
|
||||||
// SharedEvent implementations
|
// AtomicEvent implementations
|
||||||
///////////////////////////////////////////////////////////////////////////////
|
///////////////////////////////////////////////////////////////////////////////
|
||||||
|
|
||||||
__host__ __device__ void event_wait(SharedEvent::Atomic* ac, uint64_t value) {
|
__host__ __device__ void event_wait(AtomicEvent::Atomic* ac, uint64_t value) {
|
||||||
uint64_t current;
|
uint64_t current;
|
||||||
while ((current = ac->load()) < value) {
|
while ((current = ac->load()) < value) {
|
||||||
ac->wait(current);
|
ac->wait(current);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
__host__ __device__ void event_signal(SharedEvent::Atomic* ac, uint64_t value) {
|
__host__ __device__ void event_signal(AtomicEvent::Atomic* ac, uint64_t value) {
|
||||||
ac->store(value);
|
ac->store(value);
|
||||||
ac->notify_all();
|
ac->notify_all();
|
||||||
}
|
}
|
||||||
|
|
||||||
__global__ void event_wait_kernel(SharedEvent::Atomic* ac, uint64_t value) {
|
__global__ void event_wait_kernel(AtomicEvent::Atomic* ac, uint64_t value) {
|
||||||
event_wait(ac, value);
|
event_wait(ac, value);
|
||||||
}
|
}
|
||||||
|
|
||||||
__global__ void event_signal_kernel(SharedEvent::Atomic* ac, uint64_t value) {
|
__global__ void event_signal_kernel(AtomicEvent::Atomic* ac, uint64_t value) {
|
||||||
event_signal(ac, value);
|
event_signal(ac, value);
|
||||||
}
|
}
|
||||||
|
|
||||||
SharedEvent::Atomic* to_atomic(std::shared_ptr<Buffer> buf) {
|
AtomicEvent::AtomicEvent() {
|
||||||
return static_cast<SharedEvent::Atomic*>(buf->raw_ptr());
|
|
||||||
}
|
|
||||||
|
|
||||||
SharedEvent::SharedEvent() {
|
|
||||||
buf_ = std::shared_ptr<Buffer>(
|
buf_ = std::shared_ptr<Buffer>(
|
||||||
new Buffer{allocator().malloc(sizeof(Atomic))}, [](Buffer* ptr) {
|
new Buffer{allocator().malloc(sizeof(Atomic))}, [](Buffer* ptr) {
|
||||||
allocator().free(*ptr);
|
allocator().free(*ptr);
|
||||||
@@ -123,17 +162,17 @@ SharedEvent::SharedEvent() {
|
|||||||
*static_cast<uint64_t*>(buf_->raw_ptr()) = 0;
|
*static_cast<uint64_t*>(buf_->raw_ptr()) = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
void SharedEvent::wait(uint64_t value) {
|
void AtomicEvent::wait(uint64_t value) {
|
||||||
nvtx3::scoped_range r("cu::SharedEvent::wait");
|
nvtx3::scoped_range r("cu::AtomicEvent::wait");
|
||||||
event_wait(to_atomic(buf_), value);
|
event_wait(atomic(), value);
|
||||||
}
|
}
|
||||||
|
|
||||||
void SharedEvent::wait(cudaStream_t stream, uint64_t value) {
|
void AtomicEvent::wait(cudaStream_t stream, uint64_t value) {
|
||||||
event_wait_kernel<<<1, 1, 0, stream>>>(to_atomic(buf_), value);
|
event_wait_kernel<<<1, 1, 0, stream>>>(atomic(), value);
|
||||||
}
|
}
|
||||||
|
|
||||||
void SharedEvent::wait(Stream s, uint64_t value) {
|
void AtomicEvent::wait(Stream s, uint64_t value) {
|
||||||
nvtx3::scoped_range r("cu::SharedEvent::wait(s)");
|
nvtx3::scoped_range r("cu::AtomicEvent::wait(s)");
|
||||||
if (s.device == mlx::core::Device::cpu) {
|
if (s.device == mlx::core::Device::cpu) {
|
||||||
scheduler::enqueue(s, [*this, value]() mutable { wait(value); });
|
scheduler::enqueue(s, [*this, value]() mutable { wait(value); });
|
||||||
} else {
|
} else {
|
||||||
@@ -144,17 +183,17 @@ void SharedEvent::wait(Stream s, uint64_t value) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void SharedEvent::signal(uint64_t value) {
|
void AtomicEvent::signal(uint64_t value) {
|
||||||
nvtx3::scoped_range r("cu::SharedEvent::signal");
|
nvtx3::scoped_range r("cu::AtomicEvent::signal");
|
||||||
event_signal(to_atomic(buf_), value);
|
event_signal(atomic(), value);
|
||||||
}
|
}
|
||||||
|
|
||||||
void SharedEvent::signal(cudaStream_t stream, uint64_t value) {
|
void AtomicEvent::signal(cudaStream_t stream, uint64_t value) {
|
||||||
event_signal_kernel<<<1, 1, 0, stream>>>(to_atomic(buf_), value);
|
event_signal_kernel<<<1, 1, 0, stream>>>(atomic(), value);
|
||||||
}
|
}
|
||||||
|
|
||||||
void SharedEvent::signal(Stream s, uint64_t value) {
|
void AtomicEvent::signal(Stream s, uint64_t value) {
|
||||||
nvtx3::scoped_range r("cu::SharedEvent::signal(s)");
|
nvtx3::scoped_range r("cu::AtomicEvent::signal(s)");
|
||||||
if (s.device == mlx::core::Device::cpu) {
|
if (s.device == mlx::core::Device::cpu) {
|
||||||
// Signal through a GPU stream so the atomic is updated in GPU - updating
|
// Signal through a GPU stream so the atomic is updated in GPU - updating
|
||||||
// the atomic in CPU sometimes does not get GPU notified.
|
// the atomic in CPU sometimes does not get GPU notified.
|
||||||
@@ -168,14 +207,14 @@ void SharedEvent::signal(Stream s, uint64_t value) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
bool SharedEvent::is_signaled(uint64_t value) const {
|
bool AtomicEvent::is_signaled(uint64_t value) const {
|
||||||
nvtx3::scoped_range r("cu::SharedEvent::is_signaled");
|
nvtx3::scoped_range r("cu::AtomicEvent::is_signaled");
|
||||||
return to_atomic(buf_)->load() >= value;
|
return atomic()->load() >= value;
|
||||||
}
|
}
|
||||||
|
|
||||||
uint64_t SharedEvent::value() const {
|
uint64_t AtomicEvent::value() const {
|
||||||
nvtx3::scoped_range r("cu::SharedEvent::value");
|
nvtx3::scoped_range r("cu::AtomicEvent::value");
|
||||||
return to_atomic(buf_)->load();
|
return atomic()->load();
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace cu
|
} // namespace cu
|
||||||
@@ -188,14 +227,14 @@ namespace {
|
|||||||
|
|
||||||
struct EventImpl {
|
struct EventImpl {
|
||||||
// CudaEvent is preferred when possible because it is fast, however we have
|
// CudaEvent is preferred when possible because it is fast, however we have
|
||||||
// to fallback to SharedEvent in following cases:
|
// to fallback to AtomicEvent in following cases:
|
||||||
// 1. the event is used to wait/signal a cpu stream;
|
// 1. the event is used to wait/signal a cpu stream;
|
||||||
// 2. signal value other than 1 has been specified.
|
// 2. signal value other than 1 has been specified.
|
||||||
std::unique_ptr<cu::CudaEvent> cuda;
|
std::unique_ptr<cu::CopyableCudaEvent> cuda;
|
||||||
std::unique_ptr<cu::SharedEvent> shared;
|
std::unique_ptr<cu::AtomicEvent> atomic;
|
||||||
|
|
||||||
bool is_created() const {
|
bool is_created() const {
|
||||||
return cuda || shared;
|
return cuda || atomic;
|
||||||
}
|
}
|
||||||
|
|
||||||
void ensure_created(Stream s, uint64_t signal_value) {
|
void ensure_created(Stream s, uint64_t signal_value) {
|
||||||
@@ -203,10 +242,10 @@ struct EventImpl {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
if (s.device == mlx::core::Device::cpu || signal_value > 1) {
|
if (s.device == mlx::core::Device::cpu || signal_value > 1) {
|
||||||
nvtx3::mark("Using slow SharedEvent");
|
nvtx3::mark("Using slow AtomicEvent");
|
||||||
shared = std::make_unique<cu::SharedEvent>();
|
atomic = std::make_unique<cu::AtomicEvent>();
|
||||||
} else {
|
} else {
|
||||||
cuda = std::make_unique<cu::CudaEvent>();
|
cuda = std::make_unique<cu::CopyableCudaEvent>();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
@@ -225,7 +264,7 @@ void Event::wait() {
|
|||||||
assert(value() == 1);
|
assert(value() == 1);
|
||||||
event->cuda->wait();
|
event->cuda->wait();
|
||||||
} else {
|
} else {
|
||||||
event->shared->wait(value());
|
event->atomic->wait(value());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -236,7 +275,7 @@ void Event::wait(Stream s) {
|
|||||||
assert(value() == 1);
|
assert(value() == 1);
|
||||||
event->cuda->wait(s);
|
event->cuda->wait(s);
|
||||||
} else {
|
} else {
|
||||||
event->shared->wait(s, value());
|
event->atomic->wait(s, value());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -247,7 +286,7 @@ void Event::signal(Stream s) {
|
|||||||
assert(value() == 1);
|
assert(value() == 1);
|
||||||
event->cuda->record(s);
|
event->cuda->record(s);
|
||||||
} else {
|
} else {
|
||||||
event->shared->signal(s, value());
|
event->atomic->signal(s, value());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -258,9 +297,9 @@ bool Event::is_signaled() const {
|
|||||||
}
|
}
|
||||||
if (event->cuda) {
|
if (event->cuda) {
|
||||||
assert(value() == 1);
|
assert(value() == 1);
|
||||||
return event->cuda->recorded() && event->cuda->completed();
|
return event->cuda->is_signaled();
|
||||||
} else {
|
} else {
|
||||||
return event->shared->is_signaled(value());
|
return event->atomic->is_signaled(value());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -3,49 +3,54 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include "mlx/allocator.h"
|
#include "mlx/allocator.h"
|
||||||
|
#include "mlx/backend/cuda/utils.h"
|
||||||
#include "mlx/stream.h"
|
#include "mlx/stream.h"
|
||||||
|
|
||||||
|
#include <memory>
|
||||||
|
|
||||||
#include <cuda_runtime.h>
|
#include <cuda_runtime.h>
|
||||||
#include <cuda/atomic>
|
#include <cuda/atomic>
|
||||||
|
|
||||||
#include <memory>
|
|
||||||
|
|
||||||
namespace mlx::core::cu {
|
namespace mlx::core::cu {
|
||||||
|
|
||||||
class CudaEventHandle;
|
// RAII-managed move-only wrapper of cudaEvent_t.
|
||||||
|
struct CudaEventHandle : public CudaHandle<cudaEvent_t, cudaEventDestroy> {
|
||||||
|
CudaEventHandle(int flags);
|
||||||
|
int flags;
|
||||||
|
};
|
||||||
|
|
||||||
// Wrapper of native cuda event. It can synchronize between GPU streams, or wait
|
// Wrapper of native cuda event. It can synchronize between GPU streams, or wait
|
||||||
// on GPU stream in CPU stream, but can not wait on CPU stream.
|
// on GPU stream in CPU stream, but can not wait on CPU stream.
|
||||||
class CudaEvent {
|
class CudaEvent {
|
||||||
public:
|
public:
|
||||||
CudaEvent();
|
explicit CudaEvent(int flags);
|
||||||
|
~CudaEvent();
|
||||||
|
|
||||||
|
CudaEvent(CudaEvent&&) = default;
|
||||||
|
CudaEvent& operator=(CudaEvent&&) = default;
|
||||||
|
|
||||||
|
CudaEvent(const CudaEvent&) = delete;
|
||||||
|
CudaEvent& operator=(const CudaEvent&) = delete;
|
||||||
|
|
||||||
void wait();
|
void wait();
|
||||||
void wait(cudaStream_t stream);
|
void wait(cudaStream_t stream);
|
||||||
void wait(Stream s);
|
|
||||||
void record(cudaStream_t stream);
|
void record(cudaStream_t stream);
|
||||||
void record(Stream s);
|
|
||||||
|
|
||||||
// Return whether the recorded kernels have completed. Note that this method
|
// Return whether the recorded kernels have completed. Note that this method
|
||||||
// returns true if record() has not been called.
|
// returns true if record() has not been called.
|
||||||
bool completed() const;
|
bool completed() const;
|
||||||
|
|
||||||
bool recorded() const {
|
|
||||||
return recorded_;
|
|
||||||
}
|
|
||||||
|
|
||||||
private:
|
private:
|
||||||
bool recorded_{false};
|
CudaEventHandle event_;
|
||||||
std::shared_ptr<CudaEventHandle> event_;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// Event that can synchronize between CPU and GPU. It is much slower than
|
// Event that can synchronize between CPU and GPU. It is much slower than
|
||||||
// CudaEvent so the latter should always be preferred when possible.
|
// CudaEvent so the latter should always be preferred when possible.
|
||||||
class SharedEvent {
|
class AtomicEvent {
|
||||||
public:
|
public:
|
||||||
using Atomic = cuda::atomic<uint64_t>;
|
using Atomic = cuda::atomic<uint64_t>;
|
||||||
|
|
||||||
SharedEvent();
|
AtomicEvent();
|
||||||
|
|
||||||
void wait(uint64_t value);
|
void wait(uint64_t value);
|
||||||
void wait(cudaStream_t stream, uint64_t value);
|
void wait(cudaStream_t stream, uint64_t value);
|
||||||
@@ -57,7 +62,11 @@ class SharedEvent {
|
|||||||
uint64_t value() const;
|
uint64_t value() const;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
std::shared_ptr<mlx::core::allocator::Buffer> buf_;
|
Atomic* atomic() const {
|
||||||
|
return static_cast<AtomicEvent::Atomic*>(buf_->raw_ptr());
|
||||||
|
}
|
||||||
|
|
||||||
|
std::shared_ptr<allocator::Buffer> buf_;
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace mlx::core::cu
|
} // namespace mlx::core::cu
|
||||||
|
|||||||
@@ -7,7 +7,7 @@ namespace mlx::core {
|
|||||||
|
|
||||||
struct FenceImpl {
|
struct FenceImpl {
|
||||||
uint32_t count;
|
uint32_t count;
|
||||||
cu::SharedEvent event;
|
cu::AtomicEvent event;
|
||||||
};
|
};
|
||||||
|
|
||||||
Fence::Fence(Stream s) {
|
Fence::Fence(Stream s) {
|
||||||
|
|||||||
@@ -1,208 +0,0 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
|
||||||
|
|
||||||
#include "mlx/backend/cuda/device.h"
|
|
||||||
#include "mlx/backend/cuda/gemms/cublas_gemm.h"
|
|
||||||
#include "mlx/backend/cuda/kernel_utils.cuh"
|
|
||||||
|
|
||||||
#include <cooperative_groups.h>
|
|
||||||
|
|
||||||
namespace mlx::core::cu {
|
|
||||||
|
|
||||||
namespace cg = cooperative_groups;
|
|
||||||
|
|
||||||
__global__ void set_mm_device_pointers(
|
|
||||||
int8_t** pointers,
|
|
||||||
int8_t* a_start,
|
|
||||||
int8_t* b_start,
|
|
||||||
int8_t* out_start,
|
|
||||||
int item_size,
|
|
||||||
const __grid_constant__ Shape batch_shape,
|
|
||||||
const __grid_constant__ Strides a_batch_strides,
|
|
||||||
const __grid_constant__ Strides b_batch_strides,
|
|
||||||
int64_t batch_stride,
|
|
||||||
int batch_ndim,
|
|
||||||
int batch_count) {
|
|
||||||
auto index = cg::this_grid().thread_rank();
|
|
||||||
if (index >= batch_count) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
auto [a_offset, b_offset] = elem_to_loc(
|
|
||||||
index,
|
|
||||||
batch_shape.data(),
|
|
||||||
a_batch_strides.data(),
|
|
||||||
b_batch_strides.data(),
|
|
||||||
batch_ndim);
|
|
||||||
pointers[index] = a_start + item_size * a_offset;
|
|
||||||
pointers[index + batch_count] = b_start + item_size * b_offset;
|
|
||||||
pointers[index + 2 * batch_count] =
|
|
||||||
out_start + item_size * index * batch_stride;
|
|
||||||
}
|
|
||||||
|
|
||||||
__global__ void set_addmm_device_pointers(
|
|
||||||
int8_t** pointers,
|
|
||||||
int8_t* a_start,
|
|
||||||
int8_t* b_start,
|
|
||||||
int8_t* c_start,
|
|
||||||
int8_t* out_start,
|
|
||||||
int item_size,
|
|
||||||
const __grid_constant__ Shape batch_shape,
|
|
||||||
const __grid_constant__ Strides a_batch_strides,
|
|
||||||
const __grid_constant__ Strides b_batch_strides,
|
|
||||||
const __grid_constant__ Strides c_batch_strides,
|
|
||||||
int64_t batch_stride,
|
|
||||||
int batch_ndim,
|
|
||||||
int batch_count) {
|
|
||||||
auto index = cg::this_grid().thread_rank();
|
|
||||||
if (index >= batch_count) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
auto [a_offset, b_offset, c_offset] = elem_to_loc(
|
|
||||||
index,
|
|
||||||
batch_shape.data(),
|
|
||||||
a_batch_strides.data(),
|
|
||||||
b_batch_strides.data(),
|
|
||||||
c_batch_strides.data(),
|
|
||||||
batch_ndim);
|
|
||||||
pointers[index] = a_start + item_size * a_offset;
|
|
||||||
pointers[index + batch_count] = b_start + item_size * b_offset;
|
|
||||||
pointers[index + 2 * batch_count] = c_start + item_size * c_offset;
|
|
||||||
pointers[index + 3 * batch_count] =
|
|
||||||
out_start + item_size * index * batch_stride;
|
|
||||||
}
|
|
||||||
|
|
||||||
void set_pointer_mode(cublasLtMatrixLayout_t desc, int batch_count) {
|
|
||||||
auto batch_mode = CUBLASLT_BATCH_MODE_POINTER_ARRAY;
|
|
||||||
CHECK_CUBLAS_ERROR(cublasLtMatrixLayoutSetAttribute(
|
|
||||||
desc,
|
|
||||||
CUBLASLT_MATRIX_LAYOUT_BATCH_MODE,
|
|
||||||
&batch_mode,
|
|
||||||
sizeof(batch_mode)));
|
|
||||||
CHECK_CUBLAS_ERROR(cublasLtMatrixLayoutSetAttribute(
|
|
||||||
desc, CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT, &batch_count, sizeof(int32_t)));
|
|
||||||
}
|
|
||||||
|
|
||||||
void Matmul::run_batched(
|
|
||||||
cu::CommandEncoder& encoder,
|
|
||||||
array& out,
|
|
||||||
const array& a,
|
|
||||||
const array& b,
|
|
||||||
const mlx::core::Shape& batch_shape,
|
|
||||||
const mlx::core::Strides& a_batch_strides,
|
|
||||||
const mlx::core::Strides& b_batch_strides) {
|
|
||||||
auto batch_count = out.size() / (M_ * N_);
|
|
||||||
set_pointer_mode(a_desc_, batch_count);
|
|
||||||
set_pointer_mode(b_desc_, batch_count);
|
|
||||||
set_pointer_mode(out_desc_, batch_count);
|
|
||||||
|
|
||||||
// Launch kernel to set device offsets
|
|
||||||
auto pointers = array(
|
|
||||||
allocator::malloc(batch_count * sizeof(uint64_t) * 3),
|
|
||||||
{static_cast<int>(batch_count * 3)},
|
|
||||||
uint64);
|
|
||||||
|
|
||||||
encoder.add_temporary(pointers);
|
|
||||||
int block_size = 512;
|
|
||||||
encoder.set_output_array(pointers);
|
|
||||||
|
|
||||||
encoder.add_kernel_node(
|
|
||||||
cu::set_mm_device_pointers,
|
|
||||||
cuda::ceil_div(pointers.size(), block_size),
|
|
||||||
block_size,
|
|
||||||
0,
|
|
||||||
pointers.data<int8_t*>(),
|
|
||||||
a.data<int8_t>(),
|
|
||||||
b.data<int8_t>(),
|
|
||||||
out.data<int8_t>(),
|
|
||||||
static_cast<int>(out.dtype().size()),
|
|
||||||
const_param(batch_shape),
|
|
||||||
const_param(a_batch_strides),
|
|
||||||
const_param(b_batch_strides),
|
|
||||||
static_cast<int64_t>(M_) * N_,
|
|
||||||
static_cast<int>(batch_shape.size()),
|
|
||||||
batch_count);
|
|
||||||
|
|
||||||
// Run matmul
|
|
||||||
encoder.set_input_array(pointers);
|
|
||||||
encoder.set_input_array(a);
|
|
||||||
encoder.set_input_array(b);
|
|
||||||
encoder.set_output_array(out);
|
|
||||||
|
|
||||||
auto a_pointers = pointers.data<int8_t*>();
|
|
||||||
auto b_pointers = a_pointers + batch_count;
|
|
||||||
auto out_pointers = b_pointers + batch_count;
|
|
||||||
run_impl(
|
|
||||||
encoder,
|
|
||||||
reinterpret_cast<void*>(out_pointers),
|
|
||||||
reinterpret_cast<void*>(a_pointers),
|
|
||||||
reinterpret_cast<void*>(b_pointers),
|
|
||||||
nullptr);
|
|
||||||
}
|
|
||||||
|
|
||||||
void Matmul::run_batched(
|
|
||||||
cu::CommandEncoder& encoder,
|
|
||||||
array& out,
|
|
||||||
const array& a,
|
|
||||||
const array& b,
|
|
||||||
const array& c,
|
|
||||||
const mlx::core::Shape& batch_shape,
|
|
||||||
const mlx::core::Strides& a_batch_strides,
|
|
||||||
const mlx::core::Strides& b_batch_strides,
|
|
||||||
const mlx::core::Strides& c_batch_strides,
|
|
||||||
float alpha,
|
|
||||||
float beta) {
|
|
||||||
auto batch_count = out.size() / (M_ * N_);
|
|
||||||
set_pointer_mode(a_desc_, batch_count);
|
|
||||||
set_pointer_mode(b_desc_, batch_count);
|
|
||||||
set_pointer_mode(c_desc_, batch_count);
|
|
||||||
set_pointer_mode(out_desc_, batch_count);
|
|
||||||
|
|
||||||
// Launch kernel to set device offsets
|
|
||||||
auto pointers = array(
|
|
||||||
allocator::malloc(batch_count * sizeof(uint64_t) * 4),
|
|
||||||
{static_cast<int>(batch_count * 4)},
|
|
||||||
uint64);
|
|
||||||
|
|
||||||
encoder.add_temporary(pointers);
|
|
||||||
int block_size = 512;
|
|
||||||
encoder.set_output_array(pointers);
|
|
||||||
encoder.add_kernel_node(
|
|
||||||
cu::set_addmm_device_pointers,
|
|
||||||
cuda::ceil_div(pointers.size(), block_size),
|
|
||||||
block_size,
|
|
||||||
0,
|
|
||||||
pointers.data<int8_t*>(),
|
|
||||||
a.data<int8_t>(),
|
|
||||||
b.data<int8_t>(),
|
|
||||||
c.data<int8_t>(),
|
|
||||||
out.data<int8_t>(),
|
|
||||||
static_cast<int>(out.dtype().size()),
|
|
||||||
const_param(batch_shape),
|
|
||||||
const_param(a_batch_strides),
|
|
||||||
const_param(b_batch_strides),
|
|
||||||
const_param(c_batch_strides),
|
|
||||||
static_cast<int64_t>(M_) * N_,
|
|
||||||
static_cast<int>(batch_shape.size()),
|
|
||||||
batch_count);
|
|
||||||
|
|
||||||
// Run matmul
|
|
||||||
encoder.set_input_array(pointers);
|
|
||||||
encoder.set_input_array(a);
|
|
||||||
encoder.set_input_array(b);
|
|
||||||
encoder.set_input_array(c);
|
|
||||||
encoder.set_output_array(out);
|
|
||||||
|
|
||||||
auto a_pointers = pointers.data<int8_t*>();
|
|
||||||
auto b_pointers = a_pointers + batch_count;
|
|
||||||
auto c_pointers = b_pointers + batch_count;
|
|
||||||
auto out_pointers = c_pointers + batch_count;
|
|
||||||
run_impl(
|
|
||||||
encoder,
|
|
||||||
reinterpret_cast<void*>(out_pointers),
|
|
||||||
reinterpret_cast<void*>(a_pointers),
|
|
||||||
reinterpret_cast<void*>(b_pointers),
|
|
||||||
reinterpret_cast<void*>(c_pointers),
|
|
||||||
alpha,
|
|
||||||
beta);
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace mlx::core::cu
|
|
||||||
@@ -7,10 +7,12 @@
|
|||||||
|
|
||||||
#include <fmt/format.h>
|
#include <fmt/format.h>
|
||||||
|
|
||||||
namespace mlx::core::cu {
|
namespace mlx::core {
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
struct CublasPreference {
|
struct CublasPreference {
|
||||||
CublasPreference(Device& device) {
|
CublasPreference(cu::Device& device) {
|
||||||
// The recommended cublas workspace size is 4 MiB for pre-Hopper and 32 MiB
|
// The recommended cublas workspace size is 4 MiB for pre-Hopper and 32 MiB
|
||||||
// for Hopper+:
|
// for Hopper+:
|
||||||
// https://docs.nvidia.com/cuda/cublas/#cublassetworkspace
|
// https://docs.nvidia.com/cuda/cublas/#cublassetworkspace
|
||||||
@@ -33,7 +35,7 @@ struct CublasPreference {
|
|||||||
cublasLtMatmulPreference_t pref_{nullptr};
|
cublasLtMatmulPreference_t pref_{nullptr};
|
||||||
};
|
};
|
||||||
|
|
||||||
cublasLtMatmulPreference_t cublas_preference(Device& device) {
|
cublasLtMatmulPreference_t cublas_preference(cu::Device& device) {
|
||||||
static CublasPreference pref(device);
|
static CublasPreference pref(device);
|
||||||
return pref.pref_;
|
return pref.pref_;
|
||||||
}
|
}
|
||||||
@@ -52,7 +54,7 @@ cublasComputeType_t dtype_to_compute_type(Dtype dtype) {
|
|||||||
return CUBLAS_COMPUTE_64F;
|
return CUBLAS_COMPUTE_64F;
|
||||||
default:
|
default:
|
||||||
throw std::runtime_error(fmt::format(
|
throw std::runtime_error(fmt::format(
|
||||||
"Unsupported dtype in Matmul: {}.", dtype_to_string(dtype)));
|
"Unsupported dtype in CublasGemm: {}.", dtype_to_string(dtype)));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -70,7 +72,7 @@ cudaDataType_t dtype_to_cublas_type(Dtype dtype) {
|
|||||||
return CUDA_C_32F;
|
return CUDA_C_32F;
|
||||||
default:
|
default:
|
||||||
throw std::runtime_error(fmt::format(
|
throw std::runtime_error(fmt::format(
|
||||||
"Unsupported dtype in Matmul: {}.", dtype_to_string(dtype)));
|
"Unsupported dtype in CublasGemm: {}.", dtype_to_string(dtype)));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -83,10 +85,10 @@ cublasLtMatrixLayout_t create_matrix_layout(
|
|||||||
int32_t batch_count,
|
int32_t batch_count,
|
||||||
int64_t batch_stride) {
|
int64_t batch_stride) {
|
||||||
cublasLtMatrixLayout_t desc;
|
cublasLtMatrixLayout_t desc;
|
||||||
|
if (transposed) {
|
||||||
|
std::swap(rows, cols);
|
||||||
|
}
|
||||||
CHECK_CUBLAS_ERROR(cublasLtMatrixLayoutCreate(&desc, type, rows, cols, ld));
|
CHECK_CUBLAS_ERROR(cublasLtMatrixLayoutCreate(&desc, type, rows, cols, ld));
|
||||||
cublasLtOrder_t order = transposed ? CUBLASLT_ORDER_COL : CUBLASLT_ORDER_ROW;
|
|
||||||
CHECK_CUBLAS_ERROR(cublasLtMatrixLayoutSetAttribute(
|
|
||||||
desc, CUBLASLT_MATRIX_LAYOUT_ORDER, &order, sizeof(cublasLtOrder_t)));
|
|
||||||
if (batch_count > 1) {
|
if (batch_count > 1) {
|
||||||
CHECK_CUBLAS_ERROR(cublasLtMatrixLayoutSetAttribute(
|
CHECK_CUBLAS_ERROR(cublasLtMatrixLayoutSetAttribute(
|
||||||
desc,
|
desc,
|
||||||
@@ -102,8 +104,10 @@ cublasLtMatrixLayout_t create_matrix_layout(
|
|||||||
return desc;
|
return desc;
|
||||||
}
|
}
|
||||||
|
|
||||||
Matmul::Matmul(
|
} // namespace
|
||||||
Device& device,
|
|
||||||
|
CublasGemm::CublasGemm(
|
||||||
|
cu::Device& device,
|
||||||
Dtype dtype,
|
Dtype dtype,
|
||||||
bool a_transposed,
|
bool a_transposed,
|
||||||
uint64_t a_rows,
|
uint64_t a_rows,
|
||||||
@@ -134,29 +138,38 @@ Matmul::Matmul(
|
|||||||
CUBLASLT_MATMUL_DESC_POINTER_MODE,
|
CUBLASLT_MATMUL_DESC_POINTER_MODE,
|
||||||
&pointer_mode,
|
&pointer_mode,
|
||||||
sizeof(int32_t)));
|
sizeof(int32_t)));
|
||||||
cublasOperation_t op = CUBLAS_OP_N;
|
|
||||||
|
// In cublasLt matrices use column-major layout, while it is possible to use
|
||||||
|
// the CUBLASLT_ORDER_ROW option to switch to row-major layout, the bias
|
||||||
|
// epilogue does not work with the option. So instead we swap A and B to make
|
||||||
|
// cublasLt return the row-major result, which works because:
|
||||||
|
// - the data of a matrix in row-major layout is identical to its transpose in
|
||||||
|
// column-major layout
|
||||||
|
// - C^T = (A @ B)^T = B^T @ A^T
|
||||||
|
cublasOperation_t a_op = b_transposed ? CUBLAS_OP_T : CUBLAS_OP_N;
|
||||||
CHECK_CUBLAS_ERROR(cublasLtMatmulDescSetAttribute(
|
CHECK_CUBLAS_ERROR(cublasLtMatmulDescSetAttribute(
|
||||||
matmul_desc_,
|
matmul_desc_,
|
||||||
CUBLASLT_MATMUL_DESC_TRANSA,
|
CUBLASLT_MATMUL_DESC_TRANSA,
|
||||||
&op,
|
&a_op,
|
||||||
sizeof(cublasOperation_t)));
|
sizeof(cublasOperation_t)));
|
||||||
|
cublasOperation_t b_op = a_transposed ? CUBLAS_OP_T : CUBLAS_OP_N;
|
||||||
CHECK_CUBLAS_ERROR(cublasLtMatmulDescSetAttribute(
|
CHECK_CUBLAS_ERROR(cublasLtMatmulDescSetAttribute(
|
||||||
matmul_desc_,
|
matmul_desc_,
|
||||||
CUBLASLT_MATMUL_DESC_TRANSB,
|
CUBLASLT_MATMUL_DESC_TRANSB,
|
||||||
&op,
|
&b_op,
|
||||||
sizeof(cublasOperation_t)));
|
sizeof(cublasOperation_t)));
|
||||||
|
|
||||||
auto type = dtype_to_cublas_type(dtype);
|
auto type = dtype_to_cublas_type(dtype);
|
||||||
a_desc_ = create_matrix_layout(
|
a_desc_ = create_matrix_layout(
|
||||||
type, a_rows, a_cols, a_transposed, lda, batch_count, a_batch_stride);
|
type, b_cols, b_rows, b_transposed, ldb, batch_count, b_batch_stride);
|
||||||
b_desc_ = create_matrix_layout(
|
b_desc_ = create_matrix_layout(
|
||||||
type, b_rows, b_cols, b_transposed, ldb, batch_count, b_batch_stride);
|
type, a_cols, a_rows, a_transposed, lda, batch_count, a_batch_stride);
|
||||||
out_desc_ = create_matrix_layout(
|
out_desc_ = create_matrix_layout(
|
||||||
type, a_rows, b_cols, false, b_cols, batch_count, a_rows * b_cols);
|
type, b_cols, a_rows, false, b_cols, batch_count, a_rows * b_cols);
|
||||||
}
|
}
|
||||||
|
|
||||||
Matmul::Matmul(
|
CublasGemm::CublasGemm(
|
||||||
Device& device,
|
cu::Device& device,
|
||||||
Dtype dtype,
|
Dtype dtype,
|
||||||
bool a_transposed,
|
bool a_transposed,
|
||||||
uint64_t a_rows,
|
uint64_t a_rows,
|
||||||
@@ -171,7 +184,7 @@ Matmul::Matmul(
|
|||||||
int64_t a_batch_stride,
|
int64_t a_batch_stride,
|
||||||
int64_t b_batch_stride,
|
int64_t b_batch_stride,
|
||||||
int64_t c_batch_stride)
|
int64_t c_batch_stride)
|
||||||
: Matmul(
|
: CublasGemm(
|
||||||
device,
|
device,
|
||||||
dtype,
|
dtype,
|
||||||
a_transposed,
|
a_transposed,
|
||||||
@@ -187,10 +200,10 @@ Matmul::Matmul(
|
|||||||
b_batch_stride) {
|
b_batch_stride) {
|
||||||
auto type = dtype_to_cublas_type(dtype);
|
auto type = dtype_to_cublas_type(dtype);
|
||||||
c_desc_ = create_matrix_layout(
|
c_desc_ = create_matrix_layout(
|
||||||
type, a_rows, b_cols, false, ldc, batch_count, c_batch_stride);
|
type, b_cols, a_rows, false, ldc, batch_count, c_batch_stride);
|
||||||
}
|
}
|
||||||
|
|
||||||
Matmul::~Matmul() {
|
CublasGemm::~CublasGemm() {
|
||||||
CHECK_CUBLAS_ERROR(cublasLtMatrixLayoutDestroy(a_desc_));
|
CHECK_CUBLAS_ERROR(cublasLtMatrixLayoutDestroy(a_desc_));
|
||||||
CHECK_CUBLAS_ERROR(cublasLtMatrixLayoutDestroy(b_desc_));
|
CHECK_CUBLAS_ERROR(cublasLtMatrixLayoutDestroy(b_desc_));
|
||||||
CHECK_CUBLAS_ERROR(cublasLtMatrixLayoutDestroy(c_desc_));
|
CHECK_CUBLAS_ERROR(cublasLtMatrixLayoutDestroy(c_desc_));
|
||||||
@@ -198,7 +211,122 @@ Matmul::~Matmul() {
|
|||||||
CHECK_CUBLAS_ERROR(cublasLtMatmulDescDestroy(matmul_desc_));
|
CHECK_CUBLAS_ERROR(cublasLtMatmulDescDestroy(matmul_desc_));
|
||||||
}
|
}
|
||||||
|
|
||||||
void Matmul::run_impl(
|
void CublasGemm::set_out(
|
||||||
|
Dtype dtype,
|
||||||
|
bool transposed,
|
||||||
|
uint64_t rows,
|
||||||
|
uint64_t cols,
|
||||||
|
int64_t ld,
|
||||||
|
int32_t batch_count,
|
||||||
|
int64_t batch_stride) {
|
||||||
|
CHECK_CUBLAS_ERROR(cublasLtMatrixLayoutDestroy(out_desc_));
|
||||||
|
out_desc_ = create_matrix_layout(
|
||||||
|
dtype_to_cublas_type(dtype),
|
||||||
|
cols,
|
||||||
|
rows,
|
||||||
|
transposed,
|
||||||
|
ld,
|
||||||
|
batch_count,
|
||||||
|
batch_stride);
|
||||||
|
}
|
||||||
|
|
||||||
|
void CublasGemm::set_bias(cu::CommandEncoder& encoder, const array& bias) {
|
||||||
|
encoder.set_input_array(bias);
|
||||||
|
cublasLtEpilogue_t epilogue = CUBLASLT_EPILOGUE_BIAS;
|
||||||
|
CHECK_CUBLAS_ERROR(cublasLtMatmulDescSetAttribute(
|
||||||
|
matmul_desc_,
|
||||||
|
CUBLASLT_MATMUL_DESC_EPILOGUE,
|
||||||
|
&epilogue,
|
||||||
|
sizeof(epilogue)));
|
||||||
|
auto* bias_ptr = bias.data<void>();
|
||||||
|
CHECK_CUBLAS_ERROR(cublasLtMatmulDescSetAttribute(
|
||||||
|
matmul_desc_,
|
||||||
|
CUBLASLT_MATMUL_DESC_BIAS_POINTER,
|
||||||
|
&bias_ptr,
|
||||||
|
sizeof(bias_ptr)));
|
||||||
|
}
|
||||||
|
|
||||||
|
void CublasGemm::run(
|
||||||
|
cu::CommandEncoder& encoder,
|
||||||
|
array& out,
|
||||||
|
const array& a,
|
||||||
|
const array& b,
|
||||||
|
const Shape& batch_shape,
|
||||||
|
const Strides& a_batch_strides,
|
||||||
|
const Strides& b_batch_strides,
|
||||||
|
float alpha) {
|
||||||
|
int batch_count = out.size() / (M_ * N_);
|
||||||
|
if (batch_count / batch_shape.back() > 1) {
|
||||||
|
run_batched(
|
||||||
|
encoder,
|
||||||
|
out,
|
||||||
|
a,
|
||||||
|
b,
|
||||||
|
batch_shape,
|
||||||
|
a_batch_strides,
|
||||||
|
b_batch_strides,
|
||||||
|
alpha);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
encoder.set_input_array(a);
|
||||||
|
encoder.set_input_array(b);
|
||||||
|
encoder.set_output_array(out);
|
||||||
|
|
||||||
|
execute(
|
||||||
|
encoder,
|
||||||
|
out.data<void>(),
|
||||||
|
a.data<void>(),
|
||||||
|
b.data<void>(),
|
||||||
|
nullptr,
|
||||||
|
alpha);
|
||||||
|
}
|
||||||
|
|
||||||
|
void CublasGemm::run(
|
||||||
|
cu::CommandEncoder& encoder,
|
||||||
|
array& out,
|
||||||
|
const array& a,
|
||||||
|
const array& b,
|
||||||
|
const array& c,
|
||||||
|
const Shape& batch_shape,
|
||||||
|
const Strides& a_batch_strides,
|
||||||
|
const Strides& b_batch_strides,
|
||||||
|
const Strides& c_batch_strides,
|
||||||
|
float alpha,
|
||||||
|
float beta) {
|
||||||
|
int batch_count = out.size() / (M_ * N_);
|
||||||
|
if (batch_count / batch_shape.back() > 1) {
|
||||||
|
run_batched(
|
||||||
|
encoder,
|
||||||
|
out,
|
||||||
|
a,
|
||||||
|
b,
|
||||||
|
c,
|
||||||
|
batch_shape,
|
||||||
|
a_batch_strides,
|
||||||
|
b_batch_strides,
|
||||||
|
c_batch_strides,
|
||||||
|
alpha,
|
||||||
|
beta);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
encoder.set_input_array(a);
|
||||||
|
encoder.set_input_array(b);
|
||||||
|
encoder.set_input_array(c);
|
||||||
|
encoder.set_output_array(out);
|
||||||
|
|
||||||
|
execute(
|
||||||
|
encoder,
|
||||||
|
out.data<void>(),
|
||||||
|
a.data<void>(),
|
||||||
|
b.data<void>(),
|
||||||
|
c.data<void>(),
|
||||||
|
alpha,
|
||||||
|
beta);
|
||||||
|
}
|
||||||
|
|
||||||
|
void CublasGemm::execute(
|
||||||
cu::CommandEncoder& encoder,
|
cu::CommandEncoder& encoder,
|
||||||
void* out,
|
void* out,
|
||||||
const void* a,
|
const void* a,
|
||||||
@@ -241,9 +369,9 @@ void Matmul::run_impl(
|
|||||||
handle_,
|
handle_,
|
||||||
matmul_desc_,
|
matmul_desc_,
|
||||||
&alpha,
|
&alpha,
|
||||||
a,
|
b, // a and b are swapped
|
||||||
a_desc_,
|
a_desc_,
|
||||||
b,
|
a,
|
||||||
b_desc_,
|
b_desc_,
|
||||||
&beta,
|
&beta,
|
||||||
c ? c : out,
|
c ? c : out,
|
||||||
@@ -256,29 +384,4 @@ void Matmul::run_impl(
|
|||||||
encoder.stream()));
|
encoder.stream()));
|
||||||
}
|
}
|
||||||
|
|
||||||
void Matmul::run(
|
} // namespace mlx::core
|
||||||
cu::CommandEncoder& encoder,
|
|
||||||
array& out,
|
|
||||||
const array& a,
|
|
||||||
const array& b,
|
|
||||||
const std::optional<array>& c /* = std::nullopt */,
|
|
||||||
float alpha /* = 1 */,
|
|
||||||
float beta /* = 0 */) {
|
|
||||||
encoder.set_input_array(a);
|
|
||||||
encoder.set_input_array(b);
|
|
||||||
if (c) {
|
|
||||||
encoder.set_input_array(*c);
|
|
||||||
}
|
|
||||||
encoder.set_output_array(out);
|
|
||||||
|
|
||||||
run_impl(
|
|
||||||
encoder,
|
|
||||||
out.data<void>(),
|
|
||||||
a.data<void>(),
|
|
||||||
b.data<void>(),
|
|
||||||
c ? c->data<void>() : nullptr,
|
|
||||||
alpha,
|
|
||||||
beta);
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace mlx::core::cu
|
|
||||||
|
|||||||
@@ -5,13 +5,13 @@
|
|||||||
#include "mlx/backend/cuda/device.h"
|
#include "mlx/backend/cuda/device.h"
|
||||||
|
|
||||||
#include <cublasLt.h>
|
#include <cublasLt.h>
|
||||||
#include <optional>
|
|
||||||
|
|
||||||
namespace mlx::core::cu {
|
namespace mlx::core {
|
||||||
class Matmul {
|
|
||||||
|
class CublasGemm {
|
||||||
public:
|
public:
|
||||||
Matmul(
|
CublasGemm(
|
||||||
Device& device,
|
cu::Device& device,
|
||||||
Dtype dtype,
|
Dtype dtype,
|
||||||
bool a_transposed,
|
bool a_transposed,
|
||||||
uint64_t a_rows,
|
uint64_t a_rows,
|
||||||
@@ -25,8 +25,8 @@ class Matmul {
|
|||||||
int64_t a_batch_stride,
|
int64_t a_batch_stride,
|
||||||
int64_t b_batch_stride);
|
int64_t b_batch_stride);
|
||||||
|
|
||||||
Matmul(
|
CublasGemm(
|
||||||
Device& device,
|
cu::Device& device,
|
||||||
Dtype dtype,
|
Dtype dtype,
|
||||||
bool a_transposed,
|
bool a_transposed,
|
||||||
uint64_t a_rows,
|
uint64_t a_rows,
|
||||||
@@ -42,25 +42,54 @@ class Matmul {
|
|||||||
int64_t b_batch_stride,
|
int64_t b_batch_stride,
|
||||||
int64_t c_batch_stride);
|
int64_t c_batch_stride);
|
||||||
|
|
||||||
~Matmul();
|
~CublasGemm();
|
||||||
|
|
||||||
|
// The output's descriptor is inferred from inputs by default, use this method
|
||||||
|
// for unusual output.
|
||||||
|
void set_out(
|
||||||
|
Dtype dtype,
|
||||||
|
bool transposed,
|
||||||
|
uint64_t rows,
|
||||||
|
uint64_t cols,
|
||||||
|
int64_t ld,
|
||||||
|
int32_t batch_count,
|
||||||
|
int64_t batch_stride);
|
||||||
|
|
||||||
|
void set_bias(cu::CommandEncoder& encoder, const array& bias);
|
||||||
|
|
||||||
void run(
|
void run(
|
||||||
cu::CommandEncoder& encoder,
|
cu::CommandEncoder& encoder,
|
||||||
array& out,
|
array& out,
|
||||||
const array& a,
|
const array& a,
|
||||||
const array& b,
|
const array& b,
|
||||||
const std::optional<array>& c = std::nullopt,
|
const Shape& batch_shape,
|
||||||
float alpha = 1,
|
const Strides& a_batch_strides,
|
||||||
float beta = 0);
|
const Strides& b_batch_strides,
|
||||||
|
float alpha = 1.0f);
|
||||||
|
|
||||||
|
void run(
|
||||||
|
cu::CommandEncoder& encoder,
|
||||||
|
array& out,
|
||||||
|
const array& a,
|
||||||
|
const array& b,
|
||||||
|
const array& c,
|
||||||
|
const Shape& batch_shape,
|
||||||
|
const Strides& a_batch_strides,
|
||||||
|
const Strides& b_batch_strides,
|
||||||
|
const Strides& c_batch_strides,
|
||||||
|
float alpha,
|
||||||
|
float beta);
|
||||||
|
|
||||||
|
private:
|
||||||
void run_batched(
|
void run_batched(
|
||||||
cu::CommandEncoder& encoder,
|
cu::CommandEncoder& encoder,
|
||||||
array& out,
|
array& out,
|
||||||
const array& a,
|
const array& a,
|
||||||
const array& b,
|
const array& b,
|
||||||
const mlx::core::Shape& batch_shape,
|
const Shape& batch_shape,
|
||||||
const mlx::core::Strides& a_batch_strides,
|
const Strides& a_batch_strides,
|
||||||
const mlx::core::Strides& b_batch_strides);
|
const Strides& b_batch_strides,
|
||||||
|
float alpha);
|
||||||
|
|
||||||
void run_batched(
|
void run_batched(
|
||||||
cu::CommandEncoder& encoder,
|
cu::CommandEncoder& encoder,
|
||||||
@@ -68,15 +97,14 @@ class Matmul {
|
|||||||
const array& a,
|
const array& a,
|
||||||
const array& b,
|
const array& b,
|
||||||
const array& c,
|
const array& c,
|
||||||
const mlx::core::Shape& batch_shape,
|
const Shape& batch_shape,
|
||||||
const mlx::core::Strides& a_batch_strides,
|
const Strides& a_batch_strides,
|
||||||
const mlx::core::Strides& b_batch_strides,
|
const Strides& b_batch_strides,
|
||||||
const mlx::core::Strides& c_batch_strides,
|
const Strides& c_batch_strides,
|
||||||
float alpha,
|
float alpha,
|
||||||
float beta);
|
float beta);
|
||||||
|
|
||||||
private:
|
void execute(
|
||||||
void run_impl(
|
|
||||||
cu::CommandEncoder& encoder,
|
cu::CommandEncoder& encoder,
|
||||||
void* out,
|
void* out,
|
||||||
const void* a,
|
const void* a,
|
||||||
@@ -97,4 +125,4 @@ class Matmul {
|
|||||||
cublasLtMatmulHeuristicResult_t heuristic_;
|
cublasLtMatmulHeuristicResult_t heuristic_;
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace mlx::core::cu
|
} // namespace mlx::core
|
||||||
|
|||||||
@@ -4,16 +4,17 @@
|
|||||||
#include "mlx/backend/cuda/device.h"
|
#include "mlx/backend/cuda/device.h"
|
||||||
#include "mlx/backend/cuda/gemms/cublas_gemm.h"
|
#include "mlx/backend/cuda/gemms/cublas_gemm.h"
|
||||||
|
|
||||||
namespace mlx::core::cu {
|
namespace mlx::core {
|
||||||
|
|
||||||
void Matmul::run_batched(
|
void CublasGemm::run_batched(
|
||||||
cu::CommandEncoder& encoder,
|
cu::CommandEncoder& encoder,
|
||||||
array& out,
|
array& out,
|
||||||
const array& a,
|
const array& a,
|
||||||
const array& b,
|
const array& b,
|
||||||
const mlx::core::Shape& batch_shape,
|
const Shape& batch_shape,
|
||||||
const mlx::core::Strides& a_batch_strides,
|
const Strides& a_batch_strides,
|
||||||
const mlx::core::Strides& b_batch_strides) {
|
const Strides& b_batch_strides,
|
||||||
|
float alpha) {
|
||||||
encoder.set_input_array(a);
|
encoder.set_input_array(a);
|
||||||
encoder.set_input_array(b);
|
encoder.set_input_array(b);
|
||||||
encoder.set_output_array(out);
|
encoder.set_output_array(out);
|
||||||
@@ -22,27 +23,28 @@ void Matmul::run_batched(
|
|||||||
ContiguousIterator b_it(batch_shape, b_batch_strides, batch_shape.size() - 1);
|
ContiguousIterator b_it(batch_shape, b_batch_strides, batch_shape.size() - 1);
|
||||||
auto concurrent = encoder.concurrent_context();
|
auto concurrent = encoder.concurrent_context();
|
||||||
for (size_t i = 0; i < nbatch; ++i) {
|
for (size_t i = 0; i < nbatch; ++i) {
|
||||||
run_impl(
|
execute(
|
||||||
encoder,
|
encoder,
|
||||||
out.data<int8_t>() + out.itemsize() * i * batch_shape.back() * M_ * N_,
|
out.data<int8_t>() + out.itemsize() * i * batch_shape.back() * M_ * N_,
|
||||||
a.data<int8_t>() + a.itemsize() * a_it.loc,
|
a.data<int8_t>() + a.itemsize() * a_it.loc,
|
||||||
b.data<int8_t>() + b.itemsize() * b_it.loc,
|
b.data<int8_t>() + b.itemsize() * b_it.loc,
|
||||||
nullptr);
|
nullptr,
|
||||||
|
alpha);
|
||||||
a_it.step();
|
a_it.step();
|
||||||
b_it.step();
|
b_it.step();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void Matmul::run_batched(
|
void CublasGemm::run_batched(
|
||||||
cu::CommandEncoder& encoder,
|
cu::CommandEncoder& encoder,
|
||||||
array& out,
|
array& out,
|
||||||
const array& a,
|
const array& a,
|
||||||
const array& b,
|
const array& b,
|
||||||
const array& c,
|
const array& c,
|
||||||
const mlx::core::Shape& batch_shape,
|
const Shape& batch_shape,
|
||||||
const mlx::core::Strides& a_batch_strides,
|
const Strides& a_batch_strides,
|
||||||
const mlx::core::Strides& b_batch_strides,
|
const Strides& b_batch_strides,
|
||||||
const mlx::core::Strides& c_batch_strides,
|
const Strides& c_batch_strides,
|
||||||
float alpha,
|
float alpha,
|
||||||
float beta) {
|
float beta) {
|
||||||
encoder.set_input_array(a);
|
encoder.set_input_array(a);
|
||||||
@@ -56,7 +58,7 @@ void Matmul::run_batched(
|
|||||||
ContiguousIterator c_it(batch_shape, c_batch_strides, batch_shape.size() - 1);
|
ContiguousIterator c_it(batch_shape, c_batch_strides, batch_shape.size() - 1);
|
||||||
auto concurrent = encoder.concurrent_context();
|
auto concurrent = encoder.concurrent_context();
|
||||||
for (size_t i = 0; i < nbatch; ++i) {
|
for (size_t i = 0; i < nbatch; ++i) {
|
||||||
run_impl(
|
execute(
|
||||||
encoder,
|
encoder,
|
||||||
out.data<int8_t>() + out.itemsize() * i * batch_shape.back() * M_ * N_,
|
out.data<int8_t>() + out.itemsize() * i * batch_shape.back() * M_ * N_,
|
||||||
a.data<int8_t>() + a.itemsize() * a_it.loc,
|
a.data<int8_t>() + a.itemsize() * a_it.loc,
|
||||||
@@ -70,4 +72,4 @@ void Matmul::run_batched(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace mlx::core::cu
|
} // namespace mlx::core
|
||||||
329
mlx/backend/cuda/gemms/cublas_gemm_batched_12_9.cu
Normal file
329
mlx/backend/cuda/gemms/cublas_gemm_batched_12_9.cu
Normal file
@@ -0,0 +1,329 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/device.h"
|
||||||
|
#include "mlx/backend/cuda/gemms/cublas_gemm.h"
|
||||||
|
#include "mlx/backend/cuda/kernel_utils.cuh"
|
||||||
|
|
||||||
|
#include <cooperative_groups.h>
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
|
||||||
|
namespace cu {
|
||||||
|
|
||||||
|
namespace cg = cooperative_groups;
|
||||||
|
|
||||||
|
template <int NDIM>
|
||||||
|
__global__ void set_mm_device_pointers_nd(
|
||||||
|
int8_t** pointers,
|
||||||
|
int8_t* a_start,
|
||||||
|
int8_t* b_start,
|
||||||
|
int8_t* out_start,
|
||||||
|
int item_size,
|
||||||
|
const __grid_constant__ cuda::std::array<int32_t, NDIM> batch_shape,
|
||||||
|
const __grid_constant__ cuda::std::array<int64_t, NDIM> a_batch_strides,
|
||||||
|
const __grid_constant__ cuda::std::array<int64_t, NDIM> b_batch_strides,
|
||||||
|
int64_t batch_stride,
|
||||||
|
int batch_count) {
|
||||||
|
auto index = cg::this_grid().thread_rank();
|
||||||
|
if (index >= batch_count) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
auto [a_offset, b_offset] = elem_to_loc_nd<NDIM>(
|
||||||
|
index,
|
||||||
|
batch_shape.data(),
|
||||||
|
a_batch_strides.data(),
|
||||||
|
b_batch_strides.data());
|
||||||
|
pointers[index] = a_start + item_size * a_offset;
|
||||||
|
pointers[index + batch_count] = b_start + item_size * b_offset;
|
||||||
|
pointers[index + 2 * batch_count] =
|
||||||
|
out_start + item_size * index * batch_stride;
|
||||||
|
}
|
||||||
|
|
||||||
|
__global__ void set_mm_device_pointers_g(
|
||||||
|
int8_t** pointers,
|
||||||
|
int8_t* a_start,
|
||||||
|
int8_t* b_start,
|
||||||
|
int8_t* out_start,
|
||||||
|
int item_size,
|
||||||
|
const __grid_constant__ Shape batch_shape,
|
||||||
|
const __grid_constant__ Strides a_batch_strides,
|
||||||
|
const __grid_constant__ Strides b_batch_strides,
|
||||||
|
int64_t batch_stride,
|
||||||
|
int batch_ndim,
|
||||||
|
int batch_count) {
|
||||||
|
auto index = cg::this_grid().thread_rank();
|
||||||
|
if (index >= batch_count) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
auto [a_offset, b_offset] = elem_to_loc(
|
||||||
|
index,
|
||||||
|
batch_shape.data(),
|
||||||
|
a_batch_strides.data(),
|
||||||
|
b_batch_strides.data(),
|
||||||
|
batch_ndim);
|
||||||
|
pointers[index] = a_start + item_size * a_offset;
|
||||||
|
pointers[index + batch_count] = b_start + item_size * b_offset;
|
||||||
|
pointers[index + 2 * batch_count] =
|
||||||
|
out_start + item_size * index * batch_stride;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <int NDIM>
|
||||||
|
__global__ void set_addmm_device_pointers_nd(
|
||||||
|
int8_t** pointers,
|
||||||
|
int8_t* a_start,
|
||||||
|
int8_t* b_start,
|
||||||
|
int8_t* c_start,
|
||||||
|
int8_t* out_start,
|
||||||
|
int item_size,
|
||||||
|
const __grid_constant__ cuda::std::array<int32_t, NDIM> batch_shape,
|
||||||
|
const __grid_constant__ cuda::std::array<int64_t, NDIM> a_batch_strides,
|
||||||
|
const __grid_constant__ cuda::std::array<int64_t, NDIM> b_batch_strides,
|
||||||
|
const __grid_constant__ cuda::std::array<int64_t, NDIM> c_batch_strides,
|
||||||
|
int64_t batch_stride,
|
||||||
|
int batch_count) {
|
||||||
|
auto index = cg::this_grid().thread_rank();
|
||||||
|
if (index >= batch_count) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
auto [a_offset, b_offset, c_offset] = elem_to_loc_nd<NDIM>(
|
||||||
|
index,
|
||||||
|
batch_shape.data(),
|
||||||
|
a_batch_strides.data(),
|
||||||
|
b_batch_strides.data(),
|
||||||
|
c_batch_strides.data());
|
||||||
|
pointers[index] = a_start + item_size * a_offset;
|
||||||
|
pointers[index + batch_count] = b_start + item_size * b_offset;
|
||||||
|
pointers[index + 2 * batch_count] = c_start + item_size * c_offset;
|
||||||
|
pointers[index + 3 * batch_count] =
|
||||||
|
out_start + item_size * index * batch_stride;
|
||||||
|
}
|
||||||
|
|
||||||
|
// General-rank variant of the addmm pointer-setup kernel: computes, for one
// batch element per thread, the device addresses of a, b, c and the output
// tile, stored into `pointers` as
// [a_0..a_{n-1}, b_0..b_{n-1}, c_0..c_{n-1}, out_0..out_{n-1}].
__global__ void set_addmm_device_pointers_g(
    int8_t** pointers,
    int8_t* a_start,
    int8_t* b_start,
    int8_t* c_start,
    int8_t* out_start,
    int item_size,
    const __grid_constant__ Shape batch_shape,
    const __grid_constant__ Strides a_batch_strides,
    const __grid_constant__ Strides b_batch_strides,
    const __grid_constant__ Strides c_batch_strides,
    int64_t batch_stride,
    int batch_ndim,
    int batch_count) {
  const auto elem = cg::this_grid().thread_rank();
  if (elem >= batch_count) {
    return;
  }
  // Map the linear batch index to element offsets into a, b and c using the
  // runtime-rank stride walker.
  auto [off_a, off_b, off_c] = elem_to_loc(
      elem,
      batch_shape.data(),
      a_batch_strides.data(),
      b_batch_strides.data(),
      c_batch_strides.data(),
      batch_ndim);
  pointers[elem] = a_start + item_size * off_a;
  pointers[batch_count + elem] = b_start + item_size * off_b;
  pointers[2 * batch_count + elem] = c_start + item_size * off_c;
  // Output batches are densely packed `batch_stride` elements apart.
  pointers[3 * batch_count + elem] =
      out_start + item_size * elem * batch_stride;
}
|
||||||
|
|
||||||
|
} // namespace cu
|
||||||
|
|
||||||
|
namespace {
|
||||||
|
|
||||||
|
// Switch a cuBLASLt matrix layout to pointer-array batching: the matrices of
// the batch are addressed through an array of device pointers rather than a
// fixed stride, and the layout is told how many batch entries to expect.
void set_pointer_mode(cublasLtMatrixLayout_t desc, int batch_count) {
  auto mode = CUBLASLT_BATCH_MODE_POINTER_ARRAY;
  CHECK_CUBLAS_ERROR(cublasLtMatrixLayoutSetAttribute(
      desc, CUBLASLT_MATRIX_LAYOUT_BATCH_MODE, &mode, sizeof(mode)));
  CHECK_CUBLAS_ERROR(cublasLtMatrixLayoutSetAttribute(
      desc,
      CUBLASLT_MATRIX_LAYOUT_BATCH_COUNT,
      &batch_count,
      sizeof(int32_t)));
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
// Run a batched GEMM (out = alpha * a @ b) through cuBLASLt using
// pointer-array batching. A small device kernel first derives the per-batch
// a/b/out pointers from the (possibly non-contiguous) batch strides, then a
// single cublasLt call consumes the three pointer arrays.
void CublasGemm::run_batched(
    cu::CommandEncoder& encoder,
    array& out,
    const array& a,
    const array& b,
    const Shape& batch_shape,
    const Strides& a_batch_strides,
    const Strides& b_batch_strides,
    float alpha) {
  // One GEMM per M_ x N_ output tile.
  int batch_count = out.size() / (M_ * N_);
  set_pointer_mode(a_desc_, batch_count);
  set_pointer_mode(b_desc_, batch_count);
  set_pointer_mode(out_desc_, batch_count);

  // Launch kernel to set device offsets.
  // `pointers` holds 3 * batch_count device addresses, laid out by the
  // kernels as [a pointers | b pointers | out pointers].
  auto pointers = array(
      allocator::malloc(batch_count * sizeof(void*) * 3),
      {batch_count * 3},
      uint64);

  // Keep the buffer alive for the duration of the command and register it as
  // the pointer-setup kernel's output before the node is added.
  encoder.add_temporary(pointers);
  encoder.set_output_array(pointers);

  int block_dims = std::min(batch_count, 256);
  int num_blocks = cuda::ceil_div(batch_count, block_dims);
  // Output batches are densely packed M_ x N_ tiles.
  int64_t batch_stride = M_ * N_;
  int item_size = out.itemsize();

  int ndim = batch_shape.size();
  if (ndim <= 3) {
    // Fast path: batch rank is a compile-time constant (1, 2 or 3).
    dispatch_1_2_3(ndim, [&](auto ndim_constant) {
      encoder.add_kernel_node(
          cu::set_mm_device_pointers_nd<ndim_constant()>,
          num_blocks,
          block_dims,
          0,
          pointers.data<int8_t*>(),
          a.data<int8_t>(),
          b.data<int8_t>(),
          out.data<int8_t>(),
          item_size,
          const_param<ndim_constant()>(batch_shape),
          const_param<ndim_constant()>(a_batch_strides),
          const_param<ndim_constant()>(b_batch_strides),
          batch_stride,
          batch_count);
    });
  } else {
    // General path: batch rank passed at runtime.
    encoder.add_kernel_node(
        cu::set_mm_device_pointers_g,
        num_blocks,
        block_dims,
        0,
        pointers.data<int8_t*>(),
        a.data<int8_t>(),
        b.data<int8_t>(),
        out.data<int8_t>(),
        item_size,
        const_param(batch_shape),
        const_param(a_batch_strides),
        const_param(b_batch_strides),
        batch_stride,
        ndim,
        batch_count);
  }

  // Run matmul. `pointers` is now an input of the GEMM node, sequencing it
  // after the pointer-setup kernel.
  encoder.set_input_array(pointers);
  encoder.set_input_array(a);
  encoder.set_input_array(b);
  encoder.set_output_array(out);

  // Slice the single allocation into the three per-operand pointer arrays.
  auto a_pointers = pointers.data<int8_t*>();
  auto b_pointers = a_pointers + batch_count;
  auto out_pointers = b_pointers + batch_count;
  execute(
      encoder,
      reinterpret_cast<void*>(out_pointers),
      reinterpret_cast<void*>(a_pointers),
      reinterpret_cast<void*>(b_pointers),
      nullptr, // no c operand for the plain GEMM
      alpha);
}
|
||||||
|
|
||||||
|
// Run a batched GEMM with accumulation (out = alpha * a @ b + beta * c)
// through cuBLASLt using pointer-array batching. A small device kernel first
// derives the per-batch a/b/c/out pointers from the (possibly
// non-contiguous) batch strides, then a single cublasLt call consumes the
// four pointer arrays.
void CublasGemm::run_batched(
    cu::CommandEncoder& encoder,
    array& out,
    const array& a,
    const array& b,
    const array& c,
    const Shape& batch_shape,
    const Strides& a_batch_strides,
    const Strides& b_batch_strides,
    const Strides& c_batch_strides,
    float alpha,
    float beta) {
  // One GEMM per M_ x N_ output tile.
  int batch_count = out.size() / (M_ * N_);
  set_pointer_mode(a_desc_, batch_count);
  set_pointer_mode(b_desc_, batch_count);
  set_pointer_mode(c_desc_, batch_count);
  set_pointer_mode(out_desc_, batch_count);

  // Launch kernel to set device offsets.
  // `pointers` holds 4 * batch_count device addresses, laid out by the
  // kernels as [a pointers | b pointers | c pointers | out pointers].
  auto pointers = array(
      allocator::malloc(batch_count * sizeof(uint64_t) * 4),
      {batch_count * 4},
      uint64);

  // Keep the buffer alive for the duration of the command and register it as
  // the pointer-setup kernel's output before the node is added.
  encoder.add_temporary(pointers);
  encoder.set_output_array(pointers);

  int block_dims = std::min(batch_count, 256);
  int num_blocks = cuda::ceil_div(batch_count, block_dims);
  // Output batches are densely packed M_ x N_ tiles.
  int64_t batch_stride = M_ * N_;
  int item_size = out.itemsize();

  int ndim = batch_shape.size();
  if (ndim <= 3) {
    // Fast path: batch rank is a compile-time constant (1, 2 or 3).
    dispatch_1_2_3(ndim, [&](auto ndim_constant) {
      encoder.add_kernel_node(
          cu::set_addmm_device_pointers_nd<ndim_constant()>,
          num_blocks,
          block_dims,
          0,
          pointers.data<int8_t*>(),
          a.data<int8_t>(),
          b.data<int8_t>(),
          c.data<int8_t>(),
          out.data<int8_t>(),
          item_size,
          const_param<ndim_constant()>(batch_shape),
          const_param<ndim_constant()>(a_batch_strides),
          const_param<ndim_constant()>(b_batch_strides),
          const_param<ndim_constant()>(c_batch_strides),
          batch_stride,
          batch_count);
    });
  } else {
    // General path: batch rank passed at runtime.
    encoder.add_kernel_node(
        cu::set_addmm_device_pointers_g,
        num_blocks,
        block_dims,
        0,
        pointers.data<int8_t*>(),
        a.data<int8_t>(),
        b.data<int8_t>(),
        c.data<int8_t>(),
        out.data<int8_t>(),
        item_size,
        const_param(batch_shape),
        const_param(a_batch_strides),
        const_param(b_batch_strides),
        const_param(c_batch_strides),
        batch_stride,
        ndim,
        batch_count);
  }

  // Run matmul. `pointers` is now an input of the GEMM node, sequencing it
  // after the pointer-setup kernel.
  encoder.set_input_array(pointers);
  encoder.set_input_array(a);
  encoder.set_input_array(b);
  encoder.set_input_array(c);
  encoder.set_output_array(out);

  // Slice the single allocation into the four per-operand pointer arrays.
  auto a_pointers = pointers.data<int8_t*>();
  auto b_pointers = a_pointers + batch_count;
  auto c_pointers = b_pointers + batch_count;
  auto out_pointers = c_pointers + batch_count;
  execute(
      encoder,
      reinterpret_cast<void*>(out_pointers),
      reinterpret_cast<void*>(a_pointers),
      reinterpret_cast<void*>(b_pointers),
      reinterpret_cast<void*>(c_pointers),
      alpha,
      beta);
}
|
||||||
|
|
||||||
|
} // namespace mlx::core
|
||||||
@@ -94,7 +94,7 @@ void Gather::eval_gpu(const std::vector<array>& inputs, array& out) {
|
|||||||
large ? "int64_t" : "int32_t"));
|
large ? "int64_t" : "int32_t"));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return std::make_pair(jit_source_gather, std::move(kernel_names));
|
return std::make_tuple(false, jit_source_gather, std::move(kernel_names));
|
||||||
});
|
});
|
||||||
|
|
||||||
cu::KernelArgs args;
|
cu::KernelArgs args;
|
||||||
@@ -110,7 +110,7 @@ void Gather::eval_gpu(const std::vector<array>& inputs, array& out) {
|
|||||||
args.append<int32_t>(src.ndim());
|
args.append<int32_t>(src.ndim());
|
||||||
args.append_ndim(slice_sizes_);
|
args.append_ndim(slice_sizes_);
|
||||||
args.append(slice_size);
|
args.append(slice_size);
|
||||||
args.append(SmallVector<int32_t>(axes_.begin(), axes_.end()));
|
args.append(axes_);
|
||||||
append_indices_arg(args, inputs, nidx, idx_ndim);
|
append_indices_arg(args, inputs, nidx, idx_ndim);
|
||||||
|
|
||||||
std::string kernel_name = fmt::format(
|
std::string kernel_name = fmt::format(
|
||||||
@@ -189,7 +189,7 @@ void Scatter::eval_gpu(const std::vector<array>& inputs, array& out) {
|
|||||||
large ? "int64_t" : "int32_t"));
|
large ? "int64_t" : "int32_t"));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return std::make_pair(jit_source_scatter, std::move(kernel_names));
|
return std::make_tuple(false, jit_source_scatter, std::move(kernel_names));
|
||||||
});
|
});
|
||||||
|
|
||||||
cu::KernelArgs args;
|
cu::KernelArgs args;
|
||||||
@@ -211,7 +211,7 @@ void Scatter::eval_gpu(const std::vector<array>& inputs, array& out) {
|
|||||||
args.append_ndim(out.shape());
|
args.append_ndim(out.shape());
|
||||||
args.append_ndim(out.strides());
|
args.append_ndim(out.strides());
|
||||||
args.append<int32_t>(out.ndim());
|
args.append<int32_t>(out.ndim());
|
||||||
args.append(SmallVector<int32_t>(axes_.begin(), axes_.end()));
|
args.append(axes_);
|
||||||
append_indices_arg(args, inputs, nidx, idx_ndim);
|
append_indices_arg(args, inputs, nidx, idx_ndim);
|
||||||
|
|
||||||
std::string kernel_name = fmt::format(
|
std::string kernel_name = fmt::format(
|
||||||
@@ -268,7 +268,8 @@ void GatherAxis::eval_gpu(const std::vector<array>& inputs, array& out) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return std::make_pair(jit_source_gather_axis, std::move(kernel_names));
|
return std::make_tuple(
|
||||||
|
false, jit_source_gather_axis, std::move(kernel_names));
|
||||||
});
|
});
|
||||||
|
|
||||||
size_t idx_size_pre = 1;
|
size_t idx_size_pre = 1;
|
||||||
@@ -371,7 +372,8 @@ void ScatterAxis::eval_gpu(const std::vector<array>& inputs, array& out) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return std::make_pair(jit_source_scatter_axis, std::move(kernel_names));
|
return std::make_tuple(
|
||||||
|
false, jit_source_scatter_axis, std::move(kernel_names));
|
||||||
});
|
});
|
||||||
|
|
||||||
size_t idx_size_pre = 1;
|
size_t idx_size_pre = 1;
|
||||||
|
|||||||
@@ -67,10 +67,12 @@ const std::string& cccl_dir() {
|
|||||||
return path.string();
|
return path.string();
|
||||||
}
|
}
|
||||||
// Finally check the environment variable.
|
// Finally check the environment variable.
|
||||||
path = std::getenv("MLX_CCCL_DIR");
|
if (const char* env = std::getenv("MLX_CCCL_DIR"); env) {
|
||||||
|
path = env;
|
||||||
if (!path.empty() && std::filesystem::exists(path)) {
|
if (!path.empty() && std::filesystem::exists(path)) {
|
||||||
return path.string();
|
return path.string();
|
||||||
}
|
}
|
||||||
|
}
|
||||||
return std::string();
|
return std::string();
|
||||||
}();
|
}();
|
||||||
return dir;
|
return dir;
|
||||||
@@ -101,8 +103,8 @@ const std::filesystem::path& ptx_cache_dir() {
|
|||||||
bool read_cached_ptx(
|
bool read_cached_ptx(
|
||||||
const std::filesystem::path& cache_dir,
|
const std::filesystem::path& cache_dir,
|
||||||
const std::string& module_name,
|
const std::string& module_name,
|
||||||
std::vector<char>* ptx,
|
std::string& ptx,
|
||||||
std::vector<std::pair<std::string, std::string>>* ptx_kernels) {
|
std::vector<std::pair<std::string, std::string>>& ptx_kernels) {
|
||||||
if (cache_dir.empty()) {
|
if (cache_dir.empty()) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
@@ -117,15 +119,15 @@ bool read_cached_ptx(
|
|||||||
if (!ptx_file.good()) {
|
if (!ptx_file.good()) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
ptx->resize(ptx_size);
|
ptx.resize(ptx_size);
|
||||||
ptx_file.read(ptx->data(), ptx_size);
|
ptx_file.read(ptx.data(), ptx_size);
|
||||||
|
|
||||||
std::ifstream txt_file(cache_dir / (module_name + ".txt"), std::ios::binary);
|
std::ifstream txt_file(cache_dir / (module_name + ".txt"), std::ios::binary);
|
||||||
std::string line;
|
std::string line;
|
||||||
while (std::getline(txt_file, line)) {
|
while (std::getline(txt_file, line)) {
|
||||||
auto tab = line.find('\t');
|
auto tab = line.find('\t');
|
||||||
if (tab != std::string::npos) {
|
if (tab != std::string::npos) {
|
||||||
ptx_kernels->emplace_back(line.substr(0, tab), line.substr(tab + 1));
|
ptx_kernels.emplace_back(line.substr(0, tab), line.substr(tab + 1));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return true;
|
return true;
|
||||||
@@ -135,7 +137,7 @@ bool read_cached_ptx(
|
|||||||
void write_cached_ptx(
|
void write_cached_ptx(
|
||||||
const std::filesystem::path& cache_dir,
|
const std::filesystem::path& cache_dir,
|
||||||
const std::string& module_name,
|
const std::string& module_name,
|
||||||
const std::vector<char>& ptx,
|
const std::string& ptx,
|
||||||
const std::vector<std::pair<std::string, std::string>>& ptx_kernels,
|
const std::vector<std::pair<std::string, std::string>>& ptx_kernels,
|
||||||
const std::string& source_code) {
|
const std::string& source_code) {
|
||||||
if (cache_dir.empty()) {
|
if (cache_dir.empty()) {
|
||||||
@@ -217,22 +219,18 @@ constexpr const char* g_headers[] = {
|
|||||||
jit_source_utils,
|
jit_source_utils,
|
||||||
};
|
};
|
||||||
|
|
||||||
} // namespace
|
void compile(
|
||||||
|
|
||||||
JitModule::JitModule(
|
|
||||||
Device& device,
|
Device& device,
|
||||||
const std::string& module_name,
|
const std::string& module_name,
|
||||||
const KernelBuilder& builder) {
|
const std::string& source,
|
||||||
// Check cache.
|
const std::vector<std::string>& kernel_names,
|
||||||
std::vector<char> ptx;
|
std::string& ptx,
|
||||||
std::vector<std::pair<std::string, std::string>> ptx_kernels;
|
std::vector<std::pair<std::string, std::string>>& ptx_kernels) {
|
||||||
if (!read_cached_ptx(ptx_cache_dir(), module_name, &ptx, &ptx_kernels)) {
|
// Create the program
|
||||||
// Create program.
|
|
||||||
auto [source_code, kernel_names] = builder();
|
|
||||||
nvrtcProgram prog;
|
nvrtcProgram prog;
|
||||||
CHECK_NVRTC_ERROR(nvrtcCreateProgram(
|
CHECK_NVRTC_ERROR(nvrtcCreateProgram(
|
||||||
&prog,
|
&prog,
|
||||||
source_code.c_str(),
|
source.c_str(),
|
||||||
(module_name + ".cu").c_str(),
|
(module_name + ".cu").c_str(),
|
||||||
std::size(g_headers),
|
std::size(g_headers),
|
||||||
g_headers,
|
g_headers,
|
||||||
@@ -286,16 +284,20 @@ JitModule::JitModule(
|
|||||||
} else {
|
} else {
|
||||||
CHECK_NVRTC_ERROR(nvrtcGetPTXSize(prog, &ptx_size));
|
CHECK_NVRTC_ERROR(nvrtcGetPTXSize(prog, &ptx_size));
|
||||||
}
|
}
|
||||||
ptx.resize(ptx_size, 0);
|
ptx.resize(ptx_size);
|
||||||
if (use_sass) {
|
if (use_sass) {
|
||||||
CHECK_NVRTC_ERROR(nvrtcGetCUBIN(prog, ptx.data()));
|
CHECK_NVRTC_ERROR(nvrtcGetCUBIN(prog, ptx.data()));
|
||||||
} else {
|
} else {
|
||||||
CHECK_NVRTC_ERROR(nvrtcGetPTX(prog, ptx.data()));
|
CHECK_NVRTC_ERROR(nvrtcGetPTX(prog, ptx.data()));
|
||||||
}
|
}
|
||||||
write_cached_ptx(
|
}
|
||||||
ptx_cache_dir(), module_name, ptx, ptx_kernels, source_code);
|
|
||||||
}
|
|
||||||
|
|
||||||
|
void load_module(
|
||||||
|
const std::string& module_name,
|
||||||
|
const std::string& ptx,
|
||||||
|
const std::vector<std::pair<std::string, std::string>>& ptx_kernels,
|
||||||
|
CUmodule& module_,
|
||||||
|
std::unordered_map<std::string, std::pair<CUfunction, bool>>& kernels) {
|
||||||
// Load module.
|
// Load module.
|
||||||
char jit_log[4089] = {};
|
char jit_log[4089] = {};
|
||||||
CUjit_option options[] = {
|
CUjit_option options[] = {
|
||||||
@@ -312,21 +314,69 @@ JitModule::JitModule(
|
|||||||
for (const auto& [name, mangled] : ptx_kernels) {
|
for (const auto& [name, mangled] : ptx_kernels) {
|
||||||
CUfunction kernel;
|
CUfunction kernel;
|
||||||
CHECK_CUDA_ERROR(cuModuleGetFunction(&kernel, module_, mangled.c_str()));
|
CHECK_CUDA_ERROR(cuModuleGetFunction(&kernel, module_, mangled.c_str()));
|
||||||
kernels_[name] = kernel;
|
kernels[name] = std::make_pair(kernel, false);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
} // namespace
|
||||||
|
|
||||||
|
JitModule::JitModule(
|
||||||
|
Device& device,
|
||||||
|
const std::string& module_name,
|
||||||
|
const KernelBuilder& builder,
|
||||||
|
bool use_disk_cache) {
|
||||||
|
// Will hold the actual device executable source code and kernel names
|
||||||
|
std::string ptx;
|
||||||
|
std::vector<std::pair<std::string, std::string>> ptx_kernels;
|
||||||
|
|
||||||
|
// Try to load them from the file cache
|
||||||
|
if (!read_cached_ptx(ptx_cache_dir(), module_name, ptx, ptx_kernels)) {
|
||||||
|
auto [precompiled, source_code, kernel_names] = builder();
|
||||||
|
|
||||||
|
// Get the PTX or cubin
|
||||||
|
if (precompiled) {
|
||||||
|
ptx = std::move(source_code);
|
||||||
|
for (auto& name : kernel_names) {
|
||||||
|
ptx_kernels.emplace_back(name, name);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
compile(device, module_name, source_code, kernel_names, ptx, ptx_kernels);
|
||||||
|
}
|
||||||
|
|
||||||
|
// If requested save them in the file cache for the next launch
|
||||||
|
if (use_disk_cache) {
|
||||||
|
write_cached_ptx(
|
||||||
|
ptx_cache_dir(), module_name, ptx, ptx_kernels, source_code);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Load the module
|
||||||
|
load_module(module_name, ptx, ptx_kernels, module_, kernels_);
|
||||||
|
}
|
||||||
|
|
||||||
JitModule::~JitModule() {
|
JitModule::~JitModule() {
|
||||||
CHECK_CUDA_ERROR(cuModuleUnload(module_));
|
CHECK_CUDA_ERROR(cuModuleUnload(module_));
|
||||||
}
|
}
|
||||||
|
|
||||||
CUfunction JitModule::get_kernel(const std::string& kernel_name) {
|
CUfunction JitModule::get_kernel(
|
||||||
|
const std::string& kernel_name,
|
||||||
|
std::function<void(CUfunction)> configure_kernel) {
|
||||||
auto it = kernels_.find(kernel_name);
|
auto it = kernels_.find(kernel_name);
|
||||||
if (it == kernels_.end()) {
|
if (it == kernels_.end()) {
|
||||||
throw std::runtime_error(
|
throw std::runtime_error(
|
||||||
fmt::format("There is no kernel named {}.", kernel_name));
|
fmt::format("There is no kernel named {}.", kernel_name));
|
||||||
}
|
}
|
||||||
return it->second;
|
|
||||||
|
// If it is the first time we run this kernel then configure it. Do it only
|
||||||
|
// once!
|
||||||
|
if (!it->second.second) {
|
||||||
|
if (configure_kernel) {
|
||||||
|
configure_kernel(it->second.first);
|
||||||
|
}
|
||||||
|
it->second.second = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
return it->second.first;
|
||||||
}
|
}
|
||||||
|
|
||||||
std::unordered_map<std::string, JitModule>& get_jit_module_cache() {
|
std::unordered_map<std::string, JitModule>& get_jit_module_cache() {
|
||||||
@@ -337,11 +387,12 @@ std::unordered_map<std::string, JitModule>& get_jit_module_cache() {
|
|||||||
JitModule& get_jit_module(
|
JitModule& get_jit_module(
|
||||||
const mlx::core::Device& device,
|
const mlx::core::Device& device,
|
||||||
const std::string& name,
|
const std::string& name,
|
||||||
const KernelBuilder& builder) {
|
const KernelBuilder& builder,
|
||||||
|
bool cache) {
|
||||||
auto& map = get_jit_module_cache();
|
auto& map = get_jit_module_cache();
|
||||||
auto it = map.find(name);
|
auto it = map.find(name);
|
||||||
if (it == map.end()) {
|
if (it == map.end()) {
|
||||||
it = map.try_emplace(name, cu::device(device), name, builder).first;
|
it = map.try_emplace(name, cu::device(device), name, builder, cache).first;
|
||||||
}
|
}
|
||||||
return it->second;
|
return it->second;
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -19,7 +19,8 @@ namespace mlx::core::cu {
|
|||||||
|
|
||||||
class Device;
|
class Device;
|
||||||
|
|
||||||
using KernelBuilderResult = std::pair<
|
using KernelBuilderResult = std::tuple<
|
||||||
|
/* precompiled */ bool,
|
||||||
/* source code */ std::string,
|
/* source code */ std::string,
|
||||||
/* kernel names */ std::vector<std::string>>;
|
/* kernel names */ std::vector<std::string>>;
|
||||||
using KernelBuilder = std::function<KernelBuilderResult()>;
|
using KernelBuilder = std::function<KernelBuilderResult()>;
|
||||||
@@ -45,6 +46,11 @@ struct KernelArgs {
|
|||||||
append_ptr(std::get<SmallVector<T>>(storage_.back()).data());
|
append_ptr(std::get<SmallVector<T>>(storage_.back()).data());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
void append(const std::vector<T>& vec) {
|
||||||
|
append(SmallVector<T>(vec.begin(), vec.end()));
|
||||||
|
}
|
||||||
|
|
||||||
// Make sure the arg is copied to an array with size of NDIM.
|
// Make sure the arg is copied to an array with size of NDIM.
|
||||||
template <size_t NDIM = MAX_NDIM, typename T>
|
template <size_t NDIM = MAX_NDIM, typename T>
|
||||||
void append_ndim(SmallVector<T> vec) {
|
void append_ndim(SmallVector<T> vec) {
|
||||||
@@ -63,14 +69,16 @@ struct KernelArgs {
|
|||||||
private:
|
private:
|
||||||
std::vector<void*> args_;
|
std::vector<void*> args_;
|
||||||
|
|
||||||
// The cuLaunchKernel API requires passing pointers to arguments so store
|
// The cuGraphAddKernelNode API requires passing pointers to arguments so
|
||||||
// temporary values untill kernel is launched.
|
// store temporary values until the node is created.
|
||||||
using Arg = std::variant<
|
using Arg = std::variant<
|
||||||
std::monostate,
|
std::monostate,
|
||||||
CUdeviceptr,
|
CUdeviceptr,
|
||||||
|
bool,
|
||||||
int32_t,
|
int32_t,
|
||||||
uint32_t,
|
uint32_t,
|
||||||
int64_t,
|
int64_t,
|
||||||
|
float,
|
||||||
SmallVector<const void*>,
|
SmallVector<const void*>,
|
||||||
SmallVector<int32_t>,
|
SmallVector<int32_t>,
|
||||||
SmallVector<int64_t>>;
|
SmallVector<int64_t>>;
|
||||||
@@ -82,16 +90,19 @@ class JitModule {
|
|||||||
JitModule(
|
JitModule(
|
||||||
Device& device,
|
Device& device,
|
||||||
const std::string& module_name,
|
const std::string& module_name,
|
||||||
const KernelBuilder& builder);
|
const KernelBuilder& builder,
|
||||||
|
bool cache);
|
||||||
~JitModule();
|
~JitModule();
|
||||||
|
|
||||||
JitModule(const JitModule&) = delete;
|
JitModule(const JitModule&) = delete;
|
||||||
JitModule& operator=(const JitModule&) = delete;
|
JitModule& operator=(const JitModule&) = delete;
|
||||||
CUfunction get_kernel(const std::string& kernel_name);
|
CUfunction get_kernel(
|
||||||
|
const std::string& kernel_name,
|
||||||
|
std::function<void(CUfunction)> configure_kernel = nullptr);
|
||||||
|
|
||||||
private:
|
private:
|
||||||
CUmodule module_{nullptr};
|
CUmodule module_{nullptr};
|
||||||
std::unordered_map<std::string, CUfunction> kernels_;
|
std::unordered_map<std::string, std::pair<CUfunction, bool>> kernels_;
|
||||||
};
|
};
|
||||||
|
|
||||||
std::unordered_map<std::string, JitModule>& get_jit_module_cache();
|
std::unordered_map<std::string, JitModule>& get_jit_module_cache();
|
||||||
@@ -99,6 +110,7 @@ std::unordered_map<std::string, JitModule>& get_jit_module_cache();
|
|||||||
JitModule& get_jit_module(
|
JitModule& get_jit_module(
|
||||||
const mlx::core::Device& device,
|
const mlx::core::Device& device,
|
||||||
const std::string& name,
|
const std::string& name,
|
||||||
const KernelBuilder& builder);
|
const KernelBuilder& builder,
|
||||||
|
bool use_disk_cache = true);
|
||||||
|
|
||||||
} // namespace mlx::core::cu
|
} // namespace mlx::core::cu
|
||||||
|
|||||||
@@ -2,11 +2,15 @@
|
|||||||
|
|
||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
|
#include "mlx/utils.h"
|
||||||
|
|
||||||
#include <cstring>
|
#include <cstring>
|
||||||
#include <list>
|
#include <list>
|
||||||
#include <unordered_map>
|
#include <unordered_map>
|
||||||
#include <utility>
|
#include <utility>
|
||||||
|
|
||||||
|
#include <fmt/format.h>
|
||||||
|
|
||||||
namespace mlx::core {
|
namespace mlx::core {
|
||||||
|
|
||||||
template <
|
template <
|
||||||
@@ -27,6 +31,14 @@ class LRUCache {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Initialize with capacity read from |env_name|.
|
||||||
|
LRUCache(const char* env_name, int default_capacity)
|
||||||
|
: LRUCache(env::get_var(env_name, default_capacity)) {
|
||||||
|
if (env::get_var("MLX_ENABLE_CACHE_THRASHING_CHECK", 1)) {
|
||||||
|
env_name_ = env_name;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
size_t size() const {
|
size_t size() const {
|
||||||
return map_.size();
|
return map_.size();
|
||||||
}
|
}
|
||||||
@@ -76,6 +88,14 @@ class LRUCache {
|
|||||||
return {it->second, false};
|
return {it->second, false};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (env_name_ && ++cache_misses_ > 2 * capacity_) {
|
||||||
|
throw std::runtime_error(fmt::format(
|
||||||
|
"Cache thrashing is happening, please set the environment variable "
|
||||||
|
"{} to a larger value than {} to fix degraded performance.",
|
||||||
|
env_name_,
|
||||||
|
capacity_));
|
||||||
|
}
|
||||||
|
|
||||||
vlist_.emplace_front(key, std::forward<U>(value));
|
vlist_.emplace_front(key, std::forward<U>(value));
|
||||||
map_[key] = vlist_.begin();
|
map_[key] = vlist_.begin();
|
||||||
|
|
||||||
@@ -106,6 +126,9 @@ class LRUCache {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const char* env_name_{nullptr};
|
||||||
|
size_t cache_misses_{0};
|
||||||
|
|
||||||
list_type vlist_;
|
list_type vlist_;
|
||||||
map_type map_;
|
map_type map_;
|
||||||
size_t capacity_;
|
size_t capacity_;
|
||||||
|
|||||||
@@ -11,6 +11,7 @@
|
|||||||
#include <numeric>
|
#include <numeric>
|
||||||
|
|
||||||
namespace mlx::core {
|
namespace mlx::core {
|
||||||
|
|
||||||
namespace {
|
namespace {
|
||||||
|
|
||||||
std::tuple<bool, int64_t, array>
|
std::tuple<bool, int64_t, array>
|
||||||
@@ -28,6 +29,76 @@ check_transpose(cu::CommandEncoder& enc, const Stream& s, const array& arr) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void gemm_and_bias(
|
||||||
|
cu::CommandEncoder& encoder,
|
||||||
|
int M,
|
||||||
|
int N,
|
||||||
|
int K,
|
||||||
|
bool a_transposed,
|
||||||
|
int64_t lda,
|
||||||
|
bool b_transposed,
|
||||||
|
int64_t ldb,
|
||||||
|
array& out,
|
||||||
|
const array& a,
|
||||||
|
const array& b,
|
||||||
|
const std::optional<array>& bias = std::nullopt,
|
||||||
|
float alpha = 1.0f) {
|
||||||
|
// Check and collapse batch dimensions
|
||||||
|
auto [batch_shape, a_batch_strides, b_batch_strides] = collapse_batches(a, b);
|
||||||
|
|
||||||
|
auto batch_count = out.size() / (M * N);
|
||||||
|
|
||||||
|
// Collapse batches into M if needed
|
||||||
|
if (batch_count > 1 && !a_transposed && batch_shape.size() == 1 &&
|
||||||
|
a.strides()[a.ndim() - 2] == K && a_batch_strides.back() == M * K &&
|
||||||
|
b_batch_strides.back() == 0) {
|
||||||
|
M *= batch_shape.back();
|
||||||
|
batch_count = 1;
|
||||||
|
|
||||||
|
a_batch_strides = {0};
|
||||||
|
b_batch_strides = {0};
|
||||||
|
batch_shape = {1};
|
||||||
|
}
|
||||||
|
|
||||||
|
// Use gemmv when possible
|
||||||
|
if (!bias && cu::can_use_gemv(M, N, K, a_transposed, b_transposed)) {
|
||||||
|
cu::gemv(
|
||||||
|
a,
|
||||||
|
b,
|
||||||
|
out,
|
||||||
|
M,
|
||||||
|
N,
|
||||||
|
K,
|
||||||
|
batch_count,
|
||||||
|
batch_shape,
|
||||||
|
a_batch_strides,
|
||||||
|
b_batch_strides,
|
||||||
|
encoder);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Invoke cublasLt
|
||||||
|
CublasGemm gemm(
|
||||||
|
encoder.device(),
|
||||||
|
a.dtype(),
|
||||||
|
a_transposed,
|
||||||
|
M,
|
||||||
|
K,
|
||||||
|
lda,
|
||||||
|
b_transposed,
|
||||||
|
K,
|
||||||
|
N,
|
||||||
|
ldb,
|
||||||
|
batch_shape.back(),
|
||||||
|
a_batch_strides.back(),
|
||||||
|
b_batch_strides.back());
|
||||||
|
if (bias) {
|
||||||
|
gemm.set_bias(encoder, *bias);
|
||||||
|
}
|
||||||
|
gemm.run(
|
||||||
|
encoder, out, a, b, batch_shape, a_batch_strides, b_batch_strides, alpha);
|
||||||
|
}
|
||||||
|
|
||||||
} // namespace
|
} // namespace
|
||||||
|
|
||||||
void Matmul::eval_gpu(const std::vector<array>& inputs, array& out) {
|
void Matmul::eval_gpu(const std::vector<array>& inputs, array& out) {
|
||||||
@@ -48,9 +119,6 @@ void Matmul::eval_gpu(const std::vector<array>& inputs, array& out) {
|
|||||||
|
|
||||||
out.set_data(allocator::malloc(out.nbytes()));
|
out.set_data(allocator::malloc(out.nbytes()));
|
||||||
|
|
||||||
/////////////////////////////////////////////////////////////////////////////
|
|
||||||
// Init checks and prep
|
|
||||||
|
|
||||||
int M = a_pre.shape(-2);
|
int M = a_pre.shape(-2);
|
||||||
int N = b_pre.shape(-1);
|
int N = b_pre.shape(-1);
|
||||||
int K = a_pre.shape(-1);
|
int K = a_pre.shape(-1);
|
||||||
@@ -60,65 +128,8 @@ void Matmul::eval_gpu(const std::vector<array>& inputs, array& out) {
|
|||||||
auto [a_transposed, lda, a] = check_transpose(encoder, s, a_pre);
|
auto [a_transposed, lda, a] = check_transpose(encoder, s, a_pre);
|
||||||
auto [b_transposed, ldb, b] = check_transpose(encoder, s, b_pre);
|
auto [b_transposed, ldb, b] = check_transpose(encoder, s, b_pre);
|
||||||
|
|
||||||
/////////////////////////////////////////////////////////////////////////////
|
gemm_and_bias(
|
||||||
// Check and collapse batch dimensions
|
encoder, M, N, K, a_transposed, lda, b_transposed, ldb, out, a, b);
|
||||||
|
|
||||||
auto [batch_shape, a_batch_strides, b_batch_strides] = collapse_batches(a, b);
|
|
||||||
|
|
||||||
auto batch_count = out.size() / (M * N);
|
|
||||||
|
|
||||||
// Collapse batches into M if needed
|
|
||||||
if (batch_count > 1 && !a_transposed && batch_shape.size() == 1 &&
|
|
||||||
a.strides()[a.ndim() - 2] == K && a_batch_strides.back() == M * K &&
|
|
||||||
b_batch_strides.back() == 0) {
|
|
||||||
M *= batch_shape.back();
|
|
||||||
batch_count = 1;
|
|
||||||
|
|
||||||
a_batch_strides = {0};
|
|
||||||
b_batch_strides = {0};
|
|
||||||
batch_shape = {1};
|
|
||||||
}
|
|
||||||
|
|
||||||
if (cu::can_use_gemv(M, N, K, a_transposed, b_transposed)) {
|
|
||||||
cu::gemv(
|
|
||||||
a,
|
|
||||||
b,
|
|
||||||
out,
|
|
||||||
M,
|
|
||||||
N,
|
|
||||||
K,
|
|
||||||
batch_count,
|
|
||||||
batch_shape,
|
|
||||||
a_batch_strides,
|
|
||||||
b_batch_strides,
|
|
||||||
encoder);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
/////////////////////////////////////////////////////////////////////////////
|
|
||||||
// Invoke cublasLt
|
|
||||||
cu::Matmul matmul(
|
|
||||||
cu::device(s.device),
|
|
||||||
a.dtype(),
|
|
||||||
a_transposed,
|
|
||||||
M,
|
|
||||||
K,
|
|
||||||
lda,
|
|
||||||
b_transposed,
|
|
||||||
K,
|
|
||||||
N,
|
|
||||||
ldb,
|
|
||||||
batch_shape.back(),
|
|
||||||
a_batch_strides.back(),
|
|
||||||
b_batch_strides.back());
|
|
||||||
|
|
||||||
if ((batch_count / batch_shape.back()) == 1) {
|
|
||||||
matmul.run(encoder, out, a, b);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
matmul.run_batched(
|
|
||||||
encoder, out, a, b, batch_shape, a_batch_strides, b_batch_strides);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void AddMM::eval_gpu(const std::vector<array>& inputs, array& out) {
|
void AddMM::eval_gpu(const std::vector<array>& inputs, array& out) {
|
||||||
@@ -143,6 +154,28 @@ void AddMM::eval_gpu(const std::vector<array>& inputs, array& out) {
|
|||||||
auto [a_transposed, lda, a] = check_transpose(encoder, s, a_pre);
|
auto [a_transposed, lda, a] = check_transpose(encoder, s, a_pre);
|
||||||
auto [b_transposed, ldb, b] = check_transpose(encoder, s, b_pre);
|
auto [b_transposed, ldb, b] = check_transpose(encoder, s, b_pre);
|
||||||
|
|
||||||
|
/////////////////////////////////////////////////////////////////////////////
|
||||||
|
// Dispatch to GEMM with epilogue or AddMM
|
||||||
|
|
||||||
|
if (beta_ == 1 && c.strides(-1) == 1 && c.data_size() == out.shape(-1)) {
|
||||||
|
out.set_data(allocator::malloc(out.nbytes()));
|
||||||
|
gemm_and_bias(
|
||||||
|
encoder,
|
||||||
|
M,
|
||||||
|
N,
|
||||||
|
K,
|
||||||
|
a_transposed,
|
||||||
|
lda,
|
||||||
|
b_transposed,
|
||||||
|
ldb,
|
||||||
|
out,
|
||||||
|
a,
|
||||||
|
b,
|
||||||
|
c,
|
||||||
|
alpha_);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
int64_t ldc;
|
int64_t ldc;
|
||||||
{
|
{
|
||||||
auto stx = c.strides()[c.ndim() - 2];
|
auto stx = c.strides()[c.ndim() - 2];
|
||||||
@@ -184,9 +217,9 @@ void AddMM::eval_gpu(const std::vector<array>& inputs, array& out) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/////////////////////////////////////////////////////////////////////////////
|
/////////////////////////////////////////////////////////////////////////////
|
||||||
// Invoke cublasLt
|
// Invoke cublasLt with AddMM settings
|
||||||
|
|
||||||
cu::Matmul matmul(
|
CublasGemm gemm(
|
||||||
cu::device(s.device),
|
cu::device(s.device),
|
||||||
a.dtype(),
|
a.dtype(),
|
||||||
a_transposed,
|
a_transposed,
|
||||||
@@ -202,12 +235,7 @@ void AddMM::eval_gpu(const std::vector<array>& inputs, array& out) {
|
|||||||
a_batch_strides.back(),
|
a_batch_strides.back(),
|
||||||
b_batch_strides.back(),
|
b_batch_strides.back(),
|
||||||
c_batch_strides.back());
|
c_batch_strides.back());
|
||||||
|
gemm.run(
|
||||||
if ((batch_count / batch_shape.back()) == 1) {
|
|
||||||
matmul.run(encoder, out, a, b, c, alpha_, beta_);
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
matmul.run_batched(
|
|
||||||
encoder,
|
encoder,
|
||||||
out,
|
out,
|
||||||
a,
|
a,
|
||||||
|
|||||||
@@ -1,11 +1,47 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
#include "mlx/backend/cuda/cuda.h"
|
#include "mlx/backend/cuda/cuda.h"
|
||||||
|
#include "mlx/fast.h"
|
||||||
|
|
||||||
namespace mlx::core::cu {
|
namespace mlx::core {
|
||||||
|
|
||||||
|
namespace cu {
|
||||||
|
|
||||||
bool is_available() {
|
bool is_available() {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace mlx::core::cu
|
} // namespace cu
|
||||||
|
|
||||||
|
namespace fast {
|
||||||
|
|
||||||
|
CustomKernelFunction cuda_kernel(
|
||||||
|
const std::string&,
|
||||||
|
const std::vector<std::string>&,
|
||||||
|
const std::vector<std::string>&,
|
||||||
|
const std::string&,
|
||||||
|
const std::string&,
|
||||||
|
bool,
|
||||||
|
int) {
|
||||||
|
throw std::runtime_error("[cuda_kernel] No CUDA back-end.");
|
||||||
|
}
|
||||||
|
|
||||||
|
std::vector<array> precompiled_cuda_kernel(
|
||||||
|
const std::string&,
|
||||||
|
const std::string&,
|
||||||
|
const std::vector<array>&,
|
||||||
|
const std::vector<Shape>&,
|
||||||
|
const std::vector<Dtype>&,
|
||||||
|
const std::vector<ScalarArg>&,
|
||||||
|
std::tuple<int, int, int>,
|
||||||
|
std::tuple<int, int, int>,
|
||||||
|
int shared_memory,
|
||||||
|
std::optional<float> init_value,
|
||||||
|
bool ensure_row_contiguous,
|
||||||
|
StreamOrDevice) {
|
||||||
|
throw std::runtime_error("[cuda_kernel] No CUDA back-end.");
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace fast
|
||||||
|
|
||||||
|
} // namespace mlx::core
|
||||||
|
|||||||
@@ -24,8 +24,6 @@ namespace mlx::core {
|
|||||||
}
|
}
|
||||||
|
|
||||||
NO_GPU(BlockMaskedMM)
|
NO_GPU(BlockMaskedMM)
|
||||||
NO_GPU(DynamicSlice)
|
|
||||||
NO_GPU(DynamicSliceUpdate)
|
|
||||||
NO_GPU(FFT)
|
NO_GPU(FFT)
|
||||||
NO_GPU(GatherMM)
|
NO_GPU(GatherMM)
|
||||||
NO_GPU(GatherQMM)
|
NO_GPU(GatherQMM)
|
||||||
@@ -41,12 +39,7 @@ NO_GPU(Cholesky)
|
|||||||
NO_GPU_MULTI(Eig)
|
NO_GPU_MULTI(Eig)
|
||||||
NO_GPU_MULTI(Eigh)
|
NO_GPU_MULTI(Eigh)
|
||||||
|
|
||||||
namespace fast {
|
|
||||||
NO_GPU_MULTI(CustomKernel)
|
|
||||||
} // namespace fast
|
|
||||||
|
|
||||||
namespace distributed {
|
namespace distributed {
|
||||||
NO_GPU_MULTI(AllReduce)
|
|
||||||
NO_GPU_MULTI(AllGather)
|
NO_GPU_MULTI(AllGather)
|
||||||
NO_GPU_MULTI(Send)
|
NO_GPU_MULTI(Send)
|
||||||
NO_GPU_MULTI(Recv)
|
NO_GPU_MULTI(Recv)
|
||||||
|
|||||||
@@ -46,10 +46,10 @@ inline array ensure_row_contiguous_matrix(
|
|||||||
|
|
||||||
} // namespace
|
} // namespace
|
||||||
|
|
||||||
void fast::AffineQuantize::eval_gpu(
|
void fast::Quantize::eval_gpu(
|
||||||
const std::vector<array>& inputs,
|
const std::vector<array>& inputs,
|
||||||
std::vector<array>& outputs) {
|
std::vector<array>& outputs) {
|
||||||
nvtx3::scoped_range r("AffineQuantize::eval_gpu");
|
nvtx3::scoped_range r("Quantize::eval_gpu");
|
||||||
auto& s = stream();
|
auto& s = stream();
|
||||||
auto& d = cu::device(s.device);
|
auto& d = cu::device(s.device);
|
||||||
auto& enc = d.get_command_encoder(s);
|
auto& enc = d.get_command_encoder(s);
|
||||||
|
|||||||
@@ -103,15 +103,21 @@ template <typename T, bool traditional, bool forward, int N = 4>
|
|||||||
__device__ void rope_impl(
|
__device__ void rope_impl(
|
||||||
const T* in,
|
const T* in,
|
||||||
T* out,
|
T* out,
|
||||||
int offset,
|
const int* offset,
|
||||||
float inv_freq,
|
float inv_freq,
|
||||||
float scale,
|
float scale,
|
||||||
const cuda::std::array<int64_t, 3> strides,
|
const cuda::std::array<int64_t, 3> strides,
|
||||||
const cuda::std::array<int64_t, 3> out_strides,
|
const cuda::std::array<int64_t, 3> out_strides,
|
||||||
int64_t n_batch,
|
int64_t offset_stride,
|
||||||
|
int n_head,
|
||||||
uint3 pos,
|
uint3 pos,
|
||||||
uint3 dims) {
|
uint3 dims) {
|
||||||
float L = scale * static_cast<float>(pos.y + offset);
|
auto n_head_up = N * ((n_head + N - 1) / N);
|
||||||
|
auto head_idx = static_cast<int>((pos.z * N) % n_head_up);
|
||||||
|
auto batch_idx = (pos.z * N) / n_head_up;
|
||||||
|
auto batch_offset = offset[batch_idx * offset_stride];
|
||||||
|
float L = scale * static_cast<float>(pos.y + batch_offset);
|
||||||
|
auto mat_idx = batch_idx * n_head + head_idx;
|
||||||
|
|
||||||
// Compute costheta, sintheta
|
// Compute costheta, sintheta
|
||||||
float theta = L * inv_freq;
|
float theta = L * inv_freq;
|
||||||
@@ -123,20 +129,19 @@ __device__ void rope_impl(
|
|||||||
size_t out_index_1, out_index_2;
|
size_t out_index_1, out_index_2;
|
||||||
if (traditional) {
|
if (traditional) {
|
||||||
out_index_1 = 2 * pos.x * out_strides[2] + pos.y * out_strides[1] +
|
out_index_1 = 2 * pos.x * out_strides[2] + pos.y * out_strides[1] +
|
||||||
N * pos.z * out_strides[0];
|
mat_idx * out_strides[0];
|
||||||
out_index_2 = out_index_1 + 1;
|
out_index_2 = out_index_1 + 1;
|
||||||
in_index_1 =
|
in_index_1 =
|
||||||
2 * pos.x * strides[2] + pos.y * strides[1] + N * pos.z * strides[0];
|
2 * pos.x * strides[2] + pos.y * strides[1] + mat_idx * strides[0];
|
||||||
in_index_2 = in_index_1 + strides[2];
|
in_index_2 = in_index_1 + strides[2];
|
||||||
} else {
|
} else {
|
||||||
out_index_1 = pos.x * out_strides[2] + pos.y * out_strides[1] +
|
out_index_1 = pos.x * out_strides[2] + pos.y * out_strides[1] +
|
||||||
N * pos.z * out_strides[0];
|
mat_idx * out_strides[0];
|
||||||
out_index_2 = out_index_1 + dims.x * out_strides[2];
|
out_index_2 = out_index_1 + dims.x * out_strides[2];
|
||||||
in_index_1 =
|
in_index_1 = pos.x * strides[2] + pos.y * strides[1] + mat_idx * strides[0];
|
||||||
pos.x * strides[2] + pos.y * strides[1] + N * pos.z * strides[0];
|
|
||||||
in_index_2 = in_index_1 + dims.x * strides[2];
|
in_index_2 = in_index_1 + dims.x * strides[2];
|
||||||
}
|
}
|
||||||
for (int i = 0; i < N && pos.z * N + i < n_batch; ++i) {
|
for (int i = 0; i < N && head_idx + i < n_head; ++i) {
|
||||||
// Read and write the output
|
// Read and write the output
|
||||||
float x1 = static_cast<float>(in[in_index_1]);
|
float x1 = static_cast<float>(in[in_index_1]);
|
||||||
float x2 = static_cast<float>(in[in_index_2]);
|
float x2 = static_cast<float>(in[in_index_2]);
|
||||||
@@ -167,7 +172,8 @@ __global__ void rope(
|
|||||||
float base,
|
float base,
|
||||||
const __grid_constant__ cuda::std::array<int64_t, 3> strides,
|
const __grid_constant__ cuda::std::array<int64_t, 3> strides,
|
||||||
const __grid_constant__ cuda::std::array<int64_t, 3> out_strides,
|
const __grid_constant__ cuda::std::array<int64_t, 3> out_strides,
|
||||||
int64_t n_batch,
|
int64_t offset_stride,
|
||||||
|
int n_head,
|
||||||
uint3 dims) {
|
uint3 dims) {
|
||||||
uint3 pos = make_uint3(
|
uint3 pos = make_uint3(
|
||||||
blockIdx.x * blockDim.x + threadIdx.x,
|
blockIdx.x * blockDim.x + threadIdx.x,
|
||||||
@@ -182,12 +188,13 @@ __global__ void rope(
|
|||||||
rope_impl<T, traditional, forward>(
|
rope_impl<T, traditional, forward>(
|
||||||
in,
|
in,
|
||||||
out,
|
out,
|
||||||
*offset,
|
offset,
|
||||||
inv_freq,
|
inv_freq,
|
||||||
scale,
|
scale,
|
||||||
strides,
|
strides,
|
||||||
out_strides,
|
out_strides,
|
||||||
n_batch,
|
offset_stride,
|
||||||
|
n_head,
|
||||||
pos,
|
pos,
|
||||||
dims);
|
dims);
|
||||||
}
|
}
|
||||||
@@ -202,7 +209,8 @@ __global__ void rope_freqs(
|
|||||||
float base,
|
float base,
|
||||||
const __grid_constant__ cuda::std::array<int64_t, 3> strides,
|
const __grid_constant__ cuda::std::array<int64_t, 3> strides,
|
||||||
const __grid_constant__ cuda::std::array<int64_t, 3> out_strides,
|
const __grid_constant__ cuda::std::array<int64_t, 3> out_strides,
|
||||||
int64_t n_batch,
|
int64_t offset_stride,
|
||||||
|
int n_head,
|
||||||
uint3 dims,
|
uint3 dims,
|
||||||
int64_t freq_stride) {
|
int64_t freq_stride) {
|
||||||
uint3 pos = make_uint3(
|
uint3 pos = make_uint3(
|
||||||
@@ -217,12 +225,13 @@ __global__ void rope_freqs(
|
|||||||
rope_impl<T, traditional, forward>(
|
rope_impl<T, traditional, forward>(
|
||||||
in,
|
in,
|
||||||
out,
|
out,
|
||||||
*offset,
|
offset,
|
||||||
inv_freq,
|
inv_freq,
|
||||||
scale,
|
scale,
|
||||||
strides,
|
strides,
|
||||||
out_strides,
|
out_strides,
|
||||||
n_batch,
|
offset_stride,
|
||||||
|
n_head,
|
||||||
pos,
|
pos,
|
||||||
dims);
|
dims);
|
||||||
}
|
}
|
||||||
@@ -245,23 +254,28 @@ void RoPE::eval_gpu(
|
|||||||
auto& offset = inputs[1];
|
auto& offset = inputs[1];
|
||||||
auto& out = outputs[0];
|
auto& out = outputs[0];
|
||||||
|
|
||||||
if (in.ndim() < 3) {
|
|
||||||
throw std::runtime_error("[RoPE] Input must have at least 3 dimensions");
|
|
||||||
}
|
|
||||||
|
|
||||||
cuda::std::array<int64_t, 3> strides;
|
cuda::std::array<int64_t, 3> strides;
|
||||||
cuda::std::array<int64_t, 3> out_strides;
|
cuda::std::array<int64_t, 3> out_strides;
|
||||||
bool donated = false;
|
bool donated = false;
|
||||||
int ndim = in.ndim();
|
int ndim = in.ndim();
|
||||||
int dispatch_ndim = in.ndim();
|
|
||||||
|
int B = in.shape(0);
|
||||||
|
int T = in.shape(-2);
|
||||||
|
int D = in.shape(-1);
|
||||||
|
size_t mat_size = T * D;
|
||||||
|
int dispatch_ndim = ndim;
|
||||||
while (in.shape(-dispatch_ndim) == 1 && dispatch_ndim > 3) {
|
while (in.shape(-dispatch_ndim) == 1 && dispatch_ndim > 3) {
|
||||||
dispatch_ndim--;
|
dispatch_ndim--;
|
||||||
}
|
}
|
||||||
size_t mat_size = in.shape(-2) * in.shape(-1);
|
|
||||||
|
int N = 1;
|
||||||
|
for (int i = 1; i < (ndim - 2); ++i) {
|
||||||
|
N *= in.shape(i);
|
||||||
|
}
|
||||||
|
|
||||||
// We apply rope to less that the whole vector so copy to output and then
|
// We apply rope to less that the whole vector so copy to output and then
|
||||||
// apply in-place.
|
// apply in-place.
|
||||||
if (dims_ < in.shape(-1)) {
|
if (dims_ < D) {
|
||||||
donated = true;
|
donated = true;
|
||||||
auto ctype =
|
auto ctype =
|
||||||
(in.flags().row_contiguous) ? CopyType::Vector : CopyType::General;
|
(in.flags().row_contiguous) ? CopyType::Vector : CopyType::General;
|
||||||
@@ -302,7 +316,7 @@ void RoPE::eval_gpu(
|
|||||||
out_strides[2] = out.strides()[ndim - 1];
|
out_strides[2] = out.strides()[ndim - 1];
|
||||||
|
|
||||||
// Some flags to help us dispatch below
|
// Some flags to help us dispatch below
|
||||||
bool single = in.flags().row_contiguous && (mat_size == in.shape(-1));
|
bool single = in.flags().row_contiguous && B == 1 && T == 1;
|
||||||
bool with_freqs = inputs.size() == 3;
|
bool with_freqs = inputs.size() == 3;
|
||||||
|
|
||||||
auto& encoder = cu::get_command_encoder(s);
|
auto& encoder = cu::get_command_encoder(s);
|
||||||
@@ -319,7 +333,7 @@ void RoPE::eval_gpu(
|
|||||||
if (single && !with_freqs) {
|
if (single && !with_freqs) {
|
||||||
auto kernel =
|
auto kernel =
|
||||||
cu::rope_single<DataType, traditional.value, forward.value>;
|
cu::rope_single<DataType, traditional.value, forward.value>;
|
||||||
uint2 dims = make_uint2(dims_ / 2, in.size() / mat_size);
|
uint2 dims = make_uint2(dims_ / 2, N);
|
||||||
auto [grid, block] = get_grid_and_block(dims.x, dims.y, 1);
|
auto [grid, block] = get_grid_and_block(dims.x, dims.y, 1);
|
||||||
encoder.add_kernel_node(
|
encoder.add_kernel_node(
|
||||||
kernel,
|
kernel,
|
||||||
@@ -336,7 +350,7 @@ void RoPE::eval_gpu(
|
|||||||
} else if (single) {
|
} else if (single) {
|
||||||
auto kernel =
|
auto kernel =
|
||||||
cu::rope_single_freqs<DataType, traditional.value, forward.value>;
|
cu::rope_single_freqs<DataType, traditional.value, forward.value>;
|
||||||
uint2 dims = make_uint2(dims_ / 2, in.size() / mat_size);
|
uint2 dims = make_uint2(dims_ / 2, N);
|
||||||
auto [grid, block] = get_grid_and_block(dims.x, dims.y, 1);
|
auto [grid, block] = get_grid_and_block(dims.x, dims.y, 1);
|
||||||
encoder.add_kernel_node(
|
encoder.add_kernel_node(
|
||||||
kernel,
|
kernel,
|
||||||
@@ -354,10 +368,14 @@ void RoPE::eval_gpu(
|
|||||||
} else if (with_freqs) {
|
} else if (with_freqs) {
|
||||||
auto kernel =
|
auto kernel =
|
||||||
cu::rope_freqs<DataType, traditional.value, forward.value>;
|
cu::rope_freqs<DataType, traditional.value, forward.value>;
|
||||||
uint3 dims =
|
int n_per_thread = 4;
|
||||||
make_uint3(dims_ / 2, in.shape(-2), in.size() / mat_size);
|
uint32_t dimz = B * ((N + n_per_thread - 1) / n_per_thread);
|
||||||
dims.z = (dims.z + 3) / 4;
|
uint3 dims = make_uint3(dims_ / 2, T, dimz);
|
||||||
auto [grid, block] = get_grid_and_block(dims.x, dims.y, dims.z);
|
auto [grid, block] = get_grid_and_block(dims.x, dims.y, dims.z);
|
||||||
|
int64_t offset_stride = 0;
|
||||||
|
if (inputs[1].ndim() > 0) {
|
||||||
|
offset_stride = inputs[1].strides()[0];
|
||||||
|
}
|
||||||
encoder.add_kernel_node(
|
encoder.add_kernel_node(
|
||||||
kernel,
|
kernel,
|
||||||
grid,
|
grid,
|
||||||
@@ -371,15 +389,20 @@ void RoPE::eval_gpu(
|
|||||||
std::log2(base_),
|
std::log2(base_),
|
||||||
strides,
|
strides,
|
||||||
out_strides,
|
out_strides,
|
||||||
in.size() / mat_size,
|
offset_stride,
|
||||||
|
N,
|
||||||
dims,
|
dims,
|
||||||
inputs[2].strides(0));
|
inputs[2].strides(0));
|
||||||
} else {
|
} else {
|
||||||
auto kernel = cu::rope<DataType, traditional.value, forward.value>;
|
auto kernel = cu::rope<DataType, traditional.value, forward.value>;
|
||||||
uint3 dims =
|
int n_per_thread = 4;
|
||||||
make_uint3(dims_ / 2, in.shape(-2), in.size() / mat_size);
|
uint32_t dimz = B * ((N + n_per_thread - 1) / n_per_thread);
|
||||||
dims.z = (dims.z + 3) / 4;
|
uint3 dims = make_uint3(dims_ / 2, T, dimz);
|
||||||
auto [grid, block] = get_grid_and_block(dims.x, dims.y, dims.z);
|
auto [grid, block] = get_grid_and_block(dims.x, dims.y, dims.z);
|
||||||
|
int64_t offset_stride = 0;
|
||||||
|
if (inputs[1].ndim() > 0) {
|
||||||
|
offset_stride = inputs[1].strides()[0];
|
||||||
|
}
|
||||||
encoder.add_kernel_node(
|
encoder.add_kernel_node(
|
||||||
kernel,
|
kernel,
|
||||||
grid,
|
grid,
|
||||||
@@ -392,7 +415,8 @@ void RoPE::eval_gpu(
|
|||||||
std::log2(base_),
|
std::log2(base_),
|
||||||
strides,
|
strides,
|
||||||
out_strides,
|
out_strides,
|
||||||
in.size() / mat_size,
|
offset_stride,
|
||||||
|
N,
|
||||||
dims);
|
dims);
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|||||||
@@ -4,23 +4,16 @@
|
|||||||
#include "mlx/backend/cuda/device/config.h"
|
#include "mlx/backend/cuda/device/config.h"
|
||||||
#include "mlx/backend/cuda/device/utils.cuh"
|
#include "mlx/backend/cuda/device/utils.cuh"
|
||||||
#include "mlx/backend/cuda/kernel_utils.cuh"
|
#include "mlx/backend/cuda/kernel_utils.cuh"
|
||||||
#include "mlx/backend/cuda/lru_cache.h"
|
|
||||||
#include "mlx/backend/gpu/copy.h"
|
#include "mlx/backend/gpu/copy.h"
|
||||||
#include "mlx/dtype_utils.h"
|
#include "mlx/dtype_utils.h"
|
||||||
#include "mlx/fast_primitives.h"
|
#include "mlx/fast_primitives.h"
|
||||||
|
#include "mlx/transforms_impl.h"
|
||||||
|
|
||||||
// cudnn_frontend.h redefines this macro.
|
|
||||||
#undef CHECK_CUDA_ERROR
|
|
||||||
|
|
||||||
#include <cudnn_frontend.h>
|
|
||||||
#include <fmt/format.h>
|
|
||||||
#include <nvtx3/nvtx3.hpp>
|
#include <nvtx3/nvtx3.hpp>
|
||||||
|
|
||||||
#include <cooperative_groups.h>
|
#include <cooperative_groups.h>
|
||||||
#include <cooperative_groups/reduce.h>
|
#include <cooperative_groups/reduce.h>
|
||||||
|
|
||||||
namespace fe = cudnn_frontend;
|
|
||||||
|
|
||||||
namespace mlx::core {
|
namespace mlx::core {
|
||||||
|
|
||||||
namespace cu {
|
namespace cu {
|
||||||
@@ -52,6 +45,7 @@ __global__ void kernel_sdpav_1pass(
|
|||||||
const T* K,
|
const T* K,
|
||||||
const T* V,
|
const T* V,
|
||||||
T* O,
|
T* O,
|
||||||
|
const T* sinks,
|
||||||
__grid_constant__ const AttnParams params) {
|
__grid_constant__ const AttnParams params) {
|
||||||
constexpr int BN = 32;
|
constexpr int BN = 32;
|
||||||
constexpr int BD = 32;
|
constexpr int BD = 32;
|
||||||
@@ -71,7 +65,7 @@ __global__ void kernel_sdpav_1pass(
|
|||||||
__shared__ U max_scores[BN];
|
__shared__ U max_scores[BN];
|
||||||
__shared__ U sum_exp_scores[BN];
|
__shared__ U sum_exp_scores[BN];
|
||||||
|
|
||||||
const U scale_log2 = params.scale * 1.44269504089f;
|
const U scale_log2 = params.scale * M_LOG2E;
|
||||||
|
|
||||||
auto block = cg::this_thread_block();
|
auto block = cg::this_thread_block();
|
||||||
auto warp = cg::tiled_partition<32>(block);
|
auto warp = cg::tiled_partition<32>(block);
|
||||||
@@ -114,8 +108,12 @@ __global__ void kernel_sdpav_1pass(
|
|||||||
o[i] = 0.f;
|
o[i] = 0.f;
|
||||||
}
|
}
|
||||||
|
|
||||||
U max_score = -INFINITY;
|
U max_score = Limits<U>::finite_min();
|
||||||
U sum_exp_score = 0.f;
|
U sum_exp_score = 0.f;
|
||||||
|
if (sinks && warp_idx == 0) {
|
||||||
|
max_score = M_LOG2E * static_cast<U>(sinks[head_idx]);
|
||||||
|
sum_exp_score = 1.f;
|
||||||
|
}
|
||||||
|
|
||||||
// For each key
|
// For each key
|
||||||
for (int i = kv_seq_idx; i < params.kL; i += BN) {
|
for (int i = kv_seq_idx; i < params.kL; i += BN) {
|
||||||
@@ -173,7 +171,7 @@ __global__ void kernel_sdpav_1pass(
|
|||||||
U factor = exp2f(max_score - new_max);
|
U factor = exp2f(max_score - new_max);
|
||||||
sum_exp_score =
|
sum_exp_score =
|
||||||
cg::reduce(warp, sum_exp_scores[lane_idx] * factor, cg::plus<U>());
|
cg::reduce(warp, sum_exp_scores[lane_idx] * factor, cg::plus<U>());
|
||||||
sum_exp_score = __frcp_rn(sum_exp_score);
|
sum_exp_score = sum_exp_score == 0 ? 0 : __frcp_rn(sum_exp_score);
|
||||||
|
|
||||||
// Now we need to aggregate all the outputs
|
// Now we need to aggregate all the outputs
|
||||||
PRAGMA_LOOP_UNROLL
|
PRAGMA_LOOP_UNROLL
|
||||||
@@ -199,6 +197,7 @@ __global__ void kernel_sdpav_2pass_1(
|
|||||||
const T* Q,
|
const T* Q,
|
||||||
const T* K,
|
const T* K,
|
||||||
const T* V,
|
const T* V,
|
||||||
|
const T* sinks,
|
||||||
float* partials,
|
float* partials,
|
||||||
float* sums,
|
float* sums,
|
||||||
float* maxs,
|
float* maxs,
|
||||||
@@ -274,8 +273,12 @@ __global__ void kernel_sdpav_2pass_1(
|
|||||||
o[i] = 0.f;
|
o[i] = 0.f;
|
||||||
}
|
}
|
||||||
|
|
||||||
U max_score = -1e9;
|
U max_score = Limits<U>::finite_min();
|
||||||
U sum_exp_score = 0.f;
|
U sum_exp_score = 0.f;
|
||||||
|
if (sinks && warp_idx == 0 && block_idx == 0) {
|
||||||
|
max_score = M_LOG2E * static_cast<U>(sinks[head_idx]);
|
||||||
|
sum_exp_score = 1.f;
|
||||||
|
}
|
||||||
|
|
||||||
// For each key
|
// For each key
|
||||||
for (int i = kv_seq_idx; i < params.kL; i += blocks * BN) {
|
for (int i = kv_seq_idx; i < params.kL; i += blocks * BN) {
|
||||||
@@ -416,7 +419,7 @@ __global__ void kernel_sdpav_2pass_2(
|
|||||||
U new_max = cg::reduce(warp, max_score, cg::greater<U>());
|
U new_max = cg::reduce(warp, max_score, cg::greater<U>());
|
||||||
U factor = exp2f(max_score - new_max);
|
U factor = exp2f(max_score - new_max);
|
||||||
U sum_exp_score = cg::reduce(warp, sums[lane_idx] * factor, cg::plus<U>());
|
U sum_exp_score = cg::reduce(warp, sums[lane_idx] * factor, cg::plus<U>());
|
||||||
sum_exp_score = __frcp_rn(sum_exp_score);
|
sum_exp_score = sum_exp_score == 0 ? 0 : __frcp_rn(sum_exp_score);
|
||||||
|
|
||||||
PRAGMA_LOOP_UNROLL
|
PRAGMA_LOOP_UNROLL
|
||||||
for (int i = 0; i < v_per_thread; i++) {
|
for (int i = 0; i < v_per_thread; i++) {
|
||||||
@@ -469,10 +472,14 @@ void sdpa_vector_1pass_fallback(
|
|||||||
const array& v,
|
const array& v,
|
||||||
const float scale,
|
const float scale,
|
||||||
array& o,
|
array& o,
|
||||||
bool do_causal_ = false) {
|
bool do_causal,
|
||||||
|
const std::optional<array>& sinks) {
|
||||||
encoder.set_input_array(q);
|
encoder.set_input_array(q);
|
||||||
encoder.set_input_array(k);
|
encoder.set_input_array(k);
|
||||||
encoder.set_input_array(v);
|
encoder.set_input_array(v);
|
||||||
|
if (sinks) {
|
||||||
|
encoder.set_input_array(*sinks);
|
||||||
|
}
|
||||||
encoder.set_output_array(o);
|
encoder.set_output_array(o);
|
||||||
|
|
||||||
cu::AttnParams params{
|
cu::AttnParams params{
|
||||||
@@ -495,7 +502,7 @@ void sdpa_vector_1pass_fallback(
|
|||||||
dim3 block_dim(1024, 1, 1);
|
dim3 block_dim(1024, 1, 1);
|
||||||
|
|
||||||
dispatch_float_types(o.dtype(), "kernel_sdpav_1pass", [&](auto type_tag) {
|
dispatch_float_types(o.dtype(), "kernel_sdpav_1pass", [&](auto type_tag) {
|
||||||
dispatch_bool(do_causal_, [&](auto do_causal) {
|
dispatch_bool(do_causal, [&](auto do_causal) {
|
||||||
dispatch_headdim(params.D, [&](auto headdim) {
|
dispatch_headdim(params.D, [&](auto headdim) {
|
||||||
using DataType = cuda_type_t<MLX_GET_TYPE(type_tag)>;
|
using DataType = cuda_type_t<MLX_GET_TYPE(type_tag)>;
|
||||||
|
|
||||||
@@ -510,6 +517,7 @@ void sdpa_vector_1pass_fallback(
|
|||||||
k.data<DataType>(),
|
k.data<DataType>(),
|
||||||
v.data<DataType>(),
|
v.data<DataType>(),
|
||||||
o.data<DataType>(),
|
o.data<DataType>(),
|
||||||
|
sinks ? (*sinks).data<DataType>() : nullptr,
|
||||||
params);
|
params);
|
||||||
});
|
});
|
||||||
});
|
});
|
||||||
@@ -524,7 +532,8 @@ void sdpa_vector_2pass_fallback(
|
|||||||
const array& v,
|
const array& v,
|
||||||
const float scale,
|
const float scale,
|
||||||
array& o,
|
array& o,
|
||||||
bool do_causal_ = false) {
|
bool do_causal,
|
||||||
|
const std::optional<array>& sinks) {
|
||||||
cu::AttnParams params{
|
cu::AttnParams params{
|
||||||
/* int B = */ q.shape(0),
|
/* int B = */ q.shape(0),
|
||||||
/* int H = */ q.shape(1),
|
/* int H = */ q.shape(1),
|
||||||
@@ -565,7 +574,7 @@ void sdpa_vector_2pass_fallback(
|
|||||||
encoder.add_temporary(maxs);
|
encoder.add_temporary(maxs);
|
||||||
|
|
||||||
dispatch_float_types(o.dtype(), "kernel_sdpav_2pass", [&](auto type_tag) {
|
dispatch_float_types(o.dtype(), "kernel_sdpav_2pass", [&](auto type_tag) {
|
||||||
dispatch_bool(do_causal_, [&](auto do_causal) {
|
dispatch_bool(do_causal, [&](auto do_causal) {
|
||||||
dispatch_headdim(params.D, [&](auto headdim) {
|
dispatch_headdim(params.D, [&](auto headdim) {
|
||||||
using DataType = cuda_type_t<MLX_GET_TYPE(type_tag)>;
|
using DataType = cuda_type_t<MLX_GET_TYPE(type_tag)>;
|
||||||
|
|
||||||
@@ -576,6 +585,10 @@ void sdpa_vector_2pass_fallback(
|
|||||||
encoder.set_input_array(q);
|
encoder.set_input_array(q);
|
||||||
encoder.set_input_array(k);
|
encoder.set_input_array(k);
|
||||||
encoder.set_input_array(v);
|
encoder.set_input_array(v);
|
||||||
|
if (sinks) {
|
||||||
|
encoder.set_input_array(*sinks);
|
||||||
|
}
|
||||||
|
|
||||||
encoder.set_output_array(intermediate);
|
encoder.set_output_array(intermediate);
|
||||||
encoder.set_output_array(sums);
|
encoder.set_output_array(sums);
|
||||||
encoder.set_output_array(maxs);
|
encoder.set_output_array(maxs);
|
||||||
@@ -591,6 +604,7 @@ void sdpa_vector_2pass_fallback(
|
|||||||
q.data<DataType>(),
|
q.data<DataType>(),
|
||||||
k.data<DataType>(),
|
k.data<DataType>(),
|
||||||
v.data<DataType>(),
|
v.data<DataType>(),
|
||||||
|
sinks ? (*sinks).data<DataType>() : nullptr,
|
||||||
intermediate.data<float>(),
|
intermediate.data<float>(),
|
||||||
sums.data<float>(),
|
sums.data<float>(),
|
||||||
maxs.data<float>(),
|
maxs.data<float>(),
|
||||||
@@ -633,306 +647,19 @@ void sdpa_vector_fallback(
|
|||||||
const array& v,
|
const array& v,
|
||||||
const float scale,
|
const float scale,
|
||||||
array& o,
|
array& o,
|
||||||
bool do_causal_ = false) {
|
bool do_causal,
|
||||||
|
const std::optional<array>& sinks) {
|
||||||
int kL = k.shape(2);
|
int kL = k.shape(2);
|
||||||
|
|
||||||
if (kL > 1024) {
|
if (kL > 1024) {
|
||||||
return sdpa_vector_2pass_fallback(
|
return sdpa_vector_2pass_fallback(
|
||||||
s, encoder, q, k, v, scale, o, do_causal_);
|
s, encoder, q, k, v, scale, o, do_causal, sinks);
|
||||||
} else {
|
} else {
|
||||||
return sdpa_vector_1pass_fallback(
|
return sdpa_vector_1pass_fallback(
|
||||||
s, encoder, q, k, v, scale, o, do_causal_);
|
s, encoder, q, k, v, scale, o, do_causal, sinks);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
struct SDPACacheKey {
|
|
||||||
int device_id;
|
|
||||||
fe::DataType_t cudnn_type;
|
|
||||||
|
|
||||||
int B;
|
|
||||||
int H;
|
|
||||||
int D;
|
|
||||||
|
|
||||||
int qL;
|
|
||||||
int kL;
|
|
||||||
|
|
||||||
int gqa_factor;
|
|
||||||
float scale;
|
|
||||||
|
|
||||||
int64_t Q_strides[3];
|
|
||||||
int64_t K_strides[3];
|
|
||||||
int64_t V_strides[3];
|
|
||||||
int64_t O_strides[3];
|
|
||||||
|
|
||||||
bool generate_stats;
|
|
||||||
bool causal_mask;
|
|
||||||
};
|
|
||||||
|
|
||||||
auto& sdpa_cache() {
|
|
||||||
static LRUBytesKeyCache<SDPACacheKey, std::shared_ptr<fe::graph::Graph>>
|
|
||||||
cache(
|
|
||||||
/* capacity */ 128);
|
|
||||||
return cache;
|
|
||||||
}
|
|
||||||
|
|
||||||
#define Q_UID 1
|
|
||||||
#define K_UID 2
|
|
||||||
#define V_UID 3
|
|
||||||
#define O_UID 4
|
|
||||||
#define STATS_UID 5
|
|
||||||
|
|
||||||
std::shared_ptr<fe::graph::Graph> get_sdpa_forward_graph(
|
|
||||||
cu::CommandEncoder& encoder,
|
|
||||||
const SDPACacheKey& cache_key) {
|
|
||||||
// Check if graph has already been fully built
|
|
||||||
if (auto it = sdpa_cache().find(cache_key); it != sdpa_cache().end()) {
|
|
||||||
return it->second;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set up new graph
|
|
||||||
auto graph = std::make_shared<fe::graph::Graph>();
|
|
||||||
|
|
||||||
graph->set_io_data_type(cache_key.cudnn_type)
|
|
||||||
.set_intermediate_data_type(fe::DataType_t::FLOAT)
|
|
||||||
.set_compute_data_type(fe::DataType_t::FLOAT);
|
|
||||||
|
|
||||||
auto Q = graph->tensor(
|
|
||||||
fe::graph::Tensor_attributes()
|
|
||||||
.set_name("Q")
|
|
||||||
.set_uid(Q_UID)
|
|
||||||
.set_dim({cache_key.B, cache_key.H, cache_key.qL, cache_key.D})
|
|
||||||
.set_stride(
|
|
||||||
{cache_key.Q_strides[0],
|
|
||||||
cache_key.Q_strides[1],
|
|
||||||
cache_key.Q_strides[2],
|
|
||||||
1}));
|
|
||||||
|
|
||||||
int h_kv = cache_key.H / cache_key.gqa_factor;
|
|
||||||
auto K =
|
|
||||||
graph->tensor(fe::graph::Tensor_attributes()
|
|
||||||
.set_name("K")
|
|
||||||
.set_uid(K_UID)
|
|
||||||
.set_dim({cache_key.B, h_kv, cache_key.kL, cache_key.D})
|
|
||||||
.set_stride(
|
|
||||||
{cache_key.K_strides[0],
|
|
||||||
cache_key.K_strides[1],
|
|
||||||
cache_key.V_strides[2],
|
|
||||||
1}));
|
|
||||||
|
|
||||||
auto V =
|
|
||||||
graph->tensor(fe::graph::Tensor_attributes()
|
|
||||||
.set_name("V")
|
|
||||||
.set_uid(V_UID)
|
|
||||||
.set_dim({cache_key.B, h_kv, cache_key.kL, cache_key.D})
|
|
||||||
.set_stride(
|
|
||||||
{cache_key.V_strides[0],
|
|
||||||
cache_key.V_strides[1],
|
|
||||||
cache_key.V_strides[2],
|
|
||||||
1}));
|
|
||||||
|
|
||||||
auto sdpa_options = fe::graph::SDPA_attributes()
|
|
||||||
.set_name("flash_attention")
|
|
||||||
.set_is_inference(!cache_key.generate_stats)
|
|
||||||
.set_attn_scale(cache_key.scale);
|
|
||||||
|
|
||||||
if (cache_key.causal_mask && cache_key.qL > 1) {
|
|
||||||
sdpa_options.set_diagonal_alignment(fe::DiagonalAlignment_t::TOP_LEFT)
|
|
||||||
.set_diagonal_band_right_bound(0);
|
|
||||||
}
|
|
||||||
|
|
||||||
auto [O, Stats] = graph->sdpa(Q, K, V, sdpa_options);
|
|
||||||
|
|
||||||
O->set_output(true)
|
|
||||||
.set_uid(O_UID)
|
|
||||||
.set_dim({cache_key.B, cache_key.H, cache_key.qL, cache_key.D})
|
|
||||||
.set_stride(
|
|
||||||
{cache_key.O_strides[0],
|
|
||||||
cache_key.O_strides[1],
|
|
||||||
cache_key.O_strides[2],
|
|
||||||
1});
|
|
||||||
|
|
||||||
if (cache_key.generate_stats) {
|
|
||||||
Stats->set_output(true)
|
|
||||||
.set_data_type(fe::DataType_t::FLOAT)
|
|
||||||
.set_uid(STATS_UID);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build and Validate cudnn graph
|
|
||||||
|
|
||||||
auto handle = encoder.device().cudnn_handle();
|
|
||||||
|
|
||||||
// cuDNN only supports native CUDA graphs for sdpa in 9.6 or above.
|
|
||||||
if (cudnnGetVersion() < 90600) {
|
|
||||||
auto build_status = graph->build(handle, {fe::HeurMode_t::A});
|
|
||||||
if (!build_status.is_good()) {
|
|
||||||
throw std::runtime_error(
|
|
||||||
"Unable to build cudnn graph for attention."
|
|
||||||
" Failed with message: " +
|
|
||||||
build_status.get_message());
|
|
||||||
}
|
|
||||||
|
|
||||||
} else {
|
|
||||||
auto val_status = graph->validate();
|
|
||||||
auto op_status = graph->build_operation_graph(handle);
|
|
||||||
|
|
||||||
auto plan_stauts =
|
|
||||||
graph->create_execution_plans({cudnn_frontend::HeurMode_t::A});
|
|
||||||
if (!plan_stauts.is_good()) {
|
|
||||||
throw std::runtime_error(
|
|
||||||
"Unable to create exec plan for cudnn attention."
|
|
||||||
" Failed with message: " +
|
|
||||||
plan_stauts.get_message());
|
|
||||||
}
|
|
||||||
|
|
||||||
graph->select_behavior_notes(
|
|
||||||
{cudnn_frontend::BehaviorNote_t::SUPPORTS_CUDA_GRAPH_NATIVE_API});
|
|
||||||
|
|
||||||
auto support_status = graph->check_support(handle);
|
|
||||||
if (!support_status.is_good()) {
|
|
||||||
throw std::runtime_error(
|
|
||||||
"No cuda graph support for cudnn attention."
|
|
||||||
" Failed with message: " +
|
|
||||||
support_status.get_message());
|
|
||||||
}
|
|
||||||
|
|
||||||
auto build_status = graph->build_plans(handle);
|
|
||||||
if (!build_status.is_good()) {
|
|
||||||
throw std::runtime_error(
|
|
||||||
"Unable to build cudnn graph for attention."
|
|
||||||
" Failed with message: " +
|
|
||||||
build_status.get_message());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
auto [it, _] = sdpa_cache().emplace(cache_key, graph);
|
|
||||||
|
|
||||||
return it->second;
|
|
||||||
}
|
|
||||||
|
|
||||||
inline fe::DataType_t dtype_to_cudnn_type(Dtype dtype) {
|
|
||||||
switch (dtype) {
|
|
||||||
case int8:
|
|
||||||
return fe::DataType_t::INT8;
|
|
||||||
case int32:
|
|
||||||
return fe::DataType_t::INT32;
|
|
||||||
case uint8:
|
|
||||||
return fe::DataType_t::UINT8;
|
|
||||||
case float16:
|
|
||||||
return fe::DataType_t::HALF;
|
|
||||||
case bfloat16:
|
|
||||||
return fe::DataType_t::BFLOAT16;
|
|
||||||
case float32:
|
|
||||||
return fe::DataType_t::FLOAT;
|
|
||||||
case float64:
|
|
||||||
return fe::DataType_t::DOUBLE;
|
|
||||||
default:
|
|
||||||
throw std::runtime_error(fmt::format(
|
|
||||||
"Unsupported dtype in SDPA: {}.", dtype_to_string(dtype)));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void sdpa_cudnn(
|
|
||||||
const Stream& s,
|
|
||||||
cu::CommandEncoder& encoder,
|
|
||||||
const array& q,
|
|
||||||
const array& k,
|
|
||||||
const array& v,
|
|
||||||
const float scale,
|
|
||||||
array& o,
|
|
||||||
bool do_causal_ = false) {
|
|
||||||
encoder.set_input_array(q);
|
|
||||||
encoder.set_input_array(k);
|
|
||||||
encoder.set_input_array(v);
|
|
||||||
encoder.set_output_array(o);
|
|
||||||
|
|
||||||
auto cudnn_type = dtype_to_cudnn_type(q.dtype());
|
|
||||||
|
|
||||||
int B = q.shape(0);
|
|
||||||
int H = q.shape(1);
|
|
||||||
int D = q.shape(3);
|
|
||||||
int gqa_factor = q.shape(1) / k.shape(1);
|
|
||||||
|
|
||||||
int qL = q.shape(2);
|
|
||||||
int kL = k.shape(2);
|
|
||||||
|
|
||||||
SDPACacheKey cache_key{
|
|
||||||
/* int device_id = */ encoder.device().cuda_device(),
|
|
||||||
/* fe::DataType_t cudnn_type = */ cudnn_type,
|
|
||||||
|
|
||||||
/* int B = */ B,
|
|
||||||
/* int H = */ H,
|
|
||||||
/* int D = */ D,
|
|
||||||
|
|
||||||
/* int qL = */ qL,
|
|
||||||
/* int kL = */ kL,
|
|
||||||
|
|
||||||
/* int gqa_factor = */ gqa_factor,
|
|
||||||
/* float scale = */ scale,
|
|
||||||
|
|
||||||
/* int64_t Q_strides[3] = */ {q.strides(0), q.strides(1), q.strides(2)},
|
|
||||||
/* int64_t K_strides[3] = */ {k.strides(0), k.strides(1), k.strides(2)},
|
|
||||||
/* int64_t V_strides[3] = */ {v.strides(0), v.strides(1), v.strides(2)},
|
|
||||||
/* int64_t O_strides[3] = */ {o.strides(0), o.strides(1), o.strides(2)},
|
|
||||||
|
|
||||||
/* bool generate_stats = */ false,
|
|
||||||
/* bool causal_mask = */ do_causal_};
|
|
||||||
|
|
||||||
auto graph = get_sdpa_forward_graph(encoder, cache_key);
|
|
||||||
|
|
||||||
int64_t workspace_size = 0;
|
|
||||||
auto workspace_status = graph->get_workspace_size(workspace_size);
|
|
||||||
if (!workspace_status.is_good()) {
|
|
||||||
throw std::runtime_error("Unable to get workspace for cudnn attention.");
|
|
||||||
}
|
|
||||||
|
|
||||||
array workspace(
|
|
||||||
allocator::malloc(workspace_size), {int(workspace_size)}, uint8);
|
|
||||||
auto workspace_ptr = workspace.data<void>();
|
|
||||||
|
|
||||||
std::unordered_map<int64_t, void*> variant_pack = {
|
|
||||||
{Q_UID, const_cast<void*>(q.data<void>())},
|
|
||||||
{K_UID, const_cast<void*>(k.data<void>())},
|
|
||||||
{V_UID, const_cast<void*>(v.data<void>())},
|
|
||||||
{O_UID, o.data<void>()}};
|
|
||||||
|
|
||||||
auto handle = encoder.device().cudnn_handle();
|
|
||||||
cudnnSetStream(handle, encoder.stream());
|
|
||||||
|
|
||||||
// cuDNN only supports native CUDA graphs for sdpa in 9.6 or above.
|
|
||||||
if (cudnnGetVersion() < 90600) {
|
|
||||||
auto capture = encoder.capture_context();
|
|
||||||
auto exec_status = graph->execute(handle, variant_pack, workspace_ptr);
|
|
||||||
|
|
||||||
if (!exec_status.is_good()) {
|
|
||||||
capture.discard = true;
|
|
||||||
throw std::runtime_error(
|
|
||||||
"Unable to execute cudnn attention."
|
|
||||||
" Failed with message: " +
|
|
||||||
exec_status.get_message());
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
cudaGraph_t cu_graph;
|
|
||||||
cudaGraphCreate(&cu_graph, 0);
|
|
||||||
|
|
||||||
std::unique_ptr<cudaGraph_t, void (*)(cudaGraph_t*)> graph_freer(
|
|
||||||
&cu_graph, [](cudaGraph_t* p) { cudaGraphDestroy(*p); });
|
|
||||||
|
|
||||||
auto cu_graph_status = graph->populate_cuda_graph(
|
|
||||||
handle, variant_pack, workspace_ptr, cu_graph);
|
|
||||||
|
|
||||||
if (!cu_graph_status.is_good()) {
|
|
||||||
throw std::runtime_error(
|
|
||||||
"Unable to add cuda graph for cudnn attention."
|
|
||||||
" Failed with message: " +
|
|
||||||
cu_graph_status.get_message());
|
|
||||||
}
|
|
||||||
|
|
||||||
encoder.add_graph_node(cu_graph);
|
|
||||||
}
|
|
||||||
|
|
||||||
encoder.add_temporary(workspace);
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace
|
} // namespace
|
||||||
|
|
||||||
namespace fast {
|
namespace fast {
|
||||||
@@ -945,6 +672,9 @@ bool ScaledDotProductAttention::use_fallback(
|
|||||||
bool has_arr_mask,
|
bool has_arr_mask,
|
||||||
bool do_causal,
|
bool do_causal,
|
||||||
Stream s) {
|
Stream s) {
|
||||||
|
if (detail::in_grad_tracing()) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
if (s.device == Device::cpu) {
|
if (s.device == Device::cpu) {
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
@@ -960,15 +690,7 @@ bool ScaledDotProductAttention::use_fallback(
|
|||||||
const bool supported_vector_config =
|
const bool supported_vector_config =
|
||||||
sdpa_supported_head_dim && query_sequence_length < 4;
|
sdpa_supported_head_dim && query_sequence_length < 4;
|
||||||
|
|
||||||
auto& cu_device = cu::device(s.device);
|
const bool supported_config = supported_vector_config;
|
||||||
|
|
||||||
const bool supported_matrix_config = query_sequence_length > 4 &&
|
|
||||||
cu_device.compute_capability_major() >= 8 &&
|
|
||||||
query_sequence_length == key_sequence_length &&
|
|
||||||
(q.dtype() == float16 || q.dtype() == bfloat16);
|
|
||||||
|
|
||||||
const bool supported_config =
|
|
||||||
(supported_matrix_config || supported_vector_config);
|
|
||||||
|
|
||||||
return has_arr_mask || !supported_config;
|
return has_arr_mask || !supported_config;
|
||||||
}
|
}
|
||||||
@@ -990,7 +712,7 @@ void ScaledDotProductAttention::eval_gpu(
|
|||||||
|
|
||||||
// Define some copy functions to ensure the layout of the inputs is as
|
// Define some copy functions to ensure the layout of the inputs is as
|
||||||
// expected.
|
// expected.
|
||||||
copies.reserve(3);
|
copies.reserve(inputs.size());
|
||||||
auto copy_unless = [&copies, &s](
|
auto copy_unless = [&copies, &s](
|
||||||
auto predicate, const array& arr) -> const array& {
|
auto predicate, const array& arr) -> const array& {
|
||||||
if (!predicate(arr)) {
|
if (!predicate(arr)) {
|
||||||
@@ -1002,10 +724,16 @@ void ScaledDotProductAttention::eval_gpu(
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Checks that the headdim dimension has stride 1.
|
||||||
auto is_matrix_contiguous = [](const array& arr) {
|
auto is_matrix_contiguous = [](const array& arr) {
|
||||||
return arr.strides(-1) == 1;
|
return arr.strides(-1) == 1;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
std::optional<array> sinks = std::nullopt;
|
||||||
|
if (has_sinks_) {
|
||||||
|
sinks = copy_unless(is_matrix_contiguous, inputs.back());
|
||||||
|
}
|
||||||
|
|
||||||
// We are in vector mode ie single query
|
// We are in vector mode ie single query
|
||||||
if (q_pre.shape(2) < 4) {
|
if (q_pre.shape(2) < 4) {
|
||||||
auto q_copy_unless = [](const array& arr) {
|
auto q_copy_unless = [](const array& arr) {
|
||||||
@@ -1043,10 +771,6 @@ void ScaledDotProductAttention::eval_gpu(
|
|||||||
const auto& k = copy_unless(kv_copy_unless, k_pre);
|
const auto& k = copy_unless(kv_copy_unless, k_pre);
|
||||||
const auto& v = copy_unless(kv_copy_unless, v_pre);
|
const auto& v = copy_unless(kv_copy_unless, v_pre);
|
||||||
|
|
||||||
for (const auto& cp : copies) {
|
|
||||||
encoder.add_temporary(cp);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Donate the query if possible
|
// Donate the query if possible
|
||||||
if (q.is_donatable() && q.flags().row_contiguous && q.size() == o.size()) {
|
if (q.is_donatable() && q.flags().row_contiguous && q.size() == o.size()) {
|
||||||
o.copy_shared_buffer(q);
|
o.copy_shared_buffer(q);
|
||||||
@@ -1055,53 +779,31 @@ void ScaledDotProductAttention::eval_gpu(
|
|||||||
int64_t str_oH = o.shape(3);
|
int64_t str_oH = o.shape(3);
|
||||||
int64_t str_oL = o.shape(1) * str_oH;
|
int64_t str_oL = o.shape(1) * str_oH;
|
||||||
int64_t str_oB = o.shape(2) * str_oL;
|
int64_t str_oB = o.shape(2) * str_oL;
|
||||||
size_t data_size = o.shape(0) * str_oB;
|
|
||||||
|
|
||||||
array::Flags flags{
|
array::Flags flags{
|
||||||
/* bool contiguous = */ 1,
|
/* bool contiguous = */ 1,
|
||||||
/* bool row_contiguous = */ 0,
|
/* bool row_contiguous = */ o.shape(2) == 1,
|
||||||
/* bool col_contiguous = */ 0,
|
/* bool col_contiguous = */ o.size() == o.shape(3),
|
||||||
};
|
};
|
||||||
|
|
||||||
o.set_data(
|
o.set_data(
|
||||||
allocator::malloc(o.nbytes()),
|
allocator::malloc(o.nbytes()),
|
||||||
data_size,
|
o.size(),
|
||||||
{str_oB, str_oH, str_oL, str_oD},
|
{str_oB, str_oH, str_oL, str_oD},
|
||||||
flags);
|
flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
return sdpa_vector_fallback(s, encoder, q, k, v, scale_, o, do_causal_);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Full attention mode
|
|
||||||
else {
|
|
||||||
const auto& q = copy_unless(is_matrix_contiguous, q_pre);
|
|
||||||
const auto& k = copy_unless(is_matrix_contiguous, k_pre);
|
|
||||||
const auto& v = copy_unless(is_matrix_contiguous, v_pre);
|
|
||||||
|
|
||||||
for (const auto& cp : copies) {
|
for (const auto& cp : copies) {
|
||||||
encoder.add_temporary(cp);
|
encoder.add_temporary(cp);
|
||||||
}
|
}
|
||||||
|
|
||||||
int64_t str_oD = 1;
|
return sdpa_vector_fallback(
|
||||||
int64_t str_oH = o.shape(3);
|
s, encoder, q, k, v, scale_, o, do_causal_, sinks);
|
||||||
int64_t str_oL = o.shape(1) * str_oH;
|
}
|
||||||
int64_t str_oB = o.shape(2) * str_oL;
|
|
||||||
size_t data_size = o.shape(0) * str_oB;
|
|
||||||
|
|
||||||
array::Flags flags{
|
// Full attention mode should never reach here
|
||||||
/* bool contiguous = */ 1,
|
else {
|
||||||
/* bool row_contiguous = */ 0,
|
throw std::runtime_error("Doesn't support matrix yet.");
|
||||||
/* bool col_contiguous = */ 0,
|
|
||||||
};
|
|
||||||
|
|
||||||
o.set_data(
|
|
||||||
allocator::malloc(o.nbytes()),
|
|
||||||
data_size,
|
|
||||||
{str_oB, str_oH, str_oL, str_oD},
|
|
||||||
flags);
|
|
||||||
|
|
||||||
return sdpa_cudnn(s, encoder, q, k, v, scale_, o, do_causal_);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -1,8 +1,11 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
#include "mlx/backend/common/slicing.h"
|
#include "mlx/backend/common/slicing.h"
|
||||||
|
#include "mlx/backend/cuda/device.h"
|
||||||
|
#include "mlx/backend/cuda/jit_module.h"
|
||||||
#include "mlx/backend/gpu/copy.h"
|
#include "mlx/backend/gpu/copy.h"
|
||||||
#include "mlx/backend/gpu/slicing.h"
|
#include "mlx/backend/gpu/slicing.h"
|
||||||
|
#include "mlx/dtype_utils.h"
|
||||||
|
|
||||||
#include <numeric>
|
#include <numeric>
|
||||||
|
|
||||||
@@ -27,8 +30,7 @@ void concatenate_gpu(
|
|||||||
flags.row_contiguous = false;
|
flags.row_contiguous = false;
|
||||||
flags.col_contiguous = false;
|
flags.col_contiguous = false;
|
||||||
flags.contiguous = false;
|
flags.contiguous = false;
|
||||||
// TODO: Handle concurrent outputs:
|
auto concurrent = cu::get_command_encoder(s).concurrent_context();
|
||||||
// https://github.com/ml-explore/mlx/pull/2145#discussion_r2070753816
|
|
||||||
for (int i = 0; i < inputs.size(); i++) {
|
for (int i = 0; i < inputs.size(); i++) {
|
||||||
array out_slice(inputs[i].shape(), out.dtype(), nullptr, {});
|
array out_slice(inputs[i].shape(), out.dtype(), nullptr, {});
|
||||||
size_t data_offset = strides[axis] * sizes[i];
|
size_t data_offset = strides[axis] * sizes[i];
|
||||||
@@ -38,4 +40,71 @@ void concatenate_gpu(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
array compute_dynamic_offset(
|
||||||
|
const array& indices,
|
||||||
|
const Strides& strides,
|
||||||
|
const std::vector<int>& axes,
|
||||||
|
const Stream& s) {
|
||||||
|
Dtype dtype = indices.dtype();
|
||||||
|
int nidx = axes.size();
|
||||||
|
|
||||||
|
std::string module_name =
|
||||||
|
fmt::format("compute_dynamic_offset_{}_{}", dtype_to_string(dtype), nidx);
|
||||||
|
std::string kernel_name = fmt::format(
|
||||||
|
"mlx::core::cu::compute_dynamic_offset<{}, {}>",
|
||||||
|
dtype_to_cuda_type(dtype),
|
||||||
|
nidx);
|
||||||
|
|
||||||
|
cu::JitModule& mod = cu::get_jit_module(s.device, module_name, [&]() {
|
||||||
|
std::string source = R"(
|
||||||
|
#include "mlx/backend/cuda/device/utils.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core::cu {
|
||||||
|
|
||||||
|
template <typename T, int NIDX>
|
||||||
|
__global__ void compute_dynamic_offset(
|
||||||
|
const T* indices,
|
||||||
|
int64_t* offset,
|
||||||
|
const __grid_constant__ Strides strides,
|
||||||
|
const __grid_constant__ cuda::std::array<int, NIDX> axes) {
|
||||||
|
int64_t acc = 0;
|
||||||
|
#pragma unroll
|
||||||
|
for (int i = 0; i < NIDX; ++i) {
|
||||||
|
acc += indices[i] * strides[axes[i]];
|
||||||
|
}
|
||||||
|
*offset = acc;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace mlx::core::cu
|
||||||
|
)";
|
||||||
|
return std::make_tuple(false, std::move(source), std::vector{kernel_name});
|
||||||
|
});
|
||||||
|
|
||||||
|
// Prepare output.
|
||||||
|
array offset({1}, int64, nullptr, {});
|
||||||
|
bool donate = indices.is_donatable() &&
|
||||||
|
(indices.data_size() * indices.itemsize()) >= offset.itemsize();
|
||||||
|
if (donate) {
|
||||||
|
offset.copy_shared_buffer(indices);
|
||||||
|
} else {
|
||||||
|
offset.set_data(allocator::malloc(offset.itemsize()));
|
||||||
|
}
|
||||||
|
|
||||||
|
auto& encoder = cu::get_command_encoder(s);
|
||||||
|
encoder.add_temporary(offset);
|
||||||
|
encoder.set_input_array(indices);
|
||||||
|
encoder.set_output_array(offset);
|
||||||
|
|
||||||
|
cu::KernelArgs args;
|
||||||
|
args.append(indices);
|
||||||
|
args.append(offset);
|
||||||
|
args.append_ndim(strides);
|
||||||
|
args.append(axes);
|
||||||
|
|
||||||
|
auto kernel = mod.get_kernel(kernel_name);
|
||||||
|
encoder.add_kernel_node(kernel, 1, 1, 0, args.args());
|
||||||
|
|
||||||
|
return offset;
|
||||||
|
}
|
||||||
|
|
||||||
} // namespace mlx::core
|
} // namespace mlx::core
|
||||||
|
|||||||
@@ -1,6 +1,5 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
#include "mlx/backend/common/utils.h"
|
|
||||||
#include "mlx/backend/cuda/device.h"
|
#include "mlx/backend/cuda/device.h"
|
||||||
#include "mlx/backend/cuda/kernel_utils.cuh"
|
#include "mlx/backend/cuda/kernel_utils.cuh"
|
||||||
#include "mlx/backend/gpu/copy.h"
|
#include "mlx/backend/gpu/copy.h"
|
||||||
@@ -10,7 +9,7 @@
|
|||||||
#include <nvtx3/nvtx3.hpp>
|
#include <nvtx3/nvtx3.hpp>
|
||||||
#include <thrust/device_ptr.h>
|
#include <thrust/device_ptr.h>
|
||||||
#include <thrust/transform.h>
|
#include <thrust/transform.h>
|
||||||
#include <cub/device/device_segmented_sort.cuh>
|
#include <cub/device/device_segmented_radix_sort.cuh>
|
||||||
|
|
||||||
#include <cassert>
|
#include <cassert>
|
||||||
|
|
||||||
@@ -80,7 +79,7 @@ void gpu_sort(const Stream& s, array in, array& out_, int axis, bool argsort) {
|
|||||||
encoder.add_temporary(discard);
|
encoder.add_temporary(discard);
|
||||||
|
|
||||||
size_t size;
|
size_t size;
|
||||||
CHECK_CUDA_ERROR(cub::DeviceSegmentedSort::StableSortPairs(
|
CHECK_CUDA_ERROR(cub::DeviceSegmentedRadixSort::SortPairs(
|
||||||
nullptr,
|
nullptr,
|
||||||
size,
|
size,
|
||||||
in.data<Type>(),
|
in.data<Type>(),
|
||||||
@@ -91,6 +90,8 @@ void gpu_sort(const Stream& s, array in, array& out_, int axis, bool argsort) {
|
|||||||
in.data_size() / nsort,
|
in.data_size() / nsort,
|
||||||
offsets,
|
offsets,
|
||||||
offsets + 1,
|
offsets + 1,
|
||||||
|
0,
|
||||||
|
sizeof(Type) * 8,
|
||||||
stream));
|
stream));
|
||||||
|
|
||||||
array temp(allocator::malloc(size), {static_cast<int>(size)}, uint8);
|
array temp(allocator::malloc(size), {static_cast<int>(size)}, uint8);
|
||||||
@@ -105,7 +106,7 @@ void gpu_sort(const Stream& s, array in, array& out_, int axis, bool argsort) {
|
|||||||
thrust::device_pointer_cast(indices.data<uint32_t>()),
|
thrust::device_pointer_cast(indices.data<uint32_t>()),
|
||||||
ModOp<uint32_t>{static_cast<uint32_t>(nsort)});
|
ModOp<uint32_t>{static_cast<uint32_t>(nsort)});
|
||||||
|
|
||||||
CHECK_CUDA_ERROR(cub::DeviceSegmentedSort::StableSortPairs(
|
CHECK_CUDA_ERROR(cub::DeviceSegmentedRadixSort::SortPairs(
|
||||||
temp.data<void>(),
|
temp.data<void>(),
|
||||||
size,
|
size,
|
||||||
in.data<Type>(),
|
in.data<Type>(),
|
||||||
@@ -116,10 +117,12 @@ void gpu_sort(const Stream& s, array in, array& out_, int axis, bool argsort) {
|
|||||||
in.data_size() / nsort,
|
in.data_size() / nsort,
|
||||||
offsets,
|
offsets,
|
||||||
offsets + 1,
|
offsets + 1,
|
||||||
|
0,
|
||||||
|
sizeof(Type) * 8,
|
||||||
stream));
|
stream));
|
||||||
} else {
|
} else {
|
||||||
size_t size;
|
size_t size;
|
||||||
CHECK_CUDA_ERROR(cub::DeviceSegmentedSort::StableSortKeys(
|
CHECK_CUDA_ERROR(cub::DeviceSegmentedRadixSort::SortKeys(
|
||||||
nullptr,
|
nullptr,
|
||||||
size,
|
size,
|
||||||
in.data<Type>(),
|
in.data<Type>(),
|
||||||
@@ -128,6 +131,8 @@ void gpu_sort(const Stream& s, array in, array& out_, int axis, bool argsort) {
|
|||||||
in.data_size() / nsort,
|
in.data_size() / nsort,
|
||||||
offsets,
|
offsets,
|
||||||
offsets + 1,
|
offsets + 1,
|
||||||
|
0,
|
||||||
|
sizeof(Type) * 8,
|
||||||
stream));
|
stream));
|
||||||
|
|
||||||
array temp(allocator::malloc(size), {static_cast<int>(size)}, uint8);
|
array temp(allocator::malloc(size), {static_cast<int>(size)}, uint8);
|
||||||
@@ -135,7 +140,7 @@ void gpu_sort(const Stream& s, array in, array& out_, int axis, bool argsort) {
|
|||||||
|
|
||||||
// Start capturing after allocations
|
// Start capturing after allocations
|
||||||
auto capture = encoder.capture_context();
|
auto capture = encoder.capture_context();
|
||||||
CHECK_CUDA_ERROR(cub::DeviceSegmentedSort::StableSortKeys(
|
CHECK_CUDA_ERROR(cub::DeviceSegmentedRadixSort::SortKeys(
|
||||||
temp.data<void>(),
|
temp.data<void>(),
|
||||||
size,
|
size,
|
||||||
in.data<Type>(),
|
in.data<Type>(),
|
||||||
@@ -144,6 +149,8 @@ void gpu_sort(const Stream& s, array in, array& out_, int axis, bool argsort) {
|
|||||||
in.data_size() / nsort,
|
in.data_size() / nsort,
|
||||||
offsets,
|
offsets,
|
||||||
offsets + 1,
|
offsets + 1,
|
||||||
|
0,
|
||||||
|
sizeof(Type) * 8,
|
||||||
stream));
|
stream));
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
@@ -39,52 +39,98 @@ ternary_v(const bool* a, const T* b, const T* c, T* out, IdxT size) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename Op, typename T, typename IdxT, int NDIM>
|
template <typename Op, typename T, typename IdxT, int NDIM, int N_READS>
|
||||||
__global__ void ternary_g_nd(
|
__global__ void ternary_g_nd(
|
||||||
const bool* a,
|
const bool* a,
|
||||||
const T* b,
|
const T* b,
|
||||||
const T* c,
|
const T* c,
|
||||||
T* out,
|
T* out,
|
||||||
IdxT size,
|
IdxT size_rest,
|
||||||
const __grid_constant__ cuda::std::array<int32_t, NDIM> shape,
|
const __grid_constant__ cuda::std::array<int32_t, NDIM> shape,
|
||||||
const __grid_constant__ cuda::std::array<int64_t, NDIM> a_strides,
|
const __grid_constant__ cuda::std::array<int64_t, NDIM> a_strides,
|
||||||
const __grid_constant__ cuda::std::array<int64_t, NDIM> b_strides,
|
const __grid_constant__ cuda::std::array<int64_t, NDIM> b_strides,
|
||||||
const __grid_constant__ cuda::std::array<int64_t, NDIM> c_strides) {
|
const __grid_constant__ cuda::std::array<int64_t, NDIM> c_strides) {
|
||||||
IdxT index = cg::this_grid().thread_rank();
|
auto block = cg::this_thread_block();
|
||||||
if (index < size) {
|
auto grid = cg::this_grid();
|
||||||
|
IdxT index_rest =
|
||||||
|
grid.block_index().y * block.dim_threads().y + block.thread_index().y;
|
||||||
|
if (index_rest >= size_rest) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
auto shape_x = shape[NDIM - 1];
|
||||||
|
auto a_stride_x = a_strides[NDIM - 1];
|
||||||
|
auto b_stride_x = b_strides[NDIM - 1];
|
||||||
|
auto c_stride_x = c_strides[NDIM - 1];
|
||||||
|
IdxT index_x =
|
||||||
|
grid.block_index().x * block.dim_threads().x + block.thread_index().x;
|
||||||
auto [a_idx, b_idx, c_idx] = elem_to_loc_nd<NDIM>(
|
auto [a_idx, b_idx, c_idx] = elem_to_loc_nd<NDIM>(
|
||||||
index,
|
index_rest * shape_x,
|
||||||
shape.data(),
|
shape.data(),
|
||||||
a_strides.data(),
|
a_strides.data(),
|
||||||
b_strides.data(),
|
b_strides.data(),
|
||||||
c_strides.data());
|
c_strides.data());
|
||||||
out[index] = Op{}(a[a_idx], b[b_idx], c[c_idx]);
|
auto a_vec =
|
||||||
|
load_vector<N_READS>(a + a_idx, index_x, shape_x, a_stride_x, false);
|
||||||
|
auto b_vec =
|
||||||
|
load_vector<N_READS>(b + b_idx, index_x, shape_x, b_stride_x, T(0));
|
||||||
|
auto c_vec =
|
||||||
|
load_vector<N_READS>(c + c_idx, index_x, shape_x, c_stride_x, T(0));
|
||||||
|
|
||||||
|
AlignedVector<T, N_READS> out_vec;
|
||||||
|
#pragma unroll
|
||||||
|
for (int i = 0; i < N_READS; ++i) {
|
||||||
|
out_vec[i] = Op{}(a_vec[i], b_vec[i], c_vec[i]);
|
||||||
}
|
}
|
||||||
|
store_vector(out + shape_x * index_rest, index_x, out_vec, shape_x);
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename Op, typename T, typename IdxT>
|
template <typename Op, typename T, typename IdxT, int N_READS>
|
||||||
__global__ void ternary_g(
|
__global__ void ternary_g(
|
||||||
const bool* a,
|
const bool* a,
|
||||||
const T* b,
|
const T* b,
|
||||||
const T* c,
|
const T* c,
|
||||||
T* out,
|
T* out,
|
||||||
IdxT size,
|
IdxT size_rest,
|
||||||
const __grid_constant__ Shape shape,
|
const __grid_constant__ Shape shape,
|
||||||
const __grid_constant__ Strides a_strides,
|
const __grid_constant__ Strides a_strides,
|
||||||
const __grid_constant__ Strides b_strides,
|
const __grid_constant__ Strides b_strides,
|
||||||
const __grid_constant__ Strides c_strides,
|
const __grid_constant__ Strides c_strides,
|
||||||
int ndim) {
|
int ndim) {
|
||||||
IdxT index = cg::this_grid().thread_rank();
|
auto block = cg::this_thread_block();
|
||||||
if (index < size) {
|
auto grid = cg::this_grid();
|
||||||
|
IdxT index_rest =
|
||||||
|
grid.block_index().y * block.dim_threads().y + block.thread_index().y;
|
||||||
|
if (index_rest >= size_rest) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
auto shape_x = shape[ndim - 1];
|
||||||
|
auto a_stride_x = a_strides[ndim - 1];
|
||||||
|
auto b_stride_x = b_strides[ndim - 1];
|
||||||
|
auto c_stride_x = c_strides[ndim - 1];
|
||||||
|
IdxT index_x =
|
||||||
|
grid.block_index().x * block.dim_threads().x + block.thread_index().x;
|
||||||
auto [a_idx, b_idx, c_idx] = elem_to_loc(
|
auto [a_idx, b_idx, c_idx] = elem_to_loc(
|
||||||
index,
|
index_rest * shape_x,
|
||||||
shape.data(),
|
shape.data(),
|
||||||
a_strides.data(),
|
a_strides.data(),
|
||||||
b_strides.data(),
|
b_strides.data(),
|
||||||
c_strides.data(),
|
c_strides.data(),
|
||||||
ndim);
|
ndim);
|
||||||
out[index] = Op{}(a[a_idx], b[b_idx], c[c_idx]);
|
auto a_vec =
|
||||||
|
load_vector<N_READS>(a + a_idx, index_x, shape_x, a_stride_x, false);
|
||||||
|
auto b_vec =
|
||||||
|
load_vector<N_READS>(b + b_idx, index_x, shape_x, b_stride_x, T(0));
|
||||||
|
auto c_vec =
|
||||||
|
load_vector<N_READS>(c + c_idx, index_x, shape_x, c_stride_x, T(0));
|
||||||
|
|
||||||
|
AlignedVector<T, N_READS> out_vec;
|
||||||
|
#pragma unroll
|
||||||
|
for (int i = 0; i < N_READS; ++i) {
|
||||||
|
out_vec[i] = Op{}(a_vec[i], b_vec[i], c_vec[i]);
|
||||||
}
|
}
|
||||||
|
store_vector(out + shape_x * index_rest, index_x, out_vec, shape_x);
|
||||||
}
|
}
|
||||||
|
|
||||||
} // namespace cu
|
} // namespace cu
|
||||||
@@ -123,36 +169,55 @@ void ternary_op_gpu_inplace(
|
|||||||
auto& b_strides = strides[1];
|
auto& b_strides = strides[1];
|
||||||
auto& c_strides = strides[2];
|
auto& c_strides = strides[2];
|
||||||
int ndim = shape.size();
|
int ndim = shape.size();
|
||||||
|
int work_per_thread = 1;
|
||||||
|
auto dim0 = ndim > 0 ? shape.back() : 1;
|
||||||
|
auto rest = out.size() / dim0;
|
||||||
|
if (dim0 >= 4) {
|
||||||
|
work_per_thread = 4;
|
||||||
|
}
|
||||||
|
dim0 = (dim0 + work_per_thread - 1) / work_per_thread;
|
||||||
|
auto block_dims = get_block_dims(dim0, rest, 1);
|
||||||
|
uint32_t num_blocks_x = cuda::ceil_div(dim0, block_dims.x);
|
||||||
|
uint32_t num_blocks_y = cuda::ceil_div(rest, block_dims.y);
|
||||||
|
|
||||||
if (ndim <= 3) {
|
if (ndim <= 3) {
|
||||||
dispatch_1_2_3(ndim, [&](auto dims_constant) {
|
dispatch_1_2_3(ndim, [&](auto dims_constant) {
|
||||||
auto [num_blocks, block_dims] = get_launch_args(out, large());
|
auto kernel =
|
||||||
|
cu::ternary_g_nd<Op, DType, IdxT, dims_constant(), 1>;
|
||||||
|
if (work_per_thread == 4) {
|
||||||
|
kernel =
|
||||||
|
cu::ternary_g_nd<Op, DType, IdxT, dims_constant(), 4>;
|
||||||
|
}
|
||||||
encoder.add_kernel_node(
|
encoder.add_kernel_node(
|
||||||
cu::ternary_g_nd<Op, DType, IdxT, dims_constant()>,
|
kernel,
|
||||||
num_blocks,
|
{num_blocks_x, num_blocks_y},
|
||||||
block_dims,
|
block_dims,
|
||||||
0,
|
0,
|
||||||
a.data<bool>(),
|
a.data<bool>(),
|
||||||
b.data<DType>(),
|
b.data<DType>(),
|
||||||
c.data<DType>(),
|
c.data<DType>(),
|
||||||
out.data<DType>(),
|
out.data<DType>(),
|
||||||
out.size(),
|
rest,
|
||||||
const_param<dims_constant()>(shape),
|
const_param<dims_constant()>(shape),
|
||||||
const_param<dims_constant()>(a_strides),
|
const_param<dims_constant()>(a_strides),
|
||||||
const_param<dims_constant()>(b_strides),
|
const_param<dims_constant()>(b_strides),
|
||||||
const_param<dims_constant()>(c_strides));
|
const_param<dims_constant()>(c_strides));
|
||||||
});
|
});
|
||||||
} else {
|
} else {
|
||||||
auto [num_blocks, block_dims] = get_launch_args(out, large());
|
auto kernel = cu::ternary_g<Op, DType, IdxT, 1>;
|
||||||
|
if (work_per_thread == 4) {
|
||||||
|
kernel = cu::ternary_g<Op, DType, IdxT, 4>;
|
||||||
|
}
|
||||||
encoder.add_kernel_node(
|
encoder.add_kernel_node(
|
||||||
cu::ternary_g<Op, DType, IdxT>,
|
kernel,
|
||||||
num_blocks,
|
{num_blocks_x, num_blocks_y},
|
||||||
block_dims,
|
block_dims,
|
||||||
0,
|
0,
|
||||||
a.data<bool>(),
|
a.data<bool>(),
|
||||||
b.data<DType>(),
|
b.data<DType>(),
|
||||||
c.data<DType>(),
|
c.data<DType>(),
|
||||||
out.data<DType>(),
|
out.data<DType>(),
|
||||||
out.data_size(),
|
rest,
|
||||||
const_param(shape),
|
const_param(shape),
|
||||||
const_param(a_strides),
|
const_param(a_strides),
|
||||||
const_param(b_strides),
|
const_param(b_strides),
|
||||||
|
|||||||
@@ -37,19 +37,36 @@ __global__ void unary_v(const In* in, Out* out, IdxT size) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename Op, typename In, typename Out, typename IdxT>
|
template <typename Op, typename In, typename Out, typename IdxT, int N_READS>
|
||||||
__global__ void unary_g(
|
__global__ void unary_g(
|
||||||
const In* in,
|
const In* in,
|
||||||
Out* out,
|
Out* out,
|
||||||
IdxT size,
|
IdxT size_rest,
|
||||||
const __grid_constant__ Shape shape,
|
const __grid_constant__ Shape shape,
|
||||||
const __grid_constant__ Strides strides,
|
const __grid_constant__ Strides strides,
|
||||||
int ndim) {
|
int ndim) {
|
||||||
IdxT index = cg::this_grid().thread_rank();
|
auto block = cg::this_thread_block();
|
||||||
if (index < size) {
|
auto grid = cg::this_grid();
|
||||||
auto idx = elem_to_loc(index, shape.data(), strides.data(), ndim);
|
IdxT index_rest =
|
||||||
out[index] = Op{}(in[idx]);
|
grid.block_index().y * block.dim_threads().y + block.thread_index().y;
|
||||||
|
if (index_rest >= size_rest) {
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
auto shape_x = shape[ndim - 1];
|
||||||
|
auto stride_x = strides[ndim - 1];
|
||||||
|
IdxT index_x =
|
||||||
|
grid.block_index().x * block.dim_threads().x + block.thread_index().x;
|
||||||
|
auto idx =
|
||||||
|
elem_to_loc(index_rest * shape_x, shape.data(), strides.data(), ndim);
|
||||||
|
auto in_vec =
|
||||||
|
load_vector<N_READS>(in + idx, index_x, shape_x, stride_x, In(0));
|
||||||
|
AlignedVector<Out, N_READS> out_vec;
|
||||||
|
#pragma unroll
|
||||||
|
for (int i = 0; i < N_READS; ++i) {
|
||||||
|
out_vec[i] = Op{}(in_vec[i]);
|
||||||
|
}
|
||||||
|
store_vector(out + shape_x * index_rest, index_x, out_vec, shape_x);
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename Op, typename In, typename Out>
|
template <typename Op, typename In, typename Out>
|
||||||
@@ -127,8 +144,7 @@ void unary_op_gpu_inplace(
|
|||||||
using OutType = cuda_type_t<CTYPE_OUT>;
|
using OutType = cuda_type_t<CTYPE_OUT>;
|
||||||
if (contig) {
|
if (contig) {
|
||||||
using IdxT = std::conditional_t<large(), int64_t, uint32_t>;
|
using IdxT = std::conditional_t<large(), int64_t, uint32_t>;
|
||||||
// TODO: Choose optimized value based on type size.
|
constexpr int N_READS = 16 / sizeof(OutType);
|
||||||
constexpr int N_READS = 4;
|
|
||||||
auto [num_blocks, block_dims] = get_launch_args(
|
auto [num_blocks, block_dims] = get_launch_args(
|
||||||
out.data_size(), out.shape(), out.strides(), large, N_READS);
|
out.data_size(), out.shape(), out.strides(), large, N_READS);
|
||||||
encoder.add_kernel_node(
|
encoder.add_kernel_node(
|
||||||
@@ -142,18 +158,30 @@ void unary_op_gpu_inplace(
|
|||||||
} else {
|
} else {
|
||||||
using IdxT = std::conditional_t<large(), int64_t, int32_t>;
|
using IdxT = std::conditional_t<large(), int64_t, int32_t>;
|
||||||
auto [shape, strides] = collapse_contiguous_dims(in);
|
auto [shape, strides] = collapse_contiguous_dims(in);
|
||||||
auto [num_blocks, block_dims] = get_launch_args(out, large);
|
auto ndim = shape.size();
|
||||||
|
int work_per_thread = 1;
|
||||||
|
auto kernel = cu::unary_g<Op, InType, OutType, IdxT, 1>;
|
||||||
|
auto dim0 = ndim > 0 ? shape.back() : 1;
|
||||||
|
auto rest = out.size() / dim0;
|
||||||
|
if (dim0 >= 4) {
|
||||||
|
kernel = cu::unary_g<Op, InType, OutType, IdxT, 4>;
|
||||||
|
work_per_thread = 4;
|
||||||
|
}
|
||||||
|
dim0 = (dim0 + work_per_thread - 1) / work_per_thread;
|
||||||
|
auto block_dims = get_block_dims(dim0, rest, 1);
|
||||||
|
uint32_t num_blocks_x = cuda::ceil_div(dim0, block_dims.x);
|
||||||
|
uint32_t num_blocks_y = cuda::ceil_div(rest, block_dims.y);
|
||||||
encoder.add_kernel_node(
|
encoder.add_kernel_node(
|
||||||
cu::unary_g<Op, InType, OutType, IdxT>,
|
kernel,
|
||||||
num_blocks,
|
{num_blocks_x, num_blocks_y},
|
||||||
block_dims,
|
block_dims,
|
||||||
0,
|
0,
|
||||||
in.data<InType>(),
|
in.data<InType>(),
|
||||||
out.data<OutType>(),
|
out.data<OutType>(),
|
||||||
out.data_size(),
|
rest,
|
||||||
const_param(shape),
|
const_param(shape),
|
||||||
const_param(strides),
|
const_param(strides),
|
||||||
shape.size());
|
ndim);
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
} else {
|
} else {
|
||||||
|
|||||||
34
mlx/backend/cuda/unary/CMakeLists.txt
Normal file
34
mlx/backend/cuda/unary/CMakeLists.txt
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
target_sources(
|
||||||
|
mlx
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/abs.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/arccos.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/arccosh.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/arcsin.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/arcsinh.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/arctan.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/arctanh.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/bitwise_invert.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/ceil.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/conjugate.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/cos.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/cosh.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/erf.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/erf_inv.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/exp.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/expm1.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/floor.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/imag.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/log.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/log1p.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/logical_not.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/negative.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/real.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/round.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/sigmoid.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/sign.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/sin.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/sinh.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/sqrt.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/square.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/tan.cu
|
||||||
|
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/tanh.cu)
|
||||||
7
mlx/backend/cuda/unary/abs.cu
Normal file
7
mlx/backend/cuda/unary/abs.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/unary/unary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
UNARY_GPU(Abs)
|
||||||
|
} // namespace mlx::core
|
||||||
7
mlx/backend/cuda/unary/arccos.cu
Normal file
7
mlx/backend/cuda/unary/arccos.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/unary/unary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
UNARY_GPU(ArcCos)
|
||||||
|
} // namespace mlx::core
|
||||||
7
mlx/backend/cuda/unary/arccosh.cu
Normal file
7
mlx/backend/cuda/unary/arccosh.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/unary/unary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
UNARY_GPU(ArcCosh)
|
||||||
|
} // namespace mlx::core
|
||||||
7
mlx/backend/cuda/unary/arcsin.cu
Normal file
7
mlx/backend/cuda/unary/arcsin.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/unary/unary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
UNARY_GPU(ArcSin)
|
||||||
|
} // namespace mlx::core
|
||||||
7
mlx/backend/cuda/unary/arcsinh.cu
Normal file
7
mlx/backend/cuda/unary/arcsinh.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/unary/unary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
UNARY_GPU(ArcSinh)
|
||||||
|
} // namespace mlx::core
|
||||||
7
mlx/backend/cuda/unary/arctan.cu
Normal file
7
mlx/backend/cuda/unary/arctan.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/unary/unary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
UNARY_GPU(ArcTan)
|
||||||
|
} // namespace mlx::core
|
||||||
7
mlx/backend/cuda/unary/arctanh.cu
Normal file
7
mlx/backend/cuda/unary/arctanh.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/unary/unary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
UNARY_GPU(ArcTanh)
|
||||||
|
} // namespace mlx::core
|
||||||
7
mlx/backend/cuda/unary/bitwise_invert.cu
Normal file
7
mlx/backend/cuda/unary/bitwise_invert.cu
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
|
#include "mlx/backend/cuda/unary/unary.cuh"
|
||||||
|
|
||||||
|
namespace mlx::core {
|
||||||
|
UNARY_GPU(BitwiseInvert)
|
||||||
|
} // namespace mlx::core
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user