mirror of https://github.com/ml-explore/mlx.git
synced 2025-12-16 01:49:05 +08:00

Compare commits: v0.29.0...jagrit06/c (1 commit, SHA 400f8457ea)
@@ -18,14 +18,13 @@ jobs:
         type: boolean
         default: false
     macos:
-      xcode: "26.0.0"
-    resource_class: m4pro.medium
+      xcode: "16.2.0"
+    resource_class: m2pro.medium
     steps:
       - checkout
       - run:
          name: Install
          command: |
-           xcodebuild -downloadComponent MetalToolchain
            brew install python@3.9
            brew install doxygen
            python3.9 -m venv env
@@ -90,7 +89,6 @@ jobs:
          command: |
            uv venv
            uv pip install cmake
-           DEBUG=1 CMAKE_ARGS="-DCMAKE_COMPILE_WARNING_AS_ERROR=ON" \
            uv pip install -e ".[dev]" -v
      - run:
          name: Generate package stubs
@@ -120,7 +118,7 @@ jobs:
    parameters:
      xcode_version:
        type: string
-       default: "26.0.0"
+       default: "16.2.0"
      macosx_deployment_target:
        type: string
        default: ""
@@ -128,13 +126,12 @@ jobs:
      xcode: << parameters.xcode_version >>
    environment:
      MACOSX_DEPLOYMENT_TARGET: << parameters.macosx_deployment_target >>
-   resource_class: m4pro.medium
+   resource_class: m2pro.medium
    steps:
      - checkout
      - run:
          name: Install dependencies
          command: |
-           xcodebuild -downloadComponent MetalToolchain
            HOMEBREW_NO_AUTO_UPDATE=1 HOMEBREW_NO_INSTALL_CLEANUP=1 \
              brew install openmpi uv
      - run:
@@ -199,7 +196,7 @@ jobs:
          name: Run Python tests with JIT
          command: |
            CMAKE_ARGS="-DMLX_METAL_JIT=ON" \
-             uv pip install -e . -v
+             uv pip install -e .
            LOW_MEMORY=1 DEVICE=gpu METAL_DEVICE_WRAPPER_TYPE=1 \
              METAL_DEBUG_ERROR_MODE=0 \
              uv run --no-project python -m xmlrunner discover \
@@ -225,7 +222,6 @@ jobs:
            sudo apt-get update
            sudo apt-get install libcudnn9-dev-cuda-12
            sudo apt-get install libblas-dev liblapack-dev liblapacke-dev
-           sudo apt-get install libnccl2 libnccl-dev
            curl -sL https://github.com/ccache/ccache/releases/download/v4.11.3/ccache-4.11.3-linux-x86_64.tar.xz | tar xJf -
            sudo mv ccache-4.11.3-linux-x86_64/ccache /usr/bin/ccache
            rm -rf ccache-4.11.3-linux-x86_64
@@ -234,8 +230,7 @@ jobs:
          name: Install Python package
          command: |
            uv venv
-           uv pip install cmake
-           DEBUG=1 CMAKE_ARGS="-DMLX_BUILD_CUDA=ON -DCMAKE_COMPILE_WARNING_AS_ERROR=ON -DCMAKE_CUDA_COMPILER=`which nvcc`" \
+           CMAKE_ARGS="-DMLX_BUILD_CUDA=ON -DCMAKE_CUDA_COMPILER=`which nvcc`" \
              uv pip install -e ".[dev]" -v
      - run:
          name: Run Python tests
@@ -243,18 +238,6 @@ jobs:
            source .venv/bin/activate
            LOW_MEMORY=1 DEVICE=cpu python -m unittest discover python/tests -v
            LOW_MEMORY=1 DEVICE=gpu python -m tests discover python/tests -v
-     - run:
-         name: Build CPP only
-         command: |
-           source .venv/bin/activate
-           cmake . -B build \
-             -DMLX_BUILD_CUDA=ON \
-             -DCMAKE_CUDA_COMPILER=`which nvcc` \
-             -DCMAKE_BUILD_TYPE=DEBUG
-           cmake --build build -j `nproc`
-     - run:
-         name: Run CPP tests
-         command: ./build/tests/tests -sfe="*fft_tests.cpp,*linalg_tests.cpp"
      - run:
          name: CCache report
          command: |
@@ -274,7 +257,7 @@ jobs:
        default: "3.9"
      xcode_version:
        type: string
-       default: "26.0.0"
+       default: "16.2.0"
      build_env:
        type: string
        default: ""
@@ -283,7 +266,7 @@ jobs:
        default: ""
    macos:
      xcode: << parameters.xcode_version >>
-   resource_class: m4pro.medium
+   resource_class: m2pro.medium
    environment:
      MACOSX_DEPLOYMENT_TARGET: << parameters.macosx_deployment_target >>
    steps:
@@ -291,15 +274,11 @@ jobs:
      - run:
          name: Install dependencies
          command: |
-           xcodebuild -downloadComponent MetalToolchain
-           mkdir -p ~/miniconda3
-           curl https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-arm64.sh -o ~/miniconda3/miniconda.sh
-           bash ~/miniconda3/miniconda.sh -b -u -p ~/miniconda3
-           rm ~/miniconda3/miniconda.sh
-           source ~/miniconda3/bin/activate
-           conda init --all
-           conda create -n env python=<< parameters.python_version >> -y
-           conda activate env
+           brew install python@<< parameters.python_version >>
+           brew install openmpi
+           python<< parameters.python_version >> -m venv env
+           source env/bin/activate
+           pip install --upgrade pip
            pip install --upgrade cmake
            pip install nanobind==2.4.0
            pip install --upgrade setuptools
@@ -309,19 +288,19 @@ jobs:
      - run:
          name: Install Python package
          command: |
-           conda activate env
+           source env/bin/activate
            env -u MACOSX_DEPLOYMENT_TARGET DEV_RELEASE=1 \
              pip install . -v
      - run:
          name: Generate package stubs
          command: |
-           conda activate env
+           source env/bin/activate
            pip install typing_extensions
            python setup.py generate_stubs
      - run:
          name: Build Python package
          command: |
-           conda activate env
+           source env/bin/activate
            python setup.py clean --all
            << parameters.build_env >> MLX_BUILD_STAGE=1 python -m build -w
      - when:
@@ -331,7 +310,7 @@ jobs:
          - run:
              name: Build common package
              command: |
-               conda activate env
+               source env/bin/activate
                python setup.py clean --all
                << parameters.build_env >> MLX_BUILD_STAGE=2 python -m build -w
      - when:
@@ -340,7 +319,7 @@ jobs:
          - run:
              name: Upload package
              command: |
-               conda activate env
+               source env/bin/activate
                twine upload dist/*
      - store_artifacts:
          path: dist/
@@ -413,7 +392,7 @@ jobs:
        default: ""
    machine:
      image: ubuntu-2204:current
-   resource_class: xlarge
+   resource_class: large
    steps:
      - checkout
      - run:
@@ -460,7 +439,7 @@ workflows:
      - mac_build_and_test:
          matrix:
            parameters:
-             macosx_deployment_target: ["13.5", "15.0"]
+             macosx_deployment_target: ["13.5", "14.0"]
      - linux_build_and_test
      - cuda_build_and_test:
          matrix:
@@ -485,7 +464,68 @@ workflows:
              python_version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
              macosx_deployment_target: ["13.5", "14.0", "15.0"]
              build_env: ["PYPI_RELEASE=1"]
-             xcode_version: ["26.0.0"]
+             xcode_version: ["16.2.0", "15.0.0"]
+           exclude:
+             - macosx_deployment_target: "13.5"
+               xcode_version: "16.2.0"
+               python_version: "3.9"
+               build_env: "PYPI_RELEASE=1"
+             - macosx_deployment_target: "13.5"
+               xcode_version: "16.2.0"
+               python_version: "3.10"
+               build_env: "PYPI_RELEASE=1"
+             - macosx_deployment_target: "13.5"
+               xcode_version: "16.2.0"
+               python_version: "3.11"
+               build_env: "PYPI_RELEASE=1"
+             - macosx_deployment_target: "13.5"
+               xcode_version: "16.2.0"
+               python_version: "3.12"
+               build_env: "PYPI_RELEASE=1"
+             - macosx_deployment_target: "13.5"
+               xcode_version: "16.2.0"
+               python_version: "3.13"
+               build_env: "PYPI_RELEASE=1"
+             - macosx_deployment_target: "14.0"
+               xcode_version: "15.0.0"
+               python_version: "3.9"
+               build_env: "PYPI_RELEASE=1"
+             - macosx_deployment_target: "14.0"
+               xcode_version: "15.0.0"
+               python_version: "3.10"
+               build_env: "PYPI_RELEASE=1"
+             - macosx_deployment_target: "14.0"
+               xcode_version: "15.0.0"
+               python_version: "3.11"
+               build_env: "PYPI_RELEASE=1"
+             - macosx_deployment_target: "14.0"
+               xcode_version: "15.0.0"
+               python_version: "3.12"
+               build_env: "PYPI_RELEASE=1"
+             - macosx_deployment_target: "14.0"
+               xcode_version: "15.0.0"
+               python_version: "3.13"
+               build_env: "PYPI_RELEASE=1"
+             - macosx_deployment_target: "15.0"
+               xcode_version: "15.0.0"
+               python_version: "3.9"
+               build_env: "PYPI_RELEASE=1"
+             - macosx_deployment_target: "15.0"
+               xcode_version: "15.0.0"
+               python_version: "3.10"
+               build_env: "PYPI_RELEASE=1"
+             - macosx_deployment_target: "15.0"
+               xcode_version: "15.0.0"
+               python_version: "3.11"
+               build_env: "PYPI_RELEASE=1"
+             - macosx_deployment_target: "15.0"
+               xcode_version: "15.0.0"
+               python_version: "3.12"
+               build_env: "PYPI_RELEASE=1"
+             - macosx_deployment_target: "15.0"
+               xcode_version: "15.0.0"
+               python_version: "3.13"
+               build_env: "PYPI_RELEASE=1"
      - build_documentation:
          filters:
            tags:
@@ -527,7 +567,7 @@ workflows:
          requires: [ hold ]
          matrix:
            parameters:
-             macosx_deployment_target: ["13.5", "15.0"]
+             macosx_deployment_target: ["13.5", "14.0"]
      - linux_build_and_test:
          requires: [ hold ]
      - cuda_build_and_test:
@@ -546,7 +586,53 @@ workflows:
            parameters:
              python_version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
              macosx_deployment_target: ["13.5", "14.0", "15.0"]
-             xcode_version: ["26.0.0"]
+             xcode_version: ["16.2.0", "15.0.0"]
+           exclude:
+             - macosx_deployment_target: "13.5"
+               xcode_version: "16.2.0"
+               python_version: "3.9"
+             - macosx_deployment_target: "13.5"
+               xcode_version: "16.2.0"
+               python_version: "3.10"
+             - macosx_deployment_target: "13.5"
+               xcode_version: "16.2.0"
+               python_version: "3.11"
+             - macosx_deployment_target: "13.5"
+               xcode_version: "16.2.0"
+               python_version: "3.12"
+             - macosx_deployment_target: "13.5"
+               xcode_version: "16.2.0"
+               python_version: "3.13"
+             - macosx_deployment_target: "14.0"
+               xcode_version: "15.0.0"
+               python_version: "3.9"
+             - macosx_deployment_target: "14.0"
+               xcode_version: "15.0.0"
+               python_version: "3.10"
+             - macosx_deployment_target: "14.0"
+               xcode_version: "15.0.0"
+               python_version: "3.11"
+             - macosx_deployment_target: "14.0"
+               xcode_version: "15.0.0"
+               python_version: "3.12"
+             - macosx_deployment_target: "14.0"
+               xcode_version: "15.0.0"
+               python_version: "3.13"
+             - macosx_deployment_target: "15.0"
+               xcode_version: "15.0.0"
+               python_version: "3.9"
+             - macosx_deployment_target: "15.0"
+               xcode_version: "15.0.0"
+               python_version: "3.10"
+             - macosx_deployment_target: "15.0"
+               xcode_version: "15.0.0"
+               python_version: "3.11"
+             - macosx_deployment_target: "15.0"
+               xcode_version: "15.0.0"
+               python_version: "3.12"
+             - macosx_deployment_target: "15.0"
+               xcode_version: "15.0.0"
+               python_version: "3.13"
      - build_linux_release:
          matrix:
            parameters:
@@ -565,7 +651,68 @@ workflows:
              python_version: ["3.9", "3.10", "3.11", "3.12", "3.13"]
              macosx_deployment_target: ["13.5", "14.0", "15.0"]
              build_env: ["DEV_RELEASE=1"]
-             xcode_version: ["26.0.0"]
+             xcode_version: ["16.2.0", "15.0.0"]
+           exclude:
+             - macosx_deployment_target: "13.5"
+               xcode_version: "16.2.0"
+               python_version: "3.9"
+               build_env: "DEV_RELEASE=1"
+             - macosx_deployment_target: "13.5"
+               xcode_version: "16.2.0"
+               python_version: "3.10"
+               build_env: "DEV_RELEASE=1"
+             - macosx_deployment_target: "13.5"
+               xcode_version: "16.2.0"
+               python_version: "3.11"
+               build_env: "DEV_RELEASE=1"
+             - macosx_deployment_target: "13.5"
+               xcode_version: "16.2.0"
+               python_version: "3.12"
+               build_env: "DEV_RELEASE=1"
+             - macosx_deployment_target: "13.5"
+               xcode_version: "16.2.0"
+               python_version: "3.13"
+               build_env: "DEV_RELEASE=1"
+             - macosx_deployment_target: "14.0"
+               xcode_version: "15.0.0"
+               python_version: "3.9"
+               build_env: "DEV_RELEASE=1"
+             - macosx_deployment_target: "14.0"
+               xcode_version: "15.0.0"
+               python_version: "3.10"
+               build_env: "DEV_RELEASE=1"
+             - macosx_deployment_target: "14.0"
+               xcode_version: "15.0.0"
+               python_version: "3.11"
+               build_env: "DEV_RELEASE=1"
+             - macosx_deployment_target: "14.0"
+               xcode_version: "15.0.0"
+               python_version: "3.12"
+               build_env: "DEV_RELEASE=1"
+             - macosx_deployment_target: "14.0"
+               xcode_version: "15.0.0"
+               python_version: "3.13"
+               build_env: "DEV_RELEASE=1"
+             - macosx_deployment_target: "15.0"
+               xcode_version: "15.0.0"
+               python_version: "3.9"
+               build_env: "DEV_RELEASE=1"
+             - macosx_deployment_target: "15.0"
+               xcode_version: "15.0.0"
+               python_version: "3.10"
+               build_env: "DEV_RELEASE=1"
+             - macosx_deployment_target: "15.0"
+               xcode_version: "15.0.0"
+               python_version: "3.11"
+               build_env: "DEV_RELEASE=1"
+             - macosx_deployment_target: "15.0"
+               xcode_version: "15.0.0"
+               python_version: "3.12"
+               build_env: "DEV_RELEASE=1"
+             - macosx_deployment_target: "15.0"
+               xcode_version: "15.0.0"
+               python_version: "3.13"
+               build_env: "DEV_RELEASE=1"
      - build_linux_release:
          matrix:
            parameters:
@@ -25,11 +25,6 @@ MLX was developed with contributions from the following individuals:
 <img class="dark-light" src="https://contrib.rocks/image?repo=ml-explore/mlx&anon=0&columns=20&max=100&r=true" />
 </a>
 
-# Organizations
-
-MLX has received contributions from the following companies:
-- NVIDIA Corporation & Affiliates
-
 # Third-Party Software
 
 MLX leverages several third-party software, listed here together with
@@ -140,12 +140,6 @@ elseif(MLX_BUILD_METAL)
   target_link_libraries(mlx PUBLIC ${METAL_LIB} ${FOUNDATION_LIB} ${QUARTZ_LIB})
 endif()
 
-if(CMAKE_SYSTEM_NAME STREQUAL "Linux")
-  # With newer clang/gcc versions following libs are implicitly linked, but when
-  # building on old distributions they need to be explicitly listed.
-  target_link_libraries(mlx PRIVATE dl pthread)
-endif()
-
 if(WIN32)
   if(MSVC)
     # GGUF does not build with MSVC.
@@ -1,54 +0,0 @@
-# FindNCCL.cmake This module finds the NVIDIA NCCL library and its include
-# directories.
-
-set(NCCL_ROOT_DIR
-    $ENV{NCCL_ROOT_DIR}
-    CACHE PATH "Folder contains NVIDIA NCCL")
-
-find_path(
-  NCCL_INCLUDE_DIRS
-  NAMES nccl.h
-  HINTS ${NCCL_INCLUDE_DIR} ${NCCL_ROOT_DIR} ${NCCL_ROOT_DIR}/include
-        ${CUDA_TOOLKIT_ROOT_DIR}/include)
-
-if($ENV{USE_STATIC_NCCL})
-  message(
-    STATUS "USE_STATIC_NCCL detected. Linking against static NCCL library")
-  set(NCCL_LIBNAME "libnccl_static.a")
-else()
-  set(NCCL_LIBNAME "nccl")
-endif()
-
-find_library(
-  NCCL_LIBRARIES
-  NAMES ${NCCL_LIBNAME}
-  HINTS ${NCCL_LIB_DIR}
-        ${NCCL_ROOT_DIR}
-        ${NCCL_ROOT_DIR}/lib
-        ${NCCL_ROOT_DIR}/lib/x86_64-linux-gnu
-        ${NCCL_ROOT_DIR}/lib64
-        ${CUDA_TOOLKIT_ROOT_DIR}/lib
-        ${CUDA_TOOLKIT_ROOT_DIR}/lib64)
-
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(NCCL DEFAULT_MSG NCCL_INCLUDE_DIRS
-                                  NCCL_LIBRARIES)
-
-if(NCCL_FOUND)
-  set(NCCL_HEADER_FILE "${NCCL_INCLUDE_DIRS}/nccl.h")
-  message(
-    STATUS "Determining NCCL version from the header file: ${NCCL_HEADER_FILE}")
-  file(
-    STRINGS ${NCCL_HEADER_FILE} NCCL_MAJOR_VERSION_DEFINED
-    REGEX "^[ \t]*#define[ \t]+NCCL_MAJOR[ \t]+[0-9]+.*$"
-    LIMIT_COUNT 1)
-  if(NCCL_MAJOR_VERSION_DEFINED)
-    string(REGEX REPLACE "^[ \t]*#define[ \t]+NCCL_MAJOR[ \t]+" ""
-                         NCCL_MAJOR_VERSION ${NCCL_MAJOR_VERSION_DEFINED})
-    message(STATUS "NCCL_MAJOR_VERSION: ${NCCL_MAJOR_VERSION}")
-  endif()
-  message(
-    STATUS
-      "Found NCCL (include: ${NCCL_INCLUDE_DIRS}, library: ${NCCL_LIBRARIES})")
-  mark_as_advanced(NCCL_ROOT_DIR NCCL_INCLUDE_DIRS NCCL_LIBRARIES)
-endif()
@@ -127,8 +127,7 @@ relying on a copy from ``ensure_row_contiguous``:
        name="myexp_strided",
        input_names=["inp"],
        output_names=["out"],
-       source=source,
-       ensure_row_contiguous=False,
+       source=source
    )

    def exp_elementwise(a: mx.array):
@@ -139,6 +138,7 @@ relying on a copy from ``ensure_row_contiguous``:
            threadgroup=(256, 1, 1),
            output_shapes=[a.shape],
            output_dtypes=[a.dtype],
+           ensure_row_contiguous=False,
        )
        return outputs[0]
 
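Taken with the previous hunk, this moves ``ensure_row_contiguous`` from kernel construction to the call site. A minimal sketch of the resulting usage, assuming ``source`` holds the ``myexp_strided`` kernel body defined earlier on this docs page:

    import mlx.core as mx

    kernel = mx.fast.metal_kernel(
        name="myexp_strided",
        input_names=["inp"],
        output_names=["out"],
        source=source,  # kernel body from the surrounding docs
    )

    def exp_elementwise(a: mx.array):
        outputs = kernel(
            inputs=[a],
            grid=(a.size, 1, 1),
            threadgroup=(256, 1, 1),
            output_shapes=[a.shape],
            output_dtypes=[a.dtype],
            ensure_row_contiguous=False,  # now passed per call, not at construction
        )
        return outputs[0]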
@@ -70,7 +70,6 @@ are the CPU and GPU.
    python/fft
    python/linalg
    python/metal
-   python/cuda
    python/memory_management
    python/nn
    python/optimizers
@@ -271,7 +271,7 @@ and the CUDA toolkit. For example on Ubuntu, run the following:
     dpkg -i cuda-keyring_1.1-1_all.deb
     apt-get update -y
     apt-get -y install cuda-toolkit-12-9
-    apt-get install libblas-dev liblapack-dev liblapacke-dev libcudnn9-dev-cuda-12 -y
+    apt-get install libblas-dev liblapack-dev liblapacke-dev -y
 
 
 When building either the Python or C++ APIs make sure to pass the cmake flag
@@ -1,9 +0,0 @@
-CUDA
-=====
-
-.. currentmodule:: mlx.core.cuda
-
-.. autosummary::
-   :toctree: _autosummary
-
-   is_available
@@ -13,4 +13,3 @@ Fast
    rope
    scaled_dot_product_attention
    metal_kernel
-   cuda_kernel
@@ -225,7 +225,7 @@ In some cases returning updated state can be pretty inconvenient. Hence,
    def fun(x, y):
        z = x + y
        state.append(z)
-       return mx.exp(z)
+       return mx.exp(z), state
 
    fun(mx.array(1.0), mx.array(2.0))
    # Prints [array(3, dtype=float32)]
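The context line above ("returning updated state can be pretty inconvenient. Hence, ...") alludes to declaring the state up front instead of returning it. A hedged sketch of that alternative, assuming the ``inputs``/``outputs`` keywords of ``mx.compile`` described in these docs:

    from functools import partial

    import mlx.core as mx

    state = []

    # Declaring `state` as a compile output lets the function mutate it
    # as a side effect instead of returning it from every call.
    @partial(mx.compile, outputs=state)
    def fun(x, y):
        z = x + y
        state.append(z)
        return mx.exp(z)

    fun(mx.array(1.0), mx.array(2.0))
    print(state)  # [array(3, dtype=float32)]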
@@ -228,4 +228,31 @@ std::pair<Dims, Dims> get_grid_and_block_common(int dim0, int dim1, int dim2) {
       std::make_tuple(gx, gy, gz), std::make_tuple(bx, by, bz));
 }
 
+array swapaxes_in_eval(const array& x, int axis1, int axis2) {
+  int ndim = x.ndim();
+  if (axis1 < 0) {
+    axis1 += ndim;
+  }
+  if (axis2 < 0) {
+    axis2 += ndim;
+  }
+
+  auto shape = x.shape();
+  std::swap(shape[axis1], shape[axis2]);
+  auto strides = x.strides();
+  std::swap(strides[axis1], strides[axis2]);
+
+  auto [data_size, row_contiguous, col_contiguous] =
+      check_contiguity(shape, strides);
+  bool contiguous = data_size == x.data_size();
+
+  array out(std::move(shape), x.dtype(), nullptr, {});
+  out.copy_shared_buffer(
+      x,
+      std::move(strides),
+      {contiguous, row_contiguous, col_contiguous},
+      x.data_size());
+  return out;
+}
+
 } // namespace mlx::core
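The new swapaxes_in_eval transposes without touching data: it swaps two shape entries and the matching stride entries, recomputes the contiguity flags, and shares the original buffer. A numpy sketch of the same metadata-only trick:

    import numpy as np
    from numpy.lib.stride_tricks import as_strided

    def swapaxes_view(x: np.ndarray, axis1: int, axis2: int) -> np.ndarray:
        shape, strides = list(x.shape), list(x.strides)
        shape[axis1], shape[axis2] = shape[axis2], shape[axis1]
        strides[axis1], strides[axis2] = strides[axis2], strides[axis1]
        return as_strided(x, shape=shape, strides=strides)  # shares x's buffer

    a = np.arange(6).reshape(2, 3)
    assert (swapaxes_view(a, 0, 1) == a.swapaxes(0, 1)).all()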
@@ -196,6 +196,9 @@ void shared_buffer_reshape(
     const Strides& out_strides,
     array& out);
 
+// Like the swapaxes op but safe to call in eval_gpu.
+array swapaxes_in_eval(const array& x, int axis1, int axis2);
+
 template <typename T>
 inline SmallVector<T> remove_index(SmallVector<T> vec, size_t index) {
   vec.erase(std::next(vec.begin(), index));
@@ -15,7 +15,6 @@
 #include "mlx/backend/cpu/jit_compiler.h"
 #include "mlx/device.h"
 #include "mlx/graph_utils.h"
-#include "mlx/version.h"
 
 namespace mlx::core {
 
@@ -95,11 +94,7 @@ void* compile(
     kernel_file_name = kernel_name;
   }
 
-  auto output_dir =
-      std::filesystem::temp_directory_path() / "mlx" / version() / "cpu";
-  if (!std::filesystem::exists(output_dir)) {
-    std::filesystem::create_directories(output_dir);
-  }
+  auto output_dir = std::filesystem::temp_directory_path();
 
   std::string shared_lib_name = "lib" + kernel_file_name + ".so";
   auto shared_lib_path = (output_dir / shared_lib_name).string();
@@ -162,12 +157,10 @@ inline void build_kernel(
 #endif
 
   // Start the kernel
-  os << "void " << kernel_name
-     << "(int* shape, int64_t** strides, void** args) {" << std::endl;
+  os << "void " << kernel_name << "(void** args) {" << std::endl;
 
   // Add the input arguments
   int cnt = 0;
-  int strides_index = 1;
   for (size_t i = 0; i < inputs.size(); ++i) {
     // Skip constants from the input list
     if (is_constant(i)) {
@@ -182,8 +175,8 @@ inline void build_kernel(
        << "];" << std::endl;
     // Scalars and contiguous need no strides
     if (!is_scalar(x) && !contiguous) {
-      os << "  const int64_t* " << xname << "_strides = strides["
-         << strides_index++ << "];" << std::endl;
+      os << "  const size_t* " << xname << "_strides = (size_t*)args[" << cnt++
+         << "];" << std::endl;
     }
   }
 
@@ -193,8 +186,10 @@ inline void build_kernel(
     os << "  " << tstr << "* " << namer.get_name(x) << " = (" << tstr
        << "*)args[" << cnt++ << "];" << std::endl;
   }
-  // Add output size
-  if (contiguous) {
+  // Add output strides and shape to extract the indices.
+  if (!contiguous) {
+    os << "  const int* shape = (int*)args[" << cnt++ << "];" << std::endl;
+  } else {
     os << "  const size_t size = (size_t)args[" << cnt++ << "];" << std::endl;
   }
 
@@ -293,8 +288,17 @@ void Compiled::eval_cpu(
   auto [contiguous, shape, strides] =
       compiled_collapse_contiguous_dims(inputs, outputs[0], is_constant_);
 
+  // Force allocating shape/strides on heap so we can take their data() first
+  // and then std::move them.
+  // TODO: Refactor code to avoid heap allocation.
+  shape.grow();
+  for (auto& s : strides) {
+    s.grow();
+  }
+
   // Collect function input arguments.
   std::vector<void*> args;
+  int strides_index = 1;
   for (size_t i = 0; i < inputs.size(); ++i) {
     if (is_constant_(i)) {
       continue;
@@ -302,6 +306,9 @@ void Compiled::eval_cpu(
     const auto& x = inputs[i];
     encoder.set_input_array(x);
     args.push_back((void*)x.data<void>());
+    if (!contiguous && !is_scalar(x)) {
+      args.push_back(strides[strides_index++].data());
+    }
   }
 
   // Get the kernel name from the lib
@@ -336,20 +343,16 @@ void Compiled::eval_cpu(
     args.push_back(x.data<void>());
     encoder.set_output_array(x);
   }
-  if (contiguous) {
+  if (!contiguous) {
+    args.push_back((void*)shape.data());
+  } else {
     args.push_back((void*)outputs[0].data_size());
   }
-  auto fun = reinterpret_cast<void (*)(int*, int64_t**, void**)>(fn_ptr);
+  auto fun = (void (*)(void**))fn_ptr;
   encoder.dispatch([fun,
                     args = std::move(args),
                     strides = std::move(strides),
-                    shape = std::move(shape)]() mutable {
-    SmallVector<int64_t*> strides_ptrs;
-    for (auto& s : strides) {
-      strides_ptrs.push_back(s.data());
-    }
-    fun(shape.data(), strides_ptrs.data(), args.data());
-  });
+                    shape = std::move(shape)]() mutable { fun(args.data()); });
 }
 
 } // namespace mlx::core
@@ -47,7 +47,7 @@ INSTANTIATE_LAPACK_REAL(orgqr)
 INSTANTIATE_LAPACK_REAL(syevd)
 INSTANTIATE_LAPACK_REAL(geev)
 INSTANTIATE_LAPACK_REAL(potrf)
-INSTANTIATE_LAPACK_REAL(gesdd)
+INSTANTIATE_LAPACK_REAL(gesvdx)
 INSTANTIATE_LAPACK_REAL(getrf)
 INSTANTIATE_LAPACK_REAL(getri)
 INSTANTIATE_LAPACK_REAL(trtri)
@@ -1,5 +1,7 @@
 // Copyright © 2023 Apple Inc.
 
+#include <cassert>
+
 #include "mlx/backend/cpu/copy.h"
 #include "mlx/backend/cpu/encoder.h"
 #include "mlx/backend/cpu/simd/simd.h"
@@ -11,35 +13,6 @@ namespace mlx::core {
 
 namespace {
 
-const static float MXFP4_LUT[16] = {
-    +0.0f,
-    +0.5f,
-    +1.0f,
-    +1.5f,
-    +2.0f,
-    +3.0f,
-    +4.0f,
-    +6.0f,
-    -0.0f,
-    -0.5f,
-    -1.0f,
-    -1.5f,
-    -2.0f,
-    -3.0f,
-    -4.0f,
-    -6.0f};
-
-template <typename T>
-static inline T dequantize_scale(uint8_t s) {
-  using FOrI = union {
-    bfloat16_t f;
-    uint16_t i;
-  };
-  FOrI out;
-  out.i = (s == 0 ? 0x40 : (static_cast<uint16_t>(s) << 7));
-  return static_cast<T>(out.f);
-}
-
 inline constexpr short get_pack_factor(int bits, int wsize = 8) {
   return (bits == 3 || bits == 5) ? 8 : (bits == 6 ? 4 : wsize / bits);
 }
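The removed dequantize_scale decodes an MXFP4 group's 8-bit shared exponent (E8M0) by splicing it into the exponent field of a bfloat16; the special case 0x40 is the bfloat16 subnormal for 2^-127, which keeps the formula valid at s == 0. A sketch of the arithmetic the bit trick computes (for s below the inf/NaN code 255):

    def e8m0_to_float(s: int) -> float:
        # Placing s in the bfloat16 exponent field yields 2**(s - 127).
        return 2.0 ** (s - 127)

    assert e8m0_to_float(127) == 1.0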
@@ -434,231 +407,6 @@ void _qmm_dispatch(
   }
 }
 
-template <typename T>
-void mxfp4_qmm(
-    T* result,
-    const T* x,
-    const uint32_t* w,
-    const uint8_t* scales,
-    int M,
-    int N,
-    int K) {
-  constexpr int group_size = 32;
-  constexpr int pack_factor = get_pack_factor(4, 8);
-  constexpr int bytes_per_pack = get_bytes_per_pack(4);
-  constexpr int packs_in_group = group_size / pack_factor;
-
-  for (int m = 0; m < M; m++) {
-    const uint8_t* w_local = (const uint8_t*)w;
-    const uint8_t* scales_local = scales;
-
-    std::fill(result, result + N, 0);
-
-    for (int k = 0; k < K; k++) {
-      T* result_local = result;
-      T xi = *x++;
-
-      for (int n = 0; n < N; n += group_size) {
-        T scale = dequantize_scale<T>(*scales_local++);
-        for (int ng = 0; ng < packs_in_group; ng++) {
-          uint8_t wi = *w_local++;
-#pragma clang loop unroll(full)
-          for (int p = 0; p < pack_factor; p++) {
-            (*result_local++) +=
-                xi * scale * static_cast<T>(MXFP4_LUT[wi & 0xf]);
-            wi >>= 4;
-          }
-        }
-      }
-    }
-
-    result += N;
-  }
-}
-
-template <typename T>
-void mxfp4_qmm_t(
-    T* result,
-    const T* x,
-    const uint32_t* w,
-    const uint8_t* scales,
-    int M,
-    int N,
-    int K) {
-  constexpr int group_size = 32;
-  constexpr int pack_factor = get_pack_factor(4, 8);
-  constexpr int bytes_per_pack = get_bytes_per_pack(4);
-  constexpr int packs_in_group = group_size / pack_factor;
-
-  for (int m = 0; m < M; m++) {
-    const uint8_t* w_local = (const uint8_t*)w;
-    const uint8_t* scales_local = scales;
-
-    for (int n = 0; n < N; n++) {
-      const T* x_local = x;
-      T sum = 0;
-      for (int k = 0; k < K; k += group_size) {
-        T scale = dequantize_scale<T>(*scales_local++);
-
-        T gsum = 0;
-        for (int kw = 0; kw < packs_in_group; kw++) {
-          uint8_t wi = *w_local++;
-#pragma clang loop unroll(full)
-          for (int p = 0; p < pack_factor; p++) {
-            gsum += (*x_local++) * static_cast<T>(MXFP4_LUT[wi & 0xf]);
-            wi >>= 4;
-          }
-        }
-        sum += scale * gsum;
-      }
-      *result = sum;
-      result++;
-    }
-
-    x += K;
-  }
-}
-
-template <int S>
-simd::Simd<float, S> mxfp4_extract_bits_simd(const uint32_t* w) {
-  if constexpr (S == 8) {
-    constexpr std::array<uint32_t, 8> shifts_ = {{0, 4, 8, 12, 16, 20, 24, 28}};
-    auto shifts(*(simd::Simd<uint32_t, S>*)&shifts_);
-    auto wi = simd::Simd<uint32_t, S>(*w);
-    wi = wi >> shifts;
-    wi = wi & 0xf;
-    simd::Simd<float, S> w_out;
-    for (int i = 0; i < S; ++i) {
-      w_out[i] = MXFP4_LUT[wi[i]];
-    }
-    return w_out;
-  } else {
-    // Appease compiler.. but should never get here
-    throw std::runtime_error("Unsupported combination for simd qmm.");
-  }
-}
-
-template <typename T>
-void mxfp4_qmm_t_simd(
-    T* result,
-    const T* x,
-    const uint32_t* w,
-    const uint8_t* scales,
-    int M,
-    int N,
-    int K) {
-  constexpr int group_size = 32;
-  constexpr int pack_factor = 32 / 4;
-  constexpr int packs_in_group = group_size / pack_factor;
-  constexpr int S = simd::max_size<T>;
-  static_assert(
-      S % pack_factor == 0, "SIMD size must be divisible by pack factor");
-  constexpr int packs_per_simd = S / pack_factor;
-
-  for (int m = 0; m < M; m++) {
-    const uint32_t* w_local = w;
-    const uint8_t* scales_local = scales;
-
-    for (int n = 0; n < N; n++) {
-      simd::Simd<float, S> acc(0);
-      auto x_local = x;
-      for (int k = 0; k < K; k += group_size) {
-        T scale = dequantize_scale<T>(*scales_local++);
-
-        simd::Simd<float, S> g_acc(0);
-        for (int kw = 0; kw < packs_in_group; kw += packs_per_simd) {
-          // Extract bits
-          auto wf = mxfp4_extract_bits_simd<S>(w_local);
-          w_local += packs_per_simd;
-          simd::Simd<float, S> x_simd = simd::load<T, S>(x_local);
-          g_acc = g_acc + x_simd * wf;
-          x_local += S;
-        }
-        acc = acc + scale * g_acc;
-      }
-
-      *result = T(simd::sum(acc));
-      result++;
-    }
-    x += K;
-  }
-}
-
-template <typename T>
-void mxfp4_qmm_dispatch_transpose(
-    T* result,
-    const T* x,
-    const uint32_t* w,
-    const uint8_t* scales,
-    int M,
-    int N,
-    int K,
-    bool transposed_w) {
-  if (transposed_w) {
-    // the simd size must be a multiple of the number of elements per word
-    if constexpr (simd::max_size<T> % 8 == 0) {
-      mxfp4_qmm_t_simd<T>(result, x, w, scales, M, N, K);
-    } else {
-      mxfp4_qmm_t<T>(result, x, w, scales, M, N, K);
-    }
-  } else {
-    mxfp4_qmm<T>(result, x, w, scales, M, N, K);
-  }
-}
-
-template <typename T>
-void mxfp4_qmm_dispatch_typed(
-    array& out,
-    const array& x,
-    const array& w,
-    const array& scales,
-    bool transposed_w) {
-  int K = x.shape(-1);
-  int M = x.ndim() > 1 ? x.shape(-2) : 1;
-  int N = out.shape(-1);
-  int w_els = w.ndim() > 2 ? w.shape(-1) * w.shape(-2) : 0;
-  int g_els = w.ndim() > 2 ? scales.shape(-1) * scales.shape(-2) : 0;
-  int batch_size = x.size() / (K * M);
-
-  auto out_ptr = out.data<T>();
-  auto x_ptr = x.data<T>();
-  auto w_ptr = w.data<uint32_t>();
-  auto scales_ptr = scales.data<uint8_t>();
-  for (int i = 0; i < batch_size; i++) {
-    mxfp4_qmm_dispatch_transpose<T>(
-        out_ptr + i * M * N,
-        x_ptr + elem_to_loc(i * M * K, x.shape(), x.strides()),
-        w_ptr + elem_to_loc(i * w_els, w.shape(), w.strides()),
-        scales_ptr + elem_to_loc(i * g_els, scales.shape(), scales.strides()),
-        M,
-        N,
-        K,
-        transposed_w);
-  }
-}
-
-void mxfp4_qmm_dispatch(
-    array& out,
-    const array& x,
-    const array& w,
-    const array& scales,
-    bool transposed_w) {
-  switch (x.dtype()) {
-    case bfloat16:
-      mxfp4_qmm_dispatch_typed<bfloat16_t>(out, x, w, scales, transposed_w);
-      break;
-    case float16:
-      mxfp4_qmm_dispatch_typed<float16_t>(out, x, w, scales, transposed_w);
-      break;
-    case float32:
-      mxfp4_qmm_dispatch_typed<float>(out, x, w, scales, transposed_w);
-      break;
-    default:
-      throw std::invalid_argument(
-          "[quantized_matmul] only floating types are supported");
-  }
-}
-
 template <typename T>
 void _bs_qmm_dispatch_typed(
     array& out,
@@ -765,106 +513,41 @@ void _bs_qmm_dispatch(
   }
 }
 
-template <typename T>
-void mxfp4_bs_qmm_dispatch_typed(
-    array& out,
-    const array& x,
-    const array& w,
-    const array& scales,
-    const array& lhs_indices,
-    const array& rhs_indices,
-    bool transposed_w) {
-  int K = x.shape(-1);
-  int M = x.shape(-2);
-  int N = out.shape(-1);
-
-  int w_els = w.shape(-1) * w.shape(-2);
-  int g_els = scales.shape(-1) * scales.shape(-2);
-
-  auto out_ptr = out.data<T>();
-  auto x_ptr = x.data<T>();
-  auto w_ptr = w.data<uint32_t>();
-  auto scales_ptr = scales.data<uint8_t>();
-  auto lhs_indices_ptr = lhs_indices.data<uint32_t>();
-  auto rhs_indices_ptr = rhs_indices.data<uint32_t>();
-
-  for (int i = 0; i < lhs_indices.size(); i++) {
-    int x_idx = lhs_indices_ptr[elem_to_loc(
-        i, lhs_indices.shape(), lhs_indices.strides())];
-    int w_idx = rhs_indices_ptr[elem_to_loc(
-        i, rhs_indices.shape(), rhs_indices.strides())];
-    mxfp4_qmm_dispatch_transpose<T>(
-        out_ptr + i * M * N,
-        x_ptr + elem_to_loc(x_idx * M * K, x.shape(), x.strides()),
-        w_ptr + elem_to_loc(w_idx * w_els, w.shape(), w.strides()),
-        scales_ptr +
-            elem_to_loc(w_idx * g_els, scales.shape(), scales.strides()),
-        M,
-        N,
-        K,
-        transposed_w);
-  }
-}
-
-void mxfp4_bs_qmm_dispatch(
-    array& out,
-    const array& x,
-    const array& w,
-    const array& scales,
-    const array& lhs_indices,
-    const array& rhs_indices,
-    bool transposed_w) {
-  switch (x.dtype()) {
-    case float32:
-      mxfp4_bs_qmm_dispatch_typed<float>(
-          out, x, w, scales, lhs_indices, rhs_indices, transposed_w);
-      break;
-    case float16:
-      mxfp4_bs_qmm_dispatch_typed<float16_t>(
-          out, x, w, scales, lhs_indices, rhs_indices, transposed_w);
-      break;
-    case bfloat16:
-      mxfp4_bs_qmm_dispatch_typed<bfloat16_t>(
-          out, x, w, scales, lhs_indices, rhs_indices, transposed_w);
-      break;
-    default:
-      throw std::invalid_argument(
-          "[quantized_matmul] only floating types are supported");
-  }
-}
-
 } // namespace
 
 void QuantizedMatmul::eval_cpu(const std::vector<array>& inputs, array& out) {
+  assert(inputs.size() == 4);
+
   auto& x_pre = inputs[0];
   auto& w_pre = inputs[1];
   auto& scales_pre = inputs[2];
+  auto& biases_pre = inputs[3];
 
-  auto& encoder = cpu::get_command_encoder(stream());
-  auto ensure_row_contiguous = [s = stream(), &encoder](const array& arr) {
+  std::vector<array> temps;
+  auto ensure_row_contiguous = [s = stream(), &temps](const array& arr) {
     if (arr.flags().row_contiguous) {
       return arr;
     } else {
-      auto arr_cpy = array(arr.shape(), arr.dtype(), nullptr, {});
-      copy_cpu(arr, arr_cpy, CopyType::General, s);
-      encoder.add_temporary(arr_cpy);
-      return arr_cpy;
+      temps.push_back(array(arr.shape(), arr.dtype(), nullptr, {}));
+      copy_cpu(arr, temps.back(), CopyType::General, s);
+      return temps.back();
     }
   };
 
   auto x = ensure_row_contiguous(x_pre);
   auto w = ensure_row_contiguous(w_pre);
   auto scales = ensure_row_contiguous(scales_pre);
+  auto biases = ensure_row_contiguous(biases_pre);
 
   out.set_data(allocator::malloc(out.nbytes()));
 
+  auto& encoder = cpu::get_command_encoder(stream());
+  encoder.add_temporaries(std::move(temps));
   encoder.set_input_array(x);
   encoder.set_input_array(w);
   encoder.set_input_array(scales);
-  encoder.set_output_array(out);
-  if (mode_ == QuantizationMode::Affine) {
-    auto biases = ensure_row_contiguous(inputs[3]);
   encoder.set_input_array(biases);
+  encoder.set_output_array(out);
   encoder.dispatch([out = array::unsafe_weak_copy(out),
                     x = array::unsafe_weak_copy(x),
                     w = array::unsafe_weak_copy(w),
@@ -875,54 +558,48 @@ void QuantizedMatmul::eval_cpu(const std::vector<array>& inputs, array& out) {
                     transpose_ = transpose_]() mutable {
     _qmm_dispatch(out, x, w, scales, biases, group_size_, bits_, transpose_);
   });
-  } else {
-    encoder.dispatch([out = array::unsafe_weak_copy(out),
-                      x = array::unsafe_weak_copy(x),
-                      w = array::unsafe_weak_copy(w),
-                      scales = array::unsafe_weak_copy(scales),
-                      transpose_ = transpose_]() mutable {
-      mxfp4_qmm_dispatch(out, x, w, scales, transpose_);
-    });
-  }
 }
 
 void GatherQMM::eval_cpu(const std::vector<array>& inputs, array& out) {
+  assert(inputs.size() == 6);
+
   auto& x_pre = inputs[0];
   auto& w_pre = inputs[1];
   auto& scales_pre = inputs[2];
-  auto& lhs_indices = inputs[inputs.size() - 2];
-  auto& rhs_indices = inputs[inputs.size() - 1];
+  auto& biases_pre = inputs[3];
+  auto& lhs_indices = inputs[4];
+  auto& rhs_indices = inputs[5];
 
-  auto& encoder = cpu::get_command_encoder(stream());
+  std::vector<array> temps;
   auto ensure_row_contiguous_last_dims = [s = stream(),
-                                          &encoder](const array& arr) {
+                                          &temps](const array& arr) {
    auto stride_0 = arr.strides()[arr.ndim() - 2];
    auto stride_1 = arr.strides()[arr.ndim() - 1];
    if (stride_0 == arr.shape(-1) && stride_1 == 1) {
      return arr;
    } else {
-      auto arr_cpy = array(arr.shape(), arr.dtype(), nullptr, {});
-      copy_cpu(arr, arr_cpy, CopyType::General, s);
-      encoder.add_temporary(arr_cpy);
-      return arr_cpy;
+      temps.push_back(array(arr.shape(), arr.dtype(), nullptr, {}));
+      copy_cpu(arr, temps.back(), CopyType::General, s);
+      return temps.back();
    }
  };
 
  auto x = ensure_row_contiguous_last_dims(x_pre);
  auto w = ensure_row_contiguous_last_dims(w_pre);
  auto scales = ensure_row_contiguous_last_dims(scales_pre);
+  auto biases = ensure_row_contiguous_last_dims(biases_pre);
 
  out.set_data(allocator::malloc(out.nbytes()));
 
+  auto& encoder = cpu::get_command_encoder(stream());
+  encoder.add_temporaries(std::move(temps));
  encoder.set_input_array(x);
  encoder.set_input_array(w);
  encoder.set_input_array(scales);
+  encoder.set_input_array(biases);
  encoder.set_input_array(lhs_indices);
  encoder.set_input_array(rhs_indices);
  encoder.set_output_array(out);
-  if (mode_ == QuantizationMode::Affine) {
-    auto biases = ensure_row_contiguous_last_dims(inputs[3]);
-    encoder.set_input_array(biases);
  encoder.dispatch([out = array::unsafe_weak_copy(out),
                    x = array::unsafe_weak_copy(x),
                    w = array::unsafe_weak_copy(w),
@@ -945,18 +622,6 @@ void GatherQMM::eval_cpu(const std::vector<array>& inputs, array& out) {
        bits_,
        transpose_);
  });
-  } else {
-    encoder.dispatch([out = array::unsafe_weak_copy(out),
-                      x = array::unsafe_weak_copy(x),
-                      w = array::unsafe_weak_copy(w),
-                      scales = array::unsafe_weak_copy(scales),
-                      lhs_indices = array::unsafe_weak_copy(lhs_indices),
-                      rhs_indices = array::unsafe_weak_copy(rhs_indices),
-                      transpose_ = transpose_]() mutable {
-      mxfp4_bs_qmm_dispatch(
-          out, x, w, scales, lhs_indices, rhs_indices, transpose_);
-    });
-  }
 }
 
 template <typename T, typename U>
@@ -1040,7 +705,7 @@ void dispatch_quantize(
       w_ptr, out_ptr, scales_ptr, biases_ptr, bits, group_size, w.size());
 }
 
-void fast::Quantize::eval_cpu(
+void fast::AffineQuantize::eval_cpu(
     const std::vector<array>& inputs,
     std::vector<array>& outputs) {
   auto ensure_row_contiguous = [s = stream()](const array& arr) {
@@ -1099,7 +764,7 @@ void fast::Quantize::eval_cpu(
       }
     } else {
       throw std::runtime_error(
-          "[fast::Quantize::eval_cpu] Only supports floating point inputs");
+          "[fast::AffineQuantize::eval_cpu] Only supports floating point inputs");
     }
   });
 }
@@ -234,7 +234,6 @@ Simd<T, N> remainder(Simd<T, N> a, Simd<T, N> b) {
 
 template <typename MaskT, typename T1, typename T2, int N>
 Simd<T1, N> select(Simd<MaskT, N> mask, Simd<T1, N> x, Simd<T2, N> y) {
-  static_assert(std::is_same_v<MaskT, bool>);
   if constexpr (sizeof(T1) == 1) {
     return asd::bitselect(y.value, x.value, asd::convert<char>(mask.value));
   } else if constexpr (sizeof(T1) == 2) {
@@ -252,13 +251,9 @@ Simd<T, N> pow(Simd<T, N> base, Simd<T, N> exp) {
     return asd::pow(base.value, exp.value);
   } else {
     Simd<T, N> res = 1;
-    // Raising an integer to a negative power is undefined
-    if (any(exp < 0)) {
-      return 0;
-    }
-    while (any(exp > 0)) {
-      res = select((exp & 1) != 0, res * base, res);
-      base = select(exp > 0, base * base, base);
+    while (any(exp)) {
+      res = select(exp & 1, res * base, res);
+      base = select(exp, base * base, base);
       exp = exp >> 1;
     }
     return res;
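Both sides of this hunk are square-and-multiply over all SIMD lanes, with select playing the role of the scalar if; the new side also drops the old guard for negative exponents. A scalar sketch of the same algorithm for a single non-negative integer exponent:

    def ipow(base: int, exp: int) -> int:
        # Square-and-multiply: consume one exponent bit per iteration,
        # mirroring `res = select(exp & 1, res * base, res)` per lane.
        res = 1
        while exp:
            if exp & 1:
                res *= base
            base *= base
            exp >>= 1
        return res

    assert ipow(3, 5) == 243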
@@ -81,7 +81,9 @@ void svd_impl(
   // Vᵀ of shape N x N. (M x M in lapack).
   const int ldvt = M;
 
-  auto jobz = (u_ptr) ? "A" : "N";
+  auto job_u = (u_ptr) ? "V" : "N";
+  auto job_vt = (u_ptr) ? "V" : "N";
+  static constexpr auto range = "A";
 
   // Will contain the number of singular values after the call has returned.
   int ns = 0;
@@ -89,20 +91,30 @@ void svd_impl(
 
   // Will contain the indices of eigenvectors that failed to converge (not
   // used here but required by lapack).
-  auto iwork = array::Data{allocator::malloc(sizeof(int) * 8 * K)};
+  auto iwork = array::Data{allocator::malloc(sizeof(int) * 12 * K)};
 
   static const int lwork_query = -1;
 
+  static const int ignored_int = 0;
+  static const T ignored_float = 0;
+
   int info;
 
   // Compute workspace size.
-  gesdd<T>(
-      /* jobz = */ jobz,
+  gesvdx<T>(
+      /* jobu = */ job_u,
+      /* jobvt = */ job_vt,
+      /* range = */ range,
       // M and N are swapped since lapack expects column-major.
       /* m = */ &N,
       /* n = */ &M,
       /* a = */ nullptr,
       /* lda = */ &lda,
+      /* vl = */ &ignored_float,
+      /* vu = */ &ignored_float,
+      /* il = */ &ignored_int,
+      /* iu = */ &ignored_int,
+      /* ns = */ &ns,
       /* s = */ nullptr,
       /* u = */ nullptr,
       /* ldu = */ &ldu,
@@ -124,13 +136,20 @@ void svd_impl(
 
     // Loop over matrices.
     for (int i = 0; i < num_matrices; i++) {
-      gesdd<T>(
-          /* jobz = */ jobz,
+      gesvdx<T>(
+          /* jobu = */ job_u,
+          /* jobvt = */ job_vt,
+          /* range = */ range,
           // M and N are swapped since lapack expects column-major.
           /* m = */ &N,
           /* n = */ &M,
           /* a = */ in_ptr + M * N * i,
           /* lda = */ &lda,
+          /* vl = */ &ignored_float,
+          /* vu = */ &ignored_float,
+          /* il = */ &ignored_int,
+          /* iu = */ &ignored_int,
+          /* ns = */ &ns,
           /* s = */ s_ptr + K * i,
           // According to the identity above, lapack will write Vᵀᵀ as U.
          /* u = */ vt_ptr ? vt_ptr + N * N * i : nullptr,
@@ -148,6 +167,13 @@ void svd_impl(
|
|||||||
ss << "svd_impl: sgesvdx_ failed with code " << info;
|
ss << "svd_impl: sgesvdx_ failed with code " << info;
|
||||||
throw std::runtime_error(ss.str());
|
throw std::runtime_error(ss.str());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (ns != K) {
|
||||||
|
std::stringstream ss;
|
||||||
|
ss << "svd_impl: expected " << K << " singular values, but " << ns
|
||||||
|
<< " were computed.";
|
||||||
|
throw std::runtime_error(ss.str());
|
||||||
|
}
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
encoder.add_temporary(in);
|
encoder.add_temporary(in);
|
||||||
|
|||||||
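// Illustrative sketch (not part of the diff): LAPACK drivers like the gesvdx
// wrapper above use a two-phase convention in which lwork = -1 performs a
// workspace query. The sketch below uses the standard netlib sgesvd_ routine
// (chosen only because its signature is short); it is a pattern, not this
// repository's exact wrapper.
//
// #include <vector>
//
// extern "C" void sgesvd_(const char* jobu, const char* jobvt, const int* m,
//                         const int* n, float* a, const int* lda, float* s,
//                         float* u, const int* ldu, float* vt, const int* ldvt,
//                         float* work, const int* lwork, int* info);
//
// void svd_values_only(int m, int n, float* a /* column-major */, float* s) {
//   int info = 0, lwork = -1;
//   float work_size = 0;
//   // Phase 1: lwork = -1 writes the optimal buffer size into work_size.
//   sgesvd_("N", "N", &m, &n, a, &m, s, nullptr, &m, nullptr, &n,
//           &work_size, &lwork, &info);
//   lwork = static_cast<int>(work_size);
//   std::vector<float> work(lwork);
//   // Phase 2: the real factorization with the requested workspace.
//   sgesvd_("N", "N", &m, &n, a, &m, s, nullptr, &m, nullptr, &n,
//           work.data(), &lwork, &info);
// }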
@@ -8,6 +8,7 @@ target_sources(
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/allocator.cpp
${CMAKE_CURRENT_SOURCE_DIR}/arange.cu
${CMAKE_CURRENT_SOURCE_DIR}/arg_reduce.cu
${CMAKE_CURRENT_SOURCE_DIR}/binary.cu
${CMAKE_CURRENT_SOURCE_DIR}/binary_two.cu
${CMAKE_CURRENT_SOURCE_DIR}/compiled.cpp
${CMAKE_CURRENT_SOURCE_DIR}/copy.cu
@@ -16,18 +17,14 @@ target_sources(
${CMAKE_CURRENT_SOURCE_DIR}/copy/copy_general_dynamic.cu
${CMAKE_CURRENT_SOURCE_DIR}/copy/copy_general_input.cu
${CMAKE_CURRENT_SOURCE_DIR}/conv.cpp
${CMAKE_CURRENT_SOURCE_DIR}/conv/gemm_conv.cu
${CMAKE_CURRENT_SOURCE_DIR}/conv/gemm_grouped_conv.cu
${CMAKE_CURRENT_SOURCE_DIR}/cuda.cpp
${CMAKE_CURRENT_SOURCE_DIR}/cudnn_utils.cpp
${CMAKE_CURRENT_SOURCE_DIR}/custom_kernel.cpp
${CMAKE_CURRENT_SOURCE_DIR}/device.cpp
${CMAKE_CURRENT_SOURCE_DIR}/distributed.cu
${CMAKE_CURRENT_SOURCE_DIR}/eval.cpp
${CMAKE_CURRENT_SOURCE_DIR}/event.cu
${CMAKE_CURRENT_SOURCE_DIR}/fence.cpp
${CMAKE_CURRENT_SOURCE_DIR}/gemms/gemv.cu
${CMAKE_CURRENT_SOURCE_DIR}/gemms/cublas_gemm.cpp
${CMAKE_CURRENT_SOURCE_DIR}/gemms/steel_gemm.cu
${CMAKE_CURRENT_SOURCE_DIR}/jit_module.cpp
${CMAKE_CURRENT_SOURCE_DIR}/indexing.cpp
${CMAKE_CURRENT_SOURCE_DIR}/kernel_utils.cu
@@ -49,14 +46,12 @@ target_sources(
${CMAKE_CURRENT_SOURCE_DIR}/softmax.cu
${CMAKE_CURRENT_SOURCE_DIR}/sort.cu
${CMAKE_CURRENT_SOURCE_DIR}/ternary.cu
${CMAKE_CURRENT_SOURCE_DIR}/unary.cu
${CMAKE_CURRENT_SOURCE_DIR}/utils.cpp
${CMAKE_CURRENT_SOURCE_DIR}/quantized/affine_quantize.cu
${CMAKE_CURRENT_SOURCE_DIR}/quantized/quantized.cpp
${CMAKE_CURRENT_SOURCE_DIR}/worker.cpp)

add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/binary)
add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/unary)

if(CMAKE_CUDA_COMPILER_VERSION VERSION_GREATER_EQUAL 12.9.0)
target_sources(
mlx PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/gemms/cublas_gemm_batched_12_9.cu)
@@ -154,7 +149,7 @@ target_link_libraries(mlx PRIVATE CUDA::nvrtc CUDA::cuda_driver)
FetchContent_Declare(
cudnn
GIT_REPOSITORY https://github.com/NVIDIA/cudnn-frontend.git
GIT_TAG v1.14.0
GIT_TAG v1.12.1
GIT_SHALLOW TRUE
EXCLUDE_FROM_ALL)
set(CUDNN_FRONTEND_SKIP_JSON_LIB ON)
@@ -30,15 +30,8 @@ SmallSizePool::SmallSizePool() {
next_free_ = buffer_;

CHECK_CUDA_ERROR(cudaMallocManaged(&data_, small_pool_size));
#if CUDART_VERSION >= 13000
cudaMemLocation loc;
loc.type = cudaMemLocationTypeDevice;
loc.id = 0;
#else
int loc = 0;
#endif // CUDART_VERSION >= 13000
CHECK_CUDA_ERROR(
cudaMemAdvise(data_, small_pool_size, cudaMemAdviseSetReadMostly, loc));
cudaMemAdvise(data_, small_pool_size, cudaMemAdviseSetReadMostly, 0));

auto curr = next_free_;
for (size_t i = 1; i < num_blocks; ++i) {
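// Illustrative sketch (not part of the diff): the removed #if above papers
// over a CUDA 13 API change, where cudaMemAdvise takes a cudaMemLocation
// struct instead of a plain device ordinal. A small helper, assuming device 0,
// isolates that difference:
//
// #include <cuda_runtime.h>
//
// inline cudaError_t advise_read_mostly(void* ptr, size_t bytes) {
// #if CUDART_VERSION >= 13000
//   cudaMemLocation loc{};
//   loc.type = cudaMemLocationTypeDevice; // advice is scoped to a location
//   loc.id = 0;
//   return cudaMemAdvise(ptr, bytes, cudaMemAdviseSetReadMostly, loc);
// #else
//   return cudaMemAdvise(ptr, bytes, cudaMemAdviseSetReadMostly, 0);
// #endif
// }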
@@ -6,33 +6,23 @@
#include "mlx/dtype_utils.h"
#include "mlx/primitives.h"

#include <cooperative_groups.h>
#include <nvtx3/nvtx3.hpp>
#include <thrust/device_ptr.h>
#include <thrust/transform.h>

namespace mlx::core {

namespace cu {

namespace cg = cooperative_groups;
template <typename T>
struct Arange {
const T start;
const T step;

template <typename T, typename IdxT, int N_WRITES>
__device__ T operator()(uint32_t i) const {
__global__ void arange(T* out, IdxT size, T start, T step) {
return start + i * step;
IdxT index = cg::this_grid().thread_rank();

if ((index + 1) * N_WRITES > size) {
for (IdxT i = index * N_WRITES; i < size; ++i) {
out[i] = start + i * step;
}
} else {
AlignedVector<T, N_WRITES> out_vec;
#pragma unroll
for (int i = 0; i < N_WRITES; ++i) {
out_vec[i] = start + (index * N_WRITES + i) * step;
}

store_vector<N_WRITES>(out, index, out_vec);
}
}
};

} // namespace cu

@@ -46,23 +36,19 @@ void Arange::eval_gpu(const std::vector<array>& inputs, array& out) {
auto& encoder = cu::get_command_encoder(stream());
encoder.set_output_array(out);

auto capture = encoder.capture_context();
dispatch_int_float_types(out.dtype(), "Arange", [&](auto type_tag) {
using CTYPE = MLX_GET_TYPE(type_tag);
using OutType = cuda_type_t<CTYPE>;
constexpr int N_WRITES = 16 / sizeof(OutType);
CTYPE step =
dispatch_bool(out.data_size() > INT32_MAX, [&](auto large) {
static_cast<CTYPE>(start_ + step_) - static_cast<CTYPE>(start_);
using IdxT = std::conditional_t<large(), int64_t, int32_t>;
thrust::transform(
auto [num_blocks, block_dims] = get_launch_args(out, large(), N_WRITES);
cu::thrust_policy(encoder.stream()),
encoder.add_kernel_node(
thrust::counting_iterator<uint32_t>(0),
cu::arange<OutType, IdxT, N_WRITES>,
thrust::counting_iterator<uint32_t>(out.data_size()),
num_blocks,
thrust::device_pointer_cast(out.data<OutType>()),
block_dims,
cu::Arange<OutType>{
0,
static_cast<OutType>(start_), static_cast<OutType>(step)});
out.data<OutType>(),
out.data_size(),
static_cast<CTYPE>(start_),
static_cast<CTYPE>(start_ + step_) - static_cast<CTYPE>(start_));
});
});
}
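// Illustrative sketch (not part of the diff): the right-hand side fills the
// output by mapping a thrust counting iterator through a functor, so element
// i becomes start + i * step with no hand-written kernel. A self-contained
// version of that pattern (float output, default stream) looks like:
//
// #include <thrust/device_vector.h>
// #include <thrust/iterator/counting_iterator.h>
// #include <thrust/transform.h>
//
// struct ArangeOp {
//   float start, step;
//   __device__ float operator()(uint32_t i) const { return start + i * step; }
// };
//
// void fill_arange(thrust::device_vector<float>& out, float start, float step) {
//   thrust::transform(
//       thrust::counting_iterator<uint32_t>(0),
//       thrust::counting_iterator<uint32_t>(out.size()),
//       out.begin(), // one output element per index
//       ArangeOp{start, step});
// }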
|||||||
@@ -99,89 +99,39 @@ __global__ void binary_vv(const In* a, const In* b, Out* out, IdxT size) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
template <
|
template <typename Op, typename In, typename Out, typename IdxT, int NDIM>
|
||||||
typename Op,
|
|
||||||
typename In,
|
|
||||||
typename Out,
|
|
||||||
typename IdxT,
|
|
||||||
int NDIM,
|
|
||||||
int N_READS>
|
|
||||||
__global__ void binary_g_nd(
|
__global__ void binary_g_nd(
|
||||||
const In* a,
|
const In* a,
|
||||||
const In* b,
|
const In* b,
|
||||||
Out* out,
|
Out* out,
|
||||||
IdxT size_rest,
|
IdxT size,
|
||||||
const __grid_constant__ cuda::std::array<int32_t, NDIM> shape,
|
const __grid_constant__ cuda::std::array<int32_t, NDIM> shape,
|
||||||
const __grid_constant__ cuda::std::array<int64_t, NDIM> a_strides,
|
const __grid_constant__ cuda::std::array<int64_t, NDIM> a_strides,
|
||||||
const __grid_constant__ cuda::std::array<int64_t, NDIM> b_strides) {
|
const __grid_constant__ cuda::std::array<int64_t, NDIM> b_strides) {
|
||||||
auto block = cg::this_thread_block();
|
IdxT index = cg::this_grid().thread_rank();
|
||||||
auto grid = cg::this_grid();
|
if (index < size) {
|
||||||
IdxT index_rest =
|
|
||||||
grid.block_index().y * block.dim_threads().y + block.thread_index().y;
|
|
||||||
if (index_rest >= size_rest) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
auto shape_x = shape[NDIM - 1];
|
|
||||||
auto a_stride_x = a_strides[NDIM - 1];
|
|
||||||
auto b_stride_x = b_strides[NDIM - 1];
|
|
||||||
IdxT index_x =
|
|
||||||
grid.block_index().x * block.dim_threads().x + block.thread_index().x;
|
|
||||||
auto [a_idx, b_idx] = elem_to_loc_nd<NDIM>(
|
auto [a_idx, b_idx] = elem_to_loc_nd<NDIM>(
|
||||||
index_rest * shape_x, shape.data(), a_strides.data(), b_strides.data());
|
index, shape.data(), a_strides.data(), b_strides.data());
|
||||||
auto a_vec =
|
out[index] = Op{}(a[a_idx], b[b_idx]);
|
||||||
load_vector<N_READS>(a + a_idx, index_x, shape_x, a_stride_x, In(0));
|
|
||||||
auto b_vec =
|
|
||||||
load_vector<N_READS>(b + b_idx, index_x, shape_x, b_stride_x, In(0));
|
|
||||||
|
|
||||||
AlignedVector<Out, N_READS> out_vec;
|
|
||||||
#pragma unroll
|
|
||||||
for (int i = 0; i < N_READS; ++i) {
|
|
||||||
out_vec[i] = Op{}(a_vec[i], b_vec[i]);
|
|
||||||
}
|
}
|
||||||
store_vector(out + shape_x * index_rest, index_x, out_vec, shape_x);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename Op, typename In, typename Out, typename IdxT, int N_READS>
|
template <typename Op, typename In, typename Out, typename IdxT>
|
||||||
__global__ void binary_g(
|
__global__ void binary_g(
|
||||||
const In* a,
|
const In* a,
|
||||||
const In* b,
|
const In* b,
|
||||||
Out* out,
|
Out* out,
|
||||||
IdxT size_rest,
|
IdxT size,
|
||||||
const __grid_constant__ Shape shape,
|
const __grid_constant__ Shape shape,
|
||||||
const __grid_constant__ Strides a_strides,
|
const __grid_constant__ Strides a_strides,
|
||||||
const __grid_constant__ Strides b_strides,
|
const __grid_constant__ Strides b_strides,
|
||||||
int ndim) {
|
int ndim) {
|
||||||
auto block = cg::this_thread_block();
|
IdxT index = cg::this_grid().thread_rank();
|
||||||
auto grid = cg::this_grid();
|
if (index < size) {
|
||||||
IdxT index_rest =
|
|
||||||
grid.block_index().y * block.dim_threads().y + block.thread_index().y;
|
|
||||||
if (index_rest >= size_rest) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
auto shape_x = shape[ndim - 1];
|
|
||||||
auto a_stride_x = a_strides[ndim - 1];
|
|
||||||
auto b_stride_x = b_strides[ndim - 1];
|
|
||||||
IdxT index_x =
|
|
||||||
grid.block_index().x * block.dim_threads().x + block.thread_index().x;
|
|
||||||
auto [a_idx, b_idx] = elem_to_loc(
|
auto [a_idx, b_idx] = elem_to_loc(
|
||||||
index_rest * shape_x,
|
index, shape.data(), a_strides.data(), b_strides.data(), ndim);
|
||||||
shape.data(),
|
out[index] = Op{}(a[a_idx], b[b_idx]);
|
||||||
a_strides.data(),
|
|
||||||
b_strides.data(),
|
|
||||||
ndim);
|
|
||||||
auto a_vec =
|
|
||||||
load_vector<N_READS>(a + a_idx, index_x, shape_x, a_stride_x, In(0));
|
|
||||||
auto b_vec =
|
|
||||||
load_vector<N_READS>(b + b_idx, index_x, shape_x, b_stride_x, In(0));
|
|
||||||
|
|
||||||
AlignedVector<Out, N_READS> out_vec;
|
|
||||||
#pragma unroll
|
|
||||||
for (int i = 0; i < N_READS; ++i) {
|
|
||||||
out_vec[i] = Op{}(a_vec[i], b_vec[i]);
|
|
||||||
}
|
}
|
||||||
store_vector(out + shape_x * index_rest, index_x, out_vec, shape_x);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
template <typename Op, typename In, typename Out>
|
template <typename Op, typename In, typename Out>
|
||||||
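// Illustrative sketch (not part of the diff): both versions of these kernels
// hinge on an elem_to_loc-style mapping from a flat row-major element index
// to element offsets in two (possibly broadcast) strided inputs. A host-side
// reference implementation of that mapping, with hypothetical names, is:
//
// #include <cstdint>
// #include <utility>
//
// std::pair<int64_t, int64_t> elem_to_loc_ref(
//     int64_t index,
//     const int32_t* shape,
//     const int64_t* a_strides,
//     const int64_t* b_strides,
//     int ndim) {
//   int64_t a_loc = 0, b_loc = 0;
//   for (int i = ndim - 1; i >= 0; --i) {
//     int64_t pos = index % shape[i]; // coordinate along dimension i
//     index /= shape[i];
//     a_loc += pos * a_strides[i];    // a broadcast dim simply has stride 0
//     b_loc += pos * b_strides[i];
//   }
//   return {a_loc, b_loc};
// }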
@@ -259,61 +209,39 @@ void binary_op_gpu_inplace(
auto& a_strides = strides[0];
auto& b_strides = strides[1];
int ndim = shape.size();
int work_per_thread = 1;
auto dim0 = ndim > 0 ? shape.back() : 1;
auto rest = out.size() / dim0;
if (dim0 >= 4) {
work_per_thread = 4;
}
dim0 = (dim0 + work_per_thread - 1) / work_per_thread;
auto block_dims = get_block_dims(dim0, rest, 1);
uint32_t num_blocks_x = cuda::ceil_div(dim0, block_dims.x);
uint32_t num_blocks_y = cuda::ceil_div(rest, block_dims.y);
if (ndim <= 3) {
dispatch_1_2_3(ndim, [&](auto dims_constant) {
auto kernel = cu::binary_g_nd<
auto [num_blocks, block_dims] =
Op,
get_launch_args(out, large());
InType,
OutType,
IdxT,
dims_constant(),
1>;
if (work_per_thread == 4) {
kernel = cu::binary_g_nd<
Op,
InType,
OutType,
IdxT,
dims_constant(),
4>;
}
encoder.add_kernel_node(
kernel,
cu::binary_g_nd<
{num_blocks_x, num_blocks_y},
Op,
InType,
OutType,
IdxT,
dims_constant()>,
num_blocks,
block_dims,
0,
a.data<InType>(),
b.data<InType>(),
out.data<OutType>(),
rest,
out.size(),
const_param<dims_constant()>(shape),
const_param<dims_constant()>(a_strides),
const_param<dims_constant()>(b_strides));
});
} else {
auto kernel = cu::binary_g<Op, InType, OutType, IdxT, 1>;
auto [num_blocks, block_dims] = get_launch_args(out, large());
if (work_per_thread == 4) {
kernel = cu::binary_g<Op, InType, OutType, IdxT, 4>;
}
encoder.add_kernel_node(
kernel,
cu::binary_g<Op, InType, OutType, IdxT>,
{num_blocks_x, num_blocks_y},
num_blocks,
block_dims,
0,
a.data<InType>(),
b.data<InType>(),
out.data<OutType>(),
rest,
out.size(),
const_param(shape),
const_param(a_strides),
const_param(b_strides),
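// Illustrative sketch (not part of the diff): the left-hand side sizes a 2D
// launch by splitting the array into its innermost dimension (dim0) and the
// product of the remaining dimensions (rest), then dividing dim0 by a
// work-per-thread factor so each thread handles up to four contiguous
// elements. A standalone version of that arithmetic, with hypothetical
// helper types, looks like:
//
// #include <cstdint>
//
// struct Dims3 { uint32_t x, y, z; };
//
// inline uint32_t ceil_div(uint32_t n, uint32_t d) { return (n + d - 1) / d; }
//
// Dims3 plan_grid(uint32_t dim0, uint32_t rest, Dims3 block_dims) {
//   uint32_t work_per_thread = (dim0 >= 4) ? 4 : 1; // vectorize when profitable
//   dim0 = ceil_div(dim0, work_per_thread);         // threads needed along x
//   return Dims3{
//       ceil_div(dim0, block_dims.x), // blocks along the inner dimension
//       ceil_div(rest, block_dims.y), // blocks covering the outer dimensions
//       1};
// }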
@@ -376,4 +304,54 @@ void binary_op_gpu(
binary_op_gpu<cu::func>(inputs, out, name(), s); \
}

BINARY_GPU(Add)
BINARY_GPU(ArcTan2)
BINARY_GPU(Divide)
BINARY_GPU(Remainder)
BINARY_GPU(Greater)
BINARY_GPU(GreaterEqual)
BINARY_GPU(Less)
BINARY_GPU(LessEqual)
BINARY_GPU(LogicalAnd)
BINARY_GPU(LogicalOr)
BINARY_GPU(LogAddExp)
BINARY_GPU(Maximum)
BINARY_GPU(Minimum)
BINARY_GPU(Multiply)
BINARY_GPU(NotEqual)
BINARY_GPU(Power)
BINARY_GPU(Subtract)

void Equal::eval_gpu(const std::vector<array>& inputs, array& out) {
nvtx3::scoped_range r("Equal::eval_gpu");
auto& s = out.primitive().stream();
if (equal_nan_) {
binary_op_gpu<cu::NaNEqual>(inputs, out, name(), s);
} else {
binary_op_gpu<cu::Equal>(inputs, out, name(), s);
}
}

void BitwiseBinary::eval_gpu(const std::vector<array>& inputs, array& out) {
nvtx3::scoped_range r("BitwiseBinary::eval_gpu");
auto& s = out.primitive().stream();
switch (op_) {
case BitwiseBinary::And:
binary_op_gpu<cu::BitwiseAnd>(inputs, out, name(), s);
break;
case BitwiseBinary::Or:
binary_op_gpu<cu::BitwiseOr>(inputs, out, name(), s);
break;
case BitwiseBinary::Xor:
binary_op_gpu<cu::BitwiseXor>(inputs, out, name(), s);
break;
case BitwiseBinary::LeftShift:
binary_op_gpu<cu::LeftShift>(inputs, out, name(), s);
break;
case BitwiseBinary::RightShift:
binary_op_gpu<cu::RightShift>(inputs, out, name(), s);
break;
}
}

} // namespace mlx::core
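// Illustrative sketch (not part of the diff): only the tail of the BINARY_GPU
// macro is visible above. Judging from that tail and from the hand-written
// Equal::eval_gpu, a plausible reconstruction of the full macro is:
//
// #define BINARY_GPU(func)                                              \
//   void func::eval_gpu(const std::vector<array>& inputs, array& out) { \
//     nvtx3::scoped_range r(#func "::eval_gpu");                        \
//     auto& s = out.primitive().stream();                               \
//     binary_op_gpu<cu::func>(inputs, out, name(), s);                  \
//   }
//
// so that BINARY_GPU(Add) defines Add::eval_gpu dispatching to cu::Add.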
@@ -1,21 +0,0 @@
|
|||||||
target_sources(
|
|
||||||
mlx
|
|
||||||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/add.cu
|
|
||||||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/arctan2.cu
|
|
||||||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/bitwise_binary.cu
|
|
||||||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/divide.cu
|
|
||||||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/equal.cu
|
|
||||||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/greater.cu
|
|
||||||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/greater_equal.cu
|
|
||||||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/less.cu
|
|
||||||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/less_equal.cu
|
|
||||||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/logical_and.cu
|
|
||||||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/logical_or.cu
|
|
||||||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/log_add_exp.cu
|
|
||||||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/minimum.cu
|
|
||||||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/maximum.cu
|
|
||||||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/multiply.cu
|
|
||||||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/power.cu
|
|
||||||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/remainder.cu
|
|
||||||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/not_equal.cu
|
|
||||||
PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/subtract.cu)
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
|
||||||
|
|
||||||
#include "mlx/backend/cuda/binary/binary.cuh"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
BINARY_GPU(Add)
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
|
||||||
|
|
||||||
#include "mlx/backend/cuda/binary/binary.cuh"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
BINARY_GPU(ArcTan2)
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,27 +0,0 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
|
||||||
|
|
||||||
#include "mlx/backend/cuda/binary/binary.cuh"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
void BitwiseBinary::eval_gpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
nvtx3::scoped_range r("BitwiseBinary::eval_gpu");
|
|
||||||
auto& s = out.primitive().stream();
|
|
||||||
switch (op_) {
|
|
||||||
case BitwiseBinary::And:
|
|
||||||
binary_op_gpu<cu::BitwiseAnd>(inputs, out, name(), s);
|
|
||||||
break;
|
|
||||||
case BitwiseBinary::Or:
|
|
||||||
binary_op_gpu<cu::BitwiseOr>(inputs, out, name(), s);
|
|
||||||
break;
|
|
||||||
case BitwiseBinary::Xor:
|
|
||||||
binary_op_gpu<cu::BitwiseXor>(inputs, out, name(), s);
|
|
||||||
break;
|
|
||||||
case BitwiseBinary::LeftShift:
|
|
||||||
binary_op_gpu<cu::LeftShift>(inputs, out, name(), s);
|
|
||||||
break;
|
|
||||||
case BitwiseBinary::RightShift:
|
|
||||||
binary_op_gpu<cu::RightShift>(inputs, out, name(), s);
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
|
||||||
|
|
||||||
#include "mlx/backend/cuda/binary/binary.cuh"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
BINARY_GPU(Divide)
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,15 +0,0 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
|
||||||
|
|
||||||
#include "mlx/backend/cuda/binary/binary.cuh"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
void Equal::eval_gpu(const std::vector<array>& inputs, array& out) {
|
|
||||||
nvtx3::scoped_range r("Equal::eval_gpu");
|
|
||||||
auto& s = out.primitive().stream();
|
|
||||||
if (equal_nan_) {
|
|
||||||
binary_op_gpu<cu::NaNEqual>(inputs, out, name(), s);
|
|
||||||
} else {
|
|
||||||
binary_op_gpu<cu::Equal>(inputs, out, name(), s);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
|
||||||
|
|
||||||
#include "mlx/backend/cuda/binary/binary.cuh"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
BINARY_GPU(Greater)
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
|
||||||
|
|
||||||
#include "mlx/backend/cuda/binary/binary.cuh"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
BINARY_GPU(GreaterEqual)
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
|
||||||
|
|
||||||
#include "mlx/backend/cuda/binary/binary.cuh"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
BINARY_GPU(Less)
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
|
||||||
|
|
||||||
#include "mlx/backend/cuda/binary/binary.cuh"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
BINARY_GPU(LessEqual)
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
|
||||||
|
|
||||||
#include "mlx/backend/cuda/binary/binary.cuh"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
BINARY_GPU(LogAddExp)
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
|
||||||
|
|
||||||
#include "mlx/backend/cuda/binary/binary.cuh"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
BINARY_GPU(LogicalAnd)
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
|
||||||
|
|
||||||
#include "mlx/backend/cuda/binary/binary.cuh"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
BINARY_GPU(LogicalOr)
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
|
||||||
|
|
||||||
#include "mlx/backend/cuda/binary/binary.cuh"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
BINARY_GPU(Maximum)
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
|
||||||
|
|
||||||
#include "mlx/backend/cuda/binary/binary.cuh"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
BINARY_GPU(Minimum)
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
|
||||||
|
|
||||||
#include "mlx/backend/cuda/binary/binary.cuh"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
BINARY_GPU(Multiply)
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
|
||||||
|
|
||||||
#include "mlx/backend/cuda/binary/binary.cuh"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
BINARY_GPU(NotEqual)
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
|
||||||
|
|
||||||
#include "mlx/backend/cuda/binary/binary.cuh"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
BINARY_GPU(Power)
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
|
||||||
|
|
||||||
#include "mlx/backend/cuda/binary/binary.cuh"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
BINARY_GPU(Remainder)
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
|
||||||
|
|
||||||
#include "mlx/backend/cuda/binary/binary.cuh"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
BINARY_GPU(Subtract)
|
|
||||||
} // namespace mlx::core
|
|
||||||
@@ -127,99 +127,45 @@ binary_two_vv(const In* a, const In* b, Out* out_a, Out* out_b, IdxT size) {
}
}

template <
template <typename Op, typename In, typename Out, typename IdxT, int NDIM>
typename Op,
typename In,
typename Out,
typename IdxT,
int NDIM,
int N_READS>
__global__ void binary_two_g_nd(
const In* a,
const In* b,
Out* out_a,
Out* out_b,
IdxT size_rest,
IdxT size,
const __grid_constant__ cuda::std::array<int32_t, NDIM> shape,
const __grid_constant__ cuda::std::array<int64_t, NDIM> a_strides,
const __grid_constant__ cuda::std::array<int64_t, NDIM> b_strides) {
auto block = cg::this_thread_block();
IdxT index = cg::this_grid().thread_rank();
auto grid = cg::this_grid();
if (index < size) {
IdxT index_rest =
grid.block_index().y * block.dim_threads().y + block.thread_index().y;
if (index_rest >= size_rest) {
return;
}

auto shape_x = shape[NDIM - 1];
auto a_stride_x = a_strides[NDIM - 1];
auto b_stride_x = b_strides[NDIM - 1];
IdxT index_x =
grid.block_index().x * block.dim_threads().x + block.thread_index().x;
auto [a_idx, b_idx] = elem_to_loc_nd<NDIM>(
index_rest * shape_x, shape.data(), a_strides.data(), b_strides.data());
index, shape.data(), a_strides.data(), b_strides.data());
auto a_vec =
auto out = Op{}(a[a_idx], b[b_idx]);
load_vector<N_READS>(a + a_idx, index_x, shape_x, a_stride_x, In(0));
out_a[index] = out[0];
auto b_vec =
out_b[index] = out[1];
load_vector<N_READS>(b + b_idx, index_x, shape_x, b_stride_x, In(0));

AlignedVector<Out, N_READS> out_vec_a;
AlignedVector<Out, N_READS> out_vec_b;
#pragma unroll
for (int i = 0; i < N_READS; ++i) {
auto out = Op{}(a_vec[i], b_vec[i]);
out_vec_a[i] = out[0];
out_vec_b[i] = out[1];
}
store_vector(out_a + shape_x * index_rest, index_x, out_vec_a, shape_x);
store_vector(out_b + shape_x * index_rest, index_x, out_vec_b, shape_x);
}

template <typename Op, typename In, typename Out, typename IdxT, int N_READS>
template <typename Op, typename In, typename Out, typename IdxT>
__global__ void binary_two_g(
const In* a,
const In* b,
Out* out_a,
Out* out_b,
IdxT size_rest,
IdxT size,
const __grid_constant__ Shape shape,
const __grid_constant__ Strides a_strides,
const __grid_constant__ Strides b_strides,
int ndim) {
auto block = cg::this_thread_block();
IdxT index = cg::this_grid().thread_rank();
auto grid = cg::this_grid();
if (index < size) {
IdxT index_rest =
grid.block_index().y * block.dim_threads().y + block.thread_index().y;
if (index_rest >= size_rest) {
return;
}

auto shape_x = shape[ndim - 1];
auto a_stride_x = a_strides[ndim - 1];
auto b_stride_x = b_strides[ndim - 1];
IdxT index_x =
grid.block_index().x * block.dim_threads().x + block.thread_index().x;
auto [a_idx, b_idx] = elem_to_loc(
index_rest * shape_x,
index, shape.data(), a_strides.data(), b_strides.data(), ndim);
shape.data(),
auto out = Op{}(a[a_idx], b[b_idx]);
a_strides.data(),
out_a[index] = out[0];
b_strides.data(),
out_b[index] = out[1];
ndim);
auto a_vec =
load_vector<N_READS>(a + a_idx, index_x, shape_x, a_stride_x, In(0));
auto b_vec =
load_vector<N_READS>(b + b_idx, index_x, shape_x, b_stride_x, In(0));

AlignedVector<Out, N_READS> out_vec_a;
AlignedVector<Out, N_READS> out_vec_b;
#pragma unroll
for (int i = 0; i < N_READS; ++i) {
auto out = Op{}(a_vec[i], b_vec[i]);
out_vec_a[i] = out[0];
out_vec_b[i] = out[1];
}
store_vector(out_a + shape_x * index_rest, index_x, out_vec_a, shape_x);
store_vector(out_b + shape_x * index_rest, index_x, out_vec_b, shape_x);
}

template <typename Op, typename In, typename Out>
@@ -279,64 +225,42 @@ void binary_two_op_gpu_inplace(
auto& a_strides = strides[0];
auto& b_strides = strides[1];
int ndim = shape.size();
int work_per_thread = 1;
auto dim0 = ndim > 0 ? shape.back() : 1;
auto rest = out_a.size() / dim0;
if (dim0 >= 4) {
work_per_thread = 4;
}
dim0 = (dim0 + work_per_thread - 1) / work_per_thread;
auto block_dims = get_block_dims(dim0, rest, 1);
uint32_t num_blocks_x = cuda::ceil_div(dim0, block_dims.x);
uint32_t num_blocks_y = cuda::ceil_div(rest, block_dims.y);

if (ndim <= 3) {
dispatch_1_2_3(ndim, [&](auto dims_constant) {
auto kernel = cu::binary_two_g_nd<
auto [num_blocks, block_dims] =
Op,
get_launch_args(out_a, large());
InType,
OutType,
IdxT,
dims_constant(),
1>;
if (work_per_thread == 4) {
kernel = cu::binary_two_g_nd<
Op,
InType,
OutType,
IdxT,
dims_constant(),
4>;
}
encoder.add_kernel_node(
kernel,
cu::binary_two_g_nd<
{num_blocks_x, num_blocks_y},
Op,
InType,
OutType,
IdxT,
dims_constant()>,
num_blocks,
block_dims,
0,
a.data<InType>(),
b.data<InType>(),
out_a.data<OutType>(),
out_b.data<OutType>(),
rest,
out_a.size(),
const_param<dims_constant()>(shape),
const_param<dims_constant()>(a_strides),
const_param<dims_constant()>(b_strides));
});
} else {
auto kernel = cu::binary_two_g<Op, InType, OutType, IdxT, 1>;
auto [num_blocks, block_dims] =
if (work_per_thread == 4) {
get_launch_args(out_a, large());
kernel = cu::binary_two_g<Op, InType, OutType, IdxT, 4>;
}
encoder.add_kernel_node(
kernel,
cu::binary_two_g<Op, InType, OutType, IdxT>,
{num_blocks_x, num_blocks_y},
num_blocks,
block_dims,
0,
a.data<InType>(),
b.data<InType>(),
out_a.data<OutType>(),
out_b.data<OutType>(),
rest,
out_a.size(),
const_param(shape),
const_param(a_strides),
const_param(b_strides),
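// Illustrative sketch (not part of the diff): the binary_two kernels evaluate
// one functor that yields two results per element pair (the pattern a DivMod
// style primitive needs). A minimal functor of that shape, with hypothetical
// names, is:
//
// #include <cuda/std/array>
//
// struct DivModOp {
//   template <typename T>
//   __device__ cuda::std::array<T, 2> operator()(T a, T b) const {
//     // out[0] is the quotient, out[1] the remainder.
//     return {a / b, a % b};
//   }
// };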
@@ -267,8 +267,7 @@ void Compiled::eval_gpu(
}
}

return std::make_tuple(
return std::make_pair(std::move(builder.os), std::move(kernel_names));
false, std::move(builder.os), std::move(kernel_names));
});

// Collapse contiguous dims to route to a faster kernel if possible. Also
@@ -1,12 +1,18 @@
// Copyright © 2025 Apple Inc.

#include "mlx/backend/cuda/conv/conv.h"
#include "mlx/backend/cuda/cudnn_utils.h"
#include "mlx/backend/cuda/device.h"
#include "mlx/backend/cuda/device/config.h"
#include "mlx/backend/cuda/lru_cache.h"
#include "mlx/backend/gpu/copy.h"
#include "mlx/dtype_utils.h"
#include "mlx/primitives.h"

// cudnn_frontend.h redefines this macro.
#undef CHECK_CUDA_ERROR

#include <cudnn_frontend.h>
#include <cudnn_frontend_find_plan.h>
#include <fmt/format.h>
#include <nvtx3/nvtx3.hpp>

#include <cassert>
@@ -15,6 +21,9 @@ namespace mlx::core {

namespace {

// Not all engines support it so can not use this API now.
#define MLX_USE_CUDNN_NATIVE_CUDA_GRAPH_API 0

// Alias for better readability.
#define CONV_FORWARD CUDNN_BACKEND_OPERATION_CONVOLUTION_FORWARD_DESCRIPTOR
#define CONV_BACKWARD_INPUT \
@@ -22,9 +31,6 @@ namespace {
#define CONV_BACKWARD_WEIGHT \
CUDNN_BACKEND_OPERATION_CONVOLUTION_BACKWARD_FILTER_DESCRIPTOR

// Custom placeholder representing fallback kernel.
#define CONV_FALLBACK static_cast<cudnnBackendDescriptorType_t>(-1)

struct ConvCacheKey {
int device_id;
cudnnDataType_t cudnn_dtype;
@@ -44,13 +50,203 @@ struct ConvCacheKey {
auto& conv_cache() {
static LRUBytesKeyCache<
ConvCacheKey,
std::pair<
std::pair<cudnnBackendDescriptorType_t, cudnn_frontend::ExecutionPlan>>
cudnnBackendDescriptorType_t,
std::optional<cudnn_frontend::ExecutionPlan>>>
cache(/* capacity */ 128);
return cache;
}

template <typename T, typename Vec>
inline SmallVector<T> convert_vector(const Vec& vec) {
return SmallVector<T>(vec.begin(), vec.end());
}

template <typename T, template <typename U> class Vec>
inline std::array<T, MAX_NDIM> fixed_vector(const Vec<T>& vec) {
if (vec.size() > MAX_NDIM) {
throw std::runtime_error(
fmt::format("ndim can not be larger than {}.", MAX_NDIM));
}
std::array<T, MAX_NDIM> result = {};
std::copy_n(vec.begin(), vec.size(), result.begin());
return result;
}

auto nhwc_to_nchw(const array& x) {
auto shape = convert_vector<int64_t>(x.shape());
shape.insert(shape.begin() + 1, shape.back());
shape.erase(shape.end() - 1);
auto strides = convert_vector<int64_t>(x.strides());
strides.insert(strides.begin() + 1, strides.back());
strides.erase(strides.end() - 1);
return std::make_tuple(std::move(shape), std::move(strides));
}

inline cudnnDataType_t dtype_to_cudnn_type(Dtype dtype) {
switch (dtype) {
case int8:
return CUDNN_DATA_INT8;
case int32:
return CUDNN_DATA_INT32;
case uint8:
return CUDNN_DATA_UINT8;
case float16:
return CUDNN_DATA_HALF;
case bfloat16:
return CUDNN_DATA_BFLOAT16;
case float32:
return CUDNN_DATA_FLOAT;
case float64:
return CUDNN_DATA_DOUBLE;
default:
throw std::runtime_error(fmt::format(
"Unsupported dtype in Convolution: {}.", dtype_to_string(dtype)));
}
}

inline uint8_t get_alignment(const array& x) {
uint8_t alignment = 1;
uintptr_t address = reinterpret_cast<uintptr_t>(x.data<void>());
for (; alignment < 32; alignment *= 2) {
if (address % (alignment * 2)) {
return alignment;
}
}
return alignment;
}

inline cudnn_frontend::Tensor build_tensor(int64_t id, const array& x) {
auto [shape, strides] = nhwc_to_nchw(x);
return cudnn_frontend::TensorBuilder()
.setDim(shape.size(), shape.data())
.setStrides(strides.size(), strides.data())
.setId(id)
.setAlignment(get_alignment(x))
.setDataType(dtype_to_cudnn_type(x.dtype()))
.build();
}

cudnn_frontend::EngineConfigList get_engine_configs(
cudnnBackendDescriptorType_t backend_type,
Dtype dtype,
cudnn_frontend::OperationGraph& op_graph,
bool use_fallback = false) {
cudnn_frontend::GeneratorSource source;
if (use_fallback) {
source = [&backend_type](cudnn_frontend::OperationGraph& op_graph) {
auto fallback = cudnn_frontend::EngineFallbackListBuilder()
.setOperationGraph(op_graph)
.setOperation(backend_type)
.build();
return fallback.getFallbackList();
};
} else {
source = [](cudnn_frontend::OperationGraph& op_graph) {
auto heuristics = cudnn_frontend::EngineHeuristicsBuilder()
.setOperationGraph(op_graph)
.setHeurMode(CUDNN_HEUR_MODE_A)
.build();
return heuristics.getEngineConfig(heuristics.getEngineConfigCount());
};
}

cudnn_frontend::EngineConfigGenerator generator(1, &source);
auto configs = generator.generate_engine_config(op_graph);

cudnn_frontend::EngineConfigList filtered_configs;
cudnn_frontend::filter(configs, filtered_configs, [dtype](auto c) {
if (cudnn_frontend::hasNumericalNote<
CUDNN_NUMERICAL_NOTE_DOWN_CONVERT_INPUTS>(c)) {
return true;
}
if (cudnn_frontend::hasNumericalNote<CUDNN_NUMERICAL_NOTE_TENSOR_CORE>(c) &&
dtype == float32 && !env::enable_tf32()) {
return true;
}
return false;
});
return filtered_configs;
}

bool execute_plan(
cu::CommandEncoder& encoder,
cudnn_frontend::ExecutionPlan& plan,
array& x,
array& w,
array& y) {
int workspace_size = plan.getWorkspaceSize();
array workspace(allocator::malloc(workspace_size), {workspace_size}, uint8);

int64_t uids[3] = {'x', 'w', 'y'};
void* data_ptrs[3] = {
x.data<void>(),
w.data<void>(),
y.data<void>(),
};

auto variantPack = cudnn_frontend::VariantPackBuilder()
.setWorkspacePointer(workspace.data<void>())
.setDataPointers(3, data_ptrs)
.setUids(3, uids)
.build();

auto handle = encoder.device().cudnn_handle();
cudnnSetStream(handle, encoder.stream());

#if CUDNN_VERSION >= 90500 && MLX_USE_CUDNN_NATIVE_CUDA_GRAPH_API
cudaGraph_t graph;
cudaGraphCreate(&graph, 0);
std::unique_ptr<cudaGraph_t, void (*)(cudaGraph_t*)> graph_freer(
&graph, [](cudaGraph_t* p) { cudaGraphDestroy(*p); });
if (cudnnBackendPopulateCudaGraph(
handle, plan.get_raw_desc(), variantPack.get_raw_desc(), graph) !=
CUDNN_STATUS_SUCCESS) {
return false;
}
encoder.add_graph_node(graph);
#else
auto capture = encoder.capture_context();
if (cudnnBackendExecute(
handle, plan.get_raw_desc(), variantPack.get_raw_desc()) !=
CUDNN_STATUS_SUCCESS) {
// Discard the captured graph when failed.
capture.discard = true;
return false;
}
#endif

encoder.add_temporary(workspace);
return true;
}

bool try_engines(
cu::CommandEncoder& encoder,
const ConvCacheKey& cache_key,
cudnnBackendDescriptorType_t backend_type,
cudnn_frontend::EngineConfigList& configs,
const std::string& op_graph_tag,
array& x,
array& w,
array& y) {
for (auto& config : configs) {
try {
auto plan = cudnn_frontend::ExecutionPlanBuilder()
.setHandle(encoder.device().cudnn_handle())
.setEngineConfig(config, op_graph_tag)
.build();
if (execute_plan(encoder, plan, x, w, y)) {
conv_cache().emplace(
cache_key, std::make_pair(backend_type, std::move(plan)));
return true;
}
} catch (cudnn_frontend::cudnnException& error) {
if (error.getCudnnStatus() != CUDNN_STATUS_NOT_SUPPORTED) {
throw;
}
}
}
return false;
}

auto get_conv_op_settings(
cudnnBackendDescriptorType_t backend_type,
array& x,
@@ -95,7 +291,7 @@ auto get_conv_op_settings(
}
}

std::optional<cudnn_frontend::OperationGraph> build_conv_op_graph(
std::optional<cudnn_frontend::OperationGraph> build_op_graph(
cu::CommandEncoder& encoder,
cudnnBackendDescriptorType_t backend_type,
Dtype dtype,
@@ -121,9 +317,9 @@ std::optional<cudnn_frontend::OperationGraph> build_conv_op_graph(
.build();

auto op = cudnn_frontend::OperationBuilder(backend_type)
.setxDesc(build_cudnn_tensor_nchw('x', x))
.setxDesc(build_tensor('x', x))
.setwDesc(build_cudnn_tensor_nchw('w', w))
.setwDesc(build_tensor('w', w))
.setyDesc(build_cudnn_tensor_nchw('y', y))
.setyDesc(build_tensor('y', y))
.setcDesc(conv_desc)
.build();

@@ -140,42 +336,6 @@ std::optional<cudnn_frontend::OperationGraph> build_conv_op_graph(
}
}

// Transpose from (C_out, H, W, C_in / groups) to (C_in, H, W, C_out / groups).
array group_transpose(
const array& x,
int groups,
int group_dim,
int axis1,
int axis2,
Stream s) {
if (groups == 1) {
return swapaxes_in_eval(x, axis1, axis2);
}
int ndim = x.ndim();
if (group_dim < 0) {
group_dim += ndim;
}
if (axis1 < 0) {
axis1 += ndim;
}
if (axis2 < 0) {
axis2 += ndim;
}
if (group_dim <= axis1) {
axis1 += 1;
}
if (group_dim <= axis2) {
axis2 += 1;
}
auto shape = x.shape();
shape.insert(shape.begin() + group_dim, groups);
shape[group_dim + 1] = shape[group_dim + 1] / groups;
array x_trans = reshape_in_eval(x, std::move(shape), s);
x_trans = swapaxes_in_eval(x_trans, axis1, axis2);
x_trans = flatten_in_eval(x_trans, group_dim, group_dim + 1, s);
return x_trans;
}

// Do necessary transposes and copies to prepare the inputs and outputs for
// building the cuDNN conv op. It is safe to be called multiple times in one
// eval_gpu, with cost of possible redundant copies.
@@ -185,14 +345,13 @@ std::tuple<array, array, array> prepare_args(
array in,
array wt,
array out,
int groups,
Stream s) {
// Transpose the args depending on the backend type.
// TODO: Handle groups.
if (backend_type == CONV_BACKWARD_INPUT) {
wt = group_transpose(wt, groups, 0, 0, -1, s);
wt = swapaxes_in_eval(wt, 0, -1);
} else if (backend_type == CONV_BACKWARD_WEIGHT) {
in = group_transpose(in, groups, -1, 0, -1, s);
in = swapaxes_in_eval(in, 0, -1);
wt = swapaxes_in_eval(wt, 0, -1);
// Create a contiguous array that shares the data with |out|, but with dim
// C_in and C_out swapped.
@@ -285,12 +444,12 @@ void Convolution::eval_gpu(const std::vector<array>& inputs, array& out_) {
ConvCacheKey cache_key{
encoder.device().cuda_device(),
dtype_to_cudnn_type(dtype),
vector_key(in.shape()),
fixed_vector(in.shape()),
vector_key(wt.shape()),
fixed_vector(wt.shape()),
vector_key(kernel_strides_),
fixed_vector(kernel_strides_),
vector_key(padding_lo_),
fixed_vector(padding_lo_),
vector_key(padding_hi_),
fixed_vector(padding_hi_),
vector_key(kernel_dilation_),
fixed_vector(kernel_dilation_),
groups_,
flip_,
get_alignment(in),
@@ -298,30 +457,12 @@ void Convolution::eval_gpu(const std::vector<array>& inputs, array& out_) {
get_alignment(out)};
if (auto it = conv_cache().find(cache_key); it != conv_cache().end()) {
auto& [backend_type, plan] = it->second;
if (plan) {
std::tie(in, wt, out) = prepare_args(encoder, backend_type, in, wt, out, s);
// Run cached plan.
std::tie(in, wt, out) =
prepare_args(encoder, backend_type, in, wt, out, groups_, s);
register_args(encoder, backend_type, in, wt, out, out_);
auto [x, w, y] = dispatch_args(backend_type, in, wt, out);
if (!encode_cudnn_plan(encoder, *plan, {'x', 'w', 'y'}, x, w, y)) {
if (!execute_plan(encoder, plan, x, w, y)) {
throw std::runtime_error("[conv] Cached plan failed to execute.");
}
} else {
// Run fallback kernel.
gemm_conv(
encoder,
in,
wt,
out,
kernel_strides_,
padding_lo_,
kernel_dilation_,
input_dilation_,
groups_,
flip_,
s);
}
return;
}

@@ -349,7 +490,7 @@ void Convolution::eval_gpu(const std::vector<array>& inputs, array& out_) {
std::optional<cudnn_frontend::OperationGraph> op_graph;
for (auto try_backend : try_backends) {
auto [in_copy, wt_copy, out_copy] =
prepare_args(encoder, try_backend, in, wt, out, groups_, s);
prepare_args(encoder, try_backend, in, wt, out, s);
auto [x, w, y] = dispatch_args(try_backend, in_copy, wt_copy, out_copy);
auto [stride, padding_lo, padding_hi, dilation] = get_conv_op_settings(
try_backend,
@@ -361,7 +502,7 @@ void Convolution::eval_gpu(const std::vector<array>& inputs, array& out_) {
padding_hi_,
kernel_dilation_,
input_dilation_);
op_graph = build_conv_op_graph(
op_graph = build_op_graph(
encoder,
try_backend,
dtype,
@@ -380,39 +521,26 @@ void Convolution::eval_gpu(const std::vector<array>& inputs, array& out_) {
break;
}
}
if (!op_graph) {
throw std::runtime_error("[conv] Can not build op graph.");
}

if (op_graph) {
// Get ready to execute the graph.
// Setup inputs and outputs.
register_args(encoder, backend_type, in, wt, out, out_);

// Find a plan for the graph and execute it.
// Try to run plans based on heuristics.
auto plan = find_cudnn_plan_from_op_graph(
auto configs = get_engine_configs(backend_type, dtype, *op_graph);
encoder.device().cudnn_handle(), backend_type, dtype, *op_graph);
auto tag = op_graph->getTag();
if (!plan) {
throw std::runtime_error("[conv] Unable to find an execution plan.");
}
auto [x, w, y] = dispatch_args(backend_type, in, wt, out);
if (encode_cudnn_plan(encoder, *plan, {'x', 'w', 'y'}, x, w, y)) {
if (try_engines(encoder, cache_key, backend_type, configs, tag, x, w, y)) {
conv_cache().emplace(
cache_key, std::make_pair(backend_type, std::move(*plan)));
return;
}
// Then try fallback plans.
configs = get_engine_configs(backend_type, dtype, *op_graph);
if (try_engines(encoder, cache_key, backend_type, configs, tag, x, w, y)) {
return;
}
throw std::runtime_error("[conv] Unable to find a working engine.");
// Use fallback kernel for settings not supported by cuDNN.
gemm_conv(
encoder,
in,
wt,
out,
kernel_strides_,
padding_lo_,
kernel_dilation_,
input_dilation_,
groups_,
flip_,
s);
conv_cache().emplace(cache_key, std::make_pair(CONV_FALLBACK, std::nullopt));
}

} // namespace mlx::core
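// Illustrative sketch (not part of the diff): the v0.29.0 side caches either
// a working cuDNN execution plan or, via std::optional, a marker meaning "use
// the GEMM fallback", so unsupported configurations skip plan search on every
// later call. Reduced to hypothetical types, the pattern is:
//
// #include <map>
// #include <optional>
// #include <utility>
//
// enum class Backend { Forward, Fallback };
// struct Plan { /* opaque execution plan */ };
//
// std::map<int, std::pair<Backend, std::optional<Plan>>> cache;
//
// void run(int key) {
//   if (auto it = cache.find(key); it != cache.end()) {
//     auto& [backend, plan] = it->second;
//     if (plan) {
//       // execute the cached plan
//     } else {
//       // nullopt means: cuDNN cannot handle this config, run the fallback
//     }
//     return;
//   }
//   // ...search plans; on success cache {backend, plan},
//   // on failure cache {Backend::Fallback, std::nullopt}.
// }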
@@ -1,126 +0,0 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
|
||||||
|
|
||||||
#pragma once
|
|
||||||
|
|
||||||
#include "mlx/backend/cuda/device.h"
|
|
||||||
#include "mlx/backend/gpu/copy.h"
|
|
||||||
|
|
||||||
namespace mlx::core {
|
|
||||||
|
|
||||||
template <int NDIM>
|
|
||||||
struct ConvParams {
|
|
||||||
int N; // Batch size
|
|
||||||
int C; // In channels
|
|
||||||
int O; // Out channels
|
|
||||||
int strides[NDIM];
|
|
||||||
int padding[NDIM];
|
|
||||||
int kernel_dilation[NDIM];
|
|
||||||
int input_dilation[NDIM];
|
|
||||||
int groups;
|
|
||||||
bool flip;
|
|
||||||
int in_spatial_dims[NDIM];
|
|
||||||
int wt_spatial_dims[NDIM];
|
|
||||||
int out_spatial_dims[NDIM];
|
|
||||||
int64_t in_strides[NDIM + 2];
|
|
||||||
|
|
||||||
ConvParams(
|
|
||||||
const array& in,
|
|
||||||
const array& wt,
|
|
||||||
const array& out,
|
|
||||||
const std::vector<int>& strides,
|
|
||||||
const std::vector<int>& padding,
|
|
||||||
const std::vector<int>& kernel_dilation,
|
|
||||||
const std::vector<int>& input_dilation,
|
|
||||||
int groups,
|
|
||||||
bool flip)
|
|
||||||
: N(in.shape(0)),
|
|
||||||
C(in.shape(-1)),
|
|
||||||
O(wt.shape(0)),
|
|
||||||
groups(groups),
|
|
||||||
flip(flip) {
|
|
||||||
std::copy_n(strides.begin(), NDIM, this->strides);
|
|
||||||
std::copy_n(padding.begin(), NDIM, this->padding);
|
|
||||||
std::copy_n(kernel_dilation.begin(), NDIM, this->kernel_dilation);
|
|
||||||
std::copy_n(input_dilation.begin(), NDIM, this->input_dilation);
|
|
||||||
std::copy_n(in.shape().begin() + 1, NDIM, this->in_spatial_dims);
|
|
||||||
std::copy_n(wt.shape().begin() + 1, NDIM, this->wt_spatial_dims);
|
|
||||||
std::copy_n(out.shape().begin() + 1, NDIM, this->out_spatial_dims);
|
|
||||||
std::copy_n(in.strides().begin(), NDIM + 2, this->in_strides);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
void gemm_grouped_conv(
|
|
||||||
cu::CommandEncoder& encoder,
|
|
||||||
const array& in,
|
|
||||||
const array& wt,
|
|
||||||
array& out,
|
|
||||||
const std::vector<int>& strides,
|
|
||||||
const std::vector<int>& padding,
|
|
||||||
const std::vector<int>& kernel_dilation,
|
|
||||||
const std::vector<int>& input_dilation,
|
|
||||||
int groups,
|
|
||||||
bool flip,
|
|
||||||
Stream s);
|
|
||||||
|
|
||||||
void gemm_conv(
|
|
||||||
cu::CommandEncoder& encoder,
|
|
||||||
const array& in,
|
|
||||||
const array& wt,
|
|
||||||
array& out,
|
|
||||||
const std::vector<int>& strides,
|
|
||||||
const std::vector<int>& padding,
|
|
||||||
const std::vector<int>& kernel_dilation,
|
|
||||||
const std::vector<int>& input_dilation,
|
|
||||||
bool flip,
|
|
||||||
Stream s);
|
|
||||||
|
|
||||||
inline void gemm_conv(
|
|
||||||
cu::CommandEncoder& encoder,
|
|
||||||
array in,
|
|
||||||
array wt,
|
|
||||||
array& out,
|
|
||||||
const std::vector<int>& strides,
|
|
||||||
const std::vector<int>& padding,
|
|
||||||
const std::vector<int>& kernel_dilation,
|
|
||||||
const std::vector<int>& input_dilation,
|
|
||||||
int groups,
|
|
||||||
bool flip,
|
|
||||||
Stream s) {
|
|
||||||
if (!in.flags().row_contiguous) {
|
|
||||||
in = contiguous_copy_gpu(in, s);
|
|
||||||
encoder.add_temporary(in);
|
|
||||||
}
|
|
||||||
if (!wt.flags().row_contiguous) {
|
|
||||||
wt = contiguous_copy_gpu(wt, s);
|
|
||||||
encoder.add_temporary(wt);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (groups == 1) {
|
|
||||||
gemm_conv(
|
|
||||||
encoder,
|
|
||||||
in,
|
|
||||||
wt,
|
|
||||||
out,
|
|
||||||
strides,
|
|
||||||
padding,
|
|
||||||
kernel_dilation,
|
|
||||||
input_dilation,
|
|
||||||
flip,
|
|
||||||
s);
|
|
||||||
} else {
|
|
||||||
gemm_grouped_conv(
|
|
||||||
encoder,
|
|
||||||
in,
|
|
||||||
wt,
|
|
||||||
out,
|
|
||||||
strides,
|
|
||||||
padding,
|
|
||||||
kernel_dilation,
|
|
||||||
input_dilation,
|
|
||||||
groups,
|
|
||||||
flip,
|
|
||||||
s);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace mlx::core
|
|
||||||
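ConvParams above flattens the std::vector parameters into fixed-size arrays so the struct stays trivially copyable and can be passed by value as a __grid_constant__ kernel argument. A stripped-down sketch of that conversion, assuming (as ConvParams does) that the vectors hold at least NDIM entries:

#include <algorithm>
#include <cstdio>
#include <vector>

// Trivially copyable parameter block; NDIM is fixed at compile time,
// as required for pass-by-value CUDA kernel arguments.
template <int NDIM>
struct FixedParams {
  int strides[NDIM];
  int padding[NDIM];
  FixedParams(const std::vector<int>& s, const std::vector<int>& p) {
    std::copy_n(s.begin(), NDIM, strides);
    std::copy_n(p.begin(), NDIM, padding);
  }
};

int main() {
  FixedParams<2> params({2, 2}, {1, 1});
  std::printf("%d %d %d %d\n",
              params.strides[0], params.strides[1],
              params.padding[0], params.padding[1]); // 2 2 1 1
}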
@@ -1,217 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/conv/conv.h"
-#include "mlx/backend/cuda/gemms/cublas_gemm.h"
-#include "mlx/backend/cuda/kernel_utils.cuh"
-#include "mlx/dtype_utils.h"
-
-#include <cooperative_groups.h>
-
-namespace mlx::core {
-
-namespace cu {
-
-namespace cg = cooperative_groups;
-
-template <typename T, int NDIM>
-__global__ void naive_unfold_nd(
-    const T* in,
-    T* out,
-    int filter_size,
-    int out_pixels,
-    const __grid_constant__ ConvParams<NDIM> params) {
-  auto block = cg::this_thread_block();
-  auto tid = block.group_index();
-  auto lid = block.thread_index();
-
-  int index_batch = tid.z / out_pixels; // [0, N)
-  int index_out_spatial = tid.z % out_pixels; // [0, H_out * W_out)
-  int index_wt_spatial =
-      tid.x * block.dim_threads().x + lid.x; // [0, H_wt * W_wt)
-
-  if (index_wt_spatial >= filter_size / params.C) {
-    return;
-  }
-
-  in += tid.y; // [0, C)
-  out += tid.z * filter_size + index_wt_spatial * params.C + tid.y;
-
-  bool valid = index_batch < params.N;
-
-  // Get the coordinates in input.
-  int index_in[NDIM] = {};
-#pragma unroll
-  for (int i = NDIM - 1; i >= 0; --i) {
-    int index_out = index_out_spatial % params.out_spatial_dims[i];
-    int index_wt = index_wt_spatial % params.wt_spatial_dims[i];
-
-    if (params.flip) {
-      index_wt = params.wt_spatial_dims[i] - index_wt - 1;
-    }
-
-    int index = index_out * params.strides[i] - params.padding[i] +
-        index_wt * params.kernel_dilation[i];
-    int index_max =
-        1 + params.input_dilation[i] * (params.in_spatial_dims[i] - 1);
-
-    valid &= (index >= 0) && (index < index_max) &&
-        (index % params.input_dilation[i] == 0);
-
-    index_in[i] = index / params.input_dilation[i];
-
-    index_out_spatial /= params.out_spatial_dims[i];
-    index_wt_spatial /= params.wt_spatial_dims[i];
-  }
-
-  if (valid) {
-    int in_offset = index_batch * params.in_strides[0];
-#pragma unroll
-    for (int i = 0; i < NDIM; ++i) {
-      in_offset += index_in[i] * params.in_strides[i + 1];
-    }
-    *out = in[in_offset];
-  } else {
-    *out = T{0};
-  }
-}
-
-} // namespace cu
-
-template <int NDIM>
-array unfold_inputs_nd(
-    cu::CommandEncoder& encoder,
-    const array& in,
-    int mat_M,
-    int mat_K,
-    int mat_N,
-    ConvParams<NDIM>& params) {
-  array unfolded({mat_M, mat_K}, in.dtype(), nullptr, {});
-  unfolded.set_data(allocator::malloc(unfolded.nbytes()));
-  encoder.add_temporary(unfolded);
-
-  int filter_size = params.C;
-#pragma unroll
-  for (int i = 0; i < NDIM; ++i) {
-    filter_size *= params.wt_spatial_dims[i];
-  }
-
-  int out_pixels = 1;
-#pragma unroll
-  for (int i = 0; i < NDIM; ++i) {
-    out_pixels *= params.out_spatial_dims[i];
-  }
-
-  int wt_spatial_size = mat_K / params.C;
-  dim3 block_dims;
-  block_dims.x = std::min(std::max(wt_spatial_size, 32), 1024);
-  dim3 num_blocks;
-  num_blocks.x = cuda::ceil_div(wt_spatial_size, block_dims.x);
-  num_blocks.y = params.C;
-  num_blocks.z = mat_M;
-
-  encoder.set_input_array(in);
-  encoder.set_output_array(unfolded);
-  dispatch_float_types(in.dtype(), "unfold", [&](auto type_tag) {
-    using DataType = cuda_type_t<MLX_GET_TYPE(type_tag)>;
-    encoder.add_kernel_node(
-        cu::naive_unfold_nd<DataType, NDIM>,
-        num_blocks,
-        block_dims,
-        0,
-        in.data<DataType>(),
-        unfolded.data<DataType>(),
-        filter_size,
-        out_pixels,
-        params);
-  });
-
-  return unfolded;
-}
-
-template <int NDIM>
-void gemm_conv_nd(
-    cu::CommandEncoder& encoder,
-    const array& in,
-    const array& wt,
-    array& out,
-    ConvParams<NDIM>& params,
-    Stream s) {
-  // Get gemm shapes.
-  int mat_M = out.size() / params.O; // N * H_out * W_out
-  int mat_K = wt.size() / params.O; // C * H_wt * W_wt
-  int mat_N = params.O; // O
-
-  // Unfold input to (N * H_out * W_out, C * H_wt * W_wt) for gemm.
-  array in_unfolded =
-      unfold_inputs_nd<NDIM>(encoder, in, mat_M, mat_K, mat_N, params);
-
-  // Reshape weight to (C * H_wt * W_wt, O) for gemm.
-  array wt_reshaped({mat_K, mat_N}, wt.dtype(), nullptr, {});
-  wt_reshaped.copy_shared_buffer(
-      wt,
-      {1, mat_K},
-      {false, false, /* col_contiguous */ true},
-      wt.data_size());
-
-  // Single batch.
-  Shape batch_shape{1};
-  Strides a_batch_strides{0};
-  Strides b_batch_strides{0};
-
-  // Run matmul.
-  CublasGemm gemm(
-      encoder.device(),
-      in.dtype(),
-      false, // a_transposed
-      mat_M, // a_rows
-      mat_K, // a_cols
-      mat_K, // lda
-      true, // b_transposed
-      mat_K, // b_rows
-      mat_N, // b_cols
-      mat_K, // ldb
-      batch_shape.back(),
-      a_batch_strides.back(),
-      b_batch_strides.back());
-  gemm.run(
-      encoder,
-      out,
-      in_unfolded,
-      wt_reshaped,
-      batch_shape,
-      a_batch_strides,
-      b_batch_strides);
-}
-
-void gemm_conv(
-    cu::CommandEncoder& encoder,
-    const array& in,
-    const array& wt,
-    array& out,
-    const std::vector<int>& strides,
-    const std::vector<int>& padding,
-    const std::vector<int>& kernel_dilation,
-    const std::vector<int>& input_dilation,
-    bool flip,
-    Stream s) {
-  int conv_ndim = in.ndim() - 2;
-  if (conv_ndim < 1 || conv_ndim > 3) {
-    throw std::runtime_error(
-        fmt::format("[conv] Unsupported gemm_conv for {}D conv.", conv_ndim));
-  }
-  dispatch_1_2_3(conv_ndim, [&](auto ndim_constant) {
-    ConvParams<ndim_constant()> params(
-        in,
-        wt,
-        out,
-        strides,
-        padding,
-        kernel_dilation,
-        input_dilation,
-        1, // groups
-        flip);
-    gemm_conv_nd<ndim_constant()>(encoder, in, wt, out, params, s);
-  });
-}
-
-} // namespace mlx::core
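naive_unfold_nd is the im2col trick: materialize every receptive field as a row, and the convolution collapses into a single GEMM. A tiny CPU reference of the same idea for a 1D, single-channel, stride-1, valid-padding case (illustrative only):

#include <cstdio>

int main() {
  constexpr int in_size = 6, k = 3, out_size = in_size - k + 1;
  float in[in_size] = {1, 2, 3, 4, 5, 6};
  float wt[k] = {1, 0, -1};

  // Unfold: each row of `unfolded` is one receptive field (im2col).
  float unfolded[out_size][k];
  for (int i = 0; i < out_size; ++i)
    for (int j = 0; j < k; ++j)
      unfolded[i][j] = in[i + j];

  // The convolution is now a (out_size x k) * (k x 1) GEMM.
  for (int i = 0; i < out_size; ++i) {
    float acc = 0;
    for (int j = 0; j < k; ++j)
      acc += unfolded[i][j] * wt[j];
    std::printf("%g ", acc); // -2 -2 -2 -2
  }
  std::printf("\n");
}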
@@ -1,231 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/conv/conv.h"
-#include "mlx/backend/cuda/gemms/cublas_gemm.h"
-#include "mlx/backend/cuda/kernel_utils.cuh"
-#include "mlx/dtype_utils.h"
-
-#include <cooperative_groups.h>
-
-namespace mlx::core {
-
-namespace cu {
-
-namespace cg = cooperative_groups;
-
-template <typename T, int NDIM>
-__global__ void naive_grouped_unfold_transpose_nd(
-    const T* in,
-    T* out,
-    int filter_size,
-    int out_pixels,
-    const __grid_constant__ ConvParams<NDIM> params) {
-  auto block = cg::this_thread_block();
-  auto tid = block.group_index();
-  auto lid = block.thread_index();
-
-  int index_batch = tid.z / out_pixels; // [0, N)
-  int index_out_spatial = tid.z % out_pixels; // [0, H_out * W_out)
-  int index_wt_spatial =
-      tid.x * block.dim_threads().x + lid.x; // [0, H_wt * W_wt)
-
-  if (index_wt_spatial >= filter_size / params.C) {
-    return;
-  }
-
-  in += tid.y; // [0, C)
-  out += tid.z * filter_size + tid.y * (filter_size / params.C);
-
-  bool valid = index_batch < params.N;
-
-  // Get the coordinates in input.
-  int index_in[NDIM] = {};
-  int wt_stride = 1;
-#pragma unroll
-  for (int i = NDIM - 1; i >= 0; --i) {
-    int index_out = index_out_spatial % params.out_spatial_dims[i];
-    int index_wt = index_wt_spatial % params.wt_spatial_dims[i];
-    out += index_wt * wt_stride;
-
-    if (params.flip) {
-      index_wt = params.wt_spatial_dims[i] - index_wt - 1;
-    }
-
-    int index = index_out * params.strides[i] - params.padding[i] +
-        index_wt * params.kernel_dilation[i];
-    int index_max =
-        1 + params.input_dilation[i] * (params.in_spatial_dims[i] - 1);
-
-    valid &= (index >= 0) && (index < index_max) &&
-        (index % params.input_dilation[i] == 0);
-
-    index_in[i] = index / params.input_dilation[i];
-
-    index_out_spatial /= params.out_spatial_dims[i];
-    index_wt_spatial /= params.wt_spatial_dims[i];
-    wt_stride *= params.wt_spatial_dims[i];
-  }
-
-  if (valid) {
-    int in_offset = index_batch * params.in_strides[0];
-#pragma unroll
-    for (int i = 0; i < NDIM; ++i) {
-      in_offset += index_in[i] * params.in_strides[i + 1];
-    }
-    *out = in[in_offset];
-  } else {
-    *out = T{0};
-  }
-}
-
-} // namespace cu
-
-template <int NDIM>
-array grouped_unfold_transpose_inputs_nd(
-    cu::CommandEncoder& encoder,
-    const array& in,
-    int mat_M,
-    int mat_K,
-    int mat_N,
-    ConvParams<NDIM>& params) {
-  array unfolded({mat_M, mat_K * params.groups}, in.dtype(), nullptr, {});
-  unfolded.set_data(allocator::malloc(unfolded.nbytes()));
-  encoder.add_temporary(unfolded);
-
-  int filter_size = params.C;
-#pragma unroll
-  for (int i = 0; i < NDIM; ++i) {
-    filter_size *= params.wt_spatial_dims[i];
-  }
-
-  int out_pixels = 1;
-#pragma unroll
-  for (int i = 0; i < NDIM; ++i) {
-    out_pixels *= params.out_spatial_dims[i];
-  }
-
-  int wt_spatial_size = (mat_K * params.groups) / params.C;
-  dim3 block_dims;
-  block_dims.x = std::min(std::max(wt_spatial_size, 32), 1024);
-  dim3 num_blocks;
-  num_blocks.x = cuda::ceil_div(wt_spatial_size, block_dims.x);
-  num_blocks.y = params.C;
-  num_blocks.z = mat_M;
-
-  encoder.set_input_array(in);
-  encoder.set_output_array(unfolded);
-  dispatch_float_types(in.dtype(), "unfold", [&](auto type_tag) {
-    using DataType = cuda_type_t<MLX_GET_TYPE(type_tag)>;
-    encoder.add_kernel_node(
-        cu::naive_grouped_unfold_transpose_nd<DataType, NDIM>,
-        num_blocks,
-        block_dims,
-        0,
-        in.data<DataType>(),
-        unfolded.data<DataType>(),
-        filter_size,
-        out_pixels,
-        params);
-  });
-
-  return unfolded;
-}
-
-template <int NDIM>
-void gemm_grouped_conv_nd(
-    cu::CommandEncoder& encoder,
-    const array& in,
-    const array& wt,
-    array& out,
-    ConvParams<NDIM>& params,
-    Stream s) {
-  // Get gemm shapes.
-  int C_per_group = params.C / params.groups;
-  int O_per_group = params.O / params.groups;
-  int mat_M = out.size() / params.O; // N * H_out * W_out
-  int mat_K = wt.size() / params.O; // C_per_group * H_wt * W_wt
-  int mat_N = O_per_group; // O_per_group
-
-  // Unfold input to (N * H_out * W_out, C * H_wt * W_wt) for gemm.
-  array in_unfolded = grouped_unfold_transpose_inputs_nd<NDIM>(
-      encoder, in, mat_M, mat_K, mat_N, params);
-
-  // Reshape weight to (O, C_per_group, H_wt * W_wt) for gemm.
-  int wt_spatial_size = (wt.size() / wt.shape(0)) / wt.shape(-1);
-  array wt_view(
-      {params.O, C_per_group, wt_spatial_size}, wt.dtype(), nullptr, {});
-  wt_view.copy_shared_buffer(
-      wt, {wt.strides(0), 1, C_per_group}, wt.flags(), wt.size());
-  array wt_reshaped = contiguous_copy_gpu(wt_view, s);
-
-  // Batch with size of groups.
-  Shape batch_shape{params.groups};
-  Strides a_batch_strides{mat_K};
-  Strides b_batch_strides{mat_N * mat_K};
-
-  // Run matmul.
-  CublasGemm gemm(
-      encoder.device(),
-      in.dtype(),
-      false, // a_transposed
-      mat_M, // a_rows
-      mat_K, // a_cols
-      mat_K * params.groups, // lda
-      true, // b_transposed
-      mat_K, // b_rows
-      mat_N, // b_cols
-      mat_K, // ldb
-      batch_shape.back(),
-      a_batch_strides.back(),
-      b_batch_strides.back());
-  gemm.set_out(
-      out.dtype(),
-      false, // out_transposed
-      mat_M, // out_rows
-      mat_N, // out_cols
-      mat_N * params.groups, // out_ld
-      params.groups, // batch_count
-      mat_N); // batch_stride
-  gemm.run(
-      encoder,
-      out,
-      in_unfolded,
-      wt_reshaped,
-      batch_shape,
-      a_batch_strides,
-      b_batch_strides);
-}
-
-void gemm_grouped_conv(
-    cu::CommandEncoder& encoder,
-    const array& in,
-    const array& wt,
-    array& out,
-    const std::vector<int>& strides,
-    const std::vector<int>& padding,
-    const std::vector<int>& kernel_dilation,
-    const std::vector<int>& input_dilation,
-    int groups,
-    bool flip,
-    Stream s) {
-  int conv_ndim = in.ndim() - 2;
-  if (conv_ndim < 1 || conv_ndim > 3) {
-    throw std::runtime_error(
-        fmt::format("[conv] Unsupported gemm_conv for {}D conv.", conv_ndim));
-  }
-  dispatch_1_2_3(conv_ndim, [&](auto ndim_constant) {
-    ConvParams<ndim_constant()> params(
-        in,
-        wt,
-        out,
-        strides,
-        padding,
-        kernel_dilation,
-        input_dilation,
-        groups,
-        flip);
-    gemm_grouped_conv_nd<ndim_constant()>(encoder, in, wt, out, params, s);
-  });
-}
-
-} // namespace mlx::core
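The grouped path runs one strided-batched GEMM instead of a loop of per-group GEMMs: group g reads a column slice of the unfolded input (so lda spans all groups), a contiguous per-group weight block, and writes a column slice of the output (out_ld = mat_N * groups). A loop-nest stand-in for that layout with illustrative sizes:

#include <cstdio>
#include <vector>

int main() {
  const int groups = 2, M = 2, N = 1, K = 2;
  // A is M x (K * groups), row-major; B is groups blocks of (N x K).
  std::vector<float> A = {1, 2, 3, 4,
                          5, 6, 7, 8};
  std::vector<float> B = {1, 1,   // group 0 weights (N x K)
                          2, 2};  // group 1 weights
  std::vector<float> C(M * N * groups, 0.f);
  for (int g = 0; g < groups; ++g)
    for (int m = 0; m < M; ++m)
      for (int n = 0; n < N; ++n) {
        float acc = 0.f;
        for (int k = 0; k < K; ++k)
          acc += A[m * (K * groups) + g * K + k] * B[g * N * K + n * K + k];
        C[m * (N * groups) + g * N + n] = acc; // out_ld = N * groups
      }
  for (float v : C)
    std::printf("%g ", v); // 3 14 11 30
  std::printf("\n");
}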
@@ -15,8 +15,8 @@ void copy_gpu_inplace(
     int64_t offset_out,
     CopyType ctype,
     const Stream& s,
-    std::optional<array> dynamic_offset_in,
-    std::optional<array> dynamic_offset_out) {
+    const std::optional<array>& dynamic_offset_in,
+    const std::optional<array>& dynamic_offset_out) {
   if (out.size() == 0) {
     return;
   }
@@ -44,16 +44,6 @@ void copy_gpu_inplace(
         strides_vec[0]);
   } else {
     if (dynamic_offset_in || dynamic_offset_out) {
-      if (!dynamic_offset_in) {
-        dynamic_offset_in = array(0, int64);
-        encoder.add_temporary(*dynamic_offset_in);
-      }
-      if (!dynamic_offset_out) {
-        dynamic_offset_out = array(0, int64);
-        encoder.add_temporary(*dynamic_offset_out);
-      }
-      encoder.set_input_array(*dynamic_offset_in);
-      encoder.set_input_array(*dynamic_offset_out);
       copy_general_dynamic(
           encoder,
           ctype,
@@ -64,8 +54,8 @@ void copy_gpu_inplace(
           shape_collapsed,
           strides_vec[0],
          strides_vec[1],
-          *dynamic_offset_in,
-          *dynamic_offset_out);
+          dynamic_offset_in ? *dynamic_offset_in : array(0, int64),
+          dynamic_offset_out ? *dynamic_offset_out : array(0, int64));
     } else {
       copy_general(
           encoder,
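The hunk above stops mutating the optional parameters to backfill defaults (which forced pass-by-value) and instead takes const references, materializing the zero-offset default only at the call site. A minimal sketch of the same pattern:

#include <iostream>
#include <optional>

// Const-ref optional: the caller's object is never modified; the default
// is produced where it is actually consumed.
void use_offset(const std::optional<int>& off) {
  int v = off ? *off : 0;
  std::cout << v << "\n";
}

int main() {
  use_offset(std::nullopt); // 0
  use_offset(42);           // 42
}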
@@ -10,80 +10,37 @@ namespace cu {
 
 namespace cg = cooperative_groups;
 
-template <typename In, typename Out, typename IdxT, int NDIM, int N_READS>
+template <typename In, typename Out, typename IdxT, int NDIM>
 __global__ void copy_gg_nd(
     const In* in,
     Out* out,
-    IdxT size_rest,
+    IdxT size,
     const __grid_constant__ cuda::std::array<int32_t, NDIM> shape,
     const __grid_constant__ cuda::std::array<int64_t, NDIM> strides_in,
     const __grid_constant__ cuda::std::array<int64_t, NDIM> strides_out) {
-  auto block = cg::this_thread_block();
-  auto grid = cg::this_grid();
-  IdxT index_rest =
-      grid.block_index().y * block.dim_threads().y + block.thread_index().y;
-  if (index_rest >= size_rest) {
-    return;
-  }
-
-  auto shape_x = shape[NDIM - 1];
-  auto in_stride_x = strides_in[NDIM - 1];
-  auto out_stride_x = strides_out[NDIM - 1];
-  IdxT index_x =
-      grid.block_index().x * block.dim_threads().x + block.thread_index().x;
-  auto [idx_in, idx_out] = elem_to_loc_nd<NDIM>(
-      index_rest * shape_x,
-      shape.data(),
-      strides_in.data(),
-      strides_out.data());
-
-  auto in_vec =
-      load_vector<N_READS>(in + idx_in, index_x, shape_x, in_stride_x, In(0));
-  AlignedVector<Out, N_READS> out_vec;
-#pragma unroll
-  for (int i = 0; i < N_READS; ++i) {
-    out_vec[i] = CastOp<In, Out>{}(in_vec[i]);
-  }
-  store_vector(out + idx_out, index_x, out_vec, shape_x, out_stride_x);
+  IdxT index = cg::this_grid().thread_rank();
+  if (index < size) {
+    auto [idx_in, idx_out] = elem_to_loc_nd<NDIM>(
+        index, shape.data(), strides_in.data(), strides_out.data());
+    out[idx_out] = CastOp<In, Out>{}(in[idx_in]);
+  }
 }
 
-template <typename In, typename Out, typename IdxT, int N_READS>
+template <typename In, typename Out, typename IdxT>
 __global__ void copy_gg(
     const In* in,
     Out* out,
-    IdxT size_rest,
+    IdxT size,
     const __grid_constant__ Shape shape,
     const __grid_constant__ Strides strides_in,
     const __grid_constant__ Strides strides_out,
     int ndim) {
-  auto block = cg::this_thread_block();
-  auto grid = cg::this_grid();
-  IdxT index_rest =
-      grid.block_index().y * block.dim_threads().y + block.thread_index().y;
-  if (index_rest >= size_rest) {
-    return;
-  }
-
-  auto shape_x = shape[ndim - 1];
-  auto in_stride_x = strides_in[ndim - 1];
-  auto out_stride_x = strides_out[ndim - 1];
-  IdxT index_x =
-      grid.block_index().x * block.dim_threads().x + block.thread_index().x;
-  auto [idx_in, idx_out] = elem_to_loc(
-      index_rest * shape_x,
-      shape.data(),
-      strides_in.data(),
-      strides_out.data(),
-      ndim);
-
-  auto in_vec =
-      load_vector<N_READS>(in + idx_in, index_x, shape_x, in_stride_x, In(0));
-  AlignedVector<Out, N_READS> out_vec;
-#pragma unroll
-  for (int i = 0; i < N_READS; ++i) {
-    out_vec[i] = CastOp<In, Out>{}(in_vec[i]);
-  }
-  store_vector(out + idx_out, index_x, out_vec, shape_x, out_stride_x);
+  IdxT index = cg::this_grid().thread_rank();
+  if (index < size) {
+    auto [idx_in, idx_out] = elem_to_loc(
+        index, shape.data(), strides_in.data(), strides_out.data(), ndim);
+    out[idx_out] = CastOp<In, Out>{}(in[idx_in]);
+  }
 }
 
 } // namespace cu
@@ -112,52 +69,33 @@ void copy_general(
   size_t data_size = 1;
   for (auto& s : shape)
     data_size *= s;
 
-  int work_per_thread = 1;
-  auto dim0 = ndim > 0 ? shape.back() : 1;
-  auto rest = data_size / dim0;
-  if (dim0 >= 4) {
-    work_per_thread = 4;
-  }
-
-  dim0 = (dim0 + work_per_thread - 1) / work_per_thread;
-  auto block_dims = get_block_dims(dim0, rest, 1);
-  uint32_t num_blocks_x = cuda::ceil_div(dim0, block_dims.x);
-  uint32_t num_blocks_y = cuda::ceil_div(rest, block_dims.y);
-
   if (ndim <= 3) {
     dispatch_1_2_3(ndim, [&](auto ndim_constant) {
-      auto kernel =
-          cu::copy_gg_nd<InType, OutType, IdxT, ndim_constant(), 1>;
-      if (work_per_thread == 4) {
-        kernel =
-            cu::copy_gg_nd<InType, OutType, IdxT, ndim_constant(), 4>;
-      }
+      auto [num_blocks, block_dims] =
+          get_launch_args(data_size, shape, out.strides(), large());
       encoder.add_kernel_node(
-          kernel,
-          {num_blocks_x, num_blocks_y},
+          cu::copy_gg_nd<InType, OutType, IdxT, ndim_constant()>,
+          num_blocks,
           block_dims,
          0,
          in_ptr,
          out_ptr,
-          rest,
+          data_size,
          const_param<ndim_constant()>(shape),
          const_param<ndim_constant()>(strides_in),
          const_param<ndim_constant()>(strides_out));
     });
   } else { // ndim >= 4
-    auto kernel = cu::copy_gg<InType, OutType, IdxT, 1>;
-    if (work_per_thread == 4) {
-      kernel = cu::copy_gg<InType, OutType, IdxT, 4>;
-    }
+    auto [num_blocks, block_dims] =
+        get_launch_args(data_size, shape, out.strides(), large());
     encoder.add_kernel_node(
-        kernel,
-        {num_blocks_x, num_blocks_y},
+        cu::copy_gg<InType, OutType, IdxT>,
+        num_blocks,
        block_dims,
        0,
        in_ptr,
        out_ptr,
-        rest,
+        data_size,
        const_param(shape),
        const_param(strides_in),
        const_param(strides_out),
@@ -10,67 +10,33 @@ namespace cu {
 
 namespace cg = cooperative_groups;
 
-template <typename In, typename Out, typename IdxT, int NDIM, int N_READS>
+template <typename In, typename Out, typename IdxT, int NDIM>
 __global__ void copy_g_nd(
     const In* in,
     Out* out,
-    IdxT size_rest,
+    IdxT size,
     const __grid_constant__ cuda::std::array<int32_t, NDIM> shape,
-    const __grid_constant__ cuda::std::array<int64_t, NDIM> strides) {
-  auto block = cg::this_thread_block();
-  auto grid = cg::this_grid();
-  IdxT index_rest =
-      grid.block_index().y * block.dim_threads().y + block.thread_index().y;
-  if (index_rest >= size_rest) {
-    return;
-  }
-
-  auto shape_x = shape[NDIM - 1];
-  auto stride_x = strides[NDIM - 1];
-  IdxT index_x =
-      grid.block_index().x * block.dim_threads().x + block.thread_index().x;
-  auto idx =
-      elem_to_loc_nd<NDIM>(index_rest * shape_x, shape.data(), strides.data());
-  auto in_vec =
-      load_vector<N_READS>(in + idx, index_x, shape_x, stride_x, In(0));
-  AlignedVector<Out, N_READS> out_vec;
-#pragma unroll
-  for (int i = 0; i < N_READS; ++i) {
-    out_vec[i] = CastOp<In, Out>{}(in_vec[i]);
-  }
-  store_vector(out + shape_x * index_rest, index_x, out_vec, shape_x);
+    const __grid_constant__ cuda::std::array<int64_t, NDIM> strides_in) {
+  IdxT index = cg::this_grid().thread_rank();
+  if (index < size) {
+    IdxT idx_in = elem_to_loc_nd<NDIM>(index, shape.data(), strides_in.data());
+    out[index] = CastOp<In, Out>{}(in[idx_in]);
+  }
 }
 
-template <typename In, typename Out, typename IdxT, int N_READS>
+template <typename In, typename Out, typename IdxT>
 __global__ void copy_g(
     const In* in,
     Out* out,
-    IdxT size_rest,
+    IdxT size,
     const __grid_constant__ Shape shape,
-    const __grid_constant__ Strides strides,
+    const __grid_constant__ Strides strides_in,
     int ndim) {
-  auto block = cg::this_thread_block();
-  auto grid = cg::this_grid();
-  IdxT index_rest =
-      grid.block_index().y * block.dim_threads().y + block.thread_index().y;
-  if (index_rest >= size_rest) {
-    return;
-  }
-
-  auto shape_x = shape[ndim - 1];
-  auto stride_x = strides[ndim - 1];
-  IdxT index_x =
-      grid.block_index().x * block.dim_threads().x + block.thread_index().x;
-  auto idx =
-      elem_to_loc(index_rest * shape_x, shape.data(), strides.data(), ndim);
-  auto in_vec =
-      load_vector<N_READS>(in + idx, index_x, shape_x, stride_x, In(0));
-  AlignedVector<Out, N_READS> out_vec;
-#pragma unroll
-  for (int i = 0; i < N_READS; ++i) {
-    out_vec[i] = CastOp<In, Out>{}(in_vec[i]);
-  }
-  store_vector(out + shape_x * index_rest, index_x, out_vec, shape_x);
+  IdxT index = cg::this_grid().thread_rank();
+  if (index < size) {
+    IdxT idx_in = elem_to_loc(index, shape.data(), strides_in.data(), ndim);
+    out[index] = CastOp<In, Out>{}(in[idx_in]);
+  }
 }
 
 } // namespace cu
@@ -95,49 +61,30 @@ void copy_general_input(
   const InType* in_ptr = in.data<InType>() + offset_in;
   OutType* out_ptr = out.data<OutType>() + offset_out;
   int ndim = shape.size();
-  int work_per_thread = 1;
-  auto dim0 = ndim > 0 ? shape.back() : 1;
-  auto rest = out.size() / dim0;
-  if (dim0 >= 4) {
-    work_per_thread = 4;
-  }
-  dim0 = (dim0 + work_per_thread - 1) / work_per_thread;
-  auto block_dims = get_block_dims(dim0, rest, 1);
-  uint32_t num_blocks_x = cuda::ceil_div(dim0, block_dims.x);
-  uint32_t num_blocks_y = cuda::ceil_div(rest, block_dims.y);
-
   if (ndim <= 3) {
     dispatch_1_2_3(ndim, [&](auto dims_constant) {
-      auto kernel =
-          cu::copy_g_nd<InType, OutType, IdxT, dims_constant(), 1>;
-      if (work_per_thread == 4) {
-        kernel =
-            cu::copy_g_nd<InType, OutType, IdxT, dims_constant(), 4>;
-      }
+      auto [num_blocks, block_dims] = get_launch_args(out, large());
      encoder.add_kernel_node(
-          kernel,
-          {num_blocks_x, num_blocks_y},
+          cu::copy_g_nd<InType, OutType, IdxT, dims_constant()>,
+          num_blocks,
          block_dims,
          0,
          in_ptr,
          out_ptr,
-          rest,
+          out.size(),
          const_param<dims_constant()>(shape),
          const_param<dims_constant()>(strides_in));
     });
   } else { // ndim >= 4
-    auto kernel = cu::copy_g<InType, OutType, IdxT, 1>;
-    if (work_per_thread == 4) {
-      kernel = cu::copy_g<InType, OutType, IdxT, 4>;
-    }
+    auto [num_blocks, block_dims] = get_launch_args(out, large());
     encoder.add_kernel_node(
-        kernel,
-        {num_blocks_x, num_blocks_y},
+        cu::copy_g<InType, OutType, IdxT>,
+        num_blocks,
        block_dims,
        0,
        in_ptr,
        out_ptr,
-        rest,
+        out.size(),
        const_param(shape),
        const_param(strides_in),
        ndim);
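Both simplified kernels map one thread to one element through an elem_to_loc-style translation from a row-major linear index to a strided offset. A CPU sketch with a simplified single-array signature (the copy_gg variants return an input/output offset pair):

#include <cstdint>
#include <cstdio>

// Map a row-major linear index into an offset for an arbitrarily
// strided array with the given shape.
int64_t elem_to_loc(
    int64_t index, const int* shape, const int64_t* strides, int ndim) {
  int64_t loc = 0;
  for (int i = ndim - 1; i >= 0; --i) {
    loc += (index % shape[i]) * strides[i];
    index /= shape[i];
  }
  return loc;
}

int main() {
  int shape[2] = {2, 3};
  int64_t strides[2] = {1, 2}; // A transposed (column-major) 2x3 array.
  for (int64_t i = 0; i < 6; ++i)
    std::printf("%lld ", (long long)elem_to_loc(i, shape, strides, 2));
  std::printf("\n"); // 0 2 4 1 3 5
}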
@@ -1,272 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/cudnn_utils.h"
-#include "mlx/backend/cuda/device.h"
-
-namespace mlx::core {
-
-namespace {
-
-// Create a cudnn tensor descriptor.
-template <typename Vec>
-inline cudnn_frontend::Tensor build_cudnn_tensor(
-    int64_t id,
-    const array& x,
-    const Vec& shape,
-    const Vec& strides) {
-  return cudnn_frontend::TensorBuilder()
-      .setDim(shape.size(), shape.data())
-      .setStrides(strides.size(), strides.data())
-      .setId(id)
-      .setAlignment(get_alignment(x))
-      .setDataType(dtype_to_cudnn_type(x.dtype()))
-      .build();
-}
-
-// In MLX a singleton dim (shape[dim] == 1) can have any stride, but in cuDNN
-// whether a tensor is contiguous is determined with:
-//   strides[dim] == shape[dim + 1] * strides[dim + 1]
-// So a contiguous array with singleton dims in MLX may be mistakenly treated
-// as strided in cuDNN, and we work around it by normalizing the strides.
-Strides normalized_strides(const array& x) {
-  if (!x.flags().row_contiguous || x.ndim() < 2) {
-    return x.strides();
-  }
-  Strides strides = x.strides();
-  for (int i = x.ndim() - 2; i >= 0; --i) {
-    if (x.shape(i) == 1) {
-      strides[i] = x.shape(i + 1) * strides[i + 1];
-    }
-  }
-  return strides;
-}
-
-// Return the shape and strides after transposing from NHWC to NCHW.
-auto nhwc_to_nchw(SmallVector<int64_t> shape, SmallVector<int64_t> strides) {
-  assert(shape.size() >= 3);
-  shape.insert(shape.begin() + 1, shape.back());
-  shape.erase(shape.end() - 1);
-  strides.insert(strides.begin() + 1, strides.back());
-  strides.erase(strides.end() - 1);
-  return std::make_tuple(std::move(shape), std::move(strides));
-}
-
-inline auto nhwc_to_nchw(const array& x) {
-  return nhwc_to_nchw(
-      convert_vector<int64_t>(x.shape()), normalized_strides(x));
-}
-
-// Return available engines for |op_graph|.
-cudnn_frontend::EngineConfigList get_cudnn_engine_configs(
-    cudnnBackendDescriptorType_t backend_type,
-    Dtype dtype,
-    cudnn_frontend::OperationGraph& op_graph,
-    bool use_fallback = true) {
-  SmallVector<cudnn_frontend::GeneratorSource, 2> sources;
-  sources.push_back([](auto& op_graph) {
-    auto heuristics = cudnn_frontend::EngineHeuristicsBuilder()
-                          .setOperationGraph(op_graph)
-                          .setHeurMode(CUDNN_HEUR_MODE_A)
-                          .build();
-    return heuristics.getEngineConfig(heuristics.getEngineConfigCount());
-  });
-  if (use_fallback) {
-    sources.push_back([&backend_type](auto& op_graph) {
-      auto fallback = cudnn_frontend::EngineFallbackListBuilder()
-                          .setOperationGraph(op_graph)
-                          .setOperation(backend_type)
-                          .build();
-      return fallback.getFallbackList();
-    });
-  }
-
-  auto configs =
-      cudnn_frontend::EngineConfigGenerator(sources.size(), sources.data())
-          .generate_engine_config(op_graph);
-
-  cudnn_frontend::EngineConfigList filtered_configs;
-  cudnn_frontend::filter(configs, filtered_configs, [dtype](auto c) {
-    if (cudnn_frontend::hasNumericalNote<
-            CUDNN_NUMERICAL_NOTE_DOWN_CONVERT_INPUTS>(c)) {
-      return true;
-    }
-    if (cudnn_frontend::hasNumericalNote<CUDNN_NUMERICAL_NOTE_TENSOR_CORE>(c) &&
-        dtype == float32 && !env::enable_tf32()) {
-      return true;
-    }
-    return false;
-  });
-  return filtered_configs;
-}
-
-// Take |engine_configs| and |op_graph| and find a working execution plan
-// from them.
-std::optional<cudnn_frontend::ExecutionPlan>
-find_cudnn_plan_from_engine_configs(
-    cudnnHandle_t handle,
-    const cudnn_frontend::EngineConfigList& engine_configs,
-    const cudnn_frontend::OperationGraph& op_graph) {
-  auto op_graph_tag = op_graph.getTag();
-  for (const auto& config : engine_configs) {
-    try {
-      return cudnn_frontend::ExecutionPlanBuilder()
-          .setHandle(handle)
-          .setEngineConfig(config, op_graph_tag)
-          .build();
-    } catch (cudnn_frontend::cudnnException& error) {
-      if (error.getCudnnStatus() != CUDNN_STATUS_NOT_SUPPORTED) {
-        throw;
-      }
-    }
-  }
-  return std::nullopt;
-}
-
-// Prepare workspace and args to execute plan.
-template <typename F>
-bool prepare_cudnn_plan(
-    cu::CommandEncoder& encoder,
-    cudnn_frontend::ExecutionPlan& plan,
-    int num_args,
-    const int64_t* uids,
-    void** data_ptrs,
-    F&& execute) {
-  int workspace_size = plan.getWorkspaceSize();
-  array workspace(
-      workspace_size > 0 ? allocator::malloc(workspace_size)
-                         : allocator::Buffer(nullptr),
-      {workspace_size},
-      uint8);
-
-  auto args = cudnn_frontend::VariantPackBuilder()
-                  .setWorkspacePointer(workspace.data<void>())
-                  .setDataPointers(num_args, data_ptrs)
-                  .setUids(num_args, uids)
-                  .build();
-
-  auto handle = encoder.device().cudnn_handle();
-  cudnnSetStream(handle, encoder.stream());
-
-  if (!execute(handle, plan.get_raw_desc(), args.get_raw_desc())) {
-    return false;
-  }
-
-  encoder.add_temporary(workspace);
-  return true;
-}
-
-} // namespace
-
-cudnn_frontend::Tensor build_cudnn_tensor(int64_t id, const array& x) {
-  auto shape = convert_vector<int64_t>(x.shape());
-  return build_cudnn_tensor(id, x, shape, normalized_strides(x));
-}
-
-cudnn_frontend::Tensor build_cudnn_tensor_nchw(int64_t id, const array& x) {
-  auto [shape, strides] = nhwc_to_nchw(x);
-  return build_cudnn_tensor(id, x, shape, strides);
-}
-
-cudnn_frontend::Tensor build_cudnn_tensor_4d_nchw(int64_t id, const array& x) {
-  if (x.ndim() == 0) {
-    SmallVector<int64_t, 4> scalar_dims = {1, 1, 1, 1};
-    return build_cudnn_tensor(id, x, scalar_dims, scalar_dims);
-  }
-  if (x.ndim() == 1) {
-    int64_t s = x.shape(0);
-    SmallVector<int64_t, 4> shape = {1, x.shape(0), 1, 1};
-    SmallVector<int64_t, 4> strides = {s, 1, s, s};
-    return build_cudnn_tensor(id, x, shape, strides);
-  }
-  if (x.ndim() == 2) {
-    int64_t s =
-        x.flags().row_contiguous ? x.shape(1) * x.strides(1) : x.strides(0);
-    SmallVector<int64_t, 4> shape = {x.shape(0), x.shape(1), 1, 1};
-    SmallVector<int64_t, 4> strides = {s, x.strides(1), s, s};
-    return build_cudnn_tensor(id, x, shape, strides);
-  }
-  if (x.ndim() == 3 || x.ndim() == 4) {
-    return build_cudnn_tensor_nchw(id, x);
-  }
-  throw std::runtime_error(
-      fmt::format("Unsupported array with {} dims.", x.ndim()));
-}
-
-cudnn_frontend::Tensor build_cudnn_scalar_4d(int64_t id, Dtype dtype) {
-  SmallVector<int64_t, 4> scalar_dims = {1, 1, 1, 1};
-  return cudnn_frontend::TensorBuilder()
-      .setDim(scalar_dims.size(), scalar_dims.data())
-      .setStrides(scalar_dims.size(), scalar_dims.data())
-      .setId(id)
-      .setAlignment(16)
-      .setDataType(dtype_to_cudnn_type(dtype))
-      .setByValue(true)
-      .build();
-}
-
-std::optional<cudnn_frontend::ExecutionPlan> find_cudnn_plan_from_op_graph(
-    cudnnHandle_t handle,
-    cudnnBackendDescriptorType_t backend_type,
-    Dtype dtype,
-    cudnn_frontend::OperationGraph& op_graph) {
-  auto engine_configs = get_cudnn_engine_configs(backend_type, dtype, op_graph);
-  return find_cudnn_plan_from_engine_configs(handle, engine_configs, op_graph);
-}
-
-bool encode_cudnn_plan_with_capturing(
-    cu::CommandEncoder& encoder,
-    cudnn_frontend::ExecutionPlan& plan,
-    int num_args,
-    const int64_t* uids,
-    void** data_ptrs) {
-  return prepare_cudnn_plan(
-      encoder,
-      plan,
-      num_args,
-      uids,
-      data_ptrs,
-      [&](auto handle, auto plan, auto args) {
-        auto capture = encoder.capture_context();
-        if (cudnnBackendExecute(handle, plan, args) != CUDNN_STATUS_SUCCESS) {
-          // Discard the captured graph when execution fails.
-          capture.discard = true;
-          return false;
-        }
-        return true;
-      });
-}
-
-#if CUDNN_VERSION >= 90500
-bool encode_cudnn_plan_with_graph_api(
-    cu::CommandEncoder& encoder,
-    cudnn_frontend::ExecutionPlan& plan,
-    CudaGraph& graph,
-    int num_args,
-    const int64_t* uids,
-    void** data_ptrs) {
-  return prepare_cudnn_plan(
-      encoder,
-      plan,
-      num_args,
-      uids,
-      data_ptrs,
-      [&](auto handle, auto plan, auto args) {
-        if (!graph) {
-          graph = CudaGraph(encoder.device());
-          if (cudnnBackendPopulateCudaGraph(handle, plan, args, graph) !=
-              CUDNN_STATUS_SUCCESS) {
-            return false;
-          }
-        } else {
-          if (cudnnBackendUpdateCudaGraph(handle, plan, args, graph) !=
-              CUDNN_STATUS_SUCCESS) {
-            return false;
-          }
-        }
-        encoder.add_graph_node(graph);
-        return true;
-      });
-}
-#endif
-
-} // namespace mlx::core
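get_alignment (declared in the header below) probes the largest power of two, capped at 32, that divides the data pointer; cuDNN uses this to pick vectorized kernels. The same logic as a standalone sketch:

#include <cstdint>
#include <cstdio>

uint8_t alignment_of(const void* p) {
  auto address = reinterpret_cast<uintptr_t>(p);
  uint8_t alignment = 1;
  for (; alignment < 32; alignment *= 2) {
    if (address % (alignment * 2)) {
      return alignment; // First power of two that fails to divide.
    }
  }
  return alignment; // Aligned to 32 bytes or better.
}

int main() {
  alignas(32) char buf[64];
  std::printf("%u %u\n",
              (unsigned)alignment_of(buf),
              (unsigned)alignment_of(buf + 4)); // 32 4
}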
@@ -1,164 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#pragma once
-
-#include "mlx/array.h"
-#include "mlx/backend/cuda/device/config.h"
-#include "mlx/backend/cuda/utils.h"
-#include "mlx/dtype_utils.h"
-
-#include <cudnn_frontend.h>
-#include <cudnn_frontend_find_plan.h>
-#include <fmt/format.h>
-
-#include <algorithm>
-#include <array>
-
-namespace mlx::core {
-
-namespace cu {
-class CommandEncoder;
-}
-
-// Return pointer alignment of |x|'s data.
-inline uint8_t get_alignment(const array& x) {
-  uint8_t alignment = 1;
-  uintptr_t address = reinterpret_cast<uintptr_t>(x.data<void>());
-  for (; alignment < 32; alignment *= 2) {
-    if (address % (alignment * 2)) {
-      return alignment;
-    }
-  }
-  return alignment;
-}
-
-// Convert the type of elements in |vec| to |T|.
-template <typename T, typename Vec>
-inline SmallVector<T> convert_vector(const Vec& vec) {
-  return SmallVector<T>(vec.begin(), vec.end());
-}
-
-// Return an array that can be used as map key for |vec| with size <= MAX_NDIM.
-//
-// There are 2 differences from the const_param util from kernel_utils.cuh:
-// 1. The rest of array is filled with 0.
-// 2. This util can be used in .cpp files.
-template <typename T, template <typename U> class Vec>
-inline std::array<T, MAX_NDIM> vector_key(const Vec<T>& vec) {
-  if (vec.size() > MAX_NDIM) {
-    throw std::runtime_error(
-        fmt::format("ndim can not be larger than {}.", MAX_NDIM));
-  }
-  std::array<T, MAX_NDIM> result = {};
-  std::copy_n(vec.begin(), vec.size(), result.begin());
-  return result;
-}
-
-// Helpers used by get_data_ptrs to get pointers.
-inline void* get_data_ptr(const array& arr) {
-  return const_cast<void*>(arr.data<void>());
-}
-
-template <typename T, typename = std::enable_if_t<std::is_scalar_v<T>>>
-inline void* get_data_ptr(T& scalar) {
-  return &scalar;
-}
-
-// Return an array filled with data pointers of args.
-template <typename... Args>
-inline std::array<void*, sizeof...(Args)> get_data_ptrs(Args&... args) {
-  return {get_data_ptr(args)...};
-}
-
-// Map dtype to cudnn data type.
-inline cudnnDataType_t dtype_to_cudnn_type(Dtype dtype) {
-  switch (dtype) {
-    case int8:
-      return CUDNN_DATA_INT8;
-    case int32:
-      return CUDNN_DATA_INT32;
-    case uint8:
-      return CUDNN_DATA_UINT8;
-    case float16:
-      return CUDNN_DATA_HALF;
-    case bfloat16:
-      return CUDNN_DATA_BFLOAT16;
-    case float32:
-      return CUDNN_DATA_FLOAT;
-    case float64:
-      return CUDNN_DATA_DOUBLE;
-    default:
-      throw std::runtime_error(fmt::format(
-          "Unsupported dtype in Convolution: {}.", dtype_to_string(dtype)));
-  }
-}
-
-// Create a tensor descriptor from |x|.
-cudnn_frontend::Tensor build_cudnn_tensor(int64_t id, const array& x);
-
-// Create a tensor descriptor from |x|, and transpose from NHWC to NCHW.
-cudnn_frontend::Tensor build_cudnn_tensor_nchw(int64_t id, const array& x);
-
-// Create a tensor descriptor from |x|, make sure it is 4D, and transpose it
-// from NHWC to NCHW.
-cudnn_frontend::Tensor build_cudnn_tensor_4d_nchw(int64_t id, const array& x);
-
-// Create a 4D scalar tensor descriptor, which is passed by value.
-cudnn_frontend::Tensor build_cudnn_scalar_4d(int64_t id, Dtype dtype);
-
-// Find a working plan for |op_graph|.
-std::optional<cudnn_frontend::ExecutionPlan> find_cudnn_plan_from_op_graph(
-    cudnnHandle_t handle,
-    cudnnBackendDescriptorType_t backend_type,
-    Dtype dtype,
-    cudnn_frontend::OperationGraph& op_graph);
-
-// Encode the plan to command buffer by capturing.
-bool encode_cudnn_plan_with_capturing(
-    cu::CommandEncoder& encoder,
-    cudnn_frontend::ExecutionPlan& plan,
-    int num_args,
-    const int64_t* uids,
-    void** data_ptrs);
-
-#if CUDNN_VERSION >= 90500
-// Encode the plan to command buffer by using native graph api of cudnn. If the
-// |graph| is empty it will be populated, otherwise it will be updated.
-bool encode_cudnn_plan_with_graph_api(
-    cu::CommandEncoder& encoder,
-    cudnn_frontend::ExecutionPlan& plan,
-    CudaGraph& graph,
-    int num_args,
-    const int64_t* uids,
-    void** data_ptrs);
-#endif
-
-// Helpers to make calls like encode_cudnn_plan(..., {'x', 'y', 'z'}, x, y, z).
-template <typename... Args>
-bool encode_cudnn_plan(
-    cu::CommandEncoder& encoder,
-    cudnn_frontend::ExecutionPlan& plan,
-    std::initializer_list<int64_t> uids,
-    Args&... args) {
-  assert(uids.size() == sizeof...(args));
-  auto data_ptrs = get_data_ptrs(args...);
-  return encode_cudnn_plan_with_capturing(
-      encoder, plan, uids.size(), uids.begin(), data_ptrs.data());
-}
-
-#if CUDNN_VERSION >= 90500
-template <typename... Args>
-bool encode_cudnn_plan(
-    cu::CommandEncoder& encoder,
-    cudnn_frontend::ExecutionPlan& plan,
-    CudaGraph& graph,
-    std::initializer_list<int64_t> uids,
-    Args&... args) {
-  assert(uids.size() == sizeof...(args));
-  auto data_ptrs = get_data_ptrs(args...);
-  return encode_cudnn_plan_with_graph_api(
-      encoder, plan, graph, uids.size(), uids.begin(), data_ptrs.data());
-}
-#endif
-
-} // namespace mlx::core
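The encode_cudnn_plan convenience wrappers pair each uid positionally with the address of the matching argument through the variadic get_data_ptrs. A self-contained sketch of that pointer-gathering trick (scalars only, hypothetical values):

#include <array>
#include <cstdint>
#include <cstdio>

template <typename T>
void* get_data_ptr(T& scalar) {
  return &scalar;
}

// Expand the pack into a fixed-size array of raw pointers.
template <typename... Args>
std::array<void*, sizeof...(Args)> get_data_ptrs(Args&... args) {
  return {get_data_ptr(args)...};
}

int main() {
  float x = 1.f, w = 2.f, y = 0.f;
  auto ptrs = get_data_ptrs(x, w, y);
  std::int64_t uids[] = {'x', 'w', 'y'}; // uids[i] names ptrs[i].
  for (int i = 0; i < 3; ++i)
    std::printf("%c -> %g\n", (char)uids[i], *(float*)ptrs[i]);
}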
@@ -1,379 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include <iostream>
-
-#include "mlx/backend/common/compiled.h"
-#include "mlx/backend/cuda/jit_module.h"
-#include "mlx/backend/cuda/utils.h"
-#include "mlx/backend/gpu/copy.h"
-#include "mlx/fast.h"
-#include "mlx/fast_primitives.h"
-
-#include <fmt/format.h>
-#include <nvtx3/nvtx3.hpp>
-
-namespace mlx::core::fast {
-
-namespace {
-
-constexpr const char* default_header = R"(
-#include "mlx/backend/cuda/device/utils.cuh"
-
-#include <cooperative_groups.h>
-
-#define inf cuda::std::numeric_limits<float>::infinity()
-
-)";
-
-std::string template_arguments_hash(
-    const std::vector<std::pair<std::string, TemplateArg>>& template_args) {
-  if (template_args.empty()) {
-    return "";
-  }
-
-  std::string hash;
-  hash.reserve(512);
-
-  for (const auto& [name, arg] : template_args) {
-    if (std::holds_alternative<int>(arg)) {
-      hash += fmt::format("_{}", std::get<int>(arg));
-    } else if (std::holds_alternative<bool>(arg)) {
-      hash += (std::get<bool>(arg)) ? "_t" : "_f";
-    } else if (std::holds_alternative<Dtype>(arg)) {
-      hash += "_";
-      hash += get_type_string(std::get<Dtype>(arg));
-    }
-  }
-
-  return hash;
-}
-
-std::string build_kernel(
-    const std::string& func_name,
-    const std::string& header,
-    const std::string& source,
-    const std::vector<std::string>& input_names,
-    const std::vector<array>& inputs,
-    const std::vector<std::string>& output_names,
-    const std::vector<Dtype>& output_dtypes,
-    const std::vector<std::pair<std::string, TemplateArg>>& template_args,
-    const std::vector<CustomKernelShapeInfo>& shape_infos) {
-  std::string kernel_source;
-  kernel_source.reserve(header.size() + source.size() + 8192);
-  kernel_source += default_header;
-  kernel_source += header;
-  kernel_source +=
-      "namespace mlx::core::cu {\n\n"
-      "namespace cg = cooperative_groups;\n\n";
-
-  kernel_source += "__global__ void ";
-  kernel_source += func_name;
-  kernel_source += "(\n";
-
-  // Add inputs
-  for (int i = 0; i < inputs.size(); ++i) {
-    const auto& name = input_names[i];
-    const auto& arr = inputs[i];
-    kernel_source += "  const ";
-    kernel_source += dtype_to_cuda_type(arr.dtype());
-    kernel_source += "* ";
-    kernel_source += name;
-    kernel_source += ",\n";
-    // Add input shape, strides and ndim if present in the source
-    if (arr.ndim() > 0) {
-      if (shape_infos[i].shape) {
-        kernel_source += "  const __grid_constant__ Shape ";
-        kernel_source += name;
-        kernel_source += "_shape,\n";
-      }
-      if (shape_infos[i].strides) {
-        kernel_source += "  const __grid_constant__ Strides ";
-        kernel_source += name;
-        kernel_source += "_strides,\n";
-      }
-      if (shape_infos[i].ndim) {
-        kernel_source += "  const __grid_constant__ int ";
-        kernel_source += name;
-        kernel_source += "_ndim,\n";
-      }
-    }
-  }
-
-  // Add outputs
-  for (int i = 0; i < output_names.size(); ++i) {
-    const auto& name = output_names[i];
-    const auto& dtype = output_dtypes[i];
-    kernel_source += "  ";
-    kernel_source += dtype_to_cuda_type(dtype);
-    kernel_source += "* ";
-    kernel_source += name;
-    if (i < output_names.size() - 1) {
-      kernel_source += ",\n";
-    } else {
-      kernel_source += ") {\n";
-    }
-  }
-
-  // Set compile time constants
-  if (!template_args.empty()) {
-    for (const auto& [name, arg] : template_args) {
-      if (std::holds_alternative<int>(arg)) {
-        kernel_source +=
-            fmt::format("  constexpr int {} = {};\n", name, std::get<int>(arg));
-      } else if (std::holds_alternative<bool>(arg)) {
-        kernel_source += fmt::format(
-            "  constexpr bool {} = {};\n", name, std::get<bool>(arg));
-      } else {
-        kernel_source += fmt::format(
-            "  using {} = {};\n",
-            name,
-            dtype_to_cuda_type(std::get<Dtype>(arg)));
-      }
-    }
-    kernel_source += "\n";
-  }
-
-  kernel_source += source;
-  kernel_source += "\n}\n\n} // namespace mlx::core::cu\n";
-
-  return kernel_source;
-}
-
-} // namespace
-
-CustomKernelFunction cuda_kernel(
-    const std::string& name,
-    const std::vector<std::string>& input_names,
-    const std::vector<std::string>& output_names,
-    const std::string& source,
-    const std::string& header,
-    bool ensure_row_contiguous,
-    int shared_memory) {
-  if (output_names.empty()) {
-    throw std::invalid_argument(
-        "[custom_kernel] Must specify at least one output.");
-  }
-
-  std::vector<CustomKernelShapeInfo> shape_infos;
-  for (auto& n : input_names) {
-    CustomKernelShapeInfo shape_info;
-    shape_info.shape = source.find(n + "_shape") != std::string::npos;
-    shape_info.strides = source.find(n + "_strides") != std::string::npos;
-    shape_info.ndim = source.find(n + "_ndim") != std::string::npos;
-    shape_infos.push_back(shape_info);
-  }
-
-  return [=, shape_infos = std::move(shape_infos)](
-             const std::vector<array>& inputs,
-             const std::vector<Shape>& output_shapes,
-             const std::vector<Dtype>& output_dtypes,
-             std::tuple<int, int, int> grid,
-             std::tuple<int, int, int> threadgroup,
-             const std::vector<std::pair<std::string, TemplateArg>>&
-                 template_args = {},
-             std::optional<float> init_value = std::nullopt,
-             bool verbose = false,
-             StreamOrDevice s_ = {}) {
-    if (inputs.size() != input_names.size()) {
-      std::ostringstream msg;
-      msg << "[custom_kernel] Expected `inputs` to have size "
-          << input_names.size() << " but got size " << inputs.size() << "."
-          << std::endl;
-      throw std::invalid_argument(msg.str());
-    }
-    if (output_shapes.size() != output_names.size()) {
-      std::ostringstream msg;
-      msg << "[custom_kernel] Expected `output_shapes` to have size "
-          << output_names.size() << " but got size " << output_shapes.size()
-          << "." << std::endl;
-      throw std::invalid_argument(msg.str());
-    }
-    if (output_dtypes.size() != output_names.size()) {
-      std::ostringstream msg;
-      msg << "[custom_kernel] Expected `output_dtypes` to have size "
-          << output_names.size() << " but got size " << output_dtypes.size()
-          << "." << std::endl;
-      throw std::invalid_argument(msg.str());
-    }
-
-    auto s = to_stream(s_);
-    if (s.device != Device::gpu) {
-      throw std::invalid_argument("[custom_kernel] Only supports the GPU.");
-    }
-
-    std::string kernel_name =
-        "custom_kernel_" + name + template_arguments_hash(template_args);
-    std::string kernel_source = build_kernel(
-        kernel_name,
-        header,
-        source,
-        input_names,
-        inputs,
-        output_names,
-        output_dtypes,
-        template_args,
-        shape_infos);
-
-    if (verbose) {
-      std::cout << "Generated source code for `" << kernel_name
-                << "`:" << std::endl
-                << "```" << std::endl
-                << kernel_source << std::endl
-                << "```" << std::endl;
-    }
-
-    return array::make_arrays(
-        std::move(output_shapes),
-        std::move(output_dtypes),
-        std::make_shared<CustomKernel>(
-            s,
-            std::move(kernel_name),
-            std::move(kernel_source),
-            grid,
-            threadgroup,
-            shape_infos,
-            ensure_row_contiguous,
-            init_value,
-            std::vector<ScalarArg>{},
-            false,
-            shared_memory),
-        std::move(inputs));
-  };
-}
-
-std::vector<array> precompiled_cuda_kernel(
-    const std::string& name,
-    const std::string& compiled_source,
-    const std::vector<array>& inputs,
-    const std::vector<Shape>& output_shapes,
-    const std::vector<Dtype>& output_dtypes,
-    const std::vector<ScalarArg>& scalars,
-    std::tuple<int, int, int> grid,
-    std::tuple<int, int, int> threadgroup,
-    int shared_memory,
-    std::optional<float> init_value,
-    bool ensure_row_contiguous,
-    StreamOrDevice s) {
-  std::vector<CustomKernelShapeInfo> shape_infos(
-      inputs.size(), CustomKernelShapeInfo{false, false, false});
-  return array::make_arrays(
-      output_shapes,
-      output_dtypes,
-      std::make_shared<CustomKernel>(
-          to_stream(s),
-          name,
-          compiled_source,
-          grid,
-          threadgroup,
-          shape_infos,
-          ensure_row_contiguous,
-          init_value,
-          scalars,
-          true,
-          shared_memory),
-      inputs);
-}
-
-void CustomKernel::eval_gpu(
-    const std::vector<array>& inputs,
-    std::vector<array>& outputs) {
-  nvtx3::scoped_range r("CustomKernel::eval_gpu");
-  auto& s = stream();
-
-  std::vector<array> copies;
-
-  // Allocate and initialize the output arrays
-  for (auto& out : outputs) {
-    if (init_value_) {
-      copies.emplace_back(init_value_.value(), out.dtype());
-      fill_gpu(copies.back(), out, s);
-    } else {
-      out.set_data(allocator::malloc(out.nbytes()));
-    }
-  }
-
-  // Create the input arrays and copy if needed
-  auto check_input = [&copies, &s, this](const array& x) -> const array {
-    bool no_copy = x.flags().row_contiguous;
-    if (!ensure_row_contiguous_ || no_copy) {
-      return x;
-    } else {
-      copies.push_back(array(x.shape(), x.dtype(), nullptr, {}));
-      copy_gpu(x, copies.back(), CopyType::General, s);
-      return copies.back();
|
|
||||||
}
|
|
||||||
};
|
|
||||||
std::vector<array> checked_inputs;
|
|
||||||
for (const array& in : inputs) {
|
|
||||||
checked_inputs.push_back(check_input(in));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compile the custom kernel
|
|
||||||
std::string kernel_name =
|
|
||||||
(is_precompiled_) ? name_ : "mlx::core::cu::" + name_;
|
|
||||||
cu::JitModule& mod = cu::get_jit_module(
|
|
||||||
s.device,
|
|
||||||
name_,
|
|
||||||
[&]() {
|
|
||||||
return std::make_tuple(
|
|
||||||
is_precompiled_, source_, std::vector{kernel_name});
|
|
||||||
},
|
|
||||||
false);
|
|
||||||
|
|
||||||
// Make the arguments
|
|
||||||
cu::KernelArgs args;
|
|
||||||
for (int i = 0; i < checked_inputs.size(); i++) {
|
|
||||||
const array& in = checked_inputs[i];
|
|
||||||
auto& shape_info = shape_infos_[i];
|
|
||||||
args.append(in);
|
|
||||||
if (shape_info.shape) {
|
|
||||||
args.append_ndim(in.shape());
|
|
||||||
}
|
|
||||||
if (shape_info.strides) {
|
|
||||||
args.append_ndim(in.strides());
|
|
||||||
}
|
|
||||||
if (shape_info.ndim) {
|
|
||||||
args.append<int32_t>(in.ndim());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for (auto& out : outputs) {
|
|
||||||
args.append(out);
|
|
||||||
}
|
|
||||||
for (auto& s : scalar_arguments_) {
|
|
||||||
if (std::holds_alternative<bool>(s)) {
|
|
||||||
args.append(std::get<bool>(s));
|
|
||||||
} else if (std::holds_alternative<int>(s)) {
|
|
||||||
args.append(std::get<int>(s));
|
|
||||||
} else if (std::holds_alternative<float>(s)) {
|
|
||||||
args.append(std::get<float>(s));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make the grid
|
|
||||||
const auto [tx, ty, tz] = threadgroup_;
|
|
||||||
const auto [gx, gy, gz] = grid_;
|
|
||||||
dim3 block(std::min(tx, gx), std::min(ty, gy), std::min(tz, gz));
|
|
||||||
dim3 grid((gx + tx - 1) / tx, (gy + ty - 1) / ty, (gz + tz - 1) / tz);
|
|
||||||
|
|
||||||
// Call the kernel
|
|
||||||
auto& encoder = cu::get_command_encoder(s);
|
|
||||||
for (const auto& in : checked_inputs) {
|
|
||||||
encoder.set_input_array(in);
|
|
||||||
}
|
|
||||||
for (const auto& out : outputs) {
|
|
||||||
encoder.set_output_array(out);
|
|
||||||
}
|
|
||||||
for (const auto& t : copies) {
|
|
||||||
encoder.add_temporary(t);
|
|
||||||
}
|
|
||||||
auto kernel =
|
|
||||||
mod.get_kernel(kernel_name, [smem = shared_memory_](CUfunction kernel) {
|
|
||||||
if (smem > 0 && smem > 48000) {
|
|
||||||
cuFuncSetAttribute(
|
|
||||||
kernel, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, smem);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
encoder.add_kernel_node(kernel, grid, block, shared_memory_, args.args());
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace mlx::core::fast
|
|
||||||
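For orientation, a minimal usage sketch of the fast::cuda_kernel entry point
deleted above. The kernel name, body, and launch sizes are illustrative
assumptions and are not taken from this diff.

    // Hypothetical element-wise kernel; `x` is an existing mlx::core::array.
    auto fn = mlx::core::fast::cuda_kernel(
        "plus_one",
        /* input_names = */ {"inp"},
        /* output_names = */ {"out"},
        /* source = */
        "auto idx = blockIdx.x * blockDim.x + threadIdx.x;\n"
        "out[idx] = inp[idx] + 1.0f;",
        /* header = */ "",
        /* ensure_row_contiguous = */ true,
        /* shared_memory = */ 0);
    auto outs = fn(
        {x},          // inputs
        {x.shape()},  // output_shapes
        {x.dtype()},  // output_dtypes
        {1024, 1, 1}, // grid
        {256, 1, 1}); // threadgroup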
@@ -29,18 +29,11 @@ void check_cudnn_error(const char* name, cudnnStatus_t err) {
 int cuda_graph_cache_size() {
   static int cache_size = []() {
-    return env::get_var("MLX_CUDA_GRAPH_CACHE_SIZE", 400);
+    return env::get_var("MLX_CUDA_GRAPH_CACHE_SIZE", 100);
   }();
   return cache_size;
 }
 
-bool use_cuda_graphs() {
-  static bool use_graphs = []() {
-    return env::get_var("MLX_USE_CUDA_GRAPHS", true);
-  }();
-  return use_graphs;
-}
-
 } // namespace
 
 Device::Device(int device) : device_(device) {
@@ -93,19 +86,14 @@ CommandEncoder& Device::get_command_encoder(Stream s) {
 CommandEncoder::CaptureContext::CaptureContext(CommandEncoder& enc) : enc(enc) {
   enc.device().make_current();
-  if (!use_cuda_graphs()) {
-    return;
-  }
   CHECK_CUDA_ERROR(
       cudaStreamBeginCapture(enc.stream(), cudaStreamCaptureModeGlobal));
 }
 
 CommandEncoder::CaptureContext::~CaptureContext() {
-  if (!use_cuda_graphs()) {
-    return;
-  }
+  CHECK_CUDA_ERROR(cudaStreamEndCapture(enc.stream(), &graph));
+  std::unique_ptr<cudaGraph_t, void (*)(cudaGraph_t*)> graph_freer(
+      &graph, [](cudaGraph_t* p) { CHECK_CUDA_ERROR(cudaGraphDestroy(*p)); });
 
-  graph.end_capture(enc.stream());
   if (discard) {
     return;
   }
@@ -119,9 +107,6 @@ CommandEncoder::ConcurrentContext::ConcurrentContext(CommandEncoder& enc)
 CommandEncoder::ConcurrentContext::~ConcurrentContext() {
   enc.in_concurrent_ = false;
-  if (!use_cuda_graphs()) {
-    return;
-  }
 
   // Use an empty graph node for synchronization
   CommandEncoder::GraphNode empty{NULL, 'E', std::to_string(enc.node_count_++)};
@@ -200,28 +185,20 @@ void CommandEncoder::insert_graph_dependencies(std::vector<GraphNode> nodes) {
 }
 
 CommandEncoder::CommandEncoder(Device& d)
-    : device_(d),
-      stream_(d),
-      graph_(d),
-      graph_cache_(cuda_graph_cache_size()) {}
+    : device_(d), stream_(d), graph_cache_(cuda_graph_cache_size()) {
+  CHECK_CUDA_ERROR(cudaGraphCreate(&graph_, 0));
+}
 
 void CommandEncoder::add_completed_handler(std::function<void()> task) {
   worker_.add_task(std::move(task));
 }
 
 void CommandEncoder::set_input_array(const array& arr) {
-  if (!use_cuda_graphs()) {
-    return;
-  }
   auto id = reinterpret_cast<std::uintptr_t>(arr.buffer().ptr());
   active_deps_.push_back(id);
 }
 
 void CommandEncoder::set_output_array(const array& arr) {
-  if (!use_cuda_graphs()) {
-    return;
-  }
-
   auto id = reinterpret_cast<std::uintptr_t>(arr.buffer().ptr());
   active_deps_.push_back(id);
   active_outputs_.push_back(id);
@@ -239,11 +216,6 @@ void CommandEncoder::add_kernel_node(
     dim3 block_dim,
     uint32_t smem_bytes,
     void** params) {
-  if (!use_cuda_graphs()) {
-    CHECK_CUDA_ERROR(cudaLaunchKernel(
-        func, grid_dim, block_dim, params, smem_bytes, stream()));
-    return;
-  }
   cudaKernelNodeParams kernel_params = {0};
   kernel_params.func = func;
   kernel_params.gridDim = grid_dim;
@@ -259,22 +231,6 @@ void CommandEncoder::add_kernel_node(
     dim3 block_dim,
     uint32_t smem_bytes,
     void** params) {
-  if (!use_cuda_graphs()) {
-    CHECK_CUDA_ERROR(cuLaunchKernel(
-        func,
-        grid_dim.x,
-        grid_dim.y,
-        grid_dim.z,
-        block_dim.x,
-        block_dim.y,
-        block_dim.z,
-        smem_bytes,
-        stream(),
-        params,
-        nullptr));
-    return;
-  }
-
   CUDA_KERNEL_NODE_PARAMS kernel_params = {0};
   kernel_params.func = func;
   kernel_params.gridDimX = grid_dim.x;
@@ -301,12 +257,6 @@ void CommandEncoder::add_kernel_node(const CUDA_KERNEL_NODE_PARAMS& params) {
 }
 
 void CommandEncoder::add_graph_node(cudaGraph_t child) {
-  if (!use_cuda_graphs()) {
-    CudaGraphExec graph_exec;
-    graph_exec.instantiate(child);
-    device_.make_current();
-    CHECK_CUDA_ERROR(cudaGraphLaunch(graph_exec, stream()));
-  }
   cudaGraphNode_t node;
   CHECK_CUDA_ERROR(cudaGraphAddChildGraphNode(&node, graph_, NULL, 0, child));
   insert_graph_dependencies(GraphNode{node, 'G'});
@@ -320,13 +270,7 @@ void CommandEncoder::commit() {
   if (node_count_ > 0) {
     if (!from_nodes_.empty()) {
       CHECK_CUDA_ERROR(cudaGraphAddDependencies(
-          graph_,
-          from_nodes_.data(),
-          to_nodes_.data(),
-#if CUDART_VERSION >= 13000
-          nullptr, // edgeData
-#endif // CUDART_VERSION >= 13000
-          from_nodes_.size()));
+          graph_, from_nodes_.data(), to_nodes_.data(), from_nodes_.size()));
     }
 
     graph_key_ += ".";
@@ -367,7 +311,8 @@ void CommandEncoder::commit() {
   to_nodes_.clear();
   graph_key_.clear();
   node_map_.clear();
-  graph_ = CudaGraph(device_);
+  CHECK_CUDA_ERROR(cudaGraphDestroy(graph_));
+  CHECK_CUDA_ERROR(cudaGraphCreate(&graph_, 0));
 }
 
 // Put completion handlers in a batch.
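Both versions of CaptureContext above rely on CUDA stream capture. For
reference, a minimal stream-capture round trip using only the public CUDA
runtime API; the kernel and its arguments are hypothetical:

    cudaGraph_t graph;
    cudaGraphExec_t exec;
    CHECK_CUDA_ERROR(
        cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal));
    my_kernel<<<grid, block, 0, stream>>>(args); // recorded, not yet run
    CHECK_CUDA_ERROR(cudaStreamEndCapture(stream, &graph));
    CHECK_CUDA_ERROR(cudaGraphInstantiate(&exec, graph, nullptr, nullptr, 0));
    CHECK_CUDA_ERROR(cudaGraphLaunch(exec, stream)); // replay the capture
    CHECK_CUDA_ERROR(cudaGraphExecDestroy(exec));
    CHECK_CUDA_ERROR(cudaGraphDestroy(graph));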
@@ -21,7 +21,7 @@ class CommandEncoder {
   struct CaptureContext {
     CaptureContext(CommandEncoder& enc);
     ~CaptureContext();
-    CudaGraph graph;
+    cudaGraph_t graph;
     CommandEncoder& enc;
     bool discard{false};
   };
@@ -76,6 +76,9 @@ class CommandEncoder {
       uint32_t smem_bytes,
       void** params);
 
+  // Low-level graph helpers.
+  void add_kernel_node(const cudaKernelNodeParams& params);
+  void add_kernel_node(const CUDA_KERNEL_NODE_PARAMS& params);
   void add_graph_node(cudaGraph_t child);
 
   void add_temporary(const array& arr) {
@@ -98,9 +101,6 @@ class CommandEncoder {
   void synchronize();
 
  private:
-  void add_kernel_node(const cudaKernelNodeParams& params);
-  void add_kernel_node(const CUDA_KERNEL_NODE_PARAMS& params);
-
   struct GraphNode {
     cudaGraphNode_t node;
     // K = kernel
@@ -115,7 +115,7 @@ class CommandEncoder {
 
   Device& device_;
   CudaStream stream_;
-  CudaGraph graph_;
+  cudaGraph_t graph_;
   Worker worker_;
   char node_count_{0};
   char graph_node_count_{0};
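The header change above replaces raw cudaGraph_t members with a CudaGraph
RAII type whose definition is not part of this diff. A sketch of the minimum
such a wrapper has to provide, under that assumption (this is not MLX's
actual class):

    class ScopedCudaGraph {
     public:
      ScopedCudaGraph() {
        CHECK_CUDA_ERROR(cudaGraphCreate(&graph_, 0));
      }
      ~ScopedCudaGraph() {
        if (graph_ != nullptr) {
          cudaGraphDestroy(graph_); // owned handle freed exactly once
        }
      }
      ScopedCudaGraph(const ScopedCudaGraph&) = delete;
      ScopedCudaGraph& operator=(const ScopedCudaGraph&) = delete;
      operator cudaGraph_t() const { return graph_; }

     private:
      cudaGraph_t graph_{nullptr};
    };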
@@ -204,12 +204,6 @@ struct Power {
   __device__ T operator()(T base, T exp) {
     if constexpr (cuda::std::is_integral_v<T>) {
       T res = 1;
-      // Raising an integer to a negative power is undefined
-      if constexpr (cuda::std::is_signed_v<T>) {
-        if (exp < 0) {
-          return 0;
-        }
-      }
       while (exp) {
         if (exp & 1) {
           res *= base;
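The guard present in the left column matters for termination as well as
semantics: assuming the usual `exp >>= 1` reduction, an arithmetic right
shift of a negative exponent never reaches zero. A host-side sketch of the
same exponentiation-by-squaring loop with the guard in place:

    template <typename T>
    T pow_int(T base, T exp) {
      T res = 1;
      // Integer x^-n truncates toward zero for |x| > 1, so return 0.
      if constexpr (std::is_signed_v<T>) {
        if (exp < 0) {
          return 0;
        }
      }
      while (exp) {
        if (exp & 1) {
          res *= base; // fold in the current exponent bit
        }
        exp >>= 1;
        base *= base; // square for the next bit
      }
      return res;
    }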
@@ -6,6 +6,7 @@
 #include <cuda_bf16.h>
 #include <cuda_fp16.h>
+#include <thrust/iterator/transform_iterator.h>
 
 namespace mlx::core::cu {
 
@@ -115,4 +116,15 @@ inline __host__ __device__ auto cast_to(SrcT x) {
   return CastOp<SrcT, DstT>{}(x);
 }
 
+// Return an iterator that casts the value to DstT using CastOp.
+template <typename DstT, typename Iterator>
+inline __host__ __device__ auto make_cast_iterator(Iterator it) {
+  using SrcT = typename cuda::std::iterator_traits<Iterator>::value_type;
+  if constexpr (std::is_same_v<SrcT, DstT>) {
+    return it;
+  } else {
+    return thrust::make_transform_iterator(it, CastOp<SrcT, DstT>{});
+  }
+}
+
 } // namespace mlx::core::cu
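A usage sketch for the make_cast_iterator helper added above: wrapping a raw
device pointer lets thrust algorithms consume the values as a different type
without a separate conversion pass. The function below is illustrative:

    #include <thrust/copy.h>
    #include <thrust/execution_policy.h>

    // Read __half values from device memory, write them out as float.
    void copy_half_as_float(const __half* src, float* dst, int n) {
      auto it = mlx::core::cu::make_cast_iterator<float>(src);
      thrust::copy(thrust::device, it, it + n, dst);
    }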
@@ -146,23 +146,6 @@ inline __device__ void store_vector(
   }
 }
 
-template <int N, typename T, typename SizeT>
-inline __device__ void store_vector(
-    T* ptr,
-    uint32_t offset,
-    const AlignedVector<T, N>& vec,
-    SizeT size,
-    int64_t stride) {
-  if (is_aligned<N>(ptr) && (offset + 1) * N <= size && stride == 1) {
-    auto* to = reinterpret_cast<AlignedVector<T, N>*>(ptr);
-    to[offset] = vec;
-  } else {
-    for (int i = 0; (offset * N + i) < size && i < N; ++i) {
-      ptr[stride * (offset * N + i)] = vec[i];
-    }
-  }
-}
-
 ///////////////////////////////////////////////////////////////////////////////
 // Type limits utils
 ///////////////////////////////////////////////////////////////////////////////
@@ -1,56 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/device.h"
-#include "mlx/backend/cuda/kernel_utils.cuh"
-#include "mlx/backend/gpu/copy.h"
-#include "mlx/distributed/primitives.h"
-#include "mlx/primitives.h"
-
-#include <cassert>
-
-namespace mlx::core::distributed {
-void AllReduce::eval_gpu(
-    const std::vector<array>& inputs,
-    std::vector<array>& outputs) {
-  assert(inputs.size() == 1);
-  assert(outputs.size() == 1);
-
-  auto set_input_output =
-      [s = stream()](const array& in, array& out) -> std::pair<array, array> {
-    if (!in.flags().row_contiguous) {
-      copy_gpu(in, out, CopyType::General, s);
-      return {out, out};
-    } else if (in.is_donatable()) {
-      out.copy_shared_buffer(in);
-      return {in, out};
-    } else {
-      out.set_data(allocator::malloc(out.nbytes()));
-      return {in, out};
-    }
-  };
-
-  auto [input, output] = set_input_output(inputs[0], outputs[0]);
-
-  auto& encoder = cu::get_command_encoder(stream());
-  encoder.set_input_array(input);
-  encoder.set_output_array(output);
-
-  auto capture = encoder.capture_context();
-  auto& s = stream();
-
-  switch (reduce_type_) {
-    case Sum:
-      distributed::detail::all_sum(group(), input, output, s);
-      break;
-    case Max:
-      distributed::detail::all_max(group(), input, output, s);
-      break;
-    case Min:
-      distributed::detail::all_min(group(), input, output, s);
-      break;
-    default:
-      throw std::runtime_error(
-          "Only all reduce sum, max, and min are supported.");
-  }
-}
-} // namespace mlx::core::distributed
@@ -202,25 +202,6 @@ CublasGemm::~CublasGemm() {
   CHECK_CUBLAS_ERROR(cublasLtMatmulDescDestroy(matmul_desc_));
 }
 
-void CublasGemm::set_out(
-    Dtype dtype,
-    bool transposed,
-    uint64_t rows,
-    uint64_t cols,
-    int64_t ld,
-    int32_t batch_count,
-    int64_t batch_stride) {
-  CHECK_CUBLAS_ERROR(cublasLtMatrixLayoutDestroy(out_desc_));
-  out_desc_ = create_matrix_layout(
-      dtype_to_cublas_type(dtype),
-      rows,
-      cols,
-      transposed,
-      ld,
-      batch_count,
-      batch_stride);
-}
-
 void CublasGemm::run(
     cu::CommandEncoder& encoder,
     array& out,
@@ -44,17 +44,6 @@ class CublasGemm {
 
   ~CublasGemm();
 
-  // The output's descriptor is inferred from inputs by default, use this
-  // method for unusual output.
-  void set_out(
-      Dtype dtype,
-      bool transposed,
-      uint64_t rows,
-      uint64_t cols,
-      int64_t ld,
-      int32_t batch_count,
-      int64_t batch_stride);
-
   void run(
       cu::CommandEncoder& encoder,
      array& out,
301  mlx/backend/cuda/gemms/steel_gemm.cu  Normal file
@@ -0,0 +1,301 @@
+#include "mlx/backend/common/matmul.h"
+#include "mlx/backend/cuda/device.h"
+#include "mlx/backend/cuda/device/utils.cuh"
+#include "mlx/backend/cuda/gemms/steel_gemm.h"
+#include "mlx/backend/cuda/kernel_utils.cuh"
+#include "mlx/primitives.h"
+
+#include <nvtx3/nvtx3.hpp>
+#include <numeric>
+
+#include <cooperative_groups.h>
+
+#include "mlx/backend/cuda/steel/gemm.cuh"
+#include "mlx/backend/cuda/steel/mma.cuh"
+#include "mlx/backend/cuda/steel/tiles.cuh"
+
+namespace mlx::core {
+
+namespace cu {
+
+namespace cg = cooperative_groups;
+
+struct GemmParams {
+  int M;
+  int N;
+  int K;
+  int lda;
+  int ldb;
+  int ldd;
+
+  int NblockM;
+  int NblockN;
+  int NblockK;
+};
+
+template <
+    typename T,
+    int BM,
+    int BN,
+    int BK,
+    int WM,
+    int WN,
+    bool transpose_a,
+    bool transpose_b,
+    int SL,
+    int Nstages>
+__global__ void kernel_steel_gemm(
+    const T* a,
+    const T* b,
+    T* d,
+    __grid_constant__ const GemmParams params) {
+  const int bM_idx = (blockIdx.y << SL) + (blockIdx.x & ((1 << SL) - 1));
+  const int bN_idx = blockIdx.x >> SL;
+
+  if (params.NblockN <= bN_idx || params.NblockM <= bM_idx) {
+    return;
+  }
+
+  const int d_row = bM_idx * BM;
+  const int d_col = bN_idx * BN;
+  const size_t d_row_long = size_t(d_row);
+  const size_t d_col_long = size_t(d_col);
+
+  a += transpose_a ? d_row_long : d_row_long * params.K;
+  b += transpose_b ? d_col_long * params.K : d_col_long;
+  d += d_row_long * params.ldd + d_col_long;
+
+  auto block = cg::this_thread_block();
+  auto warp = cg::tiled_partition<32>(block);
+
+  const int lane_idx = warp.thread_rank();
+  const int warp_idx = warp.meta_group_rank();
+
+  const int wm = warp_idx / WN;
+  const int wn = warp_idx % WN;
+
+  constexpr int SM = BM / WM;
+  constexpr int SN = BN / WN;
+  constexpr int SK = BK;
+  constexpr int TK = SK / 16;
+
+  constexpr int NUM_WARPS = WM * WN;
+
+  // Allocate shared memory
+  extern __shared__ char shmem[];
+  SharedTile<T, BM, BK>(&as)[Nstages] =
+      *(SharedTile<T, BM, BK>(*)[Nstages])(&shmem[0]);
+  SharedTile<T, BN, BK>(&bs)[Nstages] = *(SharedTile<T, BN, BK>(*)[Nstages])(
+      &shmem[sizeof(T) * Nstages * BM * BK]);
+
+  // Allocate registers for the MMA
+  RegisterTile<float, SM, SN> C;
+  RegisterTile<T, SM, 16> A[TK];
+  RegisterTile<T, SN, 16> B[TK];
+
+  // Zero the accumulators
+  C.fill(0);
+
+  // Start gmem -> smem copies
+  int k_block_read = 0;
+
+  MLX_UNROLL
+  for (int bk = 0; bk < (Nstages - 1); bk++) {
+    load_async<NUM_WARPS>(
+        as[bk], as[bk].base_addr(), a + k_block_read, params.K);
+    load_async<NUM_WARPS>(
+        bs[bk], bs[bk].base_addr(), b + k_block_read, params.K);
+    k_block_read += BK;
+    cp_async_commit();
+  }
+
+  int smem_pipe_read = 0;
+  int smem_pipe_write = Nstages - 1;
+
+  // Wait till only 1 remains loading
+  cp_async_wait<1>();
+  block.sync();
+
+  const int offset_m = wm * SM;
+  const int offset_n = wn * SN;
+
+  // Start smem -> register copy
+  A[0].load(
+      as[smem_pipe_read],
+      as[smem_pipe_read].base_addr(),
+      offset_m + lane_idx % 16,
+      lane_idx / 16 * 8);
+  B[0].load(
+      bs[smem_pipe_read],
+      bs[smem_pipe_read].base_addr(),
+      offset_n + lane_idx % 16,
+      lane_idx / 16 * 8);
+
+  // Main loop
+  for (int kb = 0; kb < params.NblockK; kb++) {
+    // Prepare next registers
+    {
+      A[1].load(
+          as[smem_pipe_read],
+          as[smem_pipe_read].base_addr(),
+          offset_m + lane_idx % 16,
+          16 + lane_idx / 16 * 8);
+      B[1].load(
+          bs[smem_pipe_read],
+          bs[smem_pipe_read].base_addr(),
+          offset_n + lane_idx % 16,
+          16 + lane_idx / 16 * 8);
+    }
+
+    // Prepare next smem
+    if ((kb + Nstages - 1) < params.NblockK) {
+      load_async<NUM_WARPS>(
+          as[smem_pipe_write],
+          as[smem_pipe_write].base_addr(),
+          a + k_block_read,
+          params.K);
+      load_async<NUM_WARPS>(
+          bs[smem_pipe_write],
+          bs[smem_pipe_write].base_addr(),
+          b + k_block_read,
+          params.K);
+    }
+    k_block_read += BK;
+
+    cp_async_commit();
+
+    smem_pipe_write = smem_pipe_read;
+    smem_pipe_read = smem_pipe_read + 1;
+    smem_pipe_read = (smem_pipe_read == Nstages) ? 0 : smem_pipe_read;
+
+    // Do current gemm
+    mma_t(C, A[0], B[0]);
+
+    // Do wait for next register
+    cp_async_wait<1>();
+    block.sync();
+
+    // Prepare next register (smem_pipe_read has moved to the next)
+    {
+      A[0].load(
+          as[smem_pipe_read],
+          as[smem_pipe_read].base_addr(),
+          offset_m + lane_idx % 16,
+          lane_idx / 16 * 8);
+      B[0].load(
+          bs[smem_pipe_read],
+          bs[smem_pipe_read].base_addr(),
+          offset_n + lane_idx % 16,
+          lane_idx / 16 * 8);
+    }
+
+    // Do current gemm
+    mma_t(C, A[1], B[1]);
+  }
+
+  // Wait and clear
+  cp_async_wait_all();
+  block.sync();
+
+  C.store_global(d, params.ldd, offset_m, offset_n);
+}
+
+} // namespace cu
+
+void dispatch_steel_gemm(
+    const Stream& s,
+    cu::CommandEncoder& encoder,
+    const array& a,
+    const array& b,
+    array& d,
+    int M,
+    int N,
+    int K,
+    int lda,
+    int ldb,
+    int ldd,
+    bool a_transposed,
+    bool b_transposed) {
+  using DataType = cuda_type_t<float16_t>;
+
+  encoder.set_input_array(a);
+  encoder.set_input_array(b);
+  encoder.set_output_array(d);
+
+  constexpr int BM = 128;
+  constexpr int BN = 128;
+  constexpr int BK = 32;
+
+  constexpr int WM = 2;
+  constexpr int WN = 2;
+
+  constexpr int SL = 0;
+  constexpr int Nstages = 3;
+
+  constexpr uint32_t smem_bytes = BK * (BM + BN) * Nstages * sizeof(DataType);
+
+  const int NblockM = (M + BM - 1) / BM;
+  const int NblockN = (N + BN - 1) / BN;
+  const int NblockK = (K + BK - 1) / BK;
+
+  cu::GemmParams params{
+      /* int M = */ M,
+      /* int N = */ N,
+      /* int K = */ K,
+      /* int lda = */ lda,
+      /* int ldb = */ ldb,
+      /* int ldd = */ ldd,
+
+      /* int NblockM = */ NblockM,
+      /* int NblockN = */ NblockN,
+      /* int NblockK = */ NblockK,
+  };
+
+  // Prepare launch grid params
+  int tile = 1 << SL;
+  int tm = (NblockM + tile - 1) / tile;
+  int tn = NblockN * tile;
+
+  dim3 grid_dim(tn, tm, 1);
+  dim3 block_dim(32 * WM * WN, 1, 1);
+
+  dispatch_bool(a_transposed, [&](auto ta_) {
+    dispatch_bool(b_transposed, [&](auto tb_) {
+      constexpr bool ta = ta_.value;
+      constexpr bool tb = tb_.value;
+
+      auto kernel = cu::ab_t_aligned<DataType, BM, BN, BK>;
+      cudaFuncSetAttribute(
+          kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_bytes);
+
+      encoder.add_kernel_node(
+          kernel,
+          grid_dim,
+          block_dim,
+          smem_bytes,
+          a.data<DataType>(),
+          b.data<DataType>(),
+          d.data<DataType>(),
+          N,
+          K);
+
+      // auto kernel = cu::kernel_steel_gemm<DataType, BM, BN, BK, WM, WN, ta,
+      // tb, SL, Nstages>;
+
+      // cudaFuncSetAttribute(kernel,
+      // cudaFuncAttributeMaxDynamicSharedMemorySize, smem_bytes);
+
+      // encoder.add_kernel_node(
+      //     kernel,
+      //     grid_dim,
+      //     block_dim,
+      //     smem_bytes,
+      //     a.data<DataType>(),
+      //     b.data<DataType>(),
+      //     d.data<DataType>(),
+      //     params);
+    });
+  });
+}
+
+} // namespace mlx::core
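Two details of the dispatch above are worth spelling out. First, the dynamic
shared memory request is BK * (BM + BN) * Nstages * sizeof(half) =
32 * 256 * 3 * 2 = 49152 bytes, right at the 48 KB opt-in boundary, which is
presumably why cudaFuncSetAttribute is called explicitly. Second, the SL
parameter folds an M sub-index into blockIdx.x; a host-side sketch of the
mapping that kernel_steel_gemm inverts:

    // With SL bits of swizzle, consecutive blockIdx.x values cycle through
    // 2^SL M-tiles of the same N-column, improving tile reuse in L2.
    std::pair<int, int> swizzled_tile(int bx, int by, int SL) {
      int bM = (by << SL) + (bx & ((1 << SL) - 1));
      int bN = bx >> SL;
      return {bM, bN};
    }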
27  mlx/backend/cuda/gemms/steel_gemm.h  Normal file
@@ -0,0 +1,27 @@
+#pragma once
+
+#include "mlx/backend/common/matmul.h"
+#include "mlx/backend/cuda/device.h"
+#include "mlx/primitives.h"
+
+#include <nvtx3/nvtx3.hpp>
+#include <numeric>
+
+namespace mlx::core {
+
+void dispatch_steel_gemm(
+    const Stream& s,
+    cu::CommandEncoder& encoder,
+    const array& a,
+    const array& b,
+    array& d,
+    int M,
+    int N,
+    int K,
+    int lda,
+    int ldb,
+    int ldd,
+    bool a_transposed,
+    bool b_transposed);
+
+} // namespace mlx::core
@@ -94,7 +94,7 @@ void Gather::eval_gpu(const std::vector<array>& inputs, array& out) {
             large ? "int64_t" : "int32_t"));
       }
     }
-    return std::make_tuple(false, jit_source_gather, std::move(kernel_names));
+    return std::make_pair(jit_source_gather, std::move(kernel_names));
   });
 
   cu::KernelArgs args;
@@ -110,7 +110,7 @@ void Gather::eval_gpu(const std::vector<array>& inputs, array& out) {
   args.append<int32_t>(src.ndim());
   args.append_ndim(slice_sizes_);
   args.append(slice_size);
-  args.append(axes_);
+  args.append(SmallVector<int32_t>(axes_.begin(), axes_.end()));
   append_indices_arg(args, inputs, nidx, idx_ndim);
 
   std::string kernel_name = fmt::format(
@@ -189,7 +189,7 @@ void Scatter::eval_gpu(const std::vector<array>& inputs, array& out) {
             large ? "int64_t" : "int32_t"));
       }
     }
-    return std::make_tuple(false, jit_source_scatter, std::move(kernel_names));
+    return std::make_pair(jit_source_scatter, std::move(kernel_names));
   });
 
   cu::KernelArgs args;
@@ -211,7 +211,7 @@ void Scatter::eval_gpu(const std::vector<array>& inputs, array& out) {
   args.append_ndim(out.shape());
   args.append_ndim(out.strides());
   args.append<int32_t>(out.ndim());
-  args.append(axes_);
+  args.append(SmallVector<int32_t>(axes_.begin(), axes_.end()));
   append_indices_arg(args, inputs, nidx, idx_ndim);
 
   std::string kernel_name = fmt::format(
@@ -268,8 +268,7 @@ void GatherAxis::eval_gpu(const std::vector<array>& inputs, array& out) {
       }
     }
   }
-  return std::make_tuple(
-      false, jit_source_gather_axis, std::move(kernel_names));
+  return std::make_pair(jit_source_gather_axis, std::move(kernel_names));
 });
 
 size_t idx_size_pre = 1;
@@ -372,8 +371,7 @@ void ScatterAxis::eval_gpu(const std::vector<array>& inputs, array& out) {
      }
    }
  }
-  return std::make_tuple(
-      false, jit_source_scatter_axis, std::move(kernel_names));
+  return std::make_pair(jit_source_scatter_axis, std::move(kernel_names));
 });
 
 size_t idx_size_pre = 1;
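These make_tuple/make_pair flips are mechanical fallout of the
KernelBuilderResult change in jit_module.h further down: the left column's
builder also returns a leading `precompiled` flag. Both shapes side by side:

    // Left column (tuple with precompiled flag):
    return std::make_tuple(false, jit_source_gather, std::move(kernel_names));
    // Right column (plain pair):
    return std::make_pair(jit_source_gather, std::move(kernel_names));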
@@ -67,12 +67,10 @@ const std::string& cccl_dir() {
     return path.string();
   }
   // Finally check the environment variable.
-  if (const char* env = std::getenv("MLX_CCCL_DIR"); env) {
-    path = env;
+  path = std::getenv("MLX_CCCL_DIR");
   if (!path.empty() && std::filesystem::exists(path)) {
     return path.string();
   }
-  }
   return std::string();
 }();
 return dir;
@@ -103,8 +101,8 @@ const std::filesystem::path& ptx_cache_dir() {
 bool read_cached_ptx(
     const std::filesystem::path& cache_dir,
     const std::string& module_name,
-    std::string& ptx,
-    std::vector<std::pair<std::string, std::string>>& ptx_kernels) {
+    std::vector<char>* ptx,
+    std::vector<std::pair<std::string, std::string>>* ptx_kernels) {
   if (cache_dir.empty()) {
     return false;
   }
@@ -119,15 +117,15 @@ bool read_cached_ptx(
   if (!ptx_file.good()) {
     return false;
   }
-  ptx.resize(ptx_size);
-  ptx_file.read(ptx.data(), ptx_size);
+  ptx->resize(ptx_size);
+  ptx_file.read(ptx->data(), ptx_size);
 
   std::ifstream txt_file(cache_dir / (module_name + ".txt"), std::ios::binary);
   std::string line;
   while (std::getline(txt_file, line)) {
     auto tab = line.find('\t');
     if (tab != std::string::npos) {
-      ptx_kernels.emplace_back(line.substr(0, tab), line.substr(tab + 1));
+      ptx_kernels->emplace_back(line.substr(0, tab), line.substr(tab + 1));
     }
   }
   return true;
@@ -137,7 +135,7 @@ bool read_cached_ptx(
 void write_cached_ptx(
     const std::filesystem::path& cache_dir,
     const std::string& module_name,
-    const std::string& ptx,
+    const std::vector<char>& ptx,
     const std::vector<std::pair<std::string, std::string>>& ptx_kernels,
     const std::string& source_code) {
   if (cache_dir.empty()) {
@@ -219,18 +217,22 @@ constexpr const char* g_headers[] = {
     jit_source_utils,
 };
 
-void compile(
+} // namespace
+
+JitModule::JitModule(
     Device& device,
     const std::string& module_name,
-    const std::string& source,
-    const std::vector<std::string>& kernel_names,
-    std::string& ptx,
-    std::vector<std::pair<std::string, std::string>>& ptx_kernels) {
-  // Create the program
+    const KernelBuilder& builder) {
+  // Check cache.
+  std::vector<char> ptx;
+  std::vector<std::pair<std::string, std::string>> ptx_kernels;
+  if (!read_cached_ptx(ptx_cache_dir(), module_name, &ptx, &ptx_kernels)) {
+    // Create program.
+    auto [source_code, kernel_names] = builder();
   nvrtcProgram prog;
   CHECK_NVRTC_ERROR(nvrtcCreateProgram(
       &prog,
-      source.c_str(),
+      source_code.c_str(),
       (module_name + ".cu").c_str(),
       std::size(g_headers),
       g_headers,
@@ -284,20 +286,16 @@ void compile(
   } else {
     CHECK_NVRTC_ERROR(nvrtcGetPTXSize(prog, &ptx_size));
   }
-  ptx.resize(ptx_size);
+  ptx.resize(ptx_size, 0);
   if (use_sass) {
     CHECK_NVRTC_ERROR(nvrtcGetCUBIN(prog, ptx.data()));
   } else {
     CHECK_NVRTC_ERROR(nvrtcGetPTX(prog, ptx.data()));
   }
+    write_cached_ptx(
+        ptx_cache_dir(), module_name, ptx, ptx_kernels, source_code);
   }
 
-void load_module(
-    const std::string& module_name,
-    const std::string& ptx,
-    const std::vector<std::pair<std::string, std::string>>& ptx_kernels,
-    CUmodule& module_,
-    std::unordered_map<std::string, std::pair<CUfunction, bool>>& kernels) {
   // Load module.
   char jit_log[4089] = {};
   CUjit_option options[] = {
@@ -314,69 +312,21 @@ void load_module(
   for (const auto& [name, mangled] : ptx_kernels) {
     CUfunction kernel;
     CHECK_CUDA_ERROR(cuModuleGetFunction(&kernel, module_, mangled.c_str()));
-    kernels[name] = std::make_pair(kernel, false);
+    kernels_[name] = kernel;
   }
 }
 
-} // namespace
-
-JitModule::JitModule(
-    Device& device,
-    const std::string& module_name,
-    const KernelBuilder& builder,
-    bool use_disk_cache) {
-  // Will hold the actual device executable source code and kernel names
-  std::string ptx;
-  std::vector<std::pair<std::string, std::string>> ptx_kernels;
-
-  // Try to load them from the file cache
-  if (!read_cached_ptx(ptx_cache_dir(), module_name, ptx, ptx_kernels)) {
-    auto [precompiled, source_code, kernel_names] = builder();
-
-    // Get the PTX or cubin
-    if (precompiled) {
-      ptx = std::move(source_code);
-      for (auto& name : kernel_names) {
-        ptx_kernels.emplace_back(name, name);
-      }
-    } else {
-      compile(device, module_name, source_code, kernel_names, ptx, ptx_kernels);
-    }
-
-    // If requested save them in the file cache for the next launch
-    if (use_disk_cache) {
-      write_cached_ptx(
-          ptx_cache_dir(), module_name, ptx, ptx_kernels, source_code);
-    }
-  }
-
-  // Load the module
-  load_module(module_name, ptx, ptx_kernels, module_, kernels_);
-}
-
 JitModule::~JitModule() {
   CHECK_CUDA_ERROR(cuModuleUnload(module_));
 }
 
-CUfunction JitModule::get_kernel(
-    const std::string& kernel_name,
-    std::function<void(CUfunction)> configure_kernel) {
+CUfunction JitModule::get_kernel(const std::string& kernel_name) {
   auto it = kernels_.find(kernel_name);
   if (it == kernels_.end()) {
     throw std::runtime_error(
         fmt::format("There is no kernel named {}.", kernel_name));
   }
+  return it->second;
-  // If it is the first time we run this kernel then configure it. Do it only
-  // once!
-  if (!it->second.second) {
-    if (configure_kernel) {
-      configure_kernel(it->second.first);
-    }
-    it->second.second = true;
-  }
-
-  return it->second.first;
 }
 
 std::unordered_map<std::string, JitModule>& get_jit_module_cache() {
@@ -387,12 +337,11 @@ std::unordered_map<std::string, JitModule>& get_jit_module_cache() {
 JitModule& get_jit_module(
     const mlx::core::Device& device,
     const std::string& name,
-    const KernelBuilder& builder,
-    bool cache) {
+    const KernelBuilder& builder) {
   auto& map = get_jit_module_cache();
   auto it = map.find(name);
   if (it == map.end()) {
-    it = map.try_emplace(name, cu::device(device), name, builder, cache).first;
+    it = map.try_emplace(name, cu::device(device), name, builder).first;
   }
   return it->second;
 }
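The configure hook on the left column's get_kernel runs at most once per
kernel, the first time it is fetched. A usage sketch matching the custom
kernel path earlier in this diff; the 64 KB value is illustrative:

    auto kernel = mod.get_kernel(kernel_name, [](CUfunction k) {
      // One-time setup, e.g. raising the dynamic shared memory cap.
      cuFuncSetAttribute(
          k, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, 64 * 1024);
    });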
@@ -19,8 +19,7 @@ namespace mlx::core::cu {
 
 class Device;
 
-using KernelBuilderResult = std::tuple<
-    /* precompiled */ bool,
+using KernelBuilderResult = std::pair<
     /* source code */ std::string,
     /* kernel names */ std::vector<std::string>>;
 using KernelBuilder = std::function<KernelBuilderResult()>;
@@ -46,11 +45,6 @@ struct KernelArgs {
     append_ptr(std::get<SmallVector<T>>(storage_.back()).data());
   }
 
-  template <typename T>
-  void append(const std::vector<T>& vec) {
-    append(SmallVector<T>(vec.begin(), vec.end()));
-  }
-
   // Make sure the arg is copied to an array with size of NDIM.
   template <size_t NDIM = MAX_NDIM, typename T>
   void append_ndim(SmallVector<T> vec) {
@@ -69,16 +63,14 @@ struct KernelArgs {
  private:
   std::vector<void*> args_;
 
-  // The cuGraphAddKernelNode API requires passing pointers to arguments so
-  // store temporary values until the node is created.
+  // The cuLaunchKernel API requires passing pointers to arguments so store
+  // temporary values until the kernel is launched.
   using Arg = std::variant<
       std::monostate,
       CUdeviceptr,
-      bool,
       int32_t,
       uint32_t,
       int64_t,
-      float,
       SmallVector<const void*>,
       SmallVector<int32_t>,
       SmallVector<int64_t>>;
@@ -90,19 +82,16 @@ class JitModule {
   JitModule(
       Device& device,
       const std::string& module_name,
-      const KernelBuilder& builder,
-      bool cache);
+      const KernelBuilder& builder);
   ~JitModule();
 
   JitModule(const JitModule&) = delete;
   JitModule& operator=(const JitModule&) = delete;
-  CUfunction get_kernel(
-      const std::string& kernel_name,
-      std::function<void(CUfunction)> configure_kernel = nullptr);
+  CUfunction get_kernel(const std::string& kernel_name);
 
  private:
   CUmodule module_{nullptr};
-  std::unordered_map<std::string, std::pair<CUfunction, bool>> kernels_;
+  std::unordered_map<std::string, CUfunction> kernels_;
 };
 
 std::unordered_map<std::string, JitModule>& get_jit_module_cache();
@@ -110,7 +99,6 @@ std::unordered_map<std::string, JitModule>& get_jit_module_cache();
 JitModule& get_jit_module(
     const mlx::core::Device& device,
     const std::string& name,
-    const KernelBuilder& builder,
-    bool use_disk_cache = true);
+    const KernelBuilder& builder);
 
 } // namespace mlx::core::cu
@@ -7,6 +7,8 @@
 #include "mlx/backend/gpu/copy.h"
 #include "mlx/primitives.h"
 
+#include "mlx/backend/cuda/gemms/steel_gemm.h"
+
 #include <nvtx3/nvtx3.hpp>
 #include <numeric>
 
@@ -95,6 +97,24 @@ void Matmul::eval_gpu(const std::vector<array>& inputs, array& out) {
     return;
   }
 
+  if (out.dtype() == float16 && batch_count == 1 && !a_transposed &&
+      b_transposed) {
+    return dispatch_steel_gemm(
+        /* const Stream& s = */ s,
+        /* cu::CommandEncoder& encoder = */ encoder,
+        /* const array& a = */ a,
+        /* const array& b = */ b,
+        /* array& d = */ out,
+        /* int M = */ M,
+        /* int N = */ N,
+        /* int K = */ K,
+        /* int lda = */ lda,
+        /* int ldb = */ ldb,
+        /* int ldd = */ N,
+        /* bool a_transposed = */ a_transposed,
+        /* bool b_transposed = */ b_transposed);
+  }
+
   /////////////////////////////////////////////////////////////////////////////
   // Invoke cublasLt
   CublasGemm gemm(
@@ -1,47 +1,11 @@
 // Copyright © 2025 Apple Inc.
 
 #include "mlx/backend/cuda/cuda.h"
-#include "mlx/fast.h"
 
-namespace mlx::core {
-
-namespace cu {
+namespace mlx::core::cu {
 
 bool is_available() {
   return false;
 }
 
-} // namespace cu
-
-namespace fast {
-
-CustomKernelFunction cuda_kernel(
-    const std::string&,
-    const std::vector<std::string>&,
-    const std::vector<std::string>&,
-    const std::string&,
-    const std::string&,
-    bool,
-    int) {
-  throw std::runtime_error("[cuda_kernel] No CUDA back-end.");
-}
-
-std::vector<array> precompiled_cuda_kernel(
-    const std::string&,
-    const std::string&,
-    const std::vector<array>&,
-    const std::vector<Shape>&,
-    const std::vector<Dtype>&,
-    const std::vector<ScalarArg>&,
-    std::tuple<int, int, int>,
-    std::tuple<int, int, int>,
-    int shared_memory,
-    std::optional<float> init_value,
-    bool ensure_row_contiguous,
-    StreamOrDevice) {
-  throw std::runtime_error("[cuda_kernel] No CUDA back-end.");
-}
-
-} // namespace fast
-
-} // namespace mlx::core
+} // namespace mlx::core::cu
@@ -24,6 +24,8 @@ namespace mlx::core {
 }
 
 NO_GPU(BlockMaskedMM)
+NO_GPU(DynamicSlice)
+NO_GPU(DynamicSliceUpdate)
 NO_GPU(FFT)
 NO_GPU(GatherMM)
 NO_GPU(GatherQMM)
@@ -39,7 +41,12 @@ NO_GPU(Cholesky)
 NO_GPU_MULTI(Eig)
 NO_GPU_MULTI(Eigh)
 
+namespace fast {
+NO_GPU_MULTI(CustomKernel)
+} // namespace fast
+
 namespace distributed {
+NO_GPU_MULTI(AllReduce)
 NO_GPU_MULTI(AllGather)
 NO_GPU_MULTI(Send)
 NO_GPU_MULTI(Recv)
@@ -46,10 +46,10 @@ inline array ensure_row_contiguous_matrix(
 
 } // namespace
 
-void fast::Quantize::eval_gpu(
+void fast::AffineQuantize::eval_gpu(
     const std::vector<array>& inputs,
     std::vector<array>& outputs) {
-  nvtx3::scoped_range r("Quantize::eval_gpu");
+  nvtx3::scoped_range r("AffineQuantize::eval_gpu");
   auto& s = stream();
   auto& d = cu::device(s.device);
   auto& enc = d.get_command_encoder(s);
@@ -1,11 +1,8 @@
|
|||||||
// Copyright © 2025 Apple Inc.
|
// Copyright © 2025 Apple Inc.
|
||||||
|
|
||||||
#include "mlx/backend/common/slicing.h"
|
#include "mlx/backend/common/slicing.h"
|
||||||
#include "mlx/backend/cuda/device.h"
|
|
||||||
#include "mlx/backend/cuda/jit_module.h"
|
|
||||||
#include "mlx/backend/gpu/copy.h"
|
#include "mlx/backend/gpu/copy.h"
|
||||||
#include "mlx/backend/gpu/slicing.h"
|
#include "mlx/backend/gpu/slicing.h"
|
||||||
#include "mlx/dtype_utils.h"
|
|
||||||
|
|
||||||
#include <numeric>
|
#include <numeric>
|
||||||
|
|
||||||
@@ -30,7 +27,8 @@ void concatenate_gpu(
|
|||||||
flags.row_contiguous = false;
|
flags.row_contiguous = false;
|
||||||
flags.col_contiguous = false;
|
flags.col_contiguous = false;
|
||||||
flags.contiguous = false;
|
flags.contiguous = false;
|
||||||
auto concurrent = cu::get_command_encoder(s).concurrent_context();
|
// TODO: Handle concurrent outputs:
|
||||||
|
// https://github.com/ml-explore/mlx/pull/2145#discussion_r2070753816
|
||||||
for (int i = 0; i < inputs.size(); i++) {
|
for (int i = 0; i < inputs.size(); i++) {
|
||||||
array out_slice(inputs[i].shape(), out.dtype(), nullptr, {});
|
array out_slice(inputs[i].shape(), out.dtype(), nullptr, {});
|
||||||
size_t data_offset = strides[axis] * sizes[i];
|
size_t data_offset = strides[axis] * sizes[i];
|
||||||
@@ -40,71 +38,4 @@ void concatenate_gpu(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
array compute_dynamic_offset(
|
|
||||||
const array& indices,
|
|
||||||
const Strides& strides,
|
|
||||||
const std::vector<int>& axes,
|
|
||||||
const Stream& s) {
|
|
||||||
Dtype dtype = indices.dtype();
|
|
||||||
int nidx = axes.size();
|
|
||||||
|
|
||||||
std::string module_name =
|
|
||||||
fmt::format("compute_dynamic_offset_{}_{}", dtype_to_string(dtype), nidx);
|
|
||||||
std::string kernel_name = fmt::format(
|
|
||||||
"mlx::core::cu::compute_dynamic_offset<{}, {}>",
|
|
||||||
dtype_to_cuda_type(dtype),
|
|
||||||
nidx);
|
|
||||||
|
|
||||||
cu::JitModule& mod = cu::get_jit_module(s.device, module_name, [&]() {
|
|
||||||
std::string source = R"(
|
|
||||||
#include "mlx/backend/cuda/device/utils.cuh"
|
|
||||||
|
|
||||||
namespace mlx::core::cu {
|
|
||||||
|
|
||||||
template <typename T, int NIDX>
|
|
||||||
__global__ void compute_dynamic_offset(
|
|
||||||
const T* indices,
|
|
||||||
int64_t* offset,
|
|
||||||
const __grid_constant__ Strides strides,
|
|
||||||
const __grid_constant__ cuda::std::array<int, NIDX> axes) {
|
|
||||||
int64_t acc = 0;
|
|
||||||
#pragma unroll
|
|
||||||
for (int i = 0; i < NIDX; ++i) {
|
|
||||||
acc += indices[i] * strides[axes[i]];
|
|
||||||
}
|
|
||||||
*offset = acc;
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace mlx::core::cu
|
|
||||||
)";
|
|
||||||
return std::make_tuple(false, std::move(source), std::vector{kernel_name});
|
|
||||||
});
|
|
||||||
|
|
||||||
// Prepare output.
|
|
||||||
array offset({1}, int64, nullptr, {});
|
|
||||||
bool donate = indices.is_donatable() &&
|
|
||||||
(indices.data_size() * indices.itemsize()) >= offset.itemsize();
|
|
||||||
if (donate) {
|
|
||||||
offset.copy_shared_buffer(indices);
|
|
||||||
} else {
|
|
||||||
offset.set_data(allocator::malloc(offset.itemsize()));
|
|
||||||
}
|
|
||||||
|
|
||||||
auto& encoder = cu::get_command_encoder(s);
|
|
||||||
encoder.add_temporary(offset);
|
|
||||||
encoder.set_input_array(indices);
|
|
||||||
encoder.set_output_array(offset);
|
|
||||||
|
|
||||||
cu::KernelArgs args;
|
|
||||||
args.append(indices);
|
|
||||||
args.append(offset);
|
|
||||||
args.append_ndim(strides);
|
|
||||||
args.append(axes);
|
|
||||||
|
|
||||||
auto kernel = mod.get_kernel(kernel_name);
|
|
||||||
encoder.add_kernel_node(kernel, 1, 1, 0, args.args());
|
|
||||||
|
|
||||||
return offset;
|
|
||||||
}
|
|
||||||
|
|
||||||
} // namespace mlx::core
|
} // namespace mlx::core
|
||||||
|
|||||||
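Note: the hunk above drops the JIT-compiled `compute_dynamic_offset` helper. Its one-thread kernel folded a small set of dynamically computed indices into a single linear element offset, `offset = sum_i indices[i] * strides[axes[i]]`, which the caller could then use to slice at a data-dependent position. A minimal host-side sketch of the same reduction (toy std:: types standing in for the MLX `array`/`Strides` API):

    #include <cstdint>
    #include <vector>

    // Same reduction as the deleted CUDA kernel, done on the host: one index
    // per listed axis, scaled by that axis' stride. Types are illustrative.
    int64_t dynamic_offset(
        const std::vector<int64_t>& indices,
        const std::vector<int64_t>& strides,
        const std::vector<int>& axes) {
      int64_t acc = 0;
      for (size_t i = 0; i < axes.size(); ++i) {
        acc += indices[i] * strides[axes[i]];
      }
      return acc;
    }

The CUDA version exists so the offset can be produced on-device from freshly computed indices without a synchronizing copy back to the host.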
@@ -1,5 +1,6 @@
 // Copyright © 2025 Apple Inc.
 
+#include "mlx/backend/common/utils.h"
 #include "mlx/backend/cuda/device.h"
 #include "mlx/backend/cuda/kernel_utils.cuh"
 #include "mlx/backend/gpu/copy.h"
@@ -9,7 +10,7 @@
 #include <nvtx3/nvtx3.hpp>
 #include <thrust/device_ptr.h>
 #include <thrust/transform.h>
-#include <cub/device/device_segmented_radix_sort.cuh>
+#include <cub/device/device_segmented_sort.cuh>
 
 #include <cassert>
 
@@ -79,7 +80,7 @@ void gpu_sort(const Stream& s, array in, array& out_, int axis, bool argsort) {
       encoder.add_temporary(discard);
 
       size_t size;
-      CHECK_CUDA_ERROR(cub::DeviceSegmentedRadixSort::SortPairs(
+      CHECK_CUDA_ERROR(cub::DeviceSegmentedSort::StableSortPairs(
          nullptr,
          size,
          in.data<Type>(),
@@ -90,8 +91,6 @@ void gpu_sort(const Stream& s, array in, array& out_, int axis, bool argsort) {
          in.data_size() / nsort,
          offsets,
          offsets + 1,
-         0,
-         sizeof(Type) * 8,
          stream));
 
       array temp(allocator::malloc(size), {static_cast<int>(size)}, uint8);
@@ -106,7 +105,7 @@ void gpu_sort(const Stream& s, array in, array& out_, int axis, bool argsort) {
          thrust::device_pointer_cast(indices.data<uint32_t>()),
          ModOp<uint32_t>{static_cast<uint32_t>(nsort)});
 
-      CHECK_CUDA_ERROR(cub::DeviceSegmentedRadixSort::SortPairs(
+      CHECK_CUDA_ERROR(cub::DeviceSegmentedSort::StableSortPairs(
          temp.data<void>(),
          size,
          in.data<Type>(),
@@ -117,12 +116,10 @@ void gpu_sort(const Stream& s, array in, array& out_, int axis, bool argsort) {
          in.data_size() / nsort,
          offsets,
          offsets + 1,
-         0,
-         sizeof(Type) * 8,
          stream));
     } else {
       size_t size;
-      CHECK_CUDA_ERROR(cub::DeviceSegmentedRadixSort::SortKeys(
+      CHECK_CUDA_ERROR(cub::DeviceSegmentedSort::StableSortKeys(
          nullptr,
          size,
          in.data<Type>(),
@@ -131,8 +128,6 @@ void gpu_sort(const Stream& s, array in, array& out_, int axis, bool argsort) {
          in.data_size() / nsort,
          offsets,
          offsets + 1,
-         0,
-         sizeof(Type) * 8,
          stream));
 
       array temp(allocator::malloc(size), {static_cast<int>(size)}, uint8);
@@ -140,7 +135,7 @@ void gpu_sort(const Stream& s, array in, array& out_, int axis, bool argsort) {
 
       // Start capturing after allocations
       auto capture = encoder.capture_context();
-      CHECK_CUDA_ERROR(cub::DeviceSegmentedRadixSort::SortKeys(
+      CHECK_CUDA_ERROR(cub::DeviceSegmentedSort::StableSortKeys(
          temp.data<void>(),
          size,
          in.data<Type>(),
@@ -149,8 +144,6 @@ void gpu_sort(const Stream& s, array in, array& out_, int axis, bool argsort) {
          in.data_size() / nsort,
          offsets,
          offsets + 1,
-         0,
-         sizeof(Type) * 8,
          stream));
     }
   } else {
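Note: these hunks swap `cub::DeviceSegmentedRadixSort::SortPairs/SortKeys` for `cub::DeviceSegmentedSort::StableSortPairs/StableSortKeys`. The radix entry points take an explicit bit range, which is why the `0,` and `sizeof(Type) * 8,` arguments (begin_bit and end_bit) exist only on the radix side. Both APIs follow CUB's two-phase pattern visible above: a first call with a null temp-storage pointer only fills in `size`, and the second call runs the sort with the allocated scratch buffer. A standalone sketch of that pattern with the new API (toy buffers, not MLX code — two segments of four ints):

    #include <cub/device/device_segmented_sort.cuh>
    #include <cuda_runtime.h>
    #include <cstdio>

    int main() {
      const int num_items = 8, num_segments = 2;
      int h_keys[8] = {7, 3, 5, 1, 8, 6, 2, 4};
      int h_offsets[3] = {0, 4, 8}; // segment i spans [offsets[i], offsets[i+1])

      int *d_in, *d_out, *d_offsets;
      cudaMalloc(&d_in, sizeof(h_keys));
      cudaMalloc(&d_out, sizeof(h_keys));
      cudaMalloc(&d_offsets, sizeof(h_offsets));
      cudaMemcpy(d_in, h_keys, sizeof(h_keys), cudaMemcpyHostToDevice);
      cudaMemcpy(d_offsets, h_offsets, sizeof(h_offsets), cudaMemcpyHostToDevice);

      // Phase 1: null temp storage, only computes `size`
      // (the `nullptr, size, ...` calls in the diff).
      void* d_temp = nullptr;
      size_t size = 0;
      cub::DeviceSegmentedSort::StableSortKeys(
          d_temp, size, d_in, d_out, num_items, num_segments,
          d_offsets, d_offsets + 1);

      // Phase 2: run the sort with allocated scratch space
      // (MLX wraps this allocation in a temporary `array`).
      cudaMalloc(&d_temp, size);
      cub::DeviceSegmentedSort::StableSortKeys(
          d_temp, size, d_in, d_out, num_items, num_segments,
          d_offsets, d_offsets + 1);

      cudaMemcpy(h_keys, d_out, sizeof(h_keys), cudaMemcpyDeviceToHost);
      for (int k : h_keys) {
        printf("%d ", k); // 1 3 5 7 2 4 6 8
      }
      printf("\n");
      return 0;
    }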
@@ -143,85 +143,87 @@ struct Tile16x16 {
   }
 };
 
-/**
- * A simple container of multiple Tile16x16.
- *
- * Provides utility functions for loading and manipulating collections of basic
- * tiles.
- */
-template <typename T, int ROWS_, int COLS_>
-struct RegisterTile {
-  static constexpr int ROWS = ROWS_;
-  static constexpr int COLS = COLS_;
-  static constexpr int TILES_X = COLS / 16;
-  static constexpr int TILES_Y = ROWS / 16;
+// /**
+// * A simple container of multiple Tile16x16.
+// *
+// * Provides utility functions for loading and manipulating collections of
+// basic
+// * tiles.
+// */
+// template <typename T, int ROWS_, int COLS_>
+// struct RegisterTile {
+// static constexpr int ROWS = ROWS_;
+// static constexpr int COLS = COLS_;
+// static constexpr int TILES_X = COLS / 16;
+// static constexpr int TILES_Y = ROWS / 16;
 
-  Tile16x16<T> data[TILES_X * TILES_Y];
+// Tile16x16<T> data[TILES_X * TILES_Y];
 
-  __device__ inline void fill(T v) {
-    MLX_UNROLL
-    for (int i = 0; i < TILES_Y; i++) {
-      MLX_UNROLL
-      for (int j = 0; j < TILES_X; j++) {
-        data[i * TILES_X + j].fill(v);
-      }
-    }
-  }
+// __device__ inline void fill(T v) {
+// MLX_UNROLL
+// for (int i = 0; i < TILES_Y; i++) {
+// MLX_UNROLL
+// for (int j = 0; j < TILES_X; j++) {
+// data[i * TILES_X + j].fill(v);
+// }
+// }
+// }
 
-  template <typename Tile>
-  __device__ __forceinline__ void
-  load(Tile& tile, uint32_t base_address, int row, int col) {
-    MLX_UNROLL
-    for (int i = 0; i < TILES_Y; i++) {
-      MLX_UNROLL
-      for (int j = 0; j < TILES_X; j++) {
-        data[i * TILES_X + j].load(
-            tile.loc(base_address, row + i * 16, col + j * 16));
-      }
-    }
-  }
+// template <typename Tile>
+// __device__ __forceinline__ void
+// load(Tile& tile, uint32_t base_address, int row, int col) {
+// MLX_UNROLL
+// for (int i = 0; i < TILES_Y; i++) {
+// MLX_UNROLL
+// for (int j = 0; j < TILES_X; j++) {
+// data[i * TILES_X + j].load(
+// tile.loc(base_address, row + i * 16, col + j * 16));
+// }
+// }
+// }
 
-  template <typename Tile, typename F>
-  __device__ __forceinline__ void
-  load(Tile& tile, F f, uint32_t base_address, int row, int col) {
-    MLX_UNROLL
-    for (int i = 0; i < TILES_Y; i++) {
-      MLX_UNROLL
-      for (int j = 0; j < TILES_X; j++) {
-        f(data[i * TILES_X + j],
-          tile,
-          base_address,
-          row + i * 16,
-          col + j * 16);
-      }
-    }
-  }
+// template <typename Tile, typename F>
+// __device__ __forceinline__ void
+// load(Tile& tile, F f, uint32_t base_address, int row, int col) {
+// MLX_UNROLL
+// for (int i = 0; i < TILES_Y; i++) {
+// MLX_UNROLL
+// for (int j = 0; j < TILES_X; j++) {
+// f(data[i * TILES_X + j],
+// tile,
+// base_address,
+// row + i * 16,
+// col + j * 16);
+// }
+// }
+// }
 
-  template <typename U>
-  __device__ inline void store_global(U* x, int N, int row, int col) {
-    MLX_UNROLL
-    for (int i = 0; i < TILES_Y; i++) {
-      MLX_UNROLL
-      for (int j = 0; j < TILES_X; j++) {
-        data[i * TILES_X + j].store_global(
-            x + (row + i * 16) * N + col + j * 16, N);
-      }
-    }
-  }
+// template <typename U>
+// __device__ inline void store_global(U* x, int N, int row, int col) {
+// MLX_UNROLL
+// for (int i = 0; i < TILES_Y; i++) {
+// MLX_UNROLL
+// for (int j = 0; j < TILES_X; j++) {
+// data[i * TILES_X + j].store_global(
+// x + (row + i * 16) * N + col + j * 16, N);
+// }
+// }
+// }
 
-  template <typename U>
-  __device__ inline void
-  store_global_safe(U* x, int N, int row, int col, int max_rows) {
-    MLX_UNROLL
-    for (int i = 0; i < TILES_Y; i++) {
-      MLX_UNROLL
-      for (int j = 0; j < TILES_X; j++) {
-        data[i * TILES_X + j].store_global_safe(
-            x + (row + i * 16) * N + col + j * 16, N, max_rows - row - i * 16);
-      }
-    }
-  }
-};
+// template <typename U>
+// __device__ inline void
+// store_global_safe(U* x, int N, int row, int col, int max_rows) {
+// MLX_UNROLL
+// for (int i = 0; i < TILES_Y; i++) {
+// MLX_UNROLL
+// for (int j = 0; j < TILES_X; j++) {
+// data[i * TILES_X + j].store_global_safe(
+// x + (row + i * 16) * N + col + j * 16, N, max_rows - row - i *
+// 16);
+// }
+// }
+// }
+// };
 
 /**
  * A simple container of multiple Tile16x16.
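Note: this hunk comments out `RegisterTile` wholesale ahead of a rewritten container whose doc comment follows as context. The layout is worth keeping in mind when reading the replacement: a ROWS x COLS tile is stored as a row-major TILES_Y x TILES_X grid of 16x16 fragments, and every bulk operation is the same doubly nested loop over `data[i * TILES_X + j]`, with fragment (i, j) anchored at element offset (row + i * 16, col + j * 16). A compile-time sketch of that mapping (tile dimensions chosen for illustration):

    // RegisterTile fragment indexing for a hypothetical 32x48 tile:
    // 2 fragment rows (TILES_Y) by 3 fragment columns (TILES_X).
    constexpr int ROWS = 32, COLS = 48;
    constexpr int TILES_X = COLS / 16;
    constexpr int TILES_Y = ROWS / 16;

    // Fragment (i, j) lives at data[i * TILES_X + j] and covers elements
    // [i * 16, i * 16 + 16) x [j * 16, j * 16 + 16) of the tile.
    constexpr int fragment_index(int i, int j) {
      return i * TILES_X + j;
    }
    static_assert(fragment_index(0, 0) == 0, "first fragment");
    static_assert(fragment_index(1, 2) == 5, "last fragment of the 2x3 grid");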
@@ -39,98 +39,52 @@ ternary_v(const bool* a, const T* b, const T* c, T* out, IdxT size) {
   }
 }
 
-template <typename Op, typename T, typename IdxT, int NDIM, int N_READS>
+template <typename Op, typename T, typename IdxT, int NDIM>
 __global__ void ternary_g_nd(
     const bool* a,
     const T* b,
     const T* c,
     T* out,
-    IdxT size_rest,
+    IdxT size,
     const __grid_constant__ cuda::std::array<int32_t, NDIM> shape,
     const __grid_constant__ cuda::std::array<int64_t, NDIM> a_strides,
     const __grid_constant__ cuda::std::array<int64_t, NDIM> b_strides,
     const __grid_constant__ cuda::std::array<int64_t, NDIM> c_strides) {
-  auto block = cg::this_thread_block();
-  auto grid = cg::this_grid();
-  IdxT index_rest =
-      grid.block_index().y * block.dim_threads().y + block.thread_index().y;
-  if (index_rest >= size_rest) {
-    return;
-  }
-
-  auto shape_x = shape[NDIM - 1];
-  auto a_stride_x = a_strides[NDIM - 1];
-  auto b_stride_x = b_strides[NDIM - 1];
-  auto c_stride_x = c_strides[NDIM - 1];
-  IdxT index_x =
-      grid.block_index().x * block.dim_threads().x + block.thread_index().x;
-  auto [a_idx, b_idx, c_idx] = elem_to_loc_nd<NDIM>(
-      index_rest * shape_x,
-      shape.data(),
-      a_strides.data(),
-      b_strides.data(),
-      c_strides.data());
-  auto a_vec =
-      load_vector<N_READS>(a + a_idx, index_x, shape_x, a_stride_x, false);
-  auto b_vec =
-      load_vector<N_READS>(b + b_idx, index_x, shape_x, b_stride_x, T(0));
-  auto c_vec =
-      load_vector<N_READS>(c + c_idx, index_x, shape_x, c_stride_x, T(0));
-
-  AlignedVector<T, N_READS> out_vec;
-#pragma unroll
-  for (int i = 0; i < N_READS; ++i) {
-    out_vec[i] = Op{}(a_vec[i], b_vec[i], c_vec[i]);
-  }
-  store_vector(out + shape_x * index_rest, index_x, out_vec, shape_x);
+  IdxT index = cg::this_grid().thread_rank();
+  if (index < size) {
+    auto [a_idx, b_idx, c_idx] = elem_to_loc_nd<NDIM>(
+        index,
+        shape.data(),
+        a_strides.data(),
+        b_strides.data(),
+        c_strides.data());
+    out[index] = Op{}(a[a_idx], b[b_idx], c[c_idx]);
+  }
 }
 
-template <typename Op, typename T, typename IdxT, int N_READS>
+template <typename Op, typename T, typename IdxT>
 __global__ void ternary_g(
     const bool* a,
     const T* b,
     const T* c,
     T* out,
-    IdxT size_rest,
+    IdxT size,
     const __grid_constant__ Shape shape,
     const __grid_constant__ Strides a_strides,
     const __grid_constant__ Strides b_strides,
     const __grid_constant__ Strides c_strides,
     int ndim) {
-  auto block = cg::this_thread_block();
-  auto grid = cg::this_grid();
-  IdxT index_rest =
-      grid.block_index().y * block.dim_threads().y + block.thread_index().y;
-  if (index_rest >= size_rest) {
-    return;
-  }
-
-  auto shape_x = shape[ndim - 1];
-  auto a_stride_x = a_strides[ndim - 1];
-  auto b_stride_x = b_strides[ndim - 1];
-  auto c_stride_x = c_strides[ndim - 1];
-  IdxT index_x =
-      grid.block_index().x * block.dim_threads().x + block.thread_index().x;
-  auto [a_idx, b_idx, c_idx] = elem_to_loc(
-      index_rest * shape_x,
-      shape.data(),
-      a_strides.data(),
-      b_strides.data(),
-      c_strides.data(),
-      ndim);
-  auto a_vec =
-      load_vector<N_READS>(a + a_idx, index_x, shape_x, a_stride_x, false);
-  auto b_vec =
-      load_vector<N_READS>(b + b_idx, index_x, shape_x, b_stride_x, T(0));
-  auto c_vec =
-      load_vector<N_READS>(c + c_idx, index_x, shape_x, c_stride_x, T(0));
-
-  AlignedVector<T, N_READS> out_vec;
-#pragma unroll
-  for (int i = 0; i < N_READS; ++i) {
-    out_vec[i] = Op{}(a_vec[i], b_vec[i], c_vec[i]);
-  }
-  store_vector(out + shape_x * index_rest, index_x, out_vec, shape_x);
+  IdxT index = cg::this_grid().thread_rank();
+  if (index < size) {
+    auto [a_idx, b_idx, c_idx] = elem_to_loc(
+        index,
+        shape.data(),
+        a_strides.data(),
+        b_strides.data(),
+        c_strides.data(),
+        ndim);
+    out[index] = Op{}(a[a_idx], b[b_idx], c[c_idx]);
+  }
 }
 
 } // namespace cu
@@ -169,55 +123,36 @@ void ternary_op_gpu_inplace(
   auto& b_strides = strides[1];
   auto& c_strides = strides[2];
   int ndim = shape.size();
-  int work_per_thread = 1;
-  auto dim0 = ndim > 0 ? shape.back() : 1;
-  auto rest = out.size() / dim0;
-  if (dim0 >= 4) {
-    work_per_thread = 4;
-  }
-  dim0 = (dim0 + work_per_thread - 1) / work_per_thread;
-  auto block_dims = get_block_dims(dim0, rest, 1);
-  uint32_t num_blocks_x = cuda::ceil_div(dim0, block_dims.x);
-  uint32_t num_blocks_y = cuda::ceil_div(rest, block_dims.y);
-
   if (ndim <= 3) {
     dispatch_1_2_3(ndim, [&](auto dims_constant) {
-      auto kernel =
-          cu::ternary_g_nd<Op, DType, IdxT, dims_constant(), 1>;
-      if (work_per_thread == 4) {
-        kernel =
-            cu::ternary_g_nd<Op, DType, IdxT, dims_constant(), 4>;
-      }
+      auto [num_blocks, block_dims] = get_launch_args(out, large());
       encoder.add_kernel_node(
-          kernel,
-          {num_blocks_x, num_blocks_y},
+          cu::ternary_g_nd<Op, DType, IdxT, dims_constant()>,
+          num_blocks,
           block_dims,
           0,
           a.data<bool>(),
           b.data<DType>(),
          c.data<DType>(),
          out.data<DType>(),
-          rest,
+          out.size(),
          const_param<dims_constant()>(shape),
          const_param<dims_constant()>(a_strides),
          const_param<dims_constant()>(b_strides),
          const_param<dims_constant()>(c_strides));
     });
   } else {
-    auto kernel = cu::ternary_g<Op, DType, IdxT, 1>;
-    if (work_per_thread == 4) {
-      kernel = cu::ternary_g<Op, DType, IdxT, 4>;
-    }
+    auto [num_blocks, block_dims] = get_launch_args(out, large());
     encoder.add_kernel_node(
-        kernel,
-        {num_blocks_x, num_blocks_y},
+        cu::ternary_g<Op, DType, IdxT>,
+        num_blocks,
        block_dims,
        0,
        a.data<bool>(),
        b.data<DType>(),
        c.data<DType>(),
        out.data<DType>(),
-        rest,
+        out.data_size(),
        const_param(shape),
        const_param(a_strides),
        const_param(b_strides),
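Note: on both sides of these ternary (and the unary) kernels the addressing goes through `elem_to_loc*`; what changes is the work per thread. The right-hand kernels assign one flat element index per thread, while the left-hand kernels split the grid into a y dimension over the product of all but the innermost dimension ("rest") and an x dimension over the innermost dimension in chunks of `N_READS`, using `load_vector`/`store_vector` for aligned vector accesses. A stripped-down, self-contained pair showing just that launch-shape contrast on contiguous data (illustrative only — no strides, no `elem_to_loc`):

    #include <cuda_runtime.h>

    // One element per thread over a flat index (right-hand-side shape).
    __global__ void scalar_kernel(const float* in, float* out, int size) {
      int index = blockIdx.x * blockDim.x + threadIdx.x;
      if (index < size) {
        out[index] = in[index] + 1.0f;
      }
    }

    // N_READS elements per thread along the innermost dimension, with the
    // y dimension covering the remaining "rest" rows (left-hand-side shape).
    template <int N_READS>
    __global__ void
    vector_kernel(const float* in, float* out, int dim0, int rest) {
      int row = blockIdx.y * blockDim.y + threadIdx.y;
      int x = (blockIdx.x * blockDim.x + threadIdx.x) * N_READS;
      if (row >= rest) {
        return;
      }
    #pragma unroll
      for (int i = 0; i < N_READS; ++i) {
        if (x + i < dim0) { // tail guard; MLX handles this inside load_vector
          out[row * dim0 + x + i] = in[row * dim0 + x + i] + 1.0f;
        }
      }
    }

This also explains the deleted launch arithmetic in `ternary_op_gpu_inplace`: `dim0` is rounded up by `work_per_thread` before computing the x block count, so each thread covers `N_READS` consecutive elements of the innermost dimension.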
@@ -37,36 +37,19 @@ __global__ void unary_v(const In* in, Out* out, IdxT size) {
   }
 }
 
-template <typename Op, typename In, typename Out, typename IdxT, int N_READS>
+template <typename Op, typename In, typename Out, typename IdxT>
 __global__ void unary_g(
     const In* in,
     Out* out,
-    IdxT size_rest,
+    IdxT size,
     const __grid_constant__ Shape shape,
     const __grid_constant__ Strides strides,
     int ndim) {
-  auto block = cg::this_thread_block();
-  auto grid = cg::this_grid();
-  IdxT index_rest =
-      grid.block_index().y * block.dim_threads().y + block.thread_index().y;
-  if (index_rest >= size_rest) {
-    return;
-  }
-
-  auto shape_x = shape[ndim - 1];
-  auto stride_x = strides[ndim - 1];
-  IdxT index_x =
-      grid.block_index().x * block.dim_threads().x + block.thread_index().x;
-  auto idx =
-      elem_to_loc(index_rest * shape_x, shape.data(), strides.data(), ndim);
-  auto in_vec =
-      load_vector<N_READS>(in + idx, index_x, shape_x, stride_x, In(0));
-  AlignedVector<Out, N_READS> out_vec;
-#pragma unroll
-  for (int i = 0; i < N_READS; ++i) {
-    out_vec[i] = Op{}(in_vec[i]);
-  }
-  store_vector(out + shape_x * index_rest, index_x, out_vec, shape_x);
+  IdxT index = cg::this_grid().thread_rank();
+  if (index < size) {
+    auto idx = elem_to_loc(index, shape.data(), strides.data(), ndim);
+    out[index] = Op{}(in[idx]);
+  }
 }
 
 template <typename Op, typename In, typename Out>
@@ -144,7 +127,8 @@ void unary_op_gpu_inplace(
     using OutType = cuda_type_t<CTYPE_OUT>;
     if (contig) {
       using IdxT = std::conditional_t<large(), int64_t, uint32_t>;
-      constexpr int N_READS = 16 / sizeof(OutType);
+      // TODO: Choose optimized value based on type size.
+      constexpr int N_READS = 4;
       auto [num_blocks, block_dims] = get_launch_args(
          out.data_size(), out.shape(), out.strides(), large, N_READS);
       encoder.add_kernel_node(
@@ -158,30 +142,18 @@ void unary_op_gpu_inplace(
    } else {
      using IdxT = std::conditional_t<large(), int64_t, int32_t>;
      auto [shape, strides] = collapse_contiguous_dims(in);
-      auto ndim = shape.size();
-      int work_per_thread = 1;
-      auto kernel = cu::unary_g<Op, InType, OutType, IdxT, 1>;
-      auto dim0 = ndim > 0 ? shape.back() : 1;
-      auto rest = out.size() / dim0;
-      if (dim0 >= 4) {
-        kernel = cu::unary_g<Op, InType, OutType, IdxT, 4>;
-        work_per_thread = 4;
-      }
-      dim0 = (dim0 + work_per_thread - 1) / work_per_thread;
-      auto block_dims = get_block_dims(dim0, rest, 1);
-      uint32_t num_blocks_x = cuda::ceil_div(dim0, block_dims.x);
-      uint32_t num_blocks_y = cuda::ceil_div(rest, block_dims.y);
+      auto [num_blocks, block_dims] = get_launch_args(out, large);
       encoder.add_kernel_node(
-          kernel,
-          {num_blocks_x, num_blocks_y},
+          cu::unary_g<Op, InType, OutType, IdxT>,
+          num_blocks,
          block_dims,
          0,
          in.data<InType>(),
          out.data<OutType>(),
-          rest,
+          out.data_size(),
          const_param(shape),
          const_param(strides),
-          ndim);
+          shape.size());
      }
    });
  } else {
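Note on the `N_READS` change in the contiguous path: `16 / sizeof(OutType)` sizes each thread's vector so that every access spans 16 bytes — one 128-bit transaction — for any element width, whereas the fixed `N_READS = 4` on the right-hand side only reaches 16 bytes for 4-byte types (hence its TODO). Concretely:

    #include <cuda_fp16.h>

    // 16-byte (128-bit) accesses regardless of element width:
    static_assert(16 / sizeof(float) == 4, "four floats per access");
    static_assert(16 / sizeof(__half) == 8, "eight halfs per access");
    static_assert(16 / sizeof(double) == 2, "two doubles per access");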
@@ -1,34 +0,0 @@
-target_sources(
-  mlx
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/abs.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/arccos.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/arccosh.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/arcsin.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/arcsinh.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/arctan.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/arctanh.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/bitwise_invert.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/ceil.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/conjugate.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/cos.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/cosh.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/erf.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/erf_inv.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/exp.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/expm1.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/floor.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/imag.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/log.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/log1p.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/logical_not.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/negative.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/real.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/round.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/sigmoid.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/sign.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/sin.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/sinh.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/sqrt.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/square.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/tan.cu
-  PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/tanh.cu)
@@ -1,7 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/unary/unary.cuh"
-
-namespace mlx::core {
-UNARY_GPU(Abs)
-} // namespace mlx::core
@@ -1,7 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/unary/unary.cuh"
-
-namespace mlx::core {
-UNARY_GPU(ArcCos)
-} // namespace mlx::core
@@ -1,7 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/unary/unary.cuh"
-
-namespace mlx::core {
-UNARY_GPU(ArcCosh)
-} // namespace mlx::core
@@ -1,7 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/unary/unary.cuh"
-
-namespace mlx::core {
-UNARY_GPU(ArcSin)
-} // namespace mlx::core
@@ -1,7 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/unary/unary.cuh"
-
-namespace mlx::core {
-UNARY_GPU(ArcSinh)
-} // namespace mlx::core
@@ -1,7 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/unary/unary.cuh"
-
-namespace mlx::core {
-UNARY_GPU(ArcTan)
-} // namespace mlx::core
@@ -1,7 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/unary/unary.cuh"
-
-namespace mlx::core {
-UNARY_GPU(ArcTanh)
-} // namespace mlx::core
@@ -1,7 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/unary/unary.cuh"
-
-namespace mlx::core {
-UNARY_GPU(BitwiseInvert)
-} // namespace mlx::core
@@ -1,7 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/unary/unary.cuh"
-
-namespace mlx::core {
-UNARY_GPU(Ceil)
-} // namespace mlx::core
@@ -1,7 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/unary/unary.cuh"
-
-namespace mlx::core {
-UNARY_GPU(Conjugate)
-} // namespace mlx::core
@@ -1,7 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/unary/unary.cuh"
-
-namespace mlx::core {
-UNARY_GPU(Cos)
-} // namespace mlx::core
@@ -1,7 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/unary/unary.cuh"
-
-namespace mlx::core {
-UNARY_GPU(Cosh)
-} // namespace mlx::core
@@ -1,7 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/unary/unary.cuh"
-
-namespace mlx::core {
-UNARY_GPU(Erf)
-} // namespace mlx::core
@@ -1,7 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/unary/unary.cuh"
-
-namespace mlx::core {
-UNARY_GPU(ErfInv)
-} // namespace mlx::core
@@ -1,7 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/unary/unary.cuh"
-
-namespace mlx::core {
-UNARY_GPU(Exp)
-} // namespace mlx::core
@@ -1,7 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/unary/unary.cuh"
-
-namespace mlx::core {
-UNARY_GPU(Expm1)
-} // namespace mlx::core
@@ -1,7 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/unary/unary.cuh"
-
-namespace mlx::core {
-UNARY_GPU(Floor)
-} // namespace mlx::core
@@ -1,7 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/unary/unary.cuh"
-
-namespace mlx::core {
-UNARY_GPU(Imag)
-} // namespace mlx::core
@@ -1,21 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/unary/unary.cuh"
-
-namespace mlx::core {
-void Log::eval_gpu(const std::vector<array>& inputs, array& out) {
-  nvtx3::scoped_range r("Log::eval_gpu");
-  auto& s = out.primitive().stream();
-  switch (base_) {
-    case Base::e:
-      unary_op_gpu<cu::Log>(inputs, out, name(), s);
-      break;
-    case Base::two:
-      unary_op_gpu<cu::Log2>(inputs, out, name(), s);
-      break;
-    case Base::ten:
-      unary_op_gpu<cu::Log10>(inputs, out, name(), s);
-      break;
-  }
-}
-} // namespace mlx::core
@@ -1,7 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/unary/unary.cuh"
-
-namespace mlx::core {
-UNARY_GPU(Log1p)
-} // namespace mlx::core
@@ -1,7 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/unary/unary.cuh"
-
-namespace mlx::core {
-UNARY_GPU(LogicalNot)
-} // namespace mlx::core
@@ -1,7 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/unary/unary.cuh"
-
-namespace mlx::core {
-UNARY_GPU(Negative)
-} // namespace mlx::core
@@ -1,7 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/unary/unary.cuh"
-
-namespace mlx::core {
-UNARY_GPU(Real)
-} // namespace mlx::core
@@ -1,18 +0,0 @@
-// Copyright © 2025 Apple Inc.
-
-#include "mlx/backend/cuda/unary/unary.cuh"
-
-namespace mlx::core {
-void Round::eval_gpu(const std::vector<array>& inputs, array& out) {
-  nvtx3::scoped_range r("Round::eval_gpu");
-  assert(inputs.size() == 1);
-  const auto& in = inputs[0];
-  auto& s = out.primitive().stream();
-  if (issubdtype(in.dtype(), inexact)) {
-    unary_op_gpu<cu::Round>(inputs, out, name(), s);
-  } else {
-    // No-op integer types
-    out.copy_shared_buffer(in);
-  }
-}
-} // namespace mlx::core
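Note: the per-op files removed above are one-line registrations; `UNARY_GPU(Abs)` and friends presumably expand to the same `eval_gpu` boilerplate that log.cu and round.cu spell out by hand for their special cases. The macro definition itself is not shown in this diff; a plausible sketch, inferred from those expanded files (the real macro is presumably defined in mlx/backend/cuda/unary/unary.cuh):

    // Hypothetical expansion, inferred from the hand-written Log/Round files.
    #define UNARY_GPU(func)                                               \
      void func::eval_gpu(const std::vector<array>& inputs, array& out) { \
        nvtx3::scoped_range r(#func "::eval_gpu");                        \
        auto& s = out.primitive().stream();                               \
        unary_op_gpu<cu::func>(inputs, out, name(), s);                   \
      }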
Some files were not shown because too many files have changed in this diff.