Mirror of https://github.com/ml-explore/mlx.git

* Fast Inference SDPA op

  Implements Metal shaders for:

      o = mx.fast_inference_sdpa(queries, keys, values, scale, mask)

  Supports fp16 and fp32 dtypes; assumes d_k = 128. Generic op support and
  prompt encoding are handled via mlx primitives; the Metal implementation is
  for the inference (decode) use case only. The majority of the performance
  benefit appears to result from GQA and reduced bandwidth requirements; there
  is approximate performance parity for the MHA use case (from some
  measurements on an M3 Max).

* Flush shared memory to zero before unprotected reads for (scores @ values)
* Move to fast:: namespace, address reviewer comments; also attempt to revert
  formatter auto-changes for files not relevant to this change
* Move the shared memory flush to the top of the kernel
* Resolve compiler warnings
* Update python/src/fast.cpp (four such commits, each co-authored by
  Awni Hannun <awni.hannun@gmail.com>)
* Update docstring per PR feedback
* Softmax in higher precision, ...
* Route to fallback for more use cases: batch size > 1, head_dim other than
  128, etc.
* Address Linux build failure
* Address other reviewer comments
* Remove extraneous eval_cpu function per review

---------

Co-authored-by: Atila Orhon <64497909+atiorh@users.noreply.github.com>
Co-authored-by: Awni Hannun <awni.hannun@gmail.com>
Co-authored-by: atila <atiorh@icloud.com>
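For orientation, below is a minimal sketch of the decode-time call this change targets, written against the C++ front end. The entry point name `fast::scaled_dot_product_attention`, the `random::normal` initializers, and the shapes are illustrative assumptions (the commit exposes the op to Python as `mx.fast_inference_sdpa`); this is not code from the PR.

// Hedged sketch only: assumes mlx/mlx.h exposes the fast SDPA op as
// mlx::core::fast::scaled_dot_product_attention with an optional mask argument.
#include <cmath>
#include "mlx/mlx.h"

using namespace mlx::core;

int main() {
  // Single-token decode: one query position attends over a cached sequence.
  // Batch size 1 and head_dim = 128 match the cases the Metal kernel handles;
  // other shapes route to the fallback built from generic mlx primitives.
  int n_heads = 32, seq_len = 512, head_dim = 128;
  auto queries = random::normal({1, n_heads, 1, head_dim}, float16);
  auto keys = random::normal({1, n_heads, seq_len, head_dim}, float16);
  auto values = random::normal({1, n_heads, seq_len, head_dim}, float16);
  float scale = 1.0f / std::sqrt(static_cast<float>(head_dim));

  auto out = fast::scaled_dot_product_attention(queries, keys, values, scale);
  out.eval();  // expected shape: {1, n_heads, 1, head_dim}
  return 0;
}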
99 lines · 2.7 KiB · C++
#pragma once

#include "mlx/primitives.h"

namespace mlx::core::fast {

// Custom primitive accepts a fallback function which it uses for
// transformations. Transformations are virtual so that derived classes may
// override the default behavior.
class Custom : public Primitive {
 public:
  explicit Custom(
      Stream stream,
      std::function<std::vector<array>(std::vector<array>)> fallback)
      : Primitive(stream), fallback_(fallback){};

  virtual std::pair<std::vector<array>, std::vector<int>> vmap(
      const std::vector<array>& inputs,
      const std::vector<int>& axes) override;

  virtual std::vector<array> jvp(
      const std::vector<array>& primals,
      const std::vector<array>& tangents,
      const std::vector<int>& argnums) override;

  virtual std::vector<array> vjp(
      const std::vector<array>& primals,
      const std::vector<array>& cotangents,
      const std::vector<int>& argnums,
      const std::vector<array>& outputs) override;

 private:
  std::function<std::vector<array>(std::vector<array>)> fallback_;
};
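
// Note: `fallback_` in Custom is private, so the derived primitives below keep
// their own copy of the fallback for use in their eval paths, while the
// vmap / jvp / vjp transformations declared above remain on Custom and may be
// overridden.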
class RoPE : public Custom {
 public:
  RoPE(
      Stream stream,
      std::function<std::vector<array>(std::vector<array>)> fallback,
      int dims,
      bool traditional,
      float base,
      float scale,
      int offset)
      : Custom(stream, fallback),
        dims_(dims),
        traditional_(traditional),
        base_(base),
        scale_(scale),
        offset_(offset){};

  void eval_cpu(const std::vector<array>& inputs, std::vector<array>& outputs)
      override;
  void eval_gpu(const std::vector<array>& inputs, std::vector<array>& outputs)
      override;

  DEFINE_PRINT(RoPE)
  bool is_equivalent(const Primitive& other) const override;

 private:
  std::function<std::vector<array>(std::vector<array>)> fallback_;
  int dims_;
  bool traditional_;
  float base_;
  float scale_;
  int offset_;
};
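
// RoPE declares its eval_cpu / eval_gpu kernels here and defines them out of
// line; ScaledDotProductAttention below instead inlines an eval_cpu that simply
// delegates to the fallback, keeping a specialized kernel only for the GPU path.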
class ScaledDotProductAttention : public Custom {
 public:
  explicit ScaledDotProductAttention(
      Stream stream,
      std::function<std::vector<array>(std::vector<array>)> fallback,
      const float scale,
      const bool needs_mask)
      : Custom(stream, fallback), scale_(scale), needs_mask_(needs_mask){};

  void eval_cpu(const std::vector<array>& inputs, std::vector<array>& outputs)
      override {
    outputs[0] = fallback_(inputs)[0];
  };

  void eval_gpu(const std::vector<array>& inputs, std::vector<array>& outputs)
      override {
    eval_gpu(inputs, outputs[0]);
  };

  void eval_gpu(const std::vector<array>& inputs, array& out);
  bool is_equivalent(const Primitive& other) const override;

  DEFINE_PRINT(ScaledDotProductAttention)

 private:
  std::function<std::vector<array>(std::vector<array>)> fallback_;
  float scale_;
  bool needs_mask_;
};

} // namespace mlx::core::fast
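
For context, here is a sketch of how a front end (e.g. mlx/fast.cpp or python/src/fast.cpp, mentioned in the commit message) might wire this primitive to a fallback built from generic mlx ops. All names, shapes, and the fallback body are illustrative assumptions rather than the repository's actual implementation; the point is only to demonstrate the Custom + fallback pattern the header defines.

// Hedged sketch of the Custom + fallback wiring pattern; not the real mlx code.
// Assumes this header and mlx/mlx.h are includable from the caller.
#include "mlx/fast_primitives.h"
#include "mlx/mlx.h"

namespace mx = mlx::core;

mx::array sdpa_with_fallback(
    const mx::array& queries,
    const mx::array& keys,
    const mx::array& values,
    float scale,
    mx::StreamOrDevice s = {}) {
  // Reference path from generic primitives: softmax((q @ k^T) * scale) @ v,
  // with the softmax taken in float32 (matching "Softmax in higher precision").
  auto fallback =
      [scale, s](std::vector<mx::array> inputs) -> std::vector<mx::array> {
    auto& q = inputs[0];
    auto& k = inputs[1];
    auto& v = inputs[2];
    auto scores = mx::matmul(
        mx::multiply(q, mx::array(scale, q.dtype()), s),
        mx::swapaxes(k, -1, -2, s),
        s);
    scores = mx::softmax(mx::astype(scores, mx::float32, s), -1);
    return {mx::matmul(mx::astype(scores, q.dtype(), s), v, s)};
  };

  // The primitive stores the fallback; eval_gpu dispatches to the Metal kernel
  // while eval_cpu (see the header above) simply calls the fallback. The output
  // shape equals the query shape when d_v == d_k.
  auto primitive = std::make_shared<mx::fast::ScaledDotProductAttention>(
      mx::to_stream(s), fallback, scale, /* needs_mask = */ false);
  return mx::array(
      queries.shape(),
      queries.dtype(),
      std::move(primitive),
      {queries, keys, values});
}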