Reduce a little overhead (#871)

* some small overhead improvements

* use result_type in rms_norm

* remove release force

* fix + use non-vector version

* revert compile change

* fix ops

* shave off a little more overhead

* a little more cleanup and overhead reduction
Awni Hannun
2024-03-22 17:29:36 -07:00
committed by GitHub
parent 6ee1112f30
commit be98f4ab6b
13 changed files with 239 additions and 240 deletions
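The hunks below all make the same change: each trivial lambda that only forwarded its arguments to the corresponding fast:: op is replaced by the function pointer itself, so nanobind dispatches straight to the C++ function. A minimal sketch of that pattern, assuming a hypothetical add_one function and overhead_demo module name rather than anything from this commit:

// Sketch only: hypothetical function and module, not code from this change.
#include <nanobind/nanobind.h>

namespace nb = nanobind;
using namespace nb::literals;

int add_one(int x) {
  return x + 1;
}

NB_MODULE(overhead_demo, m) {
  // Before: a lambda that only forwards its argument to the real function.
  m.def(
      "add_one_wrapped",
      [](int x) { return add_one(x); },
      "x"_a);

  // After: bind the function pointer directly; nanobind deduces the
  // signature and the extra forwarding frame disappears.
  m.def("add_one", &add_one, "x"_a);
}

Either binding exposes the same Python signature; the direct form simply drops one C++ call per invocation.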


@@ -17,12 +17,7 @@ void init_fast(nb::module_& parent_module) {
   m.def(
       "rms_norm",
-      [](const array& x,
-         const array& weight,
-         float eps,
-         const StreamOrDevice& s /* = {} */) {
-        return fast::rms_norm(x, weight, eps, s);
-      },
+      &fast::rms_norm,
       "x"_a,
       "weight"_a,
       "eps"_a,
@@ -48,13 +43,7 @@ void init_fast(nb::module_& parent_module) {
   m.def(
       "layer_norm",
-      [](const array& x,
-         const std::optional<array>& weight,
-         const std::optional<array>& bias,
-         float eps,
-         const StreamOrDevice& s /* = {} */) {
-        return fast::layer_norm(x, weight, bias, eps, s);
-      },
+      &fast::layer_norm,
       "x"_a,
       "weight"_a.none(),
       "bias"_a.none(),
@@ -84,15 +73,7 @@ void init_fast(nb::module_& parent_module) {
   m.def(
       "rope",
-      [](const array& a,
-         int dims,
-         bool traditional,
-         float base,
-         float scale,
-         int offset,
-         const StreamOrDevice& s /* = {} */) {
-        return fast::rope(a, dims, traditional, base, scale, offset, s);
-      },
+      &fast::rope,
       "a"_a,
       "dims"_a,
       nb::kw_only(),
@@ -123,14 +104,7 @@ void init_fast(nb::module_& parent_module) {
   m.def(
       "scaled_dot_product_attention",
-      [](const array& q,
-         const array& k,
-         const array& v,
-         const float scale,
-         const std::optional<array>& mask,
-         const StreamOrDevice& s) {
-        return fast::scaled_dot_product_attention(q, k, v, scale, mask, s);
-      },
+      &fast::scaled_dot_product_attention,
       "q"_a,
       "k"_a,
       "v"_a,