Mirror of https://github.com/ml-explore/mlx.git (synced 2025-12-16 01:49:05 +08:00)

Commit: Use uint as index type

This commit switches the 32-bit index type used by the contiguous CUDA kernels (binary, binary_two, copy, ternary, unary) from int32_t to uint32_t. Because an unsigned index difference can never go negative, each kernel's tail guard is rewritten from "IdxT remaining = size - index * N_READS; if (remaining <= 0) return;" into a plain comparison, "if ((index + 1) * N_READS > size)", and the copy dispatch threshold rises from INT32_MAX to UINT32_MAX.

@@ -20,15 +20,10 @@ namespace cg = cooperative_groups;
 template <typename Op, typename In, typename Out, typename IdxT, int N_READS>
 __global__ void binary_ss(const In* a, const In* b, Out* out, IdxT size) {
   IdxT index = cg::this_grid().thread_rank();
-  IdxT remaining = size - index * N_READS;
-  if (remaining <= 0) {
-    return;
-  }
-
-  if (remaining < N_READS) {
-    for (int i = 0; i < remaining; ++i) {
-      IdxT offset = index * N_READS + i;
-      out[offset] = Op{}(a[0], b[0]);
+  if ((index + 1) * N_READS > size) {
+    for (int i = index * N_READS; i < size; ++i) {
+      out[i] = Op{}(a[0], b[0]);
     }
   } else {
     AlignedVector<Out, N_READS> out_vec;

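Why the old guard had to go: with an unsigned IdxT, size - index * N_READS wraps around to a huge positive value instead of going negative, so remaining <= 0 can never reject an out-of-range thread. A minimal host-side sketch of the failure mode and of the replacement comparison (illustrative only, not mlx code):

#include <cassert>
#include <cstdint>

int main() {
  constexpr int N_READS = 4;
  uint32_t size = 10;
  uint32_t index = 3; // first thread past the end: covers [12, 16)

  // Old guard: with unsigned arithmetic, 10 - 12 wraps to 4294967294,
  // so `remaining <= 0` is false and the thread would run anyway.
  uint32_t remaining = size - index * N_READS;
  assert(remaining > 0); // wrapped: the old guard is broken

  // New guard: a pure comparison, no subtraction, correct for
  // unsigned index types.
  assert((index + 1) * N_READS > size);
}
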
@@ -44,15 +39,10 @@ __global__ void binary_ss(const In* a, const In* b, Out* out, IdxT size) {
 template <typename Op, typename In, typename Out, typename IdxT, int N_READS>
 __global__ void binary_sv(const In* a, const In* b, Out* out, IdxT size) {
   IdxT index = cg::this_grid().thread_rank();
-  IdxT remaining = size - index * N_READS;
-  if (remaining <= 0) {
-    return;
-  }
-
-  if (remaining < N_READS) {
-    for (int i = 0; i < remaining; ++i) {
-      IdxT offset = index * N_READS + i;
-      out[offset] = Op{}(a[0], b[offset]);
+  if ((index + 1) * N_READS > size) {
+    for (IdxT i = index * N_READS; i < size; ++i) {
+      out[i] = Op{}(a[0], b[i]);
     }
   } else {
     auto b_vec = load_vector<N_READS>(b, index);

@@ -70,15 +60,10 @@ __global__ void binary_sv(const In* a, const In* b, Out* out, IdxT size) {
 template <typename Op, typename In, typename Out, typename IdxT, int N_READS>
 __global__ void binary_vs(const In* a, const In* b, Out* out, IdxT size) {
   IdxT index = cg::this_grid().thread_rank();
-  IdxT remaining = size - index * N_READS;
-  if (remaining <= 0) {
-    return;
-  }
-
-  if (remaining < N_READS) {
-    for (int i = 0; i < remaining; ++i) {
-      IdxT offset = index * N_READS + i;
-      out[offset] = Op{}(a[offset], b[0]);
+  if ((index + 1) * N_READS > size) {
+    for (IdxT i = index * N_READS; i < size; ++i) {
+      out[i] = Op{}(a[i], b[0]);
     }
   } else {
     auto a_vec = load_vector<N_READS>(a, index);

@@ -96,15 +81,10 @@ __global__ void binary_vs(const In* a, const In* b, Out* out, IdxT size) {
 template <typename Op, typename In, typename Out, typename IdxT, int N_READS>
 __global__ void binary_vv(const In* a, const In* b, Out* out, IdxT size) {
   IdxT index = cg::this_grid().thread_rank();
-  IdxT remaining = size - index * N_READS;
-  if (remaining <= 0) {
-    return;
-  }
-
-  if (remaining < N_READS) {
-    for (int i = 0; i < remaining; ++i) {
-      IdxT offset = index * N_READS + i;
-      out[offset] = Op{}(a[offset], b[offset]);
+  if ((index + 1) * N_READS > size) {
+    for (IdxT i = index * N_READS; i < size; ++i) {
+      out[i] = Op{}(a[i], b[i]);
     }
   } else {
     auto a_vec = load_vector<N_READS>(a, index);

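All four binary kernels share the same fast path: AlignedVector, load_vector, and store_vector issue one aligned N_READS-wide memory transaction per thread. Those helpers are defined in mlx's CUDA device utilities, not in this diff; the following is only a sketch of the idiom they implement, under the assumption that pointers handed to the vector path are suitably aligned:

#include <cstdint>

// Sketch of the vectorized-access idiom (assumed shape, not mlx's
// actual definitions). Thread `index` owns elements
// [index * N, index * N + N), moved as one aligned load/store.
template <typename T, int N>
struct alignas(sizeof(T) * N) AlignedVector {
  T val[N];
};

template <int N, typename T>
__device__ AlignedVector<T, N> load_vector(const T* ptr, uint32_t index) {
  return *reinterpret_cast<const AlignedVector<T, N>*>(ptr + index * N);
}

template <int N, typename T>
__device__ void store_vector(T* ptr, uint32_t index, const AlignedVector<T, N>& vec) {
  *reinterpret_cast<AlignedVector<T, N>*>(ptr + index * N) = vec;
}
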
@@ -268,7 +248,7 @@ void binary_op_gpu_inplace(
     });
   } else {
     dispatch_bool(out.data_size() > INT32_MAX, [&](auto large) {
-      using IdxT = std::conditional_t<large(), int64_t, int32_t>;
+      using IdxT = std::conditional_t<large(), int64_t, uint32_t>;
       // TODO: Choose optimized value based on type size.
      constexpr int N_READS = 4;
       auto kernel = cu::binary_ss<Op, InType, OutType, IdxT, N_READS>;

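The dispatch_bool(out.data_size() > INT32_MAX, ...) pattern above turns a runtime size check into a compile-time choice of index type. mlx's actual helper lives in its dispatch utilities; a self-contained sketch of the shape, with a usage example mirroring the call sites in this diff:

#include <cstdint>
#include <type_traits>
#include <utility>

// Sketch: map a runtime bool onto std::true_type / std::false_type so
// the callee can select types at compile time (assumed shape, not the
// actual mlx implementation).
template <typename F>
void dispatch_bool(bool b, F&& f) {
  if (b) {
    std::forward<F>(f)(std::true_type{});
  } else {
    std::forward<F>(f)(std::false_type{});
  }
}

void example(uint64_t data_size) {
  dispatch_bool(data_size > INT32_MAX, [&](auto large) {
    using IdxT = std::conditional_t<large(), int64_t, uint32_t>;
    static_assert(sizeof(IdxT) >= 4);
    // ... instantiate and launch the kernel with IdxT ...
  });
}
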
@@ -21,17 +21,12 @@ template <typename Op, typename In, typename Out, typename IdxT, int N_READS>
 __global__ void
 binary_two_ss(const In* a, const In* b, Out* out_a, Out* out_b, IdxT size) {
   IdxT index = cg::this_grid().thread_rank();
-  IdxT remaining = size - index * N_READS;
-  if (remaining <= 0) {
-    return;
-  }
-
-  if (remaining < N_READS) {
-    for (int i = 0; i < remaining; ++i) {
-      IdxT offset = index * N_READS + i;
+  if ((index + 1) * N_READS > size) {
+    for (IdxT i = index * N_READS; i < size; ++i) {
       auto out = Op{}(a[0], b[0]);
-      out_a[offset] = out[0];
-      out_b[offset] = out[1];
+      out_a[i] = out[0];
+      out_b[i] = out[1];
     }
   } else {
     AlignedVector<Out, N_READS> out_a_vec;

@@ -52,17 +47,12 @@ template <typename Op, typename In, typename Out, typename IdxT, int N_READS>
 __global__ void
 binary_two_sv(const In* a, const In* b, Out* out_a, Out* out_b, IdxT size) {
   IdxT index = cg::this_grid().thread_rank();
-  IdxT remaining = size - index * N_READS;
-  if (remaining <= 0) {
-    return;
-  }
-
-  if (remaining < N_READS) {
-    for (int i = 0; i < remaining; ++i) {
-      IdxT offset = index * N_READS + i;
-      auto out = Op{}(a[0], b[offset]);
-      out_a[offset] = out[0];
-      out_b[offset] = out[1];
+  if ((index + 1) * N_READS > size) {
+    for (IdxT i = index * N_READS; i < size; ++i) {
+      auto out = Op{}(a[0], b[i]);
+      out_a[i] = out[0];
+      out_b[i] = out[1];
     }
   } else {
     auto b_vec = load_vector<N_READS>(b, index);

@@ -85,17 +75,12 @@ template <typename Op, typename In, typename Out, typename IdxT, int N_READS>
 __global__ void
 binary_two_vs(const In* a, const In* b, Out* out_a, Out* out_b, IdxT size) {
   IdxT index = cg::this_grid().thread_rank();
-  IdxT remaining = size - index * N_READS;
-  if (remaining <= 0) {
-    return;
-  }
-
-  if (remaining < N_READS) {
-    for (int i = 0; i < remaining; ++i) {
-      IdxT offset = index * N_READS + i;
-      auto out = Op{}(a[offset], b[0]);
-      out_a[offset] = out[0];
-      out_b[offset] = out[1];
+  if ((index + 1) * N_READS > size) {
+    for (IdxT i = index * N_READS; i < size; ++i) {
+      auto out = Op{}(a[i], b[0]);
+      out_a[i] = out[0];
+      out_b[i] = out[1];
     }
   } else {
     auto a_vec = load_vector<N_READS>(a, index);

@@ -118,17 +103,12 @@ template <typename Op, typename In, typename Out, typename IdxT, int N_READS>
 __global__ void
 binary_two_vv(const In* a, const In* b, Out* out_a, Out* out_b, IdxT size) {
   IdxT index = cg::this_grid().thread_rank();
-  IdxT remaining = size - index * N_READS;
-  if (remaining <= 0) {
-    return;
-  }
-
-  if (remaining < N_READS) {
-    for (int i = 0; i < remaining; ++i) {
-      IdxT offset = index * N_READS + i;
-      auto out = Op{}(a[offset], b[offset]);
-      out_a[offset] = out[0];
-      out_b[offset] = out[1];
+  if ((index + 1) * N_READS > size) {
+    for (IdxT i = index * N_READS; i < size; ++i) {
+      auto out = Op{}(a[i], b[i]);
+      out_a[i] = out[0];
+      out_b[i] = out[1];
     }
   } else {
     auto a_vec = load_vector<N_READS>(a, index);

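The binary_two_* kernels write two outputs per element, so Op{}(a, b) must return something indexable as out[0] and out[1]. For illustration, a two-result op in that shape (hypothetical example, not necessarily mlx's definition):

#include <cuda/std/array>

// Hypothetical two-output op matching how binary_two_* consume the
// result: operator() returns an indexable pair, e.g. quotient/remainder.
struct DivMod {
  template <typename T>
  __device__ cuda::std::array<T, 2> operator()(T a, T b) {
    return {a / b, a % b};
  }
};
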
@@ -290,7 +270,7 @@ void binary_two_op_gpu_inplace(
     });
   } else {
     dispatch_bool(out_a.data_size() > INT32_MAX, [&](auto large) {
-      using IdxT = std::conditional_t<large(), int64_t, int32_t>;
+      using IdxT = std::conditional_t<large(), int64_t, uint32_t>;
       // TODO: Choose optimized value based on type size.
       constexpr int N_READS = 4;
       auto kernel = cu::binary_two_ss<Op, InType, OutType, IdxT, N_READS>;

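None of these hunks touch launch geometry, so the guards imply the contract: each thread covers N_READS consecutive elements, and a thread whose range starts at or beyond size now falls into the tail loop and exits immediately (its loop condition is false from the start), which is what lets the explicit remaining <= 0 early return disappear. A hypothetical host-side helper showing that sizing, under the stated assumption:

#include <cstdint>
#include <utility>

// Hypothetical grid-sizing helper (not part of this diff): enough
// threads that every element lands in exactly one thread's
// [index * N_READS, (index + 1) * N_READS) range.
constexpr int N_READS = 4;

std::pair<uint32_t, uint32_t> launch_dims(uint32_t size, uint32_t block_dim = 256) {
  uint32_t threads = (size + N_READS - 1) / N_READS; // ceil_div(size, N_READS)
  uint32_t blocks = (threads + block_dim - 1) / block_dim;
  return {blocks, block_dim};
}
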
@@ -13,21 +13,16 @@ namespace cg = cooperative_groups;
 template <typename In, typename Out, typename IdxT, int N_READS>
 __global__ void copy_s(const In* in, Out* out, IdxT size) {
   IdxT index = cg::this_grid().thread_rank();
-  IdxT remaining = size - index * N_READS;
-  if (remaining <= 0) {
-    return;
-  }
-
-  if (remaining < N_READS) {
-    for (int i = 0; i < remaining; ++i) {
-      IdxT offset = index * N_READS + i;
-      out[offset] = CastOp<In, Out>{}(in[0]);
+  if ((index + 1) * N_READS > size) {
+    for (IdxT i = index * N_READS; i < size; ++i) {
+      out[i] = cast_to<Out>(in[0]);
     }
   } else {
     AlignedVector<Out, N_READS> out_vec;
 #pragma unroll
     for (int i = 0; i < N_READS; ++i) {
-      out_vec.val[i] = CastOp<In, Out>{}(in[0]);
+      out_vec.val[i] = cast_to<Out>(in[0]);
     }

     store_vector<N_READS>(out, index, out_vec);

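Alongside the index-type change, the copy kernels replace explicit CastOp<In, Out>{}(...) construction with cast_to<Out>(...). The wrapper's definition is not part of this diff; read it under the assumption that it is a thin deducing adapter, roughly:

// Assumed shape of cast_to (hypothetical): deduce In from the argument
// so call sites only spell the destination type.
template <typename Out, typename In>
__device__ Out cast_to(In x) {
  return CastOp<In, Out>{}(x);
}
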
@@ -37,15 +32,10 @@ __global__ void copy_s(const In* in, Out* out, IdxT size) {
 template <typename In, typename Out, typename IdxT, int N_READS>
 __global__ void copy_v(const In* in, Out* out, IdxT size) {
   IdxT index = cg::this_grid().thread_rank();
-  IdxT remaining = size - index * N_READS;
-  if (remaining <= 0) {
-    return;
-  }
-
-  if (remaining < N_READS) {
-    for (int i = 0; i < remaining; ++i) {
-      IdxT offset = index * N_READS + i;
-      out[offset] = CastOp<In, Out>{}(in[offset]);
+  if ((index + 1) * N_READS > size) {
+    for (IdxT i = index * N_READS; i < size; ++i) {
+      out[i] = cast_to<Out>(in[i]);
     }
   } else {
     auto in_vec = load_vector<N_READS>(in, index);

@@ -53,7 +43,7 @@ __global__ void copy_v(const In* in, Out* out, IdxT size) {
     AlignedVector<Out, N_READS> out_vec;
 #pragma unroll
     for (int i = 0; i < N_READS; ++i) {
-      out_vec.val[i] = CastOp<In, Out>{}(in_vec.val[i]);
+      out_vec.val[i] = cast_to<Out>(in_vec.val[i]);
     }

     store_vector<N_READS>(out, index, out_vec);

@@ -71,10 +61,10 @@ void copy_contiguous(
     int64_t out_offset) {
   dispatch_all_types(in.dtype(), [&](auto in_type_tag) {
     dispatch_all_types(out.dtype(), [&](auto out_type_tag) {
-      dispatch_bool(out.data_size() > INT32_MAX, [&](auto large) {
+      dispatch_bool(out.data_size() > UINT32_MAX, [&](auto large) {
         using InType = cuda_type_t<MLX_GET_TYPE(in_type_tag)>;
         using OutType = cuda_type_t<MLX_GET_TYPE(out_type_tag)>;
-        using IdxT = std::conditional_t<large(), int64_t, int32_t>;
+        using IdxT = std::conditional_t<large(), int64_t, uint32_t>;
         // TODO: Choose optimized value based on type size.
         constexpr int N_READS = 4;
         auto kernel = cu::copy_s<InType, OutType, IdxT, N_READS>;

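Note that copy_contiguous also raises its dispatch threshold from INT32_MAX to UINT32_MAX: with an unsigned 32-bit index, the cheap path can now address every element count up to UINT32_MAX before falling back to 64-bit indexing, roughly doubling its reach. The binary and ternary dispatches in this commit keep the INT32_MAX cutoff. The two limits, for reference:

#include <cstdint>
#include <cstdio>

int main() {
  // Largest element counts addressable by each 32-bit index type.
  std::printf("int32_t  max: %d\n", INT32_MAX);   // 2147483647  (~2.1e9)
  std::printf("uint32_t max: %u\n", UINT32_MAX);  // 4294967295  (~4.3e9)
}
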
@@ -19,15 +19,10 @@ template <typename Op, typename T, typename IdxT, int N_READS>
 __global__ void
 ternary_v(const bool* a, const T* b, const T* c, T* out, IdxT size) {
   IdxT index = cg::this_grid().thread_rank();
-  IdxT remaining = size - index * N_READS;
-  if (remaining <= 0) {
-    return;
-  }
-
-  if (remaining < N_READS) {
-    for (int i = 0; i < remaining; ++i) {
-      IdxT offset = index * N_READS + i;
-      out[offset] = Op{}(a[offset], b[offset], c[offset]);
+  if ((index + 1) * N_READS > size) {
+    for (IdxT i = index * N_READS; i < size; ++i) {
+      out[i] = Op{}(a[i], b[i], c[i]);
     }
   } else {
     auto a_vec = load_vector<N_READS>(a, index);

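ternary_v takes its condition stream as const bool*, i.e. an elementwise select. An op in the shape the kernel instantiates (illustrative; mlx's own ternary op may differ in detail):

// Illustrative select/where-style op: the condition picks between x and y.
struct Select {
  template <typename T>
  __device__ T operator()(bool condition, T x, T y) {
    return condition ? x : y;
  }
};
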
@@ -170,7 +165,7 @@ void ternary_op_gpu_inplace(
     });
   } else {
     dispatch_bool(out.data_size() > INT32_MAX, [&](auto large) {
-      using IdxT = std::conditional_t<large(), int64_t, int32_t>;
+      using IdxT = std::conditional_t<large(), int64_t, uint32_t>;
       // TODO: Choose optimized value based on type size.
       constexpr int N_READS = 4;
       auto kernel = cu::ternary_v<Op, DType, IdxT, N_READS>;

@@ -21,15 +21,10 @@ namespace cg = cooperative_groups;
 template <typename Op, typename In, typename Out, typename IdxT, int N_READS>
 __global__ void unary_v(const In* in, Out* out, IdxT size) {
   IdxT index = cg::this_grid().thread_rank();
-  IdxT remaining = size - index * N_READS;
-  if (remaining <= 0) {
-    return;
-  }
-
-  if (remaining < N_READS) {
-    for (int i = 0; i < remaining; ++i) {
-      IdxT offset = index * N_READS + i;
-      out[offset] = Op{}(in[offset]);
+  if ((index + 1) * N_READS > size) {
+    for (IdxT i = index * N_READS; i < size; ++i) {
+      out[i] = Op{}(in[i]);
     }
   } else {
     auto in_vec = load_vector<N_READS>(in, index);

@@ -130,10 +125,9 @@ void unary_op_gpu_inplace(
       using CTYPE_OUT = MLX_GET_TYPE(out_type_tag);
       if constexpr (cu::supports_unary_op<Op, CTYPE_IN, CTYPE_OUT>()) {
         dispatch_bool(large, [&](auto large) {
-          using IdxT = std::conditional_t<large(), int64_t, int32_t>;
           using InType = cuda_type_t<CTYPE_IN>;
           using OutType = cuda_type_t<CTYPE_OUT>;
-          using IdxT = std::conditional_t<large(), int64_t, int32_t>;
+          using IdxT = std::conditional_t<large(), int64_t, uint32_t>;
           if (contig) {
             // TODO: Choose optimized value based on type size.
             constexpr int N_READS = 4;

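A final note on the dispatch idiom used throughout: the "using IdxT = std::conditional_t<large(), int64_t, uint32_t>;" lines are legal because large arrives as a std::integral_constant, whose constexpr operator() returns the wrapped bool, making large() a constant expression. A self-contained check of that mechanism:

#include <cstdint>
#include <type_traits>

// integral_constant's operator() is constexpr, so it can parametrize
// std::conditional_t even though `large` is a function argument.
static_assert(std::true_type{}() == true);

using SmallIdxT = std::conditional_t<std::false_type{}(), int64_t, uint32_t>;
static_assert(std::is_same_v<SmallIdxT, uint32_t>);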