// Copyright © 2024 Apple Inc.

#include <dlfcn.h>
#include <mpi.h>

#include "mlx/backend/common/copy.h"
#include "mlx/distributed/distributed.h"
#include "mlx/distributed/distributed_impl.h"
#include "mlx/scheduler.h"

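// Resolve one MPI symbol from the dlopen'd library into `variable`; if the
// lookup fails, drop the handle so the wrapper reports MPI as unavailable.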
#define LOAD_SYMBOL(symbol, variable)                              \
  {                                                                \
    variable = (decltype(variable))dlsym(libmpi_handle_, #symbol); \
    char* error = dlerror();                                       \
    if (error != nullptr) {                                        \
      libmpi_handle_ = nullptr;                                    \
      return;                                                      \
    }                                                              \
  }

namespace mlx::core::distributed {

namespace {

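// Return `arr` unchanged if it is row contiguous; otherwise make a
// row-contiguous copy so MPI can read the buffer as flat, dense memory.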
array ensure_row_contiguous(const array& arr) {
  if (arr.flags().row_contiguous) {
    return arr;
  } else {
    array arr_copy(arr.shape(), arr.dtype(), nullptr, {});
    copy(arr, arr_copy, CopyType::General);
    return arr_copy;
  }
}

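// Thin wrapper that dlopens libmpi at runtime and resolves only the symbols,
// ops, and datatypes MLX needs, keeping MPI an optional dependency.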
struct MPIWrapper {
  MPIWrapper() {
    libmpi_handle_ = dlopen("libmpi.dylib", RTLD_NOW | RTLD_GLOBAL);
    if (libmpi_handle_ == nullptr) {
      return;
    }

    // API
    LOAD_SYMBOL(MPI_Init, init);
    LOAD_SYMBOL(MPI_Finalize, finalize);
    LOAD_SYMBOL(MPI_Comm_rank, rank);
    LOAD_SYMBOL(MPI_Comm_size, size);
    LOAD_SYMBOL(MPI_Comm_split, comm_split);
    LOAD_SYMBOL(MPI_Comm_free, comm_free);
    LOAD_SYMBOL(MPI_Allreduce, all_reduce);
    LOAD_SYMBOL(MPI_Allgather, all_gather);
    LOAD_SYMBOL(MPI_Send, send);
    LOAD_SYMBOL(MPI_Recv, recv);

    // Objects
    LOAD_SYMBOL(ompi_mpi_comm_world, comm_world_);

    // Ops
    LOAD_SYMBOL(ompi_mpi_op_sum, op_sum_);

    // Datatypes
    LOAD_SYMBOL(ompi_mpi_c_bool, mpi_bool_);
    LOAD_SYMBOL(ompi_mpi_int8_t, mpi_int8_);
    LOAD_SYMBOL(ompi_mpi_uint8_t, mpi_uint8_);
    LOAD_SYMBOL(ompi_mpi_int16_t, mpi_int16_);
    LOAD_SYMBOL(ompi_mpi_uint16_t, mpi_uint16_);
    LOAD_SYMBOL(ompi_mpi_int32_t, mpi_int32_);
    LOAD_SYMBOL(ompi_mpi_uint32_t, mpi_uint32_);
    LOAD_SYMBOL(ompi_mpi_int64_t, mpi_int64_);
    LOAD_SYMBOL(ompi_mpi_uint64_t, mpi_uint64_);
    LOAD_SYMBOL(ompi_mpi_float, mpi_float_);
    LOAD_SYMBOL(ompi_mpi_c_complex, mpi_complex_);
  }

  bool is_available() {
    return libmpi_handle_ != nullptr;
  }

  bool init_safe() {
    if (!is_available()) {
      return false;
    }
    return init(nullptr, nullptr) == MPI_SUCCESS;
  }

  void finalize_safe() {
    if (is_available()) {
      finalize();
    }
  }

  MPI_Comm world() {
    return comm_world_;
  }

  MPI_Datatype datatype(const array& arr) {
    switch (arr.dtype()) {
      case bool_:
        return mpi_bool_;
      case int8:
        return mpi_int8_;
      case uint8:
        return mpi_uint8_;
      case int16:
        return mpi_int16_;
      case uint16:
        return mpi_uint16_;
      case int32:
        return mpi_int32_;
      case uint32:
        return mpi_uint32_;
      case int64:
        return mpi_int64_;
      case uint64:
        return mpi_uint64_;
      case float32:
        return mpi_float_;
      case complex64:
        return mpi_complex_;
      case float16:
      case bfloat16:
        throw std::runtime_error("MPI doesn't support 16-bit floats");
    }
  }

  MPI_Op op_sum() {
    return op_sum_;
  }

  void* libmpi_handle_;

  // API
  int (*init)(int*, char***);
  int (*finalize)();
  int (*rank)(MPI_Comm, int*);
  int (*size)(MPI_Comm, int*);
  int (*all_reduce)(const void*, void*, int, MPI_Datatype, MPI_Op, MPI_Comm);
  int (*all_gather)(
      const void*,
      int,
      MPI_Datatype,
      void*,
      int,
      MPI_Datatype,
      MPI_Comm);
  int (*comm_split)(MPI_Comm, int, int, MPI_Comm*);
  int (*comm_free)(MPI_Comm*);
  int (*send)(const void*, int, MPI_Datatype, int, int, MPI_Comm);
  int (*recv)(void*, int, MPI_Datatype, int, int, MPI_Comm, MPI_Status*);

  // Objects
  MPI_Comm comm_world_;

  // Ops
  MPI_Op op_sum_;

  // Datatypes
  MPI_Datatype mpi_bool_;
  MPI_Datatype mpi_int8_;
  MPI_Datatype mpi_uint8_;
  MPI_Datatype mpi_int16_;
  MPI_Datatype mpi_uint16_;
  MPI_Datatype mpi_int32_;
  MPI_Datatype mpi_uint32_;
  MPI_Datatype mpi_int64_;
  MPI_Datatype mpi_uint64_;
  MPI_Datatype mpi_float_;
  MPI_Datatype mpi_complex_;
};

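// Function-local static: the MPI library is loaded once, on first use.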
MPIWrapper& mpi() {
  static MPIWrapper wrapper;
  return wrapper;
}

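// RAII wrapper around an MPI communicator: the global (world) group
// finalizes MPI on destruction, while split sub-groups free their
// communicator instead.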
struct MPIGroupImpl {
  MPIGroupImpl() : comm_(nullptr), global_(true), rank_(0), size_(1) {}
  MPIGroupImpl(MPI_Comm comm, bool global)
      : comm_(comm), global_(global), rank_(-1), size_(-1) {}
  ~MPIGroupImpl() {
    if (global_) {
      mpi().finalize_safe();
    } else {
      mpi().comm_free(&comm_);
    }
  }

  MPI_Comm comm() {
    return comm_;
  }

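  // Rank and size are fetched from MPI on first access and then cached.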
  int rank() {
    if (rank_ < 0) {
      mpi().rank(comm_, &rank_);
    }
    return rank_;
  }

  int size() {
    if (size_ < 0) {
      mpi().size(comm_, &size_);
    }
    return size_;
  }

 private:
  MPI_Comm comm_;
  bool global_;
  int rank_;
  int size_;
};

MPI_Comm to_comm(Group& group) {
  return std::static_pointer_cast<MPIGroupImpl>(group.raw_group())->comm();
}

} // namespace

int Group::rank() {
  return std::static_pointer_cast<MPIGroupImpl>(group_)->rank();
}

int Group::size() {
  return std::static_pointer_cast<MPIGroupImpl>(group_)->size();
}

Group Group::split(int color, int key) {
  auto mpi_group = std::static_pointer_cast<MPIGroupImpl>(group_);

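  // A negative key keeps the caller's current rank ordering in the new group.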
  key = (key < 0) ? rank() : key;

  MPI_Comm new_comm;
  int result = mpi().comm_split(mpi_group->comm(), color, key, &new_comm);
  if (result != MPI_SUCCESS) {
    throw std::runtime_error("MPI could not split this group");
  }

  return Group(std::make_shared<MPIGroupImpl>(new_comm, false));
}

bool is_available() {
  return mpi().is_available();
}

Group init(bool strict /* = false */) {
  static std::shared_ptr<MPIGroupImpl> global_group = nullptr;

  if (global_group == nullptr) {
    if (!mpi().init_safe()) {
      if (strict) {
        throw std::runtime_error("Cannot initialize MPI");
      }
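      // MPI is unavailable and strict is off: fall back to a group of one.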
      global_group = std::make_shared<MPIGroupImpl>();
    } else {
      global_group = std::make_shared<MPIGroupImpl>(mpi().world(), true);
    }
  }

  // Ensure the communication stream is alive before
  // the graph is evaluated
  detail::communication_stream();
  return Group(global_group);
}

namespace detail {

Stream communication_stream() {
  static Stream comm_stream = new_stream(Device::cpu);
  return comm_stream;
}

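// Sum `input_` across all ranks in `group` into `output`, using MPI_IN_PLACE
// when the reduction can be done directly in the output buffer.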
void all_sum(Group group, const array& input_, array& output) {
  array input = ensure_row_contiguous(input_);
  mpi().all_reduce(
      (input.data<void>() == output.data<void>()) ? MPI_IN_PLACE
                                                  : input.data<void>(),
      output.data<void>(),
      input.size(),
      mpi().datatype(input),
      mpi().op_sum(),
      to_comm(group));
}

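// Gather every rank's `input_` into `output`; each rank contributes
// input.size() elements, concatenated in rank order.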
void all_gather(Group group, const array& input_, array& output) {
  array input = ensure_row_contiguous(input_);
  mpi().all_gather(
      input.data<void>(),
      input.size(),
      mpi().datatype(input),
      output.data<void>(),
      input.size(),
      mpi().datatype(output),
      to_comm(group));
}

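// Blocking point-to-point send to rank `dst` (tag 0).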
void send(Group group, const array& input_, int dst) {
  array input = ensure_row_contiguous(input_);
  mpi().send(
      input.data<void>(),
      input.size(),
      mpi().datatype(input),
      dst,
      0,
      to_comm(group));
}

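// Blocking receive from rank `src`; accepts any tag to match send().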
void recv(Group group, array& out, int src) {
  MPI_Status status;
  mpi().recv(
      out.data<void>(),
      out.size(),
      mpi().datatype(out),
      src,
      MPI_ANY_TAG,
      to_comm(group),
      &status);
}

} // namespace detail

} // namespace mlx::core::distributed