Mirror of https://github.com/ml-explore/mlx.git (synced 2025-07-14 20:41:13 +08:00)

* redesign for faster cpu/gpu synch
* load + more async CPU
* use command encoder API and move more ops to use it
* make fence back-end generic + CPU only fence
* faster build
* fix async eval
* fixes + handle temporaries
* fix / improve cpu conv
* remove unused status, fix siblings
* fix extensions
* fix
* fix no cpu build
* format
* comments
* fix perf regression, remove unnecessary abort
* fix events, task limit cpu
* fix waiting
* fix donation / temporaries in normalization
34 lines
1.0 KiB
C++
// Copyright © 2024 Apple Inc.

#include <cassert>

#include "mlx/allocator.h"
#include "mlx/backend/common/utils.h"
#include "mlx/backend/metal/copy.h"
#include "mlx/backend/metal/device.h"
#include "mlx/backend/metal/utils.h"
#include "mlx/distributed/ops.h"
#include "mlx/distributed/primitives.h"
#include "mlx/fence.h"
#include "mlx/scheduler.h"

namespace mlx::core::distributed {

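// None of the distributed primitives have a Metal (GPU) implementation;
// each eval_gpu stub below simply throws if it is ever invoked.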
void AllReduce::eval_gpu(const std::vector<array>&, std::vector<array>&) {
  throw std::runtime_error("[AllReduce::eval_gpu] has no GPU implementation.");
}

void AllGather::eval_gpu(const std::vector<array>&, std::vector<array>&) {
  throw std::runtime_error("[AllGather::eval_gpu] has no GPU implementation.");
}

void Send::eval_gpu(const std::vector<array>&, std::vector<array>&) {
  throw std::runtime_error("[Send::eval_gpu] has no GPU implementation.");
}

void Recv::eval_gpu(const std::vector<array>&, std::vector<array>&) {
  throw std::runtime_error("[Recv::eval_gpu] has no GPU implementation.");
}

} // namespace mlx::core::distributed
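
For context, here is a minimal sketch of how these primitives are reached from user code. It is an illustration only, assuming the public MLX C++ API declared in "mlx/mlx.h" (distributed::init, distributed::all_sum, ones, eval), none of which is defined in this file; the point is that a distributed collective scheduled on a GPU stream would hit one of the eval_gpu stubs above and throw.

// Minimal sketch, not part of the file above; assumes the public MLX C++ API.
#include "mlx/mlx.h"

namespace mx = mlx::core;

int main() {
  // Assumed API: init() returns the distributed group (a trivial group when
  // no distributed backend is configured).
  auto group = mx::distributed::init();

  auto x = mx::ones({4});

  // all_sum builds an AllReduce primitive. If it were evaluated on a GPU
  // stream, AllReduce::eval_gpu above would throw its runtime_error.
  auto y = mx::distributed::all_sum(x, group);

  mx::eval({y});
  return 0;
}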