Mirror of https://github.com/ml-explore/mlx-examples.git, synced 2025-06-27 03:05:20 +08:00
Merge pull request #10 from ricardo-larosa/fix-unsupported-scalartype
Fix unsupported ScalarType BFloat16
This commit is contained in: commit 62dcb3301f
@@ -32,7 +32,7 @@ Once you've converted the weights to MLX format, you can interact with the
 LLaMA model:

 ```
-python llama.py mlx_llama.npz tokenizer.model "hello"
+python llama.py mlx_llama_weights.npz <path_to_tokenizer.model> "hello"
 ```

 Run `python llama.py --help` for more details.
@@ -32,7 +32,12 @@ def map_torch_to_mlx(key, value):
     elif "rope" in key:
         return None, None

-    return key, value.numpy()
+    return (
+        key,
+        value.numpy()
+        if value.dtype != torch.bfloat16
+        else value.to(torch.float32).numpy(),
+    )


 if __name__ == "__main__":
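The change upcasts bfloat16 weights to float32 before the NumPy conversion, since NumPy has no bfloat16 dtype and torch.Tensor.numpy() rejects such tensors with the "unsupported ScalarType BFloat16" error named in the commit title. Below is a minimal sketch of the same workaround, assuming PyTorch is installed and using a hypothetical standalone to_numpy helper rather than the repository's convert.py:

```python
# Minimal sketch of the bfloat16 -> float32 workaround (hypothetical helper,
# not the repository's convert.py verbatim).
import torch


def to_numpy(value: torch.Tensor):
    # torch.Tensor.numpy() fails for bfloat16 tensors because NumPy has no
    # matching dtype, so upcast to float32 before converting.
    if value.dtype == torch.bfloat16:
        return value.to(torch.float32).numpy()
    return value.numpy()


if __name__ == "__main__":
    w = torch.randn(4, 4, dtype=torch.bfloat16)
    print(to_numpy(w).dtype)  # float32
```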