Mirror of https://github.com/ml-explore/mlx-examples.git (synced 2025-12-16 02:08:55 +08:00)

Compare commits: 97204566ad...44a1a0e7fa (4 commits)
| SHA1 |
|---|
| 44a1a0e7fa |
| afe88cddde |
| 93b2a8fa4a |
| cb7f211c1b |
````diff
@@ -56,7 +56,7 @@ logits = result["logits"] # Shape: (batch, length, vocab_size)
 
 ### Masked Language Modeling
 
-```python
+```bash
 # For a complete example, see main.py
 python main.py --sequence "YOUR_SEQUENCE" --mask-position 50
 ```
````
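The hunk above switches the masked-language-modeling snippet from a `python` fence to a `bash` fence, since it documents the `main.py` command line rather than library code. For orientation, here is a minimal sketch of what filling a masked position from the returned logits can look like; the `predict_masked_residue` helper and the HF-style `tokenizer` interface (`encode`, `mask_token_id`, `convert_ids_to_tokens`) are assumptions for illustration, not the example's actual API.

```python
# Hypothetical sketch, not the example's actual API: fill one masked position
# using logits of shape (batch, length, vocab_size), as in the hunk context.
import mlx.core as mx


def predict_masked_residue(model, tokenizer, sequence: str, mask_position: int) -> str:
    # Tokenize the protein sequence (an HF-style tokenizer is assumed here).
    token_ids = tokenizer.encode(sequence)
    token_ids[mask_position] = tokenizer.mask_token_id  # mask the requested position

    tokens = mx.array([token_ids])   # shape: (1, length)
    result = model(tokens)           # assumed to return {"logits": ...}
    logits = result["logits"][0, mask_position]

    # Take the highest-scoring token at the masked position.
    pred_id = mx.argmax(logits).item()
    return tokenizer.convert_ids_to_tokens([pred_id])[0]
```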
````diff
@@ -111,7 +111,7 @@ Benchmark PyTorch MPS performance:
 python benchmarks/benchmark_pt.py
 ```
 
-Expected performance on M4 MacBook Pro (batch_size = 5):
+Expected performance on M4 MacBook Pro (ESM-2 650M, batch_size = 5):
 
 - MLX: 299 ms per step, 16.71 sequences/sec
 - PyTorch MPS: 402 ms per step, 12.43 sequences/sec
````
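The numbers above are per-step latency and throughput for a batch of five sequences. A timing loop in that style can be as simple as the sketch below; this is illustrative only, the real harness in `benchmarks/benchmark_pt.py` and the MLX benchmark may be structured differently, and `model`/`batch` here are assumed inputs.

```python
# Hypothetical timing sketch for "ms per step" / "sequences/sec" figures.
# `model` is a callable returning {"logits": ...}; `batch` is an mx.array of
# 5 tokenized sequences. Neither is taken from the actual benchmark script.
import time

import mlx.core as mx


def benchmark(model, batch, warmup: int = 3, steps: int = 20) -> None:
    batch_size = batch.shape[0]

    # Warm-up so one-time setup cost is not counted.
    for _ in range(warmup):
        mx.eval(model(batch)["logits"])

    start = time.perf_counter()
    for _ in range(steps):
        # mx.eval forces MLX's lazy computation so the wall-clock time is real.
        mx.eval(model(batch)["logits"])
    elapsed = time.perf_counter() - start

    ms_per_step = 1000.0 * elapsed / steps
    seqs_per_sec = batch_size * steps / elapsed
    print(f"{ms_per_step:.0f} ms per step, {seqs_per_sec:.2f} sequences/sec")
```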
````diff
@@ -141,15 +141,15 @@ This tests tokenizer and model outputs (logits, hidden states, and attentions) f
 ```
 
 ```bibtex
-@article{lin2023evolutionary,
-author={Zeming Lin et al.},
+@article{Lin2023,
 title={Evolutionary-scale prediction of atomic-level protein structure with a language model},
+author={Lin, Zeming and Akin, Halil and Rao, Roshan and Hie, Brian and Zhu, Ziheng and Lu, Wenting and Smetanin, Nikita and Verkuil, Robert and Kabeli, Ori and Shmueli, Yilun and dos Santos Costa, Allan and Fazel-Zarandi, Maryam and Sercu, Tom and Candido, Salvatore and Rives, Alexander},
 journal={Science},
 volume={379},
 number={6637},
 pages={1123--1130},
 year={2023},
-publisher={American Association for the Advancement of Science}
+doi={10.1126/science.ade2574},
+url={https://doi.org/10.1126/science.ade2574}
 }
 ```
````