mirror of https://github.com/ml-explore/mlx-examples.git
@@ -226,7 +226,7 @@ this section assumes you can launch distributed MLX programs using `mlx.launch`
 ### Distributed Finetuning
 
 Distributed finetuning scales very well with FLUX and all one has to do is
-simply to adjust the gradient accumulation and iterations so that the batch
+adjust the gradient accumulation and training iterations so that the batch
 size remains the same. For instance, to replicate the following training
 
 ```shell
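The bookkeeping behind that sentence is worth spelling out. A minimal sketch, assuming the finetuning script's gradient-accumulation and iteration counts refer to per-node micro-batch steps (an assumption about the script's semantics, not something stated in this hunk): to keep the effective batch size and the number of optimizer updates fixed, divide both values by the number of nodes.

```python
# Minimal sketch of the adjustment described in the README hunk above.
# Assumptions (not stated in the diff): iterations count per-node
# micro-batch steps, and each optimizer update averages gradients over
# grad_accumulate micro-batches on every node, so the effective batch is
#   batch_size * grad_accumulate * num_nodes.

def distributed_schedule(grad_accumulate: int, iterations: int, num_nodes: int):
    """Scale a single-node schedule to `num_nodes` nodes while keeping the
    effective batch size and the number of optimizer updates the same."""
    if grad_accumulate % num_nodes or iterations % num_nodes:
        raise ValueError("num_nodes must divide both values evenly")
    return grad_accumulate // num_nodes, iterations // num_nodes

# Example: 8 accumulation steps and 1200 iterations on one node become
# 2 accumulation steps and 300 iterations on four nodes; every update
# still averages gradients over 8 micro-batches in total.
print(distributed_schedule(8, 1200, 4))  # -> (2, 300)
```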
@@ -35,7 +35,7 @@ def to_latent_size(image_size):
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
-        description="Generate images from a textual prompt using stable diffusion"
+        description="Generate images from a textual prompt using FLUX"
     )
     parser.add_argument("--quantize", "-q", action="store_true")
     parser.add_argument("--model", choices=["schnell", "dev"], default="schnell")
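For context, the parser touched by this hunk can be exercised on its own. A minimal, self-contained sketch: only `--quantize` and `--model` appear in the diff; the positional `prompt` argument and the final `print` are assumptions added so the example runs end to end.

```python
# Self-contained sketch of the CLI surface shown in the hunk above.
# Only --quantize and --model come from the diff; the positional
# `prompt` argument is an illustrative assumption, not the repo's code.
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Generate images from a textual prompt using FLUX"
    )
    parser.add_argument("--quantize", "-q", action="store_true")
    parser.add_argument("--model", choices=["schnell", "dev"], default="schnell")
    parser.add_argument("prompt", nargs="?", default="A photo of an astronaut")
    args = parser.parse_args()
    print(args)  # e.g. Namespace(quantize=False, model='schnell', prompt=...)
```

Run with `--model dev -q`, the resulting namespace selects the `dev` checkpoint and enables quantization, matching the choices and default shown in the diff.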