From d06c4cde92b660a0fdda24bb2a48a120af591c1b Mon Sep 17 00:00:00 2001
From: Angelos Katharopoulos
Date: Mon, 24 Mar 2025 22:15:37 -0700
Subject: [PATCH] Comments

---
 flux/README.md               | 2 +-
 flux/generate_interactive.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/flux/README.md b/flux/README.md
index c51d7da0..95f86b49 100644
--- a/flux/README.md
+++ b/flux/README.md
@@ -226,7 +226,7 @@ this section assumes you can launch distributed MLX programs using `mlx.launch
 ### Distributed Finetuning
 
 Distributed finetuning scales very well with FLUX and all one has to do is
-simply to adjust the gradient accumulation and iterations so that the batch
+adjust the gradient accumulation and training iterations so that the batch
 size remains the same. For instance, to replicate the following training
 
 ```shell
diff --git a/flux/generate_interactive.py b/flux/generate_interactive.py
index 448dc5c3..9acde33c 100644
--- a/flux/generate_interactive.py
+++ b/flux/generate_interactive.py
@@ -35,7 +35,7 @@ def to_latent_size(image_size):
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
-        description="Generate images from a textual prompt using stable diffusion"
+        description="Generate images from a textual prompt using FLUX"
    )
     parser.add_argument("--quantize", "-q", action="store_true")
     parser.add_argument("--model", choices=["schnell", "dev"], default="schnell")
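
A rough sketch of the batch-size bookkeeping behind the README wording above (variable names and the interpretation of iterations as per-host micro-batch steps are assumptions for illustration, not taken from the flux scripts):

```python
# Illustrative sketch: keeping the effective batch size constant when moving
# from single-host to distributed finetuning. Names below are hypothetical.

per_step_batch = 1     # examples per forward/backward micro-step
grad_accumulate = 8    # single-host gradient accumulation steps
iterations = 1200      # single-host training iterations (micro-steps, assumed)
num_hosts = 4          # hosts used for distributed finetuning

# Effective batch size per optimizer update on a single host.
single_host_batch = per_step_batch * grad_accumulate

# Assumption: dividing the gradient accumulation (and, if iterations count
# per-host micro-steps, the iterations) by the number of hosts keeps the
# effective batch size and total data seen unchanged.
dist_grad_accumulate = grad_accumulate // num_hosts
dist_iterations = iterations // num_hosts

dist_batch = per_step_batch * dist_grad_accumulate * num_hosts
assert dist_batch == single_host_batch  # 8 == 8
```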