From a99e9d551e0ba7b994e97fb3efc01f0f40d621d8 Mon Sep 17 00:00:00 2001
From: Awni Hannun
Date: Tue, 12 Dec 2023 17:08:04 -0800
Subject: [PATCH] hf correction

---
 bert/hf_model.py                               | 2 +-
 mixtral/README.md                              | 2 +-
 stable_diffusion/README.md                     | 4 ++--
 stable_diffusion/stable_diffusion/model_io.py  | 8 ++++----
 stable_diffusion/stable_diffusion/tokenizer.py | 2 +-
 5 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/bert/hf_model.py b/bert/hf_model.py
index 4f07df13..e63b904b 100644
--- a/bert/hf_model.py
+++ b/bert/hf_model.py
@@ -25,7 +25,7 @@ def run(bert_model: str):
 
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
-        description="Run the BERT model using HuggingFace Transformers."
+        description="Run the BERT model using Hugging Face Transformers."
     )
     parser.add_argument(
         "--bert-model",
diff --git a/mixtral/README.md b/mixtral/README.md
index 494e8107..417759e1 100644
--- a/mixtral/README.md
+++ b/mixtral/README.md
@@ -14,7 +14,7 @@ For example with Homebrew:
 brew install git-lfs
 ```
 
-Download the models from HuggingFace:
+Download the models from Hugging Face:
 
 ```
 git clone https://huggingface.co/someone13574/mixtral-8x7b-32kseqlen
diff --git a/stable_diffusion/README.md b/stable_diffusion/README.md
index e18a4d49..400a50f7 100644
--- a/stable_diffusion/README.md
+++ b/stable_diffusion/README.md
@@ -1,9 +1,9 @@
 Stable Diffusion
 ================
 
-Stable Diffusion in MLX. The implementation was ported from Huggingface's
+Stable Diffusion in MLX. The implementation was ported from Hugging Face's
 [diffusers](https://huggingface.co/docs/diffusers/index) and we are fetching
-and using the weights available on the Huggingface Hub by Stability AI at
+and using the weights available on the Hugging Face Hub by Stability AI at
 [stabilitiai/stable-diffusion-2-1](https://huggingface.co/stabilityai/stable-diffusion-2-1).
 
 ![out](generated-mlx.png)
diff --git a/stable_diffusion/stable_diffusion/model_io.py b/stable_diffusion/stable_diffusion/model_io.py
index 7eef4e28..c2669de4 100644
--- a/stable_diffusion/stable_diffusion/model_io.py
+++ b/stable_diffusion/stable_diffusion/model_io.py
@@ -169,7 +169,7 @@ def _check_key(key: str, part: str):
 
 
 def load_unet(key: str = _DEFAULT_MODEL, float16: bool = False):
-    """Load the stable diffusion UNet from Huggingface Hub."""
+    """Load the stable diffusion UNet from Hugging Face Hub."""
     _check_key(key, "load_unet")
 
     # Download the config and create the model
@@ -199,7 +199,7 @@ def load_unet(key: str = _DEFAULT_MODEL, float16: bool = False):
 
 
 def load_text_encoder(key: str = _DEFAULT_MODEL, float16: bool = False):
-    """Load the stable diffusion text encoder from Huggingface Hub."""
+    """Load the stable diffusion text encoder from Hugging Face Hub."""
     _check_key(key, "load_text_encoder")
 
     # Download the config and create the model
@@ -226,7 +226,7 @@ def load_text_encoder(key: str = _DEFAULT_MODEL, float16: bool = False):
 
 
 def load_autoencoder(key: str = _DEFAULT_MODEL, float16: bool = False):
-    """Load the stable diffusion autoencoder from Huggingface Hub."""
+    """Load the stable diffusion autoencoder from Hugging Face Hub."""
     _check_key(key, "load_autoencoder")
 
     # Download the config and create the model
@@ -255,7 +255,7 @@ def load_autoencoder(key: str = _DEFAULT_MODEL, float16: bool = False):
 
 
 def load_diffusion_config(key: str = _DEFAULT_MODEL):
-    """Load the stable diffusion config from Huggingface Hub."""
+    """Load the stable diffusion config from Hugging Face Hub."""
     _check_key(key, "load_diffusion_config")
 
     diffusion_config = _MODELS[key]["diffusion_config"]
diff --git a/stable_diffusion/stable_diffusion/tokenizer.py b/stable_diffusion/stable_diffusion/tokenizer.py
index 07375fc7..ae9b967a 100644
--- a/stable_diffusion/stable_diffusion/tokenizer.py
+++ b/stable_diffusion/stable_diffusion/tokenizer.py
@@ -81,7 +81,7 @@ class Tokenizer:
         if isinstance(text, list):
             return [self.tokenize(t, prepend_bos, append_eos) for t in text]
 
-        # Lower case cleanup and split according to self.pat. Huggingface does
+        # Lower case cleanup and split according to self.pat. Hugging Face does
         # a much more thorough job here but this should suffice for 95% of
         # cases.
         clean_text = regex.sub(r"\s+", " ", text.lower())