hf correction

Awni Hannun 2023-12-12 17:08:04 -08:00
parent e0a53edb46
commit a99e9d551e
5 changed files with 9 additions and 9 deletions

View File

@@ -25,7 +25,7 @@ def run(bert_model: str):
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(
-        description="Run the BERT model using HuggingFace Transformers."
+        description="Run the BERT model using Hugging Face Transformers."
     )
     parser.add_argument(
         "--bert-model",

View File

@@ -14,7 +14,7 @@ For example with Homebrew:
 brew install git-lfs
 ```
-Download the models from HuggingFace:
+Download the models from Hugging Face:
 ```
 git clone https://huggingface.co/someone13574/mixtral-8x7b-32kseqlen
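
For the same download, a Python alternative to the git-lfs clone shown above would be `snapshot_download` from the `huggingface_hub` package; this is a sketch, not part of the example's documented instructions:

```
# Sketch only: a huggingface_hub alternative to the git-lfs clone above.
from huggingface_hub import snapshot_download

# Downloads the full repository (weights included) into the local Hugging Face
# cache and returns the path to the snapshot directory.
path = snapshot_download(repo_id="someone13574/mixtral-8x7b-32kseqlen")
print(path)
```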

View File

@@ -1,9 +1,9 @@
 Stable Diffusion
 ================
-Stable Diffusion in MLX. The implementation was ported from Huggingface's
+Stable Diffusion in MLX. The implementation was ported from Hugging Face's
 [diffusers](https://huggingface.co/docs/diffusers/index) and we are fetching
-and using the weights available on the Huggingface Hub by Stability AI at
+and using the weights available on the Hugging Face Hub by Stability AI at
 [stabilitiai/stable-diffusion-2-1](https://huggingface.co/stabilityai/stable-diffusion-2-1).
 ![out](generated-mlx.png)

View File

@@ -169,7 +169,7 @@ def _check_key(key: str, part: str):
 def load_unet(key: str = _DEFAULT_MODEL, float16: bool = False):
-    """Load the stable diffusion UNet from Huggingface Hub."""
+    """Load the stable diffusion UNet from Hugging Face Hub."""
     _check_key(key, "load_unet")
     # Download the config and create the model
@@ -199,7 +199,7 @@ def load_unet(key: str = _DEFAULT_MODEL, float16: bool = False):
 def load_text_encoder(key: str = _DEFAULT_MODEL, float16: bool = False):
-    """Load the stable diffusion text encoder from Huggingface Hub."""
+    """Load the stable diffusion text encoder from Hugging Face Hub."""
     _check_key(key, "load_text_encoder")
     # Download the config and create the model
@@ -226,7 +226,7 @@ def load_text_encoder(key: str = _DEFAULT_MODEL, float16: bool = False):
 def load_autoencoder(key: str = _DEFAULT_MODEL, float16: bool = False):
-    """Load the stable diffusion autoencoder from Huggingface Hub."""
+    """Load the stable diffusion autoencoder from Hugging Face Hub."""
     _check_key(key, "load_autoencoder")
     # Download the config and create the model
@@ -255,7 +255,7 @@ def load_autoencoder(key: str = _DEFAULT_MODEL, float16: bool = False):
 def load_diffusion_config(key: str = _DEFAULT_MODEL):
-    """Load the stable diffusion config from Huggingface Hub."""
+    """Load the stable diffusion config from Hugging Face Hub."""
     _check_key(key, "load_diffusion_config")
     diffusion_config = _MODELS[key]["diffusion_config"]
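
The context lines above all follow the same shape: check the key, download a config from the Hugging Face Hub, then build the model. A minimal sketch of that download step, assuming the `huggingface_hub` package; the repo and file names are placeholders, not the actual `_MODELS` entries:

```
# Minimal sketch of the "download the config and create the model" step,
# assuming huggingface_hub; the repo and file names are placeholders.
import json
from huggingface_hub import hf_hub_download

def load_config(repo_id: str = "stabilityai/stable-diffusion-2-1",
                filename: str = "unet/config.json") -> dict:
    # Fetch a single file from the Hub (cached locally after the first call).
    config_path = hf_hub_download(repo_id, filename)
    with open(config_path) as f:
        return json.load(f)
```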

View File

@@ -81,7 +81,7 @@ class Tokenizer:
         if isinstance(text, list):
             return [self.tokenize(t, prepend_bos, append_eos) for t in text]
-        # Lower case cleanup and split according to self.pat. Huggingface does
+        # Lower case cleanup and split according to self.pat. Hugging Face does
         # a much more thorough job here but this should suffice for 95% of
         # cases.
         clean_text = regex.sub(r"\s+", " ", text.lower())
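
For context, the comment edited above describes a cleanup-and-split pass: collapse whitespace, lowercase, then split with `self.pat`. A rough sketch with an illustrative pattern (not the one the Tokenizer class actually compiles):

```
# Rough sketch of the cleanup-and-split described in the comment above;
# the pattern is illustrative, not the Tokenizer's actual self.pat.
import regex

pat = regex.compile(r"\w+|[^\s\w]+")

def clean_and_split(text: str) -> list[str]:
    # Collapse runs of whitespace, lowercase, then split on the pattern.
    clean_text = regex.sub(r"\s+", " ", text.lower())
    return pat.findall(clean_text)

print(clean_and_split("Hello,   World!"))  # ['hello', ',', 'world', '!']
```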