rename --model_path to --model-path (#151)

Use the same argument convention for mistral/mixtral as for llama convert.
Daniel Strobusch 2023-12-21 15:28:57 +01:00 committed by GitHub
parent 3efb1cc2cc
commit 43b6522af2
5 changed files with 6 additions and 6 deletions
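
The rename is purely a command-line spelling change: argparse derives the Python attribute name from a long option by replacing dashes with underscores, so the scripts can keep reading args.model_path without any further edits. A minimal standalone sketch of that behavior (demo code, not part of this commit):

import argparse

parser = argparse.ArgumentParser(description="Demo of argparse attribute naming.")
# The attribute (dest) is derived from the long option with dashes
# replaced by underscores: "--model-path" is stored as "model_path".
parser.add_argument("--model-path", type=str, default="mistral-7B-v0.1/")

args = parser.parse_args(["--model-path", "weights/"])
print(args.model_path)  # prints: weights/

Note that the old underscore spelling is no longer accepted on the command line; only the stored attribute name stays the same.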

mistral/convert.py

@@ -10,7 +10,7 @@ import torch
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="Convert Mistral weights to MLX.")
     parser.add_argument(
-        "--model_path",
+        "--model-path",
         type=str,
         default="mistral-7B-v0.1/",
         help="The path to the Mistral model. The MLX weights will also be saved there.",

mistral/mistral.py

@@ -225,7 +225,7 @@ def generate(prompt: mx.array, model: Mistral, temp: Optional[float] = 0.0):
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="Mistral inference script")
     parser.add_argument(
-        "--model_path",
+        "--model-path",
         type=str,
         default="mistral-7B-v0.1",
         help="The path to the model weights and tokenizer",

mixtral/README.md

@@ -43,7 +43,7 @@ Now from `mlx-exmaples/mixtral` convert and save the weights as NumPy arrays so
 MLX can read them:
 ```
-python convert.py --model_path $MIXTRAL_MODEL/
+python convert.py --model-path $MIXTRAL_MODEL/
 ```
 The conversion script will save the converted weights in the same location.
@@ -53,7 +53,7 @@ The conversion script will save the converted weights in the same location.
 As easy as:
 ```
-python mixtral.py --model_path $MIXTRAL_MODEL/
+python mixtral.py --model-path $MIXTRAL_MODEL/
 ```
 For more options including how to prompt the model, run:

mixtral/convert.py

@@ -34,7 +34,7 @@ def convert(k, v, config):
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="Convert Mixtral weights to MLX.")
     parser.add_argument(
-        "--model_path",
+        "--model-path",
         type=str,
         default="Mixtral-8x7B-v0.1/",
         help="The path to the Mixtral model. The MLX model weights will also be saved there.",

mixtral/mixtral.py

@@ -282,7 +282,7 @@ def generate(prompt: mx.array, model: Mixtral, temp: Optional[float] = 0.0):
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description="Mixtral inference script")
     parser.add_argument(
-        "--model_path",
+        "--model-path",
         type=str,
         default="Mixtral-8x7B-v0.1",
         help="The path to the model weights, tokenizer, and config",