mirror of https://github.com/ml-explore/mlx-examples.git
Support --add_eos_token argument within Lora training (#760)
* Support the `--add_eos_token` argument so users can control whether the EOS token is appended during LoRA training, addressing issues like incomplete text generation.

* Support `--add_eos_token`, code format

---------

Co-authored-by: Zhan ChengLong <zhanchenglong@bytedance.com>
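For orientation, here is a minimal sketch of how such a flag typically reaches the `tokenizer_config` parameter introduced below. The argument name, default, and wiring are assumptions for illustration, not code from this commit; `load` stands in for the function patched in the diff.

    import argparse

    from utils import load  # hypothetical import; load() is the function patched below

    # Hypothetical CLI wiring: expose the flag and forward it to the tokenizer.
    parser = argparse.ArgumentParser(description="LoRA fine-tuning")
    parser.add_argument(
        "--model",
        default="path/to/model",  # placeholder, not a real default from the PR
        help="Model path or Hugging Face repo.",
    )
    parser.add_argument(
        "--add_eos_token",
        action="store_true",
        help="Append the EOS token when tokenizing, so generations learn to stop.",
    )
    args = parser.parse_args()

    # Llama-style tokenizers in transformers accept add_eos_token at init time;
    # load() unpacks this dict into AutoTokenizer.from_pretrained.
    tokenizer_config = {"add_eos_token": args.add_eos_token}
    model, tokenizer, config = load(args.model, tokenizer_config)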
@@ -122,7 +122,7 @@ def save_model(save_dir: str, weights, tokenizer, config):
     )


-def load(path_or_hf_repo: str):
+def load(path_or_hf_repo: str, tokenizer_config={}):
     # If the path exists, try to load the model from it;
     # otherwise download it from the hf_repo and cache it.
     model_path = Path(path_or_hf_repo)
@@ -162,7 +162,9 @@ def load(path_or_hf_repo: str):
     model.load_weights(list(weights.items()))

     mx.eval(model.parameters())
-    tokenizer = transformers.AutoTokenizer.from_pretrained(model_path)
+    tokenizer = transformers.AutoTokenizer.from_pretrained(
+        model_path, **tokenizer_config
+    )
     return model, tokenizer, config
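With the change in place, any tokenizer keyword argument can be forwarded through `load`. A usage sketch ("path/to/model" is a placeholder; the final assert assumes a Llama-style tokenizer that honors `add_eos_token`):

    # Hypothetical usage of the patched load().
    model, tokenizer, config = load(
        "path/to/model",  # placeholder path
        tokenizer_config={"add_eos_token": True},
    )

    # With add_eos_token=True, encoded training examples end in the stop token.
    ids = tokenizer.encode("hello world")
    assert ids[-1] == tokenizer.eos_token_id

One stylistic note: `tokenizer_config={}` is a mutable default argument, a classic Python pitfall. It is harmless here because the dict is only unpacked, never mutated, but `tokenizer_config=None` with a fallback inside the function is the more defensive idiom.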