Update LLM generation docs to use chat template (#973)

* fix docs

* add template to model cards as well

* revert

* version
This commit is contained in:
Awni Hannun
2024-09-07 06:06:15 -07:00
committed by GitHub
parent 324184d670
commit c3e3411756
3 changed files with 24 additions and 3 deletions

View File

@@ -1,3 +1,3 @@
# Copyright © 2023-2024 Apple Inc.
-__version__ = "0.18.1"
+__version__ = "0.18.2"

View File

@@ -577,7 +577,16 @@ def upload_to_hub(path: str, upload_repo: str, hf_path: str):
from mlx_lm import load, generate
model, tokenizer = load("{upload_repo}")
-response = generate(model, tokenizer, prompt="hello", verbose=True)
+prompt="hello"
+
+if hasattr(tokenizer, "apply_chat_template") and tokenizer.chat_template is not None:
+    messages = [{"role": "user", "content": prompt}]
+    prompt = tokenizer.apply_chat_template(
+        messages, tokenize=False, add_generation_prompt=True
+    )
+
+response = generate(model, tokenizer, prompt=prompt, verbose=True)
```
"""
)