mirror of
https://github.com/ml-explore/mlx-examples.git
synced 2025-09-01 04:14:38 +08:00
Update LLM generation docs to use chat template (#973)
* fix docs
* add template to model cards as well
* revert
* version
This commit is contained in:
@@ -1,3 +1,3 @@
|
||||
# Copyright © 2023-2024 Apple Inc.
|
||||
|
||||
__version__ = "0.18.1"
|
||||
__version__ = "0.18.2"
|
||||
|
@@ -577,7 +577,16 @@ def upload_to_hub(path: str, upload_repo: str, hf_path: str):
|
||||
from mlx_lm import load, generate
|
||||
|
||||
model, tokenizer = load("{upload_repo}")
|
||||
response = generate(model, tokenizer, prompt="hello", verbose=True)
|
||||
|
||||
prompt="hello"
|
||||
|
||||
if hasattr(tokenizer, "apply_chat_template") and tokenizer.chat_template is not None:
|
||||
messages = [{"role": "user", "content": prompt}]
|
||||
prompt = tokenizer.apply_chat_template(
|
||||
messages, tokenize=False, add_generation_prompt=True
|
||||
)
|
||||
|
||||
response = generate(model, tokenizer, prompt=prompt, verbose=True)
|
||||
```
|
||||
"""
|
||||
)
|
||||
|
Reference in New Issue
Block a user