From 8179b9943643e9f155e85f8696c4b599b184d230 Mon Sep 17 00:00:00 2001
From: Goekdeniz-Guelmez
Date: Wed, 12 Feb 2025 19:24:35 +0100
Subject: [PATCH] quick prompting fix

---
 llms/mlx_lm/tuner/datasets.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/llms/mlx_lm/tuner/datasets.py b/llms/mlx_lm/tuner/datasets.py
index 2ca012f3..01ffd81b 100644
--- a/llms/mlx_lm/tuner/datasets.py
+++ b/llms/mlx_lm/tuner/datasets.py
@@ -30,7 +30,7 @@ class GRPODataset:
                 prompt_tokens = tokenizer.apply_chat_template(
                     [
                         {'role': 'system', 'content': """A conversation between User and Assistant. The user asks a question, and the Assistant solves it.
-                        The assistantfirst thinks about the reasoning process in the mind and then provides the user with the answer.
+                        The assistant first thinks about the reasoning process in the mind and then provides the user with the answer.
                         The reasoning process and answer are enclosed within <think> </think> and <answer> </answer> tags, respectively, i.e., <think> reasoning process here </think><answer> answer here </answer>."""},
                         {'role': 'user', 'content': prompt_str}
                     ],
@@ -39,7 +39,7 @@ class GRPODataset:
             else:
                 if use_prompt:
                     prompt_tokens = tokenizer.encode(f"""A conversation between User and Assistant. The user asks a question, and the Assistant solves it.
-                        The assistantfirst thinks about the reasoning process in the mind and then provides the user with the answer.
+                        The assistant first thinks about the reasoning process in the mind and then provides the user with the answer.
                         The reasoning process and answer are enclosed within <think> </think> and <answer> </answer> tags, respectively, i.e., <think> reasoning process here </think><answer> answer here </answer>.
                         User: {prompt_str} Assistant: """)
                 else:
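Reviewer note, not part of the patch: a minimal sketch of the call path this fix touches, assuming the Hugging Face transformers apply_chat_template API that mlx_lm's tokenizer wrapper forwards to. The model name and user question below are illustrative placeholders, not taken from the patch.

# Hedged sketch: how the corrected system prompt is rendered into tokens.
# Assumes a chat-template-capable tokenizer; model name is an assumption.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-0.5B-Instruct")

system_prompt = (
    "A conversation between User and Assistant. The user asks a question, "
    "and the Assistant solves it. The assistant first thinks about the "
    "reasoning process in the mind and then provides the user with the answer. "
    "The reasoning process and answer are enclosed within <think> </think> and "
    "<answer> </answer> tags, respectively, i.e., <think> reasoning process "
    "here </think><answer> answer here </answer>."
)

# apply_chat_template with tokenize=True (the default) returns token ids.
prompt_tokens = tokenizer.apply_chat_template(
    [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": "What is 7 * 6?"},  # placeholder prompt_str
    ],
    add_generation_prompt=True,  # end with the assistant turn header
)
print(tokenizer.decode(prompt_tokens))  # inspect the rendered prompt

Decoding the ids back to text makes the one-word fix easy to verify: before the patch the rendered prompt contains "assistantfirst", which tokenizes differently from the intended two words and can skew GRPO training prompts.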