From 956da0ddc7485cab97a01487fcc04c5174a5e3b7 Mon Sep 17 00:00:00 2001
From: Neil Mehta
Date: Sat, 8 Mar 2025 14:08:55 -0500
Subject: [PATCH] Create sampler chain

---
 llms/mlx_lm/sample_utils.py     | 33 ++++++++++++++++++++++-----------
 llms/tests/test_sample_utils.py | 32 ++++++++++++++++----------------
 2 files changed, 38 insertions(+), 27 deletions(-)

diff --git a/llms/mlx_lm/sample_utils.py b/llms/mlx_lm/sample_utils.py
index 5ad3d2c5..d62c7f75 100644
--- a/llms/mlx_lm/sample_utils.py
+++ b/llms/mlx_lm/sample_utils.py
@@ -35,14 +35,25 @@ def make_sampler(
     """
     if temp == 0:
         return lambda x: mx.argmax(x, axis=-1)
-    elif top_p > 0 and top_p < 1.0:
-        return lambda x: top_p_sampling(x, top_p, temp)
-    elif min_p != 0.0:
-        return lambda x: min_p_sampling(x, min_p, min_tokens_to_keep, temp)
-    elif top_k > 0:
-        return lambda x: top_k_sampling(x, top_k, temp)
-    else:
-        return lambda x: categorical_sampling(x, temp)
+
+    # Create sampler chain
+    sampling_methods = []
+    if top_k > 0:
+        sampling_methods.append(lambda x: apply_top_k(x, top_k))
+    if top_p > 0 and top_p < 1.0:
+        sampling_methods.append(lambda x: apply_top_p(x, top_p))
+    if min_p != 0.0:
+        sampling_methods.append(lambda x: apply_min_p(x, min_p, min_tokens_to_keep))
+
+    # Apply the sampling methods
+    def sampler(logits):
+        for method in sampling_methods:
+            logits = method(logits)
+
+        # Return the sampled token
+        return categorical_sampling(logits, temp)
+
+    return sampler
 
 
 def make_logits_processors(
@@ -85,7 +96,7 @@
 
 
 @partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
-def top_k_sampling(
+def apply_top_k(
     logprobs: mx.array,
     top_k: int,
 ) -> mx.array:
@@ -110,7 +121,7 @@
 
 
 @partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
-def min_p_sampling(
+def apply_min_p(
     logprobs: mx.array,
     min_p: float,
     min_tokens_to_keep: int = 1,
@@ -171,7 +182,7 @@
 
 
 @partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
-def top_p_sampling(logits: mx.array, top_p: float) -> mx.array:
+def apply_top_p(logits: mx.array, top_p: float) -> mx.array:
     """
     Apply top-p (nucleus) sampling to logits.
diff --git a/llms/tests/test_sample_utils.py b/llms/tests/test_sample_utils.py
index 19b65e4f..a8664fd9 100644
--- a/llms/tests/test_sample_utils.py
+++ b/llms/tests/test_sample_utils.py
@@ -1,35 +1,35 @@
 import unittest
 
 import mlx.core as mx
-from mlx_lm.sample_utils import min_p_sampling, top_k_sampling, top_p_sampling
+from mlx_lm.sample_utils import apply_min_p, apply_top_k, apply_top_p
 
 
 class TestSampleUtils(unittest.TestCase):
-    def test_top_p_sampling(self):
+    def test_apply_top_p(self):
         probs = mx.array([0.9, 0.0, 0.0, 0.1])[None]
         logits = mx.log(probs)
 
-        new_logits = top_p_sampling(logits, 0.3)
+        new_logits = apply_top_p(logits, 0.3)
         actual_probs = mx.softmax(new_logits.squeeze())
         self.assertEqual(actual_probs.tolist(), [1.0, 0.0, 0.0, 0.0])
 
-        new_logits = top_p_sampling(logits, 0.95)
+        new_logits = apply_top_p(logits, 0.95)
         actual_probs = mx.softmax(new_logits.squeeze())
         self.assertEqual(probs.squeeze().tolist(), actual_probs.tolist())
 
         probs = mx.array([0.0, 0.5, 0.4, 0.1])[None]
         logits = mx.log(probs)
 
-        new_logits = top_p_sampling(logits, 0.4)
+        new_logits = apply_top_p(logits, 0.4)
         actual_probs = mx.softmax(new_logits.squeeze())
         self.assertEqual(actual_probs.tolist(), [0.0, 1.0, 0.0, 0.0])
 
-        new_logits = top_p_sampling(logits, 0.6)
+        new_logits = apply_top_p(logits, 0.6)
         actual_probs = mx.softmax(new_logits.squeeze())
         self.assertEqual(
             [round(p, 4) for p in actual_probs.tolist()], [0.0, 0.5556, 0.4444, 0.0]
         )
 
-        new_logits = top_p_sampling(logits, 0.95)
+        new_logits = apply_top_p(logits, 0.95)
         actual_probs = mx.softmax(new_logits.squeeze())
         actual_rounded = [round(p, 4) for p in actual_probs.tolist()]
         expected_rounded = [0.0, 0.5, 0.4, 0.1]
@@ -39,45 +39,45 @@ class TestSampleUtils(unittest.TestCase):
         # Batch mode works
         probs = mx.array([[0.9, 0.0, 0.0, 0.1], [0.0, 0.8, 0.1, 0.1]])
         logits = mx.log(probs)
-        new_logits = top_p_sampling(logits, 0.5)
+        new_logits = apply_top_p(logits, 0.5)
         actual_probs = mx.softmax(new_logits, axis=-1)
         self.assertEqual(
             actual_probs.tolist(), [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]]
         )
 
-    def test_min_p_sampling(self):
+    def test_apply_min_p(self):
         probs = mx.array([0.9, 0.0, 0.0, 0.1])[None]
         logits = mx.log(probs)
-        new_logits = min_p_sampling(logits, 0.8)
+        new_logits = apply_min_p(logits, 0.8)
         actual_probs = mx.softmax(new_logits.squeeze())
         self.assertEqual(actual_probs.tolist(), [1.0, 0.0, 0.0, 0.0])
 
         probs = mx.array([0.9, 0.0, 0.0, 0.1])[None]
         logits = mx.log(probs)
-        new_logits = min_p_sampling(logits, 0.05)
+        new_logits = apply_min_p(logits, 0.05)
         actual_probs = mx.softmax(new_logits.squeeze())
         self.assertEqual(actual_probs.tolist(), mx.squeeze(probs).tolist())
 
         # Batch mode works
         probs = mx.array([[0.9, 0.0, 0.0, 0.1], [0.0, 0.8, 0.0, 0.1]])
         logits = mx.log(probs)
-        new_logits = min_p_sampling(logits, 0.7)
+        new_logits = apply_min_p(logits, 0.7)
         actual_probs = mx.softmax(new_logits, axis=-1)
         self.assertEqual(
             actual_probs.tolist(), [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]]
         )
 
-    def test_top_k_sampling(self):
+    def test_apply_top_k(self):
         probs = mx.array([0.9, 0.0, 0.0, 0.1])[None]
         logits = mx.log(probs)
-        new_logits = top_k_sampling(logits, 1)
+        new_logits = apply_top_k(logits, 1)
         actual_probs = mx.softmax(new_logits.squeeze())
         self.assertEqual(actual_probs.tolist(), [1.0, 0.0, 0.0, 0.0])
 
         probs = mx.array([0.6, 0.0, 0.1, 0.3])[None]
         logits = mx.log(probs)
-        new_logits = top_k_sampling(logits, 2)
+        new_logits = apply_top_k(logits, 2)
         actual_probs = mx.softmax(new_logits.squeeze())
         self.assertEqual(
             [round(p, 4) for p in actual_probs.tolist()], [0.6667, 0.0, 0.0, 0.3333]
         )
@@ -87,7 +87,7 @@ class TestSampleUtils(unittest.TestCase):
         probs = mx.array([[0.9, 0.0, 0.0, 0.1], [0.0, 0.8, 0.0, 0.1]])
         logits = mx.log(probs)
-        new_logits = top_k_sampling(logits, 1)
+        new_logits = apply_top_k(logits, 1)
         actual_probs = mx.softmax(new_logits, axis=-1)
         self.assertEqual(
             actual_probs.tolist(), [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]]
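
A minimal usage sketch of the chained sampler introduced by this patch (not part of the diff itself). It assumes make_sampler keeps the keyword parameters referenced in the hunk above (temp, top_p, min_p, min_tokens_to_keep, top_k) and its existing defaults; the logits values and settings below are illustrative only.

    import mlx.core as mx
    from mlx_lm.sample_utils import make_sampler

    # Fake logits over a 4-token vocabulary (values are arbitrary).
    logits = mx.log(mx.array([[0.5, 0.3, 0.15, 0.05]]))

    # With the chain, the filters are no longer mutually exclusive: every enabled
    # filter is appended to sampling_methods and applied in order
    # (top_k -> top_p -> min_p) before the final categorical draw at temp.
    sampler = make_sampler(temp=0.8, top_k=3, top_p=0.9)
    token = sampler(logits)  # token id(s) sampled from the filtered distribution
    print(token.item())

Compared to the previous elif chain, where only the first matching option took effect, the chained version composes all configured filters before taking a single categorical sample.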