Create sampler chain

This commit is contained in:
Neil Mehta 2025-03-08 14:08:55 -05:00
parent 932b7c0510
commit 956da0ddc7
2 changed files with 38 additions and 27 deletions

View File

@@ -35,14 +35,25 @@ def make_sampler(
     """
     if temp == 0:
         return lambda x: mx.argmax(x, axis=-1)
-    elif top_p > 0 and top_p < 1.0:
-        return lambda x: top_p_sampling(x, top_p, temp)
-    elif min_p != 0.0:
-        return lambda x: min_p_sampling(x, min_p, min_tokens_to_keep, temp)
-    elif top_k > 0:
-        return lambda x: top_k_sampling(x, top_k, temp)
-    else:
-        return lambda x: categorical_sampling(x, temp)
+
+    # Create sampler chain
+    sampling_methods = []
+    if top_k > 0:
+        sampling_methods.append(lambda x: apply_top_k(x, top_k))
+    if top_p > 0 and top_p < 1.0:
+        sampling_methods.append(lambda x: apply_top_p(x, top_p))
+    if min_p != 0.0:
+        sampling_methods.append(lambda x: apply_min_p(x, min_p, min_tokens_to_keep))
+
+    # Apply the sampling methods
+    def sampler(logits):
+        for method in sampling_methods:
+            logits = method(logits)
+        # Return the sampled token
+        return categorical_sampling(logits, temp)
+
+    return sampler


 def make_logits_processors(
@@ -85,7 +96,7 @@ def make_logits_processors(
 @partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
-def top_k_sampling(
+def apply_top_k(
     logprobs: mx.array,
     top_k: int,
 ) -> mx.array:
@@ -110,7 +121,7 @@ def top_k_sampling(
 @partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
-def min_p_sampling(
+def apply_min_p(
     logprobs: mx.array,
     min_p: float,
     min_tokens_to_keep: int = 1,
@@ -171,7 +182,7 @@ def min_p_sampling(
 @partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
-def top_p_sampling(logits: mx.array, top_p: float) -> mx.array:
+def apply_top_p(logits: mx.array, top_p: float) -> mx.array:
     """
     Apply top-p (nucleus) sampling to logits.

View File

@@ -1,35 +1,35 @@
 import unittest

 import mlx.core as mx

-from mlx_lm.sample_utils import min_p_sampling, top_k_sampling, top_p_sampling
+from mlx_lm.sample_utils import apply_min_p, apply_top_k, apply_top_p


 class TestSampleUtils(unittest.TestCase):
-    def test_top_p_sampling(self):
+    def test_apply_top_p(self):
         probs = mx.array([0.9, 0.0, 0.0, 0.1])[None]
         logits = mx.log(probs)

-        new_logits = top_p_sampling(logits, 0.3)
+        new_logits = apply_top_p(logits, 0.3)
         actual_probs = mx.softmax(new_logits.squeeze())
         self.assertEqual(actual_probs.tolist(), [1.0, 0.0, 0.0, 0.0])

-        new_logits = top_p_sampling(logits, 0.95)
+        new_logits = apply_top_p(logits, 0.95)
         actual_probs = mx.softmax(new_logits.squeeze())
         self.assertEqual(probs.squeeze().tolist(), actual_probs.tolist())

         probs = mx.array([0.0, 0.5, 0.4, 0.1])[None]
         logits = mx.log(probs)

-        new_logits = top_p_sampling(logits, 0.4)
+        new_logits = apply_top_p(logits, 0.4)
         actual_probs = mx.softmax(new_logits.squeeze())
         self.assertEqual(actual_probs.tolist(), [0.0, 1.0, 0.0, 0.0])

-        new_logits = top_p_sampling(logits, 0.6)
+        new_logits = apply_top_p(logits, 0.6)
         actual_probs = mx.softmax(new_logits.squeeze())
         self.assertEqual(
             [round(p, 4) for p in actual_probs.tolist()], [0.0, 0.5556, 0.4444, 0.0]
         )

-        new_logits = top_p_sampling(logits, 0.95)
+        new_logits = apply_top_p(logits, 0.95)
         actual_probs = mx.softmax(new_logits.squeeze())
         actual_rounded = [round(p, 4) for p in actual_probs.tolist()]
         expected_rounded = [0.0, 0.5, 0.4, 0.1]
@@ -39,45 +39,45 @@ class TestSampleUtils(unittest.TestCase):

         # Batch mode works
         probs = mx.array([[0.9, 0.0, 0.0, 0.1], [0.0, 0.8, 0.1, 0.1]])
         logits = mx.log(probs)
-        new_logits = top_p_sampling(logits, 0.5)
+        new_logits = apply_top_p(logits, 0.5)
         actual_probs = mx.softmax(new_logits, axis=-1)
         self.assertEqual(
             actual_probs.tolist(), [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]]
         )

-    def test_min_p_sampling(self):
+    def test_apply_min_p(self):
         probs = mx.array([0.9, 0.0, 0.0, 0.1])[None]
         logits = mx.log(probs)
-        new_logits = min_p_sampling(logits, 0.8)
+        new_logits = apply_min_p(logits, 0.8)
         actual_probs = mx.softmax(new_logits.squeeze())
         self.assertEqual(actual_probs.tolist(), [1.0, 0.0, 0.0, 0.0])

         probs = mx.array([0.9, 0.0, 0.0, 0.1])[None]
         logits = mx.log(probs)
-        new_logits = min_p_sampling(logits, 0.05)
+        new_logits = apply_min_p(logits, 0.05)
         actual_probs = mx.softmax(new_logits.squeeze())
         self.assertEqual(actual_probs.tolist(), mx.squeeze(probs).tolist())

         # Batch mode works
         probs = mx.array([[0.9, 0.0, 0.0, 0.1], [0.0, 0.8, 0.0, 0.1]])
         logits = mx.log(probs)
-        new_logits = min_p_sampling(logits, 0.7)
+        new_logits = apply_min_p(logits, 0.7)
         actual_probs = mx.softmax(new_logits, axis=-1)
         self.assertEqual(
             actual_probs.tolist(), [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]]
         )

-    def test_top_k_sampling(self):
+    def test_apply_top_k(self):
         probs = mx.array([0.9, 0.0, 0.0, 0.1])[None]
         logits = mx.log(probs)

-        new_logits = top_k_sampling(logits, 1)
+        new_logits = apply_top_k(logits, 1)
         actual_probs = mx.softmax(new_logits.squeeze())
         self.assertEqual(actual_probs.tolist(), [1.0, 0.0, 0.0, 0.0])

         probs = mx.array([0.6, 0.0, 0.1, 0.3])[None]
         logits = mx.log(probs)

-        new_logits = top_k_sampling(logits, 2)
+        new_logits = apply_top_k(logits, 2)
         actual_probs = mx.softmax(new_logits.squeeze())
         self.assertEqual(
             [round(p, 4) for p in actual_probs.tolist()], [0.6667, 0.0, 0.0, 0.3333]
@@ -87,7 +87,7 @@ class TestSampleUtils(unittest.TestCase):
         probs = mx.array([[0.9, 0.0, 0.0, 0.1], [0.0, 0.8, 0.0, 0.1]])
         logits = mx.log(probs)
-        new_logits = top_k_sampling(logits, 1)
+        new_logits = apply_top_k(logits, 1)
         actual_probs = mx.softmax(new_logits, axis=-1)
         self.assertEqual(
             actual_probs.tolist(), [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]]
         )