Mirror of https://github.com/ml-explore/mlx-examples.git (synced 2025-06-24 09:21:18 +08:00)

Commit 4a9d75ae4f: Merge branch 'ml-explore:main' into adding-dpo-training
@@ -35,14 +35,25 @@ def make_sampler(
     """
     if temp == 0:
         return lambda x: mx.argmax(x, axis=-1)
-    elif top_p > 0 and top_p < 1.0:
-        return lambda x: top_p_sampling(x, top_p, temp)
-    elif min_p != 0.0:
-        return lambda x: min_p_sampling(x, min_p, min_tokens_to_keep, temp)
-    elif top_k > 0:
-        return lambda x: top_k_sampling(x, top_k, temp)
-    else:
-        return lambda x: categorical_sampling(x, temp)
+
+    # Create sampler chain
+    sampling_methods = []
+    if top_k > 0:
+        sampling_methods.append(lambda x: apply_top_k(x, top_k))
+    if top_p > 0 and top_p < 1.0:
+        sampling_methods.append(lambda x: apply_top_p(x, top_p))
+    if min_p != 0.0:
+        sampling_methods.append(lambda x: apply_min_p(x, min_p, min_tokens_to_keep))
+
+    # Apply the sampling methods
+    def sampler(logits):
+        for method in sampling_methods:
+            logits = method(logits)
+
+        # Return the sampled token
+        return categorical_sampling(logits, temp)
+
+    return sampler
 
 
 def make_logits_processors(
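For readers skimming the hunk above: the refactor replaces the mutually exclusive elif chain with a list of composable logit filters, and the random draw happens exactly once at the end. Below is a minimal runnable sketch of the pattern, not the repository code — the names apply_top_k_stub and make_sampler_sketch are hypothetical stand-ins:

import mlx.core as mx

def apply_top_k_stub(logits, k):
    # Keep the k largest logits; force everything else to -inf,
    # so softmax assigns those tokens zero probability mass.
    mask_idx = mx.argpartition(-logits, kth=k - 1, axis=-1)[..., k:]
    return mx.put_along_axis(
        logits, mask_idx, mx.array(-float("inf"), logits.dtype), axis=-1
    )

def make_sampler_sketch(temp=1.0, top_k=0):
    methods = []
    if top_k > 0:
        methods.append(lambda x: apply_top_k_stub(x, top_k))

    def sampler(logits):
        # Each filter reshapes the logits; only the last step samples.
        for method in methods:
            logits = method(logits)
        return mx.random.categorical(logits * (1 / temp), axis=-1)

    return sampler

sampler = make_sampler_sketch(temp=0.8, top_k=2)
print(sampler(mx.log(mx.array([0.6, 0.1, 0.1, 0.2])[None])))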
@@ -85,10 +96,9 @@ def make_logits_processors(
 
 
 @partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
-def top_k_sampling(
+def apply_top_k(
     logprobs: mx.array,
     top_k: int,
-    temperature=1.0,
 ) -> mx.array:
     """
     Sample from only the top K tokens ranked by probability.
@@ -103,20 +113,18 @@ def top_k_sampling(
             f"`top_k` has to be an integer in the (0, {vocab_size}] interval,"
             f" but is {top_k}."
         )
-    logprobs = logprobs * (1 / temperature)
     mask_idx = mx.argpartition(-logprobs, kth=top_k - 1, axis=-1)[..., top_k:]
     masked_logprobs = mx.put_along_axis(
         logprobs, mask_idx, mx.array(-float("inf"), logprobs.dtype), axis=-1
     )
-    return mx.random.categorical(masked_logprobs, axis=-1)
+    return masked_logprobs
 
 
 @partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
-def min_p_sampling(
+def apply_min_p(
     logprobs: mx.array,
     min_p: float,
     min_tokens_to_keep: int = 1,
-    temperature=1.0,
 ) -> mx.array:
     """
     Apply min-p sampling to the logprobs.
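A worked example of the top-k masking above (a sketch assuming mlx is installed; the values are illustrative, not taken from the test suite):

import mlx.core as mx

logprobs = mx.log(mx.array([0.5, 0.3, 0.15, 0.05])[None])
top_k = 2
# Indices of every token outside the top-k set...
mask_idx = mx.argpartition(-logprobs, kth=top_k - 1, axis=-1)[..., top_k:]
# ...get their logprobs forced to -inf, so softmax gives them zero mass.
masked = mx.put_along_axis(
    logprobs, mask_idx, mx.array(-float("inf"), logprobs.dtype), axis=-1
)
print(mx.softmax(masked, axis=-1))  # ~[0.625, 0.375, 0.0, 0.0]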
@@ -144,8 +152,6 @@ def min_p_sampling(
         )
     # reference implementation: https://github.com/huggingface/transformers/blob/main/src/transformers/generation/logits_process.py#L531-L605
 
-    logprobs = logprobs * (1 / temperature)
-
     # Indices sorted in decreasing order
     sorted_indices = mx.argsort(-logprobs, axis=-1)
     sorted_logprobs = mx.take_along_axis(logprobs, sorted_indices, axis=-1)
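Temperature scaling is deleted from each filter because it now happens once, in the final draw. A hedged sketch of that final step — on this reading, categorical_sampling in mlx_lm is a compiled wrapper around mx.random.categorical with the temperature applied to the logits:

import mlx.core as mx

def categorical_sampling_sketch(logits, temp):
    # Scale logits by 1/temp, then draw one token index per row.
    return mx.random.categorical(logits * (1 / temp), axis=-1)

print(categorical_sampling_sketch(mx.log(mx.array([0.7, 0.3])[None]), 0.5).item())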
@@ -163,25 +169,31 @@ def min_p_sampling(
     # Create pool of tokens with probability less than scaled min_p
     selected_logprobs = mx.where(tokens_to_remove, -float("inf"), sorted_logprobs)
 
-    # Return sampled tokens
-    sorted_tokens = mx.random.categorical(selected_logprobs, axis=-1)[:, None]
-    return mx.take_along_axis(sorted_indices, sorted_tokens, axis=-1).squeeze(1)
+    # Create a mapping to rearrange back to original indices
+    # Use argsort of sorted_indices to get the inverse permutation
+    inverse_indices = mx.argsort(sorted_indices, axis=-1)
+
+    # Rearrange selected_logprobs back to original order
+    original_order_logprobs = mx.take_along_axis(
+        selected_logprobs, inverse_indices, axis=-1
+    )
+
+    return original_order_logprobs
 
 
 @partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
-def top_p_sampling(logits: mx.array, top_p: float, temperature: float) -> mx.array:
+def apply_top_p(logits: mx.array, top_p: float) -> mx.array:
     """
     Apply top-p (nucleus) sampling to logits.
 
     Args:
         logits: The logits from the model's output.
         top_p: The cumulative probability threshold for top-p filtering.
-        temperature: Temperature parameter for softmax distribution reshaping.
     Returns:
         token selected based on the top-p criterion.
     """
     # referenced implementation from https://github.com/huggingface/transformers/blob/main/src/transformers/generation/logits_process.py#L449-L460
-    probs = mx.softmax(logits * (1 / temperature), axis=-1)
+    probs = mx.softmax(logits, axis=-1)
 
     # sort probs in ascending order
     sorted_indices = mx.argsort(probs, axis=-1)
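The new return path relies on the double-argsort identity: for a permutation order, mx.argsort(order) is its inverse, so sorting and then indexing with the inverse restores the original vocabulary order. A tiny self-contained demonstration (assumes mlx; the array values are arbitrary):

import mlx.core as mx

logprobs = mx.log(mx.array([0.1, 0.6, 0.3])[None])
order = mx.argsort(-logprobs, axis=-1)        # descending order: [1, 2, 0]
sorted_lp = mx.take_along_axis(logprobs, order, axis=-1)
inverse = mx.argsort(order, axis=-1)          # inverse permutation: [2, 0, 1]
restored = mx.take_along_axis(sorted_lp, inverse, axis=-1)
print(mx.allclose(restored, logprobs))        # True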
@@ -196,8 +208,15 @@ def top_p_sampling(logits: mx.array, top_p: float, temperature: float) -> mx.array:
         0,
     )
 
-    sorted_tokens = mx.random.categorical(mx.log(top_probs), axis=-1)[:, None]
-    return mx.take_along_axis(sorted_indices, sorted_tokens, axis=-1).squeeze(1)
+    # Create a mapping to rearrange back to original indices
+    # Use argsort of sorted_indices to get the inverse permutation
+    inverse_indices = mx.argsort(sorted_indices, axis=-1)
+
+    # Rearrange top_probs back to original order
+    original_order_probs = mx.take_along_axis(top_probs, inverse_indices, axis=-1)
+
+    # Convert back to logits and return
+    return mx.log(original_order_probs)
 
 
 @partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
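To see the nucleus arithmetic above with concrete numbers: with probabilities [0.0, 0.5, 0.4, 0.1] and top_p = 0.6, tokens 1 and 2 survive and renormalize to roughly [0, 0.5556, 0.4444, 0], which is exactly what the updated tests assert. A standalone re-derivation follows (a sketch that mirrors, but does not import, apply_top_p):

import mlx.core as mx

probs = mx.array([0.0, 0.5, 0.4, 0.1])[None]
logits = mx.log(probs)
top_p = 0.6

p = mx.softmax(logits, axis=-1)
sorted_indices = mx.argsort(p, axis=-1)               # ascending
sorted_probs = mx.take_along_axis(p, sorted_indices, axis=-1)
cumulative = mx.cumsum(sorted_probs, axis=-1)
# Keep tokens whose cumulative (suffix) mass exceeds 1 - top_p: the nucleus.
top_probs = mx.where(cumulative > 1 - top_p, sorted_probs, 0)
inverse = mx.argsort(sorted_indices, axis=-1)
filtered = mx.take_along_axis(top_probs, inverse, axis=-1)
print(mx.softmax(mx.log(filtered), axis=-1))          # ~[0.0, 0.5556, 0.4444, 0.0]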
@@ -1,79 +1,97 @@
 import unittest
 
 import mlx.core as mx
-from mlx_lm.sample_utils import min_p_sampling, top_k_sampling, top_p_sampling
+from mlx_lm.sample_utils import apply_min_p, apply_top_k, apply_top_p
 
 
 class TestSampleUtils(unittest.TestCase):
-    def test_top_p_sampling(self):
+    def test_apply_top_p(self):
         probs = mx.array([0.9, 0.0, 0.0, 0.1])[None]
         logits = mx.log(probs)
-        temperature = 1.0
 
-        token = top_p_sampling(logits, 0.3, temperature).item()
-        self.assertEqual(token, 0)
+        new_logits = apply_top_p(logits, 0.3)
+        actual_probs = mx.softmax(new_logits.squeeze())
+        self.assertEqual(actual_probs.tolist(), [1.0, 0.0, 0.0, 0.0])
 
-        token = top_p_sampling(logits, 0.95, temperature).item()
-        self.assertTrue(token in (0, 3))
+        new_logits = apply_top_p(logits, 0.95)
+        actual_probs = mx.softmax(new_logits.squeeze())
+        self.assertTrue(mx.allclose(probs.squeeze(), actual_probs))
 
         probs = mx.array([0.0, 0.5, 0.4, 0.1])[None]
         logits = mx.log(probs)
+        new_logits = apply_top_p(logits, 0.4)
+        actual_probs = mx.softmax(new_logits.squeeze())
+        self.assertEqual(actual_probs.tolist(), [0.0, 1.0, 0.0, 0.0])
 
-        token = top_p_sampling(logits, 0.4, temperature).item()
-        self.assertEqual(token, 1)
+        new_logits = apply_top_p(logits, 0.6)
+        actual_probs = mx.softmax(new_logits.squeeze())
+        self.assertEqual(
+            [round(p, 4) for p in actual_probs.tolist()], [0.0, 0.5556, 0.4444, 0.0]
+        )
 
-        token = top_p_sampling(logits, 0.6, temperature).item()
-        self.assertTrue(token in (1, 2))
+        new_logits = apply_top_p(logits, 0.95)
+        actual_probs = mx.softmax(new_logits.squeeze())
+        actual_rounded = [round(p, 4) for p in actual_probs.tolist()]
+        expected_rounded = [0.0, 0.5, 0.4, 0.1]
+        self.assertEqual(actual_rounded, expected_rounded)
+        self.assertAlmostEqual(sum(actual_probs.tolist()), 1.0)
 
-        token = top_p_sampling(logits, 0.95, temperature).item()
-        self.assertTrue(token in (1, 2, 3))
-
         # Batch mode works
-        probs = mx.array([[0.9, 0.0, 0.0, 0.1], [0.0, 0.8, 0.0, 0.1]])
+        probs = mx.array([[0.9, 0.0, 0.0, 0.1], [0.0, 0.8, 0.1, 0.1]])
         logits = mx.log(probs)
-        tokens = top_p_sampling(logits, 0.5, temperature)
-        self.assertEqual(tokens.tolist(), [0, 1])
+        new_logits = apply_top_p(logits, 0.5)
+        actual_probs = mx.softmax(new_logits, axis=-1)
+        self.assertEqual(
+            actual_probs.tolist(), [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]]
+        )
 
-    def test_min_p_sampling(self):
+    def test_apply_min_p(self):
         probs = mx.array([0.9, 0.0, 0.0, 0.1])[None]
         logits = mx.log(probs)
-        temperature = 1.0
-        token = min_p_sampling(logits, 0.8)
-        self.assertEqual(token, 0)
+        new_logits = apply_min_p(logits, 0.8)
+        actual_probs = mx.softmax(new_logits.squeeze())
+        self.assertEqual(actual_probs.tolist(), [1.0, 0.0, 0.0, 0.0])
 
         probs = mx.array([0.9, 0.0, 0.0, 0.1])[None]
         logits = mx.log(probs)
-        temperature = 1.0
-        for _ in range(5):
-            token = min_p_sampling(logits, 0.05)
-            self.assertTrue(token in (0, 3))
+        new_logits = apply_min_p(logits, 0.05)
+        actual_probs = mx.softmax(new_logits.squeeze())
+        self.assertTrue(mx.allclose(actual_probs, mx.squeeze(probs)))
 
         # Batch mode works
         probs = mx.array([[0.9, 0.0, 0.0, 0.1], [0.0, 0.8, 0.0, 0.1]])
         logits = mx.log(probs)
-        tokens = min_p_sampling(logits, 0.7)
-        self.assertEqual(tokens.tolist(), [0, 1])
+        new_logits = apply_min_p(logits, 0.7)
+        actual_probs = mx.softmax(new_logits, axis=-1)
+        self.assertEqual(
+            actual_probs.tolist(), [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]]
+        )
 
-    def test_top_k_sampling(self):
+    def test_apply_top_k(self):
         probs = mx.array([0.9, 0.0, 0.0, 0.1])[None]
         logits = mx.log(probs)
 
-        token = top_k_sampling(logits, 1).item()
-        self.assertEqual(token, 0)
+        new_logits = apply_top_k(logits, 1)
+        actual_probs = mx.softmax(new_logits.squeeze())
+        self.assertEqual(actual_probs.tolist(), [1.0, 0.0, 0.0, 0.0])
 
-        probs = mx.array([0.5, 0.0, 0.0, 0.5])[None]
-        tokens = set()
-        for _ in range(100):
-            token = top_k_sampling(logits, 2)
-            tokens.add(token.item())
-        self.assertEqual(tokens, {0, 3})
+        probs = mx.array([0.6, 0.0, 0.1, 0.3])[None]
+        logits = mx.log(probs)
+        new_logits = apply_top_k(logits, 2)
+        actual_probs = mx.softmax(new_logits.squeeze())
+        self.assertEqual(
+            [round(p, 4) for p in actual_probs.tolist()], [0.6667, 0.0, 0.0, 0.3333]
+        )
 
         # Batch mode works
         probs = mx.array([[0.9, 0.0, 0.0, 0.1], [0.0, 0.8, 0.0, 0.1]])
         logits = mx.log(probs)
 
-        tokens = top_k_sampling(logits, 1)
-        self.assertEqual(tokens.tolist(), [0, 1])
+        new_logits = apply_top_k(logits, 1)
+        actual_probs = mx.softmax(new_logits, axis=-1)
+        self.assertEqual(
+            actual_probs.tolist(), [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]]
+        )
 
 
 if __name__ == "__main__":
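The updated tests check filtered distributions deterministically instead of drawing random tokens. An end-to-end sketch of what the renamed helpers enable — chaining filters and drawing once — assuming this commit's mlx_lm.sample_utils is importable; the chaining order and values below are illustrative:

import mlx.core as mx
from mlx_lm.sample_utils import apply_min_p, apply_top_k, apply_top_p

logits = mx.log(mx.array([0.5, 0.3, 0.15, 0.05])[None])
# Each filter returns reshaped logprobs, so the calls compose freely.
filtered = apply_top_p(apply_top_k(logits, 3), 0.9)
filtered = apply_min_p(filtered, 0.05)
# Temperature is applied by the caller at the single final draw.
token = mx.random.categorical(filtered).item()
print(token)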