top_k and min_p refactor

Repository: https://github.com/ml-explore/mlx-examples.git (mirror, synced 2025-06-24 09:21:18 +08:00)
Commit: 932b7c0510
Parent: 58e912966a
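
As the hunks below show, top_k_sampling and min_p_sampling become pure logits processors: the temperature argument and the internal mx.random.categorical draw are removed, and each function now returns filtered log-probabilities (restored to the original token order in min_p_sampling) rather than sampled token ids. The unit tests are updated to assert on the resulting distributions instead of on sampled tokens.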
@@ -88,7 +88,6 @@ def make_logits_processors(
 def top_k_sampling(
     logprobs: mx.array,
     top_k: int,
-    temperature=1.0,
 ) -> mx.array:
     """
     Sample from only the top K tokens ranked by probability.
@@ -103,12 +102,11 @@ def top_k_sampling(
             f"`top_k` has to be an integer in the (0, {vocab_size}] interval,"
             f" but is {top_k}."
         )
-    logprobs = logprobs * (1 / temperature)
     mask_idx = mx.argpartition(-logprobs, kth=top_k - 1, axis=-1)[..., top_k:]
     masked_logprobs = mx.put_along_axis(
         logprobs, mask_idx, mx.array(-float("inf"), logprobs.dtype), axis=-1
     )
-    return mx.random.categorical(masked_logprobs, axis=-1)
+    return masked_logprobs


 @partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
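
With the mx.random.categorical call removed, drawing a token is now the caller's job. A minimal sketch of the new call pattern (illustration only, not part of this commit; it assumes top_k_sampling is importable from mlx_lm.sample_utils):

    import mlx.core as mx
    from mlx_lm.sample_utils import top_k_sampling  # assumed import path

    # top_k_sampling now returns masked logprobs with the same shape as its
    # input: every entry outside the top-k set is -inf.
    logprobs = mx.log(mx.array([[0.6, 0.1, 0.2, 0.1]]))
    masked = top_k_sampling(logprobs, 2)  # keep the top-2 tokens

    # The categorical draw moves out of the sampler and into the caller.
    token = mx.random.categorical(masked, axis=-1)
    print(token.item())  # 0 or 2: the two highest-probability tokens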
@@ -116,7 +114,6 @@ def min_p_sampling(
     logprobs: mx.array,
     min_p: float,
     min_tokens_to_keep: int = 1,
-    temperature=1.0,
 ) -> mx.array:
     """
     Apply min-p sampling to the logprobs.
@@ -144,8 +141,6 @@ def min_p_sampling(
         )
     # reference implementation: https://github.com/huggingface/transformers/blob/main/src/transformers/generation/logits_process.py#L531-L605

-    logprobs = logprobs * (1 / temperature)
-
     # Indices sorted in decreasing order
     sorted_indices = mx.argsort(-logprobs, axis=-1)
     sorted_logprobs = mx.take_along_axis(logprobs, sorted_indices, axis=-1)
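
Temperature scaling is dropped from both samplers, so a caller that still wants it would scale the log-probabilities before invoking the processor. A sketch, equivalent to the deleted logprobs * (1 / temperature) line (hypothetical usage, not from this commit):

    import mlx.core as mx
    from mlx_lm.sample_utils import min_p_sampling  # assumed import path

    logprobs = mx.log(mx.array([[0.5, 0.3, 0.15, 0.05]]))

    # Scale by temperature before filtering, exactly as the removed line did.
    temperature = 0.7
    scaled = logprobs * (1 / temperature)

    filtered = min_p_sampling(scaled, 0.05)  # min_p threshold of 0.05
    token = mx.random.categorical(filtered, axis=-1)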
@@ -163,9 +158,16 @@ def min_p_sampling(
     # Create pool of tokens with probability less than scaled min_p
     selected_logprobs = mx.where(tokens_to_remove, -float("inf"), sorted_logprobs)

-    # Return sampled tokens
-    sorted_tokens = mx.random.categorical(selected_logprobs, axis=-1)[:, None]
-    return mx.take_along_axis(sorted_indices, sorted_tokens, axis=-1).squeeze(1)
+    # Create a mapping to rearrange back to original indices
+    # Use argsort of sorted_indices to get the inverse permutation
+    inverse_indices = mx.argsort(sorted_indices, axis=-1)
+
+    # Rearrange selected_logprobs back to original order
+    original_order_logprobs = mx.take_along_axis(
+        selected_logprobs, inverse_indices, axis=-1
+    )
+
+    return original_order_logprobs


 @partial(mx.compile, inputs=mx.random.state, outputs=mx.random.state)
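
The restored-order logic relies on a standard identity: the argsort of a permutation is its inverse, so applying it to the sorted values puts them back in their original positions. A small standalone check (not from the commit):

    import mlx.core as mx

    x = mx.array([[0.1, 0.7, 0.2]])
    order = mx.argsort(-x, axis=-1)        # permutation sorting x in decreasing order
    inverse = mx.argsort(order, axis=-1)   # argsort of a permutation inverts it

    sorted_x = mx.take_along_axis(x, order, axis=-1)
    restored = mx.take_along_axis(sorted_x, inverse, axis=-1)
    assert restored.tolist() == x.tolist()  # round-trips to the original order

The remaining hunks update the unit tests to match the new return values.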
@@ -9,28 +9,28 @@ class TestSampleUtils(unittest.TestCase):
         probs = mx.array([0.9, 0.0, 0.0, 0.1])[None]
         logits = mx.log(probs)

-        actual_logits = top_p_sampling(logits, 0.3)
-        actual_probs = mx.softmax(actual_logits.squeeze())
+        new_logits = top_p_sampling(logits, 0.3)
+        actual_probs = mx.softmax(new_logits.squeeze())
         self.assertEqual(actual_probs.tolist(), [1.0, 0.0, 0.0, 0.0])

-        actual_logits = top_p_sampling(logits, 0.95)
-        actual_probs = mx.softmax(actual_logits.squeeze())
+        new_logits = top_p_sampling(logits, 0.95)
+        actual_probs = mx.softmax(new_logits.squeeze())
         self.assertEqual(probs.squeeze().tolist(), actual_probs.tolist())

         probs = mx.array([0.0, 0.5, 0.4, 0.1])[None]
         logits = mx.log(probs)
-        actual_logits = top_p_sampling(logits, 0.4)
-        actual_probs = mx.softmax(actual_logits.squeeze())
+        new_logits = top_p_sampling(logits, 0.4)
+        actual_probs = mx.softmax(new_logits.squeeze())
         self.assertEqual(actual_probs.tolist(), [0.0, 1.0, 0.0, 0.0])

-        actual_logits = top_p_sampling(logits, 0.6)
-        actual_probs = mx.softmax(actual_logits.squeeze())
+        new_logits = top_p_sampling(logits, 0.6)
+        actual_probs = mx.softmax(new_logits.squeeze())
         self.assertEqual(
             [round(p, 4) for p in actual_probs.tolist()], [0.0, 0.5556, 0.4444, 0.0]
         )

-        actual_logits = top_p_sampling(logits, 0.95)
-        actual_probs = mx.softmax(actual_logits.squeeze())
+        new_logits = top_p_sampling(logits, 0.95)
+        actual_probs = mx.softmax(new_logits.squeeze())
         actual_rounded = [round(p, 4) for p in actual_probs.tolist()]
         expected_rounded = [0.0, 0.5, 0.4, 0.1]
         self.assertEqual(actual_rounded, expected_rounded)
@@ -39,8 +39,8 @@ class TestSampleUtils(unittest.TestCase):
         # Batch mode works
         probs = mx.array([[0.9, 0.0, 0.0, 0.1], [0.0, 0.8, 0.1, 0.1]])
         logits = mx.log(probs)
-        actual_logits = top_p_sampling(logits, 0.5)
-        actual_probs = mx.softmax(actual_logits, axis=-1)
+        new_logits = top_p_sampling(logits, 0.5)
+        actual_probs = mx.softmax(new_logits, axis=-1)
         self.assertEqual(
             actual_probs.tolist(), [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]]
         )
@@ -48,43 +48,50 @@ class TestSampleUtils(unittest.TestCase):
     def test_min_p_sampling(self):
         probs = mx.array([0.9, 0.0, 0.0, 0.1])[None]
         logits = mx.log(probs)
-        temperature = 1.0
-        token = min_p_sampling(logits, 0.8)
-        self.assertEqual(token, 0)
+        new_logits = min_p_sampling(logits, 0.8)
+        actual_probs = mx.softmax(new_logits.squeeze())
+        self.assertEqual(actual_probs.tolist(), [1.0, 0.0, 0.0, 0.0])

         probs = mx.array([0.9, 0.0, 0.0, 0.1])[None]
         logits = mx.log(probs)
-        temperature = 1.0
-        for _ in range(5):
-            token = min_p_sampling(logits, 0.05)
-            self.assertTrue(token in (0, 3))
+        new_logits = min_p_sampling(logits, 0.05)
+        actual_probs = mx.softmax(new_logits.squeeze())
+        self.assertEqual(actual_probs.tolist(), mx.squeeze(probs).tolist())

         # Batch mode works
         probs = mx.array([[0.9, 0.0, 0.0, 0.1], [0.0, 0.8, 0.0, 0.1]])
         logits = mx.log(probs)
-        tokens = min_p_sampling(logits, 0.7)
-        self.assertEqual(tokens.tolist(), [0, 1])
+        new_logits = min_p_sampling(logits, 0.7)
+        actual_probs = mx.softmax(new_logits, axis=-1)
+        self.assertEqual(
+            actual_probs.tolist(), [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]]
+        )

     def test_top_k_sampling(self):
         probs = mx.array([0.9, 0.0, 0.0, 0.1])[None]
         logits = mx.log(probs)

-        token = top_k_sampling(logits, 1).item()
-        self.assertEqual(token, 0)
+        new_logits = top_k_sampling(logits, 1)
+        actual_probs = mx.softmax(new_logits.squeeze())
+        self.assertEqual(actual_probs.tolist(), [1.0, 0.0, 0.0, 0.0])

-        probs = mx.array([0.5, 0.0, 0.0, 0.5])[None]
-        tokens = set()
-        for _ in range(100):
-            token = top_k_sampling(logits, 2)
-            tokens.add(token.item())
-        self.assertEqual(tokens, {0, 3})
+        probs = mx.array([0.6, 0.0, 0.1, 0.3])[None]
+        logits = mx.log(probs)
+        new_logits = top_k_sampling(logits, 2)
+        actual_probs = mx.softmax(new_logits.squeeze())
+        self.assertEqual(
+            [round(p, 4) for p in actual_probs.tolist()], [0.6667, 0.0, 0.0, 0.3333]
+        )

         # Batch mode works
         probs = mx.array([[0.9, 0.0, 0.0, 0.1], [0.0, 0.8, 0.0, 0.1]])
         logits = mx.log(probs)

-        tokens = top_k_sampling(logits, 1)
-        self.assertEqual(tokens.tolist(), [0, 1])
+        new_logits = top_k_sampling(logits, 1)
+        actual_probs = mx.softmax(new_logits, axis=-1)
+        self.assertEqual(
+            actual_probs.tolist(), [[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0]]
+        )


 if __name__ == "__main__":