diff --git a/llms/deepseek-coder/convert.py b/llms/deepseek-coder/convert.py
index 06c681c4..f37dbe38 100644
--- a/llms/deepseek-coder/convert.py
+++ b/llms/deepseek-coder/convert.py
@@ -44,7 +44,7 @@ def convert(args):
     config = model.config.to_dict()
     state_dict = model.state_dict()
 
-    tokenizer = AutoTokenizer.from_pretrained(str(hf_path), trust_remote_code=True)
+    tokenizer = AutoTokenizer.from_pretrained(str(hf_path), trust_remote_code=True, use_fast=False)
 
     # things to change
     # 1. there's no "model." in the weight names
@@ -84,7 +84,7 @@ def convert(args):
 
     weights = {k: v.numpy() for k, v in state_dict.items()}
 
-    config["rope_scaling_factor"] = config["rope_scaling"]["factor"]
+    config["rope_scaling_factor"] = config["rope_scaling"]["factor"] if config["rope_scaling"] is not None else 1.0
     keep_keys = set(
         [
             "vocab_size",
@@ -96,6 +96,7 @@
             "rms_norm_eps",
             "intermediate_size",
             "rope_scaling_factor",
+            "rope_theta"
         ]
     )
     for k in list(config.keys()):
@@ -151,4 +152,4 @@ if __name__ == "__main__":
     tokenizer.save_pretrained(mlx_path)
     with open(mlx_path / "config.json", "w") as f:
         config["model_type"] = "deepseek_coder"
-        json.dump(config, f, indent=4)
\ No newline at end of file
+        json.dump(config, f, indent=4)
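
For context, a minimal sketch of the behavior the guarded rope_scaling lookup above is meant to provide (the helper name and the sample config dicts here are illustrative assumptions, not taken from the repo):

    # Some DeepSeek checkpoints set "rope_scaling" to null in config.json,
    # so indexing config["rope_scaling"]["factor"] unconditionally raises
    # a TypeError on those models.
    def rope_scaling_factor(config: dict) -> float:
        scaling = config.get("rope_scaling")
        # Fall back to 1.0 (i.e. no scaling) when rope_scaling is absent or null.
        return scaling["factor"] if scaling is not None else 1.0

    assert rope_scaling_factor({"rope_scaling": {"factor": 4.0}}) == 4.0
    assert rope_scaling_factor({"rope_scaling": None}) == 1.0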