Make sure to use UTF-8 when loading tokenizer.json

This commit is contained in:
Mirko Nasato 2025-03-12 14:24:32 +00:00
parent 4c3df00162
commit d8e6996254

View File

@@ -352,7 +352,7 @@ def load_tokenizer(model_path, tokenizer_config_extra={}, eos_token_ids=None):
tokenizer_file = model_path / "tokenizer.json"
if tokenizer_file.exists():
-        with open(tokenizer_file, "r") as fid:
+        with open(tokenizer_file, "r", encoding="utf-8") as fid:
tokenizer_content = json.load(fid)
if "decoder" in tokenizer_content:
if _is_spm_decoder(tokenizer_content["decoder"]):