
Commit 9403622

llama : add support for Tekken pre-tokenizer (#8579)
* llama : Added support for Tekken pre-tokenizer (#8577)

  Removed unneeded `vocab.tokenizer_clean_spaces` assignment

* llama : fix order of pre-tokenizers

* Tekken pre-tokenizer no longer uses clean_up_tokenization_spaces

* Updated chkhsh for Tekken tokenizer

---------

Co-authored-by: Georgi Gerganov <[email protected]>
1 parent 69b9945 commit 9403622

File tree: 4 files changed, +18 -0 lines changed


convert_hf_to_gguf.py

+3
```diff
@@ -593,6 +593,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
         if chkhsh == "b53802fb28e26d645c3a310b34bfe07da813026ec7c7716883404d5e0f8b1901":
             # ref: https://huggingface.co/core42/jais-13b
             res = "jais"
+        if chkhsh == "63b97e4253352e6f357cc59ea5b583e3a680eaeaf2632188c2b952de2588485e":
+            # ref: https://huggingface.co/mistralai/Mistral-Nemo-Base-2407
+            res = "tekken"
 
         if res is None:
             logger.warning("\n")
```
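For context, `get_vocab_base_pre` identifies the pre-tokenizer by hashing the token IDs that the reference tokenizer produces for a fixed probe string; an unknown hash falls through to the warning path shown above. A minimal sketch of that mechanism, assuming `transformers` is installed and using a hypothetical stand-in for the probe text (the real `chktxt` in convert_hf_to_gguf.py is much longer and must match exactly to reproduce the hashes):

```python
# Minimal sketch of the chkhsh detection. CHKTXT is a stand-in; the real
# probe string is defined in convert_hf_to_gguf.py.
from hashlib import sha256
from transformers import AutoTokenizer

CHKTXT = "placeholder probe text"  # hypothetical stand-in

tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-Nemo-Base-2407")
chktok = tokenizer.encode(CHKTXT)
chkhsh = sha256(str(chktok).encode()).hexdigest()

res = None
if chkhsh == "63b97e4253352e6f357cc59ea5b583e3a680eaeaf2632188c2b952de2588485e":
    # ref: https://huggingface.co/mistralai/Mistral-Nemo-Base-2407
    res = "tekken"
print(res)  # "tekken" only when CHKTXT is the real probe string
```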

convert_hf_to_gguf_update.py

+1
```diff
@@ -91,6 +91,7 @@ class TOKENIZER_TYPE(IntEnum):
     {"name": "gemma-2", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/google/gemma-2-9b", },
     {"name": "jais", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/core42/jais-13b", },
     {"name": "t5", "tokt": TOKENIZER_TYPE.UGM, "repo": "https://huggingface.co/google-t5/t5-small", },
+    {"name": "tekken", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mistralai/Mistral-Nemo-Base-2407", },
 ]
 
```

include/llama.h

+1
```diff
@@ -92,6 +92,7 @@ extern "C" {
         LLAMA_VOCAB_PRE_TYPE_CHATGLM4   = 17,
         LLAMA_VOCAB_PRE_TYPE_VIKING     = 18,
         LLAMA_VOCAB_PRE_TYPE_JAIS       = 19,
+        LLAMA_VOCAB_PRE_TYPE_TEKKEN     = 20,
     };
 
     // note: these values should be synchronized with ggml_rope
```

src/llama.cpp

+13
```diff
@@ -5524,6 +5524,12 @@ static void llm_load_vocab(
             } else if (
                 tokenizer_pre == "jais") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_JAIS;
+            } else if (
+                tokenizer_pre == "tekken") {
+                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_TEKKEN;
+                vocab.tokenizer_clean_spaces = false;
+                vocab.tokenizer_ignore_merges = true;
+                vocab.tokenizer_add_bos = true;
 
             } else {
                 throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
             }
```
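The three vocab flags set in the new branch mirror how the reference Hugging Face tokenizer behaves: decoded text is not post-processed to remove spaces (the commit drops clean_up_tokenization_spaces for Tekken), tokens already present in the vocabulary bypass BPE merge splitting (the `ignore_merges` option in the model's tokenizer.json), and a BOS token is prepended to encodings. A rough sketch for observing that reference behavior, assuming `transformers` is installed and the repo is accessible:

```python
# Rough sketch (not part of the commit): inspect the reference tokenizer
# behavior that the vocab flags above mirror. Outputs noted in comments
# are expectations, not guarantees.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("mistralai/Mistral-Nemo-Base-2407")

ids = tok("Hello world!").input_ids
print(tok.convert_ids_to_tokens(ids))  # expect a BOS token (e.g. "<s>") first
print(tok.decode(ids))                 # expect spacing preserved as tokenized,
                                       # i.e. no clean_up_tokenization_spaces
```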
```diff
@@ -15585,6 +15591,13 @@ struct llm_tokenizer_bpe {
                     "\\p{N}",
                 };
                 break;
+            case LLAMA_VOCAB_PRE_TYPE_TEKKEN:
+                // original regex from tokenizer.json
+                // "[^\\r\\n\\p{L}\\p{N}]?[\\p{Lu}\\p{Lt}\\p{Lm}\\p{Lo}\\p{M}]*[\\p{Ll}\\p{Lm}\\p{Lo}\\p{M}]+|[^\\r\\n\\p{L}\\p{N}]?[\\p{Lu}\\p{Lt}\\p{Lm}\\p{Lo}\\p{M}]+[\\p{Ll}\\p{Lm}\\p{Lo}\\p{M}]*|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n/]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+"
+                regex_exprs = {
+                    "[^\\r\\n\\p{L}\\p{N}]?((?=[\\p{L}])([^a-z]))*((?=[\\p{L}])([^A-Z]))+|[^\\r\\n\\p{L}\\p{N}]?((?=[\\p{L}])([^a-z]))+((?=[\\p{L}])([^A-Z]))*|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n/]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
+                };
+                break;
             default:
                 // default regex for BPE tokenization pre-processing
                 regex_exprs = {
```
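The case body keeps the original tokenizer.json pattern as a comment because it is not used verbatim: presumably the regex handling available to llama.cpp does not cover Unicode letter subcategories, so classes like `\p{Lu}` and `\p{Ll}` are approximated with lookaheads, where `(?=[\p{L}])([^a-z])` means "a letter that is not ASCII lowercase" and `(?=[\p{L}])([^A-Z])` "a letter that is not ASCII uppercase". A sketch for comparing the two patterns, using Python's third-party `regex` package (which supports both the `\p{..}` subcategories and the lookaheads); the sample inputs are illustrative:

```python
# Sketch (not part of the commit) comparing the original tokenizer.json
# pattern with the translated approximation used in llama.cpp.
import regex

ORIGINAL = (
    r"[^\r\n\p{L}\p{N}]?[\p{Lu}\p{Lt}\p{Lm}\p{Lo}\p{M}]*[\p{Ll}\p{Lm}\p{Lo}\p{M}]+"
    r"|[^\r\n\p{L}\p{N}]?[\p{Lu}\p{Lt}\p{Lm}\p{Lo}\p{M}]+[\p{Ll}\p{Lm}\p{Lo}\p{M}]*"
    r"|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n/]*|\s*[\r\n]+|\s+(?!\S)|\s+"
)

TRANSLATED = (
    r"[^\r\n\p{L}\p{N}]?((?=[\p{L}])([^a-z]))*((?=[\p{L}])([^A-Z]))+"
    r"|[^\r\n\p{L}\p{N}]?((?=[\p{L}])([^a-z]))+((?=[\p{L}])([^A-Z]))*"
    r"|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n/]*|\s*[\r\n]+|\s+(?!\S)|\s+"
)

def pieces(pattern, text):
    # finditer + group(0) returns whole matches; findall would instead
    # return capture groups, since both patterns contain parentheses
    return [m.group(0) for m in regex.finditer(pattern, text)]

# non-ASCII text is where the approximation can diverge from the original
for text in ["Hello world!", "llama.cpp adds tekken 123\n", "héllo Wörld"]:
    a, b = pieces(ORIGINAL, text), pieces(TRANSLATED, text)
    print("same" if a == b else "differ", a, b)
```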
