Skip to content

Commit f5d7b26

Browse files
JoanFM and ggerganov authored
llama : add jina v2 base code (#7596)
* feat: add changes to handle jina v2 base code * fix: do not complicate things * fix: fix the usage of the code model * fix: fix comments * fix: fix linting issues * fix: remove ollama patches * style : minor --------- Co-authored-by: Georgi Gerganov <[email protected]>
1 parent 2d08b7f commit f5d7b26

5 files changed: +24 −5 lines changed

convert-hf-to-gguf-update.py

+1
Original file line numberDiff line numberDiff line change
@@ -83,6 +83,7 @@ class TOKENIZER_TYPE(IntEnum):
8383
{"name": "jina-v2-es", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-es", },
8484
{"name": "jina-v2-de", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-de", },
8585
{"name": "smaug-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct", },
86+
{"name": "jina-v2-code", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-code", },
8687
]
8788

8889

convert-hf-to-gguf.py

+6-1
Original file line numberDiff line numberDiff line change
@@ -475,6 +475,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
475475
if chkhsh == "c136ed14d01c2745d4f60a9596ae66800e2b61fa45643e72436041855ad4089d":
476476
# ref: https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct
477477
res = "smaug-bpe"
478+
if chkhsh == "7967bfa498ade6b757b064f31e964dddbb80f8f9a4d68d4ba7998fcf281c531a":
479+
# ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-code
480+
res = "jina-v2-code"
478481

479482
if res is None:
480483
logger.warning("\n")
@@ -2452,11 +2455,13 @@ def __init__(self, *args, **kwargs):
24522455

24532456
def get_tensors(self):
24542457
for name, data in super().get_tensors():
2455-
if 'gated_layers' in name:
2458+
if 'gated_layer' in name:
24562459
d1 = data[:self.intermediate_size, :]
24572460
name1 = name.replace('gated_layers', 'gated_layers_w')
2461+
name1 = name1.replace('up_gated_layer', 'gated_layers_v')
24582462
d2 = data[self.intermediate_size:, :]
24592463
name2 = name.replace('gated_layers', 'gated_layers_v')
2464+
name2 = name2.replace('up_gated_layer', 'gated_layers_w')
24602465
yield name1, d1
24612466
yield name2, d2
24622467
continue

gguf-py/gguf/constants.py

+1
Original file line numberDiff line numberDiff line change
@@ -415,6 +415,7 @@ class MODEL_TENSOR(IntEnum):
415415
MODEL_TENSOR.TOKEN_EMBD,
416416
MODEL_TENSOR.TOKEN_EMBD_NORM,
417417
MODEL_TENSOR.TOKEN_TYPES,
418+
MODEL_TENSOR.ATTN_NORM_2,
418419
MODEL_TENSOR.ATTN_OUT_NORM,
419420
MODEL_TENSOR.ATTN_Q,
420421
MODEL_TENSOR.ATTN_Q_NORM,

gguf-py/gguf/tensor_mapping.py

+3
Original file line numberDiff line numberDiff line change
@@ -102,6 +102,7 @@ class TensorNameMap:
102102
# Attention norm 2
103103
MODEL_TENSOR.ATTN_NORM_2: (
104104
"transformer.h.{bid}.ln_attn", # falcon40b
105+
"encoder.layer.{bid}.layer_norm_1", # jina-v2-code
105106
),
106107

107108
# Attention query-key-value
@@ -311,6 +312,7 @@ class TensorNameMap:
311312
"model.layers.{bid}.mlp.c_proj", # starcoder2
312313
"encoder.layer.{bid}.mlp.wo", # jina-bert-v2
313314
"model.layers.{bid}.residual_mlp.w2", # arctic
315+
"encoder.layer.{bid}.mlp.down_layer", # jina-bert-v2
314316
),
315317

316318
MODEL_TENSOR.FFN_DOWN_EXP: (
@@ -350,6 +352,7 @@ class TensorNameMap:
350352
"encoder.layers.{bid}.norm2", # nomic-bert
351353
"transformer.decoder_layer.{bid}.rms_norm_3", # Grok
352354
"encoder.layer.{bid}.mlp.layernorm", # jina-bert-v2
355+
"encoder.layer.{bid}.layer_norm_2" # jina-v2-code
353356
),
354357

355358
MODEL_TENSOR.SSM_IN: (

llama.cpp

+13-4
Original file line numberDiff line numberDiff line change
@@ -704,6 +704,7 @@ static const std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NA
704704
{ LLM_TENSOR_TOKEN_EMBD, "token_embd" },
705705
{ LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
706706
{ LLM_TENSOR_TOKEN_TYPES, "token_types" },
707+
{ LLM_TENSOR_ATTN_NORM_2, "blk.%d.attn_norm_2" },
707708
{ LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
708709
{ LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
709710
{ LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
@@ -4653,8 +4654,7 @@ static void llm_load_vocab(
46534654
LLAMA_LOG_WARN("%s: ************************************ \n", __func__);
46544655
LLAMA_LOG_WARN("%s: \n", __func__);
46554656
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
4656-
} else if (
4657-
tokenizer_pre == "default") {
4657+
} else if (tokenizer_pre == "default") {
46584658
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DEFAULT;
46594659
} else if (
46604660
tokenizer_pre == "llama3" ||
@@ -4681,7 +4681,8 @@ static void llm_load_vocab(
46814681
tokenizer_pre == "jina-es" ||
46824682
tokenizer_pre == "jina-de" ||
46834683
tokenizer_pre == "jina-v2-es" ||
4684-
tokenizer_pre == "jina-v2-de") {
4684+
tokenizer_pre == "jina-v2-de" ||
4685+
tokenizer_pre == "jina-v2-code") {
46854686
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT2;
46864687
} else if (
46874688
tokenizer_pre == "refact") {
@@ -5515,7 +5516,7 @@ static bool llm_load_tensors(
55155516

55165517
layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd});
55175518
} else {
5518-
layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
5519+
layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
55195520
}
55205521

55215522
layer.layer_out_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd});
@@ -5556,6 +5557,9 @@ static bool llm_load_tensors(
55565557
layer.attn_out_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}); //output_norm
55575558
layer.attn_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "bias", i), {n_embd});
55585559

5560+
layer.attn_norm_2 = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM_2, "weight", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
5561+
layer.attn_norm_2_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_NORM_2, "bias", i), {n_embd}, llama_model_loader::TENSOR_NOT_REQUIRED);
5562+
55595563
layer.ffn_up = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff});
55605564
layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
55615565

@@ -8519,6 +8523,11 @@ struct llm_build_context {
85198523
// attention layer norm
85208524
cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].attn_out_norm, model.layers[il].attn_out_norm_b, LLM_NORM, cb, il);
85218525

8526+
if (model.layers[il].attn_norm_2 != nullptr) {
8527+
cur = ggml_add(ctx0, cur, inpL); // re-add the layer input
8528+
cur = llm_build_norm(ctx0, cur, hparams, model.layers[il].attn_norm_2, model.layers[il].attn_norm_2_b, LLM_NORM, cb, il);
8529+
}
8530+
85228531
struct ggml_tensor * ffn_inp = cur;
85238532
cb(ffn_inp, "ffn_inp", il);
85248533

0 commit comments

Comments (0)