Commit d5a1cbd

llama : support optional tensors (#4283)
1 parent b220222

2 files changed: +10 −25

examples/server/server.cpp (+1 −1)

@@ -1469,7 +1469,7 @@ struct llama_server_context
 
     int split_multiprompt_task(task_server& multiprompt_task)
     {
-        auto prompt_count = multiprompt_task.data.at("prompt").size();
+        int prompt_count = multiprompt_task.data.at("prompt").size();
         assert(prompt_count > 1);
 
         int multitask_id = id_gen++;
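The auto → int change matters because size() on the JSON "prompt" array returns an unsigned size_type; deducing that with auto makes every later comparison and subtraction unsigned. Below is a minimal standalone sketch of that pitfall, not from this commit, with a std::vector standing in for the JSON array:

#include <cassert>
#include <cstdio>
#include <vector>

int main() {
    // stand-in for multiprompt_task.data.at("prompt")
    std::vector<int> prompts = {1, 2};

    auto n_auto = prompts.size();  // deduces to unsigned size_t
    int  n_int  = prompts.size();  // narrowing, but signed from here on

    // With the unsigned count, subtracting past zero wraps around to a
    // huge value instead of going negative; the signed int behaves as
    // expected and avoids signed/unsigned mismatch warnings in loops.
    assert(n_int > 1);
    for (int i = 0; i < n_int; i++) {
        printf("prompt %d of %d\n", i + 1, n_int);
    }
    (void) n_auto;
    return 0;
}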

llama.cpp (+9 −24)

@@ -1991,10 +1991,13 @@ struct llama_model_loader {
         return tensor;
     }
 
-    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend) {
+    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend, bool optional = false) {
         struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, name.c_str());
 
         if (cur == NULL) {
+            if (optional) {
+                return NULL;
+            }
             throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
         }
 
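The new flag turns a missing tensor from a hard error into a NULL sentinel that callers opt into per tensor, while required lookups keep throwing. A self-contained sketch of the same lookup contract, with a std::map standing in for the ggml metadata context (toy_loader and find_weight are hypothetical names, not from the commit):

#include <cstdio>
#include <map>
#include <stdexcept>
#include <string>

struct toy_loader {
    std::map<std::string, float> weights;

    // Same contract as the patched create_tensor: throw on a missing name
    // by default, return a NULL sentinel instead when optional is true.
    const float * find_weight(const std::string & name, bool optional = false) const {
        auto it = weights.find(name);
        if (it == weights.end()) {
            if (optional) {
                return nullptr;
            }
            throw std::runtime_error("tensor '" + name + "' not found");
        }
        return &it->second;
    }
};

int main() {
    toy_loader ml;
    ml.weights["attn_q.weight"] = 1.0f;

    const float * wq = ml.find_weight("attn_q.weight");      // required: present
    const float * bq = ml.find_weight("attn_q.bias", true);  // optional: absent -> nullptr

    printf("wq=%f bq=%s\n", *wq, bq ? "present" : "NULL");
    return 0;
}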
@@ -2812,29 +2815,11 @@ static void llm_load_tensors(
                     layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V,   "weight", i), {n_embd, n_embd_gqa}, backend_split);
                     layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd},     backend_split);
 
-                    try {
-                        layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, backend);
-                    } catch (const std::runtime_error& e) {
-                        if (std::string(e.what()).find("not found") != std::string::npos) layer.bq = NULL; else throw;
-                    }
-
-                    try {
-                        layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, backend);
-                    } catch (const std::runtime_error& e) {
-                        if (std::string(e.what()).find("not found") != std::string::npos) layer.bk = NULL; else throw;
-                    }
-
-                    try {
-                        layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, backend);
-                    } catch (const std::runtime_error& e) {
-                        if (std::string(e.what()).find("not found") != std::string::npos) layer.bv = NULL; else throw;
-                    }
-
-                    try {
-                        layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
-                    } catch (const std::runtime_error& e) {
-                        if (std::string(e.what()).find("not found") != std::string::npos) layer.bo = NULL; else throw;
-                    }
+                    // optional bias tensors
+                    layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q,   "bias", i), {n_embd},     backend, true);
+                    layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K,   "bias", i), {n_embd_gqa}, backend, true);
+                    layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V,   "bias", i), {n_embd_gqa}, backend, true);
+                    layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd},     backend, true);
 
                     layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
 
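After this hunk, layer.bq/bk/bv/bo may legitimately be NULL, so any code that consumes them has to check before use. A hedged sketch of the consuming side, which is not part of this diff (apply_attn_bias is a hypothetical helper; ggml_add is the real ggml element-wise/broadcast add):

#include "ggml.h"

// Hypothetical helper showing how graph-building code can consume a bias
// loaded with optional = true: skip the add when the tensor is NULL.
static struct ggml_tensor * apply_attn_bias(
        struct ggml_context * ctx0,
        struct ggml_tensor  * cur,    // e.g. the Q projection output
        struct ggml_tensor  * bias) { // layer.bq/bk/bv/bo, possibly NULL
    if (bias == NULL) {
        return cur;                   // model file carried no bias tensor
    }
    return ggml_add(ctx0, cur, bias);
}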