@@ -716,6 +716,8 @@ static const std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NA
             { LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
             { LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
             { LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
+            { LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
+            { LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
         },
     },
     {
@@ -1744,6 +1746,7 @@ enum e_model {
     MODEL_4B,
     MODEL_7B,
     MODEL_8B,
+    MODEL_12B,
     MODEL_13B,
     MODEL_14B,
     MODEL_15B,
@@ -3607,6 +3610,7 @@ static const char * llama_model_type_name(e_model type) {
         case MODEL_3B: return "3B";
         case MODEL_7B: return "7B";
         case MODEL_8B: return "8B";
+        case MODEL_12B: return "12B";
         case MODEL_13B: return "13B";
         case MODEL_14B: return "14B";
         case MODEL_15B: return "15B";
@@ -3898,6 +3902,7 @@ static void llm_load_hparams(
                switch (hparams.n_layer) {
                    case 24: model.type = e_model::MODEL_1B; break;
                    case 32: model.type = e_model::MODEL_3B; break;
+                   case 40: model.type = e_model::MODEL_12B; break;
                    default: model.type = e_model::MODEL_UNKNOWN;
                }
            } break;
@@ -5128,8 +5133,13 @@ static bool llm_load_tensors(
                        layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, false);
                        layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, false);

-                       layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd});
-                       layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd});
+                       // optional q and k layernorms, present in StableLM 2 12B
+                       layer.attn_q_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {hparams.n_embd_head_k, hparams.n_head}, false);
+                       layer.attn_k_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {hparams.n_embd_head_k, hparams.n_head_kv}, false);
+
+                       // optional FFN norm, not present in StableLM 2 12B which uses parallel residual
+                       layer.ffn_norm = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, false);
+                       layer.ffn_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, false);

                        layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
                        layer.ffn_down = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), { n_ff, n_embd});
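A side note on the trailing `false` argument used above: it marks a tensor as optional rather than required, which is what lets one loader path serve both the smaller StableLM 2 variants (ffn_norm present, no q/k norms) and StableLM 2 12B (q/k norms present, no ffn_norm). Below is a minimal standalone sketch of that optional-tensor pattern; the names (`toy_tensor`, the weight map, the `create_tensor` helper) are hypothetical stand-ins and not the llama.cpp loader API.

    #include <cstdio>
    #include <map>
    #include <string>

    struct toy_tensor { std::string name; };   // stand-in for ggml_tensor, illustration only

    // Return nullptr instead of reporting an error when `required` is false and
    // the tensor is absent -- the behaviour the `false` flag requests above.
    static toy_tensor * create_tensor(std::map<std::string, toy_tensor> & weights,
                                      const std::string & name, bool required) {
        auto it = weights.find(name);
        if (it == weights.end()) {
            if (required) {
                std::fprintf(stderr, "missing required tensor: %s\n", name.c_str());
            }
            return nullptr;
        }
        return &it->second;
    }

    int main() {
        // pretend this model ships q/k norms but no ffn_norm (the 12B layout)
        std::map<std::string, toy_tensor> weights = {
            { "blk.0.attn_q_norm.weight", { "blk.0.attn_q_norm.weight" } },
            { "blk.0.attn_k_norm.weight", { "blk.0.attn_k_norm.weight" } },
        };

        toy_tensor * attn_q_norm = create_tensor(weights, "blk.0.attn_q_norm.weight", /*required=*/false);
        toy_tensor * ffn_norm    = create_tensor(weights, "blk.0.ffn_norm.weight",    /*required=*/false);

        // the graph builder later branches on presence, mirroring the
        // if (model.layers[il].attn_q_norm) / if (model.layers[il].ffn_norm) checks
        std::printf("attn_q_norm %s, ffn_norm %s\n",
                    attn_q_norm ? "present" : "absent",
                    ffn_norm    ? "present" : "absent");
        return 0;
    }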
@@ -8197,7 +8207,7 @@ struct llm_build_context {
         struct ggml_tensor * KQ_mask = build_inp_KQ_mask();

         for (int il = 0; il < n_layer; ++il) {
-            struct ggml_tensor * inpSA = inpL;
+

             // norm
             cur = llm_build_norm(ctx0, inpL, hparams,
@@ -8206,6 +8216,8 @@ struct llm_build_context {
                     LLM_NORM, cb, il);
             cb(cur, "attn_norm", il);

+            struct ggml_tensor * inpSA = cur;
+
             // self-attention
             {
                 // compute Q and K and RoPE them
@@ -8230,15 +8242,36 @@ struct llm_build_context {
                     cb(Vcur, "Vcur", il);
                 }

+                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
+                cb(Qcur, "Qcur", il);
+                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
+                cb(Kcur, "Kcur", il);
+
+                if (model.layers[il].attn_q_norm) {
+                    Qcur = llm_build_norm(ctx0, Qcur, hparams,
+                            model.layers[il].attn_q_norm,
+                            NULL,
+                            LLM_NORM, cb, il);
+                    cb(Qcur, "Qcur", il);
+                }
+                if (model.layers[il].attn_k_norm) {
+                    Kcur = llm_build_norm(ctx0, Kcur, hparams,
+                            model.layers[il].attn_k_norm,
+                            NULL,
+                            LLM_NORM, cb, il);
+                    cb(Kcur, "Kcur", il);
+                }
+
+
                 Qcur = ggml_rope_custom(
-                    ctx0, ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens), inp_pos,
+                    ctx0, Qcur, inp_pos,
                     n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                     ext_factor, attn_factor, beta_fast, beta_slow
                 );
                 cb(Qcur, "Qcur", il);

                 Kcur = ggml_rope_custom(
-                    ctx0, ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens), inp_pos,
+                    ctx0, Kcur, inp_pos,
                     n_rot, rope_type, 0, n_orig_ctx, freq_base, freq_scale,
                     ext_factor, attn_factor, beta_fast, beta_slow
                 );
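For orientation: applying `llm_build_norm` with `LLM_NORM` to the reshaped [n_embd_head, n_head, n_tokens] tensor amounts to a LayerNorm over each head's n_embd_head values, scaled by the per-head weight loaded above ({n_embd_head_k, n_head} for Q, {n_embd_head_k, n_head_kv} for K) with no bias (the NULL argument), and it runs before RoPE. The following rough standalone sketch illustrates that per-head normalization; it assumes a [head_dim, n_head, n_tokens] layout with head_dim contiguous and a fixed eps, and is not the ggml implementation.

    #include <cmath>
    #include <cstdio>
    #include <vector>

    // Normalize each head's head_dim-long vector independently, then scale by a
    // per-head weight laid out as [head_dim, n_head] -- the shape used above.
    static void per_head_layernorm(std::vector<float> & x, const std::vector<float> & gamma,
                                   int head_dim, int n_head, int n_tokens, float eps = 1e-5f) {
        for (int t = 0; t < n_tokens; ++t) {
            for (int h = 0; h < n_head; ++h) {
                float * v = &x[(size_t)(t * n_head + h) * head_dim];
                float mean = 0.0f;
                for (int i = 0; i < head_dim; ++i) mean += v[i];
                mean /= head_dim;
                float var = 0.0f;
                for (int i = 0; i < head_dim; ++i) var += (v[i] - mean) * (v[i] - mean);
                var /= head_dim;
                const float inv = 1.0f / std::sqrt(var + eps);
                for (int i = 0; i < head_dim; ++i) {
                    v[i] = (v[i] - mean) * inv * gamma[(size_t)h * head_dim + i];
                }
            }
        }
    }

    int main() {
        const int head_dim = 4, n_head = 2, n_tokens = 1;
        std::vector<float> q = {1, 2, 3, 4,  4, 3, 2, 1};         // one token, two heads
        std::vector<float> gamma((size_t)head_dim * n_head, 1.0f); // per-head scale weights
        per_head_layernorm(q, gamma, head_dim, n_head, n_tokens);
        for (float f : q) std::printf("%.3f ", f);
        std::printf("\n");
        return 0;
    }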
@@ -8253,20 +8286,25 @@ struct llm_build_context {
                 // skip computing output for unused tokens
                 struct ggml_tensor * inp_out_ids = build_inp_out_ids();
                 cur = ggml_get_rows(ctx0, cur, inp_out_ids);
+                inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
                 inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
             }

-            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+            struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
             cb(ffn_inp, "ffn_inp", il);

             // feed-forward network
             {
-                cur = llm_build_norm(ctx0, ffn_inp, hparams,
-                        model.layers[il].ffn_norm,
-                        model.layers[il].ffn_norm_b,
-                        LLM_NORM, cb, il);
-                cb(cur, "ffn_norm", il);
-
+                if (model.layers[il].ffn_norm) {
+                    cur = llm_build_norm(ctx0, ffn_inp, hparams,
+                            model.layers[il].ffn_norm,
+                            model.layers[il].ffn_norm_b,
+                            LLM_NORM, cb, il);
+                    cb(cur, "ffn_norm", il);
+                } else {
+                    // parallel residual
+                    cur = inpSA;
+                }
                 cur = llm_build_ffn(ctx0, cur,
                         model.layers[il].ffn_up, NULL,
                         model.layers[il].ffn_gate, NULL,
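A note on the else branch above: when `ffn_norm` is absent, the FFN reuses `inpSA`, which after this change points at the post-`attn_norm` activation rather than the raw layer input, so attention and FFN read the same normalized input and the layer output becomes roughly inpL + attn_out + ffn_out once the FFN output is added back onto `ffn_inp` further down the builder (outside this hunk). The following small self-contained sketch contrasts the two residual wirings; the `norm`, `attention`, and `ffn` functions are identity placeholders, only the dataflow is the point.

    #include <cstddef>
    #include <vector>

    using vec = std::vector<float>;

    static vec add(const vec & a, const vec & b) {
        vec out(a.size());
        for (size_t i = 0; i < a.size(); ++i) out[i] = a[i] + b[i];
        return out;
    }

    // placeholders for the real sub-blocks; only the residual wiring matters here
    static vec norm(const vec & x)      { return x; }
    static vec attention(const vec & x) { return x; }
    static vec ffn(const vec & x)       { return x; }

    // sequential residual (ffn_norm present):
    //   h   = x + attn(attn_norm(x))
    //   out = h + ffn(ffn_norm(h))
    static vec layer_sequential(const vec & x) {
        vec h = add(x, attention(norm(x)));
        return add(h, ffn(norm(h)));
    }

    // parallel residual (no ffn_norm, StableLM 2 12B path):
    //   n   = attn_norm(x)           // "inpSA" in the graph above
    //   out = x + attn(n) + ffn(n)   // ffn_inp = x + attn(n), then + ffn(n)
    static vec layer_parallel(const vec & x) {
        vec n = norm(x);
        vec ffn_inp = add(x, attention(n));
        return add(ffn_inp, ffn(n));
    }

    int main() {
        vec x = {1.0f, 2.0f, 3.0f};
        vec a = layer_sequential(x);
        vec b = layer_parallel(x);
        return (a.size() == b.size()) ? 0 : 1;
    }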