
Commit ac36aee

Committed on Dec 3, 2023
Merge branch 'master' into concedo_experimental
# Conflicts:
#	CMakeLists.txt
#	Makefile

2 parents: 48544cd + 33e171d

File tree (10 files changed: +443 −46 lines)

convert-hf-to-gguf.py
examples/batched.swift/Sources/main.swift
examples/llama.swiftui/llama.cpp.swift/LibLlama.swift
examples/server/api_like_OAI.py
examples/server/server.cpp
ggml-metal.m
gguf-py/gguf/constants.py
gguf-py/gguf/tensor_mapping.py
llama.cpp
prompts/chat-with-qwen.txt

convert-hf-to-gguf.py

+130 −1

@@ -10,7 +10,7 @@
 import sys
 from enum import IntEnum
 from pathlib import Path
-from typing import TYPE_CHECKING, Any, ContextManager, Iterator, cast
+from typing import TYPE_CHECKING, Any, ContextManager, Iterator, cast, Optional

 import numpy as np
 import torch

@@ -168,6 +168,8 @@ def from_model_architecture(model_architecture):
             return PersimmonModel
         if model_architecture in ("StableLMEpochForCausalLM", "LlavaStableLMEpochForCausalLM"):
             return StableLMModel
+        if model_architecture == "QWenLMHeadModel":
+            return QwenModel
         return Model

     def _is_model_safetensors(self) -> bool:

@@ -203,6 +205,8 @@ def _get_model_architecture(self) -> gguf.MODEL_ARCH:
             return gguf.MODEL_ARCH.PERSIMMON
         if arch in ("StableLMEpochForCausalLM", "LlavaStableLMEpochForCausalLM"):
             return gguf.MODEL_ARCH.STABLELM
+        if arch == "QWenLMHeadModel":
+            return gguf.MODEL_ARCH.QWEN

         raise NotImplementedError(f'Architecture "{arch}" not supported!')

@@ -832,6 +836,131 @@ def set_gguf_parameters(self):
         self.gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True)
         self.gguf_writer.add_layer_norm_eps(1e-5)

+
+class QwenModel(Model):
+    @staticmethod
+    def token_bytes_to_string(b):
+        from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode
+        byte_encoder = bytes_to_unicode()
+        return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')])
+
+    @staticmethod
+    def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: Optional[int] = None) -> list[bytes]:
+        parts = [bytes([b]) for b in token]
+        while True:
+            min_idx = None
+            min_rank = None
+            for i, pair in enumerate(zip(parts[:-1], parts[1:])):
+                rank = mergeable_ranks.get(pair[0] + pair[1])
+                if rank is not None and (min_rank is None or rank < min_rank):
+                    min_idx = i
+                    min_rank = rank
+            if min_rank is None or (max_rank is not None and min_rank >= max_rank):
+                break
+            assert min_idx is not None
+            parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:]
+        return parts
+
+    def set_vocab(self):
+        dir_model = self.dir_model
+        hparams = self.hparams
+        tokens: list[bytearray] = []
+        toktypes: list[int] = []
+
+        from transformers import AutoTokenizer  # type: ignore[attr-defined]
+        tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
+        vocab_size = hparams["vocab_size"]
+        assert max(tokenizer.get_vocab().values()) < vocab_size
+
+        merges = []
+        vocab = {}
+        mergeable_ranks = tokenizer.mergeable_ranks
+        for token, rank in mergeable_ranks.items():
+            vocab[self.token_bytes_to_string(token)] = rank
+            if len(token) == 1:
+                continue
+            merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank)
+            assert len(merged) == 2
+            merges.append(' '.join(map(self.token_bytes_to_string, merged)))
+
+        reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in vocab.items()}
+        added_vocab = tokenizer.special_tokens
+
+        for i in range(vocab_size):
+            if i not in reverse_vocab:
+                pad_token = f"[PAD{i}]".encode("utf-8")
+                tokens.append(bytearray(pad_token))
+                toktypes.append(gguf.TokenType.USER_DEFINED)
+            elif reverse_vocab[i] in added_vocab:
+                tokens.append(reverse_vocab[i])
+                toktypes.append(gguf.TokenType.CONTROL)
+            else:
+                tokens.append(reverse_vocab[i])
+                toktypes.append(gguf.TokenType.NORMAL)
+
+        self.gguf_writer.add_tokenizer_model("gpt2")
+        self.gguf_writer.add_token_list(tokens)
+        self.gguf_writer.add_token_types(toktypes)
+
+        special_vocab = gguf.SpecialVocab(dir_model, load_merges=False)
+        special_vocab.merges = merges
+        special_vocab._set_special_token("bos", tokenizer.special_tokens["<|endoftext|>"])
+        special_vocab._set_special_token("eos", tokenizer.special_tokens["<|endoftext|>"])
+        special_vocab._set_special_token("unk", tokenizer.special_tokens["<|endoftext|>"])
+        special_vocab.add_to_gguf(self.gguf_writer)
+
+    def set_gguf_parameters(self):
+        self.gguf_writer.add_name("Qwen")
+        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
+        self.gguf_writer.add_block_count(self.hparams["num_hidden_layers"])
+        self.gguf_writer.add_embedding_length(self.hparams["hidden_size"])
+        self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
+        self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"])
+        self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"])
+        self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
+        self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"])
+
+    def write_tensors(self):
+        block_count = self.hparams["num_hidden_layers"]
+        model_kv = dict(self.get_tensors())
+        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
+        for name, data_torch in model_kv.items():
+            # we don't need these
+            if name.endswith(".rotary_emb.inv_freq"):
+                continue
+
+            old_dtype = data_torch.dtype
+
+            # convert any unsupported data types to float32
+            if data_torch.dtype not in (torch.float16, torch.float32):
+                data_torch = data_torch.to(torch.float32)
+
+            data = data_torch.squeeze().numpy()
+
+            # map tensor names
+            new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
+            if new_name is None:
+                print(f"Can not map tensor {name!r}")
+                sys.exit()
+
+            n_dims = len(data.shape)
+            data_dtype = data.dtype
+
+            # if f32 desired, convert any float16 to float32
+            if self.ftype == 0 and data_dtype == np.float16:
+                data = data.astype(np.float32)
+
+            # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
+            if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
+                data = data.astype(np.float32)
+
+            # if f16 desired, convert any float32 2-dim weight tensors to float16
+            if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
+                data = data.astype(np.float16)
+
+            print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+            self.gguf_writer.add_tensor(new_name, data)
+
 ###### CONVERSION LOGIC ######
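
Note on the change above: QwenModel converts Qwen's rank-table vocabulary (tokenizer.mergeable_ranks) into the GPT-2-style merges list that GGUF expects, and the bpe() helper is what recovers, for each multi-byte token, the two pieces whose merge produced it. Below is a minimal, self-contained sketch of that recovery; the toy rank table is invented for illustration and is not part of the commit.

from typing import Optional

def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: Optional[int] = None) -> list[bytes]:
    # same idea as QwenModel.bpe above: greedily apply the lowest-rank merge,
    # stopping before any merge whose rank is >= max_rank (the token's own rank)
    parts = [bytes([b]) for b in token]
    while True:
        min_idx = None
        min_rank = None
        for i, pair in enumerate(zip(parts[:-1], parts[1:])):
            rank = mergeable_ranks.get(pair[0] + pair[1])
            if rank is not None and (min_rank is None or rank < min_rank):
                min_idx = i
                min_rank = rank
        if min_rank is None or (max_rank is not None and min_rank >= max_rank):
            break
        assert min_idx is not None
        parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:]
    return parts

ranks = {b"ab": 0, b"abc": 1}          # hypothetical rank table
print(bpe(ranks, b"abc", max_rank=1))  # -> [b'ab', b'c'], the pair recorded as a merge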

examples/batched.swift/Sources/main.swift

+3 −7

@@ -230,18 +230,15 @@ private func token_to_piece(token: llama_token, buffer: inout [CChar]) -> String
     var result = [CChar](repeating: 0, count: 8)
     let nTokens = llama_token_to_piece(model, token, &result, Int32(result.count))
     if nTokens < 0 {
-        if result.count >= -Int(nTokens) {
-            result.removeLast(-Int(nTokens))
-        } else {
-            result.removeAll()
-        }
+        let actualTokensCount = -Int(nTokens)
+        result = .init(repeating: 0, count: actualTokensCount)
         let check = llama_token_to_piece(
             model,
             token,
             &result,
             Int32(result.count)
         )
-        assert(check == nTokens)
+        assert(check == actualTokensCount)
     } else {
         result.removeLast(result.count - Int(nTokens))
     }

@@ -259,5 +256,4 @@ private func token_to_piece(token: llama_token, buffer: inout [CChar]) -> String
         buffer = []
         return bufferString
     }
-    return nil
 }
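
The rewritten Swift above leans on the llama_token_to_piece convention that a negative return value is the negated buffer size actually required, so the buffer is re-allocated at that size and the call retried. A rough Python sketch of the same retry pattern; impl is a hypothetical stand-in callable, not a real binding:

def token_to_piece(impl, token, initial_size=8):
    # impl(token, buf) is a stand-in that behaves like llama_token_to_piece:
    # it fills buf and returns the byte count, or a negative value whose
    # magnitude is the buffer size that would have been needed
    buf = bytearray(initial_size)
    n = impl(token, buf)
    if n < 0:
        buf = bytearray(-n)   # grow to the reported size and retry
        n = impl(token, buf)
        assert n == len(buf)  # mirrors assert(check == actualTokensCount)
    return bytes(buf[:n]).decode("utf-8", errors="replace")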

examples/llama.swiftui/llama.cpp.swift/LibLlama.swift

+16 −8

@@ -164,13 +164,21 @@ actor LlamaContext {
     private func token_to_piece(token: llama_token) -> String {
         let result = UnsafeMutablePointer<Int8>.allocate(capacity: 8)
         result.initialize(repeating: Int8(0), count: 8)
-
-        let _ = llama_token_to_piece(model, token, result, 8)
-
-        let resultStr = String(cString: result)
-
-        result.deallocate()
-
-        return resultStr
+        defer {
+            result.deallocate()
+        }
+        let nTokens = llama_token_to_piece(model, token, result, 8)
+
+        if nTokens < 0 {
+            let newResult = UnsafeMutablePointer<Int8>.allocate(capacity: Int(-nTokens))
+            newResult.initialize(repeating: Int8(0), count: Int(-nTokens))
+            defer {
+                newResult.deallocate()
+            }
+            _ = llama_token_to_piece(model, token, newResult, -nTokens)
+            return String(cString: newResult)
+        } else {
+            return String(cString: result)
+        }
     }
 }

examples/server/api_like_OAI.py

+1

@@ -70,6 +70,7 @@ def make_postData(body, chat=False, stream=False):
     if(is_present(body, "mirostat_tau")): postData["mirostat_tau"] = body["mirostat_tau"]
     if(is_present(body, "mirostat_eta")): postData["mirostat_eta"] = body["mirostat_eta"]
     if(is_present(body, "seed")): postData["seed"] = body["seed"]
+    if(is_present(body, "grammar")): postData["grammar"] = body["grammar"]
     if(is_present(body, "logit_bias")): postData["logit_bias"] = [[int(token), body["logit_bias"][token]] for token in body["logit_bias"].keys()]
     if (args.stop != ""):
         postData["stop"] = [args.stop]
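
With the added line, a GBNF grammar sent by an OpenAI-style client is forwarded to the underlying llama.cpp server instead of being dropped. A hedged example request against the wrapper; the address, port and route are assumptions based on the script's usual defaults, not something this diff shows:

import requests

payload = {
    "messages": [{"role": "user", "content": "Answer yes or no: is 7 prime?"}],
    "grammar": 'root ::= "yes" | "no"',  # GBNF grammar, now forwarded by make_postData
}
# assumed default address of api_like_OAI.py; adjust host/port to your setup
r = requests.post("http://127.0.0.1:8081/chat/completions", json=payload)
print(r.json())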

examples/server/server.cpp

+2 −4

@@ -1470,7 +1470,7 @@ struct llama_server_context

     int split_multiprompt_task(task_server& multiprompt_task)
     {
-        auto prompt_count = multiprompt_task.data.at("prompt").size();
+        int prompt_count = multiprompt_task.data.at("prompt").size();
         assert(prompt_count > 1);

         int multitask_id = id_gen++;

@@ -2411,9 +2411,7 @@ json oaicompat_completion_params_parse(
     }

     // Handle 'stop' field
-    if (body["stop"].is_null()) {
-        llama_params["stop"] = json::array({});
-    } else if (body["stop"].is_string()) {
+    if (body.contains("stop") && body["stop"].is_string()) {
         llama_params["stop"] = json::array({body["stop"].get<std::string>()});
     } else {
         llama_params["stop"] = json_value(body, "stop", json::array());
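
In plain terms, the new branch wraps a string-valued "stop" in a one-element array and lets everything else (absent, null, or already an array) fall through to the json_value default. A rough Python rendering of that normalization, for illustration only:

def normalize_stop(body: dict) -> list:
    stop = body.get("stop")
    if isinstance(stop, str):   # "stop": "</s>"  ->  ["</s>"]
        return [stop]
    if isinstance(stop, list):  # already an array: pass through
        return stop
    return []                   # missing or null: empty array

assert normalize_stop({"stop": "</s>"}) == ["</s>"]
assert normalize_stop({"stop": ["\n", "###"]}) == ["\n", "###"]
assert normalize_stop({}) == []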

ggml-metal.m

+1 −1

@@ -1083,7 +1083,7 @@ void ggml_metal_graph_compute(

 // find the break-even point where the matrix-matrix kernel becomes more efficient compared
 // to the matrix-vector kernel
-int ne11_mm_min = 1;
+int ne11_mm_min = src0t == GGML_TYPE_F16 ? 1 : 16;

 #if 0
 // the numbers below are measured on M2 Ultra for 7B and 13B models
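
The changed constant means F16 weights use the matrix-matrix kernel even at batch size 1, while other types stay on the matrix-vector kernel until the batch reaches 16 rows. A loose Python sketch of that threshold; it illustrates the intent only and omits the other conditions the real dispatch in ggml_metal_graph_compute checks:

def prefer_mat_mat_kernel(ne11: int, src0_is_f16: bool) -> bool:
    # illustrative only: break-even batch size before the matrix-matrix kernel wins
    ne11_mm_min = 1 if src0_is_f16 else 16
    return ne11 >= ne11_mm_min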

gguf-py/gguf/constants.py

+20

@@ -92,6 +92,7 @@ class MODEL_ARCH(IntEnum):
     BERT = auto()
     BLOOM = auto()
     STABLELM = auto()
+    QWEN = auto()


 class MODEL_TENSOR(IntEnum):

@@ -132,6 +133,7 @@ class MODEL_TENSOR(IntEnum):
     MODEL_ARCH.BERT: "bert",
     MODEL_ARCH.BLOOM: "bloom",
     MODEL_ARCH.STABLELM: "stablelm",
+    MODEL_ARCH.QWEN: "qwen",
 }

 TENSOR_NAMES: dict[MODEL_TENSOR, str] = {

@@ -317,6 +319,20 @@ class MODEL_TENSOR(IntEnum):
         MODEL_TENSOR.FFN_DOWN,
         MODEL_TENSOR.FFN_UP,
     ],
+    MODEL_ARCH.QWEN: [
+        MODEL_TENSOR.TOKEN_EMBD,
+        MODEL_TENSOR.OUTPUT_NORM,
+        MODEL_TENSOR.OUTPUT,
+        MODEL_TENSOR.ROPE_FREQS,
+        MODEL_TENSOR.ATTN_NORM,
+        MODEL_TENSOR.ATTN_QKV,
+        MODEL_TENSOR.ATTN_OUT,
+        MODEL_TENSOR.ATTN_ROT_EMBD,
+        MODEL_TENSOR.FFN_NORM,
+        MODEL_TENSOR.FFN_GATE,
+        MODEL_TENSOR.FFN_DOWN,
+        MODEL_TENSOR.FFN_UP,
+    ],
     MODEL_ARCH.GPT2: [
         # TODO
     ],

@@ -336,6 +352,10 @@ class MODEL_TENSOR(IntEnum):
     MODEL_ARCH.PERSIMMON: [
         MODEL_TENSOR.ROPE_FREQS,
     ],
+    MODEL_ARCH.QWEN: [
+        MODEL_TENSOR.ROPE_FREQS,
+        MODEL_TENSOR.ATTN_ROT_EMBD,
+    ],
 }

 #
gguf-py/gguf/tensor_mapping.py

+10 −8

@@ -10,7 +10,7 @@ class TensorNameMap:
         # Token embeddings
         MODEL_TENSOR.TOKEN_EMBD: (
             "gpt_neox.embed_in", # gptneox
-            "transformer.wte", # gpt2 gpt-j mpt refact
+            "transformer.wte", # gpt2 gpt-j mpt refact qwen
             "transformer.word_embeddings", # falcon
             "word_embeddings", # bloom
             "model.embed_tokens", # llama-hf

@@ -38,7 +38,7 @@ class TensorNameMap:
         # Output
         MODEL_TENSOR.OUTPUT: (
             "embed_out", # gptneox
-            "lm_head", # gpt2 mpt falcon llama-hf baichuan
+            "lm_head", # gpt2 mpt falcon llama-hf baichuan qwen
             "output", # llama-pth bloom
             "word_embeddings_for_head", # persimmon
         ),

@@ -51,7 +51,7 @@ class TensorNameMap:
             "norm", # llama-pth
             "embeddings.LayerNorm", # bert
             "transformer.norm_f", # mpt
-            "ln_f", # refact bloom
+            "ln_f", # refact bloom qwen
             "language_model.encoder.final_layernorm", # persimmon
         ),

@@ -65,7 +65,7 @@ class TensorNameMap:
         # Attention norm
         MODEL_TENSOR.ATTN_NORM: (
             "gpt_neox.layers.{bid}.input_layernorm", # gptneox
-            "transformer.h.{bid}.ln_1", # gpt2 gpt-j refact
+            "transformer.h.{bid}.ln_1", # gpt2 gpt-j refact qwen
             "transformer.blocks.{bid}.norm_1", # mpt
             "transformer.h.{bid}.input_layernorm", # falcon7b
             "h.{bid}.input_layernorm", # bloom

@@ -85,7 +85,7 @@ class TensorNameMap:
         # Attention query-key-value
         MODEL_TENSOR.ATTN_QKV: (
             "gpt_neox.layers.{bid}.attention.query_key_value", # gptneox
-            "transformer.h.{bid}.attn.c_attn", # gpt2
+            "transformer.h.{bid}.attn.c_attn", # gpt2 qwen
             "transformer.blocks.{bid}.attn.Wqkv", # mpt
             "transformer.h.{bid}.self_attention.query_key_value", # falcon
             "h.{bid}.self_attention.query_key_value", # bloom

@@ -119,7 +119,7 @@ class TensorNameMap:
         # Attention output
         MODEL_TENSOR.ATTN_OUT: (
             "gpt_neox.layers.{bid}.attention.dense", # gptneox
-            "transformer.h.{bid}.attn.c_proj", # gpt2 refact
+            "transformer.h.{bid}.attn.c_proj", # gpt2 refact qwen
             "transformer.blocks.{bid}.attn.out_proj", # mpt
             "transformer.h.{bid}.self_attention.dense", # falcon
             "h.{bid}.self_attention.dense", # bloom

@@ -139,7 +139,7 @@ class TensorNameMap:
         # Feed-forward norm
         MODEL_TENSOR.FFN_NORM: (
             "gpt_neox.layers.{bid}.post_attention_layernorm", # gptneox
-            "transformer.h.{bid}.ln_2", # gpt2 refact
+            "transformer.h.{bid}.ln_2", # gpt2 refact qwen
             "h.{bid}.post_attention_layernorm", # bloom
             "transformer.blocks.{bid}.norm_2", # mpt
             "model.layers.{bid}.post_attention_layernorm", # llama-hf

@@ -161,18 +161,20 @@ class TensorNameMap:
             "encoder.layer.{bid}.intermediate.dense", # bert
             "transformer.h.{bid}.mlp.fc_in", # gpt-j
             "language_model.encoder.layers.{bid}.mlp.dense_h_to_4h", # persimmon
+            "transformer.h.{bid}.mlp.w1", # qwen
         ),

         # Feed-forward gate
         MODEL_TENSOR.FFN_GATE: (
             "model.layers.{bid}.mlp.gate_proj", # llama-hf refact
             "layers.{bid}.feed_forward.w1", # llama-pth
+            "transformer.h.{bid}.mlp.w2", # qwen
         ),

         # Feed-forward down
         MODEL_TENSOR.FFN_DOWN: (
             "gpt_neox.layers.{bid}.mlp.dense_4h_to_h", # gptneox
-            "transformer.h.{bid}.mlp.c_proj", # gpt2 refact
+            "transformer.h.{bid}.mlp.c_proj", # gpt2 refact qwen
             "transformer.blocks.{bid}.ffn.down_proj", # mpt
             "transformer.h.{bid}.mlp.dense_4h_to_h", # falcon
             "h.{bid}.mlp.dense_4h_to_h", # bloom

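Taken together, the "# qwen" tags above let TensorNameMap resolve raw Qwen checkpoint names to GGUF names, which is what QwenModel.write_tensors relies on. A hedged sketch; the block count of 32 is hypothetical and the expected output is inferred from the mapping shown, not printed anywhere in this diff:

import gguf

# hypothetical 32-block Qwen checkpoint
tensor_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.QWEN, 32)

# "transformer.h.{bid}.mlp.w1" is tagged "# qwen" under FFN_UP above, so:
name = tensor_map.get_name("transformer.h.0.mlp.w1.weight", try_suffixes=(".weight", ".bias"))
print(name)  # expected: blk.0.ffn_up.weight
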
llama.cpp

+259 −17

Large diff; not rendered on this page.

prompts/chat-with-qwen.txt

+1

@@ -0,0 +1 @@
+You are a helpful assistant.
