
Commit d8ea75e

Merge 'origin/master' into hipblas

Committed Apr 29, 2023 · 2 parents d194586 + 334637e

19 files changed: +989 −584 lines

Diff for: Makefile (+3 −2)

@@ -106,6 +106,7 @@ ifdef LLAMA_OPENBLAS
 endif
 ifdef LLAMA_CUBLAS
 	CFLAGS   += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include
+	CXXFLAGS += -DGGML_USE_CUBLAS -I/usr/local/cuda/include -I/opt/cuda/include -I$(CUDA_PATH)/targets/x86_64-linux/include
 	LDFLAGS  += -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib
 	OBJS     += ggml-cuda.o
 	NVCC     = nvcc
@@ -177,10 +178,10 @@ $(info )
 # Build library
 #
 
-ggml.o: ggml.c ggml.h
+ggml.o: ggml.c ggml.h ggml-cuda.h
 	$(CC)  $(CFLAGS)   -c $< -o $@
 
-llama.o: llama.cpp ggml.h llama.h llama_util.h
+llama.o: llama.cpp ggml.h ggml-cuda.h llama.h llama_util.h
 	$(CXX) $(CXXFLAGS) -c $< -o $@
 
 common.o: examples/common.cpp examples/common.h
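
Note (commentary, not part of the diff): `ggml-cuda.h` is now a prerequisite of both `ggml.o` and `llama.o`, so touching it rebuilds both objects, and `CXXFLAGS` gains the same `-DGGML_USE_CUBLAS` define that `CFLAGS` already had, so C++ translation units also see the cuBLAS code paths. Since the block is guarded by `ifdef LLAMA_CUBLAS`, a CUDA-enabled build is selected by defining that make variable, e.g. `make LLAMA_CUBLAS=1`.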

Diff for: README.md (+18 −19)

@@ -281,30 +281,29 @@ When running the larger models, make sure you have enough disk space to store al
 
 As the models are currently fully loaded into memory, you will need adequate disk space to save them and sufficient RAM to load them. At the moment, memory and disk requirements are the same.
 
-| model | original size | quantized size (4-bit) |
-|-------|---------------|------------------------|
-| 7B | 13 GB | 3.9 GB |
-| 13B | 24 GB | 7.8 GB |
-| 30B | 60 GB | 19.5 GB |
-| 65B | 120 GB | 38.5 GB |
+| Model | Original size | Quantized size (4-bit) |
+|------:|--------------:|------------------------:|
+| 7B | 13 GB | 3.9 GB |
+| 13B | 24 GB | 7.8 GB |
+| 30B | 60 GB | 19.5 GB |
+| 65B | 120 GB | 38.5 GB |
 
 ### Quantization
 
 Several quantization methods are supported. They differ in the resulting model disk size and inference speed.
 
-Model | F16 | Q4_0 | Q4_1 | Q4_2 | Q4_3 | Q5_0 | Q5_1 | Q8_0
--- | -- | -- | -- | -- | -- | -- | -- | --
-7B (ppl) | 5.9565 | 6.2103 | 6.1286 | 6.1698 | 6.0617 | 6.0139 | 5.9934 | 5.9571
-7B (size) | 13.0G | 4.0G | 4.8G | 4.0G | 4.8G | 4.4G | 4.8G | 7.1G
-7B (ms/tok @ 4th) | 128 | 56 | 61 | 84 | 91 | 91 | 95 | 75
-7B (ms/tok @ 8th) | 128 | 47 | 55 | 48 | 53 | 53 | 59 | 75
-7B (bpw) | 16.0 | 5.0 | 6.0 | 5.0 | 6.0 | 5.5 | 6.0 | 9.0
--- | -- | -- | -- | -- | -- | -- | -- | --
-13B (ppl) | 5.2455 | 5.3748 | 5.3471 | 5.3433 | 5.3234 | 5.2768 | 5.2582 | 5.2458
-13B (size) | 25.0G | 7.6G | 9.1G | 7.6G | 9.1G | 8.4G | 9.1G | 14G
-13B (ms/tok @ 4th) | 239 | 104 | 113 | 160 | 175 | 176 | 185 | 141
-13B (ms/tok @ 8th) | 240 | 85 | 99 | 97 | 114 | 108 | 117 | 147
-13B (bpw) | 16.0 | 5.0 | 6.0 | 5.0 | 6.0 | 5.5 | 6.0 | 9.0
+| Model | Measure | F16 | Q4_0 | Q4_1 | Q4_2 | Q5_0 | Q5_1 | Q8_0 |
+|------:|--------------|-------:|-------:|-------:|-------:|-------:|-------:|-------:|
+| 7B | perplexity | 5.9565 | 6.2103 | 6.1286 | 6.1698 | 6.0139 | 5.9934 | 5.9571 |
+| 7B | file size | 13.0G | 4.0G | 4.8G | 4.0G | 4.4G | 4.8G | 7.1G |
+| 7B | ms/tok @ 4th | 128 | 56 | 61 | 84 | 91 | 95 | 75 |
+| 7B | ms/tok @ 8th | 128 | 47 | 55 | 48 | 53 | 59 | 75 |
+| 7B | bits/weight | 16.0 | 5.0 | 6.0 | 5.0 | 5.5 | 6.0 | 9.0 |
+| 13B | perplexity | 5.2455 | 5.3748 | 5.3471 | 5.3433 | 5.2768 | 5.2582 | 5.2458 |
+| 13B | file size | 25.0G | 7.6G | 9.1G | 7.6G | 8.4G | 9.1G | 14G |
+| 13B | ms/tok @ 4th | 239 | 104 | 113 | 160 | 176 | 185 | 141 |
+| 13B | ms/tok @ 8th | 240 | 85 | 99 | 97 | 108 | 117 | 147 |
+| 13B | bits/weight | 16.0 | 5.0 | 6.0 | 5.0 | 5.5 | 6.0 | 9.0 |
 
 
 ### Interactive mode
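
Aside: the bits/weight row of the new table can be cross-checked from the quantization block layouts. A minimal sketch, assuming the 32-weight block formats ggml used at this time (the byte counts below are an assumption, not stated in this diff):

```cpp
// Hypothetical cross-check of the README's bits/weight row.
// Assumed block layouts (32 weights per block):
//   Q4_0: 16 bytes of 4-bit quants + fp32 scale                  -> 20 bytes
//   Q4_1: 16 bytes of 4-bit quants + fp32 scale + fp32 min       -> 24 bytes
//   Q5_0: 16 bytes of low nibbles + 4 bytes of 5th bits + fp16 scale -> 22 bytes
//   Q8_0: 32 bytes of 8-bit quants + fp32 scale                  -> 36 bytes
#include <cstdio>

int main() {
    struct Fmt { const char * name; int block_bytes; };
    const Fmt fmts[] = { {"Q4_0", 20}, {"Q4_1", 24}, {"Q5_0", 22}, {"Q8_0", 36} };
    for (const Fmt & f : fmts) {
        // 8 bits per byte, 32 weights per block
        printf("%s: %.1f bits/weight\n", f.name, 8.0 * f.block_bytes / 32);
    }
    return 0;
}
```

Under these assumptions the output reproduces the table: 5.0, 6.0, 5.5 and 9.0 bits/weight.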

Diff for: SHA256SUMS (−4)

@@ -3,15 +3,13 @@
 99aeb35f26b577fa2732716cca4d8b5ada39a78ea9b2dca2651fc632b5d101b6  models/7B/ggml-model-q4_0.bin
 cc061458339a3eb8bcecbf0a825e9924fb7d1a8150f63cd5d091caa99215aafe  models/7B/ggml-model-q4_1.bin
 25b050337a87344da687a7f2adddc03bd99b7f6c140450e836649f3585fb6496  models/7B/ggml-model-q4_2.bin
-3429bf198ec771886cf81a574df45245f3ebf04f0ce0956b73ef5d0ab01ff48b  models/7B/ggml-model-q4_3.bin
 7e89e242ddc0dd6f060b43ca219ce8b3e8f08959a72cb3c0855df8bb04d46265  models/7B/params.json
 745bf4e29a4dd6f411e72976d92b452da1b49168a4f41c951cfcc8051823cf08  models/13B/consolidated.00.pth
 d5ccbcc465c71c0de439a5aeffebe8344c68a519bce70bc7f9f92654ee567085  models/13B/consolidated.01.pth
 2b206e9b21fb1076f11cafc624e2af97c9e48ea09312a0962153acc20d45f808  models/13B/ggml-model-f16.bin
 eecb575d325d935157761172e2bf05984dad216eb2b06777b73463cf9b818bab  models/13B/ggml-model-q4_0.bin
 d9581b5b88e5622532fe897c9f9b0e67a317d22dd27a6f90fa4ab8c6d23ccdbb  models/13B/ggml-model-q4_1.bin
 75a218a47df03f5f96354656329864613abcb67779412b9bc2282b28c1c3cbaa  models/13B/ggml-model-q4_2.bin
-4208cdec9788ffa48dc1a17af2c36a0299f5bf3eb0e2b87889dda7fad591fca3  models/13B/ggml-model-q4_3.bin
 4ab77bec4d4405ccb66a97b282574c89a94417e3c32e5f68f37e2876fc21322f  models/13B/params.json
 e23294a58552d8cdec5b7e8abb87993b97ea6eced4178ff2697c02472539d067  models/30B/consolidated.00.pth
 4e077b7136c7ae2302e954860cf64930458d3076fcde9443f4d0e939e95903ff  models/30B/consolidated.01.pth
@@ -21,7 +19,6 @@ e23294a58552d8cdec5b7e8abb87993b97ea6eced4178ff2697c02472539d067 models/30B/con
 517b9e525742c42b5478a6280a4b41ec66f46298c57aba7f0453d491682fe42d  models/30B/ggml-model-q4_0.bin
 7b75ac615fa369ee593493a7e6ef87542bf0350255db928b22c5a24f6d598bcd  models/30B/ggml-model-q4_1.bin
 aadbc9cf806313a55be570f62884eed289d30c313fac3b7838717e01bd553204  models/30B/ggml-model-q4_2.bin
-a6188660199dbcb8d5658abe7d89169869e50423494385830d9e6b330ea7fc33  models/30B/ggml-model-q4_3.bin
 2c07118ea98d69dbe7810d88520e30288fa994751b337f8fca02b171955f44cb  models/30B/params.json
 135c563f6b3938114458183afb01adc9a63bef3d8ff7cccc3977e5d3664ecafe  models/65B/consolidated.00.pth
 9a600b37b19d38c7e43809485f70d17d1dc12206c07efa83bc72bb498a568bde  models/65B/consolidated.01.pth
@@ -35,6 +32,5 @@ d27f5b0677d7ff129ceacd73fd461c4d06910ad7787cf217b249948c3f3bc638 models/65B/con
 01672072136f8be6ca9d7cebe5f86ed316e8b85851b9fe3de951809233cea4f2  models/65B/ggml-model-q4_0.bin
 4743a28aac3e5f32a6e838a815f51d3779de44fbbe251d745251e66c23c5950f  models/65B/ggml-model-q4_1.bin
 1b6f6588d0e2ecfe6c4d849088e48e5e3083466b962daa32e3261363e21fc5e9  models/65B/ggml-model-q4_2.bin
-305e91a4608b4f627b9b8ad5b4af75187d2684254bfd76dcb9db571618ef293c  models/65B/ggml-model-q4_3.bin
 999ed1659b469ccc2a941714c0a9656fa571d17c9f7c8c7589817ca90edef51b  models/65B/params.json
 9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347  models/tokenizer.model
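
Note (commentary, not part of the diff): the Q4_3 checksums are dropped because that format was removed. A SHA256SUMS file in this standard `hash  path` format can be verified against local model files with `sha256sum -c SHA256SUMS` from the repository root.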

Diff for: examples/common.cpp (+85 −6)

@@ -6,6 +6,8 @@
 #include <string>
 #include <iterator>
 #include <algorithm>
+#include <sstream>
+#include <iostream>
 
 #if defined (_WIN32)
 #include <fcntl.h>
@@ -114,6 +116,18 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
                 break;
             }
             params.temp = std::stof(argv[i]);
+        } else if (arg == "--tfs") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            params.tfs_z = std::stof(argv[i]);
+        } else if (arg == "--typical") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            params.typical_p = std::stof(argv[i]);
         } else if (arg == "--repeat_last_n") {
             if (++i >= argc) {
                 invalid_param = true;
@@ -126,6 +140,36 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
                 break;
             }
             params.repeat_penalty = std::stof(argv[i]);
+        } else if (arg == "--frequency_penalty") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            params.frequency_penalty = std::stof(argv[i]);
+        } else if (arg == "--presence_penalty") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            params.presence_penalty = std::stof(argv[i]);
+        } else if (arg == "--mirostat") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            params.mirostat = std::stoi(argv[i]);
+        } else if (arg == "--mirostat_lr") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            params.mirostat_eta = std::stof(argv[i]);
+        } else if (arg == "--mirostat_ent") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            params.mirostat_tau = std::stof(argv[i]);
         } else if (arg == "-b" || arg == "--batch_size") {
             if (++i >= argc) {
                 invalid_param = true;
@@ -185,7 +229,28 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
         } else if (arg == "--perplexity") {
             params.perplexity = true;
         } else if (arg == "--ignore-eos") {
-            params.ignore_eos = true;
+            params.logit_bias[llama_token_eos()] = -INFINITY;
+        } else if (arg == "--no-penalize-nl") {
+            params.penalize_nl = false;
+        } else if (arg == "-l" || arg == "--logit-bias") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            std::stringstream ss(argv[i]);
+            llama_token key;
+            char sign;
+            std::string value_str;
+            try {
+                if (ss >> key && ss >> sign && std::getline(ss, value_str) && (sign == '+' || sign == '-')) {
+                    params.logit_bias[key] = std::stof(value_str) * ((sign == '-') ? -1.0f : 1.0f);
+                } else {
+                    throw std::exception();
+                }
+            } catch (const std::exception &e) {
+                invalid_param = true;
+                break;
+            }
         } else if (arg == "--n_parts") {
             if (++i >= argc) {
                 invalid_param = true;
@@ -240,12 +305,26 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     fprintf(stderr, "  -f FNAME, --file FNAME\n");
     fprintf(stderr, "                        prompt file to start generation.\n");
     fprintf(stderr, "  -n N, --n_predict N   number of tokens to predict (default: %d, -1 = infinity)\n", params.n_predict);
-    fprintf(stderr, "  --top_k N             top-k sampling (default: %d)\n", params.top_k);
-    fprintf(stderr, "  --top_p N             top-p sampling (default: %.1f)\n", (double)params.top_p);
-    fprintf(stderr, "  --repeat_last_n N     last n tokens to consider for penalize (default: %d)\n", params.repeat_last_n);
-    fprintf(stderr, "  --repeat_penalty N    penalize repeat sequence of tokens (default: %.1f)\n", (double)params.repeat_penalty);
+    fprintf(stderr, "  --top_k N             top-k sampling (default: %d, 0 = disabled)\n", params.top_k);
+    fprintf(stderr, "  --top_p N             top-p sampling (default: %.1f, 1.0 = disabled)\n", (double)params.top_p);
+    fprintf(stderr, "  --tfs N               tail free sampling, parameter z (default: %.1f, 1.0 = disabled)\n", (double)params.tfs_z);
+    fprintf(stderr, "  --typical N           locally typical sampling, parameter p (default: %.1f, 1.0 = disabled)\n", (double)params.typical_p);
+    fprintf(stderr, "  --repeat_last_n N     last n tokens to consider for penalize (default: %d, 0 = disabled, -1 = ctx_size)\n", params.repeat_last_n);
+    fprintf(stderr, "  --repeat_penalty N    penalize repeat sequence of tokens (default: %.1f, 1.0 = disabled)\n", (double)params.repeat_penalty);
+    fprintf(stderr, "  --presence_penalty N  repeat alpha presence penalty (default: %.1f, 0.0 = disabled)\n", (double)params.presence_penalty);
+    fprintf(stderr, "  --frequency_penalty N repeat alpha frequency penalty (default: %.1f, 0.0 = disabled)\n", (double)params.frequency_penalty);
+    fprintf(stderr, "  --mirostat N          use Mirostat sampling.\n");
+    fprintf(stderr, "                        Top K, Nucleus, Tail Free and Locally Typical samplers are ignored if used.\n");
+    fprintf(stderr, "                        (default: %d, 0 = disabled, 1 = Mirostat, 2 = Mirostat 2.0)\n", params.mirostat);
+    fprintf(stderr, "  --mirostat_lr N       Mirostat learning rate, parameter eta (default: %.1f)\n", (double)params.mirostat_eta);
+    fprintf(stderr, "  --mirostat_ent N      Mirostat target entropy, parameter tau (default: %.1f)\n", (double)params.mirostat_tau);
+    fprintf(stderr, "  -l TOKEN_ID(+/-)BIAS, --logit-bias TOKEN_ID(+/-)BIAS\n");
+    fprintf(stderr, "                        modifies the likelihood of token appearing in the completion,\n");
+    fprintf(stderr, "                        i.e. `--logit-bias 15043+1` to increase likelihood of token ' Hello',\n");
+    fprintf(stderr, "                        or `--logit-bias 15043-1` to decrease likelihood of token ' Hello'\n");
     fprintf(stderr, "  -c N, --ctx_size N    size of the prompt context (default: %d)\n", params.n_ctx);
-    fprintf(stderr, "  --ignore-eos          ignore end of stream token and continue generating\n");
+    fprintf(stderr, "  --ignore-eos          ignore end of stream token and continue generating (implies --logit-bias 2-inf)\n");
+    fprintf(stderr, "  --no-penalize-nl      do not penalize newline token\n");
     fprintf(stderr, "  --memory_f32          use f32 instead of f16 for memory key+value\n");
    fprintf(stderr, "  --temp N              temperature (default: %.1f)\n", (double)params.temp);
     fprintf(stderr, "  --n_parts N           number of model parts (default: -1 = determine from dimensions)\n");
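
Background on the new Mirostat flags (commentary, not part of the diff): Mirostat replaces the fixed top-k/top-p cutoffs with an adaptive threshold `mu` that is tuned so the surprise of each sampled token tracks the target entropy `tau`, with `eta` as the step size. Below is a rough, self-contained sketch of one Mirostat 2.0 step as described in the paper; it is not the code this commit adds, and names here are assumptions. `mu` is conventionally initialized to `2 * tau`.

```cpp
// Illustrative sketch of one Mirostat 2.0 sampling step (after Basu et al.);
// structure and names are assumptions, not this commit's implementation.
#include <cmath>
#include <random>
#include <vector>

int mirostat_v2_step(const std::vector<float> & probs, // full softmax distribution
                     float tau,   // target surprise/entropy (--mirostat_ent)
                     float eta,   // learning rate (--mirostat_lr)
                     float & mu,  // adaptive cutoff, initialized to 2 * tau
                     std::mt19937 & rng) {
    // always keep the most probable token so the candidate set is never empty
    int best = 0;
    for (int i = 1; i < (int) probs.size(); ++i) {
        if (probs[i] > probs[best]) best = i;
    }
    // keep candidates whose surprise -log2(p) falls below the cutoff mu
    std::vector<int>   idx;
    std::vector<float> kept;
    for (int i = 0; i < (int) probs.size(); ++i) {
        if (i == best || -std::log2(probs[i]) < mu) {
            idx.push_back(i);
            kept.push_back(probs[i]);
        }
    }
    // sample among the survivors, then nudge mu toward the target entropy
    std::discrete_distribution<int> pick(kept.begin(), kept.end());
    const int   tok      = idx[pick(rng)];
    const float surprise = -std::log2(probs[tok]);
    mu -= eta * (surprise - tau); // feedback: drive observed surprise toward tau
    return tok;
}
```

This feedback loop is why the help text says the Top K, Nucleus, Tail Free and Locally Typical samplers are ignored when Mirostat is enabled: the adaptive cutoff takes over their role.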

Diff for: examples/common.h (+16 −7)

@@ -8,6 +8,7 @@
 #include <vector>
 #include <random>
 #include <thread>
+#include <unordered_map>
 
 //
 // CLI argument parsing
@@ -16,18 +17,26 @@
 struct gpt_params {
     int32_t seed          = -1;  // RNG seed
     int32_t n_threads     = std::min(4, (int32_t) std::thread::hardware_concurrency());
-    int32_t n_predict     = 128; // new tokens to predict
-    int32_t repeat_last_n = 64;  // last n tokens to penalize
+    int32_t n_predict     = -1;  // new tokens to predict
     int32_t n_parts       = -1;  // amount of model parts (-1 = determine from model dimensions)
     int32_t n_ctx         = 512; // context size
     int32_t n_batch       = 512; // batch size for prompt processing (must be >=32 to use BLAS)
     int32_t n_keep        = 0;   // number of tokens to keep from initial prompt
 
     // sampling parameters
-    int32_t top_k          = 40;
-    float   top_p          = 0.95f;
-    float   temp           = 0.80f;
-    float   repeat_penalty = 1.10f;
+    std::unordered_map<llama_token, float> logit_bias; // logit bias for specific tokens
+    int32_t top_k             = 40;    // <= 0 to use vocab size
+    float   top_p             = 0.95f; // 1.0 = disabled
+    float   tfs_z             = 1.00f; // 1.0 = disabled
+    float   typical_p         = 1.00f; // 1.0 = disabled
+    float   temp              = 0.80f; // 1.0 = disabled
+    float   repeat_penalty    = 1.10f; // 1.0 = disabled
+    int32_t repeat_last_n     = 64;    // last n tokens to penalize (0 = disable penalty, -1 = context size)
+    float   frequency_penalty = 0.00f; // 0.0 = disabled
+    float   presence_penalty  = 0.00f; // 0.0 = disabled
+    int     mirostat          = 0;     // 0 = disabled, 1 = mirostat, 2 = mirostat 2.0
+    float   mirostat_tau      = 5.00f; // target entropy
+    float   mirostat_eta      = 0.10f; // learning rate
 
     std::string model  = "models/lamma-7B/ggml-model.bin"; // model path
     std::string prompt = "";
@@ -47,7 +56,7 @@ struct gpt_params {
     bool interactive_first = false; // wait for user input immediately
 
     bool instruct    = false; // instruction mode (used for Alpaca models)
-    bool ignore_eos  = false; // do not stop generating after eos
+    bool penalize_nl = true;  // consider newlines as a repeatable token
     bool perplexity  = false; // compute perplexity over the prompt
     bool use_mmap    = true;  // use mmap for faster loads
     bool use_mlock   = false; // use mlock to keep model in memory
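
A note on the `logit_bias` map that replaces the old `ignore_eos` flag: `--ignore-eos` now simply installs a bias of `-INFINITY` on the EOS token, and `-l`/`--logit-bias` (e.g. `--logit-bias 15043+1`) adds arbitrary entries. A minimal sketch of how such a map would act on raw logits before sampling; the helper below is hypothetical, only the `logit_bias` field comes from this diff:

```cpp
// Hypothetical helper: apply a gpt_params-style logit_bias map to raw logits.
// Biases are additive in log space; a bias of -INFINITY (as --ignore-eos sets
// for EOS) drives that token's probability to zero, since exp(-inf) = 0.
#include <unordered_map>
#include <vector>

using llama_token = int; // the llama.h of this era typedefs llama_token to int

void apply_logit_bias(std::vector<float> & logits,
                      const std::unordered_map<llama_token, float> & logit_bias) {
    for (const auto & it : logit_bias) {
        const llama_token tok = it.first;
        if (tok >= 0 && tok < (int) logits.size()) {
            logits[tok] += it.second;
        }
    }
}
```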
