
Commit ca679fc

ggerganov and olexiyb authored and committed
llama : add option for greedy sampling with probs (ggml-org#3813)
* llama : add option for greedy sampling with probs
* llama : add comment about llama_sample_token_greedy() missing probs
* sampling : temp == 0.0 -> no probs, temp < 0.0 -> probs
1 parent 19ba479 commit ca679fc
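
For orientation, a minimal caller-side sketch of the convention this commit introduces, using the llama_sampling_params struct and temp field touched in the diffs below (the concrete values are illustrative only):

    // a minimal sketch, assuming the common sampling helpers from common/sampling.h
    llama_sampling_params sparams;

    sparams.temp =  0.8f;  // temp >  0.0 : regular temperature sampling
    sparams.temp =  0.0f;  // temp == 0.0 : greedy sampling, token probabilities are not computed
    sparams.temp = -1.0f;  // temp <  0.0 : greedy sampling, candidates are softmax'd so probabilities are available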

File tree (4 files changed, +9 -3 lines):

* common/common.cpp
* common/sampling.cpp
* examples/speculative/speculative.cpp
* llama.h

Diff for: common/common.cpp (+1)

@@ -224,6 +224,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
                 break;
             }
             sparams.temp = std::stof(argv[i]);
+            sparams.temp = std::max(sparams.temp, 0.0f);
         } else if (arg == "--tfs") {
             if (++i >= argc) {
                 invalid_param = true;
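
Note: because --temp is clamped to a minimum of 0.0f at parse time, the greedy-with-probs mode (temp < 0.0) cannot be selected from the command line; it is intended to be set programmatically, as the speculative example further down does.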

Diff for: common/sampling.cpp (+6 -2)

@@ -167,8 +167,12 @@ llama_token llama_sampling_sample(
         llama_sample_grammar(ctx_main, &cur_p, ctx_sampling->grammar);
     }

-    if (temp <= 0) {
-        // greedy sampling
+    if (temp < 0.0) {
+        // greedy sampling, with probs
+        llama_sample_softmax(ctx_main, &cur_p);
+        id = cur_p.data[0].id;
+    } else if (temp == 0.0) {
+        // greedy sampling, no probs
         id = llama_sample_token_greedy(ctx_main, &cur_p);
     } else {
         if (mirostat == 1) {
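
The new branch relies on llama_sample_softmax() sorting the candidates by logit and filling in their probabilities, so the first entry is the greedy pick. A minimal standalone sketch of the two greedy paths (greedy_pick is a hypothetical helper; ctx and a populated cur_p array are assumed):

    #include "llama.h"

    // hypothetical helper: picks the greedy token, optionally keeping probabilities
    static llama_token greedy_pick(llama_context * ctx, llama_token_data_array * cur_p, bool need_probs) {
        if (!need_probs) {
            // argmax over the raw logits; the .p fields of cur_p stay unfilled
            return llama_sample_token_greedy(ctx, cur_p);
        }
        // llama_sample_softmax() sorts the candidates and computes .p,
        // so data[0] is the greedy pick and its probability is available
        llama_sample_softmax(ctx, cur_p);
        return cur_p->data[0].id;
    }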

Diff for: examples/speculative/speculative.cpp (+1 -1)

@@ -148,7 +148,7 @@ int main(int argc, char ** argv) {
     std::vector<seq_draft> drafts(n_seq_dft);

     params.sparams.grammar.clear(); // the draft samplers will copy the target sampler's grammar
-    params.sparams.temp = std::max(0.01f, params.sparams.temp);
+    params.sparams.temp = -1.0f;    // force greedy sampling with probs for the draft model

     for (int s = 0; s < n_seq_dft; ++s) {
         drafts[s].ctx_sampling = llama_sampling_init(params.sparams);
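
Note: switching the draft sampler to a negative temperature keeps drafting greedy while still producing per-token probabilities (via the softmax path above), which the speculative loop can inspect for its drafted tokens; the previous workaround of forcing the temperature up to at least 0.01f is no longer needed.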

Diff for: llama.h (+1)

@@ -658,6 +658,7 @@ extern "C" {
             float * mu);

     /// @details Selects the token with the highest probability.
+    ///          Does not compute the token probabilities. Use llama_sample_softmax() instead.
     LLAMA_API llama_token llama_sample_token_greedy(
             struct llama_context * ctx,
             llama_token_data_array * candidates);

Comments (0)