@@ -321,12 +321,6 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
                 invalid_param = true;
                 break;
             }
-        } else if (arg == "--n-parts") {
-            if (++i >= argc) {
-                invalid_param = true;
-                break;
-            }
-            params.n_parts = std::stoi(argv[i]);
         } else if (arg == "-h" || arg == "--help") {
             gpt_print_usage(argc, argv, default_params);
             exit(0);
@@ -418,7 +412,6 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     fprintf(stderr, "  --no-penalize-nl      do not penalize newline token\n");
     fprintf(stderr, "  --memory-f32          use f32 instead of f16 for memory key+value\n");
     fprintf(stderr, "  --temp N              temperature (default: %.1f)\n", (double)params.temp);
-    fprintf(stderr, "  --n-parts N           number of model parts (default: -1 = determine from dimensions)\n");
     fprintf(stderr, "  -b N, --batch-size N  batch size for prompt processing (default: %d)\n", params.n_batch);
     fprintf(stderr, "  --perplexity          compute perplexity over the prompt\n");
     fprintf(stderr, "  --keep                number of tokens to keep from the initial prompt (default: %d, -1 = all)\n", params.n_keep);
@@ -473,7 +466,6 @@ struct llama_context * llama_init_from_gpt_params(const gpt_params & params) {
     auto lparams = llama_context_default_params();
 
     lparams.n_ctx        = params.n_ctx;
-    lparams.n_parts      = params.n_parts;
     lparams.n_gpu_layers = params.n_gpu_layers;
     lparams.seed         = params.seed;
     lparams.f16_kv       = params.memory_f16;
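For reference, the removed --n-parts branch followed the same value-consuming pattern the remaining options use: advance i to take the next argv entry as the option's value, and set invalid_param if that value is missing. Below is a minimal standalone sketch of that pattern; the demo_params struct and the --n-example flag are illustrative names, not part of the repository.

// Sketch of the value-taking flag pattern seen in gpt_params_parse above.
// demo_params / --n-example are hypothetical stand-ins for a numeric option.
#include <cstdio>
#include <string>

struct demo_params {
    int n_example = 0;
};

static bool demo_parse(int argc, char ** argv, demo_params & params) {
    bool invalid_param = false;
    for (int i = 1; i < argc; i++) {
        std::string arg = argv[i];
        if (arg == "--n-example") {
            if (++i >= argc) {        // flag given but its value is missing
                invalid_param = true;
                break;
            }
            params.n_example = std::stoi(argv[i]);
        } else {
            fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
            return false;
        }
    }
    if (invalid_param) {
        fprintf(stderr, "error: invalid parameter for argument\n");
        return false;
    }
    return true;
}

int main(int argc, char ** argv) {
    demo_params params;
    if (!demo_parse(argc, argv, params)) {
        return 1;
    }
    printf("n_example = %d\n", params.n_example);
    return 0;
}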