@@ -1517,9 +1517,12 @@ int main(int argc, char ** argv) {
     const cmd_params_instance * prev_inst = nullptr;
 
     int params_idx = 0;
+    auto params_count = params_instances.size();
     for (const auto & inst : params_instances) {
         params_idx++;
-        LOG_TEE("llama-bench: benchmark %d/%ld: starting\n", params_idx, params_instances.size());
+        if (params.verbose) {
+            LOG_TEE("llama-bench: benchmark %d/%ld: starting\n", params_idx, params_count);
+        }
         // keep the same model between tests when possible
         if (!lmodel || !prev_inst || !inst.equal_mparams(*prev_inst)) {
             if (lmodel) {
@@ -1569,12 +1572,16 @@ int main(int argc, char ** argv) {
 
         // warmup run
         if (t.n_prompt > 0) {
-            LOG_TEE("llama-bench: benchmark %d/%ld: warmup prompt run\n", params_idx, params_instances.size());
+            if (params.verbose) {
+                LOG_TEE("llama-bench: benchmark %d/%ld: warmup prompt run\n", params_idx, params_count);
+            }
             // test_prompt(ctx, std::min(t.n_batch, std::min(t.n_prompt, 32)), 0, t.n_batch, t.n_threads);
             test_prompt(ctx, t.n_prompt, 0, t.n_batch, t.n_threads);
         }
         if (t.n_gen > 0) {
-            LOG_TEE("llama-bench: benchmark %d/%ld: warmup generation run\n", params_idx, params_instances.size());
+            if (params.verbose) {
+                LOG_TEE("llama-bench: benchmark %d/%ld: warmup generation run\n", params_idx, params_count);
+            }
             test_gen(ctx, 1, 0, t.n_threads);
         }
 
@@ -1584,11 +1591,15 @@ int main(int argc, char ** argv) {
             uint64_t t_start = get_time_ns();
 
             if (t.n_prompt > 0) {
-                LOG_TEE("llama-bench: benchmark %d/%ld: prompt run %d/%d\n", params_idx, params_instances.size(), i + 1, params.reps);
+                if (params.verbose) {
+                    LOG_TEE("llama-bench: benchmark %d/%ld: prompt run %d/%d\n", params_idx, params_count, i + 1, params.reps);
+                }
                 test_prompt(ctx, t.n_prompt, 0, t.n_batch, t.n_threads);
             }
             if (t.n_gen > 0) {
-                LOG_TEE("llama-bench: benchmark %d/%ld: generation run %d/%d\n", params_idx, params_instances.size(), i + 1, params.reps);
+                if (params.verbose) {
+                    LOG_TEE("llama-bench: benchmark %d/%ld: generation run %d/%d\n", params_idx, params_count, i + 1, params.reps);
+                }
                 test_gen(ctx, t.n_gen, t.n_prompt, t.n_threads);
             }
 