@@ -1492,9 +1492,12 @@ int main(int argc, char ** argv) {
     const cmd_params_instance * prev_inst = nullptr;
 
     int params_idx = 0;
+    auto params_count = params_instances.size();
     for (const auto & inst : params_instances) {
         params_idx++;
-        LOG_TEE("llama-bench: benchmark %d/%ld: starting\n", params_idx, params_instances.size());
+        if (params.verbose) {
+            LOG_TEE("llama-bench: benchmark %d/%ld: starting\n", params_idx, params_count);
+        }
         // keep the same model between tests when possible
         if (!lmodel || !prev_inst || !inst.equal_mparams(*prev_inst)) {
             if (lmodel) {
@@ -1544,12 +1547,16 @@ int main(int argc, char ** argv) {
 
         // warmup run
         if (t.n_prompt > 0) {
-            LOG_TEE("llama-bench: benchmark %d/%ld: warmup prompt run\n", params_idx, params_instances.size());
+            if (params.verbose) {
+                LOG_TEE("llama-bench: benchmark %d/%ld: warmup prompt run\n", params_idx, params_count);
+            }
             // test_prompt(ctx, std::min(t.n_batch, std::min(t.n_prompt, 32)), 0, t.n_batch, t.n_threads);
             test_prompt(ctx, t.n_prompt, 0, t.n_batch, t.n_threads);
         }
         if (t.n_gen > 0) {
-            LOG_TEE("llama-bench: benchmark %d/%ld: warmup generation run\n", params_idx, params_instances.size());
+            if (params.verbose) {
+                LOG_TEE("llama-bench: benchmark %d/%ld: warmup generation run\n", params_idx, params_count);
+            }
             test_gen(ctx, 1, 0, t.n_threads);
         }
 
@@ -1559,11 +1566,15 @@ int main(int argc, char ** argv) {
             uint64_t t_start = get_time_ns();
 
             if (t.n_prompt > 0) {
-                LOG_TEE("llama-bench: benchmark %d/%ld: prompt run %d/%d\n", params_idx, params_instances.size(), i + 1, params.reps);
+                if (params.verbose) {
+                    LOG_TEE("llama-bench: benchmark %d/%ld: prompt run %d/%d\n", params_idx, params_count, i + 1, params.reps);
+                }
                 test_prompt(ctx, t.n_prompt, 0, t.n_batch, t.n_threads);
             }
             if (t.n_gen > 0) {
-                LOG_TEE("llama-bench: benchmark %d/%ld: generation run %d/%d\n", params_idx, params_instances.size(), i + 1, params.reps);
+                if (params.verbose) {
+                    LOG_TEE("llama-bench: benchmark %d/%ld: generation run %d/%d\n", params_idx, params_count, i + 1, params.reps);
+                }
                 test_gen(ctx, t.n_gen, t.n_prompt, t.n_threads);
             }
 
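For context, the change above follows a common pattern: cache the container size once and gate per-iteration progress logging behind a verbose flag so the default output stays quiet. Below is a minimal, self-contained C++ sketch of that pattern under stated assumptions; the names run_params and run_one are illustrative only and are not part of llama.cpp.

// Minimal sketch of verbose-gated progress logging (illustrative; not llama.cpp code).
#include <cstdio>
#include <vector>

struct run_params {
    bool verbose = false;  // analogous to params.verbose in the diff above
    int  reps    = 3;      // analogous to params.reps
};

static void run_one(int idx, size_t total, const run_params & p) {
    for (int i = 0; i < p.reps; i++) {
        if (p.verbose) {
            // progress lines are printed only when verbose output was requested
            fprintf(stderr, "bench %d/%zu: rep %d/%d\n", idx, total, i + 1, p.reps);
        }
        // ... the actual measured work would run here ...
    }
}

int main() {
    run_params p;
    p.verbose = true;

    std::vector<int> instances = {0, 1, 2};
    size_t total = instances.size();  // computed once, like params_count above

    int idx = 0;
    for (int inst : instances) {
        (void) inst;                  // the instance itself is unused in this sketch
        run_one(++idx, total, p);
    }
    return 0;
}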