1 file changed, +9 -2 lines changed

```diff
@@ -1118,6 +1118,12 @@ static std::string llama_token_to_piece(const struct llama_context * ctx, llama_
 //
 
 struct llama_state {
+    llama_state() {
+#ifdef GGML_USE_METAL
+        ggml_metal_log_set_callback(log_callback, log_callback_user_data);
+#endif
+    }
+
     // We save the log callback globally
     ggml_log_callback log_callback = llama_log_callback_default;
     void * log_callback_user_data = nullptr;
```
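With the new constructor, the Metal backend receives the default log callback as soon as the global `g_state` object is constructed, i.e. during static initialization, rather than only when a context is created. A minimal standalone sketch of this pattern, using hypothetical stand-in names (`backend_log_set_callback` plays the role of `ggml_metal_log_set_callback`):

```cpp
#include <cstdio>

// Hypothetical stand-ins for the ggml/llama pieces involved.
typedef void (*log_callback_t)(const char * text, void * user_data);

static log_callback_t g_backend_cb = nullptr;  // role of the Metal backend's callback slot

static void backend_log_set_callback(log_callback_t cb, void * /*user_data*/) {
    g_backend_cb = cb;  // role of ggml_metal_log_set_callback()
}

static void default_log_callback(const char * text, void * /*user_data*/) {
    fputs(text, stderr);
}

struct state {
    state() {
        // As in llama_state(): the default member initializers below have
        // already run, so the backend is wired to the default callback
        // before main() starts.
        backend_log_set_callback(log_callback, log_callback_user_data);
    }
    log_callback_t log_callback           = default_log_callback;
    void *         log_callback_user_data = nullptr;
};

static state g_state;  // constructor runs at static-initialization time

int main() {
    if (g_backend_cb) g_backend_cb("backend says hello\n", nullptr);  // already wired
}
```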
```diff
@@ -8569,8 +8575,6 @@ struct llama_context * llama_new_context_with_model(
 
 #ifdef GGML_USE_METAL
     if (model->n_gpu_layers > 0) {
-        ggml_metal_log_set_callback(llama_log_callback_default, NULL);
-
         ctx->ctx_metal = ggml_metal_init(1);
         if (!ctx->ctx_metal) {
             LLAMA_LOG_ERROR("%s: ggml_metal_init() failed\n", __func__);
```
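Correspondingly, `llama_new_context_with_model` no longer forces the Metal callback back to `llama_log_callback_default`: before this change, creating a context with `n_gpu_layers > 0` would overwrite whatever callback the user had installed via `llama_log_set`.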
```diff
@@ -9706,6 +9710,9 @@ const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal
 void llama_log_set(ggml_log_callback log_callback, void * user_data) {
     g_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
     g_state.log_callback_user_data = user_data;
+#ifdef GGML_USE_METAL
+    ggml_metal_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
+#endif
 }
 
 static void llama_log_internal_v(ggml_log_level level, const char * format, va_list args) {
```
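The net effect for API users is that a single `llama_log_set()` call now also covers Metal backend messages. A minimal sketch of such a caller, assuming the public `llama.h` entry point and the `ggml_log_callback` signature (`level`, `text`, `user_data`) and `ggml_log_level` values from `ggml.h`:

```cpp
#include "llama.h"

#include <cstdio>

// Custom sink matching ggml_log_callback. With this patch applied, Metal
// backend messages are delivered here too when built with GGML_USE_METAL.
static void my_log(ggml_log_level level, const char * text, void * /*user_data*/) {
    fputs(text, level == GGML_LOG_LEVEL_ERROR ? stderr : stdout);
}

int main() {
    // One call routes both llama.cpp logging and (via the forwarded
    // ggml_metal_log_set_callback) Metal backend logging through my_log.
    llama_log_set(my_log, nullptr);

    // ... initialize the backend, load a model, create contexts; their log
    // output now goes through my_log instead of the default stderr sink.
    return 0;
}
```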