@@ -1991,10 +1991,13 @@ struct llama_model_loader {
         return tensor;
     }
 
-    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend) {
+    struct ggml_tensor * create_tensor(struct ggml_context * ctx, const std::string & name, const std::vector<int64_t> & ne, ggml_backend_type backend, bool optional = false) {
         struct ggml_tensor * cur = ggml_get_tensor(ctx_meta, name.c_str());
 
         if (cur == NULL) {
+            if (optional) {
+                return NULL;
+            }
             throw std::runtime_error(format("%s: tensor '%s' not found", __func__, name.c_str()));
         }
 
@@ -2812,29 +2815,11 @@ static void llm_load_tensors(
                         layer.wv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa}, backend_split);
                         layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
 
-                        try {
-                            layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, backend);
-                        } catch (const std::runtime_error& e) {
-                            if (std::string(e.what()).find("not found") != std::string::npos) layer.bq = NULL; else throw;
-                        }
-
-                        try {
-                            layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, backend);
-                        } catch (const std::runtime_error& e) {
-                            if (std::string(e.what()).find("not found") != std::string::npos) layer.bk = NULL; else throw;
-                        }
-
-                        try {
-                            layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, backend);
-                        } catch (const std::runtime_error& e) {
-                            if (std::string(e.what()).find("not found") != std::string::npos) layer.bv = NULL; else throw;
-                        }
-
-                        try {
-                            layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend);
-                        } catch (const std::runtime_error& e) {
-                            if (std::string(e.what()).find("not found") != std::string::npos) layer.bo = NULL; else throw;
-                        }
+                        // optional bias tensors
+                        layer.bq = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q, "bias", i), {n_embd}, backend, true);
+                        layer.bk = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K, "bias", i), {n_embd_gqa}, backend, true);
+                        layer.bv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_V, "bias", i), {n_embd_gqa}, backend, true);
+                        layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend, true);
 
                         layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
 
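For context on how callers consume the new NULL return: a graph-build path can treat an optional tensor as a feature flag and apply the bias only when it was actually loaded. The snippet below is a minimal sketch, not code from this commit; it assumes a ggml_context * ctx0 and an input activation tensor inp from the surrounding graph-build code, and uses the existing ggml_mul_mat/ggml_add primitives.

    // Q projection; layer.bq is NULL when the model file ships no attention bias
    struct ggml_tensor * q = ggml_mul_mat(ctx0, layer.wq, inp);
    if (layer.bq != NULL) {
        // add the optional bias only if it was present in the model file
        q = ggml_add(ctx0, q, layer.bq);
    }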