@@ -421,11 +421,8 @@ struct llama_model::impl {
421421 llama_mlocks mlock_bufs;
422422 llama_mlocks mlock_mmaps;
423423
424- // contexts where the model tensors metadata is stored
425- std::vector<ggml_context_ptr> ctxs;
426-
427- // the model memory buffers for the tensor data
428- std::vector<ggml_backend_buffer_ptr> bufs;
424+ // contexts where the model tensors metadata is stored as well as the corresponding buffers:
425+ std::vector<std::pair<ggml_context_ptr, ggml_backend_buffer_ptr>> ctxs_bufs;
429426
430427 buft_list_t cpu_buft_list;
431428 std::map<ggml_backend_dev_t, buft_list_t> gpu_buft_list;
@@ -2181,7 +2178,14 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
21812178 max_n_tensors += n_layer*2; // duplicated rope freq tensors
21822179 const size_t ctx_size = ggml_tensor_overhead()*max_n_tensors;
21832180
2184- std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
2181+ // define a comparator for the buft -> ctx map to ensure that the order is well-defined:
2182+ struct ggml_backend_buft_comparator {
2183+ bool operator()(const ggml_backend_buffer_type_t & lhs, const ggml_backend_buffer_type_t & rhs) const {
2184+ return strcmp(ggml_backend_buft_name(lhs), ggml_backend_buft_name(rhs)) < 0; // compare name contents; `<` on the returned const char* would order by pointer address, which is not well-defined
2185+ }
2186+ };
2187+ std::map<ggml_backend_buffer_type_t, ggml_context_ptr, ggml_backend_buft_comparator> ctx_map;
2188+
21852189 auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
21862190 auto it = ctx_map.find(buft);
21872191 if (it == ctx_map.end()) {
@@ -2196,12 +2200,11 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
21962200 throw std::runtime_error(format("failed to create ggml context"));
21972201 }
21982202
2199- ctx_map[buft] = ctx;
2200- pimpl->ctxs.emplace_back(ctx);
2203+ ctx_map.emplace(buft, ctx);
22012204
22022205 return ctx;
22032206 }
2204- return it->second;
2207+ return it->second.get();
22052208 };
22062209
22072210 const auto TENSOR_DUPLICATED = llama_model_loader::TENSOR_DUPLICATED;
@@ -6036,16 +6039,17 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
60366039 pimpl->mappings.reserve(ml.mappings.size());
60376040
60386041 // create the backend buffers
6039- std::vector<std::pair<ggml_context *, llama_buf_map>> ctx_bufs;
6040- ctx_bufs.reserve(ctx_map.size());
6042+ std::vector<std::pair<ggml_context *, llama_buf_map>> ctx_buf_maps;
6043+ ctx_buf_maps.reserve(ctx_map.size());
60416044
60426045 // Ensure we have enough capacity for the maximum backend buffer we will potentially create
60436046 const size_t n_max_backend_buffer = ctx_map.size() * ml.files.size();
6044- pimpl->bufs.reserve(n_max_backend_buffer);
6047+ pimpl->ctxs_bufs.reserve(n_max_backend_buffer);
60456048
60466049 for (auto & it : ctx_map) {
6047- ggml_backend_buffer_type_t buft = it.first;
6048- ggml_context * ctx = it.second;
6050+ ggml_backend_buffer_type_t buft = it.first;
6051+ ggml_context_ptr & ctx_ptr = it.second;
6052+ ggml_context * ctx = ctx_ptr.get();
60496053
60506054 // skip contexts without tensors
60516055 if (ggml_get_first_tensor(ctx) == nullptr) {
@@ -6069,6 +6073,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
60696073 bool buffer_from_host_ptr_supported = props.caps.buffer_from_host_ptr;
60706074 bool is_default_buft = buft == ggml_backend_dev_buffer_type(dev);
60716075
6076+ ggml_backend_buffer_t buf = nullptr;
60726077 if (ml.use_mmap && use_mmap_buffer && buffer_from_host_ptr_supported && is_default_buft) {
60736078 for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
60746079 // only the mmap region containing the tensors in the model is mapped to the backend buffer
@@ -6081,20 +6086,18 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
60816086 continue;
60826087 }
60836088 const size_t max_size = ggml_get_max_tensor_size(ctx);
6084- ggml_backend_buffer_t buf = ggml_backend_dev_buffer_from_host_ptr(dev, (char *) addr + first, last - first, max_size);
6089+ buf = ggml_backend_dev_buffer_from_host_ptr(dev, (char *) addr + first, last - first, max_size);
60856090 if (buf == nullptr) {
60866091 throw std::runtime_error(format("unable to allocate %s buffer", ggml_backend_buft_name(buft)));
60876092 }
6088- pimpl->bufs.emplace_back(buf);
60896093 buf_map.emplace(idx, buf);
60906094 }
60916095 }
60926096 else {
6093- ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
6097+ buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
60946098 if (buf == nullptr) {
60956099 throw std::runtime_error(format("unable to allocate %s buffer", ggml_backend_buft_name(buft)));
60966100 }
6097- pimpl->bufs.emplace_back(buf);
60986101 if (use_mlock && ggml_backend_buffer_is_host(buf)) {
60996102 pimpl->mlock_bufs.emplace_back(new llama_mlock);
61006103 auto & mlock_buf = pimpl->mlock_bufs.back();
@@ -6105,18 +6108,15 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
61056108 buf_map.emplace(idx, buf);
61066109 }
61076110 }
6108-
6109- if (pimpl->bufs.empty()) {
6110- throw std::runtime_error("failed to allocate buffer");
6111- }
6111+ pimpl->ctxs_bufs.emplace_back(std::move(ctx_ptr), buf);
61126112
61136113 for (auto & buf : buf_map) {
61146114 // indicate that this buffer contains weights
61156115 // this is used by ggml_backend_sched to improve op scheduling: ops that use a weight are preferably scheduled to the backend that contains the weight
61166116 ggml_backend_buffer_set_usage(buf.second, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
61176117 }
61186118
6119- ctx_bufs.emplace_back(ctx, buf_map);
6119+ ctx_buf_maps.emplace_back(ctx, buf_map);
61206120 }
61216121
61226122 if (llama_supports_gpu_offload()) {
@@ -6134,19 +6134,21 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
61346134 }
61356135
61366136 // print memory requirements per buffer type
6137- for (auto & buf : pimpl->bufs) {
6138- LLAMA_LOG_INFO("%s: %12s model buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf.get()), ggml_backend_buffer_get_size(buf.get()) / 1024.0 / 1024.0);
6137+ for (auto & ctx_buf : pimpl->ctxs_bufs) {
6138+ ggml_backend_buffer_t buf = ctx_buf.second.get();
6139+ LLAMA_LOG_INFO("%s: %12s model buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf) / 1024.0 / 1024.0);
61396140 }
61406141
61416142 // populate tensors_by_name
6142- for (auto & ctx : pimpl->ctxs) {
6143- for (auto * cur = ggml_get_first_tensor(ctx.get()); cur != NULL; cur = ggml_get_next_tensor(ctx.get(), cur)) {
6143+ for (auto & ctx_buf : pimpl->ctxs_bufs) {
6144+ ggml_context * ctx = ctx_buf.first.get();
6145+ for (auto * cur = ggml_get_first_tensor(ctx); cur != NULL; cur = ggml_get_next_tensor(ctx, cur)) {
61446146 tensors_by_name.emplace_back(ggml_get_name(cur), cur);
61456147 }
61466148 }
61476149
61486150 // load tensor data
6149- for (auto & it : ctx_bufs) {
6151+ for (auto & it : ctx_buf_maps) {
61506152 ggml_context * ctx = it.first;
61516153 auto & bufs = it.second;
61526154 if (!ml.load_all_data(ctx, bufs, use_mlock ? &pimpl->mlock_mmaps : NULL, params.progress_callback, params.progress_callback_user_data)) {
@@ -6189,8 +6191,9 @@ size_t llama_model::n_devices() const {
61896191
61906192std::map<ggml_backend_buffer_type_t, size_t> llama_model::memory_breakdown() const {
61916193 std::map<ggml_backend_buffer_type_t, size_t> ret;
6192- for (const ggml_backend_buffer_ptr & buf_ptr : pimpl->bufs) {
6193- ret[ggml_backend_buffer_get_type(buf_ptr.get())] += ggml_backend_buffer_get_size(buf_ptr.get());
6194+ for (const auto & ctx_buf : pimpl->ctxs_bufs) {
6195+ ggml_backend_buffer_t buf = ctx_buf.second.get();
6196+ ret[ggml_backend_buffer_get_type(buf)] += ggml_backend_buffer_get_size(buf);
61946197 }
61956198 return ret;
61966199}
0 commit comments