
Commit 092e2e2

llama-model: fix inconsistent ctxs <-> bufs order

1 parent fa882fd

File tree: 1 file changed (+28 -32 lines)

src/llama-model.cpp

Lines changed: 28 additions & 32 deletions
@@ -421,11 +421,8 @@ struct llama_model::impl {
     llama_mlocks mlock_bufs;
     llama_mlocks mlock_mmaps;
 
-    // contexts where the model tensors metadata is stored
-    std::vector<ggml_context_ptr> ctxs;
-
-    // the model memory buffers for the tensor data
-    std::vector<ggml_backend_buffer_ptr> bufs;
+    // contexts where the model tensors metadata is stored as well as the corresponding buffers:
+    std::vector<std::pair<ggml_context_ptr, ggml_backend_buffer_ptr>> ctxs_bufs;
 
     buft_list_t cpu_buft_list;
     std::map<ggml_backend_dev_t, buft_list_t> gpu_buft_list;
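
The gist of the fix: the old `ctxs` and `bufs` vectors were kept in sync only by convention, so nothing guaranteed that entry i of one matched entry i of the other. Merging them into a single vector of pairs makes the pairing structural. A minimal sketch of the idea, using hypothetical `ctx_t`/`buf_t` stand-ins rather than the real ggml types:

```cpp
// Sketch only (hypothetical ctx_t/buf_t, not the ggml types): with two
// parallel vectors, nothing ties ctxs[i] to bufs[i]; any divergence in push
// order silently breaks the pairing. A vector of pairs makes it structural.
#include <memory>
#include <utility>
#include <vector>

struct ctx_t {};
struct buf_t {};

int main() {
    // before: two containers kept in lockstep only by convention
    std::vector<std::unique_ptr<ctx_t>> ctxs;
    std::vector<std::unique_ptr<buf_t>> bufs;

    // after: one container, one entry per (context, buffer) pair
    std::vector<std::pair<std::unique_ptr<ctx_t>, std::unique_ptr<buf_t>>> ctxs_bufs;
    ctxs_bufs.emplace_back(std::make_unique<ctx_t>(), std::make_unique<buf_t>());
    return 0;
}
```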
@@ -2181,7 +2178,14 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
     max_n_tensors += n_layer*2; // duplicated rope freq tensors
     const size_t ctx_size = ggml_tensor_overhead()*max_n_tensors;
 
-    std::map<ggml_backend_buffer_type_t, ggml_context *> ctx_map;
+    // define a comparator for the buft -> ctx map to ensure that the order is well-defined:
+    struct ggml_backend_buft_comparator {
+        bool operator()(const ggml_backend_buffer_type_t & lhs, const ggml_backend_buffer_type_t & rhs) const {
+            return strcmp(ggml_backend_buft_name(lhs), ggml_backend_buft_name(rhs)) < 0; // compare the names, not the pointer values
+        }
+    };
+    std::map<ggml_backend_buffer_type_t, ggml_context_ptr, ggml_backend_buft_comparator> ctx_map;
+
     auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
         auto it = ctx_map.find(buft);
         if (it == ctx_map.end()) {
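
Why the comparator matters: `ggml_backend_buffer_type_t` is a pointer, and a `std::map` keyed on raw pointers iterates in address order, which can differ from run to run. Ordering by the buffer type's name makes iteration deterministic. Note that the comparison has to look at the characters (`strcmp`); comparing the `const char *` results of `ggml_backend_buft_name()` with `<` would again order by pointer address. A standalone sketch with a hypothetical `buft` struct in place of the ggml API:

```cpp
// Standalone sketch (hypothetical buft struct, not the ggml API): a map keyed
// on raw pointers iterates in address order, which varies between runs;
// ordering by name makes the iteration order deterministic.
#include <cstdio>
#include <cstring>
#include <map>

struct buft { const char * name; };

struct buft_name_cmp {
    bool operator()(const buft * lhs, const buft * rhs) const {
        return std::strcmp(lhs->name, rhs->name) < 0; // compare names, not addresses
    }
};

int main() {
    buft cpu{"CPU"}, cuda{"CUDA0"};
    std::map<const buft *, int, buft_name_cmp> m;
    m[&cuda] = 1;
    m[&cpu]  = 2;
    for (const auto & [k, v] : m) {
        std::printf("%s -> %d\n", k->name, v); // always prints CPU before CUDA0
    }
    return 0;
}
```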
@@ -2196,12 +2200,11 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                 throw std::runtime_error(format("failed to create ggml context"));
             }
 
-            ctx_map[buft] = ctx;
-            pimpl->ctxs.emplace_back(ctx);
+            ctx_map.emplace(buft, ctx);
 
             return ctx;
         }
-        return it->second;
+        return it->second.get();
     };
 
     const auto TENSOR_DUPLICATED = llama_model_loader::TENSOR_DUPLICATED;
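
Since the mapped type is now `ggml_context_ptr` (a smart pointer), the map owns each context, and the lookup path hands out a non-owning raw pointer via `.get()`. A sketch of the ownership pattern with hypothetical types:

```cpp
// Ownership sketch (hypothetical ctx_t): the map owns the context through a
// unique_ptr; callers receive a borrowed raw pointer via .get().
#include <map>
#include <memory>

struct ctx_t {};

int main() {
    std::map<int, std::unique_ptr<ctx_t>> ctx_map;
    ctx_map.emplace(0, std::make_unique<ctx_t>());

    auto it = ctx_map.find(0);
    ctx_t * raw = it->second.get(); // non-owning; the map keeps ownership
    (void) raw;
    return 0;
}
```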
@@ -6036,16 +6039,15 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
     pimpl->mappings.reserve(ml.mappings.size());
 
     // create the backend buffers
-    std::vector<std::pair<ggml_context *, llama_buf_map>> ctx_bufs;
-    ctx_bufs.reserve(ctx_map.size());
+    std::vector<std::pair<ggml_context *, llama_buf_map>> ctx_buf_maps;
+    ctx_buf_maps.reserve(ctx_map.size());
 
     // Ensure we have enough capacity for the maximum backend buffer we will potentially create
     const size_t n_max_backend_buffer = ctx_map.size() * ml.files.size();
-    pimpl->bufs.reserve(n_max_backend_buffer);
+    pimpl->ctxs_bufs.reserve(n_max_backend_buffer);
 
-    for (auto & it : ctx_map) {
-        ggml_backend_buffer_type_t buft = it.first;
-        ggml_context * ctx = it.second;
+    for (auto & [buft, ctx_ptr] : ctx_map) {
+        ggml_context * ctx = ctx_ptr.get();
 
         // skip contexts without tensors
         if (ggml_get_first_tensor(ctx) == nullptr) {
@@ -6069,6 +6071,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
             bool buffer_from_host_ptr_supported = props.caps.buffer_from_host_ptr;
             bool is_default_buft = buft == ggml_backend_dev_buffer_type(dev);
 
+            ggml_backend_buffer_t buf = nullptr;
             if (ml.use_mmap && use_mmap_buffer && buffer_from_host_ptr_supported && is_default_buft) {
                 for (uint32_t idx = 0; idx < ml.files.size(); idx++) {
                     // only the mmap region containing the tensors in the model is mapped to the backend buffer
@@ -6081,20 +6084,18 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                         continue;
                     }
                     const size_t max_size = ggml_get_max_tensor_size(ctx);
-                    ggml_backend_buffer_t buf = ggml_backend_dev_buffer_from_host_ptr(dev, (char *) addr + first, last - first, max_size);
+                    buf = ggml_backend_dev_buffer_from_host_ptr(dev, (char *) addr + first, last - first, max_size);
                     if (buf == nullptr) {
                         throw std::runtime_error(format("unable to allocate %s buffer", ggml_backend_buft_name(buft)));
                     }
-                    pimpl->bufs.emplace_back(buf);
                     buf_map.emplace(idx, buf);
                 }
             }
             else {
-                ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
+                buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
                 if (buf == nullptr) {
                     throw std::runtime_error(format("unable to allocate %s buffer", ggml_backend_buft_name(buft)));
                 }
-                pimpl->bufs.emplace_back(buf);
                 if (use_mlock && ggml_backend_buffer_is_host(buf)) {
                     pimpl->mlock_bufs.emplace_back(new llama_mlock);
                     auto & mlock_buf = pimpl->mlock_bufs.back();
@@ -6105,18 +6106,15 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
                     buf_map.emplace(idx, buf);
                 }
             }
-
-            if (pimpl->bufs.empty()) {
-                throw std::runtime_error("failed to allocate buffer");
-            }
+            pimpl->ctxs_bufs.emplace_back(std::move(ctx_ptr), buf);
 
             for (auto & buf : buf_map) {
                 // indicate that this buffer contains weights
                 // this is used by ggml_backend_sched to improve op scheduling: ops that use a weight are preferably scheduled to the backend that contains the weight
                 ggml_backend_buffer_set_usage(buf.second, GGML_BACKEND_BUFFER_USAGE_WEIGHTS);
             }
 
-            ctx_bufs.emplace_back(ctx, buf_map);
+            ctx_buf_maps.emplace_back(ctx, buf_map);
         }
 
         if (llama_supports_gpu_offload()) {
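
One subtlety in `pimpl->ctxs_bufs.emplace_back(std::move(ctx_ptr), buf)`: the loop moves each owning pointer out of `ctx_map` while still iterating the map. This appears safe because only the mapped values are moved from (the keys and the map structure are untouched) and `ctx_map` is not consulted again before it goes out of scope. A sketch with hypothetical types:

```cpp
// Sketch (hypothetical types): moving the mapped unique_ptr out of a map
// while iterating leaves the entry holding nullptr, which is fine as long as
// the map is discarded afterwards, as ctx_map is here.
#include <map>
#include <memory>
#include <utility>
#include <vector>

struct ctx_t {};
struct buf_t {};

int main() {
    std::map<int, std::unique_ptr<ctx_t>> ctx_map;
    ctx_map.emplace(0, std::make_unique<ctx_t>());

    std::vector<std::pair<std::unique_ptr<ctx_t>, buf_t *>> ctxs_bufs;
    for (auto & [key, ctx_ptr] : ctx_map) {
        (void) key;
        ctxs_bufs.emplace_back(std::move(ctx_ptr), nullptr); // map entry now empty
    }
    return 0; // ctx_map goes out of scope; the moved-from entries are harmless
}
```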
@@ -6134,22 +6132,20 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
     }
 
     // print memory requirements per buffer type
-    for (auto & buf : pimpl->bufs) {
+    for (auto & [_, buf] : pimpl->ctxs_bufs) {
         LLAMA_LOG_INFO("%s: %12s model buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf.get()), ggml_backend_buffer_get_size(buf.get()) / 1024.0 / 1024.0);
     }
 
     // populate tensors_by_name
-    for (auto & ctx : pimpl->ctxs) {
+    for (auto & [ctx, _] : pimpl->ctxs_bufs) {
         for (auto * cur = ggml_get_first_tensor(ctx.get()); cur != NULL; cur = ggml_get_next_tensor(ctx.get(), cur)) {
             tensors_by_name.emplace_back(ggml_get_name(cur), cur);
         }
     }
 
     // load tensor data
-    for (auto & it : ctx_bufs) {
-        ggml_context * ctx = it.first;
-        auto & bufs = it.second;
-        if (!ml.load_all_data(ctx, bufs, use_mlock ? &pimpl->mlock_mmaps : NULL, params.progress_callback, params.progress_callback_user_data)) {
+    for (auto & [ctx, buf_map] : ctx_buf_maps) {
+        if (!ml.load_all_data(ctx, buf_map, use_mlock ? &pimpl->mlock_mmaps : NULL, params.progress_callback, params.progress_callback_user_data)) {
             return false;
         }
     }
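
A side note on the `[_, buf]` and `[ctx, _]` bindings used above: before C++26, `_` is an ordinary identifier with no special meaning, so each use introduces a real (unused) variable scoped to its loop. A minimal sketch with made-up data:

```cpp
// Minimal sketch (made-up data): `_` here is a normal variable name that
// marks the unused half of the pair; it gains language support only in C++26.
#include <utility>
#include <vector>

int main() {
    std::vector<std::pair<int, double>> v = {{1, 2.0}, {3, 4.0}};
    double sum = 0.0;
    for (const auto & [_, val] : v) { // first element intentionally ignored
        sum += val;
    }
    return sum == 6.0 ? 0 : 1;
}
```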
@@ -6189,8 +6185,8 @@ size_t llama_model::n_devices() const {
 
 std::map<ggml_backend_buffer_type_t, size_t> llama_model::memory_breakdown() const {
     std::map<ggml_backend_buffer_type_t, size_t> ret;
-    for (const ggml_backend_buffer_ptr & buf_ptr : pimpl->bufs) {
-        ret[ggml_backend_buffer_get_type(buf_ptr.get())] += ggml_backend_buffer_get_size(buf_ptr.get());
+    for (const auto & [_, buf] : pimpl->ctxs_bufs) {
+        ret[ggml_backend_buffer_get_type(buf.get())] += ggml_backend_buffer_get_size(buf.get());
     }
     return ret;
 }
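
The aggregation in `memory_breakdown()` relies on `std::map::operator[]` value-initializing missing entries to zero, so per-type sizes can be summed without an explicit existence check. A small sketch of the same pattern with hypothetical string keys in place of buffer types:

```cpp
// Sketch of the same pattern (hypothetical string keys instead of buffer
// types): operator[] creates missing entries as 0, so += accumulates safely.
#include <cstddef>
#include <cstdio>
#include <map>
#include <string>

int main() {
    std::map<std::string, std::size_t> ret;
    const struct { const char * type; std::size_t size; } bufs[] = {
        {"CPU", 512}, {"CUDA0", 1024}, {"CPU", 256},
    };
    for (const auto & b : bufs) {
        ret[b.type] += b.size; // "CPU" ends up at 768
    }
    for (const auto & [type, size] : ret) {
        std::printf("%s: %zu bytes\n", type.c_str(), size);
    }
    return 0;
}
```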
