Skip to content

Commit 0526560

Browse files
committed
infill : only remove the leading space when params.escape is set; strip the space from the suffix string when possible (the tokenizer adds it back), otherwise remove the tokenizer-added space token
1 parent b4046aa commit 0526560

File tree

2 files changed

+13
-4
lines changed

2 files changed

+13
-4
lines changed

examples/infill/infill.cpp

Lines changed: 6 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -233,12 +233,16 @@ int main(int argc, char ** argv) {
233233
const bool add_bos = llama_vocab_type(model) == LLAMA_VOCAB_TYPE_SPM;
234234
LOG("add_bos: %d\n", add_bos);
235235

236+
bool suff_rm_leading_spc = params.escape;
237+
if (suff_rm_leading_spc && params.input_suffix.find_first_of(" ") == 0 && params.input_suffix.size() > 1) {
238+
params.input_suffix.erase(0, 1);
239+
suff_rm_leading_spc = false;
240+
}
236241
std::vector<llama_token> embd_inp;
237242
std::vector<llama_token> inp_pfx = ::llama_tokenize(ctx, params.input_prefix, false);
238-
// params.input_suffix.erase(0, params.input_suffix.find_first_not_of(" "));
239243
std::vector<llama_token> inp_sfx = ::llama_tokenize(ctx, params.input_suffix, false);
240244
const int space_token = 29871;
241-
if (params.escape && inp_sfx.size() > 1 && inp_sfx[0] == space_token) {
245+
if (suff_rm_leading_spc && inp_sfx[0] == space_token) {
242246
inp_sfx.erase(inp_sfx.begin());
243247
}
244248
inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(ctx));

examples/server/server.cpp

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -344,11 +344,16 @@ struct llama_server_context
344344

345345
void loadInfill()
346346
{
347-
params.input_suffix.erase(0, params.input_suffix.find_first_not_of(" "));
347+
bool suff_rm_leading_spc = params.escape;
348+
if (suff_rm_leading_spc && params.input_suffix.find_first_of(" ") == 0 && params.input_suffix.size() > 1) {
349+
params.input_suffix.erase(0, 1);
350+
suff_rm_leading_spc = false;
351+
}
352+
348353
auto prefix_tokens = tokenize(params.input_prefix, false);
349354
auto suffix_tokens = tokenize(params.input_suffix, false);
350355
const int space_token = 29871;
351-
if (params.escape && suffix_tokens.size() > 1 && suffix_tokens[0] == space_token) {
356+
if (suff_rm_leading_spc && suffix_tokens[0] == space_token) {
352357
suffix_tokens.erase(suffix_tokens.begin());
353358
}
354359
prefix_tokens.insert(prefix_tokens.begin(), llama_token_prefix(ctx));

0 commit comments

Comments (0)