llama : refactor llama_kv_cache, llama_context and llm_build_context #11213

Draft: ggerganov wants to merge 62 commits into master from gg/llama-kv-cache

Commits (62):
f78b396  llama : add struct llama_kv_cache (wip) [no ci]  (ggerganov, Jan 13, 2025)
e4550fb  llama : cont  (ggerganov, Jan 13, 2025)
4d7bd03  kv_cache : functions -> members  (ggerganov, Jan 13, 2025)
fef90cb  kv_cache : fix  (ggerganov, Jan 13, 2025)
73a14ec  kv_cache : minor  (ggerganov, Jan 14, 2025)
4cd1b6f  context : prepare kv_cache_read/write to be moved to kv_cache  (ggerganov, Jan 14, 2025)
fd05ab8  kv_cache : move state read/write to llama_kv_cache  (ggerganov, Jan 14, 2025)
17b363a  llama : update llama_kv_self API  (ggerganov, Jan 14, 2025)
a19f671  context : minor  (ggerganov, Jan 15, 2025)
ae274f9  llama : fix names [no ci]  (ggerganov, Jan 15, 2025)
f2524c0  llama : remove references to llama_kv_cache (wip)  (ggerganov, Jan 16, 2025)
b4ec1d4  cont : move kv_self update to llama_context  (ggerganov, Jan 16, 2025)
f071349  context : add get_ctx_padding()  (ggerganov, Jan 17, 2025)
c75ba68  context : move adapter code in the implementation [no ci]  (ggerganov, Jan 17, 2025)
133ad6a  context : initial need_reserve logic  (ggerganov, Jan 17, 2025)
cb8f209  wip  (ggerganov, Jan 17, 2025)
99422df  context : introduce llama_batch_manager  (ggerganov, Jan 17, 2025)
a0c500b  context : prepare for abstraction  (ggerganov, Jan 17, 2025)
e665b57  Merge branch 'master' into gg/llama-kv-cache  (ggerganov, Jan 27, 2025)
9188856  llama : resolve rwkv conflict  (ggerganov, Jan 29, 2025)
c30e34c  Merge branch 'master' into gg/llama-kv-cache  (ggerganov, Jan 29, 2025)
a40ba49  Merge branch 'master' into gg/llama-kv-cache  (ggerganov, Jan 30, 2025)
5d3491e  Merge branch 'master' into gg/llama-kv-cache  (ggerganov, Jan 31, 2025)
3e23be7  context : store graph build function callback  (ggerganov, Feb 2, 2025)
74b0807  Merge branch 'master' into gg/llama-kv-cache  (ggerganov, Feb 2, 2025)
1eca891  llama : fix rwkv inference (#11618)  (MollySophia, Feb 3, 2025)
e0d913f  llama : clear whitespaces  (ggerganov, Feb 6, 2025)
0f1c1ca  Merge branch 'master' into gg/llama-kv-cache  (ggerganov, Feb 6, 2025)
b15fede  kv-cache : fix defrag condition  (ggerganov, Feb 6, 2025)
972f91c  Merge branch 'master' into gg/llama-kv-cache  (ggerganov, Feb 10, 2025)
f9971ef  llama : dedup reserve code  (ggerganov, Feb 10, 2025)
879ba82  server : increase context size for the tests  (ggerganov, Feb 10, 2025)
ef358ee  context : add decode/encode  (ggerganov, Feb 10, 2025)
d1d8d53  bman : remove ubatch member  (ggerganov, Feb 10, 2025)
2cd8a90  context : make output functions members  (ggerganov, Feb 10, 2025)
02ef4be  context : initial abstraction  (ggerganov, Feb 11, 2025)
b52b79b  context : move encode/decode to llama-context.cpp  (ggerganov, Feb 12, 2025)
8da7f61  context : improve llama_context encapsulation  (ggerganov, Feb 12, 2025)
d146a14  context : minor naming fix  (ggerganov, Feb 12, 2025)
5eae8e5  context : move build_rope_factors to base class  (ggerganov, Feb 12, 2025)
e633dc1  context : introduce llama_graph_i  (ggerganov, Feb 12, 2025)
0ab50f1  context : prepare llama_model graph build  (ggerganov, Feb 12, 2025)
f63aeec  llama : models now build their graphs using llama_graph_i  (ggerganov, Feb 12, 2025)
6ee86e5  graph : restore ubatch in build_cb  (ggerganov, Feb 12, 2025)
fbe6a07  context : rename to llama_context_kv_self  (ggerganov, Feb 12, 2025)
3a504d9  llama : introduce llama_io interfaces  (ggerganov, Feb 13, 2025)
f7c7757  context : abstract state read/write  (ggerganov, Feb 13, 2025)
e08f38d  context : minor cleanup  (ggerganov, Feb 13, 2025)
107d1e2  context : move output functionality to base class  (ggerganov, Feb 13, 2025)
ed3cb55  context : abstract input  (ggerganov, Feb 13, 2025)
131743f  context : abstract constructor and init  (ggerganov, Feb 13, 2025)
d5e8e1a  context : remove batch_manager  (ggerganov, Feb 14, 2025)
8280645  context : move common inputs to base class  (ggerganov, Feb 14, 2025)
1d801d2  graph : update attn/kv_self names  (ggerganov, Feb 14, 2025)
f0d3ff2  Merge branch 'master' into gg/llama-kv-cache  (ggerganov, Feb 18, 2025)
c235903  graph : add llama_graph_result  (ggerganov, Feb 18, 2025)
172f616  cont : return important tensors  (ggerganov, Feb 18, 2025)
bc6f187  cont : use returned tensors from the graph build  (ggerganov, Feb 18, 2025)
befe14f  llama : reorder encode/decode in sources  (ggerganov, Feb 18, 2025)
9e50456  context : minor simplify  (ggerganov, Feb 18, 2025)
2bffc2d  model : pass llama_graph_i as ptr  (ggerganov, Feb 18, 2025)
f5cedbc  kv-cache : prepare for abstraction  (ggerganov, Feb 18, 2025)
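
Taken together, these commits rename the public KV-cache calls from the llama_kv_cache_* prefix to llama_kv_self_* (the functions now operate on the context's own cache), and the diffs below are largely the mechanical fallout of that rename across common/ and the examples. A minimal migration sketch, assuming a loaded llama_context * ctx; the helper name is hypothetical and not part of the PR:

#include "llama.h"

// Hypothetical helper showing the before/after of the rename; the argument
// lists are unchanged, only the prefix differs.
static void reset_sequence_zero(llama_context * ctx) {
    // before this PR: llama_kv_cache_seq_rm(ctx, 0, 0, -1);
    llama_kv_self_seq_rm(ctx, 0, 0, -1);

    // before this PR: llama_kv_cache_clear(ctx);
    llama_kv_self_clear(ctx);
}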
4 changes: 2 additions & 2 deletions common/common.cpp
@@ -953,7 +953,7 @@ struct common_init_result common_init_from_params(common_params & params) {
         return iparams;
     }

-    if (params.ctx_shift && !llama_kv_cache_can_shift(lctx)) {
+    if (params.ctx_shift && !llama_kv_self_can_shift(lctx)) {
         LOG_WRN("%s: KV cache shifting is not supported for this model, disabling KV cache shifting\n", __func__);
         params.ctx_shift = false;
     }
@@ -1058,7 +1058,7 @@ struct common_init_result common_init_from_params(common_params & params) {
     if (llama_model_has_decoder(model)) {
         llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch)));
     }
-    llama_kv_cache_clear(lctx);
+    llama_kv_self_clear(lctx);
     llama_synchronize(lctx);
     llama_perf_context_reset(lctx);
8 changes: 4 additions & 4 deletions common/speculative.cpp
@@ -172,7 +172,7 @@ llama_tokens common_speculative_gen_draft(
     result.reserve(params.n_draft);

     if (reuse_n == 0) {
-        llama_kv_cache_clear(ctx);
+        llama_kv_self_clear(ctx);

         prompt.clear();
     } else {
@@ -191,14 +191,14 @@ llama_tokens common_speculative_gen_draft(
     }

     if (reuse_i > 0) {
-        llama_kv_cache_seq_rm (ctx, 0, 0, reuse_i);
-        llama_kv_cache_seq_add(ctx, 0, reuse_i, -1, -reuse_i);
+        llama_kv_self_seq_rm (ctx, 0, 0, reuse_i);
+        llama_kv_self_seq_add(ctx, 0, reuse_i, -1, -reuse_i);

         prompt.erase(prompt.begin(), prompt.begin() + reuse_i);
     }

     if (reuse_n < (int) prompt.size()) {
-        llama_kv_cache_seq_rm (ctx, 0, reuse_n, -1);
+        llama_kv_self_seq_rm (ctx, 0, reuse_n, -1);

         prompt.erase(prompt.begin() + reuse_n, prompt.end());
     }
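
The hunks above implement draft-cache reuse: drop the first reuse_i cells of sequence 0, slide the survivors back by reuse_i positions so they start at 0, and truncate everything past the reusable window. A standalone sketch of the same pattern under the new names (the helper is hypothetical; n_prompt stands for the cached prompt length):

#include "llama.h"

// Keep only the reusable window of sequence 0 and renumber its positions,
// mirroring the reuse logic in common/speculative.cpp above.
static void keep_reusable_window(llama_context * ctx, int reuse_i, int reuse_n, int n_prompt) {
    if (reuse_i > 0) {
        llama_kv_self_seq_rm (ctx, 0, 0, reuse_i);            // drop the stale head
        llama_kv_self_seq_add(ctx, 0, reuse_i, -1, -reuse_i); // shift the rest to position 0
    }
    if (reuse_n < n_prompt) {
        llama_kv_self_seq_rm(ctx, 0, reuse_n, -1);            // drop the unmatched tail
    }
}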
4 changes: 2 additions & 2 deletions examples/batched-bench/batched-bench.cpp
@@ -132,7 +132,7 @@ int main(int argc, char ** argv) {

         const auto t_pp_start = ggml_time_us();

-        llama_kv_cache_clear(ctx);
+        llama_kv_self_clear(ctx);

         if (!decode_helper(ctx, batch, ctx_params.n_batch)) {
             LOG_ERR("%s: llama_decode() failed\n", __func__);
@@ -141,7 +141,7 @@ int main(int argc, char ** argv) {

         if (is_pp_shared) {
             for (int32_t i = 1; i < pl; ++i) {
-                llama_kv_cache_seq_cp(ctx, 0, i, -1, -1);
+                llama_kv_self_seq_cp(ctx, 0, i, -1, -1);
             }
         }
2 changes: 1 addition & 1 deletion examples/batched.swift/Sources/main.swift
@@ -116,7 +116,7 @@ if llama_decode(context, batch) != 0 {
 }

 for i in 1 ..< n_parallel {
-    llama_kv_cache_seq_cp(context, 0, Int32(i), 0, batch.n_tokens)
+    llama_kv_self_seq_cp(context, 0, Int32(i), 0, batch.n_tokens)
 }

 if n_parallel > 1 {
2 changes: 1 addition & 1 deletion examples/cvector-generator/cvector-generator.cpp
@@ -342,7 +342,7 @@ static bool cb_eval(struct ggml_tensor * t, bool ask, void * user_data) {
 }

 static bool get_hidden_layers(llama_context * ctx, std::vector<llama_token> & tokens) {
-    llama_kv_cache_clear(ctx);
+    llama_kv_self_clear(ctx);
     if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size()))) {
         fprintf(stderr, "%s : failed to eval\n", __func__);
         return false;
2 changes: 1 addition & 1 deletion examples/embedding/embedding.cpp
@@ -37,7 +37,7 @@ static void batch_decode(llama_context * ctx, llama_batch & batch, float * outpu
     const struct llama_model * model = llama_get_model(ctx);

     // clear previous kv_cache values (irrelevant for embeddings)
-    llama_kv_cache_clear(ctx);
+    llama_kv_self_clear(ctx);

     // run model
     LOG_INF("%s: n_tokens = %d, n_seq = %d\n", __func__, batch.n_tokens, n_seq);
4 changes: 2 additions & 2 deletions examples/gritlm/gritlm.cpp
@@ -45,7 +45,7 @@ static std::vector<std::vector<float>> encode(llama_context * ctx, const std::ve
     }

     // clear previous kv_cache values (irrelevant for embeddings)
-    llama_kv_cache_clear(ctx);
+    llama_kv_self_clear(ctx);
     llama_set_embeddings(ctx, true);
     llama_set_causal_attn(ctx, false);

@@ -102,7 +102,7 @@ static std::string generate(llama_context * ctx, llama_sampler * smpl, const std

     llama_token eos_token = llama_vocab_eos(vocab);

-    llama_kv_cache_clear(ctx);
+    llama_kv_self_clear(ctx);
     llama_set_embeddings(ctx, false);
     llama_set_causal_attn(ctx, true);
2 changes: 1 addition & 1 deletion examples/imatrix/imatrix.cpp
@@ -498,7 +498,7 @@ static bool compute_imatrix(llama_context * ctx, const common_params & params) {
         const auto t_start = std::chrono::high_resolution_clock::now();

         // clear the KV cache
-        llama_kv_cache_clear(ctx);
+        llama_kv_self_clear(ctx);

         llama_batch batch = llama_batch_init(n_batch, 0, 1);
4 changes: 2 additions & 2 deletions examples/infill/infill.cpp
@@ -332,8 +332,8 @@ int main(int argc, char ** argv) {
             LOG_DBG("context full, swapping: n_past = %d, n_left = %d, n_ctx = %d, n_keep = %d, n_discard = %d\n",
                     n_past, n_left, n_ctx, params.n_keep, n_discard);

-            llama_kv_cache_seq_rm (ctx, 0, params.n_keep + 1            , params.n_keep + n_discard + 1);
-            llama_kv_cache_seq_add(ctx, 0, params.n_keep + 1 + n_discard, n_past, -n_discard);
+            llama_kv_self_seq_rm (ctx, 0, params.n_keep + 1            , params.n_keep + n_discard + 1);
+            llama_kv_self_seq_add(ctx, 0, params.n_keep + 1 + n_discard, n_past, -n_discard);

             n_past -= n_discard;
4 changes: 2 additions & 2 deletions examples/llama-bench/llama-bench.cpp
@@ -1578,7 +1578,7 @@ int main(int argc, char ** argv) {

         test t(inst, lmodel, ctx);

-        llama_kv_cache_clear(ctx);
+        llama_kv_self_clear(ctx);

         // cool off before the test
         if (params.delay) {
@@ -1618,7 +1618,7 @@ int main(int argc, char ** argv) {
         }

         for (int i = 0; i < params.reps; i++) {
-            llama_kv_cache_clear(ctx);
+            llama_kv_self_clear(ctx);

             uint64_t t_start = get_time_ns();
8 changes: 4 additions & 4 deletions examples/llama.android/llama/src/main/cpp/llama-android.cpp
@@ -194,7 +194,7 @@ Java_android_llama_cpp_LLamaAndroid_bench_1model(
     }

     batch->logits[batch->n_tokens - 1] = true;
-    llama_kv_cache_clear(context);
+    llama_kv_self_clear(context);

     const auto t_pp_start = ggml_time_us();
     if (llama_decode(context, *batch) != 0) {
@@ -206,7 +206,7 @@ Java_android_llama_cpp_LLamaAndroid_bench_1model(

     LOGi("Benchmark text generation (tg)");

-    llama_kv_cache_clear(context);
+    llama_kv_self_clear(context);
     const auto t_tg_start = ggml_time_us();
     for (i = 0; i < tg; i++) {

@@ -223,7 +223,7 @@ Java_android_llama_cpp_LLamaAndroid_bench_1model(

     const auto t_tg_end = ggml_time_us();

-    llama_kv_cache_clear(context);
+    llama_kv_self_clear(context);

     const auto t_pp = double(t_pp_end - t_pp_start) / 1000000.0;
     const auto t_tg = double(t_tg_end - t_tg_start) / 1000000.0;
@@ -448,5 +448,5 @@ Java_android_llama_cpp_LLamaAndroid_completion_1loop(
 extern "C"
 JNIEXPORT void JNICALL
 Java_android_llama_cpp_LLamaAndroid_kv_1cache_1clear(JNIEnv *, jobject, jlong context) {
-    llama_kv_cache_clear(reinterpret_cast<llama_context *>(context));
+    llama_kv_self_clear(reinterpret_cast<llama_context *>(context));
 }
8 changes: 4 additions & 4 deletions examples/llama.swiftui/llama.cpp.swift/LibLlama.swift
@@ -210,7 +210,7 @@ actor LlamaContext {
         }
         batch.logits[Int(batch.n_tokens) - 1] = 1 // true

-        llama_kv_cache_clear(context)
+        llama_kv_self_clear(context)

         let t_pp_start = DispatchTime.now().uptimeNanoseconds / 1000;

@@ -223,7 +223,7 @@ actor LlamaContext {

         // bench text generation

-        llama_kv_cache_clear(context)
+        llama_kv_self_clear(context)

         let t_tg_start = DispatchTime.now().uptimeNanoseconds / 1000;

@@ -242,7 +242,7 @@ actor LlamaContext {

         let t_tg_end = DispatchTime.now().uptimeNanoseconds / 1000;

-        llama_kv_cache_clear(context)
+        llama_kv_self_clear(context)

         let t_pp = Double(t_pp_end - t_pp_start) / 1000000.0
         let t_tg = Double(t_tg_end - t_tg_start) / 1000000.0
@@ -292,7 +292,7 @@ actor LlamaContext {
     func clear() {
         tokens_list.removeAll()
         temporary_invalid_cchars.removeAll()
-        llama_kv_cache_clear(context)
+        llama_kv_self_clear(context)
     }

     private func tokenize(text: String, add_bos: Bool) -> [llama_token] {
12 changes: 6 additions & 6 deletions examples/lookahead/lookahead.cpp
@@ -95,7 +95,7 @@ int main(int argc, char ** argv) {
     llama_decode(ctx, llama_batch_get_one(&inp.back(), 1));

     for (int s = 1; s < W + G + 1; ++s) {
-        llama_kv_cache_seq_cp(ctx, 0, s, -1, -1);
+        llama_kv_self_seq_cp(ctx, 0, s, -1, -1);
     }

     const auto t_enc_end = ggml_time_us();
@@ -437,17 +437,17 @@ int main(int argc, char ** argv) {

         // KV cache management
         // if no verification token matched, we simply remove all cells from this batch -> no fragmentation
-        llama_kv_cache_seq_rm(ctx, -1, n_past, -1);
+        llama_kv_self_seq_rm(ctx, -1, n_past, -1);

         if (seq_id_best != 0) {
             // if a verification token matched, we keep the best sequence and remove the rest
             // this leads to some KV cache fragmentation
-            llama_kv_cache_seq_keep(ctx, seq_id_best);
-            llama_kv_cache_seq_cp  (ctx, seq_id_best, 0, -1, -1);
-            llama_kv_cache_seq_rm  (ctx, seq_id_best, -1, -1);
+            llama_kv_self_seq_keep(ctx, seq_id_best);
+            llama_kv_self_seq_cp  (ctx, seq_id_best, 0, -1, -1);
+            llama_kv_self_seq_rm  (ctx, seq_id_best, -1, -1);

             for (int s = 1; s < W + G + 1; ++s) {
-                llama_kv_cache_seq_cp(ctx, 0, s, -1, -1);
+                llama_kv_self_seq_cp(ctx, 0, s, -1, -1);
             }
         }
     }
2 changes: 1 addition & 1 deletion examples/lookup/lookup.cpp
@@ -192,7 +192,7 @@ int main(int argc, char ** argv){

         // KV cache management
         // clean the cache of draft tokens that weren't accepted
-        llama_kv_cache_seq_rm(ctx, 0, n_past, -1);
+        llama_kv_self_seq_rm(ctx, 0, n_past, -1);

         common_batch_clear(batch_tgt);
         common_batch_add(batch_tgt, draft[0], n_past, { 0 }, true);
12 changes: 6 additions & 6 deletions examples/main/main.cpp
@@ -328,7 +328,7 @@ int main(int argc, char ** argv) {
         }

         // remove any "future" tokens that we might have inherited from the previous session
-        llama_kv_cache_seq_rm(ctx, -1, n_matching_session_tokens, -1);
+        llama_kv_self_seq_rm(ctx, -1, n_matching_session_tokens, -1);
     }

     LOG_DBG("recalculate the cached logits (check): embd_inp.size() %zu, n_matching_session_tokens %zu, embd_inp.size() %zu, session_tokens.size() %zu\n",
@@ -571,8 +571,8 @@ int main(int argc, char ** argv) {
                 LOG_DBG("context full, swapping: n_past = %d, n_left = %d, n_ctx = %d, n_keep = %d, n_discard = %d\n",
                         n_past, n_left, n_ctx, params.n_keep, n_discard);

-                llama_kv_cache_seq_rm (ctx, 0, params.n_keep            , params.n_keep + n_discard);
-                llama_kv_cache_seq_add(ctx, 0, params.n_keep + n_discard, n_past, -n_discard);
+                llama_kv_self_seq_rm (ctx, 0, params.n_keep            , params.n_keep + n_discard);
+                llama_kv_self_seq_add(ctx, 0, params.n_keep + n_discard, n_past, -n_discard);

                 n_past -= n_discard;

@@ -595,9 +595,9 @@ int main(int argc, char ** argv) {
                 LOG_DBG("div: [%6d, %6d] / %6d -> [%6d, %6d]\n", ga_i + ib*bd, ga_i + ib*bd + ga_w, ga_n, (ga_i + ib*bd)/ga_n, (ga_i + ib*bd + ga_w)/ga_n);
                 LOG_DBG("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", ga_i + ib*bd + ga_w, n_past + ib*bd, dd, ga_i + ib*bd + ga_w + dd, n_past + ib*bd + dd);

-                llama_kv_cache_seq_add(ctx, 0, ga_i, n_past, ib*bd);
-                llama_kv_cache_seq_div(ctx, 0, ga_i + ib*bd, ga_i + ib*bd + ga_w, ga_n);
-                llama_kv_cache_seq_add(ctx, 0, ga_i + ib*bd + ga_w, n_past + ib*bd, dd);
+                llama_kv_self_seq_add(ctx, 0, ga_i, n_past, ib*bd);
+                llama_kv_self_seq_div(ctx, 0, ga_i + ib*bd, ga_i + ib*bd + ga_w, ga_n);
+                llama_kv_self_seq_add(ctx, 0, ga_i + ib*bd + ga_w, n_past + ib*bd, dd);

                 n_past -= bd;
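
Both main.cpp and infill.cpp above use the same context-shift recipe: once the context is full, discard n_discard tokens right after the n_keep prefix, then slide the remaining cells back so generation can continue. A condensed sketch, assuming a single sequence and caller-maintained n_past/n_keep (the helper itself is hypothetical):

#include "llama.h"

// Context-shift sketch following the swap logic in main.cpp above: returns
// the new n_past after discarding half of the non-kept tokens.
static int context_shift(llama_context * ctx, int n_past, int n_keep) {
    const int n_left    = n_past - n_keep;
    const int n_discard = n_left/2;

    llama_kv_self_seq_rm (ctx, 0, n_keep,             n_keep + n_discard);
    llama_kv_self_seq_add(ctx, 0, n_keep + n_discard, n_past, -n_discard);

    return n_past - n_discard;
}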
10 changes: 5 additions & 5 deletions examples/parallel/parallel.cpp
@@ -201,7 +201,7 @@ int main(int argc, char ** argv) {

     // assign the system KV cache to all parallel sequences
     for (int32_t i = 1; i <= n_clients; ++i) {
-        llama_kv_cache_seq_cp(ctx, 0, i, -1, -1);
+        llama_kv_self_seq_cp(ctx, 0, i, -1, -1);
     }

     LOG_INF("\n");
@@ -233,9 +233,9 @@ int main(int argc, char ** argv) {
         if (batch.n_tokens == 0) {
             // all sequences have ended - clear the entire KV cache
             for (int i = 1; i <= n_clients; ++i) {
-                llama_kv_cache_seq_rm(ctx, i, -1, -1);
+                llama_kv_self_seq_rm(ctx, i, -1, -1);
                 // but keep the system prompt
-                llama_kv_cache_seq_cp(ctx, 0, i, -1, -1);
+                llama_kv_self_seq_cp(ctx, 0, i, -1, -1);
             }

             LOG_INF("%s: clearing the KV cache\n", __func__);
@@ -371,8 +371,8 @@ int main(int argc, char ** argv) {
             }

             // delete only the generated part of the sequence, i.e. keep the system prompt in the cache
-            llama_kv_cache_seq_rm(ctx, client.id + 1, -1, -1);
-            llama_kv_cache_seq_cp(ctx, 0, client.id + 1, -1, -1);
+            llama_kv_self_seq_rm(ctx, client.id + 1, -1, -1);
+            llama_kv_self_seq_cp(ctx, 0, client.id + 1, -1, -1);

             const auto t_main_end = ggml_time_us();
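
The parallel example relies on sequence copies being cheap (with the unified cache, seq_cp tags existing cells with an extra sequence id rather than duplicating them): the system prompt lives in sequence 0 and each client sequence is that shared prefix plus its own generated tail, so resetting a client is remove-then-recopy. A minimal sketch of that reset (hypothetical helper; client sequences are numbered from 1, as above):

#include "llama.h"

// Reset one client while keeping the shared system prompt, as in
// examples/parallel/parallel.cpp above.
static void reset_client(llama_context * ctx, llama_seq_id client_seq) {
    llama_kv_self_seq_rm(ctx, client_seq, -1, -1);    // drop the client's cells
    llama_kv_self_seq_cp(ctx, 0, client_seq, -1, -1); // re-attach the system prompt
}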
28 changes: 14 additions & 14 deletions examples/passkey/passkey.cpp
@@ -132,11 +132,11 @@ int main(int argc, char ** argv) {
             const int ib = i/n_batch - 1;
             const int bd = n_batch_grp*(n_grp - 1);

-            llama_kv_cache_seq_add (ctx, 0, n_past - n_batch,         n_past,         ib*bd);
-            llama_kv_cache_seq_div (ctx, 0, n_past - n_batch + ib*bd, n_past + ib*bd, n_grp);
-            llama_kv_cache_update  (ctx);
+            llama_kv_self_seq_add (ctx, 0, n_past - n_batch,         n_past,         ib*bd);
+            llama_kv_self_seq_div (ctx, 0, n_past - n_batch + ib*bd, n_past + ib*bd, n_grp);
+            llama_kv_self_update  (ctx);

-            n_past = llama_kv_cache_seq_pos_max(ctx, 0) + 1;
+            n_past = llama_kv_self_seq_pos_max(ctx, 0) + 1;
         }

         common_batch_clear(batch);
@@ -166,12 +166,12 @@ int main(int argc, char ** argv) {

         LOG_INF("%s: shifting KV cache with %d\n", __func__, n_discard);

-        llama_kv_cache_seq_rm (ctx, 0, n_keep            , n_keep + n_discard);
-        llama_kv_cache_seq_add(ctx, 0, n_keep + n_discard, n_ctx, -n_discard);
-        //llama_kv_cache_defrag (ctx);
-        llama_kv_cache_update (ctx);
+        llama_kv_self_seq_rm (ctx, 0, n_keep            , n_keep + n_discard);
+        llama_kv_self_seq_add(ctx, 0, n_keep + n_discard, n_ctx, -n_discard);
+        //llama_kv_self_defrag (ctx);
+        llama_kv_self_update (ctx);

-        n_past = llama_kv_cache_seq_pos_max(ctx, 0) + 1;
+        n_past = llama_kv_self_seq_pos_max(ctx, 0) + 1;

         common_batch_clear(batch);

@@ -197,12 +197,12 @@ int main(int argc, char ** argv) {
         if (n_discard > 0) {
             LOG_INF("%s: shifting KV cache with %d to free space for the answer\n", __func__, n_discard);

-            llama_kv_cache_seq_rm (ctx, 0, n_keep            , n_keep + n_discard);
-            llama_kv_cache_seq_add(ctx, 0, n_keep + n_discard, n_ctx, -n_discard);
-            //llama_kv_cache_defrag (ctx);
-            llama_kv_cache_update (ctx);
+            llama_kv_self_seq_rm (ctx, 0, n_keep            , n_keep + n_discard);
+            llama_kv_self_seq_add(ctx, 0, n_keep + n_discard, n_ctx, -n_discard);
+            //llama_kv_self_defrag (ctx);
+            llama_kv_self_update (ctx);

-            n_past = llama_kv_cache_seq_pos_max(ctx, 0) + 1;
+            n_past = llama_kv_self_seq_pos_max(ctx, 0) + 1;
         }
     }
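
passkey.cpp is the main in-tree user of the position-arithmetic calls: llama_kv_self_seq_div compresses a span of cache positions by the group factor n_grp (grouped self-extend), and llama_kv_self_update applies the deferred shift before the new n_past is read back via llama_kv_self_seq_pos_max. A condensed sketch of the compression step, using the same variables as the first hunk above (the wrapper function is hypothetical):

#include "llama.h"

// Grouped self-extend step from examples/passkey/passkey.cpp above: shift the
// just-decoded batch, divide its positions by n_grp, apply the pending update,
// and return the resynchronized n_past.
static int compress_positions(llama_context * ctx, int n_past, int n_batch, int ib, int bd, int n_grp) {
    llama_kv_self_seq_add(ctx, 0, n_past - n_batch,         n_past,         ib*bd);
    llama_kv_self_seq_div(ctx, 0, n_past - n_batch + ib*bd, n_past + ib*bd, n_grp);
    llama_kv_self_update (ctx);

    return llama_kv_self_seq_pos_max(ctx, 0) + 1;
}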