From 4683906125efedb1bc8e43316bfecedfe734b7b9 Mon Sep 17 00:00:00 2001
From: Meng Zhang
Date: Thu, 26 Oct 2023 13:05:04 -0700
Subject: [PATCH 1/5] feat: upgrade llama.cpp

---
 crates/llama-cpp-bindings/llama.cpp     |  2 +-
 crates/llama-cpp-bindings/src/engine.cc | 18 +++---------------
 crates/tabby-common/src/path.rs         |  4 ++++
 crates/tabby/src/serve/engine.rs        |  2 +-
 4 files changed, 9 insertions(+), 17 deletions(-)

diff --git a/crates/llama-cpp-bindings/llama.cpp b/crates/llama-cpp-bindings/llama.cpp
index 6ed7dce31afd..5cc49e631f09 160000
--- a/crates/llama-cpp-bindings/llama.cpp
+++ b/crates/llama-cpp-bindings/llama.cpp
@@ -1 +1 @@
-Subproject commit 6ed7dce31afdf4d5a11ed8bfd0f993dcb8df39c0
+Subproject commit 5cc49e631f0902f33b10b7703b4d174fd635ccd9
diff --git a/crates/llama-cpp-bindings/src/engine.cc b/crates/llama-cpp-bindings/src/engine.cc
index 7f3f2986cd2a..e5d10c26821b 100644
--- a/crates/llama-cpp-bindings/src/engine.cc
+++ b/crates/llama-cpp-bindings/src/engine.cc
@@ -21,7 +21,6 @@ class TextInferenceEngineImpl : public TextInferenceEngine {
   TextInferenceEngineImpl(owned<llama_model> model, owned<llama_context> ctx) :
       model_(std::move(model)),
       ctx_(std::move(ctx)) {
-    batch_ = llama_batch_init(N_BATCH, 0);
   }
 
   void start(rust::Slice<const uint32_t> input_token_ids) override {
@@ -46,14 +45,14 @@
   }
 
   uint32_t eos_token() const override {
-    return llama_token_eos(ctx_.get());
+    return llama_token_eos(llama_get_model(ctx_.get()));
   }
 
  private:
   uint32_t sample() const {
     auto* ctx = ctx_.get();
 
-    auto logits = llama_get_logits_ith(ctx, batch_.n_tokens - 1);
+    auto logits = llama_get_logits_ith(ctx, 0);
     auto n_vocab = llama_n_vocab(llama_get_model(ctx));
 
     // Greedy sampling (always select the highest logit).
@@ -65,18 +64,9 @@
       n_past_ = 0;
     }
 
-    batch_.n_tokens = size;
-    for (size_t i = 0; i < size; ++i) {
-      batch_.token[i] = data[i];
-      batch_.pos[i] = n_past_ + i;
-      batch_.seq_id[i] = 0;
-      batch_.logits[i] = false;
-    }
-    batch_.logits[size - 1] = true;
-
     auto* ctx = ctx_.get();
     llama_kv_cache_tokens_rm(ctx, n_past_, -1);
-    if (llama_decode(ctx, batch_)) {
+    if (llama_decode(ctx, llama_batch_get_one(data, size, n_past_, 0))) {
       throw std::runtime_error("Failed to eval");
     }
 
@@ -86,8 +76,6 @@
   size_t n_past_;
   owned<llama_model> model_;
   owned<llama_context> ctx_;
-
-  llama_batch batch_;
 };
 
 static int g_llama_cpp_log_level = 0;
diff --git a/crates/tabby-common/src/path.rs b/crates/tabby-common/src/path.rs
index 9dc0ec0fc8f9..17717a40f114 100644
--- a/crates/tabby-common/src/path.rs
+++ b/crates/tabby-common/src/path.rs
@@ -89,4 +89,8 @@ impl ModelDir {
     pub fn ggml_q8_0_file(&self) -> String {
         self.path_string("ggml/q8_0.gguf")
     }
+
+    pub fn ggml_q8_0_v2_file(&self) -> String {
+        self.path_string("ggml/q8_0.v2.gguf")
+    }
 }
diff --git a/crates/tabby/src/serve/engine.rs b/crates/tabby/src/serve/engine.rs
index 8675bf32b097..b2e14ae08ce0 100644
--- a/crates/tabby/src/serve/engine.rs
+++ b/crates/tabby/src/serve/engine.rs
@@ -82,7 +82,7 @@ fn create_ctranslate2_engine(
 
 fn create_ggml_engine(device: &super::Device, model_dir: &ModelDir) -> Box<dyn TextGeneration> {
     let options = llama_cpp_bindings::LlamaEngineOptionsBuilder::default()
-        .model_path(model_dir.ggml_q8_0_file())
+        .model_path(model_dir.ggml_q8_0_v2_file())
         .tokenizer_path(model_dir.tokenizer_file())
         .use_gpu(device.ggml_use_gpu())
         .build()
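
A note on the engine.cc changes in PATCH 1/5: the upgraded llama.cpp replaces the manually managed `llama_batch` (the removed `llama_batch_init` call and fill loop) with `llama_batch_get_one`, which wraps an existing token buffer in a temporary batch without copying. A batch built this way carries `logits == nullptr`, so llama.cpp computes logits only for the last token and stores them at row 0 of the logits buffer, which is why `sample()` now reads `llama_get_logits_ith(ctx, 0)` instead of `batch_.n_tokens - 1`. Likewise, `llama_token_eos` now takes the `llama_model` rather than the context, since upstream moved vocabulary metadata onto the model object. Below is a minimal self-contained sketch of the new decode-and-sample flow, assuming the llama.cpp API at the pinned submodule commit (the four-argument `llama_batch_get_one` and `llama_kv_cache_tokens_rm` are the October 2023 names; both have since changed upstream); `decode_and_sample` is an illustrative helper, not part of Tabby's code:

    #include <stdexcept>
    #include <vector>

    #include "llama.h"

    // Decode `tokens` starting at KV-cache position `n_past`, then greedily
    // pick the next token, mirroring the patched TextInferenceEngineImpl flow.
    llama_token decode_and_sample(llama_context* ctx,
                                  std::vector<llama_token>& tokens,
                                  llama_pos n_past) {
      // Evict cache entries past the prefix being reused.
      llama_kv_cache_tokens_rm(ctx, n_past, -1);

      // Wrap the token buffer in a batch; logits == nullptr means llama.cpp
      // computes logits for the last token only.
      llama_batch batch = llama_batch_get_one(
          tokens.data(), (int32_t)tokens.size(), n_past, /*seq_id=*/0);
      if (llama_decode(ctx, batch)) {
        throw std::runtime_error("Failed to eval");
      }

      // With a null logits array, the last token's logits land at row 0.
      const float* logits = llama_get_logits_ith(ctx, 0);
      const int n_vocab = llama_n_vocab(llama_get_model(ctx));

      // Greedy sampling (always select the highest logit).
      llama_token best = 0;
      for (llama_token id = 1; id < n_vocab; ++id) {
        if (logits[id] > logits[best]) {
          best = id;
        }
      }
      return best;
    }
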
From 6ca603aff9b3025d44236eaf05f78011fcffc28f Mon Sep 17 00:00:00 2001
From: Meng Zhang
Date: Thu, 26 Oct 2023 13:41:56 -0700
Subject: [PATCH 2/5] update download files

---
 crates/tabby-download/src/lib.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/crates/tabby-download/src/lib.rs b/crates/tabby-download/src/lib.rs
index 4ce9a8abfa27..16cf31a8aa4a 100644
--- a/crates/tabby-download/src/lib.rs
+++ b/crates/tabby-download/src/lib.rs
@@ -48,7 +48,7 @@ impl Downloader {
         let files = vec![
             ("tabby.json", true),
             ("tokenizer.json", true),
-            ("ggml/q8_0.gguf", true),
+            ("ggml/q8_0.v2.gguf", true),
         ];
         self.download_files(&files).await
     }

From 00accd7d02fb500e2b8d36ee88a8d92f060c6399 Mon Sep 17 00:00:00 2001
From: Meng Zhang
Date: Thu, 26 Oct 2023 13:51:08 -0700
Subject: [PATCH 3/5] update changelog

---
 CHANGELOG.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 744449f2dafb..d573b63e7e81 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,8 +1,12 @@
 # v0.5.0 [Unreleased]
 
+## BREAKING CHANGES
+* llama.cpp backend (CPU, Metal) now requires a redownload of gguf model due to upstream format changes: https://github.com/TabbyML/tabby/pull/645
+
 ## Features
 
 ## Fixes and Improvements
+
 * Switch cpu backend to llama.cpp: https://github.com/TabbyML/tabby/pull/638
 * add `server.completion_timeout` to control the code completion interface timeout: https://github.com/TabbyML/tabby/pull/637
 

From a7e070cc085f9425e5eeeb85c895a9d6b142f57c Mon Sep 17 00:00:00 2001
From: Meng Zhang
Date: Thu, 26 Oct 2023 15:59:31 -0700
Subject: [PATCH 4/5] Update CHANGELOG.md

---
 CHANGELOG.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index d573b63e7e81..185f76abed94 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,7 +1,7 @@
 # v0.5.0 [Unreleased]
 
 ## BREAKING CHANGES
-* llama.cpp backend (CPU, Metal) now requires a redownload of gguf model due to upstream format changes: https://github.com/TabbyML/tabby/pull/645
+* llama.cpp backend (CPU, Metal) now requires a redownload of gguf model due to upstream format changes: https://github.com/TabbyML/tabby/pull/645 https://github.com/ggerganov/llama.cpp/pull/3252
 
 ## Features
 

From bc68bacc795d3e8dd350480e5782ac236ab0cae7 Mon Sep 17 00:00:00 2001
From: Meng Zhang
Date: Thu, 26 Oct 2023 16:19:24 -0700
Subject: [PATCH 5/5] Update CHANGELOG.md

---
 CHANGELOG.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 185f76abed94..ead851f841f8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,6 @@
 # v0.5.0 [Unreleased]
 
-## BREAKING CHANGES
+## Notice
 * llama.cpp backend (CPU, Metal) now requires a redownload of gguf model due to upstream format changes: https://github.com/TabbyML/tabby/pull/645 https://github.com/ggerganov/llama.cpp/pull/3252
 
 ## Features