fix: avoid llama.cpp's racing (#923)

add-signin-page
Meng Zhang 2023-11-30 23:52:20 +08:00 committed by GitHub
parent b1dcea0b6c
commit ffd5ef3449
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
1 changed file with 10 additions and 0 deletions

View File

@ -4,6 +4,7 @@
#include <vector>
#include <deque>
#include <unordered_set>
#include <mutex>
#include <ggml.h>
#include <llama.h>
@ -126,6 +127,8 @@ class TextInferenceEngineImpl : public TextInferenceEngine {
}
rust::Vec<StepOutput> step() override {
std::lock_guard<std::mutex> guard(g_mutex_);
auto* ctx = ctx_.get();
auto n_vocab = llama_n_vocab(llama_get_model(ctx));
@ -275,8 +278,15 @@ class TextInferenceEngineImpl : public TextInferenceEngine {
std::unordered_set<uint32_t> stopped_requests_;
uint32_t parallelism_;
// llama.cpp is not thread safe
// FIXME(meng): remove the mutex once https://github.com/ggerganov/llama.cpp/issues/3960 is fixed
// and integrated to tabby's fork.
static std::mutex g_mutex_;
};
// Out-of-class definition for the static mutex declared inside
// TextInferenceEngineImpl; serializes all calls into llama.cpp
// (acquired via std::lock_guard in step()) because llama.cpp is
// not thread safe.
std::mutex TextInferenceEngineImpl::g_mutex_;
// File-local llama.cpp log verbosity; 0 by default (presumably the
// quietest level — confirm against the ggml_log_level handling below).
static int g_llama_cpp_log_level = 0;
static void llama_log_callback(ggml_log_level level, const char * text, void * user_data) {
(void)user_data;