#include "engine.h"

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <memory>
#include <string>
#include <vector>

#include <ggml.h>
#include <llama.h>

namespace llama {
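
// Implements the TextInferenceEngine interface declared in engine.h on top of
// llama.cpp. The rust::Str parameters indicate the engine is driven from Rust,
// presumably through a cxx-style bridge.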

TextInferenceEngine::~TextInferenceEngine() {}

namespace {

// Number of prompt tokens evaluated per llama_eval call.
static size_t N_BATCH = 512;

// unique_ptr with a runtime deleter, used to tie llama.cpp C handles
// (llama_model / llama_context) to their matching free functions.
template<class T>
using owned = std::unique_ptr<T, std::function<void(T*)>>;

// Tokenize `text`, keeping at most `max_input_length` tokens. If the text
// tokenizes to more than that, only the trailing max_input_length tokens are
// kept (the head of the prompt is dropped) and the BOS token is re-applied
// when requested.
std::vector<llama_token> tokenize(struct llama_context * ctx, const std::string & text, size_t max_input_length, bool add_bos) {
  // upper limit for the number of tokens
  int n_tokens = max_input_length;
  std::vector<llama_token> result(n_tokens);
  n_tokens = llama_tokenize(ctx, text.c_str(), result.data(), result.size(), add_bos);
  if (n_tokens < 0) {
    // The buffer was too small; llama_tokenize reports the required size as a
    // negative count, so regrow and tokenize again.
    result.resize(-n_tokens);
    int check = llama_tokenize(ctx, text.c_str(), result.data(), result.size(), add_bos);
    GGML_ASSERT(check == -n_tokens);

    // Keep only the trailing max_input_length tokens.
    int start = check - max_input_length;
    GGML_ASSERT(start >= 0);
    result = std::vector<llama_token>(result.begin() + start, result.end());
    if (add_bos) {
      result[0] = llama_token_bos(ctx);
    }
  } else {
    result.resize(n_tokens);
  }
  return result;
}
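
// Example: with max_input_length = 512, a prompt that tokenizes to 600 tokens
// first yields n_tokens = -600, the buffer is regrown to 600, and tokens
// [88, 600) are kept (start = 600 - 512 = 88), so only the tail of the prompt
// survives truncation.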

// Concrete engine implementation: owns the llama.cpp model and context handles
// and performs greedy, single-sequence decoding.
class TextInferenceEngineImpl : public TextInferenceEngine {
 public:
  TextInferenceEngineImpl(owned<llama_model> model, owned<llama_context> ctx) :
    model_(std::move(model)),
    ctx_(std::move(ctx)) {
  }

  uint32_t start(const rust::Str prompt, size_t max_input_length) const override {
    auto* ctx = ctx_.get();
    llama_reset_timings(ctx);
    std::vector<llama_token> tokens_list = tokenize(ctx, std::string(prompt), max_input_length, /* add_bos = */ false);

    // Feed the prompt to the model in chunks of at most N_BATCH tokens; the
    // first chunk starts from an empty KV cache, later chunks append to it.
    for (size_t i = 0; i < tokens_list.size(); i += N_BATCH) {
      const size_t size = std::min(N_BATCH, tokens_list.size() - i);
      eval(tokens_list.data() + i, size, /* reset = */ i == 0);
    }
    return sample();
  }

  uint32_t step(uint32_t next_token_id) const override {
    // Feed back the previously sampled token, then sample the next one.
    const llama_token id = next_token_id;
    eval(&id, 1, /* reset = */ false);
    return sample();
  }

  void end() const override {
    llama_print_timings(ctx_.get());
  }

  uint32_t eos_token() const override {
    return llama_token_eos(ctx_.get());
  }

 private:
  uint32_t sample() const {
    auto* ctx = ctx_.get();

    auto logits = llama_get_logits(ctx);
    auto n_vocab = llama_n_vocab(ctx);

    // Greedy sampling (always select the highest logit).
    return std::distance(logits, std::max_element(logits, logits + n_vocab));
  }

  bool eval(const llama_token* data, size_t size, bool reset) const {
    auto* ctx = ctx_.get();
    if (llama_eval(
      ctx,
      data,
      size,
      // n_past: 0 restarts decoding from position zero; otherwise continue
      // from the current KV cache length.
      reset ? 0 : llama_get_kv_cache_token_count(ctx),
      /* n_threads = */ 4)) {
      fprintf(stderr, "%s : failed to eval\n", __func__);
      return false;
    }

    return true;
  }

  owned<llama_model> model_;
  owned<llama_context> ctx_;
};

static int g_llama_cpp_log_level = 0;

// llama.cpp log hook: print a message only when its numeric level is below the
// threshold configured via LLAMA_CPP_LOG_LEVEL; the default threshold of 0
// keeps llama.cpp silent.
static void llama_log_callback(llama_log_level level, const char * text, void * user_data) {
  (void)user_data;
  if (level < g_llama_cpp_log_level) {
    fputs(text, stderr);
    fflush(stderr);
  }
}
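
// For example, launching the process with LLAMA_CPP_LOG_LEVEL=10 in the
// environment lets llama.cpp's log messages through the `level < threshold`
// check above, while leaving the variable unset keeps the default of 0 and
// suppresses them all.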

// RAII guard for llama.cpp global state: configures logging and initializes
// the backend on construction, frees the backend on destruction. Instantiated
// once as a function-local static in create_engine below.
struct BackendInitializer {
  BackendInitializer() {
    if (const char* level = std::getenv("LLAMA_CPP_LOG_LEVEL")) {
      g_llama_cpp_log_level = std::stoi(level);
    }
    llama_log_set(llama_log_callback, nullptr);
    llama_backend_init(false);
  }

  ~BackendInitializer() {
    llama_backend_free();
  }
};

}  // namespace

std::shared_ptr<TextInferenceEngine> create_engine(rust::Str model_path) {
  // One-time backend initialization, shared by every engine instance.
  static BackendInitializer initializer;

  llama_context_params ctx_params = llama_context_default_params();
  ctx_params.n_ctx = 2048;
  ctx_params.n_batch = N_BATCH;
  ctx_params.n_gpu_layers = 1;

  llama_model* model = llama_load_model_from_file(std::string(model_path).c_str(), ctx_params);

  if (!model) {
    fprintf(stderr, "%s: error: unable to load model\n", __func__);
    return nullptr;
  }

  llama_context* ctx = llama_new_context_with_model(model, ctx_params);

  return std::make_shared<TextInferenceEngineImpl>(
    owned<llama_model>(model, llama_free_model),
    owned<llama_context>(ctx, llama_free)
  );
}
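
// Illustrative only: a minimal sketch of how a caller might drive the engine
// (the real driver lives on the Rust side of the bindings, not in this file;
// `consume` and the model path are hypothetical).
//
//   auto engine = create_engine("/path/to/model");
//   uint32_t token = engine->start(prompt, /* max_input_length = */ 1024);
//   while (token != engine->eos_token()) {
//     consume(token);               // decode / stream the token
//     token = engine->step(token);
//   }
//   engine->end();                  // print llama.cpp timing stats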

}  // namespace llama