fix(llama.cpp): bump upstream fix for starcoder model on cuda

release-notes-05
Meng Zhang 2023-10-28 02:03:34 -07:00
parent 3151d9100b
commit 444222683a
2 changed files with 2 additions and 2 deletions

@@ -1 +1 @@
-Subproject commit 5cc49e631f0902f33b10b7703b4d174fd635ccd9
+Subproject commit 638ff1aba1fa200f0bdc0ee3709176ddd783a49d

View File

@@ -106,7 +106,7 @@ std::unique_ptr<TextInferenceEngine> create_engine(bool use_gpu, rust::Str model
static BackendInitializer initializer;
llama_model_params model_params = llama_model_default_params();
-  model_params.n_gpu_layers = use_gpu ? 1 : 0;
+  model_params.n_gpu_layers = use_gpu ? 9999 : 0;
llama_model* model = llama_load_model_from_file(std::string(model_path).c_str(), model_params);
if (!model) {