fix: panic in Rust stack when llama model fails to load

release-0.2
Meng Zhang 2023-10-01 22:25:25 -07:00
parent 1fd3adbe0c
commit dfdd0373a6
2 changed files with 5 additions and 2 deletions

View File

@@ -123,7 +123,6 @@ std::unique_ptr<TextInferenceEngine> create_engine(rust::Str model_path) {
llama_model* model = llama_load_model_from_file(std::string(model_path).c_str(), model_params); llama_model* model = llama_load_model_from_file(std::string(model_path).c_str(), model_params);
if (!model) { if (!model) {
fprintf(stderr , "%s: error: unable to load model\n" , __func__);
return nullptr; return nullptr;
} }

View File

@@ -42,8 +42,12 @@ pub struct LlamaEngine {
impl LlamaEngine { impl LlamaEngine {
pub fn create(options: LlamaEngineOptions) -> Self { pub fn create(options: LlamaEngineOptions) -> Self {
let engine = create_engine(&options.model_path);
if engine.is_null() {
panic!("Unable to load model: {}", options.model_path);
}
LlamaEngine { LlamaEngine {
engine: Mutex::new(create_engine(&options.model_path)), engine: Mutex::new(engine),
tokenizer: Arc::new(Tokenizer::from_file(&options.tokenizer_path).unwrap()), tokenizer: Arc::new(Tokenizer::from_file(&options.tokenizer_path).unwrap()),
decoding_factory: DecodingFactory::default(), decoding_factory: DecodingFactory::default(),
} }