fix: panic in Rust stack when llama model fails to load
parent
1fd3adbe0c
commit
dfdd0373a6
|
|
@ -123,7 +123,6 @@ std::unique_ptr<TextInferenceEngine> create_engine(rust::Str model_path) {
|
|||
llama_model* model = llama_load_model_from_file(std::string(model_path).c_str(), model_params);
|
||||
|
||||
if (!model) {
|
||||
fprintf(stderr , "%s: error: unable to load model\n" , __func__);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -42,8 +42,12 @@ pub struct LlamaEngine {
|
|||
|
||||
impl LlamaEngine {
|
||||
pub fn create(options: LlamaEngineOptions) -> Self {
|
||||
let engine = create_engine(&options.model_path);
|
||||
if engine.is_null() {
|
||||
panic!("Unable to load model: {}", options.model_path);
|
||||
}
|
||||
LlamaEngine {
|
||||
engine: Mutex::new(create_engine(&options.model_path)),
|
||||
engine: Mutex::new(engine),
|
||||
tokenizer: Arc::new(Tokenizer::from_file(&options.tokenizer_path).unwrap()),
|
||||
decoding_factory: DecodingFactory::default(),
|
||||
}
|
||||
|
|
|
|||
Loading…
Reference in New Issue