diff --git a/crates/llama-cpp-bindings/src/engine.cc b/crates/llama-cpp-bindings/src/engine.cc
index 2aeedab..0b335ae 100644
--- a/crates/llama-cpp-bindings/src/engine.cc
+++ b/crates/llama-cpp-bindings/src/engine.cc
@@ -123,7 +123,6 @@ std::unique_ptr create_engine(rust::Str model_path) {
   llama_model* model = llama_load_model_from_file(std::string(model_path).c_str(), model_params);

   if (!model) {
-    fprintf(stderr , "%s: error: unable to load model\n" , __func__);
     return nullptr;
   }

diff --git a/crates/llama-cpp-bindings/src/lib.rs b/crates/llama-cpp-bindings/src/lib.rs
index da91aa2..af26ede 100644
--- a/crates/llama-cpp-bindings/src/lib.rs
+++ b/crates/llama-cpp-bindings/src/lib.rs
@@ -42,8 +42,12 @@ pub struct LlamaEngine {
 impl LlamaEngine {
     pub fn create(options: LlamaEngineOptions) -> Self {
+        let engine = create_engine(&options.model_path);
+        if engine.is_null() {
+            panic!("Unable to load model: {}", options.model_path);
+        }
         LlamaEngine {
-            engine: Mutex::new(create_engine(&options.model_path)),
+            engine: Mutex::new(engine),
             tokenizer: Arc::new(Tokenizer::from_file(&options.tokenizer_path).unwrap()),
             decoding_factory: DecodingFactory::default(),
         }