From 43b1d71e43beeb6aa2041c0b6f42b21f73d89dc4 Mon Sep 17 00:00:00 2001 From: jhen Date: Sat, 21 Oct 2023 06:24:43 +0800 Subject: [PATCH] fix: handle sampling init failure --- android/src/main/jni.cpp | 6 +++++- cpp/rn-llama.hpp | 3 ++- ios/RNLlamaContext.mm | 4 +++- 3 files changed, 10 insertions(+), 3 deletions(-) diff --git a/android/src/main/jni.cpp b/android/src/main/jni.cpp index 970353c7..fa71c8e7 100644 --- a/android/src/main/jni.cpp +++ b/android/src/main/jni.cpp @@ -362,7 +362,11 @@ Java_com_rnllama_LlamaContext_doCompletion( env->ReleaseStringUTFChars(stop_str, stop_chars); } - llama->initSampling(); + if (!llama->initSampling()) { + auto result = createWriteableMap(env); + putString(env, result, "error", "Failed to initialize sampling"); + return reinterpret_cast<jobject>(result); + } llama->loadPrompt(); llama->beginCompletion(); diff --git a/cpp/rn-llama.hpp b/cpp/rn-llama.hpp index 074fd633..aeae7eb2 100644 --- a/cpp/rn-llama.hpp +++ b/cpp/rn-llama.hpp @@ -193,11 +193,12 @@ struct llama_rn_context params.sparams.n_prev = n_ctx; } - void initSampling() { + bool initSampling() { if (ctx_sampling != nullptr) { llama_sampling_free(ctx_sampling); } ctx_sampling = llama_sampling_init(params.sparams); + return ctx_sampling != nullptr; } bool loadModel(gpt_params &params_) diff --git a/ios/RNLlamaContext.mm b/ios/RNLlamaContext.mm index ee5e2177..29aab44e 100644 --- a/ios/RNLlamaContext.mm +++ b/ios/RNLlamaContext.mm @@ -197,7 +197,9 @@ - (NSDictionary *)completion:(NSDictionary *)params } } - llama->initSampling(); + if (!llama->initSampling()) { + @throw [NSException exceptionWithName:@"LlamaException" reason:@"Failed to initialize sampling" userInfo:nil]; + } llama->loadPrompt(); llama->beginCompletion();