diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4aec819..435cef0 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -64,6 +64,15 @@
 if (VULKAN_SDK)
     find_package(Vulkan REQUIRED)
 endif()
 
+find_program(PATCH patch REQUIRED)
+
+add_custom_target(
+    patch ALL
+    COMMAND ${PATCH} -p1 -N < ${CMAKE_SOURCE_DIR}/patches/llama.patch || true
+    WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/src/llama.cpp
+    COMMENT "Applying patches"
+)
+
 set(LLAMA_STATIC ON CACHE BOOL "Build llama as static library")
 add_subdirectory("src/llama.cpp")
diff --git a/package.json b/package.json
index b66fabf..8ce1614 100644
--- a/package.json
+++ b/package.json
@@ -38,6 +38,7 @@
     ]
   },
   "files": [
+    "patches/*.patch",
     "bin/**/*",
     "src/**/*.{c,cc,cpp,h,hh,hpp,txt,cmake}",
     "lib/*.js",
diff --git a/patches/llama.patch b/patches/llama.patch
new file mode 100644
index 0000000..7db150e
--- /dev/null
+++ b/patches/llama.patch
@@ -0,0 +1,22 @@
+diff --git a/ggml-vulkan.cpp b/ggml-vulkan.cpp
+index b9449be0..cfa0f774 100644
+--- a/ggml-vulkan.cpp
++++ b/ggml-vulkan.cpp
+@@ -525,9 +525,15 @@ static void ggml_vk_create_pipeline(ggml_backend_vk_context * ctx, vk_pipeline&
+         vk::PipelineCreateFlags(),
+         pipeline_shader_create_info,
+         pipeline->layout);
+-    pipeline->pipeline = ctx->device->device.createComputePipeline(VK_NULL_HANDLE, compute_pipeline_create_info).value;
+ 
+-    ctx->device->pipelines.push_back(pipeline);
++    try {
++        pipeline->pipeline = ctx->device->device.createComputePipeline(VK_NULL_HANDLE, compute_pipeline_create_info).value;
++        ctx->device->pipelines.push_back(pipeline);
++    } catch (vk::UnknownError const&) {
++        std::cerr << "ggml_vk_create_pipeline: Failed to create pipeline " << name << std::endl;
++        ggml_vk_destroy_pipeline(ctx->device->device, pipeline);
++        pipeline.reset();
++    }
+ }
+ 
+ static void ggml_vk_destroy_pipeline(vk::Device& device, vk_pipeline& pipeline) {