diff --git a/.editorconfig b/.editorconfig
index 16d16b3b55bf5..f88f8da67cd78 100644
--- a/.editorconfig
+++ b/.editorconfig
@@ -26,3 +26,7 @@ indent_size = 2
[examples/llama.swiftui/llama.swiftui.xcodeproj/*]
indent_style = tab
+
+[examples/cvector-generator/*.txt]
+trim_trailing_whitespace = unset
+insert_final_newline = unset
diff --git a/.github/labeler.yml b/.github/labeler.yml
index 97d739b5811e8..5c12bab735e9c 100644
--- a/.github/labeler.yml
+++ b/.github/labeler.yml
@@ -42,7 +42,6 @@ build:
- cmake/**
- CMakeLists.txt
- CMakePresets.json
- - codecov.yml
examples:
- changed-files:
- any-glob-to-any-file: examples/**
diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
index e6d032d87df57..997c6d9d05397 100644
--- a/.github/pull_request_template.md
+++ b/.github/pull_request_template.md
@@ -1,5 +1,7 @@
-- Self Reported Review Complexity:
- - [ ] Review Complexity : Low
- - [ ] Review Complexity : Medium
- - [ ] Review Complexity : High
-- [ ] I have read the [contributing guidelines](https://github.com/ggerganov/llama.cpp/blob/master/CONTRIBUTING.md)
+
+
+- [x] I have read the [contributing guidelines](https://github.com/ggerganov/llama.cpp/blob/master/CONTRIBUTING.md)
+- Self-reported review complexity:
+ - [ ] Low
+ - [ ] Medium
+ - [ ] High
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 81ce770cce3a1..a8fcae0435e00 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -84,7 +84,7 @@ jobs:
name: llama-bin-macos-arm64.zip
macOS-latest-cmake-x64:
- runs-on: macos-latest
+ runs-on: macos-12
steps:
- name: Clone
diff --git a/.github/workflows/code-coverage.yml b/.github/workflows/code-coverage.yml
deleted file mode 100644
index f12c558f81bae..0000000000000
--- a/.github/workflows/code-coverage.yml
+++ /dev/null
@@ -1,40 +0,0 @@
-name: Code Coverage
-on: [push, pull_request]
-
-env:
- GGML_NLOOP: 3
- GGML_N_THREADS: 1
-
-concurrency:
- group: ${{ github.workflow }}-${{ github.head_ref && github.ref || github.run_id }}
- cancel-in-progress: true
-
-jobs:
- run:
- runs-on: ubuntu-20.04
- steps:
- - name: Checkout
- uses: actions/checkout@v4
-
- - name: Dependencies
- run: |
- sudo apt-get update
- sudo apt-get install build-essential gcc-8 lcov
-
- - name: Build
- run: CC=gcc-8 make -j LLAMA_CODE_COVERAGE=1 tests
-
- - name: Run tests
- run: CC=gcc-8 make test
-
- - name: Generate coverage report
- run: |
- make coverage
- make lcov-report
-
- - name: Upload coverage to Codecov
- uses: codecov/codecov-action@v3
- env:
- CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
- with:
- files: lcov-report/coverage.info
diff --git a/.github/workflows/server.yml b/.github/workflows/server.yml
index 1fee9ac281943..6155e94156e42 100644
--- a/.github/workflows/server.yml
+++ b/.github/workflows/server.yml
@@ -87,8 +87,22 @@ jobs:
exit 1
fi
+ - name: Build (no OpenMP)
+ id: cmake_build_no_openmp
+ if: ${{ matrix.sanitizer == 'THREAD' }}
+ run: |
+ cmake -B build \
+ -DLLAMA_NATIVE=OFF \
+ -DLLAMA_BUILD_SERVER=ON \
+ -DLLAMA_CURL=ON \
+ -DCMAKE_BUILD_TYPE=${{ matrix.build_type }} \
+ -DLLAMA_SANITIZE_${{ matrix.sanitizer }}=ON \
+ -DLLAMA_OPENMP=OFF ;
+ cmake --build build --config ${{ matrix.build_type }} -j $(nproc) --target llama-server
+
- name: Build
id: cmake_build
+ if: ${{ matrix.sanitizer != 'THREAD' }}
run: |
cmake -B build \
-DLLAMA_NATIVE=OFF \
diff --git a/.gitignore b/.gitignore
index 5296594952c4a..a0c16e880b719 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,90 +1,123 @@
-*.o
+# Extensions
+
*.a
-*.so
-*.gguf
-*.gguf.json
+*.bat
*.bin
-*.exe
*.dll
-*.log
-*.gcov
-*.gcno
-*.gcda
*.dot
-*.bat
-*.tmp
-*.metallib
*.etag
+*.exe
+*.gcda
+*.gcno
+*.gcov
+*.gguf
+*.gguf.json
*.lastModified
-.DS_Store
-.build/
+*.log
+*.metallib
+*.o
+*.so
+*.tmp
+
+# IDE / OS
+
.cache/
.ccls-cache/
.direnv/
+.DS_Store
.envrc
+.idea/
.swiftpm
-.venv
-.clang-tidy
.vs/
.vscode/
-.idea/
+nppBackup
-ggml-metal-embed.metal
-lcov-report/
+# Coverage
+
gcovr-report/
+lcov-report/
+
+# Build Artifacts
tags
+.build/
build*
+!build-info.cmake
+!build-info.cpp.in
+!build-info.sh
!build.zig
-cmake-build-*
+/libllama.so
+/llama-*
android-ndk-*
+arm_neon.h
+cmake-build-*
+CMakeSettings.json
+compile_commands.json
+ggml-metal-embed.metal
+llama-batched-swift
out/
tmp/
+# CI
+
+!.github/workflows/*.yml
+
+# Models
+
models/*
models-mnt
+!models/.editorconfig
+!models/ggml-vocab-*.gguf*
-/Pipfile
-/libllama.so
-/llama-*
-llama-batched-swift
-/common/build-info.cpp
-arm_neon.h
-compile_commands.json
-CMakeSettings.json
-
-__pycache__
-dist
+# Zig
zig-out/
zig-cache/
+# Logs
+
ppl-*.txt
qnt-*.txt
perf-*.txt
+# Examples
+
examples/jeopardy/results.txt
+examples/server/*.css.hpp
examples/server/*.html.hpp
examples/server/*.js.hpp
examples/server/*.mjs.hpp
-examples/server/*.css.hpp
+!build_64.sh
+!examples/*.bat
+!examples/*/*.kts
+!examples/*/*/*.kts
+!examples/sycl/*.bat
+!examples/sycl/*.sh
+# Python
+
+__pycache__
+.venv
+/Pipfile
+dist
poetry.lock
poetry.toml
-nppBackup
# Test binaries
-/tests/test-grammar-parser
-/tests/test-llama-grammar
+/tests/test-backend-ops
/tests/test-double-float
/tests/test-grad0
+/tests/test-grammar-parser
+/tests/test-llama-grammar
/tests/test-opt
/tests/test-quantize-fns
/tests/test-quantize-perf
+/tests/test-rope
/tests/test-sampling
/tests/test-tokenizer-0
-/tests/test-tokenizer-1-spm
/tests/test-tokenizer-1-bpe
-/tests/test-rope
-/tests/test-backend-ops
+/tests/test-tokenizer-1-spm
+
+# Scripts
+!/scripts/install-oneapi.bat
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8e280f87d9dd1..9cfe08d7b7d59 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -39,8 +39,12 @@ endif()
if (APPLE)
set(LLAMA_METAL_DEFAULT ON)
+ set(LLAMA_BLAS_DEFAULT ON)
+ set(LLAMA_BLAS_VENDOR_DEFAULT "Apple")
else()
set(LLAMA_METAL_DEFAULT OFF)
+ set(LLAMA_BLAS_DEFAULT OFF)
+ set(LLAMA_BLAS_VENDOR_DEFAULT "Generic")
endif()
set(LLAMA_LLAMAFILE_DEFAULT ON)
@@ -91,9 +95,10 @@ endif()
# 3rd party libs
option(LLAMA_ACCELERATE "llama: enable Accelerate framework" ON)
-option(LLAMA_BLAS "llama: use BLAS" OFF)
+option(LLAMA_BLAS "llama: use BLAS" ${LLAMA_BLAS_DEFAULT})
+set(LLAMA_BLAS_VENDOR ${LLAMA_BLAS_VENDOR_DEFAULT} CACHE STRING
+ "llama: BLAS library vendor")
option(LLAMA_LLAMAFILE "llama: use llamafile SGEMM" ${LLAMA_LLAMAFILE_DEFAULT})
-set(LLAMA_BLAS_VENDOR "Generic" CACHE STRING "llama: BLAS library vendor")
option(LLAMA_CUDA "llama: use CUDA" OFF)
option(LLAMA_CUBLAS "llama: use CUDA (deprecated, use LLAMA_CUDA)" OFF)
option(LLAMA_CUDA_FORCE_DMMV "llama: use dmmv instead of mmvq CUDA kernels" OFF)
@@ -114,6 +119,7 @@ option(LLAMA_HIP_UMA "llama: use HIP unified memory arch
option(LLAMA_VULKAN "llama: use Vulkan" OFF)
option(LLAMA_VULKAN_CHECK_RESULTS "llama: run Vulkan op checks" OFF)
option(LLAMA_VULKAN_DEBUG "llama: enable Vulkan debug output" OFF)
+option(LLAMA_VULKAN_MEMORY_DEBUG "llama: enable Vulkan memory debug output" OFF)
option(LLAMA_VULKAN_VALIDATE "llama: enable Vulkan validation" OFF)
option(LLAMA_VULKAN_RUN_TESTS "llama: run Vulkan tests" OFF)
option(LLAMA_METAL "llama: use Metal" ${LLAMA_METAL_DEFAULT})
@@ -311,9 +317,9 @@ if (LLAMA_BLAS)
if (LLAMA_STATIC)
set(BLA_STATIC ON)
endif()
- if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.22)
- set(BLA_SIZEOF_INTEGER 8)
- endif()
+ #if (CMAKE_VERSION VERSION_GREATER_EQUAL 3.22)
+ # set(BLA_SIZEOF_INTEGER 8)
+ #endif()
set(BLA_VENDOR ${LLAMA_BLAS_VENDOR})
find_package(BLAS)
@@ -321,7 +327,7 @@ if (LLAMA_BLAS)
if (BLAS_FOUND)
message(STATUS "BLAS found, Libraries: ${BLAS_LIBRARIES}")
- if ("${BLAS_INCLUDE_DIRS}" STREQUAL "")
+ if (("${BLAS_INCLUDE_DIRS}" STREQUAL "") AND NOT (${LLAMA_BLAS_VENDOR} MATCHES "Apple"))
# BLAS_INCLUDE_DIRS is missing in FindBLAS.cmake.
# see https://gitlab.kitware.com/cmake/cmake/-/issues/20268
find_package(PkgConfig REQUIRED)
@@ -374,12 +380,15 @@ if (LLAMA_BLAS)
add_compile_options(${BLAS_LINKER_FLAGS})
- add_compile_definitions(GGML_USE_OPENBLAS)
+ add_compile_definitions(GGML_USE_BLAS)
if (${BLAS_INCLUDE_DIRS} MATCHES "mkl" AND (${LLAMA_BLAS_VENDOR} MATCHES "Generic" OR ${LLAMA_BLAS_VENDOR} MATCHES "Intel"))
add_compile_definitions(GGML_BLAS_USE_MKL)
endif()
+ set(GGML_HEADERS_BLAS ggml-blas.h)
+ set(GGML_SOURCES_BLAS ggml-blas.cpp)
+
set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} ${BLAS_LIBRARIES})
set(LLAMA_EXTRA_INCLUDES ${LLAMA_EXTRA_INCLUDES} ${BLAS_INCLUDE_DIRS})
else()
@@ -526,6 +535,10 @@ if (LLAMA_VULKAN)
add_compile_definitions(GGML_VULKAN_DEBUG)
endif()
+ if (LLAMA_VULKAN_MEMORY_DEBUG)
+ add_compile_definitions(GGML_VULKAN_MEMORY_DEBUG)
+ endif()
+
if (LLAMA_VULKAN_VALIDATE)
add_compile_definitions(GGML_VULKAN_VALIDATE)
endif()
@@ -652,6 +665,7 @@ if (LLAMA_SYCL)
#todo: AOT
find_package(IntelSYCL REQUIRED)
+ find_package(MKL REQUIRED)
message(STATUS "SYCL found")
@@ -666,21 +680,22 @@ if (LLAMA_SYCL)
endif()
add_compile_options(-I./) #include DPCT
- add_compile_options(-I/${SYCL_INCLUDE_DIR})
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-narrowing")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -O3")
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsycl -L${MKLROOT}/lib")
if (LLAMA_SYCL_TARGET STREQUAL "NVIDIA")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsycl-targets=nvptx64-nvidia-cuda")
endif()
set(GGML_HEADERS_SYCL ggml-sycl.h)
- set(GGML_SOURCES_SYCL ggml-sycl.cpp)
+ file(GLOB GGML_SOURCES_SYCL "ggml-sycl/*.cpp")
+ list(APPEND GGML_SOURCES_SYCL "ggml-sycl.cpp")
if (WIN32)
- set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} -fsycl sycl7 OpenCL mkl_sycl_blas_dll.lib mkl_intel_ilp64_dll.lib mkl_sequential_dll.lib mkl_core_dll.lib)
+ set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} IntelSYCL::SYCL_CXX MKL::MKL MKL::MKL_SYCL)
else()
+ add_compile_options(-I/${SYCL_INCLUDE_DIR})
+ set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsycl -L${MKLROOT}/lib")
if (LLAMA_SYCL_TARGET STREQUAL "INTEL")
set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} -fsycl OpenCL mkl_core pthread m dl mkl_sycl_blas mkl_intel_ilp64 mkl_tbb_thread)
elseif (LLAMA_SYCL_TARGET STREQUAL "NVIDIA")
@@ -1258,6 +1273,7 @@ add_library(ggml OBJECT
${GGML_SOURCES_KOMPUTE} ${GGML_HEADERS_KOMPUTE}
${GGML_SOURCES_VULKAN} ${GGML_HEADERS_VULKAN}
${GGML_SOURCES_ROCM} ${GGML_HEADERS_ROCM}
+ ${GGML_SOURCES_BLAS} ${GGML_HEADERS_BLAS}
${GGML_SOURCES_LLAMAFILE} ${GGML_HEADERS_LLAMAFILE}
)
diff --git a/CMakePresets.json b/CMakePresets.json
index e2b7a79e371bf..fba22af9a6bab 100644
--- a/CMakePresets.json
+++ b/CMakePresets.json
@@ -11,9 +11,21 @@
"CMAKE_INSTALL_RPATH": "$ORIGIN;$ORIGIN/.."
}
},
-
+ {
+ "name": "sycl-base",
+ "hidden": true,
+ "generator": "Ninja",
+ "binaryDir": "${sourceDir}/build-${presetName}",
+ "cacheVariables": {
+ "CMAKE_EXPORT_COMPILE_COMMANDS": "ON",
+ "CMAKE_CXX_COMPILER": "icx",
+ "LLAMA_SYCL": "ON",
+ "CMAKE_INSTALL_RPATH": "$ORIGIN;$ORIGIN/.."
+ }
+ },
{ "name": "debug", "hidden": true, "cacheVariables": { "CMAKE_BUILD_TYPE": "Debug" } },
- { "name": "release", "hidden": true, "cacheVariables": { "CMAKE_BUILD_TYPE": "RelWithDebInfo" } },
+ { "name": "release", "hidden": true, "cacheVariables": { "CMAKE_BUILD_TYPE": "Release" } },
+ { "name": "reldbg", "hidden": true, "cacheVariables": { "CMAKE_BUILD_TYPE": "RelWithDebInfo" } },
{ "name": "static", "hidden": true, "cacheVariables": { "LLAMA_STATIC": "ON" } },
{
@@ -35,15 +47,18 @@
},
{ "name": "arm64-windows-llvm-debug" , "inherits": [ "base", "arm64-windows-llvm", "debug" ] },
- { "name": "arm64-windows-llvm-release", "inherits": [ "base", "arm64-windows-llvm", "release" ] },
- { "name": "arm64-windows-llvm+static-release", "inherits": [ "base", "arm64-windows-llvm", "release", "static" ] },
+ { "name": "arm64-windows-llvm-release", "inherits": [ "base", "arm64-windows-llvm", "reldbg" ] },
+ { "name": "arm64-windows-llvm+static-release", "inherits": [ "base", "arm64-windows-llvm", "reldbg", "static" ] },
{ "name": "arm64-windows-msvc-debug" , "inherits": [ "base", "arm64-windows-msvc", "debug" ] },
- { "name": "arm64-windows-msvc-release", "inherits": [ "base", "arm64-windows-msvc", "release" ] },
- { "name": "arm64-windows-msvc+static-release", "inherits": [ "base", "arm64-windows-msvc", "release", "static" ] },
+ { "name": "arm64-windows-msvc-release", "inherits": [ "base", "arm64-windows-msvc", "reldbg" ] },
+ { "name": "arm64-windows-msvc+static-release", "inherits": [ "base", "arm64-windows-msvc", "reldbg", "static" ] },
{ "name": "x64-windows-msvc-debug" , "inherits": [ "base", "debug" ] },
- { "name": "x64-windows-msvc-release", "inherits": [ "base", "release" ] },
- { "name": "x64-windows-msvc+static-release", "inherits": [ "base", "release", "static" ] }
+ { "name": "x64-windows-msvc-release", "inherits": [ "base", "reldbg" ] },
+ { "name": "x64-windows-msvc+static-release", "inherits": [ "base", "reldbg", "static" ] },
+
+ { "name": "x64-windows-sycl-debug" , "inherits": [ "sycl-base", "debug" ] },
+ { "name": "x64-windows-sycl-release", "inherits": [ "sycl-base", "release" ] }
]
}
diff --git a/Makefile b/Makefile
index a4cab1bb208bc..4ea59c0b4ef29 100644
--- a/Makefile
+++ b/Makefile
@@ -38,6 +38,7 @@ BUILD_TARGETS = \
llama-tokenize \
llama-train-text-from-scratch \
llama-vdot \
+ llama-cvector-generator \
tests/test-c.o
# Binaries only useful for tests
@@ -440,10 +441,11 @@ ifndef LLAMA_NO_ACCELERATE
# Mac OS - include Accelerate framework.
# `-framework Accelerate` works both with Apple Silicon and Mac Intel
ifeq ($(UNAME_S),Darwin)
- MK_CPPFLAGS += -DGGML_USE_ACCELERATE
+ MK_CPPFLAGS += -DGGML_USE_ACCELERATE -DGGML_USE_BLAS
MK_CPPFLAGS += -DACCELERATE_NEW_LAPACK
MK_CPPFLAGS += -DACCELERATE_LAPACK_ILP64
MK_LDFLAGS += -framework Accelerate
+ OBJS += ggml-blas.o
endif
endif # LLAMA_NO_ACCELERATE
@@ -454,21 +456,30 @@ ifndef LLAMA_NO_OPENMP
endif # LLAMA_NO_OPENMP
ifdef LLAMA_OPENBLAS
- MK_CPPFLAGS += -DGGML_USE_OPENBLAS $(shell pkg-config --cflags-only-I openblas)
+ MK_CPPFLAGS += -DGGML_USE_BLAS $(shell pkg-config --cflags-only-I openblas)
MK_CFLAGS += $(shell pkg-config --cflags-only-other openblas)
MK_LDFLAGS += $(shell pkg-config --libs openblas)
+ OBJS += ggml-blas.o
endif # LLAMA_OPENBLAS
-ifndef LLAMA_NO_LLAMAFILE
- MK_CPPFLAGS += -DGGML_USE_LLAMAFILE
- OBJS += sgemm.o
-endif
+ifdef LLAMA_OPENBLAS64
+ MK_CPPFLAGS += -DGGML_USE_BLAS $(shell pkg-config --cflags-only-I openblas64)
+ MK_CFLAGS += $(shell pkg-config --cflags-only-other openblas64)
+ MK_LDFLAGS += $(shell pkg-config --libs openblas64)
+ OBJS += ggml-blas.o
+endif # LLAMA_OPENBLAS64
ifdef LLAMA_BLIS
- MK_CPPFLAGS += -DGGML_USE_OPENBLAS -I/usr/local/include/blis -I/usr/include/blis
+ MK_CPPFLAGS += -DGGML_USE_BLAS -I/usr/local/include/blis -I/usr/include/blis
MK_LDFLAGS += -lblis -L/usr/local/lib
+ OBJS += ggml-blas.o
endif # LLAMA_BLIS
+ifndef LLAMA_NO_LLAMAFILE
+ MK_CPPFLAGS += -DGGML_USE_LLAMAFILE
+ OBJS += sgemm.o
+endif
+
ifdef LLAMA_RPC
MK_CPPFLAGS += -DGGML_USE_RPC
OBJS += ggml-rpc.o
@@ -496,7 +507,7 @@ ifdef LLAMA_CUDA
CUDA_PATH ?= /usr/local/cuda
endif
MK_CPPFLAGS += -DGGML_USE_CUDA -I$(CUDA_PATH)/include -I$(CUDA_PATH)/targets/$(UNAME_M)-linux/include -DGGML_CUDA_USE_GRAPHS
- MK_LDFLAGS += -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L$(CUDA_PATH)/lib64 -L/usr/lib64 -L$(CUDA_PATH)/targets/$(UNAME_M)-linux/lib -L/usr/lib/wsl/lib
+ MK_LDFLAGS += -lcuda -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L$(CUDA_PATH)/lib64 -L/usr/lib64 -L$(CUDA_PATH)/targets/$(UNAME_M)-linux/lib -L$(CUDA_PATH)/lib64/stubs -L/usr/lib/wsl/lib
OBJS += ggml-cuda.o
OBJS += $(patsubst %.cu,%.o,$(wildcard ggml-cuda/*.cu))
OBJS += $(OBJS_CUDA_TEMP_INST)
@@ -597,6 +608,10 @@ ifdef LLAMA_VULKAN_DEBUG
MK_CPPFLAGS += -DGGML_VULKAN_DEBUG
endif
+ifdef LLAMA_VULKAN_MEMORY_DEBUG
+ MK_CPPFLAGS += -DGGML_VULKAN_MEMORY_DEBUG
+endif
+
ifdef LLAMA_VULKAN_VALIDATE
MK_CPPFLAGS += -DGGML_VULKAN_VALIDATE
endif
@@ -776,6 +791,9 @@ ggml-backend.o: ggml-backend.c ggml.h ggml-backend.h
ggml-quants.o: ggml-quants.c ggml.h ggml-quants.h ggml-common.h
$(CC) $(CFLAGS) -c $< -o $@
+ggml-blas.o: ggml-blas.cpp ggml-blas.h
+ $(CXX) $(CXXFLAGS) -c $< -o $@
+
unicode.o: unicode.cpp unicode.h
$(CXX) $(CXXFLAGS) -c $< -o $@
@@ -909,6 +927,10 @@ llama-eval-callback: examples/eval-callback/eval-callback.cpp ggml.o llama.o $(C
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
+llama-cvector-generator: examples/cvector-generator/cvector-generator.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
+ $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
+ $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
+
llama-train-text-from-scratch: examples/train-text-from-scratch/train-text-from-scratch.cpp ggml.o llama.o $(COMMON_DEPS) train.o $(OBJS)
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
@@ -1029,7 +1051,7 @@ tests/test-grammar-parser: tests/test-grammar-parser.cpp ggml.o llama.o grammar-
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
-tests/test-grammar-integration: tests/test-grammar-integration.cpp ggml.o llama.o grammar-parser.o $(OBJS)
+tests/test-grammar-integration: tests/test-grammar-integration.cpp json-schema-to-grammar.o ggml.o llama.o grammar-parser.o $(OBJS)
$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
diff --git a/README-sycl.md b/README-sycl.md
index 93b623daf6a1a..b7e2bb12a68e8 100644
--- a/README-sycl.md
+++ b/README-sycl.md
@@ -1,6 +1,7 @@
# llama.cpp for SYCL
- [Background](#background)
+- [Recommended Release](#recommended-release)
- [News](#news)
- [OS](#os)
- [Hardware](#hardware)
@@ -31,8 +32,23 @@ When targeting **Intel CPU**, it is recommended to use llama.cpp for [Intel oneM
It has the similar design of other llama.cpp BLAS-based paths such as *OpenBLAS, cuBLAS, etc..*. In beginning work, the oneAPI's [SYCLomatic](https://github.com/oneapi-src/SYCLomatic) open-source migration tool (Commercial release [IntelĀ® DPC++ Compatibility Tool](https://www.intel.com/content/www/us/en/developer/tools/oneapi/dpc-compatibility-tool.html)) was used for this purpose.
+## Recommended Release
+
+The SYCL backend may be broken by some PRs, since there is no online CI for it.
+
+The following release has been verified to work well:
+
+|Commit ID|Tag|Release|Verified Platform|
+|-|-|-|-|
+|fb76ec31a9914b7761c1727303ab30380fd4f05c|b3038 |[llama-b3038-bin-win-sycl-x64.zip](https://github.com/ggerganov/llama.cpp/releases/download/b3038/llama-b3038-bin-win-sycl-x64.zip) |Arc770/Linux/oneAPI 2024.1<br>MTL Arc GPU/Windows 11/oneAPI 2024.1|
+
+
## News
+- 2024.5
+  - Performance increased from 34 to 37 tokens/s for llama-2-7b.Q4_0 on Arc770.
+  - Arch Linux verified successfully.
+
- 2024.4
- Support data types: GGML_TYPE_IQ4_NL, GGML_TYPE_IQ4_XS, GGML_TYPE_IQ3_XXS, GGML_TYPE_IQ3_S, GGML_TYPE_IQ2_XXS, GGML_TYPE_IQ2_XS, GGML_TYPE_IQ2_S, GGML_TYPE_IQ1_S, GGML_TYPE_IQ1_M.
@@ -394,15 +410,9 @@ Output (example):
4. Install build tools
-a. Download & install cmake for Windows: https://cmake.org/download/
-
-b. Download & install mingw-w64 make for Windows provided by w64devkit
-
-- Download the 1.19.0 version of [w64devkit](https://github.com/skeeto/w64devkit/releases/download/v1.19.0/w64devkit-1.19.0.zip).
+a. Download & install cmake for Windows: https://cmake.org/download/ (CMake can also be installed from the Visual Studio Installer)
+b. Recent versions of Visual Studio install Ninja by default. (If not, please install it manually: https://ninja-build.org/)
-- Extract `w64devkit` on your pc.
-
-- Add the **bin** folder path in the Windows system PATH environment (for e.g. `C:\xxx\w64devkit\bin\`).
### II. Build llama.cpp
@@ -412,10 +422,10 @@ On the oneAPI command line window, step into the llama.cpp main directory and ru
@call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 --force
# Option 1: Use FP32 (recommended for better performance in most cases)
-cmake -B build -G "MinGW Makefiles" -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icx -DCMAKE_BUILD_TYPE=Release
+cmake -B build -G "Ninja" -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=cl -DCMAKE_CXX_COMPILER=icx -DCMAKE_BUILD_TYPE=Release
# Option 2: Or FP16
-cmake -B build -G "MinGW Makefiles" -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icx -DCMAKE_BUILD_TYPE=Release -DLLAMA_SYCL_F16=ON
+cmake -B build -G "Ninja" -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=cl -DCMAKE_CXX_COMPILER=icx -DCMAKE_BUILD_TYPE=Release -DLLAMA_SYCL_F16=ON
cmake --build build --config Release -j
```
@@ -425,9 +435,23 @@ Otherwise, run the `win-build-sycl.bat` wrapper which encapsulates the former in
.\examples\sycl\win-build-sycl.bat
```
+Or, use CMake presets to build:
+```sh
+cmake --preset x64-windows-sycl-release
+cmake --build build-x64-windows-sycl-release -j --target llama-cli
+
+cmake -DLLAMA_SYCL_F16=ON --preset x64-windows-sycl-release
+cmake --build build-x64-windows-sycl-release -j --target llama-cli
+
+cmake --preset x64-windows-sycl-debug
+cmake --build build-x64-windows-sycl-debug -j --target llama-cli
+```
+
+Or, you can use Visual Studio to open the llama.cpp folder as a CMake project. Choose one of the SYCL CMake presets (`x64-windows-sycl-release` or `x64-windows-sycl-debug`) before compiling the project.
+
*Notes:*
-- By default, calling `make` will build all target binary files. In case of a minimal experimental setup, the user can build the inference executable only through `make llama-cli`.
+- For a minimal experimental setup, you can build only the inference executable with `cmake --build build --config Release -j --target llama-cli`.
### III. Run the inference
diff --git a/README.md b/README.md
index d1c6190ddfd7b..40793c8eab880 100644
--- a/README.md
+++ b/README.md
@@ -195,6 +195,7 @@ Unless otherwise noted these projects are open-source with permissive licensing:
- [cztomsik/ava](https://github.com/cztomsik/ava) (MIT)
- [ptsochantaris/emeltal](https://github.com/ptsochantaris/emeltal)
- [pythops/tenere](https://github.com/pythops/tenere) (AGPL)
+- [RAGNA Desktop](https://ragna.app/) (proprietary)
- [RecurseChat](https://recurse.chat/) (proprietary)
- [semperai/amica](https://github.com/semperai/amica)
- [withcatai/catai](https://github.com/withcatai/catai)
@@ -208,6 +209,7 @@ Unless otherwise noted these projects are open-source with permissive licensing:
- [eva](https://github.com/ylsdamxssjxxdd/eva) (MIT)
- [AI Sublime Text plugin](https://github.com/yaroslavyaroslav/OpenAI-sublime-text) (MIT)
- [AIKit](https://github.com/sozercan/aikit) (MIT)
+- [LARS - The LLM & Advanced Referencing Solution](https://github.com/abgulati/LARS) (AGPL)
*(to have a project listed here, it should clearly state that it depends on `llama.cpp`)*
@@ -386,6 +388,30 @@ brew install llama.cpp
```
The formula is automatically updated with new `llama.cpp` releases. More info: https://github.com/ggerganov/llama.cpp/discussions/7668
+### Nix
+
+On Mac and Linux, the Nix package manager can be used to install llama.cpp.
+
+For flake-enabled installs:
+```
+nix profile install nixpkgs#llama-cpp
+```
+
+For non-flake installs:
+```
+nix-env --file '<nixpkgs>' --install --attr llama-cpp
+```
+
+This expression is automatically updated within the [nixpkgs repo](https://github.com/NixOS/nixpkgs/blob/nixos-24.05/pkgs/by-name/ll/llama-cpp/package.nix#L164).
+
+#### Flox
+
+On Mac and Linux, Flox can be used to install llama.cpp within a Flox environment via
+```
+flox install llama-cpp
+```
+Flox follows the nixpkgs build of llama.cpp.
+
### Metal Build
On MacOS, Metal is enabled by default. Using Metal makes the computation run on the GPU.
@@ -622,9 +648,6 @@ python3 -m pip install -r requirements.txt
# convert the model to ggml FP16 format
python3 convert-hf-to-gguf.py models/mymodel/
-# [Optional] for models using BPE tokenizers
-python convert-hf-to-gguf.py models/mymodel/ --vocab-type bpe
-
# quantize the model to 4-bits (using Q4_K_M method)
./llama-quantize ./models/mymodel/ggml-model-f16.gguf ./models/mymodel/ggml-model-Q4_K_M.gguf Q4_K_M
diff --git a/codecov.yml b/codecov.yml
deleted file mode 100644
index a301c5b2c7694..0000000000000
--- a/codecov.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-comment: off
-
-coverage:
- status:
- project:
- default:
- target: auto
- threshold: 0
- base: auto
- patch:
- default:
- target: auto
- threshold: 0
- base: auto
diff --git a/common/common.cpp b/common/common.cpp
index 1591790e6df4c..cfdedcbae0cd9 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -6,7 +6,6 @@
#include "llama.h"
#include
-#include
#include
#include
#include
@@ -542,6 +541,7 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
/**/ if (value == "none") { params.pooling_type = LLAMA_POOLING_TYPE_NONE; }
else if (value == "mean") { params.pooling_type = LLAMA_POOLING_TYPE_MEAN; }
else if (value == "cls") { params.pooling_type = LLAMA_POOLING_TYPE_CLS; }
+ else if (value == "last") { params.pooling_type = LLAMA_POOLING_TYPE_LAST; }
else { invalid_param = true; }
return true;
}
@@ -1576,6 +1576,7 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
return true;
}
params.out_file = argv[i];
+ params.cvector_outfile = argv[i];
return true;
}
if (arg == "-ofreq" || arg == "--output-frequency") {
@@ -1610,6 +1611,55 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
params.i_chunk = std::stoi(argv[i]);
return true;
}
+ // cvector params
+ if (arg == "--completions-file") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.cvector_completions_file = argv[i];
+ return true;
+ }
+ if (arg == "--positive-file") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.cvector_positive_file = argv[i];
+ return true;
+ }
+ if (arg == "--negative-file") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.cvector_negative_file = argv[i];
+ return true;
+ }
+ if (arg == "--completions") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.n_completions = std::stoi(argv[i]);
+ return true;
+ }
+ if (arg == "--pca-batch") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.n_pca_batch = std::stoi(argv[i]);
+ return true;
+ }
+ if (arg == "--pca-iter") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.n_pca_iterations = std::stoi(argv[i]);
+ return true;
+ }
#ifndef LOG_DISABLE_LOGS
// Parse args for logging parameters
if (log_param_single_parse(argv[i])) {
@@ -1820,6 +1870,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
options.push_back({ "backend" });
options.push_back({ "*", " --rpc SERVERS", "comma separated list of RPC servers" });
+
if (llama_supports_mlock()) {
options.push_back({ "*", " --mlock", "force system to keep model in RAM rather than swapping or compressing" });
}
@@ -1931,6 +1982,16 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
options.push_back({ "logging", " --log-append", "Don't truncate the old log file." });
#endif // LOG_DISABLE_LOGS
+ options.push_back({ "cvector" });
+ options.push_back({ "cvector", "-o, --output FNAME", "output file (default: '%s')", params.cvector_outfile.c_str() });
+ options.push_back({ "cvector", " --positive-file FNAME", "positive prompts file, one prompt per line (default: '%s')", params.cvector_positive_file.c_str() });
+ options.push_back({ "cvector", " --negative-file FNAME", "negative prompts file, one prompt per line (default: '%s')", params.cvector_negative_file.c_str() });
+ options.push_back({ "cvector", " --completions-file FNAME",
+ "completions file (default: '%s')", params.cvector_completions_file.c_str() });
+ options.push_back({ "cvector", " --completions N", "number of lines of completions file to use (default: %d)", params.n_completions });
+ options.push_back({ "cvector", " --pca-batch N", "batch size used for PCA. Larger batch runs faster, but uses more memory (default: %d)", params.n_pca_batch });
+ options.push_back({ "cvector", " --pca-iter N", "number of iterations used for PCA (default: %d)", params.n_pca_iterations });
+
printf("usage: %s [options]\n", argv[0]);
for (const auto & o : options) {
@@ -2597,7 +2658,14 @@ static bool llama_download_file(const std::string & url, const std::string & pat
}
// Set the output file
-    std::unique_ptr<FILE, decltype(&fclose)> outfile(fopen(path_temporary.c_str(), "wb"), fclose);
+
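+    // an empty function-object deleter adds no storage to the unique_ptr below,
+    // unlike passing fclose directly (which would require storing a function pointer)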
+ struct FILE_deleter {
+ void operator()(FILE * f) const {
+ fclose(f);
+ }
+ };
+
+    std::unique_ptr<FILE, FILE_deleter> outfile(fopen(path_temporary.c_str(), "wb"));
if (!outfile) {
fprintf(stderr, "%s: error opening local file for writing: %s\n", __func__, path.c_str());
return false;
diff --git a/common/common.h b/common/common.h
index 2345d855eed3c..9a1dc4a2fe4c1 100644
--- a/common/common.h
+++ b/common/common.h
@@ -73,7 +73,6 @@ struct gpt_params {
int32_t n_gpu_layers_draft = -1; // number of layers to store in VRAM for the draft model (-1 - use default)
int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors
float tensor_split[128] = {0}; // how split tensors should be distributed across GPUs
- int32_t n_beams = 0; // if non-zero then use beam search of given width.
int32_t grp_attn_n = 1; // group-attention factor
int32_t grp_attn_w = 512; // group-attention width
int32_t n_print = -1; // print token count every n tokens (-1 = disabled)
@@ -232,6 +231,15 @@ struct gpt_params {
bool process_output = false; // collect data for the output tensor
bool compute_ppl = true; // whether to compute perplexity
+
+ // cvector-generator params
+ int n_completions = 64;
+ int n_pca_batch = 20;
+ int n_pca_iterations = 1000;
+ std::string cvector_outfile = "control_vector.gguf";
+ std::string cvector_completions_file = "examples/cvector-generator/completions.txt";
+ std::string cvector_positive_file = "examples/cvector-generator/positive.txt";
+ std::string cvector_negative_file = "examples/cvector-generator/negative.txt";
};
void gpt_params_handle_model_default(gpt_params & params);
diff --git a/convert-hf-to-gguf-update.py b/convert-hf-to-gguf-update.py
index f43b15760e1b2..67598b561e6cb 100755
--- a/convert-hf-to-gguf-update.py
+++ b/convert-hf-to-gguf-update.py
@@ -83,6 +83,7 @@ class TOKENIZER_TYPE(IntEnum):
{"name": "jina-v2-es", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-es", },
{"name": "jina-v2-de", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-de", },
{"name": "smaug-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct", },
+ {"name": "poro-chat", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LumiOpen/Poro-34B-chat", },
{"name": "jina-v2-code", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-code", },
]
@@ -213,7 +214,7 @@ def get_vocab_base_pre(self, tokenizer) -> str:
"""
convert_py_pth = pathlib.Path("convert-hf-to-gguf.py")
-convert_py = convert_py_pth.read_text()
+convert_py = convert_py_pth.read_text(encoding="utf-8")
convert_py = re.sub(
r"(# Marker: Start get_vocab_base_pre)(.+?)( +# Marker: End get_vocab_base_pre)",
lambda m: m.group(1) + src_func + m.group(3),
@@ -221,7 +222,7 @@ def get_vocab_base_pre(self, tokenizer) -> str:
flags=re.DOTALL | re.MULTILINE,
)
-convert_py_pth.write_text(convert_py)
+convert_py_pth.write_text(convert_py, encoding="utf-8")
logger.info("+++ convert-hf-to-gguf.py was updated")
diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index 025405a2c6ce1..3107b69f7e42e 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -477,6 +477,9 @@ def get_vocab_base_pre(self, tokenizer) -> str:
if chkhsh == "c136ed14d01c2745d4f60a9596ae66800e2b61fa45643e72436041855ad4089d":
# ref: https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct
res = "smaug-bpe"
+ if chkhsh == "c7ea5862a53e4272c035c8238367063e2b270d51faa48c0f09e9d5b54746c360":
+ # ref: https://huggingface.co/LumiOpen/Poro-34B-chat
+ res = "poro-chat"
if chkhsh == "7967bfa498ade6b757b064f31e964dddbb80f8f9a4d68d4ba7998fcf281c531a":
# ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-code
res = "jina-v2-code"
@@ -964,7 +967,11 @@ def set_vocab(self):
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(dir_model)
vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
- assert max(tokenizer.vocab.values()) < vocab_size
+ # Since we are checking the maximum index, we need to ensure it's strictly less than vocab_size,
+ # because vocab_size is the count of items, and indexes start at 0.
+ max_vocab_index = max(tokenizer.get_vocab().values())
+ if max_vocab_index >= vocab_size:
+ raise ValueError("Vocabulary size exceeds expected maximum size.")
reverse_vocab: dict[int, str] = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()}
added_vocab = tokenizer.get_added_vocab()
@@ -1629,6 +1636,12 @@ def set_gguf_parameters(self):
super().set_gguf_parameters()
if (n_experts := self.hparams.get("num_experts")) is not None:
self.gguf_writer.add_expert_count(n_experts)
+ if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None:
+ self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size)
+ logger.info(f"gguf: expert feed forward length = {moe_intermediate_size}")
+ if (shared_expert_intermediate_size := self.hparams.get('shared_expert_intermediate_size')) is not None:
+ self.gguf_writer.add_expert_shared_feed_forward_length(shared_expert_intermediate_size)
+ logger.info(f"gguf: expert shared feed forward length = {shared_expert_intermediate_size}")
_experts: list[dict[str, Tensor]] | None = None
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index d6ce35f4cc4e9..0b51c44c05e4e 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -12,6 +12,7 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR})
if (EMSCRIPTEN)
else()
+ add_subdirectory(cvector-generator)
add_subdirectory(baby-llama)
add_subdirectory(batched-bench)
add_subdirectory(batched)
diff --git a/examples/cvector-generator/CMakeLists.txt b/examples/cvector-generator/CMakeLists.txt
new file mode 100644
index 0000000000000..0a559d60c2a6d
--- /dev/null
+++ b/examples/cvector-generator/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(TARGET llama-cvector-generator)
+add_executable(${TARGET} cvector-generator.cpp pca.hpp)
+install(TARGETS ${TARGET} RUNTIME)
+target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_11)
diff --git a/examples/cvector-generator/README.md b/examples/cvector-generator/README.md
new file mode 100644
index 0000000000000..5182e906d9180
--- /dev/null
+++ b/examples/cvector-generator/README.md
@@ -0,0 +1,34 @@
+# cvector-generator
+
+This example demonstrates how to generate a control vector using gguf models.
+
+Related PRs:
+- [Add support for control vectors](https://github.com/ggerganov/llama.cpp/pull/5970)
+- (Issue) [Generate control vector using llama.cpp](https://github.com/ggerganov/llama.cpp/issues/6880)
+- [Add cvector-generator example](https://github.com/ggerganov/llama.cpp/pull/7514)
+
+## Examples
+
+```sh
+# CPU only
+./cvector-generator -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf
+
+# With GPU
+./cvector-generator -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf -ngl 99
+
+# With advanced options
+./cvector-generator -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf -ngl 99 --completions 128 --pca-iter 2000 --pca-batch 100
+
+# To see help message
+./cvector-generator -h
+# Then, have a look at "cvector" section
+```
+
+## Tips and tricks
+
+If a prompt spans multiple lines, escape each newline character as a literal `\n`. For example:
+
+```
+<|im_start|>system\nAct like a person who is extremely happy.<|im_end|>
+<|im_start|>system\nYou are in a very good mood today<|im_end|>
+```
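+
+Once a control vector has been generated, it can be applied at inference time. A rough sketch, assuming the `--control-vector-scaled FNAME SCALE` option from the control vector support PR linked above (a positive scale steers generation towards the positive prompts, a negative scale towards the negative ones):
+
+```sh
+./llama-cli -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf \
+    --control-vector-scaled ./control_vector.gguf 0.8 \
+    -p "Tell me about your day."
+```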
diff --git a/examples/cvector-generator/completions.txt b/examples/cvector-generator/completions.txt
new file mode 100644
index 0000000000000..abc45ffd87269
--- /dev/null
+++ b/examples/cvector-generator/completions.txt
@@ -0,0 +1,582 @@
+
+That game
+I can see
+Hmm, this
+I can relate to
+Who is
+I understand the
+Ugh,
+What the hell was
+Hey, did anyone
+Although
+Thank you for choosing
+What are you
+Oh w
+How dare you open
+It was my pleasure
+I'm hon
+I appreciate that you
+Are you k
+Whoever left this
+It's always
+Ew,
+Hey, I l
+Hello? Is someone
+I understand that
+That poem
+Aww, poor
+Hey, it
+Alright, who
+I didn't
+Well, life
+The document
+Oh no, this
+I'm concerned
+Hello, this is
+This art
+Hmm, this drink
+Hi there!
+It seems
+Is
+Good
+I can't
+Ex
+Who are
+I can see that
+Wow,
+Today is a
+Hey friend
+Sometimes friends
+Oh, this old
+The weather outside
+This place is sur
+I appreciate your input
+Thank you for the
+Look at
+I'm disappoint
+To my
+How dare you
+That's an
+This piece of art
+Eww
+This park is
+This is incredible
+Oh no, someone
+Exc
+Well, it'
+I warned
+Hey, I understand
+Hey, I saw
+How dare you go
+What the he
+Hey
+It's
+Hello? Hello?
+It
+Oh no!
+This is the perfect
+Good morning,
+Oh no, there
+It's so
+Yeah
+Uh,
+Hello everyone
+Who turned off
+The weather
+Who'
+Hey, this
+Wait,
+Eww, gross
+Excuse
+It seems like you
+Thank you so
+What happened?
+Oh my g
+I am deeply sad
+I war
+Okay, let'
+Hey, that
+That was a beautiful
+Oh no! That
+What happened
+Hey there
+The artist'
+What?!
+Hey, it'
+I am disappoint
+It seems like
+Oh no! The
+This park is a
+If you
+Yes! I did
+It sounds
+What
+Who is it
+Hmm, that
+That's strange
+Yeah, that was
+That's interesting
+This park
+What the hell
+Who is that
+I feel like my
+Oh well
+What the hell is
+Hello? Hello
+To my dearest
+Bless you!\"
+Thank you for
+Oh, looks like
+Can you please
+This place is
+Eww, what
+Bless you
+Is everything
+Hey, I just
+Whoever left these
+Well, that'
+I feel
+Hey, do you
+It's sad
+Oh no, it
+Hey, that'
+Oh my god,
+Thank you,
+Hello little one,
+I apolog
+Hey team, I
+How dare you read
+Who is this and
+Whoever left
+Hi there! W
+A
+If you have
+I was
+U
+Bless
+Well, this
+Oh, I'
+It's a
+Eww,
+Is everything okay?
+Oh, I
+Hello, can you
+Al
+That was a great
+What are
+I understand that not
+Oh no, not
+Who is it?\"
+Hey, can we
+Whoever is taking
+I would love to
+Hey, I noticed
+Hey, could
+I understand that there
+Hello?
+D
+Oh man, I
+Thank you so much
+Oh no, my
+Dear [Name
+Uh
+I remember
+Hey, who
+Well, it
+Are you
+I understand that it
+Hey, is
+I would
+Who is this
+Excuse me
+Alright
+I am thrilled
+Sometimes friends have
+Who the
+It's interesting
+I would love
+E
+Hello? Is anyone
+Well, this is
+This place
+Well,
+I warned you
+Hey, watch where
+Oh my
+That'
+Sometimes friends have different
+I understand that everyone
+What?
+What do these notes
+I can relate
+I'm not
+I understand
+To my dear
+Guys
+Well
+Hey, I appreciate
+Wow, what
+Dear
+That melody
+Who the hell
+Today is
+Hello little
+Wow, look
+That's great
+Love is never wrong
+I'm having
+Whoa, did
+Ugh
+Can you please provide
+I miss you,
+I feel uncom
+I know
+Ugh, this
+Hey, watch
+Oh great, a
+I didn
+Okay
+That game of char
+Oh
+I appreciate
+Who's there
+I am so
+Oh great, someone
+Hey, could you
+I remember wondering
+Wait, what?
+What do
+Hello? Can
+Hey there,
+That game of
+This is incred
+Oh my gosh
+Oh great, f
+I appreciate your
+It sounds like
+What the heck
+Okay, I understand
+Ew
+I understand that this
+Uh, hi
+Hi everyone!
+What the hell?
+Thank you for your
+Oh no, the
+Wow, I
+Who turned
+Dear [
+Whoever
+This is a
+Whoa, he
+What in the world
+Although the physical
+Hello, who is
+That's amaz
+Hey, I know
+Okay, that
+Hi everyone
+Hey, is everything
+I understand your fr
+Oh no, poor
+Oh, look
+Good morning
+Ew, gross
+Oh no, did
+Look at the family
+Hey team
+Yes!
+Hey, can I
+Okay, that'
+It's great
+Love is
+Hey, what
+Good morning, world
+Who is it?
+That poem really reson
+I
+That's
+I understand the task
+Gu
+Hello? Who'
+This postcard is
+Whoa,
+Oh, that
+I understand that I
+Whoever is
+Hello? Who is
+I'm really
+Wow, this
+Can
+This artwork really
+This is a shame
+I miss you too
+Who are you?
+Today is a difficult
+Hey, just
+Are you okay
+I am
+Hi,
+Wow, that
+Hey there! Can
+Okay, stay
+Oh great, just
+Yeah,
+Hello? Can you
+Oh, looks
+Thank you for sharing
+I'm glad
+Hey, is that
+Hmm
+It was my
+It sounds like you
+Wow, your
+I was promised certain
+That was such a
+Thank
+Excuse you
+That was
+Hey team,
+I feel un
+It was
+What'
+Hey friend, I
+How
+Saying goodbye
+That
+It's heart
+How dare
+Oh,
+Hello, may
+What's this
+Thank you for recogn
+Aww, that
+Oh, I remember
+Hmm, that'
+I miss
+I know this
+Wait
+Is everything okay
+Who is that person
+Wow, you
+Oh great
+I'm sad
+Wow, the
+I am very disappoint
+Who turned off the
+I understand that things
+I'm very
+Hi
+That's very
+Okay, I
+Oh no,
+Wow, there
+What's wrong
+I apologize for
+Hey, I
+Can I help you
+Oh, I didn
+Alright,
+Oh wow,
+Oh my goodness
+I know this event
+What in the
+Saying
+Yeah, that
+Guys, I
+Hey, this v
+This post
+Are
+Hey, can
+Hello? Is
+I can only imagine
+Oh, that sounds
+Hey, is anyone
+I am disappointed
+Hello,
+Hey everyone, I
+That was such
+It's okay
+The artist
+Whoa
+I understand that mistakes
+Can I help
+Who
+Hi everyone! I
+Hey, can you
+Wow, how
+Today
+Oh no, I
+Oh well, I
+Well, that
+This is the
+Yes! I finally
+Hey there little
+Hello everyone!
+Love is never
+Look at the
+This postcard
+Oh great,
+Can I
+Hmm, this is
+I understand your
+Oh, look at
+B
+I'm so
+Whoa, this
+W
+Oh, this
+Sometimes
+This piece of
+What the
+That was a
+Hey, do
+Oh no
+Whoa, what
+I feel like I
+The documentary
+Hello
+Hello little one
+I understand that my
+Eww, that
+Wow, an
+Yes! Finally,
+Although the physical location
+Whoever is watching
+That movie
+I remember wondering about
+Hey there, little
+Who's
+Hello, who
+Hello everyone! Thank
+Hello, can
+That's too
+Hey, just wanted
+Hey there, I
+Saying good
+Hey there!
+Who is there?
+Oh my good
+I am very
+Oh no, what
+Wow, thank
+I was promised
+Hi, is
+Hey, I'
+Guys, the
+Oh no, that
+Who is there
+Hello, this
+That movie really touched
+If you have something
+The documentary was
+I'm starting
+Are you kidd
+That movie really
+Hey everyone,
+Thank you for considering
+I didn'
+Yes! I
+Can you
+Oh my god
+Hey, whoever
+That melody really
+Thank you, little
+Hello, may I
+Look
+Wow, we
+It looks
+What do these
+Oh wow
+I apologize
+What are you all
+It's such
+It's clear
+Hey, I was
+Hey friend,
+I can only
+The weather outside is
+Eww, this
+I miss you
+Wow
+Aww,
+Hi, is there
+This artwork
+Okay,
+Oh well,
+This
+I'
+Say
+Hey there little gu
+Hmm,
+Whoa, who
+I am thr
+Oh man
+Okay, stay calm
+I'm happy
+Oh, this cur
+Oh man,
+I'm sorry
+Hello? Who
+What?! That
+This piece
+Hey everyone
+That's so
+Are you okay?
+What happened? Where
+Hi there
+The
+Who the hell entered
+I can
+Guys,
+What's
+What in
+It's important
+I'm
+I'm coming
+It'
+Yes! Finally
+Wait, what
+Wow, reading
+I'm surprised
+Hey, did
+Hey,
+Okay, let
+I understand that you
+Who the hell threw
+Eww, who
+Thank you for thinking
+Who is this?\"
+I am deeply
+Thank you for including
+Oh no, an
+It looks like you
+Aww
+I'm confused
+Wow, it
+That poem really
+Yes
+Hey there, is
+Hey, what'
+Thank you for remember
+To
+This is
+Thank you for making
+I can'
+That mel
+Wow, they
+I feel like
+Although the
+Who are you
+Love
+If
+What the hell are
+I am so sad
+Oh, I found
+Thank you
+It looks like
+Well, life is
+I appreciate that
+The artist's
+Whoa, that
+It's never
\ No newline at end of file
diff --git a/examples/cvector-generator/cvector-generator.cpp b/examples/cvector-generator/cvector-generator.cpp
new file mode 100644
index 0000000000000..355905cb03d60
--- /dev/null
+++ b/examples/cvector-generator/cvector-generator.cpp
@@ -0,0 +1,499 @@
+#include "common.h"
+#include "llama.h"
+#include "ggml.h"
+#include "pca.hpp"
+
+#ifdef GGML_USE_CUDA
+#include "ggml-cuda.h"
+#endif
+
+#ifdef GGML_USE_METAL
+#include "ggml-metal.h"
+#endif
+
+#include <algorithm>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <fstream>
+#include <iostream>
+#include <sstream>
+#include <string>
+#include <tuple>
+#include <vector>
+
+
+//////////////////////////////////////////////////
+// utils
+
+template <typename Iter>
+static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
+ std::string ret;
+ for (; begin != end; ++begin) {
+ ret += llama_token_to_piece(ctx, *begin);
+ }
+
+ return ret;
+}
+
+static void print_usage(int argc, char ** argv, const gpt_params & params) {
+ gpt_params_print_usage(argc, argv, params);
+
+ printf("\nexample usage:\n");
+ printf("\n CPU only: %s -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf\n", argv[0]);
+ printf("\n with GPU: %s -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf -ngl 99\n", argv[0]);
+ printf("\n advanced: %s -m ./dolphin-2.0-mistral-7b.Q4_K_M.gguf -ngl 99 --completions 128 --pca-iter 2000 --pca-batch 100\n", argv[0]);
+ printf("\n");
+}
+
+//////////////////////////////////////////////////
+
+
+// cb_eval is reused for each pair of positive - negative prompt
+struct callback_data {
+ ggml_context * ctx_ggml = nullptr; // holds v_pos, v_neg, v_diff_filtered
+
+ int n_layers = 0;
+ int n_tokens = 0;
+ bool is_eval_pos = true;
+
+    // each element of the vector corresponds to one layer
+    std::vector<struct ggml_tensor *> v_pos; // vector of matrices of size [n_embd, n_tokens]
+    std::vector<struct ggml_tensor *> v_neg; // vector of matrices of size [n_embd, n_tokens]
+    std::vector<struct ggml_tensor *> v_diff_filtered; // vector of matrices of size [n_embd, n_nonzero_rows]. NOTE: n_nonzero_rows may be different for each layer
+
+ // save a tensor into either v_pos or v_neg (decided by is_eval_pos)
+ void save_tensor_for_layer(struct ggml_tensor * t) {
+ GGML_ASSERT(t->type == GGML_TYPE_F32);
+
+ if (ctx_ggml == nullptr) {
+ // alloc a new ctx_ggml if needed
+ struct ggml_init_params params_ggml = {
+ /*.mem_size =*/ ggml_tensor_overhead() * n_layers * 3u,
+ /*.mem_buffer =*/ NULL,
+ /*.no_alloc =*/ true,
+ };
+ ctx_ggml = ggml_init(params_ggml);
+ }
+
+ // copy tensor data
+ auto n_bytes = ggml_nbytes(t);
+ struct ggml_tensor * t_layer = ggml_new_tensor_2d(ctx_ggml, t->type, t->ne[0], t->ne[1]);
+ t_layer->data = malloc(n_bytes); // TODO @ngxson : get rid of this malloc somehow
+ ggml_backend_tensor_get(t, t_layer->data, 0, n_bytes);
+ ggml_set_name(t_layer, ggml_get_name(t));
+ //print_debug_tensor(t_layer);
+
+ if (is_eval_pos) {
+ v_pos.push_back(t_layer);
+ } else {
+ v_neg.push_back(t_layer);
+ }
+ }
+
+ // calculate diff (v_pos - v_neg) and place the result back to v_pos
+ // all zero rows in the diff tensor will also be removed
+ // NOTE: final layer is ignored. we only have (n_layers - 1) to process
+    std::vector<struct ggml_tensor *> calc_diff() {
+        for (size_t il = 0; il < v_pos.size(); il++) {
+ float * a = (float *) v_pos[il]->data;
+ float * b = (float *) v_neg[il]->data;
+ size_t n_elem = ggml_nelements(v_pos[il]);
+ for (size_t j = 0; j < n_elem; j++) {
+ a[j] -= b[j];
+ }
+ //print_debug_tensor(v_pos[i]);
+ auto diff_filtered = filter_nonzero_rows(v_pos[il]);
+ v_diff_filtered.push_back(diff_filtered);
+ }
+        return v_diff_filtered; // for convenience, we return the result std::vector
+ }
+
+ // delete zero rows from a given 2D tensor
+ struct ggml_tensor * filter_nonzero_rows(struct ggml_tensor * a) {
+ //printf("filter_nonzero_rows\n");
+ auto is_row_all_zeros = [](struct ggml_tensor * t, int row, float eps) -> bool {
+            // check if the given row contains only zero elements
+ int n_cols = t->ne[0]; // hint: should be equal to n_embd
+ for (int col = 0; col < n_cols; ++col) {
+ if (ggml_get_f32_nd(t, col, row, 0, 0) > eps) {
+ return false;
+ }
+ }
+ return true;
+ };
+        std::vector<int> rows_to_copy; // the idx of non-zero rows (to be copied to rows of diff_filtered)
+ for (int i_row = 0; i_row < a->ne[1]; i_row++) {
+ if (!is_row_all_zeros(a, i_row, 1e-6)) {
+ rows_to_copy.push_back(i_row);
+ }
+ }
+
+ // get "n_nonzero_rows" for the output "diff_filtered"
+ int n_nonzero_rows = rows_to_copy.size();
+ //printf("n_nonzero_rows: %d\n", n_nonzero_rows);
+ int n_embd = a->ne[0];
+ GGML_ASSERT(n_nonzero_rows > 0);
+
+ // diff_filtered: [n_embd, n_nonzero_rows]
+ struct ggml_tensor * diff_filtered = ggml_new_tensor_2d(
+ ctx_ggml, GGML_TYPE_F32, n_embd, n_nonzero_rows);
+ ggml_format_name(diff_filtered, "diff_filtered_%s", a->name);
+ diff_filtered->data = malloc(ggml_nbytes(diff_filtered));
+
+ // copy non-zero rows
+ for (int dest_row = 0; dest_row < n_nonzero_rows; dest_row++) {
+ int src_row = rows_to_copy[dest_row];
+ for (int i = 0; i < n_embd; i++) {
+ float src_elem = ggml_get_f32_nd(a, i, src_row, 0, 0);
+ ggml_set_f32_nd(diff_filtered, i, dest_row, 0, 0, src_elem);
+ }
+ }
+
+ //print_debug_tensor(diff_filtered);
+
+ return diff_filtered;
+ }
+
+ // we don't implement destructor, because we want to reuse callback_data. we just want to free the tensors
+ void reset() {
+ for (auto ptr : v_pos) free(ptr->data);
+ for (auto ptr : v_neg) free(ptr->data);
+ for (auto ptr : v_diff_filtered) free(ptr->data);
+ v_pos.clear();
+ v_neg.clear();
+ v_diff_filtered.clear();
+ if (ctx_ggml) {
+ ggml_free(ctx_ggml);
+ }
+ ctx_ggml = nullptr;
+ }
+};
+
+/**
+ * train_context is used to store the ggml context for pre- and post-processing the diff vectors
+ * in short, input => v_diff and output => v_final
+ */
+struct train_context {
+ ggml_context * ctx_ggml;
+ int n_embd;
+ int n_layers;
+
+ /* pair of prompts to be used for generating final vector */
+    std::vector<std::string> positive_entries;
+    std::vector<std::string> negative_entries;
+
+    // each element of the vector corresponds to one layer
+    // NOTE: the last layer is discarded. therefore, we will have (n_layers - 1) elements here
+    // NOTE (2): v_diff is transposed from v_diff_tmp
+    std::vector<struct ggml_tensor *> v_diff;  // vector of matrices of size [m, n_embd] where m ~ n_tokens * n_completions (v_diff contains no zero-rows)
+    std::vector<struct ggml_tensor *> v_final; // vector of vectors of size [n_embd] to be written to file
+
+    // to easily re-alloc when concatenating v_diff, we temporarily store v_diff as raw bytes instead of a tensor
+    // v_diff_tmp will get converted into v_diff later on
+    std::vector<std::vector<uint8_t>> v_diff_tmp;
+
+ train_context(int n_embd_, int n_layers_) {
+ n_embd = n_embd_;
+ n_layers = n_layers_;
+ struct ggml_init_params params_ggml = {
+ /*.mem_size =*/ ggml_tensor_overhead() * (n_layers - 1) * 2u,
+ /*.mem_buffer =*/ NULL,
+ /*.no_alloc =*/ true,
+ };
+ ctx_ggml = ggml_init(params_ggml);
+ for (int il = 0; il < n_layers - 1; il++) {
+            std::vector<uint8_t> empty;
+ v_diff_tmp.push_back(empty);
+ auto t = ggml_new_tensor_1d(ctx_ggml, GGML_TYPE_F32, n_embd);
+ t->data = malloc(ggml_nbytes(t)); // TODO: get rid of malloc if possible
+ v_final.push_back(t);
+ }
+ }
+
+ // add new rows into existing tensor in v_diff_tmp
+    void concat_diff_tmp(const std::vector<struct ggml_tensor *> & diff_filtered) {
+ GGML_ASSERT((int) diff_filtered.size() == n_layers - 1);
+ for (int il = 0; il < n_layers - 1; il++) {
+ auto t = diff_filtered[il];
+ auto & diff_tmp = v_diff_tmp[il];
+ size_t curr_size = diff_tmp.size();
+ diff_tmp.resize(curr_size + ggml_nbytes(t));
+ memcpy(diff_tmp.data() + curr_size, t->data, ggml_nbytes(t));
+ }
+ }
+
+ // build the v_diff tensors from v_diff_tmp (v_diff need to be transposed)
+ // TODO @ngxson : maybe add option NOT to transpose v_diff; will be useful for "mean" method
+ void build_v_diff() {
+ printf("build_v_diff\n");
+ for (int il = 0; il < n_layers - 1; il++) {
+ auto & diff_tmp = v_diff_tmp[il];
+ int n_elem = diff_tmp.size() / sizeof(float);
+ GGML_ASSERT(n_elem % n_embd == 0);
+ int n_rows = n_elem / n_embd;
+ struct ggml_tensor * diff = ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_rows, n_embd);
+ ggml_set_name(diff, (std::string("diff_") + std::to_string(il)).c_str());
+ // copy data & transpose
+ diff->data = malloc(ggml_nbytes(diff)); // TODO: get rid of this malloc if possible
+ float * arr = (float *) diff_tmp.data();
+ for (int ir = 0; ir < n_rows; ++ir) {
+ for (int ic = 0; ic < n_embd; ++ic) {
+ float f = arr[ir*n_embd + ic];
+ ggml_set_f32_nd(diff, ir, ic, 0, 0, f);
+ }
+ }
+ v_diff.push_back(diff);
+ print_debug_tensor(diff);
+ // free memory of diff_tmp
+ diff_tmp.resize(0);
+ }
+ }
+
+ ~train_context() {
+ for (auto ptr : v_final) free(ptr->data);
+ for (auto ptr : v_diff) free(ptr->data);
+ // no need to free v_diff_tmp, since we didn't use malloc
+ ggml_free(ctx_ggml);
+ }
+};
+
+struct tokenized_prompt {
+    std::vector<llama_token> tokens_pos;
+    std::vector<llama_token> tokens_neg;
+ size_t max_seq_len;
+
+ tokenized_prompt(llama_context * ctx, std::string pos, std::string neg) {
+ const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
+ tokens_pos = ::llama_tokenize(ctx, pos, add_bos);
+ tokens_neg = ::llama_tokenize(ctx, neg, add_bos);
+ max_seq_len = std::max(tokens_pos.size(), tokens_neg.size());
+ padding_seq(ctx, tokens_pos, max_seq_len);
+ padding_seq(ctx, tokens_neg, max_seq_len);
+ }
+
+    void padding_seq(llama_context * ctx, std::vector<llama_token> & tokens, size_t len) {
+        // TODO: customize padding token
+        std::vector<llama_token> pad_tokens = ::llama_tokenize(ctx, " ", false);
+ llama_token pad_tok = pad_tokens.back();
+ while (tokens.size() < len) {
+ tokens.push_back(pad_tok);
+ }
+ }
+};
+
+//////////////////////////////////////////////////
+
+template <typename T>
+static std::string to_string(const T & val) {
+ std::stringstream ss;
+ ss << val;
+ return ss.str();
+}
+
+static std::vector<std::string> ctrlvec_load_prompt_file(std::string path, bool skip_empty_lines) {
+    std::vector<std::string> output;
+ std::ifstream file(path);
+ if (!file.is_open()) {
+ fprintf(stderr, "error: unable to open file: %s\n", path.c_str());
+ exit(1);
+ }
+ std::string line;
+ while (std::getline(file, line)) {
+ bool is_skip = skip_empty_lines && line.empty();
+ if (!is_skip) {
+ string_process_escapes(line);
+ output.push_back(line);
+ }
+ }
+ file.close();
+ return output;
+}
+
+//////////////////////////////////////////////////
+
+static bool cb_eval(struct ggml_tensor * t, bool ask, void * user_data) {
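+    // the scheduler calls this twice per tensor: first with ask == true to check whether we want
+    // to inspect the tensor's data, then with ask == false once the data is ready to be read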
+ auto * cb_data = (callback_data *) user_data;
+ static const char * l_out_name = "l_out";
+ const bool is_l_out = strncmp(t->name, l_out_name, strlen(l_out_name)) == 0;
+
+ if (ask) {
+ return is_l_out;
+ }
+
+ if (!is_l_out || t->ne[1] != cb_data->n_tokens) {
+ return true;
+ }
+
+ // save the tensor to current context
+ cb_data->save_tensor_for_layer(t);
+ return true;
+}
+
+static bool get_hidden_layers(llama_context * ctx, std::vector<llama_token> & tokens) {
+ llama_kv_cache_clear(ctx);
+ if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size(), 0, 0))) {
+ fprintf(stderr, "%s : failed to eval\n", __func__);
+ return false;
+ }
+ return true;
+}
+
+static void export_gguf(const std::vector<struct ggml_tensor *> & v_ctrl, const std::string fname, const std::string model_hint) {
+ struct gguf_context * ctx = gguf_init_empty();
+
+ const std::string arch = "controlvector";
+ gguf_set_val_str(ctx, "general.architecture", arch.c_str());
+ gguf_set_val_str(ctx, (arch + ".model_hint").c_str(), model_hint.c_str());
+ gguf_set_val_i32(ctx, (arch + ".layer_count").c_str(), v_ctrl.size());
+
+ for (size_t i = 0; i < v_ctrl.size(); ++i) {
+ gguf_add_tensor(ctx, v_ctrl[i]);
+ print_debug_tensor(v_ctrl[i]);
+ printf("Added tensor: %s\n", v_ctrl[i]->name);
+ }
+
+ printf("%s: writing file...\n", __func__);
+ gguf_write_to_file(ctx, fname.c_str(), false);
+ printf("%s: wrote file '%s'\n", __func__, fname.c_str());
+ gguf_free(ctx);
+}
+
+/**
+ * Load prompt files and completion file.
+ * Then format each pair of prompt + completion to make an entry.
+ */
+static int prepare_entries(gpt_params & params, train_context & ctx_train) {
+ // load prompts
+    std::vector<std::string> positive_prompts = ctrlvec_load_prompt_file(params.cvector_positive_file, true);
+    std::vector<std::string> negative_prompts = ctrlvec_load_prompt_file(params.cvector_negative_file, true);
+ if (positive_prompts.size() != negative_prompts.size()) {
+ fprintf(stderr, "number of positive and negative prompts must be equal\n");
+ return 1;
+ }
+ if (positive_prompts.empty()) {
+ fprintf(stderr, "must provide at least one prompt pair\n");
+ return 1;
+ }
+
+ // create templated prompts
+    std::vector<std::string> completions = ctrlvec_load_prompt_file(params.cvector_completions_file, false);
+ auto format_template = [](std::string persona, std::string suffix) {
+ // entry in positive/negative.txt must already be formatted i.e. "[INST] Act as if you're extremely happy. [/INST] "
+ return persona + suffix;
+ };
+ for (size_t i = 0; i < positive_prompts.size(); ++i) {
+ for (int j = 0; j < std::min((int) completions.size(), params.n_completions); ++j) {
+ // TODO replicate the truncations done by the python implementation
+ ctx_train.positive_entries.push_back(format_template(positive_prompts[i], completions[j]));
+ ctx_train.negative_entries.push_back(format_template(negative_prompts[i], completions[j]));
+ }
+ }
+ return 0;
+}
+
+int main(int argc, char ** argv) {
+ gpt_params params;
+
+ if (!gpt_params_parse(argc, argv, params)) {
+ print_usage(argc, argv, params);
+ return 1;
+ }
+
+ if (params.n_pca_iterations % params.n_pca_batch != 0) {
+ fprintf(stderr, "PCA iterations must by multiply of PCA batch size\n");
+ return 1;
+ }
+
+
+ callback_data cb_data;
+
+ // pass the callback to the backend scheduler
+ // it will be executed for each node during the graph computation
+ params.cb_eval = cb_eval;
+ params.cb_eval_user_data = &cb_data;
+ params.warmup = false;
+
+ print_build_info();
+ llama_backend_init();
+ llama_numa_init(params.numa);
+
+ // load the model to get hparams
+ llama_model * model;
+ llama_context * ctx;
+ std::tie(model, ctx) = llama_init_from_gpt_params(params);
+
+ // int n_ctx = llama_n_ctx(ctx);
+ int n_layers = llama_n_layer(model);
+ int n_embd = llama_n_embd(model);
+ // get model hint param (a.k.a model arch name)
+ char model_hint[128];
+ llama_model_meta_val_str(model, "general.architecture", model_hint, 128);
+
+ // init train_context
+ train_context ctx_train(n_embd, n_layers);
+
+ // load and prepare entries for training
+ prepare_entries(params, ctx_train);
+
+ // we have to pretokenize everything up front, because otherwise we don't know how much overhead to allocate for ctx_diffs_wrapped
+ std::vector<tokenized_prompt> tokenized_prompts;
+ size_t n_total_tokens = 0;
+ for (size_t i = 0; i < ctx_train.positive_entries.size(); ++i) {
+ tokenized_prompt t(ctx, ctx_train.positive_entries[i], ctx_train.negative_entries[i]);
+ n_total_tokens += 2 * t.max_seq_len;
+ tokenized_prompts.push_back(std::move(t));
+ }
+
+ std::cout << "n_total_tokens: " << n_total_tokens << std::endl;
+
+ for (size_t i = 0; i < ctx_train.positive_entries.size(); ++i) {
+ bool success = false;
+ tokenized_prompt t = tokenized_prompts[i];
+ cb_data.n_layers = n_layers;
+ cb_data.n_tokens = t.max_seq_len;
+
+ printf("Evaluating prompt[%d/%d]: \"%s\" - \"%s\" (%d tokens)\n",
+ (int) i+1, (int) ctx_train.positive_entries.size(),
+ tokens_to_str(ctx, t.tokens_pos.cbegin(), t.tokens_pos.cend()).c_str(),
+ tokens_to_str(ctx, t.tokens_neg.cbegin(), t.tokens_neg.cend()).c_str(),
+ (int) t.max_seq_len);
+
+ cb_data.is_eval_pos = true;
+ success = get_hidden_layers(ctx, t.tokens_pos);
+ if (!success) break;
+
+ cb_data.is_eval_pos = false;
+ success = get_hidden_layers(ctx, t.tokens_neg);
+ if (!success) break;
+
+ // calculate diff and remove all zero rows
+ auto v_diff_filtered = cb_data.calc_diff();
+
+ // save & concat the filtered v_diff to ctx_train
+ ctx_train.concat_diff_tmp(v_diff_filtered);
+
+ // reset for next iteration
+ cb_data.reset();
+ }
+
+ // done with the model, we can now free it to reclaim some memory
+ printf("Done evaluating prompts, unloading model...\n");
+ llama_free(ctx);
+ llama_free_model(model);
+
+ // prepare ctx_train for PCA
+ ctx_train.build_v_diff();
+
+ // run PCA
+ PCA::pca_params pca_params;
+ pca_params.n_threads = params.n_threads;
+ pca_params.n_batch = params.n_pca_batch;
+ pca_params.n_iterations = params.n_pca_iterations;
+ PCA::run_pca(pca_params, ctx_train.v_diff, ctx_train.v_final);
+
+ // write output vectors to gguf
+ export_gguf(ctx_train.v_final, params.cvector_outfile, model_hint);
+
+ llama_backend_free();
+
+ return 0;
+}
diff --git a/examples/cvector-generator/negative.txt b/examples/cvector-generator/negative.txt
new file mode 100644
index 0000000000000..3e9951752e886
--- /dev/null
+++ b/examples/cvector-generator/negative.txt
@@ -0,0 +1 @@
+[INST] Act like a person who is extremely sad. [/INST]
diff --git a/examples/cvector-generator/pca.hpp b/examples/cvector-generator/pca.hpp
new file mode 100644
index 0000000000000..36eadaac26a12
--- /dev/null
+++ b/examples/cvector-generator/pca.hpp
@@ -0,0 +1,322 @@
+#include "common.h"
+#include "llama.h"
+#include "ggml.h"
+
+#ifdef GGML_USE_CUDA
+#include "ggml-cuda.h"
+#endif
+
+#ifdef GGML_USE_METAL
+#include "ggml-metal.h"
+#endif
+
+#include <algorithm>
+#include <cmath>
+#include <cstdio>
+#include <ctime>
+#include <iostream>
+#include <random>
+#include <string>
+#include <vector>
+
+#define DEBUG_POS 5
+
+static void print_debug_tensor(struct ggml_tensor * t, bool with_data = true) {
+ printf("%s: %s (%s): [%d, %d]\n", __func__, t->name, ggml_type_name(t->type), (int) t->ne[0], (int) t->ne[1]);
+ if (!with_data) return;
+ printf("%s: %s[0] = [", __func__, t->name);
+ for (size_t i = 0; i <= DEBUG_POS; i++) {
+ printf(" %f,", ggml_get_f32_nd(t, i, 0, 0, 0));
+ }
+ printf(" ... ]\n");
+}
+
+namespace PCA {
+
+// input params for PCA computations
+struct pca_params {
+ int n_threads = 1;
+ int n_batch = 20; // number of iterations to do in one batch; the larger the batch, the more memory is used
+ int n_iterations = 1000;
+ float tolerance = 1e-7;
+
+ // for debugging
+ int i_layer = 0;
+ int n_layers = 0;
+};
+
+// result from each iteration
+struct pca_result {
+ struct ggml_tensor * calculated_square = NULL;
+ std::vector<struct ggml_tensor *> eigenvectors;
+ std::vector<float> distances;
+};
+
+struct pca_model {
+ ggml_backend_t backend = NULL;
+ ggml_backend_buffer_t buffer;
+ struct ggml_context * ctx; // context to compute graph on target device
+ struct ggml_context * ctx_host; // host context to store results
+
+ // tensors on target device
+ struct ggml_tensor * dev_input;
+ struct ggml_tensor * dev_square;
+ struct ggml_tensor * dev_eigenvector;
+
+ pca_model(struct ggml_tensor * t_input) {
+#ifdef GGML_USE_CUDA
+ fprintf(stderr, "%s: using CUDA backend\n", __func__);
+ backend = ggml_backend_cuda_init(0); // init device 0
+ if (!backend) {
+ fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__);
+ }
+#endif
+
+// TODO: enable Metal support when support for GGML_OP_SQRT is added
+// #ifdef GGML_USE_METAL
+// fprintf(stderr, "%s: using Metal backend\n", __func__);
+// backend = ggml_backend_metal_init();
+// if (!backend) {
+// fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
+// }
+// #endif
+
+ // if no GPU backend is available, fall back to the CPU backend
+ if (!backend) {
+ backend = ggml_backend_cpu_init();
+ }
+
+ const int num_tensors = 4;
+ struct ggml_init_params params {
+ /*.mem_size =*/ ggml_tensor_overhead() * num_tensors,
+ /*.mem_buffer =*/ NULL,
+ /*.no_alloc =*/ true,
+ };
+ ctx = ggml_init(params);
+
+ auto n_samples = t_input->ne[0];
+ auto n_embd = t_input->ne[1];
+
+ dev_input = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_samples, n_embd);
+ dev_square = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_embd);
+ dev_eigenvector = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
+
+ ggml_set_name(dev_input, "dev_input");
+ ggml_set_name(dev_square, "dev_square");
+ ggml_set_name(dev_eigenvector, "dev_eigenvector");
+ buffer = ggml_backend_alloc_ctx_tensors(ctx, backend);
+ ggml_backend_tensor_set(dev_input, t_input->data, 0, ggml_nbytes(t_input));
+
+ // initialize eigenvector to random normalized vector
+ {
+ std::vector<float> random_vec(ggml_nelements(dev_eigenvector), 0.0);
+ std::default_random_engine generator(static_cast<unsigned int>(std::time(0)));
+ std::uniform_real_distribution<float> distribution(0.0, 1.0);
+ float sum_sqr = 0.0; // for normalizing random_vec
+ for (size_t i = 0; i < random_vec.size(); ++i) {
+ float f = distribution(generator);
+ sum_sqr += f * f;
+ random_vec[i] = f;
+ }
+ // normalize it
+ float random_vec_norm = std::sqrt(sum_sqr);
+ for (size_t i = 0; i < random_vec.size(); ++i) {
+ random_vec[i] /= random_vec_norm;
+ }
+ ggml_backend_tensor_set(dev_eigenvector, random_vec.data(), 0, ggml_nbytes(dev_eigenvector));
+ }
+ }
+
+ ~pca_model() {
+ ggml_free(ctx);
+ ggml_backend_buffer_free(buffer);
+ ggml_backend_free(backend);
+ }
+};
+
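+// build_graph_piter() unrolls params.n_batch steps of the power-iteration update
+//   b <- A * b / ||A * b||
+// where A (dev_square) is the n_embd x n_embd second-moment matrix of the hidden-state
+// differences; repeating the update converges to the dominant eigenvector of A, i.e. the
+// principal direction that is exported as the control vector for a layer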
+static struct ggml_cgraph * build_graph_piter(
+ const struct pca_params & params,
+ const pca_model & model,
+ bool calc_square = false) {
+ GGML_ASSERT(params.n_batch > 0);
+ // TODO: buf_size must be able to scale with params.n_batch
+ static size_t buf_size = ggml_tensor_overhead()*GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead();
+ static std::vector<uint8_t> buf(buf_size);
+
+ struct ggml_init_params params0 = {
+ /*.mem_size =*/ buf_size,
+ /*.mem_buffer =*/ buf.data(),
+ /*.no_alloc =*/ true, // the tensors will be allocated later by ggml_allocr_alloc_graph()
+ };
+ // create a temporary context to build the graph
+ struct ggml_context * ctx0 = ggml_init(params0);
+ struct ggml_cgraph * gf = ggml_new_graph(ctx0);
+
+ // turn v_diff_original into square matrix if needed
+ struct ggml_tensor * tmp_square;
+ if (calc_square) {
+ tmp_square = ggml_mul_mat(ctx0, model.dev_input, model.dev_input);
+ ggml_set_name(tmp_square, "tmp_square");
+ }
+
+ struct ggml_tensor * b_tensor;
+ struct ggml_tensor * distance;
+ struct ggml_tensor * old_eigen = model.dev_eigenvector;
+ struct ggml_tensor * input_square = calc_square ? tmp_square : model.dev_square;
+
+ for (int i = 0; i < params.n_batch; ++i) {
+ // b_tensor = square * eigenvector^T
+ b_tensor = ggml_mul_mat(ctx0, input_square, old_eigen);
+ ggml_set_name(b_tensor, "b_tensor");
+
+ // normalize
+ b_tensor = ggml_div_inplace(ctx0,
+ b_tensor,
+ ggml_sqrt_inplace(ctx0, ggml_sum_rows(ctx0, ggml_sqr(ctx0, b_tensor)))
+ );
+ ggml_format_name(b_tensor, "b_tensor_norm_%d", i);
+
+ // calculate distance(new eigenvector - old eigenvector)
+ // we don't use ggml_sub because it may not be implemented on GPU backend
+ struct ggml_tensor * new_sub_old = ggml_add(ctx0, old_eigen, ggml_scale(ctx0, b_tensor, -1));
+ distance = ggml_sqrt_inplace(ctx0,
+ ggml_sum_rows(ctx0, ggml_sqr_inplace(ctx0, new_sub_old)));
+ ggml_format_name(distance, "distance_%d", i);
+
+ old_eigen = b_tensor;
+
+ // build operations nodes
+ ggml_build_forward_expand(gf, distance);
+ }
+
+ // free the temporary context used to build the graph
+ ggml_free(ctx0);
+ return gf;
+}
+
+static ggml_status compute_piter(
+ const struct pca_params & params,
+ const pca_model & model,
+ struct ggml_cgraph * gf,
+ ggml_gallocr_t allocr,
+ struct pca_result & result) {
+ // allocate tensors
+ ggml_gallocr_alloc_graph(allocr, gf);
+
+ if (ggml_backend_is_cpu(model.backend)) {
+ ggml_backend_cpu_set_n_threads(model.backend, params.n_threads);
+ }
+
+// TODO: enable GPU support when support for GGML_OP_SQRT is added
+//#ifdef GGML_USE_METAL
+// if (ggml_backend_is_metal(model.backend)) {
+// ggml_backend_metal_set_n_cb(model.backend, params.n_threads);
+// }
+//#endif
+
+ ggml_status res = ggml_backend_graph_compute(model.backend, gf);
+ if (res == GGML_STATUS_SUCCESS) {
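+ // parse the iteration index from a node name,
+ // e.g. extract_i("distance_", "distance_3") == 3 (-1 if the prefix does not match)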
+ auto extract_i = [](std::string prefix, std::string str) -> int {
+ int i = -1;
+ if (str.rfind(prefix, 0) == 0) {
+ sscanf(str.c_str(), (prefix + "%d").c_str(), &i);
+ }
+ return i;
+ };
+ result.calculated_square = NULL;
+ result.eigenvectors.clear();
+ result.distances.clear();
+ result.eigenvectors.resize(params.n_batch);
+ result.distances.resize(params.n_batch);
+ // get output nodes
+ for (int i = 0; i < gf->n_nodes; ++i) {
+ auto node = gf->nodes[i];
+ int iter = -1;
+ // find b_tensor (without copying data from device)
+ if ((iter = extract_i("b_tensor_norm_", node->name)) > -1) {
+ result.eigenvectors[iter] = node;
+ }
+ // find distances, then copy data from device
+ if ((iter = extract_i("distance_", node->name)) > -1) {
+ float d;
+ ggml_backend_tensor_get(node, &d, 0, sizeof(float));
+ result.distances[iter] = d;
+ // std::cout << node->name << " = " << d << "\n";
+ }
+ // find tmp_square if it exists (without copying data from device)
+ if (std::string(node->name) == "tmp_square") {
+ result.calculated_square = node;
+ }
+ }
+ }
+ return res;
+}
+
+static void power_iteration(
+ const struct pca_params & params,
+ struct ggml_tensor * input, // shape of input: [n_samples, n_embd]
+ struct ggml_tensor * output) {
+ //printf("in power iteration\n");
+ struct pca_model model(input);
+
+ ggml_gallocr_t allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(model.backend));
+ struct pca_result result;
+ struct ggml_tensor * last_eigenvector = NULL;
+
+ int n_iters = params.n_iterations / params.n_batch; // the larger the batch, the fewer outer iterations
+ for (int iter = 0; iter < n_iters; ++iter) {
+ bool calc_square = (iter == 0); // only need to calculate square for first iteration
+ struct ggml_cgraph * gf = build_graph_piter(params, model, calc_square);
+ // ggml_graph_dump_dot(gf, nullptr, "/tmp/_cgraph.dot");
+ compute_piter(params, model, gf, allocr, result);
+
+ for (size_t k = 0; k < result.distances.size(); ++k) {
+ last_eigenvector = result.eigenvectors[k];
+ if (result.distances[k] < params.tolerance) {
+ break; // done
+ }
+ }
+
+ if (calc_square) {
+ // copy and store the square matrix if needed
+ GGML_ASSERT(result.calculated_square != NULL);
+ ggml_backend_tensor_copy(result.calculated_square, model.dev_square);
+ }
+
+ {
+ // copy the last eigenvector and store it as the input for the next iteration
+ GGML_ASSERT(last_eigenvector != NULL);
+ ggml_backend_tensor_copy(last_eigenvector, model.dev_eigenvector);
+ }
+
+ printf("%s: layer %d/%d, iteration: %d / total: %d (batch = %d) ...\n",
+ __func__, params.i_layer+1, params.n_layers, iter, n_iters, params.n_batch);
+ }
+
+ // get output tensor
+ GGML_ASSERT(last_eigenvector);
+ ggml_backend_tensor_get(last_eigenvector, output->data, 0, ggml_nbytes(last_eigenvector));
+ //print_debug_tensor(output);
+ ggml_gallocr_free(allocr);
+}
+
+static void run_pca(
+ struct pca_params & params,
+ const std::vector<struct ggml_tensor *> & v_input, // shape of v_input[0]: [n_samples, n_embd]
+ const std::vector<struct ggml_tensor *> & v_output) {
+ printf("%s: Running PCA...\n", __func__);
+ for (size_t il = 0; il < v_input.size(); ++il) {
+
+ // prepare output vector
+ struct ggml_tensor * ctrl_out = v_output[il];
+ ggml_format_name(ctrl_out, "direction.%ld", il+1);
+
+ // run power_iteration
+ params.i_layer = il;
+ params.n_layers = v_input.size();
+ power_iteration(params, v_input[il], ctrl_out);
+ printf("%s: Done layer %d / %d\n", __func__, (int) il+1, (int) v_input.size());
+ }
+}
+
+}
diff --git a/examples/cvector-generator/positive.txt b/examples/cvector-generator/positive.txt
new file mode 100644
index 0000000000000..8802367873cd9
--- /dev/null
+++ b/examples/cvector-generator/positive.txt
@@ -0,0 +1 @@
+[INST] Act like a person who is extremely happy. [/INST]
diff --git a/examples/embedding/embedding.cpp b/examples/embedding/embedding.cpp
index 244751e003d9e..b4b73c0175cda 100644
--- a/examples/embedding/embedding.cpp
+++ b/examples/embedding/embedding.cpp
@@ -17,9 +17,10 @@ static std::vector<std::string> split_lines(const std::string & s) {
return lines;
}
-static void batch_add_seq(llama_batch & batch, const std::vector<int32_t> & tokens, int seq_id) {
- for (size_t i = 0; i < tokens.size(); i++) {
- llama_batch_add(batch, tokens[i], i, { seq_id }, i == tokens.size() - 1);
+static void batch_add_seq(llama_batch & batch, const std::vector<int32_t> & tokens, llama_seq_id seq_id) {
+ size_t n_tokens = tokens.size();
+ for (size_t i = 0; i < n_tokens; i++) {
+ llama_batch_add(batch, tokens[i], i, { seq_id }, true);
}
}
@@ -40,13 +41,7 @@ static void batch_decode(llama_context * ctx, llama_batch & batch, float * outpu
// try to get sequence embeddings - supported only when pooling_type is not NONE
const float * embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]);
- if (embd == NULL) {
- embd = llama_get_embeddings_ith(ctx, i);
- if (embd == NULL) {
- fprintf(stderr, "%s: failed to get embeddings for token %d\n", __func__, i);
- continue;
- }
- }
+ GGML_ASSERT(embd != NULL && "failed to get sequence embeddings");
float * out = output + batch.seq_id[i][0] * n_embd;
//TODO: I would also add a parameter here to enable normalization or not.
@@ -97,6 +92,12 @@ int main(int argc, char ** argv) {
const int n_ctx_train = llama_n_ctx_train(model);
const int n_ctx = llama_n_ctx(ctx);
+ const enum llama_pooling_type pooling_type = llama_pooling_type(ctx);
+ if (pooling_type == LLAMA_POOLING_TYPE_NONE) {
+ fprintf(stderr, "%s: error: pooling type NONE not supported\n", __func__);
+ return 1;
+ }
+
if (n_ctx > n_ctx_train) {
fprintf(stderr, "%s: warning: model was trained on only %d context tokens (%d specified)\n",
__func__, n_ctx_train, n_ctx);
diff --git a/examples/gritlm/gritlm.cpp b/examples/gritlm/gritlm.cpp
index 2135157916c97..2c61c2e1eb3bc 100644
--- a/examples/gritlm/gritlm.cpp
+++ b/examples/gritlm/gritlm.cpp
@@ -44,6 +44,7 @@ static std::vector<std::vector<float>> encode(llama_context * ctx, const std::ve
// clear previous kv_cache values (irrelevant for embeddings)
llama_kv_cache_clear(ctx);
+ llama_set_embeddings(ctx, true);
llama_set_causal_attn(ctx, false);
// run model
@@ -98,7 +99,9 @@ static std::string generate(llama_context * ctx, const std::string & prompt, boo
llama_token eos_token = llama_token_eos(mdl);
llama_kv_cache_clear(ctx);
+ llama_set_embeddings(ctx, false);
llama_set_causal_attn(ctx, true);
+
llama_batch bat = llama_batch_init(llama_n_batch(ctx), 0, 1);
std::vector<llama_token> inputs = llama_tokenize(mdl, prompt, false, true);
@@ -166,8 +169,7 @@ int main(int argc, char * argv[]) {
llama_model * mdl = llama_load_model_from_file(params.model.c_str(), mparams);
- // create new context - set to embedding mode
- cparams.embeddings = true;
+ // create generation context
llama_context * ctx = llama_new_context_with_model(mdl, cparams);
// ### Embedding/Representation ###
diff --git a/examples/infill/infill.cpp b/examples/infill/infill.cpp
index 0e4ec79c693fa..3e82e4a81a20b 100644
--- a/examples/infill/infill.cpp
+++ b/examples/infill/infill.cpp
@@ -223,7 +223,11 @@ int main(int argc, char ** argv) {
inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
embd_inp = inp_pfx;
embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
- embd_inp.push_back(llama_token_middle(model));
+
+ const llama_token middle_token = llama_token_middle(model);
+ if (middle_token >= 0) {
+ embd_inp.push_back(middle_token);
+ }
LOG("prefix: \"%s\"\n", log_tostr(params.input_prefix));
LOG("suffix: \"%s\"\n", log_tostr(params.input_suffix));
@@ -528,7 +532,12 @@ int main(int argc, char ** argv) {
inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
embd_inp = inp_pfx;
embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
- embd_inp.push_back(llama_token_middle(model));
+
+ const llama_token middle_token = llama_token_middle(model);
+ if (middle_token >= 0) {
+ embd_inp.push_back(middle_token);
+ }
+
embd.clear();
n_remain = params.n_predict;
n_past = 0;
diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp
index 61f5a5a0928a2..d641a9f12b388 100644
--- a/examples/llama-bench/llama-bench.cpp
+++ b/examples/llama-bench/llama-bench.cpp
@@ -293,6 +293,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
params.output_format = cmd_params_defaults.output_format;
params.output_format_stderr = cmd_params_defaults.output_format_stderr;
params.reps = cmd_params_defaults.reps;
+ params.numa = cmd_params_defaults.numa;
for (int i = 1; i < argc; i++) {
arg = argv[i];
@@ -713,7 +714,6 @@ struct test {
static const bool kompute;
static const bool metal;
static const bool sycl;
- static const bool rpc;
static const bool gpu_blas;
static const bool blas;
static const std::string cpu_info;
@@ -725,6 +725,7 @@ struct test {
int n_batch;
int n_ubatch;
int n_threads;
+ bool has_rpc;
ggml_type type_k;
ggml_type type_v;
int n_gpu_layers;
@@ -750,6 +751,7 @@ struct test {
n_batch = inst.n_batch;
n_ubatch = inst.n_ubatch;
n_threads = inst.n_threads;
+ has_rpc = !inst.rpc_servers.empty();
type_k = inst.type_k;
type_v = inst.type_v;
n_gpu_layers = inst.n_gpu_layers;
@@ -809,9 +811,6 @@ struct test {
if (sycl) {
return GGML_SYCL_NAME;
}
- if (rpc) {
- return "RPC";
- }
if (gpu_blas) {
return "GPU BLAS";
}
@@ -881,7 +880,7 @@ struct test {
std::vector values = {
build_commit, std::to_string(build_number),
std::to_string(cuda), std::to_string(vulkan), std::to_string(vulkan),
- std::to_string(metal), std::to_string(sycl), std::to_string(rpc), std::to_string(gpu_blas), std::to_string(blas),
+ std::to_string(metal), std::to_string(sycl), std::to_string(has_rpc), std::to_string(gpu_blas), std::to_string(blas),
cpu_info, gpu_info,
model_filename, model_type, std::to_string(model_size), std::to_string(model_n_params),
std::to_string(n_batch), std::to_string(n_ubatch),
@@ -915,7 +914,6 @@ const bool test::metal = !!ggml_cpu_has_metal();
const bool test::gpu_blas = !!ggml_cpu_has_gpublas();
const bool test::blas = !!ggml_cpu_has_blas();
const bool test::sycl = !!ggml_cpu_has_sycl();
-const bool test::rpc = !!ggml_cpu_has_rpc();
const std::string test::cpu_info = get_cpu_info();
const std::string test::gpu_info = get_gpu_info();
@@ -1181,6 +1179,9 @@ struct markdown_printer : public printer {
value = buf;
} else if (field == "backend") {
value = test::get_backend();
+ if (t.has_rpc) {
+ value += "+RPC";
+ }
} else if (field == "test") {
if (t.n_prompt > 0 && t.n_gen == 0) {
snprintf(buf, sizeof(buf), "pp%d", t.n_prompt);
diff --git a/examples/llama.swiftui/llama.swiftui/Models/LlamaState.swift b/examples/llama.swiftui/llama.swiftui/Models/LlamaState.swift
index 5bde1891727ce..2c1e3f61bad20 100644
--- a/examples/llama.swiftui/llama.swiftui/Models/LlamaState.swift
+++ b/examples/llama.swiftui/llama.swiftui/Models/LlamaState.swift
@@ -131,22 +131,29 @@ class LlamaState: ObservableObject {
messageLog += "\(text)"
- while await llamaContext.n_cur < llamaContext.n_len {
- let result = await llamaContext.completion_loop()
- messageLog += "\(result)"
- }
+ Task.detached {
+ while await llamaContext.n_cur < llamaContext.n_len {
+ let result = await llamaContext.completion_loop()
+ await MainActor.run {
+ self.messageLog += "\(result)"
+ }
+ }
- let t_end = DispatchTime.now().uptimeNanoseconds
- let t_generation = Double(t_end - t_heat_end) / NS_PER_S
- let tokens_per_second = Double(await llamaContext.n_len) / t_generation
+ let t_end = DispatchTime.now().uptimeNanoseconds
+ let t_generation = Double(t_end - t_heat_end) / self.NS_PER_S
+ let tokens_per_second = Double(await llamaContext.n_len) / t_generation
- await llamaContext.clear()
- messageLog += """
- \n
- Done
- Heat up took \(t_heat)s
- Generated \(tokens_per_second) t/s\n
- """
+ await llamaContext.clear()
+
+ await MainActor.run {
+ self.messageLog += """
+ \n
+ Done
+ Heat up took \(t_heat)s
+ Generated \(tokens_per_second) t/s\n
+ """
+ }
+ }
}
func bench() async {
diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp
index 28584e14b788c..76e2052d55d79 100644
--- a/examples/quantize/quantize.cpp
+++ b/examples/quantize/quantize.cpp
@@ -16,41 +16,41 @@ struct quant_option {
};
static const std::vector<struct quant_option> QUANT_OPTIONS = {
- { "Q4_0", LLAMA_FTYPE_MOSTLY_Q4_0, " 3.56G, +0.2166 ppl @ LLaMA-v1-7B", },
- { "Q4_1", LLAMA_FTYPE_MOSTLY_Q4_1, " 3.90G, +0.1585 ppl @ LLaMA-v1-7B", },
- { "Q5_0", LLAMA_FTYPE_MOSTLY_Q5_0, " 4.33G, +0.0683 ppl @ LLaMA-v1-7B", },
- { "Q5_1", LLAMA_FTYPE_MOSTLY_Q5_1, " 4.70G, +0.0349 ppl @ LLaMA-v1-7B", },
+ { "Q4_0", LLAMA_FTYPE_MOSTLY_Q4_0, " 4.34G, +0.4685 ppl @ Llama-3-8B", },
+ { "Q4_1", LLAMA_FTYPE_MOSTLY_Q4_1, " 4.78G, +0.4511 ppl @ Llama-3-8B", },
+ { "Q5_0", LLAMA_FTYPE_MOSTLY_Q5_0, " 5.21G, +0.1316 ppl @ Llama-3-8B", },
+ { "Q5_1", LLAMA_FTYPE_MOSTLY_Q5_1, " 5.65G, +0.1062 ppl @ Llama-3-8B", },
{ "IQ2_XXS",LLAMA_FTYPE_MOSTLY_IQ2_XXS," 2.06 bpw quantization", },
{ "IQ2_XS", LLAMA_FTYPE_MOSTLY_IQ2_XS, " 2.31 bpw quantization", },
{ "IQ2_S", LLAMA_FTYPE_MOSTLY_IQ2_S, " 2.5 bpw quantization", },
{ "IQ2_M", LLAMA_FTYPE_MOSTLY_IQ2_M, " 2.7 bpw quantization", },
{ "IQ1_S", LLAMA_FTYPE_MOSTLY_IQ1_S, " 1.56 bpw quantization", },
{ "IQ1_M", LLAMA_FTYPE_MOSTLY_IQ1_M, " 1.75 bpw quantization", },
- { "Q2_K", LLAMA_FTYPE_MOSTLY_Q2_K, " 2.63G, +0.6717 ppl @ LLaMA-v1-7B", },
- { "Q2_K_S", LLAMA_FTYPE_MOSTLY_Q2_K_S, " 2.16G, +9.0634 ppl @ LLaMA-v1-7B", },
+ { "Q2_K", LLAMA_FTYPE_MOSTLY_Q2_K, " 2.96G, +3.5199 ppl @ Llama-3-8B", },
+ { "Q2_K_S", LLAMA_FTYPE_MOSTLY_Q2_K_S, " 2.96G, +3.1836 ppl @ Llama-3-8B", },
{ "IQ3_XXS",LLAMA_FTYPE_MOSTLY_IQ3_XXS," 3.06 bpw quantization", },
{ "IQ3_S", LLAMA_FTYPE_MOSTLY_IQ3_S, " 3.44 bpw quantization", },
{ "IQ3_M", LLAMA_FTYPE_MOSTLY_IQ3_M, " 3.66 bpw quantization mix", },
- { "Q3_K", LLAMA_FTYPE_MOSTLY_Q3_K_M, "alias for Q3_K_M" },
- { "IQ3_XS", LLAMA_FTYPE_MOSTLY_IQ3_XS, " 3.3 bpw quantization" , },
- { "Q3_K_S", LLAMA_FTYPE_MOSTLY_Q3_K_S, " 2.75G, +0.5551 ppl @ LLaMA-v1-7B", },
- { "Q3_K_M", LLAMA_FTYPE_MOSTLY_Q3_K_M, " 3.07G, +0.2496 ppl @ LLaMA-v1-7B", },
- { "Q3_K_L", LLAMA_FTYPE_MOSTLY_Q3_K_L, " 3.35G, +0.1764 ppl @ LLaMA-v1-7B", },
+ { "Q3_K", LLAMA_FTYPE_MOSTLY_Q3_K_M, "alias for Q3_K_M" },
+ { "IQ3_XS", LLAMA_FTYPE_MOSTLY_IQ3_XS, " 3.3 bpw quantization", },
+ { "Q3_K_S", LLAMA_FTYPE_MOSTLY_Q3_K_S, " 3.41G, +1.6321 ppl @ Llama-3-8B", },
+ { "Q3_K_M", LLAMA_FTYPE_MOSTLY_Q3_K_M, " 3.74G, +0.6569 ppl @ Llama-3-8B", },
+ { "Q3_K_L", LLAMA_FTYPE_MOSTLY_Q3_K_L, " 4.03G, +0.5562 ppl @ Llama-3-8B", },
{ "IQ4_NL", LLAMA_FTYPE_MOSTLY_IQ4_NL, " 4.50 bpw non-linear quantization", },
{ "IQ4_XS", LLAMA_FTYPE_MOSTLY_IQ4_XS, " 4.25 bpw non-linear quantization", },
- { "Q4_K", LLAMA_FTYPE_MOSTLY_Q4_K_M, "alias for Q4_K_M", },
- { "Q4_K_S", LLAMA_FTYPE_MOSTLY_Q4_K_S, " 3.59G, +0.0992 ppl @ LLaMA-v1-7B", },
- { "Q4_K_M", LLAMA_FTYPE_MOSTLY_Q4_K_M, " 3.80G, +0.0532 ppl @ LLaMA-v1-7B", },
- { "Q5_K", LLAMA_FTYPE_MOSTLY_Q5_K_M, "alias for Q5_K_M", },
- { "Q5_K_S", LLAMA_FTYPE_MOSTLY_Q5_K_S, " 4.33G, +0.0400 ppl @ LLaMA-v1-7B", },
- { "Q5_K_M", LLAMA_FTYPE_MOSTLY_Q5_K_M, " 4.45G, +0.0122 ppl @ LLaMA-v1-7B", },
- { "Q6_K", LLAMA_FTYPE_MOSTLY_Q6_K, " 5.15G, +0.0008 ppl @ LLaMA-v1-7B", },
- { "Q8_0", LLAMA_FTYPE_MOSTLY_Q8_0, " 6.70G, +0.0004 ppl @ LLaMA-v1-7B", },
- { "F16", LLAMA_FTYPE_MOSTLY_F16, "14.00G, -0.0020 ppl @ Mistral-7B", },
- { "BF16", LLAMA_FTYPE_MOSTLY_BF16, "14.00G, -0.0050 ppl @ Mistral-7B", },
- { "F32", LLAMA_FTYPE_ALL_F32, "26.00G @ 7B", },
+ { "Q4_K", LLAMA_FTYPE_MOSTLY_Q4_K_M, "alias for Q4_K_M", },
+ { "Q4_K_S", LLAMA_FTYPE_MOSTLY_Q4_K_S, " 4.37G, +0.2689 ppl @ Llama-3-8B", },
+ { "Q4_K_M", LLAMA_FTYPE_MOSTLY_Q4_K_M, " 4.58G, +0.1754 ppl @ Llama-3-8B", },
+ { "Q5_K", LLAMA_FTYPE_MOSTLY_Q5_K_M, "alias for Q5_K_M", },
+ { "Q5_K_S", LLAMA_FTYPE_MOSTLY_Q5_K_S, " 5.21G, +0.1049 ppl @ Llama-3-8B", },
+ { "Q5_K_M", LLAMA_FTYPE_MOSTLY_Q5_K_M, " 5.33G, +0.0569 ppl @ Llama-3-8B", },
+ { "Q6_K", LLAMA_FTYPE_MOSTLY_Q6_K, " 6.14G, +0.0217 ppl @ Llama-3-8B", },
+ { "Q8_0", LLAMA_FTYPE_MOSTLY_Q8_0, " 7.96G, +0.0026 ppl @ Llama-3-8B", },
+ { "F16", LLAMA_FTYPE_MOSTLY_F16, "14.00G, +0.0020 ppl @ Mistral-7B", },
+ { "BF16", LLAMA_FTYPE_MOSTLY_BF16, "14.00G, -0.0050 ppl @ Mistral-7B", },
+ { "F32", LLAMA_FTYPE_ALL_F32, "26.00G @ 7B", },
// Note: Ensure COPY comes after F32 to avoid ftype 0 from matching.
- { "COPY", LLAMA_FTYPE_ALL_F32, "only copy tensors, no quantizing", },
+ { "COPY", LLAMA_FTYPE_ALL_F32, "only copy tensors, no quantizing", },
};
static const char * const LLM_KV_QUANTIZE_IMATRIX_FILE = "quantize.imatrix.file";
diff --git a/examples/retrieval/retrieval.cpp b/examples/retrieval/retrieval.cpp
index 55b7b2f70ae2a..eb89d16daf18d 100644
--- a/examples/retrieval/retrieval.cpp
+++ b/examples/retrieval/retrieval.cpp
@@ -73,9 +73,10 @@ static std::vector<chunk> chunk_file(const std::string & filename, int chunk_siz
return chunks;
}
-static void batch_add_seq(llama_batch & batch, const std::vector<int32_t> & tokens, int seq_id) {
- for (size_t i = 0; i < tokens.size(); i++) {
- llama_batch_add(batch, tokens[i], i, { seq_id }, i == tokens.size() - 1);
+static void batch_add_seq(llama_batch & batch, const std::vector<int32_t> & tokens, llama_seq_id seq_id) {
+ size_t n_tokens = tokens.size();
+ for (size_t i = 0; i < n_tokens; i++) {
+ llama_batch_add(batch, tokens[i], i, { seq_id }, true);
}
}
@@ -160,6 +161,12 @@ int main(int argc, char ** argv) {
const int n_ctx_train = llama_n_ctx_train(model);
const int n_ctx = llama_n_ctx(ctx);
+ const enum llama_pooling_type pooling_type = llama_pooling_type(ctx);
+ if (pooling_type == LLAMA_POOLING_TYPE_NONE) {
+ fprintf(stderr, "%s: error: pooling type NONE not supported\n", __func__);
+ return 1;
+ }
+
if (n_ctx > n_ctx_train) {
fprintf(stderr, "%s: warning: model was trained on only %d context tokens (%d specified)\n",
__func__, n_ctx_train, n_ctx);
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 919078f2bd920..f9a86961f9c8e 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -1594,7 +1594,7 @@ struct server_context {
} else {
std::string prompt;
if (task.data.contains("prompt") && task.data.at("prompt").is_string()) {
- json_value(task.data, "prompt", std::string());
+ prompt = json_value(task.data, "prompt", std::string());
}
slot = get_available_slot(prompt);
@@ -2038,7 +2038,12 @@ struct server_context {
prefix_tokens.insert(prefix_tokens.begin(), llama_token_bos(model)); // always add BOS
prefix_tokens.insert(prefix_tokens.end(), llama_token_suffix(model));
prefix_tokens.insert(prefix_tokens.end(), suffix_tokens.begin(), suffix_tokens.end());
- prefix_tokens.push_back(llama_token_middle(model));
+
+ const llama_token middle_token = llama_token_middle(model);
+ if (middle_token >= 0) {
+ prefix_tokens.push_back(middle_token);
+ }
+
prompt_tokens = prefix_tokens;
} else {
prompt_tokens = tokenize(slot.prompt, system_prompt.empty()); // add BOS if there isn't system prompt
diff --git a/examples/sycl/win-build-sycl.bat b/examples/sycl/win-build-sycl.bat
index b8037aae8c4ef..027173b0a974b 100644
--- a/examples/sycl/win-build-sycl.bat
+++ b/examples/sycl/win-build-sycl.bat
@@ -13,16 +13,16 @@ if %errorlevel% neq 0 goto ERROR
:: for FP16
:: faster for long-prompt inference
-:: cmake -G "MinGW Makefiles" .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icx -DBUILD_SHARED_LIBS=ON -DCMAKE_BUILD_TYPE=Release -DLLAMA_SYCL_F16=ON
+:: cmake -G "MinGW Makefiles" .. -DLLAMA_SYCL=ON -DCMAKE_CXX_COMPILER=icx -DBUILD_SHARED_LIBS=ON -DCMAKE_BUILD_TYPE=Release -DLLAMA_SYCL_F16=ON
:: for FP32
-cmake -G "MinGW Makefiles" .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icx -DBUILD_SHARED_LIBS=ON -DCMAKE_BUILD_TYPE=Release
+cmake -G "Ninja" .. -DLLAMA_SYCL=ON -DCMAKE_C_COMPILER=cl -DCMAKE_CXX_COMPILER=icx -DBUILD_SHARED_LIBS=ON -DCMAKE_BUILD_TYPE=Release
if %errorlevel% neq 0 goto ERROR
:: build example/main only
:: make main
:: build all binary
-make -j
+cmake --build . -j
if %errorlevel% neq 0 goto ERROR
cd ..
diff --git a/flake.lock b/flake.lock
index 7272e65fa8fa5..5278fb68a3f98 100644
--- a/flake.lock
+++ b/flake.lock
@@ -20,11 +20,11 @@
},
"nixpkgs": {
"locked": {
- "lastModified": 1717786204,
- "narHash": "sha256-4q0s6m0GUcN7q+Y2DqD27iLvbcd1G50T2lv08kKxkSI=",
+ "lastModified": 1718318537,
+ "narHash": "sha256-4Zu0RYRcAY/VWuu6awwq4opuiD//ahpc2aFHg2CWqFY=",
"owner": "NixOS",
"repo": "nixpkgs",
- "rev": "051f920625ab5aabe37c920346e3e69d7d34400e",
+ "rev": "e9ee548d90ff586a6471b4ae80ae9cfcbceb3420",
"type": "github"
},
"original": {
diff --git a/ggml-alloc.c b/ggml-alloc.c
index eb75962d49cc3..bd367c42df44e 100644
--- a/ggml-alloc.c
+++ b/ggml-alloc.c
@@ -339,6 +339,7 @@ struct hash_node {
};
struct tensor_alloc {
+ int buffer_id;
size_t offset;
size_t size_max; // 0 = pre-allocated, unused, or view
};
@@ -349,7 +350,6 @@ struct leaf_alloc {
};
struct node_alloc {
- int buffer_id;
struct tensor_alloc dst;
struct tensor_alloc src[GGML_MAX_SRC];
};
@@ -386,8 +386,19 @@ ggml_gallocr_t ggml_gallocr_new_n(ggml_backend_buffer_type_t * bufts, int n_bufs
for (int i = 0; i < n_bufs; i++) {
galloc->bufts[i] = bufts[i];
galloc->buffers[i] = NULL;
- size_t alignment = ggml_backend_buft_get_alignment(bufts[i]);
- galloc->buf_tallocs[i] = ggml_dyn_tallocr_new(alignment);
+
+ // check if the same buffer type is used multiple times and reuse the same allocator
+ for (int j = 0; j < i; j++) {
+ if (bufts[i] == bufts[j]) {
+ galloc->buf_tallocs[i] = galloc->buf_tallocs[j];
+ break;
+ }
+ }
+
+ if (galloc->buf_tallocs[i] == NULL) {
+ size_t alignment = ggml_backend_buft_get_alignment(bufts[i]);
+ galloc->buf_tallocs[i] = ggml_dyn_tallocr_new(alignment);
+ }
}
galloc->n_buffers = n_bufs;
@@ -405,10 +416,30 @@ void ggml_gallocr_free(ggml_gallocr_t galloc) {
for (int i = 0; i < galloc->n_buffers; i++) {
if (galloc->buffers != NULL) {
- ggml_backend_buffer_free(galloc->buffers[i]);
+ // skip if already freed
+ bool freed = false;
+ for (int j = 0; j < i; j++) {
+ if (galloc->buffers[j] == galloc->buffers[i]) {
+ freed = true;
+ break;
+ }
+ }
+ if (!freed) {
+ ggml_backend_buffer_free(galloc->buffers[i]);
+ }
}
if (galloc->buf_tallocs != NULL) {
- ggml_dyn_tallocr_free(galloc->buf_tallocs[i]);
+ // skip if already freed
+ bool freed = false;
+ for (int j = 0; j < i; j++) {
+ if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) {
+ freed = true;
+ break;
+ }
+ }
+ if (!freed) {
+ ggml_dyn_tallocr_free(galloc->buf_tallocs[i]);
+ }
}
}
@@ -511,17 +542,18 @@ static void ggml_gallocr_allocate_node(ggml_gallocr_t galloc, struct ggml_tensor
}
}
-static void ggml_gallocr_free_node(ggml_gallocr_t galloc, struct ggml_tensor * node, int buffer_id) {
+static void ggml_gallocr_free_node(ggml_gallocr_t galloc, struct ggml_tensor * node) {
// graph outputs are never freed
if (node->flags & GGML_TENSOR_FLAG_OUTPUT) {
AT_PRINTF("not freeing output %s\n", node->name);
return;
}
- struct ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id];
- ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id];
struct hash_node * hn = ggml_gallocr_hash_get(galloc, node);
size_t offset = hn->offset;
+ int buffer_id = hn->buffer_id;
+ struct ggml_dyn_tallocr * alloc = galloc->buf_tallocs[buffer_id];
+ ggml_backend_buffer_type_t buft = galloc->bufts[buffer_id];
size_t size = ggml_backend_buft_get_alloc_size(buft, node);
ggml_dyn_tallocr_free_tensor(alloc, offset, size, node);
hn->allocated = false;
@@ -626,11 +658,11 @@ static void ggml_gallocr_alloc_graph_impl(ggml_gallocr_t galloc, struct ggml_cgr
AT_PRINTF("view_src %s: %d children, %d views\n",
view_src->name, view_src_hn->n_children, view_src_hn->n_views);
if (view_src_hn->n_views == 0 && view_src_hn->n_children == 0 && view_src_hn->allocated) {
- ggml_gallocr_free_node(galloc, view_src, buffer_id);
+ ggml_gallocr_free_node(galloc, view_src);
}
}
else if (p_hn->allocated) {
- ggml_gallocr_free_node(galloc, parent, buffer_id);
+ ggml_gallocr_free_node(galloc, parent);
}
}
AT_PRINTF("\n");
@@ -674,22 +706,25 @@ bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, c
for (int i = 0; i < graph->n_nodes; i++) {
struct ggml_tensor * node = graph->nodes[i];
struct node_alloc * node_alloc = &galloc->node_allocs[i];
- node_alloc->buffer_id = get_node_buffer_id(node_buffer_ids, i);
if (node->view_src || node->data) {
+ node_alloc->dst.buffer_id = -1;
node_alloc->dst.offset = SIZE_MAX;
node_alloc->dst.size_max = 0;
} else {
struct hash_node * hn = ggml_gallocr_hash_get(galloc, node);
- node_alloc->dst.offset = hn->offset;
- node_alloc->dst.size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], node);
+ node_alloc->dst.buffer_id = hn->buffer_id;
+ node_alloc->dst.offset = hn->offset;
+ node_alloc->dst.size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], node);
}
for (int j = 0; j < GGML_MAX_SRC; j++) {
struct ggml_tensor * src = node->src[j];
if (!src || src->view_src || src->data) {
+ node_alloc->src[j].buffer_id = -1;
node_alloc->src[j].offset = SIZE_MAX;
node_alloc->src[j].size_max = 0;
} else {
struct hash_node * hn = ggml_gallocr_hash_get(galloc, src);
+ node_alloc->src[j].buffer_id = hn->buffer_id;
node_alloc->src[j].offset = hn->offset;
node_alloc->src[j].size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], src);
}
@@ -706,9 +741,11 @@ bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, c
struct hash_node * hn = ggml_gallocr_hash_get(galloc, leaf);
galloc->leaf_allocs[i].buffer_id = hn->buffer_id;
if (leaf->view_src || leaf->data) {
+ galloc->leaf_allocs[i].leaf.buffer_id = -1;
galloc->leaf_allocs[i].leaf.offset = SIZE_MAX;
galloc->leaf_allocs[i].leaf.size_max = 0;
} else {
+ galloc->leaf_allocs[i].leaf.buffer_id = hn->buffer_id;
galloc->leaf_allocs[i].leaf.offset = hn->offset;
galloc->leaf_allocs[i].leaf.size_max = ggml_backend_buft_get_alloc_size(galloc->bufts[hn->buffer_id], leaf);
}
@@ -716,6 +753,14 @@ bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, c
// reallocate buffers if needed
for (int i = 0; i < galloc->n_buffers; i++) {
+ // if the buffer type is used multiple times, we reuse the same buffer
+ for (int j = 0; j < i; j++) {
+ if (galloc->buf_tallocs[j] == galloc->buf_tallocs[i]) {
+ galloc->buffers[i] = galloc->buffers[j];
+ break;
+ }
+ }
+
size_t cur_size = galloc->buffers[i] ? ggml_backend_buffer_get_size(galloc->buffers[i]) : 0;
size_t new_size = ggml_dyn_tallocr_max_size(galloc->buf_tallocs[i]);
@@ -724,6 +769,7 @@ bool ggml_gallocr_reserve_n(ggml_gallocr_t galloc, struct ggml_cgraph * graph, c
#ifndef NDEBUG
fprintf(stderr, "%s: reallocating %s buffer from size %.02f MiB to %.02f MiB\n", __func__, ggml_backend_buft_name(galloc->bufts[i]), cur_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0);
#endif
+
ggml_backend_buffer_free(galloc->buffers[i]);
galloc->buffers[i] = ggml_backend_buft_alloc_buffer(galloc->bufts[i], new_size);
if (galloc->buffers[i] == NULL) {
@@ -740,7 +786,8 @@ bool ggml_gallocr_reserve(ggml_gallocr_t galloc, struct ggml_cgraph *graph) {
return ggml_gallocr_reserve_n(galloc, graph, NULL, NULL);
}
-static void ggml_gallocr_init_tensor(ggml_gallocr_t galloc, struct ggml_tensor * tensor, int buffer_id, struct tensor_alloc * tensor_alloc) {
+static void ggml_gallocr_init_tensor(ggml_gallocr_t galloc, struct ggml_tensor * tensor, struct tensor_alloc * tensor_alloc) {
+ int buffer_id = tensor_alloc->buffer_id;
assert(tensor->data || tensor->view_src || ggml_backend_buffer_get_alloc_size(galloc->buffers[buffer_id], tensor) <= tensor_alloc->size_max);
if (tensor->view_src != NULL) {
@@ -768,8 +815,8 @@ static void ggml_gallocr_init_tensor(ggml_gallocr_t galloc, struct ggml_tensor *
}
}
-static bool ggml_gallocr_node_needs_realloc(ggml_gallocr_t galloc, struct ggml_tensor * node, struct node_alloc * nalloc, struct tensor_alloc * talloc) {
- ggml_backend_buffer_type_t buft = galloc->bufts[nalloc->buffer_id];
+static bool ggml_gallocr_node_needs_realloc(ggml_gallocr_t galloc, struct ggml_tensor * node, struct tensor_alloc * talloc) {
+ ggml_backend_buffer_type_t buft = talloc->buffer_id != -1 ? galloc->bufts[talloc->buffer_id] : NULL;
size_t node_size = (node->data || node->view_src) ? 0 : ggml_backend_buft_get_alloc_size(buft, node);
return talloc->size_max >= node_size;
}
@@ -793,7 +840,7 @@ static bool ggml_gallocr_needs_realloc(ggml_gallocr_t galloc, struct ggml_cgraph
struct ggml_tensor * node = graph->nodes[i];
struct node_alloc * node_alloc = &galloc->node_allocs[i];
- if (!ggml_gallocr_node_needs_realloc(galloc, node, node_alloc, &node_alloc->dst)) {
+ if (!ggml_gallocr_node_needs_realloc(galloc, node, &node_alloc->dst)) {
#ifndef NDEBUG
fprintf(stderr, "%s: node %s is not valid\n", __func__, node->name);
#endif
@@ -805,7 +852,7 @@ static bool ggml_gallocr_needs_realloc(ggml_gallocr_t galloc, struct ggml_cgraph
if (src == NULL) {
continue;
}
- if (!ggml_gallocr_node_needs_realloc(galloc, src, node_alloc, &node_alloc->src[j])) {
+ if (!ggml_gallocr_node_needs_realloc(galloc, src, &node_alloc->src[j])) {
#ifndef NDEBUG
fprintf(stderr, "%s: src %d (%s) of node %s is not valid\n", __func__, j, src->name, node->name);
#endif
@@ -846,7 +893,7 @@ bool ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, struct ggml_cgraph * graph)
for (int i = 0; i < graph->n_leafs; i++) {
struct ggml_tensor * leaf = graph->leafs[i];
struct leaf_alloc * leaf_alloc = &galloc->leaf_allocs[i];
- ggml_gallocr_init_tensor(galloc, leaf, leaf_alloc->buffer_id, &leaf_alloc->leaf);
+ ggml_gallocr_init_tensor(galloc, leaf, &leaf_alloc->leaf);
}
// nodes
for (int i = 0; i < graph->n_nodes; i++) {
@@ -857,9 +904,9 @@ bool ggml_gallocr_alloc_graph(ggml_gallocr_t galloc, struct ggml_cgraph * graph)
if (src == NULL) {
continue;
}
- ggml_gallocr_init_tensor(galloc, src, node_alloc->buffer_id, &node_alloc->src[j]);
+ ggml_gallocr_init_tensor(galloc, src, &node_alloc->src[j]);
}
- ggml_gallocr_init_tensor(galloc, node, node_alloc->buffer_id, &node_alloc->dst);
+ ggml_gallocr_init_tensor(galloc, node, &node_alloc->dst);
}
return true;
@@ -871,6 +918,15 @@ size_t ggml_gallocr_get_buffer_size(ggml_gallocr_t galloc, int buffer_id) {
if (galloc->buffers[buffer_id] == NULL) {
return 0;
}
+
+ for (int i = 0; i < buffer_id; i++) {
+ if (galloc->buffers[i] == galloc->buffers[buffer_id]) {
+ // this buffer is the same as a previous one due to the same buffer type being used multiple times
+ // only return the buffer size the first time it appears to avoid double counting
+ return 0;
+ }
+ }
+
return ggml_backend_buffer_get_size(galloc->buffers[buffer_id]);
}
diff --git a/ggml-backend-impl.h b/ggml-backend-impl.h
index f121e1de420fa..36ca370867c9e 100644
--- a/ggml-backend-impl.h
+++ b/ggml-backend-impl.h
@@ -17,13 +17,15 @@ extern "C" {
struct ggml_backend_buffer_type_i {
const char * (*GGML_CALL get_name) (ggml_backend_buffer_type_t buft);
+ // allocate a buffer of this type
ggml_backend_buffer_t (*GGML_CALL alloc_buffer) (ggml_backend_buffer_type_t buft, size_t size);
- size_t (*GGML_CALL get_alignment) (ggml_backend_buffer_type_t buft); // tensor alignment
- size_t (*GGML_CALL get_max_size) (ggml_backend_buffer_type_t buft); // allocation max size
- size_t (*GGML_CALL get_alloc_size) (ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor); // data size needed to allocate the tensor, including padding
- bool (*GGML_CALL supports_backend)(ggml_backend_buffer_type_t buft, ggml_backend_t backend); // check if the buffer type is usable by the backend
+ // tensor alignment
+ size_t (*GGML_CALL get_alignment) (ggml_backend_buffer_type_t buft);
+ // max buffer size that can be allocated
+ size_t (*GGML_CALL get_max_size) (ggml_backend_buffer_type_t buft);
+ // data size needed to allocate the tensor, including padding
+ size_t (*GGML_CALL get_alloc_size) (ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor);
// check if tensor data is in host memory
- // should be equivalent to supports_backend(buft, ggml_backend_cpu_init())
bool (*GGML_CALL is_host) (ggml_backend_buffer_type_t buft);
};
@@ -92,27 +94,37 @@ extern "C" {
void (*GGML_CALL synchronize)(ggml_backend_t backend);
// compute graph with a plan (not used currently)
+ // create a new plan for a graph
ggml_backend_graph_plan_t (*GGML_CALL graph_plan_create) (ggml_backend_t backend, const struct ggml_cgraph * cgraph);
void (*GGML_CALL graph_plan_free) (ggml_backend_t backend, ggml_backend_graph_plan_t plan);
+ // update the plan with a new graph - this should be faster than creating a new plan when the graph has the same topology
+ void (*GGML_CALL graph_plan_update) (ggml_backend_t backend, ggml_backend_graph_plan_t plan, const struct ggml_cgraph * cgraph);
+ // compute the graph with the plan
+ enum ggml_status (*GGML_CALL graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan);
- // compute graph with a plan
- enum ggml_status (*GGML_CALL graph_plan_compute)(ggml_backend_t backend, ggml_backend_graph_plan_t plan);
// compute graph without a plan (async)
enum ggml_status (*GGML_CALL graph_compute) (ggml_backend_t backend, struct ggml_cgraph * cgraph);
- // check if the backend supports an operation
+ // check if the backend can compute an operation
bool (*GGML_CALL supports_op)(ggml_backend_t backend, const struct ggml_tensor * op);
+ // check if the backend can use tensors allocated in a buffer type
+ bool (*GGML_CALL supports_buft)(ggml_backend_t backend, ggml_backend_buffer_type_t buft);
+
// check if the backend wants to run an operation, even if the weights are allocated in a CPU buffer
// these should be expensive operations with large batch sizes that may benefit from running on this backend
// even if the weight has to be copied from the CPU temporarily
bool (*GGML_CALL offload_op)(ggml_backend_t backend, const struct ggml_tensor * op);
// (optional) event synchronization
+ // create a new event that can record events on this backend instance
ggml_backend_event_t (*GGML_CALL event_new) (ggml_backend_t backend);
void (*GGML_CALL event_free) (ggml_backend_event_t event);
+ // record an event on the backend instance that created it
void (*GGML_CALL event_record) (ggml_backend_event_t event);
+ // wait for an event on a different backend instance
void (*GGML_CALL event_wait) (ggml_backend_t backend, ggml_backend_event_t event);
+ // block until an event is recorded
void (*GGML_CALL event_synchronize) (ggml_backend_event_t event);
};
diff --git a/ggml-backend.c b/ggml-backend.c
index 05737ed696954..13c71c310c446 100644
--- a/ggml-backend.c
+++ b/ggml-backend.c
@@ -44,10 +44,6 @@ GGML_CALL size_t ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type_t buf
return ggml_nbytes(tensor);
}
-bool ggml_backend_buft_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend) {
- return buft->iface.supports_backend(buft, backend);
-}
-
bool ggml_backend_buft_is_host(ggml_backend_buffer_type_t buft) {
if (buft->iface.is_host) {
return buft->iface.is_host(buft);
@@ -286,6 +282,10 @@ bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor *
return backend->iface.supports_op(backend, op);
}
+bool ggml_backend_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft) {
+ return backend->iface.supports_buft(backend, buft);
+}
+
bool ggml_backend_offload_op(ggml_backend_t backend, const struct ggml_tensor * op) {
if (backend->iface.offload_op != NULL) {
return backend->iface.offload_op(backend, op);
@@ -639,12 +639,6 @@ GGML_CALL static size_t ggml_backend_cpu_buffer_type_get_alignment(ggml_backend_
GGML_UNUSED(buft);
}
-GGML_CALL static bool ggml_backend_cpu_buffer_type_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend) {
- return ggml_backend_is_cpu(backend);
-
- GGML_UNUSED(buft);
-}
-
GGML_CALL static bool ggml_backend_cpu_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
return true;
@@ -659,7 +653,6 @@ GGML_CALL ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void) {
/* .get_alignment = */ ggml_backend_cpu_buffer_type_get_alignment,
/* .get_max_size = */ NULL, // defaults to SIZE_MAX
/* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
- /* .supports_backend = */ ggml_backend_cpu_buffer_type_supports_backend,
/* .is_host = */ ggml_backend_cpu_buffer_type_is_host,
},
/* .context = */ NULL,
@@ -715,7 +708,6 @@ ggml_backend_buffer_type_t ggml_backend_cpu_hbm_buffer_type(void) {
/* .get_alignment = */ ggml_backend_cpu_buffer_type_get_alignment,
/* .get_max_size = */ NULL, // defaults to SIZE_MAX
/* .get_alloc_size = */ NULL, // defaults to ggml_nbytes
- /* .supports_backend = */ ggml_backend_cpu_buffer_type_supports_backend,
/* .is_host = */ ggml_backend_cpu_buffer_type_is_host,
},
/* .context = */ NULL,
@@ -836,6 +828,12 @@ GGML_CALL static bool ggml_backend_cpu_supports_op(ggml_backend_t backend, const
GGML_UNUSED(backend);
}
+GGML_CALL static bool ggml_backend_cpu_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft) {
+ return ggml_backend_buft_is_host(buft);
+
+ GGML_UNUSED(backend);
+}
+
static struct ggml_backend_i cpu_backend_i = {
/* .get_name = */ ggml_backend_cpu_name,
/* .free = */ ggml_backend_cpu_free,
@@ -846,9 +844,11 @@ static struct ggml_backend_i cpu_backend_i = {
/* .synchronize = */ NULL,
/* .graph_plan_create = */ ggml_backend_cpu_graph_plan_create,
/* .graph_plan_free = */ ggml_backend_cpu_graph_plan_free,
+ /* .graph_plan_update = */ NULL,
/* .graph_plan_compute = */ ggml_backend_cpu_graph_plan_compute,
/* .graph_compute = */ ggml_backend_cpu_graph_compute,
/* .supports_op = */ ggml_backend_cpu_supports_op,
+ /* .supports_buft = */ ggml_backend_cpu_supports_buft,
/* .offload_op = */ NULL,
/* .event_new = */ NULL,
/* .event_free = */ NULL,
@@ -1055,6 +1055,9 @@ struct ggml_backend_sched {
int * node_backend_ids; // [graph_size]
int * leaf_backend_ids; // [graph_size]
+ int * prev_node_backend_ids; // [graph_size]
+ int * prev_leaf_backend_ids; // [graph_size]
+
// copy of the graph with modified inputs
struct ggml_cgraph * graph;
@@ -1075,6 +1078,8 @@ struct ggml_backend_sched {
ggml_backend_sched_eval_callback callback_eval;
void * callback_eval_user_data;
+ bool debug;
+
// align context_buffer to GGML_MEM_ALIGN
#ifdef _MSC_VER
__declspec(align(GGML_MEM_ALIGN))
@@ -1097,22 +1102,24 @@ static int ggml_backend_sched_backend_id(ggml_backend_sched_t sched, ggml_backen
return -1;
}
-static int ggml_backend_sched_backend_from_buffer(ggml_backend_sched_t sched, const struct ggml_tensor * tensor) {
+static int ggml_backend_sched_backend_from_buffer(ggml_backend_sched_t sched, const struct ggml_tensor * tensor, const struct ggml_tensor * op) {
ggml_backend_buffer_t buffer = tensor->buffer;
if (buffer == NULL) {
return -1;
}
- // find highest prio backend that supports the buffer type
+ // find highest prio backend that supports the buffer type and the op
for (int i = 0; i < sched->n_backends; i++) {
- if (ggml_backend_buft_supports_backend(buffer->buft, sched->backends[i])) {
+ if (ggml_backend_supports_buft(sched->backends[i], buffer->buft) &&
+ ggml_backend_supports_op(sched->backends[i], op)) {
return i;
}
}
- fprintf(stderr, "%s: error: no backend supports buffer type %s used in tensor %s\n",
- __func__, ggml_backend_buffer_name(buffer), tensor->name);
- GGML_ASSERT(false);
+#ifndef NDEBUG
+ fprintf(stderr, "%s: warning: no backend supports op %s with a weight with buffer type %s used in tensor %s, the weight will need to be copied\n",
+ __func__, ggml_op_desc(tensor), ggml_backend_buffer_name(buffer), tensor->name);
+#endif
return -1;
}
@@ -1131,7 +1138,7 @@ static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, st
// TODO: use supports_op to check if the backend supports the op
// assign pre-allocated nodes to their backend
- int cur_backend_id = ggml_backend_sched_backend_from_buffer(sched, tensor);
+ int cur_backend_id = ggml_backend_sched_backend_from_buffer(sched, tensor, tensor);
if (cur_backend_id != -1) {
SET_CAUSE(tensor, "1.dst");
return cur_backend_id;
@@ -1139,7 +1146,7 @@ static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, st
// view_src
if (tensor->view_src != NULL) {
- cur_backend_id = ggml_backend_sched_backend_from_buffer(sched, tensor->view_src);
+ cur_backend_id = ggml_backend_sched_backend_from_buffer(sched, tensor->view_src, tensor);
if (cur_backend_id != -1) {
SET_CAUSE(tensor, "1.vsrc");
return cur_backend_id;
@@ -1161,11 +1168,11 @@ static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, st
continue;
}
if (src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
- int src_backend_id = ggml_backend_sched_backend_from_buffer(sched, src);
+ int src_backend_id = ggml_backend_sched_backend_from_buffer(sched, src, tensor);
// check if a backend with higher prio wants to offload the op
if (src_backend_id == sched->n_backends - 1) {
for (int b = 0; b < src_backend_id; b++) {
- if (ggml_backend_offload_op(sched->backends[b], tensor)) {
+ if (ggml_backend_supports_op(sched->backends[b], tensor) && ggml_backend_offload_op(sched->backends[b], tensor)) {
SET_CAUSE(tensor, "1.off");
return b;
}
@@ -1223,10 +1230,33 @@ static void ggml_backend_sched_print_assignments(ggml_backend_sched_t sched, str
}
}
-//#define DEBUG_PASS1
-//#define DEBUG_PASS2
-//#define DEBUG_PASS3
-//#define DEBUG_PASS4
+static bool ggml_backend_sched_buffer_supported(ggml_backend_sched_t sched, struct ggml_tensor * t, int backend_id) {
+ ggml_backend_buffer_t buf = t->view_src ? t->view_src->buffer : t->buffer;
+ ggml_backend_buffer_type_t buft = NULL;
+
+ if (buf) {
+ // the tensor is already allocated
+ buft = buf->buft;
+ } else {
+ // see if the tensor already has a backend assigned, and use the buffer type of that backend
+ int tensor_backend_id = tensor_backend_id(t);
+ if (tensor_backend_id == -1 && t->view_src) {
+ tensor_backend_id = tensor_backend_id(t->view_src);
+ }
+ if (tensor_backend_id != -1) {
+ buft = sched->bufts[tensor_backend_id];
+ }
+ }
+
+ return buft != NULL && ggml_backend_supports_buft(sched->backends[backend_id], buft);
+}
+
+static void ggml_backend_sched_set_if_supported(ggml_backend_sched_t sched, struct ggml_tensor * node, int cur_backend_id, int * node_backend_id) {
+ if (ggml_backend_supports_op(sched->backends[cur_backend_id], node)) {
+ *node_backend_id = cur_backend_id;
+ SET_CAUSE(node, "2.sup");
+ }
+}
// assigns backends to ops and splits the graph into subgraphs that can be computed on the same backend
static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
@@ -1280,17 +1310,13 @@ static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct gg
}
}
}
-#ifdef DEBUG_PASS1
- fprintf(stderr, "PASS 1 ASSIGNMENTS\n"); ggml_backend_sched_print_assignments(sched, graph);
-#endif
// pass 2: expand current backend assignments
// assign the same backend to adjacent nodes
// expand gpu backends (i.e. non last prio) up and down, ignoring cpu (the lowest priority backend)
// thus, cpu will never be used unless weights are on cpu, or there are no gpu ops between cpu ops
-
-
- // pass 2.2 expand gpu down
+ // ops unsupported by the backend being expanded will be left unassigned so that they can be assigned later when the locations of their inputs are known
+ // expand gpu down
{
int cur_backend_id = -1;
for (int i = 0; i < graph->n_nodes; i++) {
@@ -1306,13 +1332,12 @@ static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct gg
} else {
cur_backend_id = *node_backend_id;
}
- } else {
- *node_backend_id = cur_backend_id;
- SET_CAUSE(node, "2.2");
+ } else if (cur_backend_id != -1) {
+ ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id);
}
}
}
- // pass 2.1 expand gpu up
+ // expand gpu up
{
int cur_backend_id = -1;
for (int i = graph->n_nodes - 1; i >= 0; i--) {
@@ -1328,13 +1353,12 @@ static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct gg
} else {
cur_backend_id = *node_backend_id;
}
- } else {
- *node_backend_id = cur_backend_id;
- SET_CAUSE(node, "2.1");
+ } else if (cur_backend_id != -1) {
+ ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id);
}
}
}
- // pass 2.4 expand rest down
+ // expand rest down
{
int cur_backend_id = -1;
for (int i = 0; i < graph->n_nodes; i++) {
@@ -1345,13 +1369,12 @@ static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct gg
int * node_backend_id = &tensor_backend_id(node);
if (*node_backend_id != -1) {
cur_backend_id = *node_backend_id;
- } else {
- *node_backend_id = cur_backend_id;
- SET_CAUSE(node, "2.4");
+ } else if (cur_backend_id != -1) {
+ ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id);
}
}
}
- // pass 2.3 expand rest up
+ // expand rest up
{
int cur_backend_id = -1;
for (int i = graph->n_nodes - 1; i >= 0; i--) {
@@ -1362,24 +1385,80 @@ static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct gg
int * node_backend_id = &tensor_backend_id(node);
if (*node_backend_id != -1) {
cur_backend_id = *node_backend_id;
- } else {
- *node_backend_id = cur_backend_id;
- SET_CAUSE(node, "2.3");
+ } else if (cur_backend_id != -1) {
+ ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id);
}
}
}
-#ifdef DEBUG_PASS2
- fprintf(stderr, "PASS 2 ASSIGNMENTS\n"); ggml_backend_sched_print_assignments(sched, graph);
-#endif
+ // pass 3: upgrade nodes to higher prio backends with compatible buffer types
+ // if the tensor is already in the same buffer type (*) as another higher priority backend, we should move it there
+ // however, we also need to verify that the sources are in compatible buffer types
+ // (*) the actual requirement is more relaxed, the buffer type of the backend should be supported by all the users of this tensor further down the graph
+ // however, this is slow to verify, so we have a more strict requirement that the buffer type is the same
+ // this is not uncommon since multiple backends can use host memory, with the same buffer type (eg. BLAS and CPU)
+ // additionally, set remaining unassigned nodes to the backend with the most supported inputs
+ // only nodes that could not be assigned during expansion due to the backend not supporting the op should be unassigned at this point
+ for (int i = 0; i < graph->n_nodes; i++) {
+ struct ggml_tensor * node = graph->nodes[i];
+ if (ggml_is_view_op(node->op)) {
+ continue;
+ }
+ int * node_backend_id = &tensor_backend_id(node);
+ if (*node_backend_id == -1) {
+ // unassigned node: find the backend with the most supported inputs
+ int n_supported_best = -1;
+ for (int b = 0; b < sched->n_backends; b++) {
+ if (ggml_backend_supports_op(sched->backends[b], node)) {
+ int n_supported = 0;
+ for (int j = 0; j < GGML_MAX_SRC; j++) {
+ struct ggml_tensor * src = node->src[j];
+ if (src == NULL) {
+ continue;
+ }
+ if ((tensor_backend_id(src) != -1 || tensor_backend_id(src->view_src) != -1) && ggml_backend_sched_buffer_supported(sched, src, b)) {
+ n_supported++;
+ }
+ }
+ if (n_supported > n_supported_best) {
+ n_supported_best = n_supported;
+ *node_backend_id = b;
+ SET_CAUSE(node, "3.best");
+ }
+ }
+ }
+ } else {
+ // assigned node: upgrade to higher prio backend if possible
+ for (int b = 0; b < *node_backend_id; b++) {
+ if (sched->bufts[b] == sched->bufts[*node_backend_id] && ggml_backend_supports_op(sched->backends[b], node)) {
+ bool supported = true;
+ for (int j = 0; j < GGML_MAX_SRC; j++) {
+ struct ggml_tensor * src = node->src[j];
+ if (src == NULL) {
+ continue;
+ }
+ if (!ggml_backend_sched_buffer_supported(sched, src, b)) {
+ supported = false;
+ break;
+ }
+ }
+ if (supported) {
+ *node_backend_id = b;
+ SET_CAUSE(node, "3.upg");
+ break;
+ }
+ }
+ }
+ }
+ }
- // pass 3: assign backends to remaining src from dst and view_src
+ // pass 4: assign backends to remaining src from dst and view_src
for (int i = 0; i < graph->n_nodes; i++) {
struct ggml_tensor * node = graph->nodes[i];
int * cur_backend_id = &tensor_backend_id(node);
if (node->view_src != NULL && *cur_backend_id == -1) {
*cur_backend_id = tensor_backend_id(node->view_src);
- SET_CAUSE(node, "3.vsrc");
+ SET_CAUSE(node, "4.vsrc");
}
for (int j = 0; j < GGML_MAX_SRC; j++) {
struct ggml_tensor * src = node->src[j];
@@ -1391,17 +1470,14 @@ static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct gg
if (src->view_src != NULL) {
// views are always on the same backend as the source
*src_backend_id = tensor_backend_id(src->view_src);
- SET_CAUSE(src, "3.vsrc");
+ SET_CAUSE(src, "4.vsrc");
} else {
*src_backend_id = *cur_backend_id;
- SET_CAUSE(src, "3.cur");
+ SET_CAUSE(src, "4.cur");
}
}
}
}
-#ifdef DEBUG_PASS3
- fprintf(stderr, "PASS 3 ASSIGNMENTS\n"); ggml_backend_sched_print_assignments(sched, graph);
-#endif
// pass 4: split graph, find tensors that need to be copied
{
@@ -1448,10 +1524,12 @@ static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct gg
}
}
// check if the split has too many inputs
+ // FIXME: count the number of inputs instead of only checking when full
if (split->n_inputs == GGML_SCHED_MAX_SPLIT_INPUTS) {
const size_t id = hash_id(src);
int src_backend_id = sched->tensor_backend_id[id];
- if (src_backend_id != cur_backend_id && sched->tensor_copies[hash_id(src)][cur_backend_id][0] == NULL) {
+ bool supported = ggml_backend_sched_buffer_supported(sched, src, cur_backend_id);
+ if (src_backend_id != cur_backend_id && sched->tensor_copies[hash_id(src)][cur_backend_id][0] == NULL && !supported) {
//printf("starting new split because of too many inputs: node %s, input %s\n", node->name, src->name);
need_new_split = true;
break;
@@ -1486,7 +1564,7 @@ static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct gg
const int src_backend_id = tensor_backend_id(src);
assert(src_backend_id != -1); // all inputs should be assigned by now
- if (src->flags & GGML_TENSOR_FLAG_INPUT && sched->n_copies > 1) {
+ if (src->flags & GGML_TENSOR_FLAG_INPUT && sched->n_copies > 1) {
size_t id = hash_id(src);
if (sched->tensor_copies[id][src_backend_id][0] == NULL) {
ggml_backend_t backend = sched->backends[src_backend_id];
@@ -1511,7 +1589,8 @@ static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct gg
}
}
- if (src_backend_id != node_backend_id) {
+ bool supported = ggml_backend_sched_buffer_supported(sched, src, cur_backend_id);
+ if (src_backend_id != cur_backend_id && !supported) {
// create a copy of the input in the split's backend
const size_t id = hash_id(src);
if (sched->tensor_copies[id][cur_backend_id][0] == NULL) {
@@ -1537,9 +1616,21 @@ static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct gg
split->i_end = graph->n_nodes;
sched->n_splits = i_split + 1;
}
-#ifdef DEBUG_PASS4
- fprintf(stderr, "PASS 4 ASSIGNMENTS\n"); ggml_backend_sched_print_assignments(sched, graph);
-#endif
+
+ if (sched->debug) {
+ ggml_backend_sched_print_assignments(sched, graph);
+ }
+
+    // swap node_backend_ids and leaf_backend_ids with their prev counterparts
+ {
+ int * tmp = sched->node_backend_ids;
+ sched->node_backend_ids = sched->prev_node_backend_ids;
+ sched->prev_node_backend_ids = tmp;
+
+ tmp = sched->leaf_backend_ids;
+ sched->leaf_backend_ids = sched->prev_leaf_backend_ids;
+ sched->prev_leaf_backend_ids = tmp;
+ }
// create copies of the graph for each split
// TODO: avoid this copy
@@ -1613,8 +1704,26 @@ static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct gg
}
static bool ggml_backend_sched_alloc_splits(ggml_backend_sched_t sched) {
+ bool backend_ids_changed = false;
+ for (int i = 0; i < sched->graph->n_nodes; i++) {
+ if (sched->node_backend_ids[i] != sched->prev_node_backend_ids[i] &&
+ sched->bufts[sched->node_backend_ids[i]] != sched->bufts[sched->prev_node_backend_ids[i]]) {
+ backend_ids_changed = true;
+ break;
+ }
+ }
+ if (!backend_ids_changed) {
+ for (int i = 0; i < sched->graph->n_leafs; i++) {
+ if (sched->leaf_backend_ids[i] != sched->prev_leaf_backend_ids[i] &&
+ sched->bufts[sched->leaf_backend_ids[i]] != sched->bufts[sched->prev_leaf_backend_ids[i]]) {
+ backend_ids_changed = true;
+ break;
+ }
+ }
+ }
+
// allocate graph
- if (!ggml_gallocr_alloc_graph(sched->galloc, sched->graph)) {
+ if (backend_ids_changed || !ggml_gallocr_alloc_graph(sched->galloc, sched->graph)) {
// the re-allocation may cause the split inputs to be moved to a different address
ggml_backend_sched_synchronize(sched);
#ifndef NDEBUG
@@ -1727,6 +1836,8 @@ ggml_backend_sched_t ggml_backend_sched_new(
struct ggml_backend_sched * sched = calloc(1, sizeof(struct ggml_backend_sched));
+ sched->debug = getenv("GGML_SCHED_DEBUG") != NULL;
+
// initialize hash table
sched->hash_set = ggml_hash_set_new(graph_size);
sched->tensor_backend_id = calloc(sched->hash_set.size, sizeof(sched->tensor_backend_id[0]));
@@ -1735,6 +1846,8 @@ ggml_backend_sched_t ggml_backend_sched_new(
const size_t nodes_size = graph_size + GGML_SCHED_MAX_SPLITS*GGML_SCHED_MAX_SPLIT_INPUTS*2;
sched->node_backend_ids = calloc(nodes_size, sizeof(sched->node_backend_ids[0]));
sched->leaf_backend_ids = calloc(nodes_size, sizeof(sched->leaf_backend_ids[0]));
+ sched->prev_node_backend_ids = calloc(nodes_size, sizeof(sched->prev_node_backend_ids[0]));
+ sched->prev_leaf_backend_ids = calloc(nodes_size, sizeof(sched->prev_leaf_backend_ids[0]));
sched->n_backends = n_backends;
@@ -1747,7 +1860,7 @@ ggml_backend_sched_t ggml_backend_sched_new(
for (int b = 0; b < n_backends; b++) {
sched->backends[b] = backends[b];
sched->bufts[b] = bufts ? bufts[b] : ggml_backend_get_default_buffer_type(backends[b]);
- GGML_ASSERT(ggml_backend_buft_supports_backend(sched->bufts[b], backends[b]));
+ GGML_ASSERT(ggml_backend_supports_buft(backends[b], sched->bufts[b]));
if (sched->n_copies > 1) {
for (int c = 0; c < sched->n_copies; c++) {
sched->events[b][c] = ggml_backend_event_new(backends[b]);
@@ -1779,6 +1892,8 @@ void ggml_backend_sched_free(ggml_backend_sched_t sched) {
free(sched->tensor_copies);
free(sched->node_backend_ids);
free(sched->leaf_backend_ids);
+ free(sched->prev_node_backend_ids);
+ free(sched->prev_leaf_backend_ids);
free(sched);
}
@@ -1864,6 +1979,15 @@ int ggml_backend_sched_get_n_copies(ggml_backend_sched_t sched) {
return sched->n_copies;
}
+int ggml_backend_sched_get_n_backends(ggml_backend_sched_t sched) {
+ return sched->n_backends;
+}
+
+ggml_backend_t ggml_backend_sched_get_backend(ggml_backend_sched_t sched, int i) {
+ GGML_ASSERT(i >= 0 && i < sched->n_backends);
+ return sched->backends[i];
+}
+
size_t ggml_backend_sched_get_buffer_size(ggml_backend_sched_t sched, ggml_backend_t backend) {
int backend_index = ggml_backend_sched_backend_id(sched, backend);
GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
@@ -1875,6 +1999,7 @@ void ggml_backend_sched_set_tensor_backend(ggml_backend_sched_t sched, struct gg
int backend_index = ggml_backend_sched_backend_id(sched, backend);
GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
tensor_backend_id(node) = backend_index;
+ SET_CAUSE(node, "usr");
}
ggml_backend_t ggml_backend_sched_get_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node) {
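Aside: the DEBUG_PASS* compile-time switches removed above are replaced by a runtime check of the GGML_SCHED_DEBUG environment variable in ggml_backend_sched_new. A minimal, illustrative way to enable it from a test program (not part of the patch; setenv is POSIX):

#include <stdlib.h>

// enable the new scheduler assignment printout before the scheduler is created;
// on Windows, _putenv_s("GGML_SCHED_DEBUG", "1") would be used instead
static void enable_sched_debug(void) {
    setenv("GGML_SCHED_DEBUG", "1", /*overwrite=*/ 1);
}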
diff --git a/ggml-backend.h b/ggml-backend.h
index c582b06850ed1..4a38eeb5c23bd 100644
--- a/ggml-backend.h
+++ b/ggml-backend.h
@@ -23,7 +23,6 @@ extern "C" {
GGML_API size_t ggml_backend_buft_get_alignment (ggml_backend_buffer_type_t buft);
GGML_API size_t ggml_backend_buft_get_max_size (ggml_backend_buffer_type_t buft);
GGML_API GGML_CALL size_t ggml_backend_buft_get_alloc_size (ggml_backend_buffer_type_t buft, struct ggml_tensor * tensor);
- GGML_API bool ggml_backend_buft_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend);
GGML_API bool ggml_backend_buft_is_host (ggml_backend_buffer_type_t buft);
// buffer
@@ -74,6 +73,7 @@ extern "C" {
GGML_API enum ggml_status ggml_backend_graph_compute (ggml_backend_t backend, struct ggml_cgraph * cgraph);
GGML_API enum ggml_status ggml_backend_graph_compute_async(ggml_backend_t backend, struct ggml_cgraph * cgraph);
GGML_API bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op);
+ GGML_API bool ggml_backend_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft);
GGML_API bool ggml_backend_offload_op(ggml_backend_t backend, const struct ggml_tensor * op);
// tensor copy between different backends
@@ -90,7 +90,7 @@ extern "C" {
GGML_API void ggml_backend_event_free (ggml_backend_event_t event);
GGML_API void ggml_backend_event_record (ggml_backend_event_t event);
GGML_API void ggml_backend_event_synchronize(ggml_backend_event_t event);
- GGML_API void ggml_backend_event_wait (ggml_backend_t backend, ggml_backend_event_t event); // wait async on event
+ GGML_API void ggml_backend_event_wait (ggml_backend_t backend, ggml_backend_event_t event);
//
// CPU backend
@@ -119,7 +119,7 @@ extern "C" {
GGML_API size_t ggml_backend_reg_get_count(void);
GGML_API size_t ggml_backend_reg_find_by_name(const char * name);
- GGML_API ggml_backend_t ggml_backend_reg_init_backend_from_str(const char * backend_str); // str is name[:params]
+ GGML_API ggml_backend_t ggml_backend_reg_init_backend_from_str(const char * backend_str); // str is backend_name:params (params is optional)
GGML_API const char * ggml_backend_reg_get_name(size_t i);
GGML_API ggml_backend_t ggml_backend_reg_init_backend(size_t i, const char * params); // params is backend-specific
GGML_API ggml_backend_buffer_type_t ggml_backend_reg_get_default_buffer_type(size_t i);
@@ -182,6 +182,9 @@ extern "C" {
// Initialize backend buffers from a measure graph
GGML_API bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph);
+ GGML_API int ggml_backend_sched_get_n_backends(ggml_backend_sched_t sched);
+ GGML_API ggml_backend_t ggml_backend_sched_get_backend(ggml_backend_sched_t sched, int i);
+
// Get the number of splits of the last graph
GGML_API int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched);
GGML_API int ggml_backend_sched_get_n_copies(ggml_backend_sched_t sched);
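As an illustration of the two new scheduler accessors (a sketch, not part of the patch; it also uses the BLAS backend API introduced below):

#include "ggml-backend.h"
#include "ggml-blas.h"
#include <stdio.h>

// walk the backends owned by a scheduler and, if one of them is the BLAS
// backend, set its thread count; assumes the scheduler was created elsewhere
static void sched_set_blas_threads(ggml_backend_sched_t sched, int n_threads) {
    const int n = ggml_backend_sched_get_n_backends(sched);
    for (int i = 0; i < n; i++) {
        ggml_backend_t backend = ggml_backend_sched_get_backend(sched, i);
        printf("backend %d: %s\n", i, ggml_backend_name(backend));
        if (ggml_backend_is_blas(backend)) {
            ggml_backend_blas_set_n_threads(backend, n_threads);
        }
    }
}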
diff --git a/ggml-blas.cpp b/ggml-blas.cpp
new file mode 100644
index 0000000000000..d709a357bbf29
--- /dev/null
+++ b/ggml-blas.cpp
@@ -0,0 +1,363 @@
+#include "ggml-blas.h"
+#include "ggml-backend-impl.h"
+
+#include <future>
+#include <vector>
+
+#if defined(GGML_USE_ACCELERATE)
+#   include <Accelerate/Accelerate.h>
+#elif defined(GGML_BLAS_USE_MKL)
+#   include <mkl.h>
+#else
+#   include <cblas.h>
+#   ifdef BLIS_ENABLE_CBLAS
+#       include <blis.h>
+#   endif
+#endif
+
+struct ggml_backend_blas_context {
+ int n_threads = GGML_DEFAULT_N_THREADS;
+    std::unique_ptr<char[]> work_data;
+ size_t work_size = 0;
+#ifndef GGML_USE_OPENMP
+    std::vector<std::future<void>> tasks;
+#endif
+};
+
+// helper function to determine if it is better to use BLAS or not
+// for large matrices, BLAS is faster
+static bool ggml_backend_blas_use_blas(const struct ggml_tensor * dst) {
+ const struct ggml_tensor * src0 = dst->src[0];
+ const struct ggml_tensor * src1 = dst->src[1];
+
+ const int64_t ne10 = src1->ne[0];
+
+ const int64_t ne0 = dst->ne[0];
+ const int64_t ne1 = dst->ne[1];
+
+ // TODO: find the optimal values for these
+ if (ggml_is_contiguous(src0) &&
+ ggml_is_contiguous(src1) &&
+ src1->type == GGML_TYPE_F32 &&
+ (ne0 >= 32 && ne1 >= 32 && ne10 >= 32)) {
+
+ /*printf("BLAS: %d %d %d %d %d\n", ne0, ne1, ne10, ne00, ne01);*/
+ return true;
+ }
+
+ return false;
+}
+
+static void ggml_backend_blas_mul_mat(ggml_backend_blas_context * ctx, struct ggml_tensor * dst) {
+ const struct ggml_tensor * src0 = dst->src[0];
+ const struct ggml_tensor * src1 = dst->src[1];
+
+ GGML_TENSOR_BINARY_OP_LOCALS
+
+ const enum ggml_type type = src0->type;
+
+ GGML_ASSERT(ne0 == ne01);
+ GGML_ASSERT(ne1 == ne11);
+ GGML_ASSERT(ne2 == ne12);
+ GGML_ASSERT(ne3 == ne13);
+
+ // we don't support permuted src0 or src1
+ GGML_ASSERT(nb00 == ggml_type_size(type));
+ GGML_ASSERT(nb10 == ggml_type_size(src1->type));
+
+ // dst cannot be transposed or permuted
+ GGML_ASSERT(nb0 == sizeof(float));
+ GGML_ASSERT(nb0 <= nb1);
+ GGML_ASSERT(nb1 <= nb2);
+ GGML_ASSERT(nb2 <= nb3);
+
+ // broadcast factors
+ const int64_t r2 = ne12/ne02;
+ const int64_t r3 = ne13/ne03;
+
+ const int64_t ne_plane = ne01*ne00;
+ const size_t desired_wsize = type == GGML_TYPE_F32 ? 0 : ne03*ne02*ne_plane*sizeof(float);
+
+ if (ctx->work_size < desired_wsize) {
+ ctx->work_data.reset(new char[desired_wsize]);
+ ctx->work_size = desired_wsize;
+ }
+ void * wdata = ctx->work_data.get();
+
+ // convert src0 to float
+ if (type != GGML_TYPE_F32) {
+ ggml_type_traits_t type_traits = ggml_internal_get_type_traits(type);
+ ggml_to_float_t const to_float = type_traits.to_float;
+
+ for (int64_t i03 = 0; i03 < ne03; i03++) {
+ for (int64_t i02 = 0; i02 < ne02; i02++) {
+ const void * x = (char *) src0->data + i02*nb02 + i03*nb03;
+ float * const wplane = (float *) wdata + i02*ne_plane + i03*ne02*ne_plane;
+
+ const int min_cols_per_thread = 4096;
+ const int min_rows_per_thread = std::max((int)(min_cols_per_thread/ne00), 1);
+ const int n_threads = std::max(std::min(ctx->n_threads, (int)(ne01/min_rows_per_thread)), 1);
+
+#ifdef GGML_USE_OPENMP
+ #pragma omp parallel for num_threads(n_threads)
+ for (int64_t i01 = 0; i01 < ne01; i01++) {
+ to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00);
+ }
+#else
+ for (int i = 1; i < n_threads; i++) {
+ const int64_t start = i*ne01/n_threads;
+ const int64_t end = (i + 1)*ne01/n_threads;
+ if (start < end) {
+ ctx->tasks.push_back(std::async(std::launch::async, [=]() {
+ for (int64_t i01 = start; i01 < end; i01++) {
+ to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00);
+ }
+ }));
+ }
+ }
+ {
+ // reuse the current thread for the first task
+ const int64_t start = 0;
+ const int64_t end = ne01/n_threads;
+ for (int64_t i01 = start; i01 < end; i01++) {
+ to_float((const char *) x + i01*nb01, wplane + i01*ne00, ne00);
+ }
+ }
+#endif
+ }
+ }
+
+#ifndef GGML_USE_OPENMP
+ // wait for all tasks to finish
+ for (auto & task : ctx->tasks) {
+ task.get();
+ }
+ ctx->tasks.clear();
+#endif
+ }
+
+#if defined(OPENBLAS_VERSION)
+ openblas_set_num_threads(ctx->n_threads);
+#endif
+
+#if defined(BLIS_ENABLE_CBLAS)
+ bli_thread_set_num_threads(ctx->n_threads);
+#endif
+
+ for (int64_t i13 = 0; i13 < ne13; i13++) {
+ for (int64_t i12 = 0; i12 < ne12; i12++) {
+ const int64_t i03 = i13/r3;
+ const int64_t i02 = i12/r2;
+
+ const float * x = (float *) ((char *) src0->data + i02*nb02 + i03*nb03);
+ const float * y = (float *) ((char *) src1->data + i12*nb12 + i13*nb13);
+ float * d = (float *) ((char *) dst->data + i12*nb2 + i13*nb3);
+
+ if (type != GGML_TYPE_F32) {
+ x = (float *) wdata + i02*ne_plane + i03*ne02*ne_plane;
+ }
+
+ cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans,
+ ne1, ne01, ne10,
+ 1.0f, y, ne10,
+ x, ne00,
+ 0.0f, d, ne01);
+ }
+ }
+}
+
+static void ggml_backend_blas_out_prod(ggml_backend_blas_context * ctx, struct ggml_tensor * dst) {
+ const struct ggml_tensor * src0 = dst->src[0];
+ const struct ggml_tensor * src1 = dst->src[1];
+
+ GGML_TENSOR_BINARY_OP_LOCALS
+
+ GGML_ASSERT(ne0 == ne00);
+ GGML_ASSERT(ne1 == ne10);
+ GGML_ASSERT(ne2 == ne02);
+ GGML_ASSERT(ne02 == ne12);
+ GGML_ASSERT(ne3 == ne13);
+ GGML_ASSERT(ne03 == ne13);
+
+ // we don't support permuted src0 or src1
+ GGML_ASSERT(nb00 == sizeof(float));
+
+ // dst cannot be transposed or permuted
+ GGML_ASSERT(nb0 == sizeof(float));
+ // GGML_ASSERT(nb0 <= nb1);
+ // GGML_ASSERT(nb1 <= nb2);
+ // GGML_ASSERT(nb2 <= nb3);
+
+ // Arguments to ggml_compute_forward_out_prod (expressed as major,minor)
+ // src0: (k,n)
+ // src1: (k,m)
+ // dst: (m,n)
+ //
+ // Arguments to sgemm (see https://github.com/Reference-LAPACK/lapack/blob/master/BLAS/SRC/sgemm.f)
+ // Also expressed as (major,minor)
+ // a: (m,k): so src1 transposed
+ // b: (k,n): so src0
+ // c: (m,n)
+ //
+ // However, if ggml_is_transposed(src1) is true, then
+ // src1->data already contains a transposed version, so sgemm mustn't
+ // transpose it further.
+
+ int n = src0->ne[0];
+ int k = src0->ne[1];
+ int m = src1->ne[0];
+
+ CBLAS_TRANSPOSE transposeA;
+ int lda;
+
+ if (!ggml_is_transposed(src1)) {
+ transposeA = CblasTrans;
+ lda = m;
+ } else {
+ transposeA = CblasNoTrans;
+ lda = k;
+ }
+
+ float * a = (float *) ((char *) src1->data);
+ float * b = (float *) ((char *) src0->data);
+ float * c = (float *) ((char *) dst->data);
+
+ cblas_sgemm(CblasRowMajor, transposeA, CblasNoTrans, m, n, k, 1.0, a, lda, b, n, 0.0, c, n);
+
+ GGML_UNUSED(ctx);
+}
+
+// backend interface
+
+GGML_CALL static const char * ggml_backend_blas_name(ggml_backend_t backend) {
+ return "BLAS";
+
+ GGML_UNUSED(backend);
+}
+
+GGML_CALL static void ggml_backend_blas_free(ggml_backend_t backend) {
+ ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend->context;
+ delete ctx;
+ delete backend;
+}
+
+GGML_CALL static ggml_backend_buffer_type_t ggml_backend_blas_get_default_buffer_type(ggml_backend_t backend) {
+ return ggml_backend_cpu_buffer_type();
+
+ GGML_UNUSED(backend);
+}
+
+GGML_CALL static enum ggml_status ggml_backend_blas_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
+ ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend->context;
+
+ for (int i = 0; i < cgraph->n_nodes; i++) {
+ struct ggml_tensor * node = cgraph->nodes[i];
+
+ switch (node->op) {
+ case GGML_OP_MUL_MAT:
+ ggml_backend_blas_mul_mat(ctx, node);
+ break;
+
+ case GGML_OP_OUT_PROD:
+ ggml_backend_blas_out_prod(ctx, node);
+ break;
+
+ case GGML_OP_NONE:
+ case GGML_OP_RESHAPE:
+ case GGML_OP_VIEW:
+ case GGML_OP_PERMUTE:
+ case GGML_OP_TRANSPOSE:
+ break;
+
+ default:
+ fprintf(stderr, "%s: unsupported op %s\n", __func__, ggml_op_desc(node));
+ GGML_ASSERT(false);
+ }
+ }
+
+ return GGML_STATUS_SUCCESS;
+
+ GGML_UNUSED(backend);
+}
+
+GGML_CALL static bool ggml_backend_blas_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
+ const struct ggml_tensor * src0 = op->src[0];
+ const struct ggml_tensor * src1 = op->src[1];
+
+ return (op->op == GGML_OP_MUL_MAT && ggml_backend_blas_use_blas(op)) ||
+ (op->op == GGML_OP_OUT_PROD && op->src[0]->type == GGML_TYPE_F32 &&
+ op->src[1]->type == GGML_TYPE_F32 &&
+ ggml_is_matrix(src0) &&
+ ggml_is_matrix(src1) &&
+ ggml_is_contiguous(src0) &&
+ (ggml_is_contiguous(src1) || ggml_is_transposed(src1)));
+
+ GGML_UNUSED(backend);
+}
+
+GGML_CALL static bool ggml_backend_blas_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft) {
+ return ggml_backend_buft_is_host(buft);
+
+ GGML_UNUSED(backend);
+}
+
+static struct ggml_backend_i blas_backend_i = {
+ /* .get_name = */ ggml_backend_blas_name,
+ /* .free = */ ggml_backend_blas_free,
+ /* .get_default_buffer_type = */ ggml_backend_blas_get_default_buffer_type,
+ /* .set_tensor_async = */ NULL,
+ /* .get_tensor_async = */ NULL,
+ /* .cpy_tensor_async = */ NULL,
+ /* .synchronize = */ NULL,
+ /* .graph_plan_create = */ NULL,
+ /* .graph_plan_free = */ NULL,
+ /* .graph_plan_update = */ NULL,
+ /* .graph_plan_compute = */ NULL,
+ /* .graph_compute = */ ggml_backend_blas_graph_compute,
+ /* .supports_op = */ ggml_backend_blas_supports_op,
+ /* .supports_buft = */ ggml_backend_blas_supports_buft,
+ /* .offload_op = */ NULL,
+ /* .event_new = */ NULL,
+ /* .event_free = */ NULL,
+ /* .event_record = */ NULL,
+ /* .event_wait = */ NULL,
+ /* .event_synchronize = */ NULL,
+};
+
+static ggml_guid_t ggml_backend_blas_guid(void) {
+ static ggml_guid guid = { 0x12, 0xa8, 0xae, 0xf4, 0xc0, 0x1e, 0x61, 0x97, 0x8f, 0xeb, 0x33, 0x04, 0xa1, 0x33, 0x51, 0x2d };
+ return &guid;
+}
+
+ggml_backend_t ggml_backend_blas_init(void) {
+ ggml_backend_blas_context * ctx = new ggml_backend_blas_context;
+
+ ggml_backend_t backend = new ggml_backend {
+ /* .guid = */ ggml_backend_blas_guid(),
+ /* .interface = */ blas_backend_i,
+ /* .context = */ ctx,
+ };
+
+#if !defined(NDEBUG) && defined(OPENBLAS_VERSION) && defined(GGML_USE_OPENMP)
+ if (openblas_get_parallel() != OPENBLAS_OPENMP) {
+ fprintf(stderr, "%s: warning: ggml is using OpenMP, but OpenBLAS was compiled without OpenMP support\n", __func__);
+ }
+#endif
+
+#if !defined(NDEBUG) && defined(BLIS_ENABLE_CBLAS) && defined(GGML_USE_OPENMP) && !defined(BLIS_ENABLE_OPENMP)
+ fprintf(stderr, "%s: warning: ggml is using OpenMP, but BLIS was compiled without OpenMP support\n", __func__);
+#endif
+
+ return backend;
+}
+
+GGML_CALL bool ggml_backend_is_blas(ggml_backend_t backend) {
+ return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_blas_guid());
+}
+
+void ggml_backend_blas_set_n_threads(ggml_backend_t backend_blas, int n_threads) {
+ GGML_ASSERT(ggml_backend_is_blas(backend_blas));
+
+ ggml_backend_blas_context * ctx = (ggml_backend_blas_context *)backend_blas->context;
+ ctx->n_threads = n_threads;
+}
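To make the dispatch heuristic in ggml_backend_blas_use_blas concrete, a small standalone sketch with assumed shapes (the 4096/512/1 numbers are illustrative, not from the patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

// mirrors only the size check of ggml_backend_blas_use_blas; the contiguity
// and src1-type checks are omitted for brevity
static bool big_enough_for_blas(int64_t ne0, int64_t ne1, int64_t ne10) {
    return ne0 >= 32 && ne1 >= 32 && ne10 >= 32;
}

int main(void) {
    // prompt processing: 4096x4096 weights against a batch of 512 tokens -> BLAS
    printf("batch of 512: %d\n", big_enough_for_blas(4096, 512, 4096));
    // single-token decoding: ne1 == 1 fails the check -> regular CPU path
    printf("batch of 1:   %d\n", big_enough_for_blas(4096,   1, 4096));
    return 0;
}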
diff --git a/ggml-blas.h b/ggml-blas.h
new file mode 100644
index 0000000000000..f2e37de06f609
--- /dev/null
+++ b/ggml-blas.h
@@ -0,0 +1,23 @@
+#pragma once
+
+#include "ggml.h"
+#include "ggml-backend.h"
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// backend API
+GGML_API GGML_CALL ggml_backend_t ggml_backend_blas_init(void);
+
+GGML_API GGML_CALL bool ggml_backend_is_blas(ggml_backend_t backend);
+
+// number of threads used for conversion to float
+// for openblas and blis, this will also set the number of threads used for blas operations
+GGML_API GGML_CALL void ggml_backend_blas_set_n_threads(ggml_backend_t backend_blas, int n_threads);
+
+
+#ifdef __cplusplus
+}
+#endif
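A self-contained sketch of driving the new backend directly through the public API declared above (assumptions: a 64x64 F32 mat mul, which clears the >= 32 size threshold, and the standard ggml graph/alloc helpers):

#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "ggml-blas.h"

int main(void) {
    struct ggml_init_params params = {
        /* .mem_size   = */ ggml_tensor_overhead()*8 + ggml_graph_overhead(),
        /* .mem_buffer = */ NULL,
        /* .no_alloc   = */ true, // tensor data will live in a backend buffer
    };
    struct ggml_context * ctx = ggml_init(params);

    struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 64);
    struct ggml_tensor * b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 64, 64);
    struct ggml_tensor * c = ggml_mul_mat(ctx, a, b);

    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, c);

    ggml_backend_t blas = ggml_backend_blas_init();
    ggml_backend_blas_set_n_threads(blas, 4);

    ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, blas);
    // ... fill a and b with ggml_backend_tensor_set() before computing ...
    ggml_backend_graph_compute(blas, gf);

    ggml_backend_buffer_free(buf);
    ggml_backend_free(blas);
    ggml_free(ctx);
    return 0;
}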
diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index c6bc3f64c90de..f914efd712665 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -188,13 +188,15 @@ static ggml_cuda_device_info ggml_cuda_init() {
info.default_tensor_split[id] = total_vram;
total_vram += prop.totalGlobalMem;
+ info.devices[id].nsm = prop.multiProcessorCount;
+ info.devices[id].smpb = prop.sharedMemPerBlock;
#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+ info.devices[id].smpbo = prop.sharedMemPerBlock;
info.devices[id].cc = 100*prop.major + 10*prop.minor + CC_OFFSET_AMD;
#else
+ info.devices[id].smpbo = prop.sharedMemPerBlockOptin;
info.devices[id].cc = 100*prop.major + 10*prop.minor;
#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
- info.devices[id].smpb = prop.sharedMemPerBlock;
- info.devices[id].nsm = prop.multiProcessorCount;
}
for (int id = 0; id < info.device_count; ++id) {
@@ -543,6 +545,10 @@ GGML_CALL static const char * ggml_backend_cuda_buffer_type_name(ggml_backend_bu
return ctx->name.c_str();
}
+static bool ggml_backend_buft_is_cuda(ggml_backend_buffer_type_t buft) {
+ return buft->iface.get_name == ggml_backend_cuda_buffer_type_name;
+}
+
GGML_CALL static ggml_backend_buffer_t ggml_backend_cuda_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
ggml_backend_cuda_buffer_type_context * buft_ctx = (ggml_backend_cuda_buffer_type_context *)buft->context;
@@ -585,24 +591,12 @@ GGML_CALL static size_t ggml_backend_cuda_buffer_type_get_alloc_size(ggml_backen
GGML_UNUSED(buft);
}
-GGML_CALL static bool ggml_backend_cuda_buffer_type_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend) {
- if (!ggml_backend_is_cuda(backend)) {
- return false;
- }
-
- ggml_backend_cuda_buffer_type_context * buft_ctx = (ggml_backend_cuda_buffer_type_context *)buft->context;
- ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context;
-
- return buft_ctx->device == cuda_ctx->device;
-}
-
static ggml_backend_buffer_type_i ggml_backend_cuda_buffer_type_interface = {
/* .get_name = */ ggml_backend_cuda_buffer_type_name,
/* .alloc_buffer = */ ggml_backend_cuda_buffer_type_alloc_buffer,
/* .get_alignment = */ ggml_backend_cuda_buffer_type_get_alignment,
/* .get_max_size = */ NULL, // defaults to SIZE_MAX
/* .get_alloc_size = */ ggml_backend_cuda_buffer_type_get_alloc_size,
- /* .supports_backend = */ ggml_backend_cuda_buffer_type_supports_backend,
/* .is_host = */ NULL,
};
@@ -641,7 +635,7 @@ static int64_t get_row_rounding(const std::array &
}
const int cc = ggml_cuda_info().devices[id].cc;
- row_rounding = std::max(row_rounding, (int64_t)get_mmq_y_host(cc, get_mmq_x_max_host(cc)));
+ row_rounding = std::max(row_rounding, (int64_t)get_mmq_y_host(cc));
}
return row_rounding;
}
@@ -863,6 +857,10 @@ GGML_CALL static const char * ggml_backend_cuda_split_buffer_type_name(ggml_back
GGML_UNUSED(buft);
}
+static bool ggml_backend_buft_is_cuda_split(ggml_backend_buffer_type_t buft) {
+ return buft->iface.get_name == ggml_backend_cuda_split_buffer_type_name;
+}
+
GGML_CALL static ggml_backend_buffer_t ggml_backend_cuda_split_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
// since we don't know the exact split after rounding, we cannot allocate the device buffers at this point
// instead, we allocate them for each tensor separately in init_tensor
@@ -906,12 +904,6 @@ GGML_CALL static size_t ggml_backend_cuda_split_buffer_type_get_alloc_size(ggml_
return total_size;
}
-GGML_CALL static bool ggml_backend_cuda_split_buffer_type_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend) {
- return ggml_backend_is_cuda(backend);
-
- GGML_UNUSED(buft);
-}
-
GGML_CALL static bool ggml_backend_cuda_split_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
return false;
@@ -924,7 +916,6 @@ static ggml_backend_buffer_type_i ggml_backend_cuda_split_buffer_type_interface
/* .get_alignment = */ ggml_backend_cuda_split_buffer_type_get_alignment,
/* .get_max_size = */ NULL, // defaults to SIZE_MAX
/* .get_alloc_size = */ ggml_backend_cuda_split_buffer_type_get_alloc_size,
- /* .supports_backend = */ ggml_backend_cuda_split_buffer_type_supports_backend,
/* .is_host = */ ggml_backend_cuda_split_buffer_type_is_host,
};
@@ -1024,7 +1015,6 @@ GGML_CALL ggml_backend_buffer_type_t ggml_backend_cuda_host_buffer_type() {
/* .get_alignment = */ ggml_backend_cpu_buffer_type()->iface.get_alignment,
/* .get_max_size = */ NULL, // defaults to SIZE_MAX
/* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size,
- /* .supports_backend = */ ggml_backend_cpu_buffer_type()->iface.supports_backend,
/* .is_host = */ ggml_backend_cpu_buffer_type()->iface.is_host,
},
/* .context = */ nullptr,
@@ -2277,6 +2267,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg
case GGML_OP_SQR:
ggml_cuda_op_sqr(ctx, dst);
break;
+ case GGML_OP_SQRT:
+ ggml_cuda_op_sqrt(ctx, dst);
+ break;
case GGML_OP_CLAMP:
ggml_cuda_op_clamp(ctx, dst);
break;
@@ -2840,6 +2833,7 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
case GGML_OP_RMS_NORM:
case GGML_OP_SCALE:
case GGML_OP_SQR:
+ case GGML_OP_SQRT:
case GGML_OP_CLAMP:
case GGML_OP_CONT:
case GGML_OP_DIAG_MASK_INF:
@@ -2879,6 +2873,20 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
GGML_UNUSED(backend);
}
+GGML_CALL static bool ggml_backend_cuda_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft) {
+ if (ggml_backend_buft_is_cuda_split(buft)) {
+ return true;
+ }
+
+ if (ggml_backend_buft_is_cuda(buft)) {
+ ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *)backend->context;
+ ggml_backend_cuda_buffer_type_context * buft_ctx = (ggml_backend_cuda_buffer_type_context *)buft->context;
+ return buft_ctx->device == cuda_ctx->device;
+ }
+
+ return false;
+}
+
GGML_CALL static bool ggml_backend_cuda_offload_op(ggml_backend_t backend, const ggml_tensor * op) {
const int min_batch_size = 32;
@@ -2951,9 +2959,11 @@ static ggml_backend_i ggml_backend_cuda_interface = {
/* .synchronize = */ ggml_backend_cuda_synchronize,
/* .graph_plan_create = */ NULL,
/* .graph_plan_free = */ NULL,
+ /* .graph_plan_update = */ NULL,
/* .graph_plan_compute = */ NULL,
/* .graph_compute = */ ggml_backend_cuda_graph_compute,
/* .supports_op = */ ggml_backend_cuda_supports_op,
+ /* .supports_buft = */ ggml_backend_cuda_supports_buft,
/* .offload_op = */ ggml_backend_cuda_offload_op,
/* .event_new = */ ggml_backend_cuda_event_new,
/* .event_free = */ ggml_backend_cuda_event_free,
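Note on the interface change above: buffer-type compatibility is now queried on the backend (supports_buft) instead of on the buffer type (the removed supports_backend callbacks). Purely as an illustration of the new query direction — this is not the patch's ggml_backend_sched_buffer_supported — a caller-side check could look like:

#include "ggml-backend.h"

// can this backend read/write tensor t where it currently lives?
static bool tensor_buft_supported(ggml_backend_t backend, const struct ggml_tensor * t) {
    ggml_backend_buffer_t buffer = t->view_src != NULL ? t->view_src->buffer : t->buffer;
    if (buffer == NULL) {
        return true; // not allocated yet, so no constraint from an existing buffer
    }
    return ggml_backend_supports_buft(backend, ggml_backend_buffer_get_type(buffer));
}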
diff --git a/ggml-cuda/argsort.cu b/ggml-cuda/argsort.cu
index 1641440617779..15757ca18e4d7 100644
--- a/ggml-cuda/argsort.cu
+++ b/ggml-cuda/argsort.cu
@@ -73,6 +73,7 @@ static void argsort_f32_i32_cuda(const float * x, int * dst, const int ncols, co
const dim3 block_nums(1, nrows, 1);
const size_t shared_mem = ncols_pad * sizeof(int);
+ // FIXME: this limit could be raised by ~2-4x on Ampere or newer
GGML_ASSERT(shared_mem <= ggml_cuda_info().devices[ggml_cuda_get_device()].smpb);
if (order == GGML_SORT_ORDER_ASC) {
diff --git a/ggml-cuda/common.cuh b/ggml-cuda/common.cuh
index 7f4764d60e854..5bd24ebe5fa79 100644
--- a/ggml-cuda/common.cuh
+++ b/ggml-cuda/common.cuh
@@ -331,6 +331,10 @@ static __device__ __forceinline__ half2 __shfl_xor(half2 var, int laneMask, int
#define FP16_AVAILABLE
#endif // (defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) || __CUDA_ARCH__ >= CC_PASCAL
+#if defined(FP16_AVAILABLE) && __CUDA_ARCH__ != 610
+#define FAST_FP16_AVAILABLE
+#endif // defined(FP16_AVAILABLE) && __CUDA_ARCH__ != 610
+
#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_VOLTA
#define FP16_MMA_AVAILABLE
#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_VOLTA
@@ -648,8 +652,8 @@ static int get_mmq_x_max_host(const int cc) {
}
// Round rows to this value for --split-mode row:
-static int get_mmq_y_host(const int cc, const int mmq_x) {
- return cc >= CC_VOLTA && mmq_x >= 32 ? 128 : 64;
+static int get_mmq_y_host(const int cc) {
+ return cc >= CC_VOLTA ? 128 : 64;
}
//////////////////////
@@ -661,6 +665,7 @@ struct ggml_cuda_device_info {
int cc; // compute capability
int nsm; // number of streaming multiprocessors
size_t smpb; // max. shared memory per block
+ size_t smpbo; // max. shared memory per block (with opt-in)
bool vmm; // virtual memory support
size_t vmm_granularity; // granularity of virtual memory
size_t total_vram;
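The new smpbo field records the opt-in shared memory limit (sharedMemPerBlockOptin); the FIXME added in argsort.cu hints at how it could be used. A hedged CUDA sketch (the kernel here is a placeholder, not the real argsort kernel):

#include "common.cuh"

__global__ void k_placeholder(int * data) {
    data[threadIdx.x] = (int) threadIdx.x;
}

// raise the kernel's dynamic shared memory limit beyond smpb when the device
// supports a larger opt-in carve-out (smpbo), then launch with that much memory
static void launch_with_optin_smem(int * data, size_t shared_mem, cudaStream_t stream) {
    const auto & dev = ggml_cuda_info().devices[ggml_cuda_get_device()];
    if (shared_mem > dev.smpb) {
        GGML_ASSERT(shared_mem <= dev.smpbo);
        CUDA_CHECK(cudaFuncSetAttribute(k_placeholder,
            cudaFuncAttributeMaxDynamicSharedMemorySize, (int) shared_mem));
    }
    k_placeholder<<<1, 32, shared_mem, stream>>>(data);
}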
diff --git a/ggml-cuda/mmq.cu b/ggml-cuda/mmq.cu
index 1d6b9e6982b6e..6dbd85feff2fa 100644
--- a/ggml-cuda/mmq.cu
+++ b/ggml-cuda/mmq.cu
@@ -30,34 +30,34 @@ void ggml_cuda_op_mul_mat_q(
switch (src0->type) {
case GGML_TYPE_Q4_0:
-            mul_mat_q_case<GGML_TYPE_Q4_0>(args, stream);
+            mul_mat_q_case<GGML_TYPE_Q4_0>(ctx, args, stream);
             break;
         case GGML_TYPE_Q4_1:
-            mul_mat_q_case<GGML_TYPE_Q4_1>(args, stream);
+            mul_mat_q_case<GGML_TYPE_Q4_1>(ctx, args, stream);
             break;
         case GGML_TYPE_Q5_0:
-            mul_mat_q_case<GGML_TYPE_Q5_0>(args, stream);
+            mul_mat_q_case<GGML_TYPE_Q5_0>(ctx, args, stream);
             break;
         case GGML_TYPE_Q5_1:
-            mul_mat_q_case<GGML_TYPE_Q5_1>(args, stream);
+            mul_mat_q_case<GGML_TYPE_Q5_1>(ctx, args, stream);
             break;
         case GGML_TYPE_Q8_0:
-            mul_mat_q_case<GGML_TYPE_Q8_0>(args, stream);
+            mul_mat_q_case<GGML_TYPE_Q8_0>(ctx, args, stream);
             break;
         case GGML_TYPE_Q2_K:
-            mul_mat_q_case<GGML_TYPE_Q2_K>(args, stream);
+            mul_mat_q_case<GGML_TYPE_Q2_K>(ctx, args, stream);
             break;
         case GGML_TYPE_Q3_K:
-            mul_mat_q_case<GGML_TYPE_Q3_K>(args, stream);
+            mul_mat_q_case<GGML_TYPE_Q3_K>(ctx, args, stream);
             break;
         case GGML_TYPE_Q4_K:
-            mul_mat_q_case<GGML_TYPE_Q4_K>(args, stream);
+            mul_mat_q_case<GGML_TYPE_Q4_K>(ctx, args, stream);
             break;
         case GGML_TYPE_Q5_K:
-            mul_mat_q_case<GGML_TYPE_Q5_K>(args, stream);
+            mul_mat_q_case<GGML_TYPE_Q5_K>(ctx, args, stream);
             break;
         case GGML_TYPE_Q6_K:
-            mul_mat_q_case<GGML_TYPE_Q6_K>(args, stream);
+            mul_mat_q_case<GGML_TYPE_Q6_K>(ctx, args, stream);
break;
default:
GGML_ASSERT(false);
diff --git a/ggml-cuda/mmq.cuh b/ggml-cuda/mmq.cuh
index 01e2086b41646..e2d07c20253ae 100644
--- a/ggml-cuda/mmq.cuh
+++ b/ggml-cuda/mmq.cuh
@@ -8,14 +8,15 @@
#include
#define MMQ_TILE_Y_K (WARP_SIZE + WARP_SIZE/QI8_1)
+#define MMQ_NWARPS 8
typedef void (*load_tiles_mmq_t)(
- const char * __restrict__ x, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
+ const char * __restrict__ x, int * __restrict__ x_qs, half2 * __restrict__ x_dm,
int * __restrict__ x_sc, const int & kbx0, const int & i_max, const int & stride);
typedef void (*vec_dot_mmq_t)(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
+ const int * __restrict__ x_qs, const half2 * __restrict__ x_dm, const int * __restrict__ x_sc,
const int * __restrict__ y, float * __restrict__ sum, const int & k0);
-typedef void (*mmq_write_back_t)(const float * __restrict__ sum, float * __restrict__ dst, const int & ne0, const int & ne1);
+typedef void (*mmq_write_back_t)(const float * __restrict__ sum, float * __restrict__ dst, const int & stride, const int & i_max, const int & j_max);
struct block_q8_1_mmq {
half2 ds[4];
@@ -25,9 +26,8 @@ static_assert(sizeof(block_q8_1_mmq) == 4*QK8_1 + 4*sizeof(half2), "Unexpected b
static_assert(sizeof(block_q8_1_mmq) == 4*sizeof(block_q8_1), "Unexpected block_q8_1_mmq size");
struct tile_x_sizes {
- int ql;
+ int qs;
int dm;
- int qh;
int sc;
};
@@ -51,32 +51,28 @@ static constexpr __device__ int get_mmq_x_max_device() {
// get_mmq_y_host is in common.cuh so that it can be used to determine the correct way to round for --split-mode row
+static constexpr __device__ int get_mmq_y_device() {
#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
-static constexpr __device__ int get_mmq_y_device(int mmq_x) {
- return mmq_x >= 32 ? 128 : 64;
-}
+ return 128;
#else
#if __CUDA_ARCH__ >= CC_VOLTA
-static constexpr __device__ int get_mmq_y_device(int mmq_x) {
- return mmq_x >= 32 ? 128 : 64;
-}
+ return 128;
#else
-static constexpr __device__ int get_mmq_y_device(int /*mmq_x*/) {
return 64;
-}
#endif // __CUDA_ARCH__ >= CC_VOLTA
#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+}
-#define TILE_X_SIZES_Q4_0 tile_x_sizes{mmq_y*WARP_SIZE + mmq_y, mmq_y*WARP_SIZE/QI4_0 + mmq_y/QI4_0, 0, 0}
-#define TILE_X_SIZES_Q4_1 tile_x_sizes{mmq_y*WARP_SIZE + mmq_y, mmq_y*WARP_SIZE/QI4_1 + mmq_y/QI4_1, 0, 0}
-#define TILE_X_SIZES_Q5_0 tile_x_sizes{mmq_y*WARP_SIZE*2 + mmq_y, mmq_y*WARP_SIZE/QI5_0 + mmq_y/QI5_0, 0, 0}
-#define TILE_X_SIZES_Q5_1 tile_x_sizes{mmq_y*WARP_SIZE*2 + mmq_y, mmq_y*WARP_SIZE/QI5_1 + mmq_y/QI5_1, 0, 0}
-#define TILE_X_SIZES_Q8_0 tile_x_sizes{mmq_y*WARP_SIZE + mmq_y, mmq_y*WARP_SIZE/QI8_0 + mmq_y/QI8_0, 0, 0}
-#define TILE_X_SIZES_Q2_K tile_x_sizes{mmq_y*WARP_SIZE + mmq_y, mmq_y*WARP_SIZE/QI2_K + mmq_y/QI2_K, 0, mmq_y*WARP_SIZE/4 + mmq_y/4}
-#define TILE_X_SIZES_Q3_K tile_x_sizes{mmq_y*WARP_SIZE + mmq_y, mmq_y*WARP_SIZE/QI3_K + mmq_y/QI3_K, mmq_y*WARP_SIZE/2 + mmq_y/2, mmq_y*WARP_SIZE/4 + mmq_y/4}
-#define TILE_X_SIZES_Q4_K tile_x_sizes{mmq_y*WARP_SIZE + mmq_y, mmq_y*WARP_SIZE/QI4_K + mmq_y/QI4_K, 0, mmq_y*WARP_SIZE/8 + mmq_y/8}
-#define TILE_X_SIZES_Q5_K tile_x_sizes{mmq_y*WARP_SIZE*2 + mmq_y, mmq_y*WARP_SIZE/QI5_K + mmq_y/QI5_K, 0, mmq_y*WARP_SIZE/8 + mmq_y/8}
-#define TILE_X_SIZES_Q6_K tile_x_sizes{mmq_y*WARP_SIZE*2 + mmq_y, mmq_y*WARP_SIZE/QI6_K + mmq_y/QI6_K, 0, mmq_y*WARP_SIZE/8 + mmq_y/8}
+#define TILE_X_SIZES_Q4_0 tile_x_sizes{mmq_y*WARP_SIZE + mmq_y, mmq_y*WARP_SIZE/QI4_0 + mmq_y/QI4_0, 0}
+#define TILE_X_SIZES_Q4_1 tile_x_sizes{mmq_y*WARP_SIZE + mmq_y, mmq_y*WARP_SIZE/QI4_1 + mmq_y/QI4_1, 0}
+#define TILE_X_SIZES_Q5_0 tile_x_sizes{mmq_y*WARP_SIZE*2 + mmq_y, mmq_y*WARP_SIZE/QI5_0 + mmq_y/QI5_0, 0}
+#define TILE_X_SIZES_Q5_1 tile_x_sizes{mmq_y*WARP_SIZE*2 + mmq_y, mmq_y*WARP_SIZE/QI5_1 + mmq_y/QI5_1, 0}
+#define TILE_X_SIZES_Q8_0 tile_x_sizes{mmq_y*WARP_SIZE + mmq_y, mmq_y*WARP_SIZE/QI8_0 + mmq_y/QI8_0, 0}
+#define TILE_X_SIZES_Q2_K tile_x_sizes{mmq_y*WARP_SIZE + mmq_y, mmq_y*WARP_SIZE + mmq_y, 0}
+#define TILE_X_SIZES_Q3_K tile_x_sizes{mmq_y*WARP_SIZE*2 + mmq_y, mmq_y*WARP_SIZE/QI3_K + mmq_y/QI3_K, mmq_y*WARP_SIZE/4 + mmq_y/4}
+#define TILE_X_SIZES_Q4_K tile_x_sizes{mmq_y*WARP_SIZE + mmq_y, mmq_y*WARP_SIZE/QI4_K + mmq_y/QI4_K, mmq_y*WARP_SIZE/8 + mmq_y/8}
+#define TILE_X_SIZES_Q5_K tile_x_sizes{mmq_y*WARP_SIZE*2 + mmq_y, mmq_y*WARP_SIZE/QI5_K + mmq_y/QI5_K, mmq_y*WARP_SIZE/8 + mmq_y/8}
+#define TILE_X_SIZES_Q6_K tile_x_sizes{mmq_y*WARP_SIZE*2 + mmq_y, mmq_y*WARP_SIZE/QI6_K + mmq_y/QI6_K, mmq_y*WARP_SIZE/8 + mmq_y/8}
#define GET_TILE_X_SIZES_BODY \
return type == GGML_TYPE_Q4_0 ? TILE_X_SIZES_Q4_0 : \
@@ -89,7 +85,7 @@ static constexpr __device__ int get_mmq_y_device(int /*mmq_x*/) {
type == GGML_TYPE_Q4_K ? TILE_X_SIZES_Q4_K : \
type == GGML_TYPE_Q5_K ? TILE_X_SIZES_Q5_K : \
type == GGML_TYPE_Q6_K ? TILE_X_SIZES_Q6_K : \
- tile_x_sizes{0, 0, 0, 0}
+ tile_x_sizes{0, 0, 0}
static tile_x_sizes get_tile_x_sizes_host(const ggml_type type, const int mmq_y) {
GET_TILE_X_SIZES_BODY;
@@ -103,9 +99,9 @@ static constexpr __device__ tile_x_sizes get_tile_x_sizes_device(ggml_type type)
// ------------------------------------------------------------
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_0(
- const char * __restrict__ x, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
+ const char * __restrict__ x, int * __restrict__ x_qs, half2 * __restrict__ x_dm,
int * __restrict__ x_sc, const int & kbx0, const int & i_max, const int & stride) {
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
+ GGML_UNUSED(x_sc);
const int kbx = threadIdx.x / QI4_0;
const int kqsx = threadIdx.x % QI4_0;
@@ -122,7 +118,7 @@ template static __device__ __forceinlin
const block_q4_0 * bxi = (const block_q4_0 *) x + kbx0 + i*stride + kbx;
- x_ql[i * (WARP_SIZE + 1) + threadIdx.x] = get_int_from_uint8(bxi->qs, kqsx);
+ x_qs[i * (WARP_SIZE + 1) + threadIdx.x] = get_int_from_uint8(bxi->qs, kqsx);
}
const int blocks_per_tile_x_row = WARP_SIZE / QI4_0;
@@ -144,10 +140,9 @@ template static __device__ __forceinlin
template <int mmq_x, int mmq_y, int nwarps>
static __device__ __forceinline__ void vec_dot_q4_0_q8_1_dp4a(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
+ const int * __restrict__ x_qs, const half2 * __restrict__ x_dm, const int * __restrict__ x_sc,
const int * __restrict__ y, float * __restrict__ sum, const int & k0) {
-
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
+ GGML_UNUSED(x_sc);
const float * x_df = (const float *) x_dm;
const int * y_qs = (const int *) y + 4;
@@ -172,7 +167,7 @@ static __device__ __forceinline__ void vec_dot_q4_0_q8_1_dp4a(
}
sum[j0/nwarps*mmq_y/WARP_SIZE + i0/WARP_SIZE] += vec_dot_q4_0_q8_1_impl
- (&x_ql[i*(WARP_SIZE + 1) + k0], u, x_df[i*(WARP_SIZE/QI4_0) + i/QI4_0 + k0/QI4_0],
+ (&x_qs[i*(WARP_SIZE + 1) + k0], u, x_df[i*(WARP_SIZE/QI4_0) + i/QI4_0 + k0/QI4_0],
y_ds[j*MMQ_TILE_Y_K + (2*k0/QI8_1) % (WARP_SIZE/QI8_1)]);
}
}
@@ -180,10 +175,10 @@ static __device__ __forceinline__ void vec_dot_q4_0_q8_1_dp4a(
template <int mmq_x, int mmq_y, int nwarps>
static __device__ __forceinline__ void vec_dot_q4_0_q8_1_mma(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
+ const int * __restrict__ x_qs, const half2 * __restrict__ x_dm, const int * __restrict__ x_sc,
const int * __restrict__ y, float * __restrict__ sum, const int & k0) {
-
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
+#ifdef INT8_MMA_AVAILABLE
+ GGML_UNUSED(x_sc);
typedef mma_int_A_I16K8 mma_A;
typedef mma_int_B_J8K8 mma_B;
@@ -205,7 +200,7 @@ static __device__ __forceinline__ void vec_dot_q4_0_q8_1_mma(
const int k = k0 + mma_A::get_k(l) % QI4_0;
const int shift = 4*(mma_A::get_k(l) / QI4_0);
- A.x[l] = __vsubss4((x_ql[i*(WARP_SIZE + 1) + k] >> shift) & 0x0F0F0F0F, 0x08080808);
+ A.x[l] = __vsubss4((x_qs[i*(WARP_SIZE + 1) + k] >> shift) & 0x0F0F0F0F, 0x08080808);
}
#pragma unroll
for (int l = 0; l < mma_C::ne/2; ++l) {
@@ -240,12 +235,16 @@ static __device__ __forceinline__ void vec_dot_q4_0_q8_1_mma(
sum[(j0/B.J)*C.ne + l] += dA[l/2]*__low2float(dsB[l%2])*C.x[l];
}
}
+#else
+ GGML_UNUSED(x_qs); GGML_UNUSED(x_dm); GGML_UNUSED(x_sc); GGML_UNUSED(y); GGML_UNUSED(sum); GGML_UNUSED(k0);
+ NO_DEVICE_CODE;
+#endif // INT8_MMA_AVAILABLE
}
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_1(
- const char * __restrict__ x, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
+ const char * __restrict__ x, int * __restrict__ x_qs, half2 * __restrict__ x_dm,
int * __restrict__ x_sc, const int & kbx0, const int & i_max, const int & stride) {
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
+ GGML_UNUSED(x_sc);
const int kbx = threadIdx.x / QI4_1;
const int kqsx = threadIdx.x % QI4_1;
@@ -260,7 +259,7 @@ template static __device__ __forceinlin
const block_q4_1 * bxi = (const block_q4_1 *) x + kbx0 + i*stride + kbx;
- x_ql[i * (WARP_SIZE + 1) + threadIdx.x] = get_int_from_uint8_aligned(bxi->qs, kqsx);
+ x_qs[i * (WARP_SIZE + 1) + threadIdx.x] = get_int_from_uint8_aligned(bxi->qs, kqsx);
}
const int blocks_per_tile_x_row = WARP_SIZE / QI4_1;
@@ -282,10 +281,9 @@ template static __device__ __forceinlin
template <int mmq_x, int mmq_y, int nwarps>
static __device__ __forceinline__ void vec_dot_q4_1_q8_1_dp4a(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
+ const int * __restrict__ x_qs, const half2 * __restrict__ x_dm, const int * __restrict__ x_sc,
const int * __restrict__ y, float * __restrict__ sum, const int & k0) {
-
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
+ GGML_UNUSED(x_sc);
const int * y_qs = (const int *) y + 4;
const half2 * y_ds = (const half2 *) y;
@@ -309,7 +307,7 @@ static __device__ __forceinline__ void vec_dot_q4_1_q8_1_dp4a(
}
sum[j0/nwarps*mmq_y/WARP_SIZE + i0/WARP_SIZE] += vec_dot_q4_1_q8_1_impl
- (&x_ql[i*(WARP_SIZE + 1) + k0], u, x_dm[i*(WARP_SIZE/QI4_1) + i/QI4_1 + k0/QI4_1],
+ (&x_qs[i*(WARP_SIZE + 1) + k0], u, x_dm[i*(WARP_SIZE/QI4_1) + i/QI4_1 + k0/QI4_1],
y_ds[j*MMQ_TILE_Y_K + (2*k0/QI8_1) % (WARP_SIZE/QI8_1)]);
}
}
@@ -317,10 +315,10 @@ static __device__ __forceinline__ void vec_dot_q4_1_q8_1_dp4a(
template <int mmq_x, int mmq_y, int nwarps>
static __device__ __forceinline__ void vec_dot_q4_1_q8_1_mma(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
+ const int * __restrict__ x_qs, const half2 * __restrict__ x_dm, const int * __restrict__ x_sc,
const int * __restrict__ y, float * __restrict__ sum, const int & k0) {
-
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
+#ifdef INT8_MMA_AVAILABLE
+ GGML_UNUSED(x_sc);
typedef mma_int_A_I16K8 mma_A;
typedef mma_int_B_J8K8 mma_B;
@@ -341,7 +339,7 @@ static __device__ __forceinline__ void vec_dot_q4_1_q8_1_mma(
const int k = k0 + mma_A::get_k(l) % QI4_0;
const int shift = 4*(mma_A::get_k(l) / QI4_0);
- A.x[l] = (x_ql[i*(WARP_SIZE + 1) + k] >> shift) & 0x0F0F0F0F;
+ A.x[l] = (x_qs[i*(WARP_SIZE + 1) + k] >> shift) & 0x0F0F0F0F;
}
#pragma unroll
for (int l = 0; l < mma_C::ne/2; ++l) {
@@ -377,12 +375,16 @@ static __device__ __forceinline__ void vec_dot_q4_1_q8_1_mma(
sum[(j0/B.J)*C.ne + l] += __low2float(dmA_dsB)*C.x[l] + __high2float(dmA_dsB);
}
}
+#else
+ GGML_UNUSED(x_qs); GGML_UNUSED(x_dm); GGML_UNUSED(x_sc); GGML_UNUSED(y); GGML_UNUSED(sum); GGML_UNUSED(k0);
+ NO_DEVICE_CODE;
+#endif // INT8_MMA_AVAILABLE
}
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_0(
- const char * __restrict__ x, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
+ const char * __restrict__ x, int * __restrict__ x_qs, half2 * __restrict__ x_dm,
int * __restrict__ x_sc, const int & kbx0, const int & i_max, const int & stride) {
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
+ GGML_UNUSED(x_sc);
const int kbx = threadIdx.x / QI5_0;
const int kqsx = threadIdx.x % QI5_0;
@@ -407,7 +409,7 @@ template static __device__ __forceinlin
qs0 |= (qh << 25) & 0x10000000; // 3 -> 28
qs0 = __vsubss4(qs0, 0x10101010); // subtract 16
- x_ql[i * (2*WARP_SIZE + 1) + 2*threadIdx.x+0] = qs0;
+ x_qs[i * (2*WARP_SIZE + 1) + 2*threadIdx.x+0] = qs0;
int qs1 = (ql >> 4) & 0x0F0F0F0F;
qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4
@@ -416,7 +418,7 @@ template static __device__ __forceinlin
qs1 |= (qh << 9) & 0x10000000; // 19 -> 28
qs1 = __vsubss4(qs1, 0x10101010); // subtract 16
- x_ql[i * (2*WARP_SIZE + 1) + 2*threadIdx.x+1] = qs1;
+ x_qs[i * (2*WARP_SIZE + 1) + 2*threadIdx.x+1] = qs1;
}
const int blocks_per_tile_x_row = WARP_SIZE / QI5_0;
@@ -439,10 +441,9 @@ template static __device__ __forceinlin
template <int mmq_x, int mmq_y, int nwarps>
static __device__ __forceinline__ void vec_dot_q5_0_q8_1_dp4a(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
+ const int * __restrict__ x_qs, const half2 * __restrict__ x_dm, const int * __restrict__ x_sc,
const int * __restrict__ y, float * __restrict__ sum, const int & k0) {
-
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
+ GGML_UNUSED(x_sc);
const float * x_dmf = (const float *) x_dm;
const int * y_qs = (const int *) y + 4;
@@ -468,17 +469,17 @@ static __device__ __forceinline__ void vec_dot_q5_0_q8_1_dp4a(
}
sum[j0/nwarps*mmq_y/WARP_SIZE + i0/WARP_SIZE] += vec_dot_q8_0_q8_1_impl
- (&x_ql[i*(2*WARP_SIZE + 1) + 2*k0], u, x_dmf[index_bx], y_df[j*MMQ_TILE_Y_K + (2*k0/QI8_1) % (WARP_SIZE/QI8_1)]);
+ (&x_qs[i*(2*WARP_SIZE + 1) + 2*k0], u, x_dmf[index_bx], y_df[j*MMQ_TILE_Y_K + (2*k0/QI8_1) % (WARP_SIZE/QI8_1)]);
}
}
}
template <int mmq_x, int mmq_y, int nwarps>
static __device__ __forceinline__ void vec_dot_q5_0_q8_1_mma(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
+ const int * __restrict__ x_qs, const half2 * __restrict__ x_dm, const int * __restrict__ x_sc,
const int * __restrict__ y, float * __restrict__ sum, const int & k0) {
-
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
+#ifdef INT8_MMA_AVAILABLE
+ GGML_UNUSED(x_sc);
typedef mma_int_A_I16K8 mma_A;
typedef mma_int_B_J8K8 mma_B;
@@ -499,7 +500,7 @@ static __device__ __forceinline__ void vec_dot_q5_0_q8_1_mma(
const int i = i0 + mma_A::get_i(l);
const int k = 2*(k0 + mma_A::get_k(l) % QI5_0) + mma_A::get_k(l) / QI5_0;
- A.x[l] = x_ql[i*(2*WARP_SIZE + 1) + k];
+ A.x[l] = x_qs[i*(2*WARP_SIZE + 1) + k];
}
#pragma unroll
for (int l = 0; l < mma_C::ne/2; ++l) {
@@ -534,12 +535,16 @@ static __device__ __forceinline__ void vec_dot_q5_0_q8_1_mma(
sum[(j0/B.J)*C.ne + l] += dA[l/2]*dB[l%2]*C.x[l];
}
}
+#else
+ GGML_UNUSED(x_qs); GGML_UNUSED(x_dm); GGML_UNUSED(x_sc); GGML_UNUSED(y); GGML_UNUSED(sum); GGML_UNUSED(k0);
+ NO_DEVICE_CODE;
+#endif // INT8_MMA_AVAILABLE
}
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_1(
- const char * __restrict__ x, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
+ const char * __restrict__ x, int * __restrict__ x_qs, half2 * __restrict__ x_dm,
int * __restrict__ x_sc, const int & kbx0, const int & i_max, const int & stride) {
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
+ GGML_UNUSED(x_sc);
const int kbx = threadIdx.x / QI5_1;
const int kqsx = threadIdx.x % QI5_1;
@@ -563,7 +568,7 @@ template static __device__ __forceinlin
qs0 |= (qh << 18) & 0x00100000; // 2 -> 20
qs0 |= (qh << 25) & 0x10000000; // 3 -> 28
- x_ql[i * (2*WARP_SIZE + 1) + 2*threadIdx.x+0] = qs0;
+ x_qs[i * (2*WARP_SIZE + 1) + 2*threadIdx.x+0] = qs0;
int qs1 = (ql >> 4) & 0x0F0F0F0F;
qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4
@@ -571,7 +576,7 @@ template static __device__ __forceinlin
qs1 |= (qh << 2) & 0x00100000; // 18 -> 20
qs1 |= (qh << 9) & 0x10000000; // 19 -> 28
- x_ql[i * (2*WARP_SIZE + 1) + 2*threadIdx.x+1] = qs1;
+ x_qs[i * (2*WARP_SIZE + 1) + 2*threadIdx.x+1] = qs1;
}
const int blocks_per_tile_x_row = WARP_SIZE / QI5_1;
@@ -593,10 +598,9 @@ template static __device__ __forceinlin
template <int mmq_x, int mmq_y, int nwarps>
static __device__ __forceinline__ void vec_dot_q5_1_q8_1_dp4a(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
+ const int * __restrict__ x_qs, const half2 * __restrict__ x_dm, const int * __restrict__ x_sc,
const int * __restrict__ y, float * __restrict__ sum, const int & k0) {
-
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
+ GGML_UNUSED(x_sc);
const int * y_qs = (const int *) y + 4;
const half2 * y_ds = (const half2 *) y;
@@ -621,17 +625,17 @@ static __device__ __forceinline__ void vec_dot_q5_1_q8_1_dp4a(
}
sum[j0/nwarps*mmq_y/WARP_SIZE + i0/WARP_SIZE] += vec_dot_q8_1_q8_1_impl
- (&x_ql[i*(2*WARP_SIZE + 1) + 2*k0], u, x_dm[index_bx], y_ds[j*MMQ_TILE_Y_K + (2*k0/QI8_1) % (WARP_SIZE/QI8_1)]);
+ (&x_qs[i*(2*WARP_SIZE + 1) + 2*k0], u, x_dm[index_bx], y_ds[j*MMQ_TILE_Y_K + (2*k0/QI8_1) % (WARP_SIZE/QI8_1)]);
}
}
}
template <int mmq_x, int mmq_y, int nwarps>
static __device__ __forceinline__ void vec_dot_q5_1_q8_1_mma(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
+ const int * __restrict__ x_qs, const half2 * __restrict__ x_dm, const int * __restrict__ x_sc,
const int * __restrict__ y, float * __restrict__ sum, const int & k0) {
-
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
+#ifdef INT8_MMA_AVAILABLE
+ GGML_UNUSED(x_sc);
typedef mma_int_A_I16K8 mma_A;
typedef mma_int_B_J8K8 mma_B;
@@ -651,7 +655,7 @@ static __device__ __forceinline__ void vec_dot_q5_1_q8_1_mma(
const int i = i0 + mma_A::get_i(l);
const int k = 2*(k0 + mma_A::get_k(l) % QI5_1) + mma_A::get_k(l) / QI5_1;
- A.x[l] = x_ql[i*(2*WARP_SIZE + 1) + k];
+ A.x[l] = x_qs[i*(2*WARP_SIZE + 1) + k];
}
#pragma unroll
for (int l = 0; l < mma_C::ne/2; ++l) {
@@ -687,13 +691,16 @@ static __device__ __forceinline__ void vec_dot_q5_1_q8_1_mma(
sum[(j0/B.J)*C.ne + l] += __low2float(dmA_dsB)*C.x[l] + __high2float(dmA_dsB);
}
}
+#else
+ GGML_UNUSED(x_qs); GGML_UNUSED(x_dm); GGML_UNUSED(x_sc); GGML_UNUSED(y); GGML_UNUSED(sum); GGML_UNUSED(k0);
+ NO_DEVICE_CODE;
+#endif // INT8_MMA_AVAILABLE
}
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q8_0(
- const char * __restrict__ x, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
+ const char * __restrict__ x, int * __restrict__ x_qs, half2 * __restrict__ x_dm,
int * __restrict__ x_sc, const int & kbx0, const int & i_max, const int & stride) {
-
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
+ GGML_UNUSED(x_sc);
const int kbx = threadIdx.x / QI8_0;
const int kqsx = threadIdx.x % QI8_0;
@@ -709,7 +716,7 @@ template static __device__ __forceinlin
const block_q8_0 * bxi = (const block_q8_0 *) x + kbx0 + i*stride + kbx;
- x_ql[i * (WARP_SIZE + 1) + threadIdx.x] = get_int_from_int8(bxi->qs, kqsx);
+ x_qs[i * (WARP_SIZE + 1) + threadIdx.x] = get_int_from_int8(bxi->qs, kqsx);
}
const int blocks_per_tile_x_row = WARP_SIZE / QI8_0;
@@ -731,10 +738,9 @@ template static __device__ __forceinlin
template <int mmq_x, int mmq_y, int nwarps>
static __device__ __forceinline__ void vec_dot_q8_0_q8_1_dp4a(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
+ const int * __restrict__ x_qs, const half2 * __restrict__ x_dm, const int * __restrict__ x_sc,
const int * __restrict__ y, float * __restrict__ sum, const int & k0) {
-
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
+ GGML_UNUSED(x_sc);
const float * x_dmf = (const float *) x_dm;
const int * y_qs = (const int *) y + 4;
@@ -749,7 +755,7 @@ static __device__ __forceinline__ void vec_dot_q8_0_q8_1_dp4a(
const int i = i0 + threadIdx.x;
sum[j0/nwarps*mmq_y/WARP_SIZE + i0/WARP_SIZE] += vec_dot_q8_0_q8_1_impl
- (&x_ql[i*(WARP_SIZE + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k0], x_dmf[i*(WARP_SIZE/QI8_0) + i/QI8_0 + k0/QI8_0],
+ (&x_qs[i*(WARP_SIZE + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + k0], x_dmf[i*(WARP_SIZE/QI8_0) + i/QI8_0 + k0/QI8_0],
y_df[j*MMQ_TILE_Y_K + k0/QI8_1]);
}
}
@@ -757,10 +763,10 @@ static __device__ __forceinline__ void vec_dot_q8_0_q8_1_dp4a(
template <int mmq_x, int mmq_y, int nwarps>
static __device__ __forceinline__ void vec_dot_q8_0_q8_1_mma(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
+ const int * __restrict__ x_qs, const half2 * __restrict__ x_dm, const int * __restrict__ x_sc,
const int * __restrict__ y, float * __restrict__ sum, const int & k0) {
-
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
+#ifdef INT8_MMA_AVAILABLE
+ GGML_UNUSED(x_sc);
typedef mma_int_A_I16K8 mma_A;
typedef mma_int_B_J8K8 mma_B;
@@ -781,7 +787,7 @@ static __device__ __forceinline__ void vec_dot_q8_0_q8_1_mma(
const int i = i0 + mma_A::get_i(l);
const int k = k0 + mma_A::get_k(l);
- A.x[l] = x_ql[i*(WARP_SIZE + 1) + k];
+ A.x[l] = x_qs[i*(WARP_SIZE + 1) + k];
}
#pragma unroll
for (int l = 0; l < mma_C::ne/2; ++l) {
@@ -816,12 +822,15 @@ static __device__ __forceinline__ void vec_dot_q8_0_q8_1_mma(
sum[(j0/B.J)*C.ne + l] += C.x[l]*dA[l/2]*dB[l%2];
}
}
+#else
+ GGML_UNUSED(x_qs); GGML_UNUSED(x_dm); GGML_UNUSED(x_sc); GGML_UNUSED(y); GGML_UNUSED(sum); GGML_UNUSED(k0);
+ NO_DEVICE_CODE;
+#endif // INT8_MMA_AVAILABLE
}
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q2_K(
- const char * __restrict__ x, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
+ const char * __restrict__ x, int * __restrict__ x_qs, half2 * __restrict__ x_dm,
int * __restrict__ x_sc, const int & kbx0, const int & i_max, const int & stride) {
- GGML_UNUSED(x_qh);
const int kbx = threadIdx.x / QI2_K;
const int kqsx = threadIdx.x % QI2_K;
@@ -836,48 +845,42 @@ template static __device__ __forceinlin
const block_q2_K * bxi = (const block_q2_K *) x + kbx0 + i*stride + kbx;
- x_ql[i * (WARP_SIZE + 1) + threadIdx.x] = get_int_from_uint8_aligned(bxi->qs, kqsx);
- }
-
- const int blocks_per_tile_x_row = WARP_SIZE / QI2_K;
- const int kbxd = threadIdx.x % blocks_per_tile_x_row;
+ const int x_ql_0 = get_int_from_uint8(bxi->qs, kqsx);
#pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI2_K) {
- int i = (i0 + threadIdx.y * QI2_K + threadIdx.x / blocks_per_tile_x_row) % mmq_y;
+ for (int l = 0; l < QR2_K; ++l) {
+ const int k = kbx*QI2_K + (kqsx/8)*8 + l*2 + (kqsx % 8)/4;
- if (need_check) {
- i = min(i, i_max);
- }
-
- const block_q2_K * bxi = (const block_q2_K *) x + kbx0 + i*stride + kbxd;
+ int x_qs_k = ((x_ql_0 >> (2*l)) & 0x03030303) << (2*(kqsx % 4));
+ x_qs_k |= __shfl_xor_sync(0xFFFFFFFF, x_qs_k, 1, WARP_SIZE);
+ x_qs_k |= __shfl_xor_sync(0xFFFFFFFF, x_qs_k, 2, WARP_SIZE);
- x_dm[i * (WARP_SIZE/QI2_K) + i / QI2_K + kbxd] = bxi->dm;
- }
-
-#pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) {
- int i = i0 + threadIdx.y * 4 + threadIdx.x / (WARP_SIZE/4);
+ if (kqsx % QR2_K != 0) {
+ continue;
+ }
- if (need_check) {
- i = min(i, i_max);
+ x_qs[i*(WARP_SIZE + 1) + k] = x_qs_k;
}
- const block_q2_K * bxi = (const block_q2_K *) x + kbx0 + i*stride + (threadIdx.x % (WARP_SIZE/4)) / (QI2_K/4);
+ const int sc_m = bxi->scales[kqsx];
+#ifdef FAST_FP16_AVAILABLE
+ const half2 x_dm_ik = __hmul2(bxi->dm, make_half2(sc_m & 0x0F, sc_m >> 4));
+#else
+ const float2 bxi_dmf = __half22float2(bxi->dm);
+ const half2 x_dm_ik = make_half2(bxi_dmf.x*(sc_m & 0x0F), bxi_dmf.y*(sc_m >> 4));
+#endif // FAST_FP16_AVAILABLE
- x_sc[i * (WARP_SIZE/4) + i / 4 + threadIdx.x % (WARP_SIZE/4)] = get_int_from_uint8_aligned(bxi->scales, threadIdx.x % (QI2_K/4));
+ x_dm[i*(WARP_SIZE + 1) + threadIdx.x] = x_dm_ik;
}
}
template <int mmq_x, int mmq_y, int nwarps>
-static __device__ __forceinline__ void vec_dot_q2_K_q8_1_mul_mat(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
+static __device__ __forceinline__ void vec_dot_q2_K_q8_1_dp4a(
+ const int * __restrict__ x_qs, const half2 * __restrict__ x_dm, const int * __restrict__ x_sc,
const int * __restrict__ y, float * __restrict__ sum, const int & k0) {
- GGML_UNUSED(x_qh);
-
- const int * y_qs = (const int *) y + 4;
- const float * y_df = (const float *) y;
+ const int * y_qs = (const int *) y + 4;
+ const float * y_df = (const float *) y;
#pragma unroll
for (int j0 = 0; j0 < mmq_x; j0 += nwarps) {
@@ -887,30 +890,99 @@ static __device__ __forceinline__ void vec_dot_q2_K_q8_1_mul_mat(
for (int i0 = 0; i0 < mmq_y; i0 += WARP_SIZE) {
const int i = i0 + threadIdx.x;
- const int kbx = k0 / QI2_K;
- const int ky = (k0 % QI2_K) * QR2_K;
+ sum[j0/nwarps*mmq_y/WARP_SIZE + i0/WARP_SIZE] += vec_dot_q2_K_q8_1_impl_mmq(
+ &x_qs[i*(WARP_SIZE + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + (QR2_K*k0) % WARP_SIZE],
+ &x_dm[i*(WARP_SIZE + 1) + k0], y_df[j*MMQ_TILE_Y_K + ((QR2_K*k0) % WARP_SIZE)/QI8_1]);
+ }
+ }
+}
+
+template <int mmq_x, int mmq_y, int nwarps>
+static __device__ __forceinline__ void vec_dot_q2_K_q8_1_mma(
+ const int * __restrict__ x_qs, const half2 * __restrict__ x_dm, const int * __restrict__ x_sc,
+ const int * __restrict__ y, float * __restrict__ sum, const int & k0) {
+#ifdef INT8_MMA_AVAILABLE
+
+ typedef mma_int_A_I16K4 mma_A;
+ typedef mma_int_B_J8K4 mma_B;
+ typedef mma_int_C_I16J8 mma_C;
+
+ const int * y_qs = (const int *) y + 4;
+ const float * y_df = (const float *) y;
+
+ const int i0 = threadIdx.y*mma_A::I;
+ static_assert(nwarps*mma_A::I == mmq_y, "nwarps*mma_A::I != mmq_y");
+
+ mma_A A[2];
+ float dA[mma_C::ne/2][2];
+ float mA[mma_C::ne/2][2];
- int v[QR2_K*VDR_Q2_K_Q8_1_MMQ];
+#pragma unroll
+ for (int l = 0; l < mma_A::ne; ++l) {
+ const int i = i0 + mma_A::get_i(l);
+ const int shift = 2*mma_A::get_k(l);
- const int kqsx = i*(WARP_SIZE + 1) + kbx*QI2_K + (QI2_K/2) * (ky/(2*QI2_K)) + ky % (QI2_K/2);
- const int shift = 2 * ((ky % (2*QI2_K)) / (QI2_K/2));
+ A[0].x[l] = (x_qs[i*(WARP_SIZE + 1) + k0 + 0] >> shift) & 0x03030303;
+ A[1].x[l] = (x_qs[i*(WARP_SIZE + 1) + k0 + 1] >> shift) & 0x03030303;
+ }
#pragma unroll
- for (int l = 0; l < QR2_K*VDR_Q2_K_Q8_1_MMQ; ++l) {
- v[l] = (x_ql[kqsx + l] >> shift) & 0x03030303;
- }
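+    // x_dm already holds the sub-block scale in .x and the sub-block min in .y,
+    // premultiplied by the block-wide d/dmin in load_tiles_q2_K.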
+ for (int l = 0; l < mma_C::ne/2; ++l) {
+ const int i = i0 + mma_C::get_i(2*l);
- const uint8_t * scales = ((const uint8_t *) &x_sc[i*(WARP_SIZE/4) + i/4 + kbx*4]) + ky/4;
+#pragma unroll
+ for (int kk = 0; kk < 2; ++kk) {
+ const float2 dm = __half22float2(x_dm[i*(WARP_SIZE + 1) + k0 + kk]);
- sum[j0/nwarps*mmq_y/WARP_SIZE + i0/WARP_SIZE] += vec_dot_q2_K_q8_1_impl_mmq(
- v, &y_qs[j*MMQ_TILE_Y_K + (QR2_K*k0) % WARP_SIZE], scales,
- x_dm[i*(WARP_SIZE/QI2_K) + i/QI2_K + kbx], y_df[j*MMQ_TILE_Y_K + ((QR2_K*k0) % WARP_SIZE)/QI8_1]);
+ dA[l][kk] = dm.x;
+ mA[l][kk] = dm.y;
}
}
+
+#pragma unroll
+ for (int j0 = 0; j0 < mmq_x; j0 += mma_int_B_J8K8::J) {
+ mma_C Cd[2];
+ mma_C Cm[2];
+ mma_B B[2];
+ float dB[mma_C::ne/2];
+
+#pragma unroll
+ for (int l = 0; l < mma_B::ne; ++l) {
+ const int j = j0 + mma_B::get_j(l);
+ const int k = (4*k0 + mma_B::get_k(l)) % WARP_SIZE;
+
+ B[0].x[l] = y_qs[j*MMQ_TILE_Y_K + k + 0];
+ B[1].x[l] = y_qs[j*MMQ_TILE_Y_K + k + mma_B::K];
+ }
+#pragma unroll
+ for (int l = 0; l < mma_C::ne/2; ++l) {
+ const int j = j0 + mma_C::get_j(l);
+
+ dB[l] = y_df[j*MMQ_TILE_Y_K + ((4*k0)/QI8_1) % (WARP_SIZE/QI8_1)];
+ }
+
+ Cd[0].mma_K4(A[0], B[0]);
+ Cd[1].mma_K4(A[1], B[1]);
+
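+    // Multiplying a tile of all-ones against B yields the sums of the q8_1 values along k,
+    // which are needed to apply the q2_K minimum (mA) correction in the accumulation below.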
+ mma_A A1;
+ A1.x[0] = 0x01010101;
+ A1.x[1] = 0x01010101;
+ Cm[0].mma_K4(A1, B[0]);
+ Cm[1].mma_K4(A1, B[1]);
+
+#pragma unroll
+ for (int l = 0; l < mma_C::ne; ++l) {
+ sum[(j0/mma_B::J)*mma_C::ne + l] += (Cd[0].x[l]*dA[l/2][0] + Cd[1].x[l]*dA[l/2][1] - Cm[0].x[l]*mA[l/2][0] - Cm[1].x[l]*mA[l/2][1])*dB[l%2];
+ }
+ }
+#else
+ GGML_UNUSED(x_qs); GGML_UNUSED(x_dm); GGML_UNUSED(x_sc); GGML_UNUSED(y); GGML_UNUSED(sum); GGML_UNUSED(k0);
+ NO_DEVICE_CODE;
+#endif // INT8_MMA_AVAILABLE
}
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q3_K(
- const char * __restrict__ x, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
+ const char * __restrict__ x, int * __restrict__ x_qs, half2 * __restrict__ x_dm,
int * __restrict__ x_sc, const int & kbx0, const int & i_max, const int & stride) {
const int kbx = threadIdx.x / QI3_K;
@@ -926,7 +998,25 @@ template static __device__ __forceinlin
const block_q3_K * bxi = (const block_q3_K *) x + kbx0 + i*stride + kbx;
- x_ql[i * (WARP_SIZE + 1) + threadIdx.x] = get_int_from_uint8(bxi->qs, kqsx);
+ const int x_ql_0 = get_int_from_uint8(bxi->qs, kqsx);
+ const int x_qh_0 = get_int_from_uint8(bxi->hmask, kqsx % (QI3_K/2)) >> (4 * (kqsx / (QI3_K/2)));
+
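+    // Merge the low 2 bits and the high-bit mask into unsigned values in [0, 7]; the constant
+    // -4 offset is applied later when the values are unpacked (see __vsubss4 in the mma path).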
+#pragma unroll
+ for (int l = 0; l < QR3_K; ++l) {
+ const int k = kbx*(QR3_K*QI3_K) + (kqsx/8)*32 + l*8 + kqsx % 8;
+
+ const int x_ql_k = (x_ql_0 >> (2*l)) & 0x03030303;
+ const int x_qh_k = ((x_qh_0 >> l) << 2) & 0x04040404;
+
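+        // Two neighbouring lanes combine their nibbles with a single butterfly shuffle so that
+        // each byte of the stored int holds two adjacent values; only the even lane writes it.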
+ int x_qs_k = (x_ql_k | x_qh_k) << (4*(k%2));
+ x_qs_k |= __shfl_xor_sync(0xFFFFFFFF, x_qs_k, 1, WARP_SIZE);
+
+ if (kqsx % 2 != 0) {
+ continue;
+ }
+
+ x_qs[i*(2*WARP_SIZE + 1) + k/2] = x_qs_k;
+ }
}
const int blocks_per_tile_x_row = WARP_SIZE / QI3_K;
@@ -946,20 +1036,6 @@ template static __device__ __forceinlin
x_dmf[i * (WARP_SIZE/QI3_K) + i / QI3_K + kbxd] = bxi->d;
}
-#pragma unroll
- for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 2) {
- int i = i0 + threadIdx.y * 2 + threadIdx.x / (WARP_SIZE/2);
-
- if (need_check) {
- i = min(i, i_max);
- }
-
- const block_q3_K * bxi = (const block_q3_K *) x + kbx0 + i*stride + (threadIdx.x % (WARP_SIZE/2)) / (QI3_K/2);
-
- // invert the mask with ~ so that a 0/1 results in 4/0 being subtracted
- x_qh[i * (WARP_SIZE/2) + i / 2 + threadIdx.x % (WARP_SIZE/2)] = ~get_int_from_uint8(bxi->hmask, threadIdx.x % (QI3_K/2));
- }
-
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) {
int i = i0 + threadIdx.y * 4 + threadIdx.x / (WARP_SIZE/4);
@@ -987,13 +1063,13 @@ template static __device__ __forceinlin
}
template <int mmq_x, int mmq_y, int nwarps>
-static __device__ __forceinline__ void vec_dot_q3_K_q8_1_mul_mat(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
+static __device__ __forceinline__ void vec_dot_q3_K_q8_1_dp4a(
+ const int * __restrict__ x_qs, const half2 * __restrict__ x_dm, const int * __restrict__ x_sc,
const int * __restrict__ y, float * __restrict__ sum, const int & k0) {
- const float * x_dmf = (const float *) x_dm;
- const int * y_qs = (const int *) y + 4;
- const float * y_df = (const float *) y;
+ const float * x_df = (const float *) x_dm;
+ const int * y_qs = (const int *) y + 4;
+ const float * y_df = (const float *) y;
#pragma unroll
for (int j0 = 0; j0 < mmq_x; j0 += nwarps) {
@@ -1008,31 +1084,102 @@ static __device__ __forceinline__ void vec_dot_q3_K_q8_1_mul_mat(
const int8_t * scales = ((const int8_t *) (x_sc + i * (WARP_SIZE/4) + i/4 + kbx*4)) + ky/4;
- int v[QR3_K*VDR_Q3_K_Q8_1_MMQ];
+ sum[j0/nwarps*mmq_y/WARP_SIZE + i0/WARP_SIZE] += vec_dot_q3_K_q8_1_impl_mmq(
+ &x_qs[i*(2*WARP_SIZE + 1) + 2*k0], &y_qs[j*MMQ_TILE_Y_K + (k0*QR3_K) % WARP_SIZE], scales,
+ x_df[i*(WARP_SIZE/QI3_K) + i/QI3_K + kbx], y_df[j*MMQ_TILE_Y_K + ((k0*QR3_K) % WARP_SIZE)/QI8_1]);
+ }
+ }
+}
+
+template <int mmq_x, int mmq_y, int nwarps>
+static __device__ __forceinline__ void vec_dot_q3_K_q8_1_mma(
+ const int * __restrict__ x_qs, const half2 * __restrict__ x_dm, const int * __restrict__ x_sc,
+ const int * __restrict__ y, float * __restrict__ sum, const int & k0) {
+#ifdef INT8_MMA_AVAILABLE
+
+ typedef mma_int_A_I16K4 mma_A;
+ typedef mma_int_B_J8K4 mma_B;
+ typedef mma_int_C_I16J8 mma_C;
+
+ const float * x_df = (const float *) x_dm;
+ const int * y_qs = (const int *) y + 4;
+ const float * y_df = (const float *) y;
+
+ const int i0 = threadIdx.y*mma_A::I;
+ static_assert(nwarps*mma_A::I == mmq_y, "nwarps*mma_A::I != mmq_y");
+
+ mma_A A[2];
+ int scA[mma_C::ne/2][2];
+ float dA[mma_C::ne/2];
#pragma unroll
- for (int l = 0; l < QR3_K*VDR_Q3_K_Q8_1_MMQ; ++l) {
- const int kqsx = i*(WARP_SIZE + 1) + kbx*QI3_K + (QI3_K/2) * (ky/(2*QI3_K)) + ky % (QI3_K/2);
- const int shift = 2 * ((ky % 32) / 8);
- const int vll = (x_ql[kqsx + l] >> shift) & 0x03030303;
+ for (int l = 0; l < mma_A::ne; ++l) {
+ const int i = i0 + mma_A::get_i(l);
+ const int k = QR3_K*k0 + mma_A::get_k(l);
- const int vh = x_qh[i*(WARP_SIZE/2) + i/2 + kbx * (QI3_K/2) + (ky+l)%8] >> ((ky+l) / 8);
- const int vlh = (vh << 2) & 0x04040404;
+ A[0].x[l] = (x_qs[i*(2*WARP_SIZE + 1) + k/2 + 0] >> (4*(k%2))) & 0x0F0F0F0F;
+ A[1].x[l] = (x_qs[i*(2*WARP_SIZE + 1) + k/2 + mma_A::K/2] >> (4*(k%2))) & 0x0F0F0F0F;
+ A[0].x[l] = __vsubss4(A[0].x[l], 0x04040404);
+ A[1].x[l] = __vsubss4(A[1].x[l], 0x04040404);
+ }
- v[l] = __vsubss4(vll, vlh);
- }
+#pragma unroll
+ for (int l = 0; l < mma_C::ne/2; ++l) {
+ const int i = i0 + mma_C::get_i(2*l);
- sum[j0/nwarps*mmq_y/WARP_SIZE + i0/WARP_SIZE] += vec_dot_q3_K_q8_1_impl_mmq(
- v, &y_qs[j*MMQ_TILE_Y_K + (k0*QR3_K) % WARP_SIZE], scales,
- x_dmf[i*(WARP_SIZE/QI3_K) + i/QI3_K + kbx], y_df[j*MMQ_TILE_Y_K + ((k0*QR3_K) % WARP_SIZE)/QI8_1]);
+ const int kbx = k0 / QI3_K;
+ const int ky = (k0 % QI3_K) * QR3_K;
+ const int8_t * sc = ((const int8_t *) (x_sc + i * (WARP_SIZE/4) + i/4 + kbx*4)) + ky/4;
+
+ scA[l][0] = sc[0];
+ scA[l][1] = sc[1];
+ }
+
+#pragma unroll
+ for (int l = 0; l < mma_C::ne/2; ++l) {
+ const int i = i0 + mma_C::get_i(2*l);
+
+ dA[l] = x_df[i*(WARP_SIZE/QI3_K) + i/QI3_K + k0/QI3_K];
+ }
+
+#pragma unroll
+ for (int j0 = 0; j0 < mmq_x; j0 += mma_int_B_J8K8::J) {
+ mma_C C[2];
+ mma_B B[2];
+ float dB[mma_C::ne/2];
+
+#pragma unroll
+ for (int l = 0; l < mma_B::ne; ++l) {
+ const int j = j0 + mma_B::get_j(l);
+ const int k = (4*k0 + mma_B::get_k(l)) % WARP_SIZE;
+
+ B[0].x[l] = y_qs[j*MMQ_TILE_Y_K + k + 0];
+ B[1].x[l] = y_qs[j*MMQ_TILE_Y_K + k + mma_B::K];
+ }
+#pragma unroll
+ for (int l = 0; l < mma_C::ne/2; ++l) {
+ const int j = j0 + mma_C::get_j(l);
+
+ dB[l] = y_df[j*MMQ_TILE_Y_K + ((4*k0)/QI8_1) % (WARP_SIZE/QI8_1)];
+ }
+
+ C[0].mma_K4(A[0], B[0]);
+ C[1].mma_K4(A[1], B[1]);
+
+#pragma unroll
+ for (int l = 0; l < mma_C::ne; ++l) {
+ sum[(j0/mma_B::J)*mma_C::ne + l] += (C[0].x[l]*scA[l/2][0] + C[1].x[l]*scA[l/2][1])*dA[l/2]*dB[l%2];
}
}
+#else
+ GGML_UNUSED(x_qs); GGML_UNUSED(x_dm); GGML_UNUSED(x_sc); GGML_UNUSED(y); GGML_UNUSED(sum); GGML_UNUSED(k0);
+ NO_DEVICE_CODE;
+#endif // INT8_MMA_AVAILABLE
}
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q4_K(
- const char * __restrict__ x, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
+ const char * __restrict__ x, int * __restrict__ x_qs, half2 * __restrict__ x_dm,
int * __restrict__ x_sc, const int & kbx0, const int & i_max, const int & stride) {
- GGML_UNUSED(x_qh);
const int kbx = 0; // threadIdx.x / QI4_K
const int kqsx = threadIdx.x; // threadIdx.x % QI4_K
@@ -1047,7 +1194,7 @@ template static __device__ __forceinlin
const block_q4_K * bxi = (const block_q4_K *) x + kbx0 + i*stride + kbx;
- x_ql[i * (WARP_SIZE + 1) + threadIdx.x] = get_int_from_uint8_aligned(bxi->qs, kqsx);
+ x_qs[i * (WARP_SIZE + 1) + threadIdx.x] = get_int_from_uint8_aligned(bxi->qs, kqsx);
}
const int blocks_per_tile_x_row = WARP_SIZE / QI4_K; // == 1 if QK_K == 256
@@ -1090,11 +1237,9 @@ template static __device__ __forceinlin
template <int mmq_x, int mmq_y, int nwarps>
static __device__ __forceinline__ void vec_dot_q4_K_q8_1_dp4a(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
+ const int * __restrict__ x_qs, const half2 * __restrict__ x_dm, const int * __restrict__ x_sc,
const int * __restrict__ y, float * __restrict__ sum, const int & k0) {
- GGML_UNUSED(x_qh);
-
const int * y_qs = (const int *) y + 4;
const half2 * y_ds = (const half2 *) y;
@@ -1109,7 +1254,7 @@ static __device__ __forceinline__ void vec_dot_q4_K_q8_1_dp4a(
const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k0/16]) + 2*((k0 % 16) / 8);
sum[j0/nwarps*mmq_y/WARP_SIZE + i0/WARP_SIZE] += vec_dot_q4_K_q8_1_impl_mmq(
- &x_ql[i*(WARP_SIZE + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + (QR4_K*k0) % WARP_SIZE], sc, sc+8,
+ &x_qs[i*(WARP_SIZE + 1) + k0], &y_qs[j*MMQ_TILE_Y_K + (QR4_K*k0) % WARP_SIZE], sc, sc+8,
x_dm[i*(WARP_SIZE/QI4_K) + i/QI4_K], &y_ds[j*MMQ_TILE_Y_K + ((QR4_K*k0) % WARP_SIZE)/QI8_1]);
}
}
@@ -1117,10 +1262,9 @@ static __device__ __forceinline__ void vec_dot_q4_K_q8_1_dp4a(
template <int mmq_x, int mmq_y, int nwarps>
static __device__ __forceinline__ void vec_dot_q4_K_q8_1_mma(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
+ const int * __restrict__ x_qs, const half2 * __restrict__ x_dm, const int * __restrict__ x_sc,
const int * __restrict__ y, float * __restrict__ sum, const int & k0) {
-
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
+#ifdef INT8_MMA_AVAILABLE
typedef mma_int_A_I16K8 mma_A;
typedef mma_int_B_J8K8 mma_B;
@@ -1143,7 +1287,7 @@ static __device__ __forceinline__ void vec_dot_q4_K_q8_1_mma(
const int i = i0 + mma_A::get_i(l);
const int k = k0 + mma_A::get_k(l);
- A[kvdr/4].x[l] = (x_ql[i*(WARP_SIZE + 1) + k] >> kvdr) & 0x0F0F0F0F;
+ A[kvdr/4].x[l] = (x_qs[i*(WARP_SIZE + 1) + k] >> kvdr) & 0x0F0F0F0F;
}
#pragma unroll
@@ -1204,12 +1348,15 @@ static __device__ __forceinline__ void vec_dot_q4_K_q8_1_mma(
sum[(j0/mma_B::J)*mma_C::ne + l] += __low2float(dmA[l/2])*tmpd[l] - __high2float(dmA[l/2])*tmpm[l];
}
}
+#else
+ GGML_UNUSED(x_qs); GGML_UNUSED(x_dm); GGML_UNUSED(x_sc); GGML_UNUSED(y); GGML_UNUSED(sum); GGML_UNUSED(k0);
+ NO_DEVICE_CODE;
+#endif // INT8_MMA_AVAILABLE
}
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q5_K(
- const char * __restrict__ x, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
+ const char * __restrict__ x, int * __restrict__ x_qs, half2 * __restrict__ x_dm,
int * __restrict__ x_sc, const int & kbx0, const int & i_max, const int & stride) {
- GGML_UNUSED(x_qh);
const int kbx = 0; // threadIdx.x / QI5_K
const int kqsx = threadIdx.x; // threadIdx.x % QI5_K
@@ -1236,8 +1383,8 @@ template static __device__ __forceinlin
const int kq0 = ky - ky % (QI5_K/2) + threadIdx.x % (QI5_K/4) + 0;
const int kq1 = ky - ky % (QI5_K/2) + threadIdx.x % (QI5_K/4) + (QI5_K/4);
- x_ql[i * (2*WARP_SIZE + 1) + kq0] = ql0 | qh0;
- x_ql[i * (2*WARP_SIZE + 1) + kq1] = ql1 | qh1;
+ x_qs[i * (2*WARP_SIZE + 1) + kq0] = ql0 | qh0;
+ x_qs[i * (2*WARP_SIZE + 1) + kq1] = ql1 | qh1;
}
const int blocks_per_tile_x_row = WARP_SIZE / QI5_K; // == 1 if QK_K == 256
@@ -1280,11 +1427,9 @@ template static __device__ __forceinlin
template <int mmq_x, int mmq_y, int nwarps>
static __device__ __forceinline__ void vec_dot_q5_K_q8_1_dp4a(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
+ const int * __restrict__ x_qs, const half2 * __restrict__ x_dm, const int * __restrict__ x_sc,
const int * __restrict__ y, float * __restrict__ sum, const int & k0) {
- GGML_UNUSED(x_qh);
-
const int * y_qs = (const int *) y + 4;
const half2 * y_ds = (const half2 *) y;
@@ -1299,7 +1444,7 @@ static __device__ __forceinline__ void vec_dot_q5_K_q8_1_dp4a(
const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k0/16]) + 2 * ((k0 % 16) / 8);
sum[j0/nwarps*mmq_y/WARP_SIZE + i0/WARP_SIZE] += vec_dot_q5_K_q8_1_impl_mmq(
- &x_ql[i*(QR5_K*WARP_SIZE + 1) + QR5_K*k0], &y_qs[j*MMQ_TILE_Y_K + (QR5_K*k0) % WARP_SIZE], sc, sc+8,
+ &x_qs[i*(QR5_K*WARP_SIZE + 1) + QR5_K*k0], &y_qs[j*MMQ_TILE_Y_K + (QR5_K*k0) % WARP_SIZE], sc, sc+8,
x_dm[i*(WARP_SIZE/QI5_K) + i/QI5_K], &y_ds[j*MMQ_TILE_Y_K + ((QR5_K*k0) % WARP_SIZE)/QI8_1]);
}
}
@@ -1307,10 +1452,9 @@ static __device__ __forceinline__ void vec_dot_q5_K_q8_1_dp4a(
template <int mmq_x, int mmq_y, int nwarps>
static __device__ __forceinline__ void vec_dot_q5_K_q8_1_mma(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
+ const int * __restrict__ x_qs, const half2 * __restrict__ x_dm, const int * __restrict__ x_sc,
const int * __restrict__ y, float * __restrict__ sum, const int & k0) {
-
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
+#ifdef INT8_MMA_AVAILABLE
typedef mma_int_A_I16K8 mma_A;
typedef mma_int_B_J8K8 mma_B;
@@ -1333,7 +1477,7 @@ static __device__ __forceinline__ void vec_dot_q5_K_q8_1_mma(
const int i = i0 + mma_A::get_i(l);
const int k = QR5_K*k0 + QR5_K*kvdr + mma_A::get_k(l);
- A[kvdr/4].x[l] = x_ql[i*(QR5_K*WARP_SIZE + 1) + k];
+ A[kvdr/4].x[l] = x_qs[i*(QR5_K*WARP_SIZE + 1) + k];
}
#pragma unroll
@@ -1394,12 +1538,15 @@ static __device__ __forceinline__ void vec_dot_q5_K_q8_1_mma(
sum[(j0/mma_B::J)*mma_C::ne + l] += __low2float(dmA[l/2])*tmpd[l] - __high2float(dmA[l/2])*tmpm[l];
}
}
+#else
+ GGML_UNUSED(x_qs); GGML_UNUSED(x_dm); GGML_UNUSED(x_sc); GGML_UNUSED(y); GGML_UNUSED(sum); GGML_UNUSED(k0);
+ NO_DEVICE_CODE;
+#endif // INT8_MMA_AVAILABLE
}
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_q6_K(
- const char * __restrict__ x, int * __restrict__ x_ql, half2 * __restrict__ x_dm, int * __restrict__ x_qh,
+ const char * __restrict__ x, int * __restrict__ x_qs, half2 * __restrict__ x_dm,
int * __restrict__ x_sc, const int & kbx0, const int & i_max, const int & stride) {
- GGML_UNUSED(x_qh);
const int kbx = 0; // threadIdx.x / QI6_K
const int kqsx = threadIdx.x; // threadIdx.x % QI6_K
@@ -1426,8 +1573,8 @@ template static __device__ __forceinlin
const int kq0 = ky - ky % QI6_K + threadIdx.x % (QI6_K/2) + 0;
const int kq1 = ky - ky % QI6_K + threadIdx.x % (QI6_K/2) + (QI6_K/2);
- x_ql[i * (2*WARP_SIZE + 1) + kq0] = __vsubss4(ql0 | qh0, 0x20202020);
- x_ql[i * (2*WARP_SIZE + 1) + kq1] = __vsubss4(ql1 | qh1, 0x20202020);
+ x_qs[i * (2*WARP_SIZE + 1) + kq0] = __vsubss4(ql0 | qh0, 0x20202020);
+ x_qs[i * (2*WARP_SIZE + 1) + kq1] = __vsubss4(ql1 | qh1, 0x20202020);
}
const int blocks_per_tile_x_row = WARP_SIZE / QI6_K; // == 1 if QK_K == 256
@@ -1463,11 +1610,9 @@ template static __device__ __forceinlin
template <int mmq_x, int mmq_y, int nwarps>
static __device__ __forceinline__ void vec_dot_q6_K_q8_1_dp4a(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
+ const int * __restrict__ x_qs, const half2 * __restrict__ x_dm, const int * __restrict__ x_sc,
const int * __restrict__ y, float * __restrict__ sum, const int & k0) {
- GGML_UNUSED(x_qh);
-
const float * x_dmf = (const float *) x_dm;
const int * y_qs = (const int *) y + 4;
const float * y_df = (const float *) y;
@@ -1483,7 +1628,7 @@ static __device__ __forceinline__ void vec_dot_q6_K_q8_1_dp4a(
const int8_t * sc = ((const int8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k0/8]);
sum[j0/nwarps*mmq_y/WARP_SIZE + i0/WARP_SIZE] += vec_dot_q6_K_q8_1_impl_mmq(
- &x_ql[i*(QR6_K*WARP_SIZE + 1) + QR6_K*k0], &y_qs[j*MMQ_TILE_Y_K + (QR6_K*k0) % WARP_SIZE], sc,
+ &x_qs[i*(QR6_K*WARP_SIZE + 1) + QR6_K*k0], &y_qs[j*MMQ_TILE_Y_K + (QR6_K*k0) % WARP_SIZE], sc,
x_dmf[i*(WARP_SIZE/QI6_K) + i/QI6_K], &y_df[j*MMQ_TILE_Y_K + ((QR6_K*k0) % WARP_SIZE)/QI8_1]);
}
}
@@ -1491,10 +1636,9 @@ static __device__ __forceinline__ void vec_dot_q6_K_q8_1_dp4a(
template <int mmq_x, int mmq_y, int nwarps>
static __device__ __forceinline__ void vec_dot_q6_K_q8_1_mma(
- const int * __restrict__ x_ql, const half2 * __restrict__ x_dm, const int * __restrict__ x_qh, const int * __restrict__ x_sc,
+ const int * __restrict__ x_qs, const half2 * __restrict__ x_dm, const int * __restrict__ x_sc,
const int * __restrict__ y, float * __restrict__ sum, const int & k0) {
-
- GGML_UNUSED(x_qh); GGML_UNUSED(x_sc);
+#ifdef INT8_MMA_AVAILABLE
typedef mma_int_A_I16K4 mma_A;
typedef mma_int_B_J8K4 mma_B;
@@ -1505,7 +1649,9 @@ static __device__ __forceinline__ void vec_dot_q6_K_q8_1_mma(
const float * y_df = (const float *) y;
const int i0 = threadIdx.y*mma_A::I;
+#ifdef INT8_MMA_AVAILABLE
static_assert(nwarps*mma_A::I == mmq_y, "nwarps*mma_A::I != mmq_y");
+#endif // INT8_MMA_AVAILABLE
mma_A A[4];
int scA[mma_C::ne/2][4];
@@ -1517,8 +1663,8 @@ static __device__ __forceinline__ void vec_dot_q6_K_q8_1_mma(
const int i = i0 + mma_A::get_i(l);
const int k = QR6_K*k0 + QR6_K*kvdr + mma_A::get_k(l);
- A[kvdr/2 + 0].x[l] = x_ql[i*(QR6_K*WARP_SIZE + 1) + k + 0];
- A[kvdr/2 + 1].x[l] = x_ql[i*(QR6_K*WARP_SIZE + 1) + k + mma_A::K];
+ A[kvdr/2 + 0].x[l] = x_qs[i*(QR6_K*WARP_SIZE + 1) + k + 0];
+ A[kvdr/2 + 1].x[l] = x_qs[i*(QR6_K*WARP_SIZE + 1) + k + mma_A::K];
}
#pragma unroll
@@ -1578,55 +1724,65 @@ static __device__ __forceinline__ void vec_dot_q6_K_q8_1_mma(
sum[(j0/mma_B::J)*mma_C::ne + l] += tmp[l]*dA[l/2];
}
}
+#else
+ GGML_UNUSED(x_qs); GGML_UNUSED(x_dm); GGML_UNUSED(x_sc); GGML_UNUSED(y); GGML_UNUSED(sum); GGML_UNUSED(k0);
+ NO_DEVICE_CODE;
+#endif // INT8_MMA_AVAILABLE
}
template <int mmq_x, int mmq_y, int nwarps, bool need_check>
-static __device__ __forceinline__ void mmq_write_back_dp4a(const float * __restrict__ sum, float * __restrict__ dst, const int & ne0, const int & ne1) {
+static __device__ __forceinline__ void mmq_write_back_dp4a(
+ const float * __restrict__ sum, float * __restrict__ dst, const int & stride, const int & i_max, const int & j_max) {
+
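+    // i_max/j_max are inclusive upper bounds within the tile being written (note the > comparisons
+    // below) and stride is the row stride of dst.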
#pragma unroll
for (int j0 = 0; j0 < mmq_x; j0 += nwarps) {
- const int j = blockIdx.y*mmq_x + j0 + threadIdx.y;
+ const int j = j0 + threadIdx.y;
- if (j >= ne1) {
+ if (j > j_max) {
return;
}
#pragma unroll
for (int i0 = 0; i0 < mmq_y; i0 += WARP_SIZE) {
- const int i = blockIdx.x*mmq_y + i0 + threadIdx.x;
+ const int i = i0 + threadIdx.x;
- if (need_check && i >= ne0) {
+ if (need_check && i > i_max) {
continue;
}
- dst[j*ne0 + i] = sum[(j0/nwarps) * (mmq_y/WARP_SIZE) + i0/WARP_SIZE];
+ dst[j*stride + i] = sum[(j0/nwarps) * (mmq_y/WARP_SIZE) + i0/WARP_SIZE];
}
}
}
template <int mmq_x, int mmq_y, int nwarps, bool need_check>
-static __device__ __forceinline__ void mmq_write_back_mma(const float * __restrict__ sum, float * __restrict__ dst, const int & ne0, const int & ne1) {
+static __device__ __forceinline__ void mmq_write_back_mma(
+ const float * __restrict__ sum, float * __restrict__ dst, const int & stride, const int & i_max, const int & j_max) {
+
typedef mma_int_C_I16J8 mma_C;
const int i0 = threadIdx.y*mma_C::I;
+#ifdef INT8_MMA_AVAILABLE
static_assert(nwarps*mma_C::I == mmq_y, "nwarps*mma_C::I != mmq_y");
+#endif // INT8_MMA_AVAILABLE
#pragma unroll
for (int j0 = 0; j0 < mmq_x; j0 += mma_C::J) {
#pragma unroll
for (int l = 0; l < mma_C::ne; ++l) {
- const int j = blockIdx.y*mmq_x + j0 + mma_C::get_j(l);
+ const int j = j0 + mma_C::get_j(l);
- if (j >= ne1) {
+ if (j > j_max) {
continue;
}
- const int i = blockIdx.x*mmq_y + i0 + mma_C::get_i(l);
+ const int i = i0 + mma_C::get_i(l);
- if (need_check && i >= ne0) {
+ if (need_check && i > i_max) {
continue;
}
- dst[j*ne0 + i] = sum[(j0/mma_C::J)*mma_C::ne + l];
+ dst[j*stride + i] = sum[(j0/mma_C::J)*mma_C::ne + l];
}
}
}
@@ -1638,125 +1794,85 @@ struct mmq_type_traits;
template <int mmq_x, int mmq_y, int nwarps, bool need_check>
struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_Q4_0> {
- static constexpr int vdr = VDR_Q4_0_Q8_1_MMQ;
- static constexpr load_tiles_mmq_t load_tiles = load_tiles_q4_0;
-#ifdef INT8_MMA_AVAILABLE
- static constexpr vec_dot_mmq_t vec_dot = vec_dot_q4_0_q8_1_mma;
- static constexpr mmq_write_back_t write_back = mmq_write_back_mma;
-#else
- static constexpr vec_dot_mmq_t vec_dot = vec_dot_q4_0_q8_1_dp4a;
- static constexpr mmq_write_back_t write_back = mmq_write_back_dp4a;
-#endif // INT8_MMA_AVAILABLE
+ static constexpr int vdr = VDR_Q4_0_Q8_1_MMQ;
+ static constexpr load_tiles_mmq_t load_tiles = load_tiles_q4_0;
+ static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q4_0_q8_1_mma;
+ static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q4_0_q8_1_dp4a;
};
template <int mmq_x, int mmq_y, int nwarps, bool need_check>
struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_Q4_1> {
- static constexpr int vdr = VDR_Q4_1_Q8_1_MMQ;
- static constexpr load_tiles_mmq_t load_tiles = load_tiles_q4_1;
-#ifdef INT8_MMA_AVAILABLE
- static constexpr vec_dot_mmq_t vec_dot = vec_dot_q4_1_q8_1_mma;
- static constexpr mmq_write_back_t write_back = mmq_write_back_mma;
-#else
- static constexpr vec_dot_mmq_t vec_dot = vec_dot_q4_1_q8_1_dp4a;
- static constexpr mmq_write_back_t write_back = mmq_write_back_dp4a;
-#endif // INT8_MMA_AVAILABLE
+ static constexpr int vdr = VDR_Q4_1_Q8_1_MMQ;
+ static constexpr load_tiles_mmq_t load_tiles = load_tiles_q4_1;
+ static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q4_1_q8_1_mma;
+ static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q4_1_q8_1_dp4a;
};
template <int mmq_x, int mmq_y, int nwarps, bool need_check>
struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_Q5_0> {
- static constexpr int vdr = VDR_Q5_0_Q8_1_MMQ;
- static constexpr load_tiles_mmq_t load_tiles = load_tiles_q5_0;
-#ifdef INT8_MMA_AVAILABLE
- static constexpr vec_dot_mmq_t vec_dot = vec_dot_q5_0_q8_1_mma;
- static constexpr mmq_write_back_t write_back = mmq_write_back_mma;
-#else
- static constexpr vec_dot_mmq_t vec_dot = vec_dot_q5_0_q8_1_dp4a;
- static constexpr mmq_write_back_t write_back = mmq_write_back_dp4a;
-#endif // INT8_MMA_AVAILABLE
+ static constexpr int vdr = VDR_Q5_0_Q8_1_MMQ;
+ static constexpr load_tiles_mmq_t load_tiles = load_tiles_q5_0;
+ static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q5_0_q8_1_mma;
+ static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q5_0_q8_1_dp4a;
};
template <int mmq_x, int mmq_y, int nwarps, bool need_check>
struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_Q5_1> {
- static constexpr int vdr = VDR_Q5_1_Q8_1_MMQ;
- static constexpr load_tiles_mmq_t load_tiles = load_tiles_q5_1;
-#ifdef INT8_MMA_AVAILABLE
- static constexpr vec_dot_mmq_t vec_dot = vec_dot_q5_1_q8_1_mma;
- static constexpr mmq_write_back_t write_back = mmq_write_back_mma;
-#else
- static constexpr vec_dot_mmq_t vec_dot = vec_dot_q5_1_q8_1_dp4a;
- static constexpr mmq_write_back_t write_back = mmq_write_back_dp4a;
-#endif // INT8_MMA_AVAILABLE
+ static constexpr int vdr = VDR_Q5_1_Q8_1_MMQ;
+ static constexpr load_tiles_mmq_t load_tiles = load_tiles_q5_1;
+ static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q5_1_q8_1_mma;
+ static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q5_1_q8_1_dp4a;
};
template <int mmq_x, int mmq_y, int nwarps, bool need_check>
struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_Q8_0> {
- static constexpr int vdr = VDR_Q8_0_Q8_1_MMQ;
- static constexpr load_tiles_mmq_t load_tiles = load_tiles_q8_0;
-#ifdef INT8_MMA_AVAILABLE
- static constexpr vec_dot_mmq_t vec_dot = vec_dot_q8_0_q8_1_mma;
- static constexpr mmq_write_back_t write_back = mmq_write_back_mma;
-#else
- static constexpr vec_dot_mmq_t vec_dot = vec_dot_q8_0_q8_1_dp4a;
- static constexpr mmq_write_back_t write_back = mmq_write_back_dp4a;
-#endif // INT8_MMA_AVAILABLE
+ static constexpr int vdr = VDR_Q8_0_Q8_1_MMQ;
+ static constexpr load_tiles_mmq_t load_tiles = load_tiles_q8_0;
+ static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q8_0_q8_1_mma;
+ static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a;
};
template <int mmq_x, int mmq_y, int nwarps, bool need_check>
struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_Q2_K> {
- static constexpr int vdr = VDR_Q2_K_Q8_1_MMQ;
- static constexpr load_tiles_mmq_t load_tiles = load_tiles_q2_K;
- static constexpr vec_dot_mmq_t vec_dot = vec_dot_q2_K_q8_1_mul_mat;
- static constexpr mmq_write_back_t write_back = mmq_write_back_dp4a;
+ static constexpr int vdr = VDR_Q2_K_Q8_1_MMQ;
+ static constexpr load_tiles_mmq_t load_tiles = load_tiles_q2_K;
+ static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q2_K_q8_1_mma;
+ static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q2_K_q8_1_dp4a;
};
template <int mmq_x, int mmq_y, int nwarps, bool need_check>
struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_Q3_K> {
- static constexpr int vdr = VDR_Q3_K_Q8_1_MMQ;
- static constexpr load_tiles_mmq_t load_tiles = load_tiles_q3_K;
- static constexpr vec_dot_mmq_t vec_dot = vec_dot_q3_K_q8_1_mul_mat;
- static constexpr mmq_write_back_t write_back = mmq_write_back_dp4a;
+ static constexpr int vdr = VDR_Q3_K_Q8_1_MMQ;
+ static constexpr load_tiles_mmq_t load_tiles = load_tiles_q3_K;
+ static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q3_K_q8_1_mma;
+ static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q3_K_q8_1_dp4a;
};
template <int mmq_x, int mmq_y, int nwarps, bool need_check>
struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_Q4_K> {
- static constexpr int vdr = VDR_Q4_K_Q8_1_MMQ;
- static constexpr load_tiles_mmq_t load_tiles = load_tiles_q4_K;
-#ifdef INT8_MMA_AVAILABLE
- static constexpr vec_dot_mmq_t vec_dot = vec_dot_q4_K_q8_1_mma;
- static constexpr mmq_write_back_t write_back = mmq_write_back_mma;
-#else
- static constexpr vec_dot_mmq_t vec_dot = vec_dot_q4_K_q8_1_dp4a;
- static constexpr mmq_write_back_t write_back = mmq_write_back_dp4a;
-#endif // INT8_MMA_AVAILABLE
+ static constexpr int vdr = VDR_Q4_K_Q8_1_MMQ;
+ static constexpr load_tiles_mmq_t load_tiles = load_tiles_q4_K;
+ static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q4_K_q8_1_mma;
+ static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q4_K_q8_1_dp4a;
};
template <int mmq_x, int mmq_y, int nwarps, bool need_check>
struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_Q5_K> {
- static constexpr int vdr = VDR_Q5_K_Q8_1_MMQ;
- static constexpr load_tiles_mmq_t load_tiles = load_tiles_q5_K;
-#ifdef INT8_MMA_AVAILABLE
- static constexpr vec_dot_mmq_t vec_dot = vec_dot_q5_K_q8_1_mma;
- static constexpr mmq_write_back_t write_back = mmq_write_back_mma;
-#else
- static constexpr vec_dot_mmq_t vec_dot = vec_dot_q5_K_q8_1_dp4a;
- static constexpr mmq_write_back_t write_back = mmq_write_back_dp4a;
-#endif // INT8_MMA_AVAILABLE
+ static constexpr int vdr = VDR_Q5_K_Q8_1_MMQ;
+ static constexpr load_tiles_mmq_t load_tiles = load_tiles_q5_K;
+ static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q5_K_q8_1_mma;
+ static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q5_K_q8_1_dp4a;
};
template <int mmq_x, int mmq_y, int nwarps, bool need_check>
struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_Q6_K> {
- static constexpr int vdr = VDR_Q6_K_Q8_1_MMQ;
- static constexpr load_tiles_mmq_t load_tiles = load_tiles_q6_K;
-#ifdef INT8_MMA_AVAILABLE
- static constexpr vec_dot_mmq_t vec_dot = vec_dot_q6_K_q8_1_mma;
- static constexpr mmq_write_back_t write_back = mmq_write_back_mma;
-#else
- static constexpr vec_dot_mmq_t vec_dot = vec_dot_q6_K_q8_1_dp4a;
- static constexpr mmq_write_back_t write_back = mmq_write_back_dp4a;
-#endif // INT8_MMA_AVAILABLE
+ static constexpr int vdr = VDR_Q6_K_Q8_1_MMQ;
+ static constexpr load_tiles_mmq_t load_tiles = load_tiles_q6_K;
+ static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q6_K_q8_1_mma;
+ static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q6_K_q8_1_dp4a;
};
-static int mmq_need_sum(const ggml_type type_x) {
+static bool mmq_need_sum(const ggml_type type_x) {
switch (type_x) {
case GGML_TYPE_Q4_0:
case GGML_TYPE_Q4_1:
@@ -1781,6 +1897,79 @@ static int mmq_need_sum(const ggml_type type_x) {
return false;
}
+template <ggml_type type, int mmq_x, int nwarps, bool need_check, bool fixup>
+static __device__ void mul_mat_q_process_tile(
+ const char * __restrict__ x, const char * __restrict__ yc, float * __restrict__ dst, float * __restrict__ tmp_fixup,
+ const int & ne00, const int & ne01, const int & stride01, const int & ne10, const int & ne11, const int & stride11, const int & ne0,
+ const int & it, const int & jt, const int & kb0_start, const int & kb0_stop) {
+
+ constexpr int qk = ggml_cuda_type_traits