Commit

Removes multiple newlines at the end of files that are breaking the editorconfig step of CI. (ggerganov#8258)
HanClinto authored and Neo Zhang committed Jul 3, 2024
1 parent 6b695b5 commit 044995e
Showing 22 changed files with 0 additions and 24 deletions.
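For context: the editorconfig step in CI enforces the repository's `.editorconfig` rules, and with `insert_final_newline = true` a checker such as editorconfig-checker rejects files that end in more than one newline. The excerpt below is an illustrative sketch of that kind of rule, not a copy of the repository's actual `.editorconfig`; a sketch of a bulk fix follows the file list at the end of this page.

```ini
# Illustrative .editorconfig sketch (assumed settings, not copied from the
# repository): with insert_final_newline = true, editorconfig-checker
# rejects files that end in two or more newlines.
root = true

[*]
insert_final_newline = true
trim_trailing_whitespace = true
```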
2 changes: 0 additions & 2 deletions .github/ISSUE_TEMPLATE/config.yml
@@ -9,5 +9,3 @@ contact_links:
- name: Want to contribute?
url: https://github.com/ggerganov/llama.cpp/wiki/contribute
about: Head to the contribution guide page of the wiki for areas you can help with
-
-
1 change: 0 additions & 1 deletion common/common.h
@@ -459,4 +459,3 @@ void yaml_dump_string_multiline(FILE * stream, const char * prop_name, const cha
void yaml_dump_non_result_info(
FILE * stream, const gpt_params & params, const llama_context * lctx,
const std::string & timestamp, const std::vector<int> & prompt_tokens, const char * model_desc);
-
1 change: 0 additions & 1 deletion examples/embedding/README.md
@@ -58,4 +58,3 @@ The above command will output space-separated float values.
```powershell
embedding.exe -p 'Castle<#sep#>Stronghold<#sep#>Dog<#sep#>Cat' --embd-separator '<#sep#>' --embd-normalize 2 --embd-output-format '' -m './path/to/model.gguf' --n-gpu-layers 99 --log-disable 2>/dev/null
```
-
1 change: 0 additions & 1 deletion examples/infill/infill.cpp
@@ -659,4 +659,3 @@ int main(int argc, char ** argv) {

return 0;
}
-
1 change: 0 additions & 1 deletion examples/lookup/README.md
@@ -10,4 +10,3 @@ More info:

https://github.com/ggerganov/llama.cpp/pull/4484
https://github.com/ggerganov/llama.cpp/issues/4226
-
1 change: 0 additions & 1 deletion examples/main-cmake-pkg/.gitignore
@@ -48,4 +48,3 @@
build*/
out/
tmp/
-
1 change: 0 additions & 1 deletion examples/main-cmake-pkg/CMakeLists.txt
@@ -30,4 +30,3 @@ target_include_directories(${TARGET} PRIVATE ${_common_path})
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_11)
-
1 change: 0 additions & 1 deletion examples/server-embd.py
@@ -31,4 +31,3 @@ async def main():
embedding2 = np.array(result[j])
similarity = np.dot(embedding1, embedding2) / (np.linalg.norm(embedding1) * np.linalg.norm(embedding2))
print(f"Similarity between {i} and {j}: {similarity:.2f}")
-
1 change: 0 additions & 1 deletion examples/server/tests/features/passkey.feature
@@ -52,4 +52,3 @@ Feature: Passkey / Self-extend with context shift
#| TheBloke/Llama-2-7B-GGUF | llama-2-7b.Q2_K.gguf | 4096 | 3 | 16384 | 512 | 4 | 512 | 500 | 300 | 1234 | 5 | 1234 |
#| TheBloke/Mixtral-8x7B-v0.1-GGUF | mixtral-8x7b-v0.1.Q2_K.gguf | 32768 | 2 | 16384 | 512 | 4 | 512 | 500 | 100 | 0987 | 5 | 0
# 987 |
-
1 change: 0 additions & 1 deletion examples/server/themes/buttons-top/index.html
@@ -1054,4 +1054,3 @@ <h1>llama.cpp</h1>
</body>

</html>
-
1 change: 0 additions & 1 deletion examples/server/themes/wild/index.html
@@ -1058,4 +1058,3 @@
</body>

</html>
-
1 change: 0 additions & 1 deletion examples/sycl/run-llama2.sh
@@ -34,4 +34,3 @@ fi

#use multiple GPUs with same max compute units
#ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m models/llama-2-7b.Q4_0.gguf -p "${INPUT2}" -n 400 -e -ngl 33 -s 0
-
1 change: 0 additions & 1 deletion examples/sycl/win-build-sycl.bat
@@ -31,4 +31,3 @@ exit /B 0
:ERROR
echo comomand error: %errorlevel%
exit /B %errorlevel%
-
2 changes: 0 additions & 2 deletions examples/sycl/win-run-llama2.bat
@@ -7,5 +7,3 @@ set INPUT2="Building a website can be done in 10 simple steps:\nStep 1:"


.\build\bin\llama-cli.exe -m models\llama-2-7b.Q4_0.gguf -p %INPUT2% -n 400 -e -ngl 33 -s 0
-
-
1 change: 0 additions & 1 deletion ggml/include/ggml-metal.h
@@ -63,4 +63,3 @@ GGML_API void ggml_backend_metal_capture_next_compute(ggml_backend_t backend);
#ifdef __cplusplus
}
#endif
-
1 change: 0 additions & 1 deletion ggml/src/ggml-cuda/cpy.cu
@@ -487,4 +487,3 @@ void* ggml_cuda_cpy_fn(const ggml_tensor * src0, ggml_tensor * src1) {
GGML_ASSERT(false);
}
}
-
1 change: 0 additions & 1 deletion ggml/src/ggml-metal.metal
@@ -6537,4 +6537,3 @@ template [[host_name("kernel_mul_mv_id_iq3_s_f32")]] kernel kernel_mul_mv_id_t
template [[host_name("kernel_mul_mv_id_iq2_s_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq2_s_f32_impl>>;
template [[host_name("kernel_mul_mv_id_iq4_nl_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_nl_f32_impl>>;
template [[host_name("kernel_mul_mv_id_iq4_xs_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_xs_f32_impl>>;
-
1 change: 0 additions & 1 deletion ggml/src/ggml-quants.h
@@ -130,4 +130,3 @@ void iq3xs_free_impl(int grid_size);
#ifdef __cplusplus
}
#endif
-
1 change: 0 additions & 1 deletion ggml/src/ggml-vulkan-shaders.hpp
@@ -144954,4 +144954,3 @@ unsigned char sum_rows_f32_data[] = {

};
const uint64_t sum_rows_f32_len = 2112;
-
1 change: 0 additions & 1 deletion scripts/pod-llama.sh
@@ -210,4 +210,3 @@ fi
# more benches
#GGML_CUDA=1 make -j && ./llama-batched-bench ./models/codellama-7b/ggml-model-q4_k.gguf 4096 1 99 1 512,3200 128,128,800 1
#GGML_CUDA=1 make -j && ./llama-batched-bench ./models/codellama-13b/ggml-model-q4_k.gguf 4096 1 99 1 512,3200 128,128,800 1
-
1 change: 0 additions & 1 deletion src/unicode-data.cpp
@@ -7030,4 +7030,3 @@ const std::vector<range_nfd> unicode_ranges_nfd = { // start, last, nfd
{0x02FA1C, 0x02FA1C, 0x009F3B},
{0x02FA1D, 0x02FA1D, 0x02A600},
};
-
1 change: 0 additions & 1 deletion tests/test-rope.cpp
@@ -218,4 +218,3 @@ int main(int /*argc*/, const char ** /*argv*/) {

return 0;
}
-
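The change itself is mechanical: every newline beyond the first at the end of each affected file is deleted. Below is a minimal sketch of how such files could be normalized in bulk; it is an illustration only, not the tooling used for this commit, and the script name is hypothetical.

```python
#!/usr/bin/env python3
# normalize_final_newline.py (hypothetical helper, not part of this commit):
# rewrite each listed file so it ends with exactly one newline, the state
# the editorconfig check expects.
import sys

for path in sys.argv[1:]:
    with open(path, "rb") as f:
        data = f.read()
    if not data:
        continue  # leave empty files untouched
    # Drop every trailing CR/LF byte, then re-append a single LF.
    fixed = data.rstrip(b"\r\n") + b"\n"
    if fixed != data:
        with open(path, "wb") as f:
            f.write(fixed)
        print(f"fixed: {path}")
```

Run as `python normalize_final_newline.py $(git ls-files)` to sweep a working tree, assuming every tracked file is text.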