Merge pull request #74 from ggerganov/master
b1961
Nexesenex authored Jan 24, 2024
2 parents 6f9939d + c9b316c commit 4c1672b
Showing 14 changed files with 345 additions and 262 deletions.
26 changes: 26 additions & 0 deletions .devops/main-intel.Dockerfile
@@ -0,0 +1,26 @@
ARG ONEAPI_VERSION=2024.0.1-devel-ubuntu22.04
ARG UBUNTU_VERSION=22.04

FROM intel/hpckit:$ONEAPI_VERSION as build

RUN apt-get update && \
apt-get install -y git

WORKDIR /app

COPY . .

# for some reason, "-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=Intel10_64lp -DLLAMA_NATIVE=ON" gives worse performance
RUN mkdir build && \
cd build && \
cmake .. -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx && \
cmake --build . --config Release --target main server

FROM ubuntu:$UBUNTU_VERSION as runtime

COPY --from=build /app/build/bin/main /main
COPY --from=build /app/build/bin/server /server

ENV LC_ALL=C.utf8

ENTRYPOINT [ "/main" ]
3 changes: 3 additions & 0 deletions .devops/nix/package.nix
@@ -225,6 +225,9 @@ effectiveStdenv.mkDerivation (
description = "contains numpy and sentencepiece";
buildInputs = [ llama-python ];
inputsFrom = [ finalAttrs.finalPackage ];
shellHook = ''
addToSearchPath "LD_LIBRARY_PATH" "${lib.getLib effectiveStdenv.cc.cc}/lib"
'';
};

shell-extra = mkShell {
1 change: 1 addition & 0 deletions .github/workflows/docker.yml
@@ -35,6 +35,7 @@ jobs:
- { tag: "full-cuda", dockerfile: ".devops/full-cuda.Dockerfile", platforms: "linux/amd64" }
- { tag: "light-rocm", dockerfile: ".devops/main-rocm.Dockerfile", platforms: "linux/amd64,linux/arm64" }
- { tag: "full-rocm", dockerfile: ".devops/full-rocm.Dockerfile", platforms: "linux/amd64,linux/arm64" }
- { tag: "light-intel", dockerfile: ".devops/main-intel.Dockerfile", platforms: "linux/amd64" }
steps:
- name: Check out the repo
uses: actions/checkout@v3
5 changes: 5 additions & 0 deletions CMakeLists.txt
@@ -478,6 +478,11 @@ function(get_flags CCID CCVER)
if (CCVER VERSION_GREATER_EQUAL 8.1.0)
set(CXX_FLAGS ${CXX_FLAGS} -Wextra-semi)
endif()
elseif (CCID MATCHES "Intel")
# enable max optimization level when using Intel compiler
set(C_FLAGS -ipo -O3 -static -fp-model=fast -flto -fno-stack-protector)
set(CXX_FLAGS -ipo -O3 -static -fp-model=fast -flto -fno-stack-protector)
add_link_options(-fuse-ld=lld -static-intel)
endif()

set(GF_C_FLAGS ${C_FLAGS} PARENT_SCOPE)
10 changes: 4 additions & 6 deletions common/common.cpp
@@ -216,12 +216,10 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
}
// store the external file name in params
params.prompt_file = argv[i];
- file.seekg(0, std::ios::end);
- size_t size = file.tellg();
- file.seekg(0, std::ios::beg);
- params.prompt.resize(size);
- file.read((char *)params.prompt.data(), size);
- fprintf(stderr, "Read %zu bytes from binary file %s\n", size, argv[i]);
+ std::ostringstream ss;
+ ss << file.rdbuf();
+ params.prompt = ss.str();
+ fprintf(stderr, "Read %zu bytes from binary file %s\n", params.prompt.size(), argv[i]);
} else if (arg == "-f" || arg == "--file") {
if (++i >= argc) {
invalid_param = true;
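A note on the common/common.cpp change above: the manual seekg/tellg/resize/read sequence is replaced by a single stream-buffer copy. Below is a minimal, self-contained sketch of the rdbuf() idiom the new code adopts; the file name and helper are hypothetical, not from the repo.

#include <fstream>
#include <iostream>
#include <sstream>
#include <string>

// Read an entire file into a string by streaming its buffer into an
// ostringstream. This avoids relying on tellg() for a byte count (not
// guaranteed to be one for every stream mode) and drops the manual
// resize/read bookkeeping. Error handling omitted for brevity.
static std::string slurp_file(const std::string & path) {
    std::ifstream file(path, std::ios::binary);
    std::ostringstream ss;
    ss << file.rdbuf(); // copies the whole underlying buffer
    return ss.str();
}

int main() {
    const std::string prompt = slurp_file("prompt.bin"); // hypothetical input
    std::cout << "Read " << prompt.size() << " bytes\n";
    return 0;
}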
5 changes: 4 additions & 1 deletion examples/llama.vim
@@ -6,7 +6,7 @@
" Similarly, you could add an insert mode keybind with
" inoremap <C-B> <Cmd>call llama#doLlamaGen()<CR>
"
" g:llama_api_url and g:llama_overrides can be configured in your .vimrc
" g:llama_api_url, g:llama_api_key and g:llama_overrides can be configured in your .vimrc
" let g:llama_api_url = "192.168.1.10:8080"
" llama_overrides can also be set through buffer/window scopes. For instance
" autocmd filetype python let b:llama_overrides = {"temp": 0.2}
@@ -82,6 +82,9 @@ func llama#doLlamaGen()
endif
let l:querydata.prompt = join(l:buflines, "\n")
let l:curlcommand = copy(s:curlcommand)
if exists("g:llama_api_key")
call extend(l:curlcommand, ['--header', 'Authorization: Bearer ' .. g:llama_api_key])
endif
let l:curlcommand[2] = json_encode(l:querydata)
let b:job = job_start(l:curlcommand, {"callback": function("s:callbackHandler", [l:cbuffer])})
endfunction
47 changes: 19 additions & 28 deletions examples/llava/clip.cpp
@@ -2,18 +2,6 @@
// so there might still be unnecessary artifacts hanging around
// I'll gradually clean and extend it

- #include <cassert>
- #include <cmath>
- #include <cstdlib>
- #include <cstring>
- #include <fstream>
- #include <iostream>
- #include <map>
- #include <regex>
- #include <stdexcept>
- #include <vector>
- #include <sstream>

#include "clip.h"
#include "ggml.h"
#include "ggml-alloc.h"
@@ -30,6 +18,19 @@
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"

+ #include <cassert>
+ #include <cmath>
+ #include <cstdlib>
+ #include <cstring>
+ #include <fstream>
+ #include <iostream>
+ #include <map>
+ #include <regex>
+ #include <stdexcept>
+ #include <vector>
+ #include <sstream>
+ #include <cinttypes>

static std::string format(const char * fmt, ...) {
va_list ap;
va_list ap2;
@@ -217,9 +218,9 @@ static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {

static void print_tensor_info(const ggml_tensor* tensor, const char* prefix = "") {
size_t tensor_size = ggml_nbytes(tensor);
printf("%s: n_dims = %d, name = %s, tensor_size=%zu, shape:[%d, %d, %d, %d], type: %d\n",
printf("%s: n_dims = %d, name = %s, tensor_size=%zu, shape:[%" PRId64 ", %" PRId64 ", %" PRId64 ", %" PRId64 "], type = %s\n",
prefix, ggml_n_dims(tensor), tensor->name, tensor_size,
tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3], tensor->type);
tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3], ggml_type_name(tensor->type));
}

static projector_type clip_projector_type_from_string(const std::string & name) {
@@ -592,7 +593,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
mlp_3 = ggml_cont(ctx0, ggml_permute(ctx0, mlp_3, 1, 0, 2, 3));
mlp_3 = ggml_reshape_4d(ctx0, mlp_3, n_patch, n_patch, mlp_3->ne[1], mlp_3->ne[2]);
// stride = 1, padding = 1, bias is nullptr
- block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_1_block_0_0_w, mlp_3, nullptr, 1, 1, 1, 1, 1, 1);
+ block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_1_block_0_0_w, mlp_3, 1, 1, 1, 1, 1, 1);

// layer norm
// // block_1 shape = [1, 2048, 24, 24], ne = [24, 24, 2048, 1]
@@ -640,7 +641,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
// block_2
{
// stride = 2
- block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_2_block_0_0_w, block_1, nullptr, 2, 2, 1, 1, 1, 1);
+ block_1 = ggml_conv_depthwise_2d(ctx0, model.mm_model_block_2_block_0_0_w, block_1, 2, 2, 1, 1, 1, 1);

// block_1 shape = [1, 2048, 12, 12], ne = [12, 12, 2048, 1]
// layer norm
@@ -741,18 +742,10 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
{
std::map<enum ggml_type, uint32_t> n_type;

- uint32_t n_type_max = 0;
- enum ggml_type type_max = GGML_TYPE_F32;

for (int i = 0; i < n_tensors; i++) {
enum ggml_type type = gguf_get_tensor_type(ctx, i);

n_type[type]++;

- if (n_type_max < n_type[type]) {
- n_type_max = n_type[type];
- type_max = type;
- }
}

printf("%s: Dumping metadata keys/values. Note: KV overrides do not apply in this output.\n", __func__);
@@ -795,14 +788,12 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
size_t tensor_size = ggml_nbytes(cur);
buffer_size += tensor_size;
if (verbosity >= 3) {
printf("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%d, %d, %d, %d], type: %d\n", __func__, i,
ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], type);
printf("%s: tensor[%d]: n_dims = %d, name = %s, tensor_size=%zu, offset=%zu, shape:[%" PRIu64 ", %" PRIu64 ", %" PRIu64 ", %" PRIu64 "], type = %s\n",
__func__, i, ggml_n_dims(cur), cur->name, tensor_size, offset, cur->ne[0], cur->ne[1], cur->ne[2], cur->ne[3], ggml_type_name(type));
}
}
}



buffer_size += n_tensors * 128 /* CLIP PADDING */;

clip_ctx * new_clip = new clip_ctx;
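The printf changes in clip.cpp above fix two things: ggml tensor dimensions (tensor->ne) are int64_t, so printing them with %d is undefined behavior on common 32-bit-int platforms, and the raw numeric type id is replaced with a readable name via ggml_type_name(). A minimal sketch of the <cinttypes> idiom, with a made-up sample shape:

#include <cinttypes> // PRId64
#include <cstdint>
#include <cstdio>

int main() {
    // PRId64 expands to the right conversion specifier for int64_t
    // ("lld", "ld", ... depending on the platform), so the format
    // string stays portable.
    int64_t ne[4] = {3, 336, 336, 1}; // illustrative shape only
    printf("shape:[%" PRId64 ", %" PRId64 ", %" PRId64 ", %" PRId64 "]\n",
           ne[0], ne[1], ne[2], ne[3]);
    return 0;
}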
75 changes: 63 additions & 12 deletions examples/perplexity/perplexity.cpp
@@ -222,13 +222,18 @@ struct kl_divergence_result {
double sum_kld2 = 0;
double sum_nll_diff = 0;
double sum_nll_diff2 = 0;
size_t n_same_top = 0;
size_t count = 0;
};

- static void log_softmax(int n_vocab, const float * logits, const uint16_t * base_log_prob, int tok, kl_divergence_result & kld) {
+ static double log_softmax(int n_vocab, const float * logits, const uint16_t * base_log_prob, int tok, kl_divergence_result & kld) {
float max_logit = logits[0];
int imax = 0;
for (int i = 1; i < n_vocab; ++i) {
- max_logit = std::max(max_logit, logits[i]);
+ if (logits[i] > max_logit) {
+ max_logit = logits[i];
+ imax = i;
+ }
}
double sum_exp = 0.0;
for (int i = 0; i < n_vocab; ++i) {
@@ -247,8 +252,14 @@ static void log_softmax(int n_vocab, const float * logits, const uint16_t * base
kld.sum_nll_diff2 += nll*nll;
max_logit += log_sum_exp;
double sum = 0;
int imax_base = -1;
float p_log_base_max = 0;
for (int i = 0; i < n_vocab; ++i) {
const float p_log_base = scale*base_log_prob[i] + min_log_prob;
if (i == 0 || p_log_base > p_log_base_max) {
p_log_base_max = p_log_base;
imax_base = i;
}
if (p_log_base > -16.f) {
const float p_base = expf(p_log_base);
sum += p_base * (p_log_base - logits[i] + max_logit);
Expand All @@ -257,14 +268,17 @@ static void log_softmax(int n_vocab, const float * logits, const uint16_t * base
kld.sum_kld += sum;
kld.sum_kld2 += sum*sum;
++kld.count;
if (imax == imax_base) ++kld.n_same_top;
return sum;
}
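For reference, the per-token value returned by log_softmax above is the KL divergence between the base model's token distribution (reconstructed from the quantized base_log_prob entries) and the current model's distribution. A sketch of the math as the loop computes it, in my own notation rather than anything from the repo's docs:

D_{\mathrm{KL}}(P_{\mathrm{base}} \,\|\, P) = \sum_i p_{\mathrm{base}}(i)\,\bigl[\log p_{\mathrm{base}}(i) - \log p(i)\bigr],
\qquad
\log p(i) = \ell_i - \max_j \ell_j - \log \sum_j e^{\,\ell_j - \max_j \ell_j}

where \ell are the current model's logits and the max subtraction only stabilizes the exponentials. After max_logit += log_sum_exp, the loop term p_log_base - logits[i] + max_logit is exactly log p_base(i) - log p(i); entries at or below the quantization floor (p_log_base <= -16) are skipped as negligible. The new n_same_top counter additionally tracks how often both models agree on the top-1 token, feeding the "Same top" column added to the report below.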

static void process_logits(int n_vocab, const float * logits, const int * tokens, int n_token,
- std::vector<std::thread> & workers, const std::vector<uint16_t> & base_log_probs, kl_divergence_result & kld) {
+ std::vector<std::thread> & workers, const std::vector<uint16_t> & base_log_probs, kl_divergence_result & kld,
+ float * kld_values) {
std::mutex mutex;
const int nv = 2*((n_vocab + 1)/2) + 4;
int counter = 0;
- auto compute = [&mutex, &counter, &base_log_probs, &kld, n_vocab, logits, tokens, n_token, nv] () {
+ auto compute = [&mutex, &counter, &base_log_probs, &kld, n_vocab, logits, tokens, n_token, nv, kld_values] () {
kl_divergence_result local_kld;
while (true) {
std::unique_lock<std::mutex> lock(mutex);
Expand All @@ -276,11 +290,13 @@ static void process_logits(int n_vocab, const float * logits, const int * tokens
kld.sum_kld2 += local_kld.sum_kld2;
kld.sum_nll_diff += local_kld.sum_nll_diff;
kld.sum_nll_diff2 += local_kld.sum_nll_diff2;
kld.n_same_top += local_kld.n_same_top;
kld.count += local_kld.count;
break;
}
lock.unlock();
- log_softmax(n_vocab, logits + i*n_vocab, base_log_probs.data() + i*nv, tokens[i+1], local_kld);
+ double v = log_softmax(n_vocab, logits + i*n_vocab, base_log_probs.data() + i*nv, tokens[i+1], local_kld);
+ kld_values[i] = (float)v;
}
};
for (auto & w : workers) {
@@ -1202,11 +1218,11 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
printf("Final Winogrande score(%d tasks): %.4lf +/- %.4lf\n", n_done, 100*p, sigma);
}

- static bool deserialize_string(std::istream& in, std::string& str) {
+ static bool deserialize_string(std::istream & in, std::string & str) {
uint32_t size;
if (!in.read((char *)&size, sizeof(size)).fail()) {
str.resize(size);
- if (!in.read((char *)str.data(), size).fail()) return true;
+ if (!in.read((char *)&str[0], size).fail()) return true;
}
return false;
}
@@ -1615,7 +1631,7 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
in.read((char *)&n_vocab, sizeof(n_vocab));
in.read((char *)&n_chunk, sizeof(n_chunk));
if (in.fail()) {
fprintf(stderr, "%s: failed rwading n_vocab, n_chunk from %s\n", __func__, params.logits_file.c_str());
fprintf(stderr, "%s: failed reading n_vocab, n_chunk from %s\n", __func__, params.logits_file.c_str());
return;
}
if (n_vocab != llama_n_vocab(llama_get_model(ctx))) {
@@ -1634,6 +1650,7 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));

std::vector<uint16_t> log_probs_uint16(size_t(n_ctx - 1 - n_ctx/2) * nv);
std::vector<float> kld_values(size_t(n_ctx - 1 - n_ctx/2)*n_chunk);
std::vector<float> logits;
if (num_batches > 1) {
logits.reserve(n_ctx * n_vocab);
@@ -1652,6 +1669,7 @@
};

kl_divergence_result kld;
auto kld_ptr = kld_values.data();

for (int i = 0; i < n_chunk; ++i) {
const int start = i * n_ctx;
@@ -1705,27 +1723,60 @@
}
fprintf(stderr, "%.2f minutes\n", total_seconds / 60.0);

printf("\nchunk PPL ln(PPL(Q)/PPL(base)) KL-Divergence\n");
printf("\nchunk PPL ln(PPL(Q)/PPL(base)) KL-Divergence Same top\n");
}

const int first = n_ctx/2;
const float * all_logits = num_batches > 1 ? logits.data() : llama_get_logits(ctx);
process_logits(n_vocab, all_logits + first*n_vocab, tokens.data() + start + first, n_ctx - 1 - first,
- workers, log_probs_uint16, kld);
+ workers, log_probs_uint16, kld, kld_ptr);
kld_ptr += n_ctx - 1 - first;

auto ppl = mean_and_uncertainty(kld.sum_nll, kld.sum_nll2, kld.count);
auto log_ppl_ratio = mean_and_uncertainty(kld.sum_nll_diff, kld.sum_nll_diff2, kld.count);
auto kl_div = mean_and_uncertainty(kld.sum_kld, kld.sum_kld2, kld.count);
auto p_top = 1.*kld.n_same_top/kld.count;
auto d_p_top = sqrt(p_top*(1 - p_top)/(kld.count - 1));

printf("%4d %10.4lf %10.5lf ± %10.5f %10.5f ± %10.5lf\n", i+1, exp(ppl.first),
log_ppl_ratio.first, log_ppl_ratio.second, kl_div.first, kl_div.second);
printf("%4d %10.4lf %10.5lf ± %10.5f %10.5f ± %10.5lf %.5f ± %.5f\n", i+1, exp(ppl.first),
log_ppl_ratio.first, log_ppl_ratio.second, kl_div.first, kl_div.second,
p_top, d_p_top);

fflush(stdout);

logits.clear();
}
printf("\n");

if (kld.count < 100) return; // we do not wish to do statistics on so few values

std::sort(kld_values.begin(), kld_values.end());

printf("===== KL-divergence statistics\n");
auto kl_div = mean_and_uncertainty(kld.sum_kld, kld.sum_kld2, kld.count);
printf("Average: %10.6f ±%10.6lf\n", kl_div.first, kl_div.second);
auto kld_median = kld_values.size()%2 == 0 ? 0.5f*(kld_values[kld_values.size()/2] + kld_values[kld_values.size()/2-1])
: kld_values[kld_values.size()/2];
printf("Median : %10.6f\n", kld_median);

auto percentile = [&kld_values] (float fraction) {
if (fraction <= 0) return kld_values.front();
if (fraction >= 1) return kld_values.back();
float p = fraction*(kld_values.size() - 1);
size_t ip = size_t(p); p -= ip;
return (1 - p)*kld_values[ip] + p*kld_values[std::min(ip+1, kld_values.size()-1)];
};

printf("Maximum: %10.6f\n", kld_values.back());
printf("KLD_99 : %10.6f\n", percentile(0.99f));
printf("KLD_95 : %10.6f\n", percentile(0.95f));
printf("KLD_90 : %10.6f\n", percentile(0.90f));

printf("Minimum: %10.6f\n", kld_values.front());
printf("KLD_01 : %10.6f\n", percentile(0.01f));
printf("KLD_05 : %10.6f\n", percentile(0.05f));
printf("KLD_10 : %10.6f\n", percentile(0.10f));

}

int main(int argc, char ** argv) {
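To make the new statistics block concrete: it sorts the per-token KLD values and reports the mean, median, and tail percentiles via linear interpolation. A self-contained sketch of that percentile rule with a tiny made-up sample (values are illustrative only):

#include <algorithm>
#include <cstdio>
#include <vector>

// Linear-interpolation percentile over an ascending-sorted sample,
// mirroring the lambda in kl_divergence() above.
static float percentile(const std::vector<float> & sorted, float fraction) {
    if (fraction <= 0) return sorted.front();
    if (fraction >= 1) return sorted.back();
    float p = fraction * (sorted.size() - 1);
    size_t ip = (size_t) p;
    p -= ip; // fractional part interpolates between neighbors
    return (1 - p) * sorted[ip] + p * sorted[std::min(ip + 1, sorted.size() - 1)];
}

int main() {
    std::vector<float> v = {0.01f, 0.02f, 0.04f, 0.08f, 0.16f}; // sorted KLDs
    // 0.90 * (5 - 1) = 3.6 -> 0.4 * v[3] + 0.6 * v[4] = 0.128
    printf("KLD_90 = %.3f\n", percentile(v, 0.90f));
    return 0;
}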
4 changes: 2 additions & 2 deletions ggml-alloc.c
@@ -109,8 +109,8 @@ void ggml_tallocr_alloc(ggml_tallocr_t alloc, struct ggml_tensor * tensor) {
if (block->size >= size) {
best_fit_block = alloc->n_free_blocks - 1;
} else {
fprintf(stderr, "%s: not enough space in the buffer (needed %zu, largest block available %zu)\n",
__func__, size, max_avail);
fprintf(stderr, "%s: not enough space in the buffer to allocate %s (needed %zu, largest block available %zu)\n",
__func__, tensor->name, size, max_avail);
GGML_ASSERT(!"not enough space in the buffer");
return;
}
