baby-llama : fix -Wmaybe-uninitialized warning from gcc
cebtenzzre committed Sep 18, 2023
1 parent 2657f7b commit 29cfecc
Showing 1 changed file with 6 additions and 6 deletions.
12 changes: 6 additions & 6 deletions examples/baby-llama/baby-llama.cpp
@@ -1,8 +1,10 @@
 #include "ggml.h"
-#include <vector>
+
 #include <cassert>
-#include <random>
 #include <cstdlib>
 #include <cstring>
+#include <random>
+#include <vector>
+
 #if defined(_MSC_VER)
 #pragma warning(disable: 4244 4267) // possible loss of data
@@ -457,7 +459,7 @@ static void randomize_model_lora(
     }
 }
 
-static bool init_kv_cache(struct llama_kv_cache* cache, struct llama_model * model, int n_batch) {
+static void init_kv_cache(struct llama_kv_cache* cache, struct llama_model * model, int n_batch) {
     const auto & hparams = model->hparams;
 
     const uint32_t n_ctx = hparams.n_ctx;
@@ -483,14 +485,12 @@ static bool init_kv_cache(struct llama_kv_cache* cache, struct llama_model * mod
 
         if (!cache->ctx) {
             fprintf(stderr, "%s: failed to allocate memory for kv cache\n", __func__);
-            return false;
+            exit(1);
         }
     }
 
     cache->k = ggml_new_tensor_1d(cache->ctx, GGML_TYPE_F32, n_elements);
     cache->v = ggml_new_tensor_1d(cache->ctx, GGML_TYPE_F32, n_elements);
-
-    return true;
 }
 
 static bool init_kv_cache_lora(struct llama_kv_cache* cache, struct llama_model_lora * model, int n_batch) {
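Why this silences the warning: the old init_kv_cache reported allocation failure through its bool result, and since the diff touches no call sites, the callers evidently discarded that result, leaving gcc a path on which cache->k and cache->v could be read without ever being written. Making the function void and calling exit(1) on failure removes that path. Below is a minimal, self-contained sketch of the same pattern; the names (kv_cache_sketch, init_cache_bool, init_cache_void) are hypothetical stand-ins, not code from this repository.

// Hypothetical minimal reproduction of the warning class this commit fixes.
// All names here are stand-ins, not the real baby-llama code.
#include <cstddef>
#include <cstdio>
#include <cstdlib>

struct kv_cache_sketch {
    float * k;
    float * v;
};

// Old shape: the failure path returns without writing k or v, so a caller
// that discards the bool can go on to read uninitialized members.
static bool init_cache_bool(kv_cache_sketch * cache, int n) {
    float * buf = (float *) calloc(2 * (size_t) n, sizeof(float));
    if (!buf) {
        return false; // cache->k and cache->v are left unset on this path
    }
    cache->k = buf;
    cache->v = buf + n;
    return true;
}

// New shape, mirroring the commit: allocation failure terminates the process,
// so every path that returns has initialized k and v.
static void init_cache_void(kv_cache_sketch * cache, int n) {
    float * buf = (float *) calloc(2 * (size_t) n, sizeof(float));
    if (!buf) {
        fprintf(stderr, "%s: failed to allocate memory for kv cache\n", __func__);
        exit(1);
    }
    cache->k = buf;
    cache->v = buf + n;
}

int main() {
    kv_cache_sketch cache;             // members start out uninitialized
    (void) init_cache_bool(&cache, 4); // discarded result: the pattern gcc warned about
    printf("%f\n", cache.k[0]);        // gcc may flag this read as maybe-uninitialized
    free(cache.k);                     // k owns the whole buffer in this sketch

    init_cache_void(&cache, 4);        // fixed shape: no ignorable failure path
    printf("%f\n", cache.k[0]);        // k is provably set if execution reached here
    free(cache.k);
    return 0;
}

Built with something like g++ -O2 -Wall -Wextra, the first read is the one gcc can flag once the call is inlined; the second cannot be reached with k unset, which is the property the commit's void-plus-exit(1) shape relies on.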