fix a few missing 'static' specifiers

Author: Cebtenzzre
Date:   2023-09-15 15:07:40 -04:00
parent 159c597581
commit e8a3090508
5 changed files with 36 additions and 38 deletions
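
For context, a minimal sketch (not part of this commit) of what the added 'static' buys: a function defined at file scope without 'static' has external linkage, so an identically named helper in another translation unit of the same binary can collide at link time, and builds that enable -Wmissing-declarations warn about definitions with no prior declaration; marking the helper 'static' gives it internal linkage and avoids both. The greet() helper and file name below are hypothetical, chosen only to illustrate the effect.

    // hypothetical_helper.cpp -- illustration only, not a file from this commit
    #include <cstdio>
    #include <string>

    // Without 'static' this definition would have external linkage: another .cpp
    // in the same binary defining a helper with the same name could clash at link
    // time, and -Wmissing-declarations would complain that no declaration precedes it.
    // 'static' restricts its visibility to this translation unit.
    static std::string greet(const std::string & name) {
        return "hello " + name;
    }

    int main() {
        std::printf("%s\n", greet("world").c_str());
        return 0;
    }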

@@ -444,7 +444,7 @@ __attribute__((format(gnu_printf, 1, 2)))
 __attribute__((format(printf, 1, 2)))
 #endif
 #endif
-std::string format(const char * fmt, ...) {
+static std::string format(const char * fmt, ...) {
     va_list ap, ap2;
     va_start(ap, fmt);
     va_copy(ap2, ap);
@@ -531,7 +531,7 @@ struct llama_file {
     }
 };
-bool is_ggml_file(const char *filename) {
+static bool is_ggml_file(const char * filename) {
     llama_file file(filename, "rb");
     if (file.size < 4) {
         return false;
@@ -540,7 +540,7 @@ bool is_ggml_file(const char *filename) {
     return magic == GGUF_MAGIC;
 }
-std::string llama_escape_whitespaces(const std::string& text) {
+static std::string llama_escape_whitespaces(const std::string & text) {
     std::ostringstream out;
     for (char c : text) {
         if (c == ' ') out << "\xe2\x96\x81";

@@ -132,7 +132,7 @@ static void process_logits(
 }
-results_perplexity perplexity_v2(llama_context * ctx, const gpt_params & params) {
+static results_perplexity perplexity_v2(llama_context * ctx, const gpt_params & params) {
     // Download: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research
     // Run `./perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw`
     // Output: `perplexity: 13.5106 [114/114]`
@@ -262,8 +262,7 @@ results_perplexity perplexity_v2(llama_context * ctx, const gpt_params & params)
     return {tokens, std::exp(nll / count), logit_history, prob_history};
 }
-results_perplexity perplexity(llama_context * ctx, const gpt_params & params) {
+static results_perplexity perplexity(llama_context * ctx, const gpt_params & params) {
     if (params.ppl_stride > 0) {
         return perplexity_v2(ctx, params);
     }

@@ -102,7 +102,7 @@ static void combine_error_stats(error_stats & into, const error_stats & from) {
     for (size_t i=0; i<HISTOGRAM_BUCKETS; ++i) into.error_histogram[i] += from.error_histogram[i];
 }
-double find_quantile(const error_stats & stats, double quantile) {
+static double find_quantile(const error_stats & stats, double quantile) {
     double sum = std::accumulate(std::begin(stats.error_histogram), std::end(stats.error_histogram), 0.0);
     double accum = 0;

@@ -48,7 +48,7 @@ struct completion_token_output
     llama_token tok;
 };
-size_t common_part(const std::vector<llama_token> & a, const std::vector<llama_token> & b)
+static size_t common_part(const std::vector<llama_token> &a, const std::vector<llama_token> &b)
 {
     size_t i;
     for (i = 0; i < a.size() && i < b.size() && a[i] == b[i]; i++)
@@ -69,7 +69,7 @@ static bool ends_with(const std::string & str, const std::string & suffix)
         0 == str.compare(str.size() - suffix.size(), suffix.size(), suffix);
 }
-size_t find_partial_stop_string(const std::string & stop, const std::string & text)
+static size_t find_partial_stop_string(const std::string &stop, const std::string &text)
 {
     if (!text.empty() && !stop.empty())
     {
@@ -90,7 +90,7 @@ size_t find_partial_stop_string(const std::string & stop, const std::string & te
 }
 template <class Iter>
-std::string tokens_to_str(llama_context *ctx, Iter begin, Iter end)
+static std::string tokens_to_str(llama_context *ctx, Iter begin, Iter end)
 {
     std::string ret;
     for (; begin != end; ++begin)
@@ -122,7 +122,7 @@ static void server_log(
 }
 // format incomplete utf-8 multibyte character for output
-std::string tokens_to_output_formatted_string(const llama_context * ctx, llama_token token)
+static std::string tokens_to_output_formatted_string(const llama_context *ctx, llama_token token)
 {
     std::string out = token == -1 ? "" : llama_token_to_piece(ctx, token);
     // if the size is 1 and first bit is 1, meaning it's a partial character
@@ -138,7 +138,7 @@ std::string tokens_to_output_formatted_string(const llama_context * ctx, llama_t
 }
 // convert a vector of completion_token_output to json
-json probs_vector_to_json(const llama_context * ctx, const std::vector<completion_token_output> & probs)
+static json probs_vector_to_json(const llama_context *ctx, const std::vector<completion_token_output> &probs)
 {
     json out = json::array();
     for (const auto &prob : probs)
@@ -992,7 +992,7 @@ static void server_params_parse(int argc, char ** argv, server_params & sparams,
     }
 }
-json format_generation_settings(llama_server_context & llama)
+static json format_generation_settings(llama_server_context &llama)
 {
     const auto eos_bias = llama.params.logit_bias.find(llama_token_eos(llama.ctx));
     const bool ignore_eos = eos_bias != llama.params.logit_bias.end() &&
@@ -1026,14 +1026,14 @@ json format_generation_settings(llama_server_context & llama)
     };
 }
-json format_embedding_response(llama_server_context & llama)
+static json format_embedding_response(llama_server_context &llama)
 {
     return json{
         {"embedding", llama.getEmbedding()},
     };
 }
-json format_timings(llama_server_context & llama)
+static json format_timings(llama_server_context &llama)
 {
     const auto timings = llama_get_timings(llama.ctx);
@@ -1052,7 +1052,7 @@ json format_timings(llama_server_context & llama)
     };
 }
-json format_final_response(
+static json format_final_response(
     llama_server_context &llama, const std::string &content, const std::vector<completion_token_output> &probs
 ) {
@@ -1081,7 +1081,7 @@ json format_final_response(
     return res;
 }
-json format_partial_response(
+static json format_partial_response(
     llama_server_context &llama, const std::string &content, const std::vector<completion_token_output> &probs
 ) {
     json res = json{
@@ -1097,20 +1097,20 @@ json format_partial_response(
     return res;
 }
-json format_tokenizer_response(const std::vector<llama_token> & tokens)
+static json format_tokenizer_response(const std::vector<llama_token> &tokens)
 {
     return json{
         {"tokens", tokens}};
 }
-json format_detokenized_response(std::string content)
+static json format_detokenized_response(std::string content)
 {
     return json{
         {"content", content}};
 }
 template <typename T>
-T json_value(const json & body, const std::string & key, const T & default_value)
+static T json_value(const json &body, const std::string &key, const T &default_value)
 {
     // Fallback null to default value
     return body.contains(key) && !body.at(key).is_null()
@@ -1257,7 +1257,8 @@ struct token_translator {
     std::string operator()(const completion_token_output & cto) const { return (*this)(cto.tok); }
 };
-static void append_to_generated_text_from_generated_token_probs(llama_server_context & llama) {
+static void append_to_generated_text_from_generated_token_probs(llama_server_context &llama)
+{
     auto & gtps = llama.generated_token_probs;
     auto translator = token_translator{llama.ctx};
     auto add_strlen = [=](size_t sum, const completion_token_output & cto) { return sum + translator(cto).size(); };
@@ -1270,7 +1271,8 @@ static void append_to_generated_text_from_generated_token_probs(llama_server_con
     }
 }
-int main(int argc, char **argv) {
+int main(int argc, char **argv)
+{
     // own arguments required by this example
     gpt_params params;
     server_params sparams;

@@ -61,12 +61,9 @@ static void get_random_dims_minmax(int64_t * dims, int ndims, int min, int max)
 }
-struct ggml_tensor * get_random_tensor(
-        struct ggml_context * ctx0,
-        int ndims,
-        int64_t ne[],
-        float fmin,
-        float fmax) {
+static struct ggml_tensor * get_random_tensor(
+    struct ggml_context * ctx0, int ndims, int64_t ne[], float fmin, float fmax
+) {
     struct ggml_tensor * result = ggml_new_tensor(ctx0, GGML_TYPE_F32, ndims, ne);
     switch (ndims) {