common : use common_ prefix for common library functions (#9805)
* common : use common_ prefix for common library functions

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
This commit is contained in:
parent
0e9f760eb1
commit
7eee341bee
45 changed files with 1284 additions and 1284 deletions
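The commit is a mechanical rename (note the symmetric 1284 additions and 1284 deletions): helpers from the common library drop their ad-hoc gpt_/llama_ prefixes in favor of a uniform common_ prefix. The renames visible in the excerpt below:

    gpt_params                  -> common_params
    gpt_params_parse            -> common_params_parse
    llama_init_result           -> common_init_result
    llama_init_from_gpt_params  -> common_init_from_params
    ::llama_tokenize            -> common_tokenize
    llama_token_to_piece        -> common_token_to_piece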
@@ -31,7 +31,7 @@ template <class Iter>
 static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
     std::string ret;
     for (; begin != end; ++begin) {
-        ret += llama_token_to_piece(ctx, *begin);
+        ret += common_token_to_piece(ctx, *begin);
     }
 
     return ret;
@@ -272,8 +272,8 @@ struct tokenized_prompt {
 
     tokenized_prompt(llama_context * ctx, std::string pos, std::string neg) {
         const bool add_bos = llama_add_bos_token(llama_get_model(ctx));
-        tokens_pos = ::llama_tokenize(ctx, pos, add_bos, true);
-        tokens_neg = ::llama_tokenize(ctx, neg, add_bos, true);
+        tokens_pos = common_tokenize(ctx, pos, add_bos, true);
+        tokens_neg = common_tokenize(ctx, neg, add_bos, true);
         max_seq_len = std::max(tokens_pos.size(), tokens_neg.size());
         padding_seq(ctx, tokens_pos, max_seq_len);
         padding_seq(ctx, tokens_neg, max_seq_len);
@@ -281,7 +281,7 @@ struct tokenized_prompt {
 
     void padding_seq(llama_context * ctx, std::vector<llama_token> & tokens, size_t len) {
         // TODO: customize padding token
-        std::vector<llama_token> pad_tokens = ::llama_tokenize(ctx, " ", false);
+        std::vector<llama_token> pad_tokens = common_tokenize(ctx, " ", false);
         llama_token pad_tok = pad_tokens.back();
         while (tokens.size() < len) {
             tokens.push_back(pad_tok);
@@ -370,7 +370,7 @@ static void export_gguf(const std::vector<struct ggml_tensor *> & v_ctrl, const
  * Load prompt files and completion file.
  * Then format each pair of prompt + completion to make an entry.
  */
-static int prepare_entries(gpt_params & params, train_context & ctx_train) {
+static int prepare_entries(common_params & params, train_context & ctx_train) {
     // load prompts
     std::vector<std::string> positive_prompts = ctrlvec_load_prompt_file(params.cvector_positive_file, true);
     std::vector<std::string> negative_prompts = ctrlvec_load_prompt_file(params.cvector_negative_file, true);
@@ -388,9 +388,9 @@ static int prepare_entries(gpt_params & params, train_context & ctx_train) {
 }
 
 int main(int argc, char ** argv) {
-    gpt_params params;
+    common_params params;
 
-    if (!gpt_params_parse(argc, argv, params, LLAMA_EXAMPLE_CVECTOR_GENERATOR, print_usage)) {
+    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_CVECTOR_GENERATOR, print_usage)) {
         return 1;
     }
 
@@ -413,7 +413,7 @@ int main(int argc, char ** argv) {
     llama_numa_init(params.numa);
 
     // load the model to get hparams
-    llama_init_result llama_init = llama_init_from_gpt_params(params);
+    common_init_result llama_init = common_init_from_params(params);
 
     llama_model * model = llama_init.model;
     llama_context * ctx = llama_init.context;
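A minimal sketch (not part of the commit) showing the renamed API end-to-end, assembled from calls that appear in the hunks above. The header names and the llama_free/llama_free_model cleanup calls are assumptions about the surrounding llama.cpp tree and are not shown in this diff.

#include "common.h"
#include "llama.h"

#include <cstdio>
#include <string>
#include <vector>

static void print_usage(int, char ** argv) {
    printf("usage: %s [options]\n", argv[0]);
}

int main(int argc, char ** argv) {
    common_params params;  // was: gpt_params

    // was: gpt_params_parse(...)
    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_CVECTOR_GENERATOR, print_usage)) {
        return 1;
    }

    // was: llama_init_result / llama_init_from_gpt_params
    common_init_result llama_init = common_init_from_params(params);
    llama_model   * model = llama_init.model;
    llama_context * ctx   = llama_init.context;
    if (model == nullptr || ctx == nullptr) {
        return 1;
    }

    // was: ::llama_tokenize(ctx, text, add_bos, true)
    const bool add_bos = llama_add_bos_token(llama_get_model(ctx));
    std::vector<llama_token> tokens = common_tokenize(ctx, "hello world", add_bos, true);

    // was: llama_token_to_piece(ctx, token)
    std::string text;
    for (llama_token tok : tokens) {
        text += common_token_to_piece(ctx, tok);
    }
    printf("%s\n", text.c_str());

    llama_free(ctx);          // assumed cleanup, unchanged by this commit
    llama_free_model(model);
    return 0;
}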