Merge branch 'ggerganov:master' into server_branch

pudepiedj, 2024-03-04 08:19:12 +00:00, committed by GitHub
commit 51e184fd87
3 changed files with 22 additions and 1 deletion


@@ -43,7 +43,7 @@ extern char const *LLAMA_BUILD_TARGET;
 int32_t get_num_physical_cores();
 
 struct gpt_params {
-    uint32_t seed = -1; // RNG seed
+    uint32_t seed = LLAMA_DEFAULT_SEED; // RNG seed
 
     int32_t n_threads = get_num_physical_cores();
     int32_t n_threads_draft = -1;
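
Note on the hunk above: the hard-coded -1 sentinel for "no seed chosen" is replaced by LLAMA_DEFAULT_SEED from llama.h. Below is a minimal sketch of how such a sentinel is typically resolved at startup; the helper name resolve_seed and the time-based fallback are illustrative assumptions, not code from this commit.

#include <cstdint>
#include <ctime>

// Illustrative stand-in; at the time of this commit llama.h defines
// LLAMA_DEFAULT_SEED as 0xFFFFFFFF.
#ifndef LLAMA_DEFAULT_SEED
#define LLAMA_DEFAULT_SEED 0xFFFFFFFF
#endif

// Hypothetical helper: map the "unset" sentinel to a time-based seed.
static uint32_t resolve_seed(uint32_t seed) {
    return seed == LLAMA_DEFAULT_SEED ? (uint32_t) time(nullptr) : seed;
}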


@@ -511,6 +511,14 @@ int main(int argc, char ** argv) {
     std::vector<llama_token> embd;
     std::vector<llama_token> embd_guidance;
 
+    // tokenized antiprompts
+    std::vector<std::vector<llama_token>> antiprompt_ids;
+
+    antiprompt_ids.reserve(params.antiprompt.size());
+    for (const std::string & antiprompt : params.antiprompt) {
+        antiprompt_ids.emplace_back(::llama_tokenize(ctx, antiprompt, false, true));
+    }
+
     struct llama_sampling_context * ctx_sampling = llama_sampling_init(sparams);
 
     while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
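
The hunk above tokenizes every antiprompt once, before the generation loop, calling ::llama_tokenize with the special-token flag enabled so that an antiprompt which is itself a special token comes back as a single id. A self-contained sketch of the same pre-tokenization pattern follows; fake_tokenize and the example strings are stand-ins for illustration, not the real tokenizer.

#include <cstdint>
#include <functional>
#include <string>
#include <vector>

using llama_token = int32_t;

// Stand-in for ::llama_tokenize(ctx, text, /*add_bos=*/false, /*special=*/true);
// it just hashes the string into one fake token id.
static std::vector<llama_token> fake_tokenize(const std::string & text) {
    return { (llama_token) (std::hash<std::string>{}(text) % 32000) };
}

int main() {
    const std::vector<std::string> antiprompt = { "</s>", "User:" };

    // Same pattern as the hunk: tokenize each antiprompt once, up front,
    // instead of re-tokenizing it for every generated token.
    std::vector<std::vector<llama_token>> antiprompt_ids;
    antiprompt_ids.reserve(antiprompt.size());
    for (const std::string & s : antiprompt) {
        antiprompt_ids.push_back(fake_tokenize(s));
    }
    return antiprompt_ids.size() == 2 ? 0 : 1;
}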
@@ -769,6 +777,18 @@ int main(int argc, char ** argv) {
                     }
                 }
 
+                // check for reverse prompt using special tokens
+                llama_token last_token = llama_sampling_last(ctx_sampling);
+                for (std::vector<llama_token> ids : antiprompt_ids) {
+                    if (ids.size() == 1 && last_token == ids[0]) {
+                        if (params.interactive) {
+                            is_interacting = true;
+                        }
+                        is_antiprompt = true;
+                        break;
+                    }
+                }
+
                 if (is_antiprompt) {
                     LOG("found antiprompt: %s\n", last_output.c_str());
                 }
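
The existing string-based scan detokenizes recent output and searches it for each antiprompt; the block added above short-circuits that for antiprompts that tokenize to exactly one id by comparing them against the last sampled token. A minimal sketch of that check, factored into a helper (matches_single_token_antiprompt is a hypothetical name, not part of the change):

#include <cstdint>
#include <vector>

using llama_token = int32_t;

// Sketch of the check added above: an antiprompt that tokenized to exactly
// one id can be matched against the last sampled token directly, with no
// detokenization or substring search.
static bool matches_single_token_antiprompt(
        llama_token last_token,
        const std::vector<std::vector<llama_token>> & antiprompt_ids) {
    for (const std::vector<llama_token> & ids : antiprompt_ids) {
        if (ids.size() == 1 && last_token == ids[0]) {
            return true;
        }
    }
    return false;
}

int main() {
    const std::vector<std::vector<llama_token>> antiprompt_ids = { { 2 }, { 10, 11 } };
    // token 2 matches the single-token antiprompt; token 10 does not, since it
    // is only a prefix of a multi-token antiprompt.
    return (matches_single_token_antiprompt(2, antiprompt_ids) &&
           !matches_single_token_antiprompt(10, antiprompt_ids)) ? 0 : 1;
}

Taking each ids vector by const reference, as in the sketch, avoids copying a vector per generated token; the hunk iterates by value, which is also fine for typical antiprompt counts.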


@@ -6904,6 +6904,7 @@ static __global__ void soft_max_f32(const float * x, const float * mask, const f
     // find the sum of exps in the block
     tmp = warp_reduce_sum(tmp);
     if (block_size > WARP_SIZE) {
+        __syncthreads();
         if (warp_id == 0) {
             buf_iw[lane_id] = 0.0f;
         }
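
The CUDA hunk adds a barrier at the start of the block-wide sum reduction in soft_max_f32. The shared buffer buf_iw is reused from the max reduction a few lines earlier in the same kernel, so without the barrier warp 0 could zero it while other warps are still reading the per-warp max values, which is a data race. The sketch below reproduces that pattern under those assumptions; the kernel and helper names are illustrative, and the real kernel additionally handles masks, scaling, and rows wider than the block.

#include <cuda_runtime.h>
#include <math.h>

#define WARP_SIZE 32

// Warp-level reductions, same shape as ggml's warp_reduce_max/warp_reduce_sum.
static __device__ float warp_reduce_max_sketch(float x) {
    for (int offset = WARP_SIZE / 2; offset > 0; offset >>= 1) {
        x = fmaxf(x, __shfl_xor_sync(0xFFFFFFFF, x, offset, WARP_SIZE));
    }
    return x;
}

static __device__ float warp_reduce_sum_sketch(float x) {
    for (int offset = WARP_SIZE / 2; offset > 0; offset >>= 1) {
        x += __shfl_xor_sync(0xFFFFFFFF, x, offset, WARP_SIZE);
    }
    return x;
}

// Softmax-style sketch: one row per block, one element per thread, and a
// single shared buffer (buf_iw) reused for two block-wide reductions.
template <int block_size>
static __global__ void soft_max_sketch(const float * x, float * dst) {
    __shared__ float buf_iw[WARP_SIZE];

    const int tid     = threadIdx.x;
    const int warp_id = tid / WARP_SIZE;
    const int lane_id = tid % WARP_SIZE;

    const float val = x[blockIdx.x * block_size + tid];

    // pass 1: block-wide max via buf_iw
    float max_val = warp_reduce_max_sketch(val);
    if (block_size > WARP_SIZE) {
        if (warp_id == 0) {
            buf_iw[lane_id] = -INFINITY;
        }
        __syncthreads();
        if (lane_id == 0) {
            buf_iw[warp_id] = max_val;   // one partial max per warp
        }
        __syncthreads();
        max_val = warp_reduce_max_sketch(buf_iw[lane_id]);
    }

    // pass 2: block-wide sum of exps, reusing the same buffer
    float tmp = expf(val - max_val);
    tmp = warp_reduce_sum_sketch(tmp);
    if (block_size > WARP_SIZE) {
        __syncthreads(); // the added barrier: without it, warp 0 could zero
                         // buf_iw below while other warps are still reading
                         // the per-warp max values written in pass 1
        if (warp_id == 0) {
            buf_iw[lane_id] = 0.0f;
        }
        __syncthreads();
        if (lane_id == 0) {
            buf_iw[warp_id] = tmp;       // one partial sum per warp
        }
        __syncthreads();
        tmp = warp_reduce_sum_sketch(buf_iw[lane_id]);
    }

    dst[blockIdx.x * block_size + tid] = expf(val - max_val) / tmp;
}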