diff --git a/common/common.h b/common/common.h
index d3682b7ad..b2868833b 100644
--- a/common/common.h
+++ b/common/common.h
@@ -43,7 +43,7 @@ extern char const *LLAMA_BUILD_TARGET;
 int32_t get_num_physical_cores();
 
 struct gpt_params {
-    uint32_t seed = -1; // RNG seed
+    uint32_t seed = LLAMA_DEFAULT_SEED; // RNG seed
 
     int32_t n_threads       = get_num_physical_cores();
     int32_t n_threads_draft = -1;
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index 2f65a44e6..d709729a6 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -511,6 +511,14 @@ int main(int argc, char ** argv) {
     std::vector<llama_token> embd;
     std::vector<llama_token> embd_guidance;
 
+    // tokenized antiprompts
+    std::vector<std::vector<llama_token>> antiprompt_ids;
+
+    antiprompt_ids.reserve(params.antiprompt.size());
+    for (const std::string & antiprompt : params.antiprompt) {
+        antiprompt_ids.emplace_back(::llama_tokenize(ctx, antiprompt, false, true));
+    }
+
     struct llama_sampling_context * ctx_sampling = llama_sampling_init(sparams);
 
     while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
@@ -769,6 +777,18 @@ int main(int argc, char ** argv) {
                 }
             }
 
+            // check for reverse prompt using special tokens
+            llama_token last_token = llama_sampling_last(ctx_sampling);
+            for (std::vector<llama_token> ids : antiprompt_ids) {
+                if (ids.size() == 1 && last_token == ids[0]) {
+                    if (params.interactive) {
+                        is_interacting = true;
+                    }
+                    is_antiprompt = true;
+                    break;
+                }
+            }
+
             if (is_antiprompt) {
                 LOG("found antiprompt: %s\n", last_output.c_str());
             }
diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index 7ed97430f..04c6cb1b8 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -6904,6 +6904,7 @@ static __global__ void soft_max_f32(const float * x, const float * mask, const f
     // find the sum of exps in the block
     tmp = warp_reduce_sum(tmp);
    if (block_size > WARP_SIZE) {
+        __syncthreads();
        if (warp_id == 0) {
            buf_iw[lane_id] = 0.0f;
        }
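
Note on the main.cpp hunks: the antiprompts are now tokenized once up front, and only antiprompts that tokenize to exactly one token can be matched against the last sampled token, which is what the ids.size() == 1 guard enforces. Below is a minimal standalone sketch of that matching logic; the tokenizer stub fake_tokenize and the example strings are made up for illustration, while the real code uses ::llama_tokenize and llama_sampling_last(ctx_sampling) as shown in the diff.

// Standalone illustration of the single-token antiprompt check added in
// examples/main/main.cpp. The tokenizer here is a stand-in; in the real
// code the ids come from ::llama_tokenize and the last token comes from
// llama_sampling_last(ctx_sampling).
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

using token = int32_t;

// Hypothetical stub: maps a reverse prompt to token ids (always one id here).
static std::vector<token> fake_tokenize(const std::string & text) {
    token id = 0;
    for (char c : text) {
        id = id * 31 + c;
    }
    return { id };
}

int main() {
    const std::vector<std::string> antiprompts = { "</s>", "User:" };

    // tokenize each antiprompt once, up front (mirrors the reserve + loop in the diff)
    std::vector<std::vector<token>> antiprompt_ids;
    antiprompt_ids.reserve(antiprompts.size());
    for (const std::string & a : antiprompts) {
        antiprompt_ids.push_back(fake_tokenize(a));
    }

    const token last_token = fake_tokenize("</s>")[0]; // pretend this token was just sampled

    bool is_antiprompt = false;
    for (const std::vector<token> & ids : antiprompt_ids) {
        // only single-token antiprompts can be matched against a single sampled token
        if (ids.size() == 1 && last_token == ids[0]) {
            is_antiprompt = true;
            break;
        }
    }

    std::printf("antiprompt hit: %s\n", is_antiprompt ? "yes" : "no");
    return 0;
}

As for the ggml-cuda.cu hunk, the added __syncthreads() appears to guard the reuse of the shared buf_iw buffer between the max and sum reductions, so that warp 0 cannot zero the buffer while other warps are still reading the previously reduced value.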