removed unused llama_context in dry sampler

l3utterfly 2024-04-29 10:25:25 +09:00
parent 793e1e221b
commit 3caec6bb41
3 changed files with 2 additions and 3 deletions


@@ -337,7 +337,7 @@ static llama_token_data_array llama_sampling_prepare_impl(
     {
         const int penalty_tokens_used_size = std::min(penalty_tokens.size(), (size_t)dry_penalty_last_n);
         if (penalty_tokens_used_size) {
-            llama_sample_dry(ctx_main, &cur_p,
+            llama_sample_dry(&cur_p,
                 penalty_tokens.data() + penalty_tokens.size() - penalty_tokens_used_size,
                 penalty_tokens_used_size, dry_base, dry_multiplier, dry_allowed_length,
                 params.dry_seq_breakers.data(), params.dry_seq_breakers.size());


@@ -13233,7 +13233,7 @@ void llama_sample_min_p(struct llama_context * ctx, llama_token_data_array * can
     }
 }
 
-void llama_sample_dry(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, int last_tokens_size, float dry_base, float dry_multiplier, int dry_allowed_length, const llama_token * dry_seq_breakers, int dry_seq_breakers_size) {
+void llama_sample_dry(llama_token_data_array * candidates, const llama_token * last_tokens, int last_tokens_size, float dry_base, float dry_multiplier, int dry_allowed_length, const llama_token * dry_seq_breakers, int dry_seq_breakers_size) {
     // skip dry sampler if we don't have a previous token
     if (last_tokens_size < 1) return;


@@ -926,7 +926,6 @@ extern "C" {
     /// @details DRY sampler as described in: https://github.com/oobabooga/text-generation-webui/pull/5677
     LLAMA_API void llama_sample_dry(
-            struct llama_context * ctx,
             llama_token_data_array * candidates,
             const llama_token * last_tokens,
             int last_tokens_size,
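
For reference, a minimal sketch of a call site after this change, assuming the fork's llama.h declares the updated signature shown above. apply_dry is a hypothetical helper for illustration, not code from this commit:

#include "llama.h"

#include <vector>

// Hypothetical wrapper (illustration only): after this commit,
// llama_sample_dry takes just the candidate array, the recent-token
// window, and the DRY parameters; no llama_context is needed.
static void apply_dry(llama_token_data_array * cur_p,
                      const std::vector<llama_token> & penalty_tokens,
                      const std::vector<llama_token> & dry_seq_breakers,
                      float dry_base, float dry_multiplier, int dry_allowed_length) {
    if (penalty_tokens.empty()) {
        return; // llama_sample_dry also returns early when there is no history
    }
    llama_sample_dry(cur_p,
            penalty_tokens.data(), (int) penalty_tokens.size(),
            dry_base, dry_multiplier, dry_allowed_length,
            dry_seq_breakers.data(), (int) dry_seq_breakers.size());
}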