diff --git a/examples/server/public/index.html b/examples/server/public/index.html index f7d245f03..79990c9ee 100644 --- a/examples/server/public/index.html +++ b/examples/server/public/index.html @@ -201,7 +201,7 @@ - + ? @@ -213,7 +213,7 @@ OK - + {{ key }} @@ -226,7 +226,7 @@ Samplers - + ? @@ -238,7 +238,7 @@ OK - + {{ key }} @@ -246,7 +246,7 @@ Penalties - + ? @@ -258,7 +258,7 @@ OK - + {{ key }} @@ -321,22 +321,22 @@ apiKey: '', systemMessage: 'The starting message that defines how model should behave.', temperature: 'Controls the randomness of the generated text by affecting the probability distribution of the output tokens. Higher = more random, lower = more focused.', - dynatemp_range: 'The added value to the range of dynamic temperature, which adjusts probabilities by entropy of tokens.', - dynatemp_exponent: 'Smoothes out the probability redistribution based on the most probable token.', + dynatemp_range: 'Addon for the temperature sampler. The added value to the range of dynamic temperature, which adjusts probabilities by entropy of tokens.', + dynatemp_exponent: 'Addon for the temperature sampler. Smoothes out the probability redistribution based on the most probable token.', top_k: 'Keeps only k top tokens.', top_p: 'Limits tokens to those that together have a cumulative probability of at least p', min_p: 'Limits tokens based on the minimum probability for a token to be considered, relative to the probability of the most likely token.', - xtc_probability: 'The probability that the XTC sampler will cut token from the beginning.', - xtc_threshold: 'If XTC is used, all top tokens with probabilities above this threshold will be cut.', + xtc_probability: 'XTC sampler cuts out top tokens; this parameter controls the chance of cutting tokens at all. 0 disables XTC.', + xtc_threshold: 'XTC sampler cuts out top tokens; this parameter controls the token probability that is required to cut that token.', typical_p: 'Sorts and limits tokens based on the difference between log-probability and entropy.', repeat_last_n: 'Last n tokens to consider for penalizing repetition', repeat_penalty: 'Controls the repetition of token sequences in the generated text', presence_penalty: 'Limits tokens based on whether they appear in the output or not.', frequency_penalty: 'Limits tokens based on how often they appear in the output.', - dry_multiplier: 'Sets the DRY sampling multiplier.', - dry_base: 'Sets the DRY sampling base value.', - dry_allowed_length: 'Sets the allowed length for DRY sampling.', - dry_penalty_last_n: 'Sets DRY penalty for the last n tokens.', + dry_multiplier: 'DRY sampling reduces repetition in generated text even across long contexts. This parameter sets the DRY sampling multiplier.', + dry_base: 'DRY sampling reduces repetition in generated text even across long contexts. This parameter sets the DRY sampling base value.', + dry_allowed_length: 'DRY sampling reduces repetition in generated text even across long contexts. This parameter sets the allowed length for DRY sampling.', + dry_penalty_last_n: 'DRY sampling reduces repetition in generated text even across long contexts. This parameter sets DRY penalty for the last n tokens.', max_tokens: 'The maximum number of token per output.', custom: '', // custom json-stringified object };