removing random prompt generation
parent e95e64bd49
commit 1cb9215e5d
1 changed file with 4 additions and 4 deletions
chat.cpp | 8 ++++----
@@ -815,9 +815,9 @@ int main(int argc, char ** argv) {
     fprintf(stderr, "%s: seed = %d\n", __func__, params.seed);

     std::mt19937 rng(params.seed);
-    if (params.prompt.empty()) {
-        params.prompt = gpt_random_prompt(rng);
-    }
+    // if (params.prompt.empty()) {
+    //     params.prompt = gpt_random_prompt(rng);
+    // }

     // params.prompt = R"(// this function checks if the number n is prime
     //bool is_prime(int n) {)";
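The three commented-out lines above were the fallback that generated a prompt when none was supplied, using the RNG seeded from params.seed. The body of gpt_random_prompt is not part of this diff; purely as an illustration of the kind of helper being bypassed (the prompt strings below are made up, and the sketch is deliberately not named after the real function), it has roughly this shape:

// Illustrative sketch only -- not the real gpt_random_prompt from this repo.
// The caller passes in the std::mt19937 built from params.seed above.
#include <random>
#include <string>

std::string gpt_random_prompt_sketch(std::mt19937 & rng) {
    // Hypothetical prompt list; the actual helper's prompts are not in this diff.
    static const std::string prompts[] = {
        "Once upon a time",
        "The quick brown fox",
        "In a galaxy far away",
    };
    return prompts[rng() % (sizeof(prompts) / sizeof(prompts[0]))];
}

With this block disabled, an empty params.prompt stays empty, so whatever prompt the program uses now has to be set elsewhere rather than by this random fallback.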
@@ -853,7 +853,7 @@ int main(int argc, char ** argv) {
     std::vector<float> logits;

     // Add a space in front of the first character to match OG llama tokenizer behavior
-    params.prompt.insert(0, 1, ' ');
+    // params.prompt.insert(0, 1, ' ');
     // tokenize the prompt
     std::vector<gpt_vocab::id> embd_inp;// = ::llama_tokenize(vocab, params.prompt, true);

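The second hunk disables the leading-space insertion (the comment above it says it was there to match the original LLaMA tokenizer's behavior) and leaves embd_inp declared but no longer filled from params.prompt. As a rough sketch of the path being turned off, reusing the ::llama_tokenize call that the diff keeps in a comment (its signature and the final flag are taken from that comment, not verified elsewhere):

// Sketch of the disabled code path, assuming vocab and params are already set up.
std::string prompt = params.prompt;

// Leading space to match the OG llama tokenizer behavior, as the comment above notes.
prompt.insert(0, 1, ' ');

// Tokenize into vocab ids; the trailing 'true' is copied verbatim from the
// commented-out call, and its meaning is not spelled out in this diff.
std::vector<gpt_vocab::id> embd_inp = ::llama_tokenize(vocab, prompt, true);

After this change embd_inp starts out empty and the prompt is no longer tokenized at this point in main.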