removing random prompt generation

antimatter15 2023-03-18 00:19:52 -07:00
parent e95e64bd49
commit 1cb9215e5d


@@ -815,9 +815,9 @@ int main(int argc, char ** argv) {
     fprintf(stderr, "%s: seed = %d\n", __func__, params.seed);
     std::mt19937 rng(params.seed);
-    if (params.prompt.empty()) {
-        params.prompt = gpt_random_prompt(rng);
-    }
+    // if (params.prompt.empty()) {
+    //     params.prompt = gpt_random_prompt(rng);
+    // }
     // params.prompt = R"(// this function checks if the number n is prime
     //bool is_prime(int n) {)";
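
For context: gpt_random_prompt seeds generation with a short canned opener when no prompt is supplied, so with this call commented out an empty params.prompt now stays empty. A minimal sketch of a helper of that shape, assuming illustrative starter strings rather than the repository's actual list:

#include <random>
#include <string>

// Sketch of a gpt_random_prompt-style helper: uses the seeded RNG to
// pick one of a few canned sentence openers for the model to continue.
// The candidate strings below are assumptions for illustration only.
std::string gpt_random_prompt(std::mt19937 & rng) {
    static const std::string starters[] = {
        "So", "Once upon a time", "When", "The", "After",
    };
    return starters[rng() % (sizeof(starters) / sizeof(starters[0]))];
}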
@@ -853,7 +853,7 @@ int main(int argc, char ** argv) {
     std::vector<float> logits;
     // Add a space in front of the first character to match OG llama tokenizer behavior
-    params.prompt.insert(0, 1, ' ');
+    // params.prompt.insert(0, 1, ' ');
     // tokenize the prompt
     std::vector<gpt_vocab::id> embd_inp;// = ::llama_tokenize(vocab, params.prompt, true);
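
The second hunk disables the leading-space insertion that, per the diff's own comment, matched the original llama tokenizer's behavior. The now-commented call is plain std::string::insert(pos, count, ch); a standalone illustration of what it did to the prompt:

#include <cassert>
#include <string>

int main() {
    std::string prompt = "Hello";
    prompt.insert(0, 1, ' '); // prepend one space at position 0
    assert(prompt == " Hello");
    return 0;
}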