Fix: add end condition for generating
This commit is contained in:
parent
ba1f617d7d
commit
6ed4893391
2 changed files with 22 additions and 14 deletions
|
@ -33,16 +33,20 @@ class MyModel:
|
|||
s = libc.sampling(self.model)
|
||||
return s
|
||||
|
||||
model = MyModel(["main", "--model", "../llama.cpp/models/ggml-vic13b-q4_1.bin", "-c", "2048"])
|
||||
# print(model)
|
||||
model.eval_string("""user: what is the color of the flag of UN?""")
|
||||
# model.eval_token(100)
|
||||
x = np.random.random((10, 5120))# , dtype=np.float32)
|
||||
model.eval_float(x)
|
||||
model.eval_string("""assistant:""")
|
||||
# print(x[0,0], x[0,1],x[1,0])
|
||||
# model.eval_float(x)
|
||||
# print(libc)
|
||||
if __name__ == "__main__":
|
||||
model = MyModel(["main", "--model", "../llama.cpp/models/ggml-vic13b-q4_1.bin", "-c", "2048"])
|
||||
# print(model)
|
||||
model.eval_string("""user: what is the color of the flag of UN?""")
|
||||
# model.eval_token(100)
|
||||
x = np.random.random((10, 5120))# , dtype=np.float32)
|
||||
model.eval_float(x)
|
||||
model.eval_string("""assistant:""")
|
||||
# print(x[0,0], x[0,1],x[1,0])
|
||||
# model.eval_float(x)
|
||||
# print(libc)
|
||||
|
||||
for i in range(50):
|
||||
print(model.sampling().decode(), end="", flush=True)
|
||||
for i in range(500):
|
||||
tmp = model.sampling().decode()
|
||||
if tmp == "":
|
||||
break
|
||||
print(tmp, end="", flush=True)
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
#include "embd_input.h"
|
||||
#include <stdlib.h>
|
||||
#include <random>
|
||||
#include <string.h>
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
|
||||
|
@ -20,9 +21,12 @@ int main(int argc, char** argv) {
|
|||
eval_string(mymodel, "assistant:");
|
||||
// printf("eval float end\n");
|
||||
eval_string(mymodel, mymodel->params.prompt.c_str());
|
||||
for (int i=0;i < 50; i++) {
|
||||
const char* tmp;
|
||||
for (int i=0;i < 500; i++) {
|
||||
// int id = sampling_id(mymodel);
|
||||
printf("%s", sampling(mymodel)); // llama_token_to_str(mymodel->ctx, id));
|
||||
tmp = sampling(mymodel);
|
||||
if (strlen(tmp) == 0) break;
|
||||
printf("%s", tmp); // llama_token_to_str(mymodel->ctx, id));
|
||||
fflush(stdout);
|
||||
// eval_id(mymodel, id);
|
||||
}
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue