Finished fixing some issues with llamacheck

Andrew Ferrouolo 2024-06-11 13:05:10 -04:00
parent 5b125003ca
commit 9f8790bb49
3 changed files with 4 additions and 5 deletions

Makefile

@@ -912,7 +912,7 @@ gbnf-validator: examples/gbnf-validator/gbnf-validator.cpp ggml.o llama.o $(COMM
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)
 llamacheck: examples/llamacheck/llamacheck.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
 	$(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS)

examples/llamacheck/llamacheck.cpp

@@ -1,7 +1,4 @@
 #include "common.h"
-#include "llama.h"
-#include <cmath>
 #include <cstdio>
 #include <string>
 #include <vector>
@@ -66,6 +63,7 @@ int main(int argc, char ** argv) {
     std::string prompt_template = "You will see two sentences. The first is marked INCORRECT and has a plethora of spelling and grammatical issues, the second is marked CORRECT and shows the fixed version of the prior sentence. INCORRECT:";
     std::string prompt_suffix = " CORRECT: ";
     std::string input_string = "";
+    LOG_TEE(">>>\n");
     while (std::getline(std::cin, input_string, '\n')) {
         if (input_string == "q") {
             break;
@@ -144,7 +142,7 @@ int main(int argc, char ** argv) {
         const llama_token new_token_id = llama_sample_token_greedy(ctx, &candidates_p);
         // is it an end of generation?
-        if (llama_token_is_eog(model, new_token_id) || n_cur == n_len) {
+        if (llama_token_is_eog(model, new_token_id) || n_cur >= n_len) {
             LOG_TEE("\n");
             break;
@@ -182,6 +180,7 @@ int main(int argc, char ** argv) {
         fprintf(stderr, "\n");
         llama_batch_free(batch);
+        LOG_TEE(">>>\n");
     }
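
Taken together, the two LOG_TEE(">>>\n") additions appear to print a ready marker before the first read and again after each completed correction, so a user (or a driving script) can tell when llamacheck is waiting for the next line. Below is a minimal sketch of the input loop implied by the diff, with the model call replaced by a stub; the name full_prompt and the exact concatenation are assumptions, not code from the commit.

// Minimal sketch of the llamacheck input loop implied by the diff: print the
// ">>>" ready marker, read one line, stop on "q", and wrap the line between the
// INCORRECT/CORRECT prompt pieces. The model call is stubbed out.
#include <iostream>
#include <string>

int main() {
    std::string prompt_template = "... INCORRECT:";   // shortened here; see the diff for the full text
    std::string prompt_suffix   = " CORRECT: ";
    std::string input_string;

    std::cout << ">>>\n";                             // ready marker added by this commit
    while (std::getline(std::cin, input_string)) {
        if (input_string == "q") {                    // quit sentinel, as in the diff
            break;
        }
        // full_prompt is a hypothetical name; the real code tokenizes and decodes instead.
        std::string full_prompt = prompt_template + " " + input_string + prompt_suffix;
        std::cout << "[model would correct] " << full_prompt << "\n";
        std::cout << ">>>\n";                         // marker reprinted once the reply is done
    }
    return 0;
}
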

llamacheck (new executable file)

Binary file not shown.