main loop finished, starting to debug

commit 0ec5fdb5ce
parent cae8f50b1a

5 changed files with 138 additions and 37 deletions

.gitignore (vendored): 1 change

@@ -48,6 +48,7 @@ models-mnt
/llama-bench
/llava-cli
/lookahead
+/lookup
/main
/metal
/perplexity

Makefile: 5 changes

@@ -2,7 +2,7 @@
BUILD_TARGETS = \
	main quantize quantize-stats perplexity embedding vdot q8dot train-text-from-scratch convert-llama2c-to-ggml \
	simple batched batched-bench save-load-state server gguf llama-bench libllava.a llava-cli baby-llama beam-search \
-	speculative infill tokenize benchmark-matmult parallel finetune export-lora lookahead tests/test-c.o
+	speculative infill tokenize benchmark-matmult parallel finetune export-lora lookahead lookup tests/test-c.o

# Binaries only useful for tests
TEST_TARGETS = \

@@ -664,6 +664,9 @@ parallel: examples/parallel/parallel.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
lookahead: examples/lookahead/lookahead.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)

+lookup: examples/lookup/lookup.cpp ggml.o llama.o $(COMMON_DEPS) $(OBJS)
+	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
+
ifdef LLAMA_METAL
metal: examples/metal/metal.cpp ggml.o $(OBJS)
	$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)

common/common.h: 4 changes

@@ -75,10 +75,10 @@ struct gpt_params {
    // // sampling parameters
    struct llama_sampling_params sparams;

-   std::string model = "models/7B/ggml-model-f16.gguf"; // model path
+   std::string model = "models/7B/ggml-model-q4_0.gguf"; // model path
    std::string model_draft = ""; // draft model for speculative decoding
    std::string model_alias = "unknown"; // model alias
-   std::string prompt = "";
+   std::string prompt = "Hello my name is";
    std::string prompt_file = ""; // store the external prompt file name
    std::string path_prompt_cache = ""; // path to file for saving/loading prompt eval state
    std::string input_prefix = ""; // string to prefix user inputs with

examples/CMakeLists.txt: 1 change

@@ -33,6 +33,7 @@ else()
    add_subdirectory(simple)
    add_subdirectory(speculative)
    add_subdirectory(lookahead)
+   add_subdirectory(lookup)
    add_subdirectory(train-text-from-scratch)
    if (LLAMA_METAL)
        add_subdirectory(metal)

examples/lookup/lookup.cpp: 164 changes

@@ -6,38 +6,6 @@
#include <string>
#include <vector>

-/*
-def find_candidate_pred_tokens(input_ids, max_ngram_size=3, num_pred_tokens=10):
-    input_length = input_ids.size(1)
-
-    for ngram_size in range(max_ngram_size, 0, -1):
-        # Extract the last n tokens as our search ngram
-        ngram = input_ids[0, -ngram_size:].tolist()
-
-        # Create sliding windows of size ngram_size
-        windows = input_ids.unfold(dimension=1, size=ngram_size, step=1)
-
-        # Convert ngram to a tensor for comparison
-        ngram_tensor = torch.tensor(ngram, device=input_ids.device).unsqueeze(0)
-
-        # Find where the windows match the ngram
-        matches = (windows == ngram_tensor).all(dim=2)
-
-        # Get the indices of matches
-        match_indices = matches.nonzero(as_tuple=True)[1]
-
-        # Iterate through match indices to find a valid continuation
-        for idx in match_indices:
-            start_idx = idx + ngram_size
-            end_idx = start_idx + num_pred_tokens
-            # Ensure we don't go beyond the length of input_ids and avoid self-match
-            if end_idx <= input_length and start_idx < input_length - ngram_size:
-                return input_ids[0, start_idx:end_idx]
-
-    # If no match is found, return an empty tensor
-    return torch.tensor([], dtype=torch.long, device=input_ids.device)
-*/
-
int main(int argc, char ** argv){
    gpt_params params;
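
For reference (not part of this commit): the Python block deleted above describes the prompt-lookup candidate search that the rest of this diff ports to C++. A standalone C++ sketch of the same idea, with illustrative names and types (the function name and the plain std::vector interface are assumptions, not the committed API), could look like:

// Standalone sketch (illustrative, not from this commit): given the tokens
// generated so far, take the last ngram_size tokens as the search n-gram,
// look for an earlier occurrence of it, and on a hit return up to n_draft
// tokens that followed that occurrence.
#include <algorithm>
#include <cstdint>
#include <vector>

using llama_token = int32_t; // in llama.cpp this type comes from llama.h

static std::vector<llama_token> find_candidate_pred_tokens(
        const std::vector<llama_token> & inp, int max_ngram_size, int n_draft) {
    const int n = (int) inp.size();

    for (int ngram_size = max_ngram_size; ngram_size > 0; --ngram_size) {
        if (n < 2*ngram_size) {
            continue; // not enough history for a non-overlapping match
        }
        // the search n-gram: the last ngram_size tokens
        const llama_token * ngram = inp.data() + n - ngram_size;

        // slide a window over the earlier history, excluding the n-gram itself
        for (int i = 0; i + ngram_size <= n - ngram_size; ++i) {
            bool match = true;
            for (int k = 0; k < ngram_size; ++k) {
                if (inp[i + k] != ngram[k]) { match = false; break; }
            }
            if (!match) {
                continue;
            }
            // return the tokens that followed this earlier occurrence
            const int start = i + ngram_size;
            const int end   = std::min(start + n_draft, n);
            return std::vector<llama_token>(inp.begin() + start, inp.begin() + end);
        }
    }

    return {}; // no match found
}

The committed code below instead performs this search inline over ctx_sampling->prev inside the decode loop.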

@@ -48,8 +16,8 @@ int main(int argc, char ** argv){
    // maximum n-grams to search for in prompt
    const int max_ngram_size = 3;

-   // length of the candidate sequence, if match is found
-   const int num_pred_tokens = 10;
+   // length of the candidate / draft sequence, if match is found
+   const int n_draft = 10;

#ifndef LOG_DISABLE_LOGS
    log_set_target(log_filename_generator("lookup", "log"));

@@ -98,6 +66,8 @@ int main(int argc, char ** argv){

    const auto t_enc_end = ggml_time_us();

    int n_predict = 0;
    int n_drafted = 0;
    int n_accept = 0;

    int n_past = inp.size();

@@ -106,8 +76,134 @@ int main(int argc, char ** argv){

    struct llama_sampling_context * ctx_sampling = llama_sampling_init(params.sparams);

    std::vector<llama_token> draft(n_draft);

    llama_batch batch_tgt = llama_batch_init(params.n_ctx, 0, 1);

    const auto t_dec_start = ggml_time_us();

    while(true){
        // print current draft sequence
        LOG("drafted %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, draft).c_str());

        int i_dft = 0;
        while (true) {
            //LOG("sampling target: s_keep = %3d, i_dft = %3d, i_batch_tgt = %3d\n", s_keep, i_dft, drafts[s_keep].i_batch_tgt[i_dft]);

            // sample from the target model
            llama_token id = llama_sampling_sample(ctx_sampling, ctx, NULL, 0);

            llama_sampling_accept(ctx_sampling, ctx, id, true);

            //LOG("last: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx_tgt, ctx_sampling->prev).c_str());

            const std::string token_str = llama_token_to_piece(ctx, id);

            printf("%s", token_str.c_str());
            fflush(stdout);

            if (id == llama_token_eos(model)) {
                has_eos = true;
            }

            ++n_predict;

            // check if the target token matches the draft
            if (i_dft < (int) draft.size() && id == draft[i_dft]) {
                LOG("the sampled target token matches the %dth drafted token (%d, '%s') - accepted\n", i_dft, id, token_str.c_str());
                ++n_accept;
                ++n_past;
                ++i_dft;

                continue;
            }

            LOG("the sampled target token (%d, '%s') did not match, or we ran out of drafted tokens\n", id, token_str.c_str());

            draft.clear();
            draft.push_back(id);
            // drafts[0].i_batch_tgt.push_back(0);

            // llama_batch_clear(batch_dft);
            // llama_batch_add (batch_dft, id, n_past_dft, { 0 }, true);

            // llama_kv_cache_seq_rm(ctx_dft, 0, n_past_dft, -1);
            // // LOG("dft batch: %s\n", LOG_BATCH_TOSTR_PRETTY(ctx_dft, batch_dft).c_str());
            // llama_decode (ctx_dft, batch_dft);

            // ++n_past_dft;
            break;
        }

        if (n_predict > params.n_predict || has_eos) {
            break;
        }

        llama_batch_clear(batch_tgt);
        llama_batch_add(batch_tgt, draft[0], n_past, { 0 }, true);

        bool match = false;
        // generate n_pred tokens through prompt lookup
        for (int ngram_size = max_ngram_size ; ngram_size > 0; --ngram_size){
            if (match){
                break;
            }
            const auto & prev = ctx_sampling->prev;
            int prev_size = prev.size();
            const llama_token * ngram = &prev[prev_size - ngram_size];

            for (int i = 0; i <= (int) prev_size - (ngram_size * 2); ++i) {
                if (prev[i] == ngram[0] && prev[i + 1] == ngram[1] && prev[i + 2] == ngram[2]) {
                    const int startIdx = i + ngram_size;
                    const int endIdx = startIdx + n_draft;
                    if (endIdx < prev_size){
                        match = true;
                        for (int j = startIdx; j < endIdx; ++j) {
                            LOG(" - draft candidate %d: %d\n", j, prev[j]);
                            draft.push_back(prev[j]);
                            llama_batch_add(batch_tgt, prev[j], n_past + j + 1, { 1 }, true);
                            ++n_drafted;
                        }
                    }
                }
            }
        }

        llama_decode(ctx, batch_tgt);
        ++n_past;

        draft.erase(draft.begin());

        // we have our draft!

    }

    auto t_dec_end = ggml_time_us();

    LOG_TEE("\n\n");

    LOG_TEE("encoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n", n_input, (t_enc_end - t_enc_start) / 1e6f, inp.size() / ((t_enc_end - t_enc_start) / 1e6f));
    LOG_TEE("decoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n", n_predict, (t_dec_end - t_dec_start) / 1e6f, n_predict / ((t_dec_end - t_dec_start) / 1e6f));

    LOG_TEE("\n");
    LOG_TEE("n_draft = %d\n", n_draft);
    LOG_TEE("n_predict = %d\n", n_predict);
    LOG_TEE("n_drafted = %d\n", n_drafted);
    LOG_TEE("n_accept = %d\n", n_accept);
    LOG_TEE("accept = %.3f%%\n", 100.0f * n_accept / n_drafted);

    LOG_TEE("\ntarget:\n");
    llama_print_timings(ctx);

    llama_sampling_free(ctx_sampling);
    llama_batch_free(batch_tgt);

    llama_free(ctx);
    llama_free_model(model);

    llama_backend_free();

    fprintf(stderr, "\n\n");

    return 0;
}
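
Note on the lookup loop in this commit: the window test compares exactly three positions (prev[i..i+2] against ngram[0..2]) regardless of the current ngram_size. A comparison that checks all ngram_size positions could be factored out as in the sketch below; the helper name match_window is hypothetical and not part of this commit, and llama_token comes from llama.h as in the rest of lookup.cpp.

// Sketch only: compare a full ngram_size-wide window of `prev` starting at
// index i against the trailing n-gram, instead of hard-coding three tokens.
static bool match_window(const llama_token * prev, int i, const llama_token * ngram, int ngram_size) {
    for (int k = 0; k < ngram_size; ++k) {
        if (prev[i + k] != ngram[k]) {
            return false;
        }
    }
    return true;
}

With such a helper, the inner condition would read if (match_window(prev.data(), i, ngram, ngram_size)) while the rest of the drafting loop stays unchanged.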