diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt
index 59b45fd56..dd213f8ec 100644
--- a/common/CMakeLists.txt
+++ b/common/CMakeLists.txt
@@ -96,7 +96,7 @@ if (LLAMA_LLGUIDANCE)
     set(LLGUIDANCE_PATH ${LLGUIDANCE_SRC}/target/release)
     ExternalProject_Add(llguidance_ext
         GIT_REPOSITORY https://github.com/guidance-ai/llguidance
-        GIT_TAG 0cbe6b3a6ee72e5726c76f13fe67e21a5895f679
+        GIT_TAG d7b382916c466e6f26869671b8480229dc930423
         PREFIX ${CMAKE_BINARY_DIR}/llguidance
         SOURCE_DIR ${LLGUIDANCE_SRC}
         BUILD_IN_SOURCE TRUE
diff --git a/common/json-schema-to-grammar.cpp b/common/json-schema-to-grammar.cpp
index 46c2dc4d7..fa9a067fe 100644
--- a/common/json-schema-to-grammar.cpp
+++ b/common/json-schema-to-grammar.cpp
@@ -992,7 +992,7 @@ public:
 
 std::string json_schema_to_grammar(const json & schema) {
 #ifdef LLAMA_USE_LLGUIDANCE
-    return "llg:json:" + schema.dump();
+    return "%llguidance {}\nstart: %json " + schema.dump();
 #else
     return build_grammar([&](const llama_grammar_builder & callbacks) {
         auto copy = schema;
diff --git a/common/sampling.cpp b/common/sampling.cpp
index 97691c04b..2efb9c9a3 100644
--- a/common/sampling.cpp
+++ b/common/sampling.cpp
@@ -152,15 +152,9 @@ struct common_sampler * common_sampler_init(const struct llama_model * model, co
     lparams.no_perf = params.no_perf;
 
     struct llama_sampler * grmr;
-    if (params.grammar.compare(0, 4, "llg:") == 0) {
+    if (params.grammar.compare(0, 11, "%llguidance") == 0) {
 #ifdef LLAMA_USE_LLGUIDANCE
-        auto gp = params.grammar.find(':', 4);
-        if (gp == std::string::npos) {
-            GGML_ABORT("invalid serialized grammar");
-        }
-        auto grm_type = params.grammar.substr(4, gp - 4);
-        auto grm_data = params.grammar.c_str() + gp + 1;
-        grmr = llama_sampler_init_llg(model, grm_type.c_str(), grm_data);
+        grmr = llama_sampler_init_llg(model, "lark", params.grammar.c_str());
 #else
         GGML_ABORT("llguidance (cmake -DLLAMA_LLGUIDANCE=ON) is not enabled");
 #endif