fix build

This commit is contained in:
ngxson 2024-02-24 00:57:23 +01:00
parent aeed190d9f
commit 2a0d74d52e
3 changed files with 5 additions and 6 deletions

View file

@ -111,4 +111,4 @@ int main() {
std::cout << "\n" << llama_functionary::convert_response_to_oai_choices(test_response) << "\n";
return 0;
}
}

View file

@ -22,6 +22,7 @@ using json = nlohmann::json;
#define FUNCTIONARY_RECIP_ALL "all"
#define FUNCTIONARY_RECIP_NONE "no-tool-call"
namespace llama_functionary {
template <typename T>
static T json_value(const json &body, const std::string &key, const T &default_value)
@ -55,9 +56,6 @@ inline std::vector<std::string> str_split(std::string str, const std::string & d
return output;
}
namespace llama_functionary {
typedef struct message {
std::string from; // can be "system", "user", "assistant" or name of function
std::string recipient = FUNCTIONARY_RECIP_ALL;
@ -273,7 +271,7 @@ inline json convert_response_to_oai_choices(const std::string & content) {
}
// build final response
json choices = json::array();
// TODO: technically, functionary can reponse both text + tool_call in one shot. But for some reasons, the original implementation of OpenAI only return either ofthem, not both.
// TODO: technically, functionary can respond with both text + tool_call in one shot. But for some reason, the original implementation of OpenAI returns only one of them, not both.
if (tool_calls.size() > 0) {
choices.push_back(json{
{"index", 0},
@ -297,4 +295,4 @@ inline json convert_response_to_oai_choices(const std::string & content) {
return choices;
}
} // namespace llama_functionary
} // namespace llama_functionary

View file

@ -9,6 +9,7 @@
#include "json.hpp"
#include "utils.hpp"
#include "functionary.hpp"
#define DEFAULT_OAICOMPAT_MODEL "gpt-3.5-turbo-0613"