
Updated for latest justlm

Author: niansa, 2023-05-16 21:14:39 +02:00
commit 425e6b3c68, parent 2c15a85877
6 changed files with 13 additions and 6 deletions

.gitmodules

@@ -7,3 +7,4 @@
 [submodule "libjustlm"]
 	path = libjustlm
 	url = https://gitlab.com/niansa/libjustlm.git
+	branch = sharedobjects

CMakeLists.txt

@@ -21,7 +21,7 @@ add_executable(discord_llama
     config.hpp config.cpp
     utils.cpp utils.hpp
 )
-target_link_libraries(discord_llama PUBLIC dpp fmt pthread libjustlm anyproc ggml cosched sqlite3)
+target_link_libraries(discord_llama PUBLIC dpp fmt pthread justlm anyproc cosched sqlite3)
 install(TARGETS discord_llama
         LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR})

@@ -1 +1 @@
-Subproject commit 3d3de6ae875737c88a8c4496651ef1ed1c74b073
+Subproject commit e1a72334e0163906a64e6dcf044497a04a7ff2f1

@@ -1 +1 @@
-Subproject commit 05eb25ce59de5fa9fdb80c0124cfdc09f920861c
+Subproject commit 9d1047be5460cfea8958464421efb8cafed065d0

@@ -1 +1 @@
-Subproject commit 59a6a8b1d18047121ecf394da30ea7bda63a36ec
+Subproject commit 36aa757a329ef9c17f5ab4a5e400f4d67be81f7a

main.cpp

@@ -148,7 +148,12 @@ private:
         ENSURE_LLM_THREAD();
         // Deserialize init cache if not instruct mode without prompt file
         if (channel_cfg.instruct_mode && config.instruct_prompt_file == "none") co_return true;
-        std::ifstream f((*channel_cfg.model_name)+(channel_cfg.instruct_mode?"_instruct_init_cache":"_init_cache"), std::ios::binary);
+        const auto path = (*channel_cfg.model_name)+(channel_cfg.instruct_mode?"_instruct_init_cache":"_init_cache");
+        std::ifstream f(path, std::ios::binary);
+        if (!f) {
+            std::cerr << "Warning: Failed to open init cache file, consider regeneration: " << path << std::endl;
+            co_return false;
+        }
         if (!co_await inference->deserialize(f)) {
            co_return false;
         }
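
For context, the pattern this hunk introduces can be summarized in a standalone sketch: open the init cache, verify the stream before deserializing, and treat a missing file as a recoverable condition the caller can react to (for example by regenerating the cache). The deserialize_cache() helper and the restore_init_cache() signature below are hypothetical illustrations, not code from this repository.

    #include <fstream>
    #include <iostream>
    #include <string>

    // Hypothetical stand-in for justlm's state deserialization.
    static bool deserialize_cache(std::ifstream &f) {
        return static_cast<bool>(f); // a real implementation would read the serialized LLM state here
    }

    // Build the cache path the same way the bot does and fail early if the
    // file cannot be opened, so the caller can regenerate the cache instead
    // of failing inside the deserializer.
    bool restore_init_cache(const std::string &model_name, bool instruct_mode) {
        const auto path = model_name + (instruct_mode ? "_instruct_init_cache" : "_init_cache");
        std::ifstream f(path, std::ios::binary);
        if (!f) {
            std::cerr << "Warning: Failed to open init cache file, consider regeneration: " << path << std::endl;
            return false;
        }
        return deserialize_cache(f);
    }
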
@@ -163,6 +168,7 @@ private:
         // Get or create inference
         auto inference = co_await llm_pool.create_inference(id, channel_cfg.model->weights_path, llm_get_params(channel_cfg.instruct_mode));
         if (!co_await llm_restart(inference, channel_cfg)) {
+            std::cerr << "Warning: Failed to deserialize cache: " << inference->get_last_error() << std::endl;
             co_return nullptr;
         }
         co_return inference;
@@ -498,7 +504,7 @@ private:
             if (instruct_mode_param.index()) {
                 instruct_mode = std::get<bool>(instruct_mode_param);
             } else {
-                instruct_mode = true;
+                instruct_mode = model_config.instruct_mode_policy != Configuration::Model::InstructModePolicy::Forbid;
             }
         }
         // Create thread if it doesn't exist or update it if it does
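
The last hunk changes the fallback for the instruct_mode slash-command parameter: instead of always defaulting to true when the user omits it, the bot now defaults to whatever the model's policy allows. A minimal sketch of that decision is below; the Policy enum and the std::variant parameter shape are assumptions made for illustration and only approximate Configuration::Model::InstructModePolicy.

    #include <variant>

    enum class Policy { Allow, Force, Forbid }; // hypothetical mirror of InstructModePolicy

    // Resolve the effective instruct mode: an explicit user choice wins,
    // otherwise default to instruct mode only if the policy does not forbid
    // it (previously the fallback was unconditionally true).
    bool resolve_instruct_mode(const std::variant<std::monostate, bool> &param, Policy policy) {
        if (param.index() != 0) {
            return std::get<bool>(param);
        }
        return policy != Policy::Forbid;
    }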