Mirror of https://gitlab.com/niansa/discord_llama.git, synced 2025-03-06 20:48:25 +01:00
Temporarily removed init cache
parent 67d605d15e
commit 0ac2c63aca
1 changed file with 19 additions and 3 deletions
main.cpp (22 changed lines)
@@ -177,10 +177,25 @@ class Bot {
     }
 
     // Must run in llama thread
-    void llm_restart(LM::Inference& inference) {
+    void llm_restart(LM::Inference& llm) {
+        llm.append("History of the discord server.\n"
+                   "Note 1: "+bot.me.username+" is a friendly chatbot that is always happy to talk. He is friendly and helpful and always answers immediately. He has a good sense of humor and likes everyone. His age is unknown.\n"
+                   "Note 2: Ecki's real name is Eckhard Kohlhuber and he comes from Bavaria.\n" // Little easter egg
+                   "\n"
+                   "This is the #meta channel.\n"
+                   "Bob: "+bot.me.username+" have you ever been to France and if yes where?\n"
+                   +bot.me.username+": I was in Paris, in the museums!\n"
+                   "Bob: "+bot.me.username+" what are you exactly?\n"
+                   +bot.me.username+": I am "+bot.me.username+", your chatbot! I can answer questions and increase the activity of the server.\n"
+                   "Bob: Shall we talk about sex? "+bot.me.username+"?\n"
+                   +bot.me.username+": No! I will **not** talk about any NSFW topics.\n"
+                   "Bob: "+bot.me.username+" How are you?\n"
+                   +bot.me.username+": I am quite well! :-)\n"
+                   "Ecki: Hey "+bot.me.username+", what is 10 times 90??\n"
+                   +bot.me.username+": that is 900!\n", show_console_progress);
         // Deserialize init cache
-        std::ifstream f("init_cache", std::ios::binary);
-        inference.deserialize(f);
+        //std::ifstream f("init_cache", std::ios::binary);
+        //inference.deserialize(f);
     }
 
     // Must run in llama thread
     LM::Inference &llm_restart(dpp::snowflake id) {
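Note: with the cache load commented out, every restart re-evaluates the full initial prompt. A minimal sketch of how the two paths could be combined once the cache returns, assuming only what the hunk above already shows (LM::Inference::append() and a deserialize() that accepts an std::ifstream); this helper is illustrative and not part of the commit:

    // Illustrative sketch, not part of this commit. Assumes LM::Inference::append()
    // and ::deserialize(std::istream&) as used in the hunk above; needs <fstream>.
    void llm_restart(LM::Inference& llm) {
        std::ifstream cache("init_cache", std::ios::binary);
        if (cache) {
            // Cache present: restore the already-evaluated initial context
            llm.deserialize(cache);
            return;
        }
        // No cache yet: fall back to evaluating the full initial prompt, as this commit does
        llm.append("History of the discord server.\n"
                   /* ...same prompt text as in the hunk above... */,
                   show_console_progress);
    }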
@@ -215,6 +230,7 @@ class Bot {
             texts.translated = true;
         }
         // Inference for init cache TODO: Don't recreate on each startup
+        return;
         LM::Inference llm(config.inference_model, llm_get_params());
         std::ofstream f("init_cache", std::ios::binary);
         // Add initial context
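The added return; makes the init-cache block below it dead code for now. A rough sketch of what the TODO ("Don't recreate on each startup") might look like when re-enabled; serialize() is an assumption here, inferred only as the counterpart of the deserialize() call in the first hunk, and the std::filesystem::exists guard is just one way to avoid redoing the work:

    // Sketch only, not part of this commit. Assumes a serialize(std::ostream&) counterpart
    // to the deserialize() used above; needs <filesystem> and <fstream>.
    if (!std::filesystem::exists("init_cache")) {
        LM::Inference llm(config.inference_model, llm_get_params());
        llm_restart(llm);                                // evaluate the initial prompt once
        std::ofstream f("init_cache", std::ios::binary);
        llm.serialize(f);                                // persist the evaluated state for later restarts
    }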