1
0
Fork 0
mirror of https://gitlab.com/niansa/anyproc.git synced 2025-03-06 20:49:24 +01:00

Better caching using hashes

This commit is contained in:
niansa/tuxifan 2023-04-19 10:00:19 +02:00
parent ac1ee32900
commit 7cb4139d62

View file

@@ -6,6 +6,7 @@
#include <unordered_map>
#include <string_view>
#include <utility>
#include <functional>
#include <justlm.hpp>
@@ -144,7 +145,7 @@ public:
class Translator : PyEval {
LM::Inference::Savestate sv;
std::unordered_map<std::string_view, std::string> cache;
std::unordered_map<size_t/*hash*/, std::string> cache;
static inline LM::Inference::Params get_params() {
auto p = get_recommended_params();
@@ -193,10 +194,11 @@ public:
}
std::string translate(std::string_view text, std::string_view language, const std::function<bool (float)> &on_append_tick = nullptr, const std::function<bool (const char *generated)>& on_generation_tick = nullptr) {
// Hash
size_t hash = std::hash<std::string_view>{}(text) + std::hash<std::string_view>{}(language);
// Cache lookup
auto res = std::find_if(cache.begin(), cache.end(), [text] (const auto& o) {
return o.first == text;
});
auto res = cache.find(hash);
if (res != cache.end()) {
return res->second;
}
@@ -210,7 +212,7 @@ public:
.run(on_append_tick, on_generation_tick));
// Add to cache
cache[text] = fres;
cache[hash] = fres;
// Return final result;
return fres;