1
0
Fork 0
mirror of https://gitlab.com/niansa/llama_any.git synced 2025-03-06 20:48:27 +01:00
llama_any/main.cpp
2023-04-03 22:06:29 +02:00

45 lines
956 B
C++

#include "Runtime.hpp"
#include "Client.hpp"
#include <iostream>
#include <string>
#include <string_view>
/// Progress callback: overwrites the current console line with the
/// completion percentage (e.g. "42%\r").
/// @param progress completion in percent [0..100]; truncated to an integer.
/// Fix: appended the '%' sign so the output matches the inline progress
/// lambda used in main() ("<n>%\r"); the original printed only the bare
/// number followed by '\r'.
void on_progress(float progress) {
    // '\r' (no newline) keeps the cursor on the same line so the next
    // update overwrites it; flush forces the partial line to the terminal.
    std::cout << unsigned(progress) << "%\r" << std::flush;
}
/// Entry point: connects to a llama_any server and runs an endless
/// prompt/answer loop, streaming progress and generated tokens to stdout.
int main()
{
    Runtime rt;

    // Greeting banner (PLATFORM is a project-provided macro).
    std::cout << "llama.any running on " PLATFORM ".\n"
                 "\n";

    // Obtain the server address from the user and connect.
    // NOTE(review): 99181 exceeds the 16-bit TCP port range (max 65535) —
    // verify what addressing Client actually uses here.
    const std::string addr = rt.readInput("Server address");
    Client client(addr, 99181);

    // Prompt loop: ask, stream the reply, cooperate with the runtime,
    // repeat forever.
    while (true) {
        rt.clearScreen();

        // Overwrites one console line with "<n>%\r" while inference runs.
        const auto report_progress = [&rt](float progress) {
            std::cout << unsigned(progress) << "%\r" << std::flush;
            rt.cooperate();
        };
        // Emits each generated token as soon as it arrives.
        const auto emit_token = [&rt](std::string_view token) {
            std::cout << token << std::flush;
            rt.cooperate();
        };

        client.ask(rt.readInput("Prompt"), report_progress, emit_token);
        std::cout << "\n";

        // Give the runtime a scheduling slice between iterations
        // (original code did this in the for-statement's iteration clause).
        rt.cooperate();
    }
    return 0; // unreachable: the loop above never exits
}