Mirror of https://gitlab.com/niansa/llama_nds.git (synced 2025-03-06 20:53:28 +01:00)
#include "Runtime.hpp"
|
|
#include "Client.hpp"
|
|
|
|
#include <iostream>
|
|
#include <string>
|
|
#include <string_view>
|
|
|
|
|
|
|
|
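// Prints an integer progress value on a single line; appears unused in this file, as main() passes inline lambdas instead.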
void on_progress(float progress) {
    std::cout << unsigned(progress) << '\r' << std::flush;
}

int main()
{
    Runtime rt;

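    // Note: PLATFORM is assumed here to be a string-literal macro supplied by the build system for the current target.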
    // Print header
    std::cout << "llama.any running on " PLATFORM ".\n"
                 "\n";

    // Ask for server address
    const std::string addr = rt.readInput("Server address");

    // Create client
    Client client(addr, 99181);

    // Connection loop
    for (;; rt.cooperate()) {
        // Clear screen
        rt.clearScreen();

        // Run inference
        client.ask(rt.readInput("Prompt"), [&rt] (float progress) {
            std::cout << unsigned(progress) << "%\r" << std::flush;
            rt.cooperate();
        }, [&rt] (std::string_view token) {
            std::cout << token << std::flush;
            rt.cooperate();
        });
        std::cout << "\n";
    }

    return 0;
}