mirror of https://gitlab.com/niansa/libcrosscoro.git synced 2025-03-06 20:53:32 +01:00

Removed non-STL components

commit d1263aebd7
parent 285416bfe5
Nils 2021-07-28 11:43:57 +02:00
36 changed files with 0 additions and 22678 deletions

.gitmodules

@@ -1,3 +0,0 @@
[submodule "vendor/c-ares/c-ares"]
path = vendor/c-ares/c-ares
url = https://github.com/c-ares/c-ares.git

CMakeLists.txt

@@ -8,20 +8,10 @@ execute_process(
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
)
option(LIBCORO_BUILD_TESTS "Build the tests, Default=ON." ON)
option(LIBCORO_CODE_COVERAGE "Enable code coverage, tests must also be enabled, Default=OFF" OFF)
option(LIBCORO_BUILD_EXAMPLES "Build the examples, Default=ON." ON)
message("${PROJECT_NAME} LIBCORO_BUILD_TESTS = ${LIBCORO_BUILD_TESTS}")
message("${PROJECT_NAME} LIBCORO_CODE_COVERAGE = ${LIBCORO_CODE_COVERAGE}")
message("${PROJECT_NAME} LIBCORO_BUILD_EXAMPLES = ${LIBCORO_BUILD_EXAMPLES}")
set(CARES_STATIC ON CACHE INTERNAL "")
set(CARES_SHARED OFF CACHE INTERNAL "")
set(CARES_INSTALL OFF CACHE INTERNAL "")
add_subdirectory(vendor/c-ares/c-ares)
set(LIBCORO_SOURCE_FILES
inc/coro/concepts/awaitable.hpp
inc/coro/concepts/buffer.hpp
@@ -32,24 +22,10 @@ set(LIBCORO_SOURCE_FILES
inc/coro/detail/poll_info.hpp
inc/coro/detail/void_value.hpp
inc/coro/net/connect.hpp src/net/connect.cpp
inc/coro/net/dns_resolver.hpp src/net/dns_resolver.cpp
inc/coro/net/hostname.hpp
inc/coro/net/ip_address.hpp src/net/ip_address.cpp
inc/coro/net/recv_status.hpp src/net/recv_status.cpp
inc/coro/net/send_status.hpp src/net/send_status.cpp
inc/coro/net/socket.hpp src/net/socket.cpp
inc/coro/net/ssl_context.hpp src/net/ssl_context.cpp
inc/coro/net/ssl_handshake_status.hpp
inc/coro/net/tcp_client.hpp src/net/tcp_client.cpp
inc/coro/net/tcp_server.hpp src/net/tcp_server.cpp
inc/coro/net/udp_peer.hpp src/net/udp_peer.cpp
inc/coro/coro.hpp
inc/coro/event.hpp src/event.cpp
inc/coro/fd.hpp
inc/coro/generator.hpp
inc/coro/io_scheduler.hpp src/io_scheduler.cpp
inc/coro/latch.hpp
inc/coro/mutex.hpp src/mutex.cpp
inc/coro/poll.hpp
@@ -79,17 +55,3 @@ if(${CMAKE_CXX_COMPILER_ID} MATCHES "GNU")
elseif(${CMAKE_CXX_COMPILER_ID} MATCHES "Clang")
message(FATAL_ERROR "Clang is currently not supported.")
endif()
if(LIBCORO_BUILD_TESTS)
if(LIBCORO_CODE_COVERAGE)
target_compile_options(${PROJECT_NAME} PRIVATE --coverage)
target_link_libraries(${PROJECT_NAME} PRIVATE gcov)
endif()
enable_testing()
add_subdirectory(test)
endif()
if(LIBCORO_BUILD_EXAMPLES)
add_subdirectory(examples)
endif()

README.md

@@ -50,82 +50,7 @@ The `coro::task<T>` is the main coroutine building block within `libcoro`. Use
```C++
#include <coro/coro.hpp>
#include <iostream>
int main()
{
// Task that takes a value and doubles it.
auto double_task = [](uint64_t x) -> coro::task<uint64_t> { co_return x * 2; };
// Create a task that awaits the doubling of its given value and
// then returns the result after adding 5.
auto double_and_add_5_task = [&](uint64_t input) -> coro::task<uint64_t> {
auto doubled = co_await double_task(input);
co_return doubled + 5;
};
auto output = coro::sync_wait(double_and_add_5_task(2));
std::cout << "Task1 output = " << output << "\n";
struct expensive_struct
{
std::string id{};
std::vector<std::string> records{};
expensive_struct() = default;
~expensive_struct() = default;
// Explicitly delete copy constructor and copy assign, force only moves!
// While the default move constructors will work for this struct the example
// inserts explicit print statements to show the task is moving the value
// out correctly.
expensive_struct(const expensive_struct&) = delete;
auto operator=(const expensive_struct&) -> expensive_struct& = delete;
expensive_struct(expensive_struct&& other) : id(std::move(other.id)), records(std::move(other.records))
{
std::cout << "expensive_struct() move constructor called\n";
}
auto operator=(expensive_struct&& other) -> expensive_struct&
{
if (std::addressof(other) != this)
{
id = std::move(other.id);
records = std::move(other.records);
}
std::cout << "expensive_struct() move assignment called\n";
return *this;
}
};
// Create a very large object and return it by moving the value so the
// contents do not have to be copied out.
auto move_output_task = []() -> coro::task<expensive_struct> {
expensive_struct data{};
data.id = "12345678-1234-5678-9012-123456781234";
for (size_t i = 10'000; i < 100'000; ++i)
{
data.records.emplace_back(std::to_string(i));
}
// Because the struct only has move constructors it will be forced to use
// them, no need to explicitly std::move(data).
co_return data;
};
auto data = coro::sync_wait(move_output_task());
std::cout << data.id << " has " << data.records.size() << " records.\n";
// std::unique_ptr<T> can also be used to return a larger object.
auto unique_ptr_task = []() -> coro::task<std::unique_ptr<uint64_t>> { co_return std::make_unique<uint64_t>(42); };
auto answer_to_everything = coro::sync_wait(unique_ptr_task());
if (answer_to_everything != nullptr)
{
std::cout << "Answer to everything = " << *answer_to_everything << "\n";
}
}
```
Expected output:
@@ -143,37 +68,7 @@ Answer to everything = 42
The `coro::generator<T>` construct is a coroutine which can generate one or more values.
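Before the usage example, a minimal sketch of the machinery behind `co_yield` may help. This is not libcoro's implementation (`coro::generator` adds iterators, exception propagation, and more); the `generator_sketch` type and its `next()`/`value()` interface are hypothetical, shown only to illustrate how a promise type stashes each yielded value and suspends back to the consumer.

```C++
#include <coroutine>
#include <exception>
#include <utility>

// Hypothetical sketch, not libcoro's coro::generator.
template<typename T>
struct generator_sketch
{
    struct promise_type
    {
        T value{};

        generator_sketch get_return_object()
        {
            return generator_sketch{std::coroutine_handle<promise_type>::from_promise(*this)};
        }
        std::suspend_always initial_suspend() noexcept { return {}; }
        std::suspend_always final_suspend() noexcept { return {}; }
        std::suspend_always yield_value(T v)
        {
            value = std::move(v); // Stash the yielded value, then suspend back to the consumer.
            return {};
        }
        void return_void() noexcept {}
        void unhandled_exception() { std::terminate(); }
    };

    explicit generator_sketch(std::coroutine_handle<promise_type> h) : m_handle{h} {}
    generator_sketch(const generator_sketch&) = delete;
    auto operator=(const generator_sketch&) -> generator_sketch& = delete;
    ~generator_sketch()
    {
        if (m_handle) { m_handle.destroy(); }
    }

    // Resume the coroutine until the next co_yield; false once the body finishes.
    bool next()
    {
        m_handle.resume();
        return !m_handle.done();
    }
    const T& value() const { return m_handle.promise().value; }

    std::coroutine_handle<promise_type> m_handle;
};
```

Driving this with `while (g.next()) { use(g.value()); }` is essentially what the range-for loop over the generator does in the example below.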
```C++
#include <coro/coro.hpp>
#include <iostream>
int main()
{
auto task = [](uint64_t count_to) -> coro::task<void> {
// Create a generator function that will yield an incrementing
// number each time it is called.
auto gen = []() -> coro::generator<uint64_t> {
uint64_t i = 0;
while (true)
{
co_yield i++;
}
};
// Generate the next number until it is greater than count_to.
for (auto val : gen())
{
std::cout << val << ", ";
if (val >= count_to)
{
break;
}
}
co_return;
};
coro::sync_wait(task(100));
}
```
Expected output:
@@ -186,32 +81,7 @@ $ ./examples/coro_generator
The `coro::event` is a thread-safe async tool that lets one or more waiters suspend until an event is set before proceeding. The current implementation resumes all waiters on the thread that sets the event. If the event is already set when a waiter goes to wait, the waiter simply continues executing with no suspend or wait time incurred.
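As a complement to the usage example below, here is a hedged sketch of how an event with exactly these semantics can be built: a single atomic word encodes either "set" (stored as the event's own address) or the head of a LIFO list of waiters. The `event_sketch` name is hypothetical; this is the general pattern, not libcoro's source.

```C++
#include <atomic>
#include <coroutine>

// Hypothetical sketch, not libcoro's source.
struct event_sketch
{
    struct awaiter
    {
        event_sketch&           e;
        awaiter*                next{nullptr};
        std::coroutine_handle<> handle{};

        bool await_ready() const noexcept { return e.m_state.load(std::memory_order_acquire) == &e; }
        bool await_suspend(std::coroutine_handle<> h) noexcept
        {
            handle     = h;
            void* head = e.m_state.load(std::memory_order_acquire);
            do
            {
                if (head == &e)
                {
                    return false; // Set while enqueuing; continue without suspending.
                }
                next = static_cast<awaiter*>(head);
            } while (!e.m_state.compare_exchange_weak(head, this, std::memory_order_release, std::memory_order_acquire));
            return true; // Parked; set() will resume this coroutine.
        }
        void await_resume() const noexcept {}
    };

    auto operator co_await() noexcept -> awaiter { return awaiter{*this}; }

    void set() noexcept
    {
        // Publish the set state and take ownership of the waiter list in one exchange.
        void* old = m_state.exchange(this, std::memory_order_acq_rel);
        if (old != this)
        {
            auto* w = static_cast<awaiter*>(old);
            while (w != nullptr)
            {
                auto* n = w->next;
                w->handle.resume(); // Each waiter resumes on the setting thread.
                w = n;
            }
        }
    }

    std::atomic<void*> m_state{nullptr}; // nullptr: not set and no waiters.
};
```

Note how `set()` walks the list and calls `resume()` directly, which is why every waiter resumes on the thread that set the event.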
```C++
#include <coro/coro.hpp>
#include <iostream>
int main()
{
coro::event e;
// These tasks will wait until the given event has been set before advancing.
auto make_wait_task = [](const coro::event& e, uint64_t i) -> coro::task<void> {
std::cout << "task " << i << " is waiting on the event...\n";
co_await e;
std::cout << "task " << i << " event triggered, now resuming.\n";
co_return;
};
// This task will trigger the event allowing all waiting tasks to proceed.
auto make_set_task = [](coro::event& e) -> coro::task<void> {
std::cout << "set task is triggering the event\n";
e.set();
co_return;
};
// Given more than a single task to synchronously wait on, use when_all() to execute all the
// tasks concurrently on this thread and then sync_wait() for them all to complete.
coro::sync_wait(coro::when_all(make_wait_task(e, 1), make_wait_task(e, 2), make_wait_task(e, 3), make_set_task(e)));
}
```
Expected output:
@@ -230,60 +100,7 @@ task 1 event triggered, now resuming.
The `coro::latch` is a thread-safe async tool that lets a single waiter suspend until all outstanding events have completed before proceeding.
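A small, mutex-based sketch of the latch idea follows, assuming a single waiter; `latch_sketch` is a hypothetical name and libcoro's actual implementation differs. It shows why the final `count_down()` resumes the waiting task inline on the worker's thread, the exact caveat the comments in the example below call out.

```C++
#include <coroutine>
#include <cstdint>
#include <mutex>

// Hypothetical sketch, not libcoro's source.
struct latch_sketch
{
    explicit latch_sketch(std::int64_t count) : m_count{count} {}

    struct awaiter
    {
        latch_sketch& l;
        bool await_ready() const noexcept { return false; }
        bool await_suspend(std::coroutine_handle<> h)
        {
            std::scoped_lock lk{l.m_mutex};
            if (l.m_count <= 0)
            {
                return false; // Everything already counted down; don't suspend.
            }
            l.m_waiter = h; // Park the waiting coroutine.
            return true;
        }
        void await_resume() const noexcept {}
    };

    auto operator co_await() noexcept -> awaiter { return awaiter{*this}; }

    void count_down()
    {
        std::coroutine_handle<> to_resume{};
        {
            std::scoped_lock lk{m_mutex};
            if (--m_count == 0)
            {
                to_resume = m_waiter;
            }
        }
        if (to_resume)
        {
            to_resume.resume(); // The last worker resumes the latch task inline.
        }
    }

    std::mutex              m_mutex;
    std::int64_t            m_count;
    std::coroutine_handle<> m_waiter{};
};
```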
```C++
#include <coro/coro.hpp>
#include <iostream>
int main()
{
// Complete worker tasks faster on a thread pool, using the io_scheduler version so the worker
// tasks can yield for a specific amount of time to mimic difficult work. The pool is only
// set up with a single thread to showcase yield_for().
coro::io_scheduler tp{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
// This task will wait until the given latch setters have completed.
auto make_latch_task = [](coro::latch& l) -> coro::task<void> {
// It seems like the dependent worker tasks could be created here, but in that case it would
// be superior to simply do: `co_await coro::when_all(tasks);`
// It is also important to note that the last dependent task will resume the waiting latch
// task prior to actually completing -- thus the dependent task's frame could be destroyed
// by the latch task completing before it gets a chance to finish after calling resume() on
// the latch task!
std::cout << "latch task is now waiting on all children tasks...\n";
co_await l;
std::cout << "latch task dependency tasks completed, resuming.\n";
co_return;
};
// This task does 'work' and counts down on the latch when completed. The final child task to
// complete will end up resuming the latch task when the latch's count reaches zero.
auto make_worker_task = [](coro::io_scheduler& tp, coro::latch& l, int64_t i) -> coro::task<void> {
// Schedule the worker task onto the thread pool.
co_await tp.schedule();
std::cout << "worker task " << i << " is working...\n";
// Do some expensive calculations, yield to mimic work...! It's also important never to use
// std::this_thread::sleep_for() within the context of coroutines; it will block the thread
// and prevent other tasks that are ready to execute from running.
co_await tp.yield_for(std::chrono::milliseconds{i * 20});
std::cout << "worker task " << i << " is done, counting down on the latch\n";
l.count_down();
co_return;
};
const int64_t num_tasks{5};
coro::latch l{num_tasks};
std::vector<coro::task<void>> tasks{};
// Make the latch task first so it correctly waits for all worker tasks to count down.
tasks.emplace_back(make_latch_task(l));
for (int64_t i = 1; i <= num_tasks; ++i)
{
tasks.emplace_back(make_worker_task(tp, l, i));
}
// Wait for all tasks to complete.
coro::sync_wait(coro::when_all(std::move(tasks)));
}
```
Expected output:
@@ -311,44 +128,7 @@ Its important to note that upon releasing the mutex that thread unlocking the mu
The suspended waiter queue is LIFO; however, the worker that currently holds the mutex will periodically 'acquire' the current LIFO waiter list to process those waiters when its internal list becomes empty. This effectively resets the suspended waiter list to empty, and the worker holding the mutex will work through the newly acquired LIFO queue of waiters. It would be possible to reverse this list to be as fair as possible; however, not reversing it should result in better throughput, at possibly the cost of some latency for the first suspended waiters on the 'current' LIFO queue. Reversing the list would instead introduce latency for all queue waiters, since it would be done every time the LIFO queue is swapped.
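A hedged sketch of the waiter-list swap described above, with hypothetical `waiter` and `lifo_mutex_sketch` types rather than libcoro's source: incoming waiters push onto a shared atomic LIFO list, and the lock holder drains a private list, exchanging the shared one for `nullptr` only when the private list runs dry.

```C++
#include <atomic>
#include <coroutine>

// Hypothetical illustration of the mechanism, not libcoro's code.
struct waiter
{
    std::coroutine_handle<> handle{};
    waiter*                 next{nullptr};
};

struct lifo_mutex_sketch
{
    std::atomic<waiter*> m_waiters{nullptr}; // Incoming waiters push here (LIFO).
    waiter*              m_internal{nullptr}; // List currently being drained.

    // Called by the lock holder when handing the mutex to the next waiter.
    waiter* next_waiter()
    {
        if (m_internal == nullptr)
        {
            // Acquire the entire current LIFO list in one exchange; new waiters
            // now push onto an empty list. Not reversing m_internal here is the
            // throughput-over-fairness trade-off discussed above.
            m_internal = m_waiters.exchange(nullptr, std::memory_order_acq_rel);
        }
        waiter* w = m_internal;
        if (w != nullptr)
        {
            m_internal = w->next;
        }
        return w;
    }
};
```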
```C++
#include <coro/coro.hpp>
#include <iostream>
int main()
{
coro::thread_pool tp{coro::thread_pool::options{.thread_count = 4}};
std::vector<uint64_t> output{};
coro::mutex mutex;
auto make_critical_section_task = [&](uint64_t i) -> coro::task<void> {
co_await tp.schedule();
// To acquire a mutex lock co_await its lock() function. Upon acquiring the lock the
// lock() function returns a coro::scoped_lock that holds the mutex and automatically
// unlocks the mutex upon destruction. This behaves just like std::scoped_lock.
{
auto scoped_lock = co_await mutex.lock();
output.emplace_back(i);
} // <-- scoped lock unlocks the mutex here.
co_return;
};
const size_t num_tasks{100};
std::vector<coro::task<void>> tasks{};
tasks.reserve(num_tasks);
for (size_t i = 1; i <= num_tasks; ++i)
{
tasks.emplace_back(make_critical_section_task(i));
}
coro::sync_wait(coro::when_all(std::move(tasks)));
// The output will be variable per run depending on how the tasks are picked up on the
// thread pool workers.
for (const auto& value : output)
{
std::cout << value << ", ";
}
}
```
Expected output, note that the output will vary from run to run based on how the thread pool workers
@@ -367,61 +147,7 @@ The `coro::shared_mutex` requires a `executor_type` when constructed to be able
```C++
#include <coro/coro.hpp>
#include <iostream>
int main()
{
// Shared mutexes require an executor type to be able to wake up multiple shared waiters when
// there is an exclusive lock holder releasing the lock. This example uses a single thread
// to also show the interleaving of coroutines acquiring the shared lock in shared and
// exclusive mode as they resume and suspend in a linear manner. Ideally the thread pool
// executor would have more than 1 thread to resume all shared waiters in parallel.
auto tp = std::make_shared<coro::thread_pool>(coro::thread_pool::options{.thread_count = 1});
coro::shared_mutex mutex{tp};
auto make_shared_task = [&](uint64_t i) -> coro::task<void> {
co_await tp->schedule();
{
std::cerr << "shared task " << i << " lock_shared()\n";
auto scoped_lock = co_await mutex.lock_shared();
std::cerr << "shared task " << i << " lock_shared() acquired\n";
/// Immediately yield so the other shared tasks also acquire in shared state
/// while this task currently holds the mutex in shared state.
co_await tp->yield();
std::cerr << "shared task " << i << " unlock_shared()\n";
}
co_return;
};
auto make_exclusive_task = [&]() -> coro::task<void> {
co_await tp->schedule();
std::cerr << "exclusive task lock()\n";
auto scoped_lock = co_await mutex.lock();
std::cerr << "exclusive task lock() acquired\n";
// Do the exclusive work..
std::cerr << "exclusive task unlock()\n";
co_return;
};
// Create 3 shared tasks that will acquire the mutex in a shared state.
const size_t num_tasks{3};
std::vector<coro::task<void>> tasks{};
for (size_t i = 1; i <= num_tasks; ++i)
{
tasks.emplace_back(make_shared_task(i));
}
// Create an exclusive task.
tasks.emplace_back(make_exclusive_task());
// Create 3 more shared tasks that will be blocked until the exclusive task completes.
for (size_t i = num_tasks + 1; i <= num_tasks * 2; ++i)
{
tasks.emplace_back(make_shared_task(i));
}
coro::sync_wait(coro::when_all(std::move(tasks)));
}
```
Example output, notice how the (4,5,6) shared tasks attempt to acquire the lock in a shared state but are blocked behind the exclusive waiter until it completes:
@@ -455,35 +181,7 @@ shared task 6 unlock_shared()
The `coro::semaphore` is a thread-safe async tool to protect a limited number of resources by only allowing so many consumers to acquire them at any one time. The `coro::semaphore` also has a maximum number of resources, set by its constructor. This means that if a resource is produced or released when the semaphore is at its maximum resource availability, the release operation will await for space to become available. This is useful for a ringbuffer-type situation where resources are produced and then consumed, but it has no effect on a semaphore's usage if there is a known, fixed quantity of resources to start with that are acquired and then released back.
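A minimal, mutex-based sketch of the acquire/release flow described above; `semaphore_sketch` is a hypothetical type, and the real `coro::semaphore` is implemented differently (this sketch also omits the awaiting-release behavior at maximum capacity).

```C++
#include <coroutine>
#include <cstddef>
#include <deque>
#include <mutex>

// Hypothetical sketch, not libcoro's source.
struct semaphore_sketch
{
    explicit semaphore_sketch(std::ptrdiff_t count) : m_count{count} {}

    struct acquire_op
    {
        semaphore_sketch& s;
        bool await_ready() const noexcept { return false; }
        bool await_suspend(std::coroutine_handle<> h)
        {
            std::scoped_lock lk{s.m_mutex};
            if (s.m_count > 0)
            {
                --s.m_count;
                return false; // A resource is free; continue immediately.
            }
            s.m_waiters.push_back(h); // Out of resources; park the coroutine.
            return true;
        }
        void await_resume() const noexcept {}
    };

    acquire_op acquire() noexcept { return acquire_op{*this}; }

    void release()
    {
        std::coroutine_handle<> next{};
        {
            std::scoped_lock lk{m_mutex};
            if (!m_waiters.empty())
            {
                next = m_waiters.front(); // Hand the resource straight to a waiter.
                m_waiters.pop_front();
            }
            else
            {
                ++m_count;
            }
        }
        if (next) { next.resume(); }
    }

    std::mutex                          m_mutex;
    std::ptrdiff_t                      m_count;
    std::deque<std::coroutine_handle<>> m_waiters;
};
```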
```C++
#include <coro/coro.hpp>
#include <iostream>
int main()
{
// Have more threads/tasks than the semaphore will allow for at any given point in time.
coro::thread_pool tp{coro::thread_pool::options{.thread_count = 8}};
coro::semaphore semaphore{2};
auto make_rate_limited_task = [&](uint64_t task_num) -> coro::task<void> {
co_await tp.schedule();
// This will only allow 2 tasks through at any given point in time, all other tasks will
// await the resource to be available before proceeding.
co_await semaphore.acquire();
std::cout << task_num << ", ";
semaphore.release();
co_return;
};
const size_t num_tasks{100};
std::vector<coro::task<void>> tasks{};
for (size_t i = 1; i <= num_tasks; ++i)
{
tasks.emplace_back(make_rate_limited_task(i));
}
coro::sync_wait(coro::when_all(std::move(tasks)));
}
```
Expected output, note that there is no lock around the `std::cout` so some of the output isn't perfect.
@@ -498,80 +196,7 @@ The `coro::ring_buffer<element, num_elements>` is thread safe async multi-produc
The `coro::ring_buffer` also works with `coro::stop_signal`: if the ring buffer's `stop_signal_notify_waiters()` function is called, then any producers or consumers that are suspended and waiting will be awoken by throwing a `coro::stop_signal`. This is useful for long-running daemons whose code should always suspend when data cannot be produced or consumed, but that need to break out of the suspend upon shutdown.
```C++
#include <coro/coro.hpp>
#include <iostream>
int main()
{
const size_t iterations = 100;
const size_t consumers = 4;
coro::thread_pool tp{coro::thread_pool::options{.thread_count = 4}};
coro::ring_buffer<uint64_t, 16> rb{};
coro::mutex m{};
std::vector<coro::task<void>> tasks{};
auto make_producer_task = [&]() -> coro::task<void> {
co_await tp.schedule();
for (size_t i = 1; i <= iterations; ++i)
{
co_await rb.produce(i);
}
// Wait for the ring buffer to clear all items so it's a clean stop.
while (!rb.empty())
{
co_await tp.yield();
}
// Now that the ring buffer is empty, signal to all the consumers it's time to stop. Note that
// the stop signal works on producers as well, but this example only uses 1 producer.
{
auto scoped_lock = co_await m.lock();
std::cerr << "\nproducer is sending stop signal";
}
rb.stop_signal_notify_waiters();
co_return;
};
auto make_consumer_task = [&](size_t id) -> coro::task<void> {
co_await tp.schedule();
try
{
while (true)
{
auto value = co_await rb.consume();
{
auto scoped_lock = co_await m.lock();
std::cout << "(id=" << id << ", v=" << value << "), ";
}
// Mimic doing some work on the consumed value.
co_await tp.yield();
}
}
catch (const coro::stop_signal&)
{
auto scoped_lock = co_await m.lock();
std::cerr << "\nconsumer " << id << " shutting down, stop signal received";
}
co_return;
};
// Create N consumers
for (size_t i = 0; i < consumers; ++i)
{
tasks.emplace_back(make_consumer_task(i));
}
// Create 1 producer.
tasks.emplace_back(make_producer_task());
// Wait for all the values to be produced and consumed through the ring buffer.
coro::sync_wait(coro::when_all(std::move(tasks)));
}
```
Expected output:
@@ -589,84 +214,7 @@ consumer 3 shutting down, stop signal received
`coro::thread_pool` is a statically sized pool of worker threads that executes scheduled coroutines from a FIFO queue. To schedule a coroutine on a thread pool, the pool's `schedule()` function should be `co_await`ed to transfer execution from the current thread to a thread pool worker thread. It's important to note that scheduling first places the coroutine into the FIFO queue, to be picked up by the first available thread in the pool, e.g. there could be a delay if there is a lot of work queued up.
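A hedged sketch of what `co_await tp.schedule()` has to accomplish, assuming a simple mutex-guarded FIFO queue; `schedule_operation` and its members are hypothetical names, not libcoro's implementation. Suspending the awaiting coroutine hands its handle to the pool, and a worker loop (not shown) pops handles and resumes them on its own thread.

```C++
#include <coroutine>
#include <deque>
#include <mutex>

// Hypothetical sketch of the schedule() mechanism, not libcoro's code.
struct schedule_operation
{
    std::deque<std::coroutine_handle<>>& m_queue;
    std::mutex&                          m_mutex;

    bool await_ready() const noexcept { return false; } // Always suspend.
    void await_suspend(std::coroutine_handle<> handle)
    {
        std::scoped_lock lk{m_mutex};
        m_queue.push_back(handle); // FIFO: workers resume the oldest handle first.
    }
    void await_resume() const noexcept {} // Runs on the worker thread that resumed us.
};
```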
```C++
#include <coro/coro.hpp>
#include <iostream>
#include <random>
int main()
{
coro::thread_pool tp{coro::thread_pool::options{
// By default a thread pool will use std::thread::hardware_concurrency() as the
// number of worker threads in the pool, but this can be changed via this
// thread_count option. This example will use 4.
.thread_count = 4,
// Upon starting each worker thread an optional lambda callback with the worker's
// index can be called to make thread changes, perhaps priority or change the thread's
// name.
.on_thread_start_functor = [](std::size_t worker_idx) -> void {
std::cout << "thread pool worker " << worker_idx << " is starting up.\n";
},
// Upon stopping each worker thread an optional lambda callback with the worker's
// index can be called.
.on_thread_stop_functor = [](std::size_t worker_idx) -> void {
std::cout << "thread pool worker " << worker_idx << " is shutting down.\n";
}}};
auto offload_task = [&](uint64_t child_idx) -> coro::task<uint64_t> {
// Start by scheduling this offload worker task onto the thread pool.
co_await tp.schedule();
// Now any code below this schedule() line will be executed on one of the thread pools
// worker threads.
// Mimic some expensive task that should be run on a background thread...
std::random_device rd;
std::mt19937 gen{rd()};
std::uniform_int_distribution<> d{0, 1};
size_t calculation{0};
for (size_t i = 0; i < 1'000'000; ++i)
{
calculation += d(gen);
// Let's be nice and yield() to let other coroutines on the thread pool have some cpu
// time. This isn't necessary but is illustrated to show how tasks can cooperatively
// yield control at certain points of execution. It's important never to call
// std::this_thread::sleep_for() within the context of a coroutine; that will block the
// thread and prevent other coroutines that are ready for execution from starting. Always
// use yield(), or within the context of a coro::io_scheduler you can use yield_for(amount).
if (i == 500'000)
{
std::cout << "Task " << child_idx << " is yielding()\n";
co_await tp.yield();
}
}
co_return calculation;
};
auto primary_task = [&]() -> coro::task<uint64_t> {
const size_t num_children{10};
std::vector<coro::task<uint64_t>> child_tasks{};
child_tasks.reserve(num_children);
for (size_t i = 0; i < num_children; ++i)
{
child_tasks.emplace_back(offload_task(i));
}
// Wait for the thread pool workers to process all child tasks.
auto results = co_await coro::when_all(std::move(child_tasks));
// Sum up the results of the completed child tasks.
size_t calculation{0};
for (const auto& task : results)
{
calculation += task.return_value();
}
co_return calculation;
};
auto result = coro::sync_wait(primary_task());
std::cout << "calculated thread pool result = " << result << "\n";
}
```
Example output (will vary based on threads):
@@ -710,150 +258,7 @@ Before getting to an example there are two methods of scheduling work onto an i/o scheduler
The example provided here shows an i/o scheduler that spins up a basic `coro::net::tcp_server` and a `coro::net::tcp_client` that will connect to each other and then send a request and a response.
```C++
#include <coro/coro.hpp>
#include <iostream>
int main()
{
auto scheduler = std::make_shared<coro::io_scheduler>(coro::io_scheduler::options{
// The scheduler will spawn a dedicated event processing thread. This is the default, but
// it is possible to use 'manual' and call 'process_events()' to drive the scheduler yourself.
.thread_strategy = coro::io_scheduler::thread_strategy_t::spawn,
// If the scheduler is in spawn mode this functor is called upon starting the dedicated
// event processor thread.
.on_io_thread_start_functor = [] { std::cout << "io_scheduler::process event thread start\n"; },
// If the scheduler is in spawn mode this functor is called upon stopping the dedicated
// event process thread.
.on_io_thread_stop_functor = [] { std::cout << "io_scheduler::process event thread stop\n"; },
// The io scheduler uses a coro::thread_pool to process the events or tasks it is given.
// The tasks are not processed inline on the dedicated event processor thread so events can
// be received and handled as soon as a worker thread is available. See the coro::thread_pool
// for the available options and their descriptions.
.pool =
coro::thread_pool::options{
.thread_count = 2,
.on_thread_start_functor =
[](size_t i) { std::cout << "io_scheduler::thread_pool worker " << i << " starting\n"; },
.on_thread_stop_functor =
[](size_t i) { std::cout << "io_scheduler::thread_pool worker " << i << " stopping\n"; }}});
auto make_server_task = [&]() -> coro::task<void> {
// Start by creating a tcp server; we'll do this before putting it into the scheduler so
// it is immediately available for the client to connect since this will create a socket,
// bind the socket and start listening on that socket. See tcp_server for more details on
// how to specify the local address and port to bind to as well as enabling SSL/TLS.
coro::net::tcp_server server{scheduler};
// Now schedule this task onto the scheduler.
co_await scheduler->schedule();
// Wait for an incoming connection and accept it.
auto poll_status = co_await server.poll();
if (poll_status != coro::poll_status::event)
{
co_return; // Handle error, see poll_status for detailed error states.
}
// Accept the incoming client connection.
auto client = server.accept();
// Verify the incoming connection was accepted correctly.
if (!client.socket().is_valid())
{
co_return; // Handle error.
}
// Now wait for the client message, this message is small enough it should always arrive
// with a single recv() call.
poll_status = co_await client.poll(coro::poll_op::read);
if (poll_status != coro::poll_status::event)
{
co_return; // Handle error.
}
// Prepare a buffer and recv() the client's message. This function returns the recv() status
// as well as a span<char> that overlaps the given buffer for the bytes that were read. This
// can be used to resize the buffer or work with the bytes without modifying the buffer at all.
std::string request(256, '\0');
auto [recv_status, recv_bytes] = client.recv(request);
if (recv_status != coro::net::recv_status::ok)
{
co_return; // Handle error, see net::recv_status for detailed error states.
}
request.resize(recv_bytes.size());
std::cout << "server: " << request << "\n";
// Make sure the client socket can be written to.
poll_status = co_await client.poll(coro::poll_op::write);
if (poll_status != coro::poll_status::event)
{
co_return; // Handle error.
}
// Send the server response to the client.
// This message is small enough that it will be sent in a single send() call, but to demonstrate
// how to use the 'remaining' portion of the send() result this is wrapped in a loop until
// all the bytes are sent.
std::string response = "Hello from server.";
std::span<const char> remaining = response;
do
{
// Optimistically send() prior to polling.
auto [send_status, r] = client.send(remaining);
if (send_status != coro::net::send_status::ok)
{
co_return; // Handle error, see net::send_status for detailed error states.
}
if (r.empty())
{
break; // The entire message has been sent.
}
// Re-assign remaining bytes for the next loop iteration and poll for the socket to be
// able to be written to again.
remaining = r;
auto pstatus = co_await client.poll(coro::poll_op::write);
if (pstatus != coro::poll_status::event)
{
co_return; // Handle error.
}
} while (true);
co_return;
};
auto make_client_task = [&]() -> coro::task<void> {
// Immediately schedule onto the scheduler.
co_await scheduler->schedule();
// Create the tcp_client with the default settings, see tcp_client for how to set the
// ip address, port, and optionally enabling SSL/TLS.
coro::net::tcp_client client{scheduler};
// Omitting error checking code for the client; each step should check the status and
// verify the number of bytes sent or received.
// Connect to the server.
co_await client.connect();
// Send the request data.
client.send(std::string_view{"Hello from client."});
// Wait for the response and receive it.
co_await client.poll(coro::poll_op::read);
std::string response(256, '\0');
auto [recv_status, recv_bytes] = client.recv(response);
response.resize(recv_bytes.size());
std::cout << "client: " << response << "\n";
co_return;
};
// Create and wait for the server and client tasks to complete.
coro::sync_wait(coro::when_all(make_server_task(), make_client_task()));
}
```
Example output:
@@ -875,88 +280,7 @@ io_scheduler::process event thread stop
All tasks that are stored within a `coro::task_container` must have a `void` return type since their result cannot be accessed due to the task's lifetime being indeterminate.
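To illustrate why a stored task's result cannot be exposed, here is a hedged sketch of the extreme version of fire-and-forget, using a hypothetical `detached_task` type (`coro::task_container` manages lifetimes differently, reaping stored tasks as they finish): once a coroutine frame is allowed to free itself at its final suspend point, there is nowhere left to read a return value from.

```C++
#include <coroutine>
#include <exception>

// Hypothetical fire-and-forget coroutine type, not part of libcoro.
struct detached_task
{
    struct promise_type
    {
        detached_task get_return_object() noexcept { return {}; }
        std::suspend_never initial_suspend() noexcept { return {}; }
        // Not suspending at the final point lets the frame destroy itself,
        // so no result could ever be read back -- hence void-only tasks.
        std::suspend_never final_suspend() noexcept { return {}; }
        void return_void() noexcept {}
        void unhandled_exception() noexcept { std::terminate(); }
    };
};
```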
```C++
#include <coro/coro.hpp>
#include <iostream>
int main()
{
auto scheduler = std::make_shared<coro::io_scheduler>(
coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}});
auto make_server_task = [&]() -> coro::task<void> {
// This is the task that will handle processing a client's requests.
auto serve_client = [](coro::net::tcp_client client) -> coro::task<void> {
size_t requests{1};
while (true)
{
// Continue to accept more requests until the client closes the connection.
co_await client.poll(coro::poll_op::read);
std::string request(64, '\0');
auto [recv_status, recv_bytes] = client.recv(request);
if (recv_status == coro::net::recv_status::closed)
{
break;
}
request.resize(recv_bytes.size());
std::cout << "server: " << request << "\n";
auto response = "Hello from server " + std::to_string(requests);
client.send(response);
++requests;
}
co_return;
};
// Spin up the tcp_server and schedule it onto the io_scheduler.
coro::net::tcp_server server{scheduler};
co_await scheduler->schedule();
// All incoming connections will be stored into the task container until they are completed.
coro::task_container tc{scheduler};
// Wait for an incoming connection and accept it, this example will only use 1 connection.
co_await server.poll();
auto client = server.accept();
// Store the task that will serve the client into the container and immediately begin executing it
// on the task container's thread pool, which is the same as the scheduler.
tc.start(serve_client(std::move(client)));
// Wait for all clients to complete before shutting down the tcp_server.
co_await tc.garbage_collect_and_yield_until_empty();
co_return;
};
auto make_client_task = [&](size_t request_count) -> coro::task<void> {
co_await scheduler->schedule();
coro::net::tcp_client client{scheduler};
co_await client.connect();
// Send N requests on the same connection and wait for the server response to each one.
for (size_t i = 1; i <= request_count; ++i)
{
// Send the request data.
auto request = "Hello from client " + std::to_string(i);
client.send(request);
co_await client.poll(coro::poll_op::read);
std::string response(64, '\0');
auto [recv_status, recv_bytes] = client.recv(response);
response.resize(recv_bytes.size());
std::cout << "client: " << response << "\n";
}
co_return; // Upon exiting the tcp_client will close its connection to the server.
};
coro::sync_wait(coro::when_all(make_server_task(), make_client_task(5)));
}
```

examples/CMakeLists.txt

@@ -1,64 +0,0 @@
cmake_minimum_required(VERSION 3.0)
project(libcoro_examples)
add_executable(coro_task coro_task.cpp)
target_compile_features(coro_task PUBLIC cxx_std_20)
target_link_libraries(coro_task PUBLIC libcoro)
add_executable(coro_generator coro_generator.cpp)
target_compile_features(coro_generator PUBLIC cxx_std_20)
target_link_libraries(coro_generator PUBLIC libcoro)
add_executable(coro_event coro_event.cpp)
target_compile_features(coro_event PUBLIC cxx_std_20)
target_link_libraries(coro_event PUBLIC libcoro)
add_executable(coro_latch coro_latch.cpp)
target_compile_features(coro_latch PUBLIC cxx_std_20)
target_link_libraries(coro_latch PUBLIC libcoro)
add_executable(coro_mutex coro_mutex.cpp)
target_compile_features(coro_mutex PUBLIC cxx_std_20)
target_link_libraries(coro_mutex PUBLIC libcoro)
add_executable(coro_thread_pool coro_thread_pool.cpp)
target_compile_features(coro_thread_pool PUBLIC cxx_std_20)
target_link_libraries(coro_thread_pool PUBLIC libcoro)
add_executable(coro_io_scheduler coro_io_scheduler.cpp)
target_compile_features(coro_io_scheduler PUBLIC cxx_std_20)
target_link_libraries(coro_io_scheduler PUBLIC libcoro)
add_executable(coro_task_container coro_task_container.cpp)
target_compile_features(coro_task_container PUBLIC cxx_std_20)
target_link_libraries(coro_task_container PUBLIC libcoro)
add_executable(coro_semaphore coro_semaphore.cpp)
target_compile_features(coro_semaphore PUBLIC cxx_std_20)
target_link_libraries(coro_semaphore PUBLIC libcoro)
add_executable(coro_ring_buffer coro_ring_buffer.cpp)
target_compile_features(coro_ring_buffer PUBLIC cxx_std_20)
target_link_libraries(coro_ring_buffer PUBLIC libcoro)
add_executable(coro_shared_mutex coro_shared_mutex.cpp)
target_compile_features(coro_shared_mutex PUBLIC cxx_std_20)
target_link_libraries(coro_shared_mutex PUBLIC libcoro)
if(${CMAKE_CXX_COMPILER_ID} MATCHES "GNU")
target_compile_options(coro_task PUBLIC -fcoroutines -Wall -Wextra -pipe)
target_compile_options(coro_generator PUBLIC -fcoroutines -Wall -Wextra -pipe)
target_compile_options(coro_event PUBLIC -fcoroutines -Wall -Wextra -pipe)
target_compile_options(coro_latch PUBLIC -fcoroutines -Wall -Wextra -pipe)
target_compile_options(coro_mutex PUBLIC -fcoroutines -Wall -Wextra -pipe)
target_compile_options(coro_thread_pool PUBLIC -fcoroutines -Wall -Wextra -pipe)
target_compile_options(coro_io_scheduler PUBLIC -fcoroutines -Wall -Wextra -pipe)
target_compile_options(coro_task_container PUBLIC -fcoroutines -Wall -Wextra -pipe)
target_compile_options(coro_semaphore PUBLIC -fcoroutines -Wall -Wextra -pipe)
target_compile_options(coro_ring_buffer PUBLIC -fcoroutines -Wall -Wextra -pipe)
target_compile_options(coro_shared_mutex PUBLIC -fcoroutines -Wall -Wextra -pipe)
elseif(${CMAKE_CXX_COMPILER_ID} MATCHES "Clang")
message(FATAL_ERROR "Clang is currently not supported.")
else()
message(FATAL_ERROR "Unsupported compiler.")
endif()

examples/coro_event.cpp

@@ -1,26 +0,0 @@
#include <coro/coro.hpp>
#include <iostream>
int main()
{
coro::event e;
// These tasks will wait until the given event has been set before advancing.
auto make_wait_task = [](const coro::event& e, uint64_t i) -> coro::task<void> {
std::cout << "task " << i << " is waiting on the event...\n";
co_await e;
std::cout << "task " << i << " event triggered, now resuming.\n";
co_return;
};
// This task will trigger the event allowing all waiting tasks to proceed.
auto make_set_task = [](coro::event& e) -> coro::task<void> {
std::cout << "set task is triggering the event\n";
e.set();
co_return;
};
// Given more than a single task to synchronously wait on, use when_all() to execute all the
// tasks concurrently on this thread and then sync_wait() for them all to complete.
coro::sync_wait(coro::when_all(make_wait_task(e, 1), make_wait_task(e, 2), make_wait_task(e, 3), make_set_task(e)));
}

examples/coro_generator.cpp

@@ -1,31 +0,0 @@
#include <coro/coro.hpp>
#include <iostream>
int main()
{
auto task = [](uint64_t count_to) -> coro::task<void> {
// Create a generator function that will yield an incrementing
// number each time it is called.
auto gen = []() -> coro::generator<uint64_t> {
uint64_t i = 0;
while (true)
{
co_yield i++;
}
};
// Generate the next number until it is greater than count_to.
for (auto val : gen())
{
std::cout << val << ", ";
if (val >= count_to)
{
break;
}
}
co_return;
};
coro::sync_wait(task(100));
}

examples/coro_io_scheduler.cpp

@@ -1,144 +0,0 @@
#include <coro/coro.hpp>
#include <iostream>
int main()
{
auto scheduler = std::make_shared<coro::io_scheduler>(coro::io_scheduler::options{
// The scheduler will spawn a dedicated event processing thread. This is the default, but
// it is possible to use 'manual' and call 'process_events()' to drive the scheduler yourself.
.thread_strategy = coro::io_scheduler::thread_strategy_t::spawn,
// If the scheduler is in spawn mode this functor is called upon starting the dedicated
// event processor thread.
.on_io_thread_start_functor = [] { std::cout << "io_scheduler::process event thread start\n"; },
// If the scheduler is in spawn mode this functor is called upon stopping the dedicated
// event process thread.
.on_io_thread_stop_functor = [] { std::cout << "io_scheduler::process event thread stop\n"; },
// The io scheduler uses a coro::thread_pool to process the events or tasks it is given.
// The tasks are not processed inline on the dedicated event processor thread so events can
// be received and handled as soon as a worker thread is available. See the coro::thread_pool
// for the available options and their descriptions.
.pool =
coro::thread_pool::options{
.thread_count = 2,
.on_thread_start_functor =
[](size_t i) { std::cout << "io_scheduler::thread_pool worker " << i << " starting\n"; },
.on_thread_stop_functor =
[](size_t i) { std::cout << "io_scheduler::thread_pool worker " << i << " stopping\n"; }}});
auto make_server_task = [&]() -> coro::task<void> {
// Start by creating a tcp server; we'll do this before putting it into the scheduler so
// it is immediately available for the client to connect since this will create a socket,
// bind the socket and start listening on that socket. See tcp_server for more details on
// how to specify the local address and port to bind to as well as enabling SSL/TLS.
coro::net::tcp_server server{scheduler};
// Now schedule this task onto the scheduler.
co_await scheduler->schedule();
// Wait for an incoming connection and accept it.
auto poll_status = co_await server.poll();
if (poll_status != coro::poll_status::event)
{
co_return; // Handle error, see poll_status for detailed error states.
}
// Accept the incoming client connection.
auto client = server.accept();
// Verify the incoming connection was accepted correctly.
if (!client.socket().is_valid())
{
co_return; // Handle error.
}
// Now wait for the client message, this message is small enough it should always arrive
// with a single recv() call.
poll_status = co_await client.poll(coro::poll_op::read);
if (poll_status != coro::poll_status::event)
{
co_return; // Handle error.
}
// Prepare a buffer and recv() the client's message. This function returns the recv() status
// as well as a span<char> that overlaps the given buffer for the bytes that were read. This
// can be used to resize the buffer or work with the bytes without modifying the buffer at all.
std::string request(256, '\0');
auto [recv_status, recv_bytes] = client.recv(request);
if (recv_status != coro::net::recv_status::ok)
{
co_return; // Handle error, see net::recv_status for detailed error states.
}
request.resize(recv_bytes.size());
std::cout << "server: " << request << "\n";
// Make sure the client socket can be written to.
poll_status = co_await client.poll(coro::poll_op::write);
if (poll_status != coro::poll_status::event)
{
co_return; // Handle error.
}
// Send the server response to the client.
// This message is small enough that it will be sent in a single send() call, but to demonstrate
// how to use the 'remaining' portion of the send() result this is wrapped in a loop until
// all the bytes are sent.
std::string response = "Hello from server.";
std::span<const char> remaining = response;
do
{
// Optimistically send() prior to polling.
auto [send_status, r] = client.send(remaining);
if (send_status != coro::net::send_status::ok)
{
co_return; // Handle error, see net::send_status for detailed error states.
}
if (r.empty())
{
break; // The entire message has been sent.
}
// Re-assign remaining bytes for the next loop iteration and poll for the socket to be
// able to be written to again.
remaining = r;
auto pstatus = co_await client.poll(coro::poll_op::write);
if (pstatus != coro::poll_status::event)
{
co_return; // Handle error.
}
} while (true);
co_return;
};
auto make_client_task = [&]() -> coro::task<void> {
// Immediately schedule onto the scheduler.
co_await scheduler->schedule();
// Create the tcp_client with the default settings, see tcp_client for how to set the
// ip address, port, and optionally enabling SSL/TLS.
coro::net::tcp_client client{scheduler};
// Omitting error checking code for the client; each step should check the status and
// verify the number of bytes sent or received.
// Connect to the server.
co_await client.connect();
// Send the request data.
client.send(std::string_view{"Hello from client."});
// Wait for the response and receive it.
co_await client.poll(coro::poll_op::read);
std::string response(256, '\0');
auto [recv_status, recv_bytes] = client.recv(response);
response.resize(recv_bytes.size());
std::cout << "client: " << response << "\n";
co_return;
};
// Create and wait for the server and client tasks to complete.
coro::sync_wait(coro::when_all(make_server_task(), make_client_task()));
}

examples/coro_latch.cpp

@@ -1,54 +0,0 @@
#include <coro/coro.hpp>
#include <iostream>
int main()
{
// Complete worker tasks faster on a thread pool, using the io_scheduler version so the worker
// tasks can yield for a specific amount of time to mimic difficult work. The pool is only
// set up with a single thread to showcase yield_for().
coro::io_scheduler tp{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
// This task will wait until the given latch setters have completed.
auto make_latch_task = [](coro::latch& l) -> coro::task<void> {
// It seems like the dependent worker tasks could be created here, but in that case it would
// be superior to simply do: `co_await coro::when_all(tasks);`
// It is also important to note that the last dependent task will resume the waiting latch
// task prior to actually completing -- thus the dependent task's frame could be destroyed
// by the latch task completing before it gets a chance to finish after calling resume() on
// the latch task!
std::cout << "latch task is now waiting on all children tasks...\n";
co_await l;
std::cout << "latch task dependency tasks completed, resuming.\n";
co_return;
};
// This task does 'work' and counts down on the latch when completed. The final child task to
// complete will end up resuming the latch task when the latch's count reaches zero.
auto make_worker_task = [](coro::io_scheduler& tp, coro::latch& l, int64_t i) -> coro::task<void> {
// Schedule the worker task onto the thread pool.
co_await tp.schedule();
std::cout << "worker task " << i << " is working...\n";
// Do some expensive calculations, yield to mimic work...! It's also important never to use
// std::this_thread::sleep_for() within the context of coroutines; it will block the thread
// and prevent other tasks that are ready to execute from running.
co_await tp.yield_for(std::chrono::milliseconds{i * 20});
std::cout << "worker task " << i << " is done, counting down on the latch\n";
l.count_down();
co_return;
};
const int64_t num_tasks{5};
coro::latch l{num_tasks};
std::vector<coro::task<void>> tasks{};
// Make the latch task first so it correctly waits for all worker tasks to count down.
tasks.emplace_back(make_latch_task(l));
for (int64_t i = 1; i <= num_tasks; ++i)
{
tasks.emplace_back(make_worker_task(tp, l, i));
}
// Wait for all tasks to complete.
coro::sync_wait(coro::when_all(std::move(tasks)));
}

examples/coro_mutex.cpp

@@ -1,38 +0,0 @@
#include <coro/coro.hpp>
#include <iostream>
int main()
{
coro::thread_pool tp{coro::thread_pool::options{.thread_count = 4}};
std::vector<uint64_t> output{};
coro::mutex mutex;
auto make_critical_section_task = [&](uint64_t i) -> coro::task<void> {
co_await tp.schedule();
// To acquire a mutex lock co_await its lock() function. Upon acquiring the lock the
// lock() function returns a coro::scoped_lock that holds the mutex and automatically
// unlocks the mutex upon destruction. This behaves just like std::scoped_lock.
{
auto scoped_lock = co_await mutex.lock();
output.emplace_back(i);
} // <-- scoped lock unlocks the mutex here.
co_return;
};
const size_t num_tasks{100};
std::vector<coro::task<void>> tasks{};
tasks.reserve(num_tasks);
for (size_t i = 1; i <= num_tasks; ++i)
{
tasks.emplace_back(make_critical_section_task(i));
}
coro::sync_wait(coro::when_all(std::move(tasks)));
// The output will be variable per run depending on how the tasks are picked up on the
// thread pool workers.
for (const auto& value : output)
{
std::cout << value << ", ";
}
}

examples/coro_ring_buffer.cpp

@@ -1,74 +0,0 @@
#include <coro/coro.hpp>
#include <iostream>
int main()
{
const size_t iterations = 100;
const size_t consumers = 4;
coro::thread_pool tp{coro::thread_pool::options{.thread_count = 4}};
coro::ring_buffer<uint64_t, 16> rb{};
coro::mutex m{};
std::vector<coro::task<void>> tasks{};
auto make_producer_task = [&]() -> coro::task<void> {
co_await tp.schedule();
for (size_t i = 1; i <= iterations; ++i)
{
co_await rb.produce(i);
}
// Wait for the ring buffer to clear all items so it's a clean stop.
while (!rb.empty())
{
co_await tp.yield();
}
// Now that the ring buffer is empty, signal to all the consumers it's time to stop. Note that
// the stop signal works on producers as well, but this example only uses 1 producer.
{
auto scoped_lock = co_await m.lock();
std::cerr << "\nproducer is sending stop signal";
}
rb.stop_signal_notify_waiters();
co_return;
};
auto make_consumer_task = [&](size_t id) -> coro::task<void> {
co_await tp.schedule();
try
{
while (true)
{
auto value = co_await rb.consume();
{
auto scoped_lock = co_await m.lock();
std::cout << "(id=" << id << ", v=" << value << "), ";
}
// Mimic doing some work on the consumed value.
co_await tp.yield();
}
}
catch (const coro::stop_signal&)
{
auto scoped_lock = co_await m.lock();
std::cerr << "\nconsumer " << id << " shutting down, stop signal received";
}
co_return;
};
// Create N consumers
for (size_t i = 0; i < consumers; ++i)
{
tasks.emplace_back(make_consumer_task(i));
}
// Create 1 producer.
tasks.emplace_back(make_producer_task());
// Wait for all the values to be produced and consumed through the ring buffer.
coro::sync_wait(coro::when_all(std::move(tasks)));
}

examples/coro_semaphore.cpp

@@ -1,29 +0,0 @@
#include <coro/coro.hpp>
#include <iostream>
int main()
{
// Have more threads/tasks than the semaphore will allow for at any given point in time.
coro::thread_pool tp{coro::thread_pool::options{.thread_count = 8}};
coro::semaphore semaphore{2};
auto make_rate_limited_task = [&](uint64_t task_num) -> coro::task<void> {
co_await tp.schedule();
// This will only allow 2 tasks through at any given point in time, all other tasks will
// await the resource to be available before proceeding.
co_await semaphore.acquire();
std::cout << task_num << ", ";
semaphore.release();
co_return;
};
const size_t num_tasks{100};
std::vector<coro::task<void>> tasks{};
for (size_t i = 1; i <= num_tasks; ++i)
{
tasks.emplace_back(make_rate_limited_task(i));
}
coro::sync_wait(coro::when_all(std::move(tasks)));
}

examples/coro_shared_mutex.cpp

@@ -1,55 +0,0 @@
#include <coro/coro.hpp>
#include <iostream>
int main()
{
// Shared mutexes require an executor type to be able to wake up multiple shared waiters when
// there is an exclusive lock holder releasing the lock. This example uses a single thread
// to also show the interleaving of coroutines acquiring the shared lock in shared and
// exclusive mode as they resume and suspend in a linear manner. Ideally the thread pool
// executor would have more than 1 thread to resume all shared waiters in parallel.
auto tp = std::make_shared<coro::thread_pool>(coro::thread_pool::options{.thread_count = 1});
coro::shared_mutex mutex{tp};
auto make_shared_task = [&](uint64_t i) -> coro::task<void> {
co_await tp->schedule();
{
std::cerr << "shared task " << i << " lock_shared()\n";
auto scoped_lock = co_await mutex.lock_shared();
std::cerr << "shared task " << i << " lock_shared() acquired\n";
/// Immediately yield so the other shared tasks also acquire in shared state
/// while this task currently holds the mutex in shared state.
co_await tp->yield();
std::cerr << "shared task " << i << " unlock_shared()\n";
}
co_return;
};
auto make_exclusive_task = [&]() -> coro::task<void> {
co_await tp->schedule();
std::cerr << "exclusive task lock()\n";
auto scoped_lock = co_await mutex.lock();
std::cerr << "exclusive task lock() acquired\n";
// Do the exclusive work..
std::cerr << "exclusive task unlock()\n";
co_return;
};
// Create 3 shared tasks that will acquire the mutex in a shared state.
const size_t num_tasks{3};
std::vector<coro::task<void>> tasks{};
for (size_t i = 1; i <= num_tasks; ++i)
{
tasks.emplace_back(make_shared_task(i));
}
// Create an exclusive task.
tasks.emplace_back(make_exclusive_task());
// Create 3 more shared tasks that will be blocked until the exclusive task completes.
for (size_t i = num_tasks + 1; i <= num_tasks * 2; ++i)
{
tasks.emplace_back(make_shared_task(i));
}
coro::sync_wait(coro::when_all(std::move(tasks)));
}

examples/coro_task.cpp

@@ -1,76 +0,0 @@
#include <coro/coro.hpp>
#include <iostream>
int main()
{
// Task that takes a value and doubles it.
auto double_task = [](uint64_t x) -> coro::task<uint64_t> { co_return x * 2; };
// Create a task that awaits the doubling of its given value and
// then returns the result after adding 5.
auto double_and_add_5_task = [&](uint64_t input) -> coro::task<uint64_t> {
auto doubled = co_await double_task(input);
co_return doubled + 5;
};
auto output = coro::sync_wait(double_and_add_5_task(2));
std::cout << "Task1 output = " << output << "\n";
struct expensive_struct
{
std::string id{};
std::vector<std::string> records{};
expensive_struct() = default;
~expensive_struct() = default;
// Explicitly delete copy constructor and copy assign, force only moves!
// While the default move constructors will work for this struct the example
// inserts explicit print statements to show the task is moving the value
// out correctly.
expensive_struct(const expensive_struct&) = delete;
auto operator=(const expensive_struct&) -> expensive_struct& = delete;
expensive_struct(expensive_struct&& other) : id(std::move(other.id)), records(std::move(other.records))
{
std::cout << "expensive_struct() move constructor called\n";
}
auto operator=(expensive_struct&& other) -> expensive_struct&
{
if (std::addressof(other) != this)
{
id = std::move(other.id);
records = std::move(other.records);
}
std::cout << "expensive_struct() move assignment called\n";
return *this;
}
};
// Create a very large object and return it by moving the value so the
// contents do not have to be copied out.
auto move_output_task = []() -> coro::task<expensive_struct> {
expensive_struct data{};
data.id = "12345678-1234-5678-9012-123456781234";
for (size_t i = 10'000; i < 100'000; ++i)
{
data.records.emplace_back(std::to_string(i));
}
// Because the struct only has move constructors it will be forced to use
// them, no need to explicitly std::move(data).
co_return data;
};
auto data = coro::sync_wait(move_output_task());
std::cout << data.id << " has " << data.records.size() << " records.\n";
// std::unique_ptr<T> can also be used to return a larger object.
auto unique_ptr_task = []() -> coro::task<std::unique_ptr<uint64_t>> { co_return std::make_unique<uint64_t>(42); };
auto answer_to_everything = coro::sync_wait(unique_ptr_task());
if (answer_to_everything != nullptr)
{
std::cout << "Answer to everything = " << *answer_to_everything << "\n";
}
}

examples/coro_task_container.cpp

@@ -1,82 +0,0 @@
#include <coro/coro.hpp>
#include <iostream>
int main()
{
auto scheduler = std::make_shared<coro::io_scheduler>(
coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}});
auto make_server_task = [&]() -> coro::task<void> {
// This is the task that will handle processing a client's requests.
auto serve_client = [](coro::net::tcp_client client) -> coro::task<void> {
size_t requests{1};
while (true)
{
// Continue to accept more requests until the client closes the connection.
co_await client.poll(coro::poll_op::read);
std::string request(64, '\0');
auto [recv_status, recv_bytes] = client.recv(request);
if (recv_status == coro::net::recv_status::closed)
{
break;
}
request.resize(recv_bytes.size());
std::cout << "server: " << request << "\n";
auto response = "Hello from server " + std::to_string(requests);
client.send(response);
++requests;
}
co_return;
};
// Spin up the tcp_server and schedule it onto the io_scheduler.
coro::net::tcp_server server{scheduler};
co_await scheduler->schedule();
// All incoming connections will be stored into the task container until they are completed.
coro::task_container tc{scheduler};
// Wait for an incoming connection and accept it, this example will only use 1 connection.
co_await server.poll();
auto client = server.accept();
// Store the task that will serve the client into the container and immediately begin executing it
// on the task container's thread pool, which is the same as the scheduler.
tc.start(serve_client(std::move(client)));
// Wait for all clients to complete before shutting down the tcp_server.
co_await tc.garbage_collect_and_yield_until_empty();
co_return;
};
auto make_client_task = [&](size_t request_count) -> coro::task<void> {
co_await scheduler->schedule();
coro::net::tcp_client client{scheduler};
co_await client.connect();
// Send N requests on the same connection and wait for the server response to each one.
for (size_t i = 1; i <= request_count; ++i)
{
// Send the request data.
auto request = "Hello from client " + std::to_string(i);
client.send(request);
co_await client.poll(coro::poll_op::read);
std::string response(64, '\0');
auto [recv_status, recv_bytes] = client.recv(response);
response.resize(recv_bytes.size());
std::cout << "client: " << response << "\n";
}
co_return; // Upon exiting the tcp_client will close its connection to the server.
};
coro::sync_wait(coro::when_all(make_server_task(), make_client_task(5)));
}

examples/coro_thread_pool.cpp

@@ -1,78 +0,0 @@
#include <coro/coro.hpp>
#include <iostream>
#include <random>
int main()
{
coro::thread_pool tp{coro::thread_pool::options{
// By default a thread pool will use std::thread::hardware_concurrency() as the
// number of worker threads in the pool, but this can be changed via this
// thread_count option. This example will use 4.
.thread_count = 4,
// Upon starting each worker thread an optional lambda callback with the worker's
// index can be called to make thread-level changes, e.g. setting the priority or
// the thread's name.
.on_thread_start_functor = [](std::size_t worker_idx) -> void {
std::cout << "thread pool worker " << worker_idx << " is starting up.\n";
},
// Upon stopping each worker thread an optional lambda callback with the worker's
// index can be called.
.on_thread_stop_functor = [](std::size_t worker_idx) -> void {
std::cout << "thread pool worker " << worker_idx << " is shutting down.\n";
}}};
auto offload_task = [&](uint64_t child_idx) -> coro::task<uint64_t> {
// Start by scheduling this offload worker task onto the thread pool.
co_await tp.schedule();
// Now any code below this schedule() line will be executed on one of the thread
// pool's worker threads.
// Mimic some expensive task that should be run on a background thread...
std::random_device rd;
std::mt19937 gen{rd()};
std::uniform_int_distribution<> d{0, 1};
size_t calculation{0};
for (size_t i = 0; i < 1'000'000; ++i)
{
calculation += d(gen);
// Let's be nice and yield() to let other coroutines on the thread pool have some cpu
// time. This isn't necessary but is illustrated to show how tasks can cooperatively
// yield control at certain points of execution. It's important to never call
// std::this_thread::sleep_for() within the context of a coroutine: it blocks the
// worker thread and prevents other coroutines that are ready for execution from
// starting. Always use yield(), or within the context of a coro::io_scheduler
// use yield_for(amount).
if (i == 500'000)
{
std::cout << "Task " << child_idx << " is yielding()\n";
co_await tp.yield();
}
}
co_return calculation;
};
auto primary_task = [&]() -> coro::task<uint64_t> {
const size_t num_children{10};
std::vector<coro::task<uint64_t>> child_tasks{};
child_tasks.reserve(num_children);
for (size_t i = 0; i < num_children; ++i)
{
child_tasks.emplace_back(offload_task(i));
}
// Wait for the thread pool workers to process all child tasks.
auto results = co_await coro::when_all(std::move(child_tasks));
// Sum up the results of the completed child tasks.
size_t calculation{0};
for (const auto& task : results)
{
calculation += task.return_value();
}
co_return calculation;
};
auto result = coro::sync_wait(primary_task());
std::cout << "calculated thread pool result = " << result << "\n";
}


@ -1,42 +0,0 @@
cmake_minimum_required(VERSION 3.0)
project(libcoro_test)
set(LIBCORO_TEST_SOURCE_FILES
net/test_dns_resolver.cpp
net/test_ip_address.cpp
net/test_tcp_server.cpp
net/test_udp_peers.cpp
bench.cpp
test_event.cpp
test_generator.cpp
test_io_scheduler.cpp
test_latch.cpp
test_mutex.cpp
test_ring_buffer.cpp
test_semaphore.cpp
test_shared_mutex.cpp
test_sync_wait.cpp
test_task.cpp
test_thread_pool.cpp
test_when_all.cpp
)
add_executable(${PROJECT_NAME} main.cpp ${LIBCORO_TEST_SOURCE_FILES})
target_compile_features(${PROJECT_NAME} PUBLIC cxx_std_20)
target_include_directories(${PROJECT_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
target_link_libraries(${PROJECT_NAME} PUBLIC libcoro)
target_compile_options(${PROJECT_NAME} PUBLIC -fcoroutines)
if(LIBCORO_CODE_COVERAGE)
target_compile_options(${PROJECT_NAME} PRIVATE --coverage)
target_link_libraries(${PROJECT_NAME} PRIVATE gcov)
endif()
if(${CMAKE_CXX_COMPILER_ID} MATCHES "GNU")
target_compile_options(${PROJECT_NAME} PUBLIC -fcoroutines -Wall -Wextra -pipe)
elseif(${CMAKE_CXX_COMPILER_ID} MATCHES "Clang")
message(FATAL_ERROR "Clang is currently not supported.")
endif()
add_test(NAME libcoro_tests COMMAND ${PROJECT_NAME})


@ -1,735 +0,0 @@
#include "catch.hpp"
#include <coro/coro.hpp>
#include <atomic>
#include <chrono>
#include <iomanip>
#include <iostream>
using namespace std::chrono_literals;
using sc = std::chrono::steady_clock;
constexpr std::size_t default_iterations = 5'000'000;
static auto print_stats(const std::string& bench_name, uint64_t operations, sc::time_point start, sc::time_point stop)
-> void
{
auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start);
auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(duration);
std::cout << bench_name << "\n";
std::cout << " " << operations << " ops in " << ms.count() << "ms\n";
double seconds = duration.count() / 1'000'000'000.0;
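// The cast truncates to a whole number of operations per second purely for display.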
double ops_per_sec = static_cast<uint64_t>(operations / seconds);
std::cout << " ops/sec: " << std::fixed << ops_per_sec << "\n";
}
TEST_CASE("benchmark counter func direct call", "[benchmark]")
{
constexpr std::size_t iterations = default_iterations;
std::atomic<uint64_t> counter{0};
auto func = [&]() -> void {
counter.fetch_add(1, std::memory_order::relaxed);
return;
};
auto start = sc::now();
for (std::size_t i = 0; i < iterations; ++i)
{
func();
}
print_stats("benchmark counter func direct call", iterations, start, sc::now());
REQUIRE(counter == iterations);
}
TEST_CASE("benchmark counter func coro::sync_wait(awaitable)", "[benchmark]")
{
constexpr std::size_t iterations = default_iterations;
uint64_t counter{0};
auto func = []() -> coro::task<uint64_t> { co_return 1; };
auto start = sc::now();
for (std::size_t i = 0; i < iterations; ++i)
{
counter += coro::sync_wait(func());
}
print_stats("benchmark counter func coro::sync_wait(awaitable)", iterations, start, sc::now());
REQUIRE(counter == iterations);
}
TEST_CASE("benchmark counter func coro::sync_wait(coro::when_all(awaitable)) x10", "[benchmark]")
{
constexpr std::size_t iterations = default_iterations;
uint64_t counter{0};
auto f = []() -> coro::task<uint64_t> { co_return 1; };
auto start = sc::now();
for (std::size_t i = 0; i < iterations; i += 10)
{
auto tasks = coro::sync_wait(coro::when_all(f(), f(), f(), f(), f(), f(), f(), f(), f(), f()));
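// when_all over discrete awaitables yields a std::tuple of completed tasks, so
// std::apply is used to fold over the tuple and sum each task's return_value().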
std::apply([&counter](auto&&... t) { ((counter += t.return_value()), ...); }, tasks);
}
print_stats("benchmark counter func coro::sync_wait(coro::when_all(awaitable))", iterations, start, sc::now());
REQUIRE(counter == iterations);
}
TEST_CASE("benchmark counter func coro::sync_wait(coro::when_all(vector<awaitable>)) x10", "[benchmark]")
{
constexpr std::size_t iterations = default_iterations;
uint64_t counter{0};
auto f = []() -> coro::task<uint64_t> { co_return 1; };
auto start = sc::now();
for (std::size_t i = 0; i < iterations; i += 10)
{
std::vector<coro::task<uint64_t>> tasks{};
tasks.reserve(10);
for (size_t j = 0; j < 10; ++j)
{
tasks.emplace_back(f());
}
auto results = coro::sync_wait(coro::when_all(std::move(tasks)));
for (const auto& r : results)
{
counter += r.return_value();
}
}
print_stats("benchmark counter func coro::sync_wait(coro::when_all(awaitable))", iterations, start, sc::now());
REQUIRE(counter == iterations);
}
TEST_CASE("benchmark thread_pool{1} counter task", "[benchmark]")
{
constexpr std::size_t iterations = default_iterations;
coro::thread_pool tp{coro::thread_pool::options{1}};
std::atomic<uint64_t> counter{0};
auto make_task = [](coro::thread_pool& tp, std::atomic<uint64_t>& c) -> coro::task<void> {
co_await tp.schedule();
c.fetch_add(1, std::memory_order::relaxed);
co_return;
};
std::vector<coro::task<void>> tasks;
tasks.reserve(iterations);
auto start = sc::now();
for (std::size_t i = 0; i < iterations; ++i)
{
tasks.emplace_back(make_task(tp, counter));
tasks.back().resume();
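// resume() runs each task up to its co_await tp.schedule() suspension point,
// which hands the coroutine off to the pool's worker threads to finish.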
}
// This will fail in valgrind since it runs in a single 'thread', and thus shuts down prior
// to any coroutine actually getting properly scheduled onto the background thread pool.
// Inject a sleep here to force a thread context switch within valgrind.
std::this_thread::sleep_for(std::chrono::milliseconds{10});
tp.shutdown();
print_stats("benchmark thread_pool{1} counter task", iterations, start, sc::now());
REQUIRE(counter == iterations);
REQUIRE(tp.empty());
}
TEST_CASE("benchmark thread_pool{2} counter task", "[benchmark]")
{
constexpr std::size_t iterations = default_iterations;
coro::thread_pool tp{coro::thread_pool::options{2}};
std::atomic<uint64_t> counter{0};
auto make_task = [](coro::thread_pool& tp, std::atomic<uint64_t>& c) -> coro::task<void> {
co_await tp.schedule();
c.fetch_add(1, std::memory_order::relaxed);
co_return;
};
std::vector<coro::task<void>> tasks;
tasks.reserve(iterations);
auto start = sc::now();
for (std::size_t i = 0; i < iterations; ++i)
{
tasks.emplace_back(make_task(tp, counter));
tasks.back().resume();
}
// This will fail in valgrind since it runs in a single 'thread', and thus shuts down prior
// to any coroutine actually getting properly scheduled onto the background thread pool.
// Inject a sleep here to force a thread context switch within valgrind.
std::this_thread::sleep_for(std::chrono::milliseconds{10});
tp.shutdown();
print_stats("benchmark thread_pool{2} counter task", iterations, start, sc::now());
REQUIRE(counter == iterations);
REQUIRE(tp.empty());
}
TEST_CASE("benchmark thread_pool{N} counter task", "[benchmark]")
{
constexpr std::size_t iterations = default_iterations;
coro::thread_pool tp{};
std::atomic<uint64_t> counter{0};
auto make_task = [](coro::thread_pool& tp, std::atomic<uint64_t>& c) -> coro::task<void> {
co_await tp.schedule();
c.fetch_add(1, std::memory_order::relaxed);
co_return;
};
std::vector<coro::task<void>> tasks;
tasks.reserve(iterations);
auto start = sc::now();
for (std::size_t i = 0; i < iterations; ++i)
{
tasks.emplace_back(make_task(tp, counter));
tasks.back().resume();
}
// This will fail in valgrind since it runs in a single 'thread', and thus shuts down prior
// to any coroutine actually getting properly scheduled onto the background thread pool.
// Inject a sleep here to force a thread context switch within valgrind.
std::this_thread::sleep_for(std::chrono::milliseconds{10});
tp.shutdown();
print_stats("benchmark thread_pool{N} counter task", iterations, start, sc::now());
REQUIRE(counter == iterations);
REQUIRE(tp.empty());
}
TEST_CASE("benchmark counter task scheduler{1} yield", "[benchmark]")
{
constexpr std::size_t iterations = default_iterations;
constexpr std::size_t ops = iterations * 2; // the external resume is still a resume op
coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
std::atomic<uint64_t> counter{0};
std::vector<coro::task<void>> tasks{};
tasks.reserve(iterations);
auto make_task = [&]() -> coro::task<void> {
co_await s.schedule();
co_await s.yield();
counter.fetch_add(1, std::memory_order::relaxed);
co_return;
};
auto start = sc::now();
for (std::size_t i = 0; i < iterations; ++i)
{
tasks.emplace_back(make_task());
}
coro::sync_wait(coro::when_all(std::move(tasks)));
auto stop = sc::now();
print_stats("benchmark counter task scheduler{1} yield", ops, start, stop);
REQUIRE(s.empty());
REQUIRE(counter == iterations);
}
TEST_CASE("benchmark counter task scheduler{1} yield_for", "[benchmark]")
{
constexpr std::size_t iterations = default_iterations;
constexpr std::size_t ops = iterations * 2; // the external resume is still a resume op
coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
std::atomic<uint64_t> counter{0};
std::vector<coro::task<void>> tasks{};
tasks.reserve(iterations);
auto make_task = [&]() -> coro::task<void> {
co_await s.schedule();
co_await s.yield_for(std::chrono::milliseconds{1});
counter.fetch_add(1, std::memory_order::relaxed);
co_return;
};
auto start = sc::now();
for (std::size_t i = 0; i < iterations; ++i)
{
tasks.emplace_back(make_task());
}
coro::sync_wait(coro::when_all(std::move(tasks)));
auto stop = sc::now();
print_stats("benchmark counter task scheduler{1} yield", ops, start, stop);
REQUIRE(s.empty());
REQUIRE(counter == iterations);
}
TEST_CASE("benchmark counter task scheduler await event from another coroutine", "[benchmark]")
{
constexpr std::size_t iterations = default_iterations;
constexpr std::size_t ops = iterations * 3; // two tasks + event resume
coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
std::vector<std::unique_ptr<coro::event>> events{};
events.reserve(iterations);
for (std::size_t i = 0; i < iterations; ++i)
{
events.emplace_back(std::make_unique<coro::event>());
}
std::vector<coro::task<void>> tasks{};
tasks.reserve(iterations * 2); // one for wait, one for resume
std::atomic<uint64_t> counter{0};
auto wait_func = [&](std::size_t index) -> coro::task<void> {
co_await s.schedule();
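// Suspend until the matching resume_func below sets this event.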
co_await *events[index];
counter.fetch_add(1, std::memory_order::relaxed);
co_return;
};
auto resume_func = [&](std::size_t index) -> coro::task<void> {
co_await s.schedule();
events[index]->set();
co_return;
};
auto start = sc::now();
for (std::size_t i = 0; i < iterations; ++i)
{
tasks.emplace_back(wait_func(i));
tasks.emplace_back(resume_func(i));
}
coro::sync_wait(coro::when_all(std::move(tasks)));
auto stop = sc::now();
print_stats("benchmark counter task scheduler await event from another coroutine", ops, start, stop);
REQUIRE(counter == iterations);
// valgrind workaround
while (!s.empty())
{
std::this_thread::sleep_for(std::chrono::milliseconds{1});
}
REQUIRE(s.empty());
}
TEST_CASE("benchmark tcp_server echo server thread pool", "[benchmark]")
{
const constexpr std::size_t connections = 100;
const constexpr std::size_t messages_per_connection = 1'000;
const constexpr std::size_t ops = connections * messages_per_connection;
const std::string msg = "im a data point in a stream of bytes";
const constexpr std::size_t server_count = 5;
const constexpr std::size_t client_count = 5;
const constexpr std::size_t server_thread_count = 4;
const constexpr std::size_t client_thread_count = 4;
std::atomic<uint64_t> listening{0};
std::atomic<uint64_t> accepted{0};
std::atomic<uint64_t> clients_completed{0};
std::atomic<uint64_t> server_id{0};
struct server
{
uint64_t id;
std::shared_ptr<coro::io_scheduler> scheduler{std::make_shared<coro::io_scheduler>(coro::io_scheduler::options{
.pool = coro::thread_pool::options{.thread_count = server_thread_count},
.execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_on_thread_pool})};
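// As the names suggest, process_tasks_on_thread_pool resumes ready tasks on the
// pool's worker threads, while process_tasks_inline (used in the next benchmark)
// resumes them directly on the event loop's thread.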
// coro::task_container<coro::io_scheduler> task_container{scheduler};
uint64_t live_clients{0};
coro::event wait_for_clients{};
};
struct client
{
std::shared_ptr<coro::io_scheduler> scheduler{std::make_shared<coro::io_scheduler>(coro::io_scheduler::options{
.pool = coro::thread_pool::options{.thread_count = client_thread_count},
.execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_on_thread_pool})};
std::vector<coro::task<void>> tasks{};
};
auto make_on_connection_task = [&](server& s, coro::net::tcp_client client) -> coro::task<void> {
std::string in(64, '\0');
// Echo the messages until the socket is closed.
while (true)
{
auto pstatus = co_await client.poll(coro::poll_op::read);
REQUIRE(pstatus == coro::poll_status::event);
auto [rstatus, rspan] = client.recv(in);
if (rstatus == coro::net::recv_status::closed)
{
REQUIRE(rspan.empty());
break;
}
REQUIRE(rstatus == coro::net::recv_status::ok);
in.resize(rspan.size());
auto [sstatus, remaining] = client.send(in);
REQUIRE(sstatus == coro::net::send_status::ok);
REQUIRE(remaining.empty());
}
s.live_clients--;
if (s.live_clients == 0)
{
s.wait_for_clients.set();
}
co_return;
};
auto make_server_task = [&](server& s) -> coro::task<void> {
co_await s.scheduler->schedule();
coro::net::tcp_server server{s.scheduler};
listening++;
while (accepted.load(std::memory_order::acquire) < connections)
{
auto pstatus = co_await server.poll(std::chrono::milliseconds{1});
if (pstatus == coro::poll_status::event)
{
auto c = server.accept();
if (c.socket().is_valid())
{
accepted.fetch_add(1, std::memory_order::release);
s.live_clients++;
s.scheduler->schedule(make_on_connection_task(s, std::move(c)));
// s.task_container.start(make_on_connection_task(s, std::move(c)));
}
}
}
co_await s.wait_for_clients;
co_return;
};
std::mutex g_histogram_mutex;
std::map<std::chrono::milliseconds, uint64_t> g_histogram;
auto make_client_task = [&](client& c) -> coro::task<void> {
co_await c.scheduler->schedule();
std::map<std::chrono::milliseconds, uint64_t> histogram;
coro::net::tcp_client client{c.scheduler};
auto cstatus = co_await client.connect(); // a timeout could be passed here, e.g. std::chrono::seconds{1}
REQUIRE(cstatus == coro::net::connect_status::connected);
for (size_t i = 1; i <= messages_per_connection; ++i)
{
auto req_start = std::chrono::steady_clock::now();
auto [sstatus, remaining] = client.send(msg);
REQUIRE(sstatus == coro::net::send_status::ok);
REQUIRE(remaining.empty());
auto pstatus = co_await client.poll(coro::poll_op::read);
REQUIRE(pstatus == coro::poll_status::event);
std::string response(64, '\0');
auto [rstatus, rspan] = client.recv(response);
REQUIRE(rstatus == coro::net::recv_status::ok);
REQUIRE(rspan.size() == msg.size());
response.resize(rspan.size());
REQUIRE(response == msg);
auto req_stop = std::chrono::steady_clock::now();
histogram[std::chrono::duration_cast<std::chrono::milliseconds>(req_stop - req_start)]++;
}
{
std::scoped_lock lk{g_histogram_mutex};
for (auto [ms, count] : histogram)
{
g_histogram[ms] += count;
}
}
clients_completed.fetch_add(1);
co_return;
};
auto start = sc::now();
// Create the servers to accept incoming tcp connections.
std::vector<std::thread> server_threads{};
for (size_t i = 0; i < server_count; ++i)
{
server_threads.emplace_back(std::thread{[&]() {
server s{};
s.id = server_id++;
coro::sync_wait(make_server_task(s));
s.scheduler->shutdown();
}});
}
// The servers can take a small bit of time to start up; if we don't wait for them to notify
// then the first few connections can easily fail to connect, causing this test to fail.
while (listening != server_count)
{
std::this_thread::sleep_for(std::chrono::milliseconds{1});
}
// Spawn N client connections across a set number of clients.
std::vector<std::thread> client_threads{};
std::vector<client> clients{};
for (size_t i = 0; i < client_count; ++i)
{
client_threads.emplace_back(std::thread{[&]() {
client c{};
for (size_t i = 0; i < connections / client_count; ++i)
{
c.tasks.emplace_back(make_client_task(c));
}
coro::sync_wait(coro::when_all(std::move(c.tasks)));
c.scheduler->shutdown();
}});
}
for (auto& ct : client_threads)
{
ct.join();
}
for (auto& st : server_threads)
{
st.join();
}
auto stop = sc::now();
print_stats("benchmark tcp_client and tcp_server thread_pool", ops, start, stop);
for (const auto& [ms, count] : g_histogram)
{
std::cerr << ms.count() << " : " << count << "\n";
}
}
TEST_CASE("benchmark tcp_server echo server inline", "[benchmark]")
{
const constexpr std::size_t connections = 100;
const constexpr std::size_t messages_per_connection = 1'000;
const constexpr std::size_t ops = connections * messages_per_connection;
const std::string msg = "im a data point in a stream of bytes";
const constexpr std::size_t server_count = 10;
const constexpr std::size_t client_count = 10;
std::atomic<uint64_t> listening{0};
std::atomic<uint64_t> accepted{0};
std::atomic<uint64_t> clients_completed{0};
std::atomic<uint64_t> server_id{0};
using estrat = coro::io_scheduler::execution_strategy_t;
struct server
{
uint64_t id;
std::shared_ptr<coro::io_scheduler> scheduler{std::make_shared<coro::io_scheduler>(
coro::io_scheduler::options{.execution_strategy = estrat::process_tasks_inline})};
// coro::task_container<coro::io_scheduler> task_container{scheduler};
uint64_t live_clients{0};
coro::event wait_for_clients{};
};
struct client
{
std::shared_ptr<coro::io_scheduler> scheduler{std::make_shared<coro::io_scheduler>(
coro::io_scheduler::options{.execution_strategy = estrat::process_tasks_inline})};
std::vector<coro::task<void>> tasks{};
};
auto make_on_connection_task = [&](server& s, coro::net::tcp_client client) -> coro::task<void> {
std::string in(64, '\0');
// Echo the messages until the socket is closed.
while (true)
{
auto pstatus = co_await client.poll(coro::poll_op::read);
REQUIRE(pstatus == coro::poll_status::event);
auto [rstatus, rspan] = client.recv(in);
if (rstatus == coro::net::recv_status::closed)
{
REQUIRE(rspan.empty());
break;
}
REQUIRE(rstatus == coro::net::recv_status::ok);
in.resize(rspan.size());
auto [sstatus, remaining] = client.send(in);
REQUIRE(sstatus == coro::net::send_status::ok);
REQUIRE(remaining.empty());
}
s.live_clients--;
if (s.live_clients == 0)
{
s.wait_for_clients.set();
}
co_return;
};
auto make_server_task = [&](server& s) -> coro::task<void> {
co_await s.scheduler->schedule();
coro::net::tcp_server server{s.scheduler};
listening++;
while (accepted.load(std::memory_order::acquire) < connections)
{
auto pstatus = co_await server.poll(std::chrono::milliseconds{1});
if (pstatus == coro::poll_status::event)
{
auto c = server.accept();
if (c.socket().is_valid())
{
accepted.fetch_add(1, std::memory_order::release);
s.live_clients++;
s.scheduler->schedule(make_on_connection_task(s, std::move(c)));
// s.task_container.start(make_on_connection_task(s, std::move(c)));
}
}
}
co_await s.wait_for_clients;
co_return;
};
std::mutex g_histogram_mutex;
std::map<std::chrono::milliseconds, uint64_t> g_histogram;
auto make_client_task = [&](client& c) -> coro::task<void> {
co_await c.scheduler->schedule();
std::map<std::chrono::milliseconds, uint64_t> histogram;
coro::net::tcp_client client{c.scheduler};
auto cstatus = co_await client.connect(); // a timeout could be passed here, e.g. std::chrono::seconds{1}
REQUIRE(cstatus == coro::net::connect_status::connected);
for (size_t i = 1; i <= messages_per_connection; ++i)
{
auto req_start = std::chrono::steady_clock::now();
auto [sstatus, remaining] = client.send(msg);
REQUIRE(sstatus == coro::net::send_status::ok);
REQUIRE(remaining.empty());
auto pstatus = co_await client.poll(coro::poll_op::read);
REQUIRE(pstatus == coro::poll_status::event);
std::string response(64, '\0');
auto [rstatus, rspan] = client.recv(response);
REQUIRE(rstatus == coro::net::recv_status::ok);
REQUIRE(rspan.size() == msg.size());
response.resize(rspan.size());
REQUIRE(response == msg);
auto req_stop = std::chrono::steady_clock::now();
histogram[std::chrono::duration_cast<std::chrono::milliseconds>(req_stop - req_start)]++;
}
{
std::scoped_lock lk{g_histogram_mutex};
for (auto [ms, count] : histogram)
{
g_histogram[ms] += count;
}
}
clients_completed.fetch_add(1);
co_return;
};
auto start = sc::now();
// Create the servers to accept incoming tcp connections.
std::vector<std::thread> server_threads{};
for (size_t i = 0; i < server_count; ++i)
{
server_threads.emplace_back(std::thread{[&]() {
server s{};
s.id = server_id++;
coro::sync_wait(make_server_task(s));
s.scheduler->shutdown();
}});
}
// The servers can take a small bit of time to start up; if we don't wait for them to notify
// then the first few connections can easily fail to connect, causing this test to fail.
while (listening != server_count)
{
std::this_thread::sleep_for(std::chrono::milliseconds{1});
}
// Spawn N client connections across a set number of clients.
std::vector<std::thread> client_threads{};
std::vector<client> clients{};
for (size_t i = 0; i < client_count; ++i)
{
client_threads.emplace_back(std::thread{[&]() {
client c{};
for (size_t i = 0; i < connections / client_count; ++i)
{
c.tasks.emplace_back(make_client_task(c));
}
coro::sync_wait(coro::when_all(std::move(c.tasks)));
c.scheduler->shutdown();
}});
}
for (auto& ct : client_threads)
{
ct.join();
}
for (auto& st : server_threads)
{
st.join();
}
auto stop = sc::now();
print_stats("benchmark tcp_client and tcp_server inline", ops, start, stop);
for (const auto& [ms, count] : g_histogram)
{
std::cerr << ms.count() << " : " << count << "\n";
}
}

File diff suppressed because it is too large


@ -1,32 +0,0 @@
#define CATCH_CONFIG_MAIN
#include "catch.hpp"
#include <signal.h>
/**
* This structure invokes a constructor to setup some global test settings that are needed prior
* to executing the tests.
*/
struct test_setup
{
test_setup()
{
// Ignore SIGPIPE, the library should be handling these gracefully.
signal(SIGPIPE, SIG_IGN);
// For SSL/TLS tests create a localhost cert.pem and key.pem; the tests expect these files
// to be generated into the same directory that the tests are running in.
auto unused = system(
"openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -days 365 -subj '/CN=localhost' -nodes");
(void)unused;
}
~test_setup()
{
// Cleanup the temporary key.pem and cert.pem files.
auto unused = system("rm key.pem cert.pem");
(void)unused;
}
};
static test_setup g_test_setup{};


@ -1,34 +0,0 @@
#include "catch.hpp"
#include <coro/coro.hpp>
#include <chrono>
TEST_CASE("dns_resolver basic", "[dns]")
{
auto scheduler = std::make_shared<coro::io_scheduler>(
coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}});
coro::net::dns_resolver dns_resolver{scheduler, std::chrono::milliseconds{5000}};
auto make_host_by_name_task = [&](coro::net::hostname hn) -> coro::task<void> {
co_await scheduler->schedule();
auto result_ptr = co_await std::move(dns_resolver.host_by_name(hn));
if (result_ptr->status() == coro::net::dns_status::complete)
{
for (const auto& ip_addr : result_ptr->ip_addresses())
{
std::cerr << coro::net::to_string(ip_addr.domain()) << " " << ip_addr.to_string() << "\n";
}
}
co_return;
};
coro::sync_wait(make_host_by_name_task(coro::net::hostname{"www.example.com"}));
std::cerr << "io_scheduler.size() before shutdown = " << scheduler->size() << "\n";
scheduler->shutdown();
std::cerr << "io_scheduler.size() after shutdown = " << scheduler->size() << "\n";
REQUIRE(scheduler->empty());
}


@ -1,73 +0,0 @@
#include "catch.hpp"
#include <coro/coro.hpp>
#include <chrono>
#include <iomanip>
TEST_CASE("net::ip_address from_string() ipv4")
{
{
auto ip_addr = coro::net::ip_address::from_string("127.0.0.1");
REQUIRE(ip_addr.to_string() == "127.0.0.1");
REQUIRE(ip_addr.domain() == coro::net::domain_t::ipv4);
std::array<uint8_t, coro::net::ip_address::ipv4_len> expected{127, 0, 0, 1};
REQUIRE(std::equal(expected.begin(), expected.end(), ip_addr.data().begin()));
}
{
auto ip_addr = coro::net::ip_address::from_string("255.255.0.0");
REQUIRE(ip_addr.to_string() == "255.255.0.0");
REQUIRE(ip_addr.domain() == coro::net::domain_t::ipv4);
std::array<uint8_t, coro::net::ip_address::ipv4_len> expected{255, 255, 0, 0};
REQUIRE(std::equal(expected.begin(), expected.end(), ip_addr.data().begin()));
}
}
TEST_CASE("net::ip_address from_string() ipv6")
{
{
auto ip_addr =
coro::net::ip_address::from_string("0123:4567:89ab:cdef:0123:4567:89ab:cdef", coro::net::domain_t::ipv6);
REQUIRE(ip_addr.to_string() == "123:4567:89ab:cdef:123:4567:89ab:cdef");
REQUIRE(ip_addr.domain() == coro::net::domain_t::ipv6);
std::array<uint8_t, coro::net::ip_address::ipv6_len> expected{
0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef};
REQUIRE(std::equal(expected.begin(), expected.end(), ip_addr.data().begin()));
}
{
auto ip_addr = coro::net::ip_address::from_string("::", coro::net::domain_t::ipv6);
REQUIRE(ip_addr.to_string() == "::");
REQUIRE(ip_addr.domain() == coro::net::domain_t::ipv6);
std::array<uint8_t, coro::net::ip_address::ipv6_len> expected{};
REQUIRE(std::equal(expected.begin(), expected.end(), ip_addr.data().begin()));
}
{
auto ip_addr = coro::net::ip_address::from_string("::1", coro::net::domain_t::ipv6);
REQUIRE(ip_addr.to_string() == "::1");
REQUIRE(ip_addr.domain() == coro::net::domain_t::ipv6);
std::array<uint8_t, coro::net::ip_address::ipv6_len> expected{
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
REQUIRE(std::equal(expected.begin(), expected.end(), ip_addr.data().begin()));
}
{
auto ip_addr = coro::net::ip_address::from_string("1::1", coro::net::domain_t::ipv6);
REQUIRE(ip_addr.to_string() == "1::1");
REQUIRE(ip_addr.domain() == coro::net::domain_t::ipv6);
std::array<uint8_t, coro::net::ip_address::ipv6_len> expected{
0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
REQUIRE(std::equal(expected.begin(), expected.end(), ip_addr.data().begin()));
}
{
auto ip_addr = coro::net::ip_address::from_string("1::", coro::net::domain_t::ipv6);
REQUIRE(ip_addr.to_string() == "1::");
REQUIRE(ip_addr.domain() == coro::net::domain_t::ipv6);
std::array<uint8_t, coro::net::ip_address::ipv6_len> expected{
0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
REQUIRE(std::equal(expected.begin(), expected.end(), ip_addr.data().begin()));
}
}


@ -1,199 +0,0 @@
#include "catch.hpp"
#include <coro/coro.hpp>
#include <iostream>
TEST_CASE("tcp_server ping server", "[tcp_server]")
{
const std::string client_msg{"Hello from client"};
const std::string server_msg{"Reply from server!"};
auto scheduler = std::make_shared<coro::io_scheduler>(
coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}});
auto make_client_task = [&]() -> coro::task<void> {
co_await scheduler->schedule();
coro::net::tcp_client client{scheduler};
std::cerr << "client connect\n";
auto cstatus = co_await client.connect();
REQUIRE(cstatus == coro::net::connect_status::connected);
// Skip polling for write; polling should really only be needed if a write is
// partial, and isn't required for this test.
std::cerr << "client send()\n";
auto [sstatus, remaining] = client.send(client_msg);
REQUIRE(sstatus == coro::net::send_status::ok);
REQUIRE(remaining.empty());
// Poll for the server's response.
std::cerr << "client poll(read)\n";
auto pstatus = co_await client.poll(coro::poll_op::read);
REQUIRE(pstatus == coro::poll_status::event);
std::string buffer(256, '\0');
std::cerr << "client recv()\n";
auto [rstatus, rspan] = client.recv(buffer);
REQUIRE(rstatus == coro::net::recv_status::ok);
REQUIRE(rspan.size() == server_msg.length());
buffer.resize(rspan.size());
REQUIRE(buffer == server_msg);
std::cerr << "client return\n";
co_return;
};
auto make_server_task = [&]() -> coro::task<void> {
co_await scheduler->schedule();
coro::net::tcp_server server{scheduler};
// Poll for client connection.
std::cerr << "server poll(accept)\n";
auto pstatus = co_await server.poll();
REQUIRE(pstatus == coro::poll_status::event);
std::cerr << "server accept()\n";
auto client = server.accept();
REQUIRE(client.socket().is_valid());
// Poll for client request.
std::cerr << "server poll(read)\n";
pstatus = co_await client.poll(coro::poll_op::read);
REQUIRE(pstatus == coro::poll_status::event);
std::string buffer(256, '\0');
std::cerr << "server recv()\n";
auto [rstatus, rspan] = client.recv(buffer);
REQUIRE(rstatus == coro::net::recv_status::ok);
REQUIRE(rspan.size() == client_msg.size());
buffer.resize(rspan.size());
REQUIRE(buffer == client_msg);
// Respond to client.
std::cerr << "server send()\n";
auto [sstatus, remaining] = client.send(server_msg);
REQUIRE(sstatus == coro::net::send_status::ok);
REQUIRE(remaining.empty());
std::cerr << "server return\n";
co_return;
};
coro::sync_wait(coro::when_all(make_server_task(), make_client_task()));
}
TEST_CASE("tcp_server with ssl", "[tcp_server]")
{
auto scheduler = std::make_shared<coro::io_scheduler>(
coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}});
coro::net::ssl_context client_ssl_context{};
coro::net::ssl_context server_ssl_context{
"cert.pem", coro::net::ssl_file_type::pem, "key.pem", coro::net::ssl_file_type::pem};
std::string client_msg = "Hello world from SSL client!";
std::string server_msg = "Hello world from SSL server!!";
auto make_client_task = [&]() -> coro::task<void> {
co_await scheduler->schedule();
coro::net::tcp_client client{scheduler, coro::net::tcp_client::options{.ssl_ctx = &client_ssl_context}};
std::cerr << "client.connect()\n";
auto cstatus = co_await client.connect();
REQUIRE(cstatus == coro::net::connect_status::connected);
std::cerr << "client.connected\n";
std::cerr << "client.ssl_handshake()\n";
auto hstatus = co_await client.ssl_handshake();
REQUIRE(hstatus == coro::net::ssl_handshake_status::ok);
std::cerr << "client.poll(write)\n";
auto pstatus = co_await client.poll(coro::poll_op::write);
REQUIRE(pstatus == coro::poll_status::event);
std::cerr << "client.send()\n";
auto [sstatus, remaining] = client.send(client_msg);
REQUIRE(sstatus == coro::net::send_status::ok);
REQUIRE(remaining.empty());
std::string response;
response.resize(256, '\0');
while (true)
{
std::cerr << "client.poll(read)\n";
pstatus = co_await client.poll(coro::poll_op::read);
REQUIRE(pstatus == coro::poll_status::event);
std::cerr << "client.recv()\n";
auto [rstatus, rspan] = client.recv(response);
if (rstatus == coro::net::recv_status::would_block)
{
std::cerr << coro::net::to_string(rstatus) << "\n";
continue;
}
else
{
std::cerr << coro::net::to_string(rstatus) << "\n";
REQUIRE(rstatus == coro::net::recv_status::ok);
REQUIRE(rspan.size() == server_msg.size());
response.resize(rspan.size());
break;
}
}
REQUIRE(response == server_msg);
std::cerr << "client received message: " << response << "\n";
std::cerr << "client finished\n";
co_return;
};
auto make_server_task = [&]() -> coro::task<void> {
co_await scheduler->schedule();
coro::net::tcp_server server{scheduler, coro::net::tcp_server::options{.ssl_ctx = &server_ssl_context}};
std::cerr << "server.poll()\n";
auto pstatus = co_await server.poll();
REQUIRE(pstatus == coro::poll_status::event);
std::cerr << "server.accept()\n";
auto client = server.accept();
REQUIRE(client.socket().is_valid());
std::cerr << "server client.handshake()\n";
auto hstatus = co_await client.ssl_handshake();
REQUIRE(hstatus == coro::net::ssl_handshake_status::ok);
std::cerr << "server client.poll(read)\n";
pstatus = co_await client.poll(coro::poll_op::read);
REQUIRE(pstatus == coro::poll_status::event);
std::string buffer;
buffer.resize(256, '\0');
std::cerr << "server client.recv()\n";
auto [rstatus, rspan] = client.recv(buffer);
REQUIRE(rstatus == coro::net::recv_status::ok);
REQUIRE(rspan.size() == client_msg.size());
buffer.resize(rspan.size());
REQUIRE(buffer == client_msg);
std::cerr << "server received message: " << buffer << "\n";
std::cerr << "server client.poll(write)\n";
pstatus = co_await client.poll(coro::poll_op::write);
REQUIRE(pstatus == coro::poll_status::event);
std::cerr << "server client.send()\n";
auto [sstatus, remaining] = client.send(server_msg);
REQUIRE(sstatus == coro::net::send_status::ok);
REQUIRE(remaining.empty());
std::cerr << "server finished\n";
co_return;
};
coro::sync_wait(coro::when_all(make_server_task(), make_client_task()));
}


@ -1,118 +0,0 @@
#include "catch.hpp"
#include <coro/coro.hpp>
TEST_CASE("udp one way")
{
const std::string msg{"aaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbcccccccccccccccccc"};
auto scheduler = std::make_shared<coro::io_scheduler>(
coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}});
auto make_send_task = [&]() -> coro::task<void> {
co_await scheduler->schedule();
coro::net::udp_peer peer{scheduler};
coro::net::udp_peer::info peer_info{};
auto [sstatus, remaining] = peer.sendto(peer_info, msg);
REQUIRE(sstatus == coro::net::send_status::ok);
REQUIRE(remaining.empty());
co_return;
};
auto make_recv_task = [&]() -> coro::task<void> {
co_await scheduler->schedule();
coro::net::udp_peer::info self_info{.address = coro::net::ip_address::from_string("0.0.0.0")};
coro::net::udp_peer self{scheduler, self_info};
auto pstatus = co_await self.poll(coro::poll_op::read);
REQUIRE(pstatus == coro::poll_status::event);
std::string buffer(64, '\0');
auto [rstatus, peer_info, rspan] = self.recvfrom(buffer);
REQUIRE(rstatus == coro::net::recv_status::ok);
REQUIRE(peer_info.address == coro::net::ip_address::from_string("127.0.0.1"));
// The peer's port will be randomly picked by the kernel since it wasn't bound.
REQUIRE(rspan.size() == msg.size());
buffer.resize(rspan.size());
REQUIRE(buffer == msg);
co_return;
};
coro::sync_wait(coro::when_all(make_recv_task(), make_send_task()));
}
TEST_CASE("udp echo peers")
{
const std::string peer1_msg{"Hello from peer1!"};
const std::string peer2_msg{"Hello from peer2!!"};
auto scheduler = std::make_shared<coro::io_scheduler>(
coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}});
auto make_peer_task = [&scheduler](
uint16_t my_port,
uint16_t peer_port,
bool send_first,
const std::string my_msg,
const std::string peer_msg) -> coro::task<void> {
co_await scheduler->schedule();
coro::net::udp_peer::info my_info{.address = coro::net::ip_address::from_string("0.0.0.0"), .port = my_port};
coro::net::udp_peer::info peer_info{
.address = coro::net::ip_address::from_string("127.0.0.1"), .port = peer_port};
coro::net::udp_peer me{scheduler, my_info};
if (send_first)
{
// Send my message to my peer first.
auto [sstatus, remaining] = me.sendto(peer_info, my_msg);
REQUIRE(sstatus == coro::net::send_status::ok);
REQUIRE(remaining.empty());
}
else
{
// Poll for my peer's message first.
auto pstatus = co_await me.poll(coro::poll_op::read);
REQUIRE(pstatus == coro::poll_status::event);
std::string buffer(64, '\0');
auto [rstatus, recv_peer_info, rspan] = me.recvfrom(buffer);
REQUIRE(rstatus == coro::net::recv_status::ok);
REQUIRE(recv_peer_info == peer_info);
REQUIRE(rspan.size() == peer_msg.size());
buffer.resize(rspan.size());
REQUIRE(buffer == peer_msg);
}
if (send_first)
{
// I sent first so now I need to await my peer's message.
auto pstatus = co_await me.poll(coro::poll_op::read);
REQUIRE(pstatus == coro::poll_status::event);
std::string buffer(64, '\0');
auto [rstatus, recv_peer_info, rspan] = me.recvfrom(buffer);
REQUIRE(rstatus == coro::net::recv_status::ok);
REQUIRE(recv_peer_info == peer_info);
REQUIRE(rspan.size() == peer_msg.size());
buffer.resize(rspan.size());
REQUIRE(buffer == peer_msg);
}
else
{
auto [sstatus, remaining] = me.sendto(peer_info, my_msg);
REQUIRE(sstatus == coro::net::send_status::ok);
REQUIRE(remaining.empty());
}
co_return;
};
coro::sync_wait(coro::when_all(
make_peer_task(8081, 8080, false, peer2_msg, peer1_msg),
make_peer_task(8080, 8081, true, peer1_msg, peer2_msg)));
}


@ -1,264 +0,0 @@
#include "catch.hpp"
#include <coro/coro.hpp>
#include <chrono>
#include <thread>
TEST_CASE("event single awaiter", "[event]")
{
coro::event e{};
auto func = [&]() -> coro::task<uint64_t> {
co_await e;
co_return 42;
};
auto task = func();
task.resume();
REQUIRE_FALSE(task.is_ready());
e.set(); // this will automatically resume the task that is awaiting the event.
REQUIRE(task.is_ready());
REQUIRE(task.promise().return_value() == 42);
}
auto producer(coro::event& event) -> void
{
// Long running task that consumers are waiting for goes here...
event.set();
}
auto consumer(const coro::event& event) -> coro::task<uint64_t>
{
co_await event;
// Normally consume from some object which has the stored result from the producer
co_return 42;
}
TEST_CASE("event one watcher", "[event]")
{
coro::event e{};
auto value = consumer(e);
value.resume(); // start co_awaiting event
REQUIRE_FALSE(value.is_ready());
producer(e);
REQUIRE(value.promise().return_value() == 42);
}
TEST_CASE("event multiple watchers", "[event]")
{
coro::event e{};
auto value1 = consumer(e);
auto value2 = consumer(e);
auto value3 = consumer(e);
value1.resume(); // start co_awaiting event
value2.resume();
value3.resume();
REQUIRE_FALSE(value1.is_ready());
REQUIRE_FALSE(value2.is_ready());
REQUIRE_FALSE(value3.is_ready());
producer(e);
REQUIRE(value1.promise().return_value() == 42);
REQUIRE(value2.promise().return_value() == 42);
REQUIRE(value3.promise().return_value() == 42);
}
TEST_CASE("event reset", "[event]")
{
coro::event e{};
e.reset();
REQUIRE_FALSE(e.is_set());
auto value1 = consumer(e);
value1.resume(); // start co_awaiting event
REQUIRE_FALSE(value1.is_ready());
producer(e);
REQUIRE(value1.promise().return_value() == 42);
e.reset();
auto value2 = consumer(e);
value2.resume();
REQUIRE_FALSE(value2.is_ready());
producer(e);
REQUIRE(value2.promise().return_value() == 42);
}
TEST_CASE("event fifo", "[event]")
{
coro::event e{};
// Need consistent FIFO on a single thread to verify the execution order is correct.
coro::thread_pool tp{coro::thread_pool::options{.thread_count = 1}};
std::atomic<uint64_t> counter{0};
auto make_waiter = [&](uint64_t value) -> coro::task<void> {
co_await tp.schedule();
co_await e;
counter++;
REQUIRE(counter == value);
co_return;
};
auto make_setter = [&]() -> coro::task<void> {
co_await tp.schedule();
REQUIRE(counter == 0);
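// fifo resumes the waiters in the order they began awaiting the event; the
// REQUIRE(counter == value) checks in the waiters rely on that ordering.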
e.set(coro::resume_order_policy::fifo);
co_return;
};
coro::sync_wait(
coro::when_all(make_waiter(1), make_waiter(2), make_waiter(3), make_waiter(4), make_waiter(5), make_setter()));
REQUIRE(counter == 5);
}
TEST_CASE("event fifo none", "[event]")
{
coro::event e{};
// Need consistent FIFO on a single thread to verify the execution order is correct.
coro::thread_pool tp{coro::thread_pool::options{.thread_count = 1}};
std::atomic<uint64_t> counter{0};
auto make_setter = [&]() -> coro::task<void> {
co_await tp.schedule();
REQUIRE(counter == 0);
e.set(coro::resume_order_policy::fifo);
co_return;
};
coro::sync_wait(coro::when_all(make_setter()));
REQUIRE(counter == 0);
}
TEST_CASE("event fifo single", "[event]")
{
coro::event e{};
// Need consistent FIFO on a single thread to verify the execution order is correct.
coro::thread_pool tp{coro::thread_pool::options{.thread_count = 1}};
std::atomic<uint64_t> counter{0};
auto make_waiter = [&](uint64_t value) -> coro::task<void> {
co_await tp.schedule();
co_await e;
counter++;
REQUIRE(counter == value);
co_return;
};
auto make_setter = [&]() -> coro::task<void> {
co_await tp.schedule();
REQUIRE(counter == 0);
e.set(coro::resume_order_policy::fifo);
co_return;
};
coro::sync_wait(coro::when_all(make_waiter(1), make_setter()));
REQUIRE(counter == 1);
}
TEST_CASE("event fifo executor", "[event]")
{
coro::event e{};
// Need consistent FIFO on a single thread to verify the execution order is correct.
coro::thread_pool tp{coro::thread_pool::options{.thread_count = 1}};
std::atomic<uint64_t> counter{0};
auto make_waiter = [&](uint64_t value) -> coro::task<void> {
co_await tp.schedule();
co_await e;
counter++;
REQUIRE(counter == value);
co_return;
};
auto make_setter = [&]() -> coro::task<void> {
co_await tp.schedule();
REQUIRE(counter == 0);
e.set(tp, coro::resume_order_policy::fifo);
co_return;
};
coro::sync_wait(
coro::when_all(make_waiter(1), make_waiter(2), make_waiter(3), make_waiter(4), make_waiter(5), make_setter()));
REQUIRE(counter == 5);
}
TEST_CASE("event fifo none executor", "[event]")
{
coro::event e{};
// Need consistent FIFO on a single thread to verify the execution order is correct.
coro::thread_pool tp{coro::thread_pool::options{.thread_count = 1}};
std::atomic<uint64_t> counter{0};
auto make_setter = [&]() -> coro::task<void> {
co_await tp.schedule();
REQUIRE(counter == 0);
e.set(tp, coro::resume_order_policy::fifo);
co_return;
};
coro::sync_wait(coro::when_all(make_setter()));
REQUIRE(counter == 0);
}
TEST_CASE("event fifo single executor", "[event]")
{
coro::event e{};
// Need consistent FIFO on a single thread to verify the execution order is correct.
coro::thread_pool tp{coro::thread_pool::options{.thread_count = 1}};
std::atomic<uint64_t> counter{0};
auto make_waiter = [&](uint64_t value) -> coro::task<void> {
co_await tp.schedule();
co_await e;
counter++;
REQUIRE(counter == value);
co_return;
};
auto make_setter = [&]() -> coro::task<void> {
co_await tp.schedule();
REQUIRE(counter == 0);
e.set(tp, coro::resume_order_policy::fifo);
co_return;
};
coro::sync_wait(coro::when_all(make_waiter(1), make_setter()));
REQUIRE(counter == 1);
}


@ -1,40 +0,0 @@
#include "catch.hpp"
#include <coro/coro.hpp>
TEST_CASE("generator single yield", "[generator]")
{
std::string msg{"Hello World Generator!"};
auto func = [&]() -> coro::generator<std::string> { co_yield msg; };
for (const auto& v : func())
{
REQUIRE(v == msg);
}
}
TEST_CASE("generator infinite incrementing integer yield", "[generator]")
{
constexpr const int64_t max = 1024;
auto func = []() -> coro::generator<int64_t> {
int64_t i{0};
while (true)
{
++i;
co_yield i;
}
};
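// The generator is lazy: advancing the range-for iterator resumes the coroutine
// just long enough to produce the next co_yield value.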
int64_t v{1};
for (const auto& v_1 : func())
{
REQUIRE(v == v_1);
++v;
if (v > max)
{
break;
}
}
}


@ -1,732 +0,0 @@
#include "catch.hpp"
#include <coro/coro.hpp>
#include <atomic>
#include <chrono>
#include <thread>
#include <cstring>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <sys/socket.h>
#include <sys/timerfd.h>
#include <sys/types.h>
#include <unistd.h>
using namespace std::chrono_literals;
TEST_CASE("io_scheduler schedule single task", "[io_scheduler]")
{
coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
auto make_task = [&]() -> coro::task<uint64_t> {
co_await s.schedule();
co_return 42;
};
auto value = coro::sync_wait(make_task());
REQUIRE(value == 42);
std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n";
s.shutdown();
std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n";
REQUIRE(s.empty());
}
TEST_CASE("io_scheduler submit mutiple tasks", "[io_scheduler]")
{
constexpr std::size_t n = 1000;
std::atomic<uint64_t> counter{0};
std::vector<coro::task<void>> tasks{};
tasks.reserve(n);
coro::io_scheduler s{};
auto make_task = [&]() -> coro::task<void> {
co_await s.schedule();
counter++;
co_return;
};
for (std::size_t i = 0; i < n; ++i)
{
tasks.emplace_back(make_task());
}
coro::sync_wait(coro::when_all(std::move(tasks)));
REQUIRE(counter == n);
std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n";
s.shutdown();
std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n";
REQUIRE(s.empty());
}
TEST_CASE("io_scheduler task with multiple events", "[io_scheduler]")
{
std::atomic<uint64_t> counter{0};
coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
coro::event e1;
coro::event e2;
coro::event e3;
auto make_wait_task = [&]() -> coro::task<void> {
co_await s.schedule();
co_await e1;
counter++;
co_await e2;
counter++;
co_await e3;
counter++;
co_return;
};
auto make_set_task = [&](coro::event& e) -> coro::task<void> {
co_await s.schedule();
e.set();
};
coro::sync_wait(coro::when_all(make_wait_task(), make_set_task(e1), make_set_task(e2), make_set_task(e3)));
REQUIRE(counter == 3);
std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n";
s.shutdown();
std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n";
REQUIRE(s.empty());
}
TEST_CASE("io_scheduler task with read poll", "[io_scheduler]")
{
auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
auto make_poll_read_task = [&]() -> coro::task<void> {
co_await s.schedule();
auto status = co_await s.poll(trigger_fd, coro::poll_op::read);
REQUIRE(status == coro::poll_status::event);
co_return;
};
auto make_poll_write_task = [&]() -> coro::task<void> {
co_await s.schedule();
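// Writing a non-zero value to the eventfd makes it readable, waking the poller above.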
uint64_t value{42};
auto unused = write(trigger_fd, &value, sizeof(value));
(void)unused;
co_return;
};
coro::sync_wait(coro::when_all(make_poll_read_task(), make_poll_write_task()));
std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n";
s.shutdown();
std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n";
REQUIRE(s.empty());
close(trigger_fd);
}
TEST_CASE("io_scheduler task with read poll with timeout", "[io_scheduler]")
{
auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
auto make_poll_read_task = [&]() -> coro::task<void> {
co_await s.schedule();
// Poll with a timeout but don't timeout.
auto status = co_await s.poll(trigger_fd, coro::poll_op::read, 50ms);
REQUIRE(status == coro::poll_status::event);
co_return;
};
auto make_poll_write_task = [&]() -> coro::task<void> {
co_await s.schedule();
uint64_t value{42};
auto unused = write(trigger_fd, &value, sizeof(value));
(void)unused;
co_return;
};
coro::sync_wait(coro::when_all(make_poll_read_task(), make_poll_write_task()));
std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n";
s.shutdown();
std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n";
REQUIRE(s.empty());
close(trigger_fd);
}
TEST_CASE("io_scheduler task with read poll timeout", "[io_scheduler]")
{
auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
auto make_task = [&]() -> coro::task<void> {
co_await s.schedule();
// Poll with a timeout and let it time out.
auto status = co_await s.poll(trigger_fd, coro::poll_op::read, 10ms);
REQUIRE(status == coro::poll_status::timeout);
co_return;
};
coro::sync_wait(make_task());
std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n";
s.shutdown();
std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n";
REQUIRE(s.empty());
close(trigger_fd);
}
// TODO: This probably requires a TCP socket?
// TEST_CASE("io_scheduler task with read poll closed socket", "[io_scheduler]")
// {
// auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
// coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options { .thread_count = 1 }}};
// auto make_poll_task = [&]() -> coro::task<void> {
// co_await s.schedule();
// auto status = co_await s.poll(trigger_fd, coro::poll_op::read, 1000ms);
// REQUIRE(status == coro::poll_status::closed);
// co_return;
// };
// auto make_close_task = [&]() -> coro::task<void> {
// co_await s.schedule();
// std::this_thread::sleep_for(100ms);
// // shutdown(trigger_fd, SHUT_RDWR);
// close(trigger_fd);
// co_return;
// };
// coro::sync_wait(coro::when_all(make_poll_task(), make_close_task()));
// s.shutdown();
// REQUIRE(s.empty());
// }
TEST_CASE("io_scheduler separate thread resume", "[io_scheduler]")
{
coro::io_scheduler s1{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
coro::io_scheduler s2{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
coro::event e{};
auto make_s1_task = [&]() -> coro::task<void> {
co_await s1.schedule();
auto tid = std::this_thread::get_id();
co_await e;
// This coroutine will hop to the other scheduler's single thread upon resuming.
REQUIRE_FALSE(tid == std::this_thread::get_id());
co_return;
};
auto make_s2_task = [&]() -> coro::task<void> {
co_await s2.schedule();
// Wait a bit to be sure the wait on 'e' in the other scheduler is done first.
std::this_thread::sleep_for(10ms);
e.set();
co_return;
};
coro::sync_wait(coro::when_all(make_s1_task(), make_s2_task()));
s1.shutdown();
REQUIRE(s1.empty());
s2.shutdown();
REQUIRE(s2.empty());
}
TEST_CASE("io_scheduler separate thread resume spawned thread", "[io_scheduler]")
{
coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
auto make_task = [&]() -> coro::task<void> {
co_await s.schedule();
coro::event e{};
auto tid = std::this_thread::get_id();
// Normally this thread is probably already running in real world use cases. In general
// a 3rd party api will provide a "user data" void* or the ability to capture variables
// via lambdas for on-complete callbacks; here we mimic an on-complete callback by
// capturing the event handle.
std::thread third_party_thread([&e, &s]() -> void {
// mimic some expensive computation
// Resume the coroutine back onto the scheduler, not this background thread.
e.set(s);
});
third_party_thread.detach();
// Wait on the handle until the 3rd party service is completed.
co_await e;
REQUIRE(tid == std::this_thread::get_id());
};
coro::sync_wait(make_task());
s.shutdown();
REQUIRE(s.empty());
}
TEST_CASE("io_scheduler separate thread resume with return", "[io_scheduler]")
{
constexpr uint64_t expected_value{1337};
coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
coro::event start_service{};
coro::event service_done{};
std::atomic<uint64_t> output;
std::thread service{[&]() -> void {
while (!start_service.is_set())
{
std::this_thread::sleep_for(1ms);
}
output = expected_value;
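// Passing the scheduler to set() resumes the awaiting coroutine on the
// io_scheduler's thread pool rather than on this service thread.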
service_done.set(s);
}};
auto third_party_service = [&](int multiplier) -> coro::task<uint64_t> {
start_service.set();
co_await service_done;
co_return output * multiplier;
};
auto make_task = [&]() -> coro::task<void> {
co_await s.schedule();
int multiplier{5};
uint64_t value = co_await third_party_service(multiplier);
REQUIRE(value == (expected_value * multiplier));
};
coro::sync_wait(make_task());
service.join();
std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n";
s.shutdown();
std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n";
REQUIRE(s.empty());
}
TEST_CASE("io_scheduler with basic task", "[io_scheduler]")
{
constexpr std::size_t expected_value{5};
coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
auto add_data = [&](uint64_t val) -> coro::task<int> {
co_await s.schedule();
co_return val;
};
auto func = [&]() -> coro::task<int> {
co_await s.schedule();
auto output_tasks = co_await coro::when_all(add_data(1), add_data(1), add_data(1), add_data(1), add_data(1));
int counter{0};
std::apply([&counter](auto&&... tasks) -> void { ((counter += tasks.return_value()), ...); }, output_tasks);
co_return counter;
};
auto counter = coro::sync_wait(func());
REQUIRE(counter == expected_value);
std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n";
s.shutdown();
std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n";
REQUIRE(s.empty());
}
TEST_CASE("io_scheduler scheduler_after", "[io_scheduler]")
{
constexpr std::chrono::milliseconds wait_for{50};
std::atomic<uint64_t> counter{0};
std::thread::id tid;
auto func = [&](coro::io_scheduler& s, std::chrono::milliseconds amount) -> coro::task<void> {
co_await s.schedule_after(amount);
++counter;
// Make sure schedule_after() context switches into the worker thread.
REQUIRE(tid == std::this_thread::get_id());
co_return;
};
{
coro::io_scheduler s{coro::io_scheduler::options{
.pool = coro::thread_pool::options{
.thread_count = 1, .on_thread_start_functor = [&](std::size_t) { tid = std::this_thread::get_id(); }}}};
auto start = std::chrono::steady_clock::now();
coro::sync_wait(func(s, 0ms));
auto stop = std::chrono::steady_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);
REQUIRE(counter == 1);
REQUIRE(duration < wait_for);
std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n";
s.shutdown();
std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n";
REQUIRE(s.empty());
}
{
coro::io_scheduler s{coro::io_scheduler::options{
.pool = coro::thread_pool::options{
.thread_count = 1, .on_thread_start_functor = [&](std::size_t) { tid = std::this_thread::get_id(); }}}};
auto start = std::chrono::steady_clock::now();
coro::sync_wait(func(s, wait_for));
auto stop = std::chrono::steady_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);
REQUIRE(counter == 2);
REQUIRE(duration >= wait_for);
std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n";
s.shutdown();
std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n";
REQUIRE(s.empty());
}
}
TEST_CASE("io_scheduler schedule_at", "[io_scheduler]")
{
// Because schedule_at() takes its own time internally, the wait_for might be off by a bit.
constexpr std::chrono::milliseconds epsilon{3};
constexpr std::chrono::milliseconds wait_for{50};
std::atomic<uint64_t> counter{0};
std::thread::id tid;
coro::io_scheduler s{coro::io_scheduler::options{
.pool = coro::thread_pool::options{
.thread_count = 1, .on_thread_start_functor = [&](std::size_t) { tid = std::this_thread::get_id(); }}}};
auto func = [&](std::chrono::steady_clock::time_point time) -> coro::task<void> {
co_await s.schedule_at(time);
++counter;
REQUIRE(tid == std::this_thread::get_id());
co_return;
};
{
auto start = std::chrono::steady_clock::now();
coro::sync_wait(func(std::chrono::steady_clock::now() + wait_for));
auto stop = std::chrono::steady_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);
REQUIRE(counter == 1);
REQUIRE(duration >= (wait_for - epsilon));
}
{
auto start = std::chrono::steady_clock::now();
coro::sync_wait(func(std::chrono::steady_clock::now()));
auto stop = std::chrono::steady_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);
REQUIRE(counter == 2);
REQUIRE(duration <= 10ms); // Just verify it's less than the wait_for time period.
}
{
auto start = std::chrono::steady_clock::now();
coro::sync_wait(func(std::chrono::steady_clock::now() - 1s));
auto stop = std::chrono::steady_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);
REQUIRE(counter == 3);
REQUIRE(duration <= 10ms);
}
}
TEST_CASE("io_scheduler yield", "[io_scheduler]")
{
std::thread::id tid;
coro::io_scheduler s{coro::io_scheduler::options{
.pool = coro::thread_pool::options{
.thread_count = 1, .on_thread_start_functor = [&](std::size_t) { tid = std::this_thread::get_id(); }}}};
auto func = [&]() -> coro::task<void> {
REQUIRE(tid != std::this_thread::get_id());
co_await s.schedule();
REQUIRE(tid == std::this_thread::get_id());
        co_await s.yield(); // yield() is really a thread pool function, but it works through the io_scheduler as well
REQUIRE(tid == std::this_thread::get_id());
co_return;
};
coro::sync_wait(func());
std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n";
s.shutdown();
std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n";
REQUIRE(s.empty());
}
TEST_CASE("io_scheduler yield_for", "[io_scheduler]")
{
coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
const std::chrono::milliseconds wait_for{50};
auto make_task = [&]() -> coro::task<std::chrono::milliseconds> {
co_await s.schedule();
auto start = std::chrono::steady_clock::now();
co_await s.yield_for(wait_for);
co_return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - start);
};
auto duration = coro::sync_wait(make_task());
REQUIRE(duration >= wait_for);
std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n";
s.shutdown();
std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n";
REQUIRE(s.empty());
}
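
This test relies on `yield_for()` behaving as a non-blocking sleep: the coroutine suspends for the duration while the worker thread stays free. A minimal sketch of that usage, drawn only from the API the tests above exercise:

```C++
#include <coro/coro.hpp>
#include <chrono>

int main()
{
    coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};

    auto sleeper = [&]() -> coro::task<void> {
        co_await s.schedule();
        // Unlike std::this_thread::sleep_for, this suspends the coroutine and
        // frees the worker thread to run other tasks in the meantime.
        co_await s.yield_for(std::chrono::milliseconds{50});
        co_return;
    };

    coro::sync_wait(sleeper());
    return 0;
}
```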
TEST_CASE("io_scheduler yield_until", "[io_scheduler]")
{
coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
    // Because yield_until() takes its own time measurements internally, the wait_for might be off by a bit.
const std::chrono::milliseconds epsilon{3};
const std::chrono::milliseconds wait_for{50};
auto make_task = [&]() -> coro::task<std::chrono::milliseconds> {
co_await s.schedule();
auto start = std::chrono::steady_clock::now();
co_await s.yield_until(start + wait_for);
co_return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - start);
};
auto duration = coro::sync_wait(make_task());
REQUIRE(duration >= (wait_for - epsilon));
std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n";
s.shutdown();
std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n";
REQUIRE(s.empty());
}
TEST_CASE("io_scheduler multipler event waiters", "[io_scheduler]")
{
const constexpr std::size_t total{10};
coro::event e{};
coro::io_scheduler s{};
auto func = [&]() -> coro::task<uint64_t> {
co_await e;
co_return 1;
};
auto spawn = [&]() -> coro::task<void> {
co_await s.schedule();
std::vector<coro::task<uint64_t>> tasks;
for (size_t i = 0; i < total; ++i)
{
tasks.emplace_back(func());
}
auto results = co_await coro::when_all(std::move(tasks));
uint64_t counter{0};
for (const auto& task : results)
{
counter += task.return_value();
}
REQUIRE(counter == total);
};
auto release = [&]() -> coro::task<void> {
co_await s.schedule_after(10ms);
e.set(s);
};
coro::sync_wait(coro::when_all(spawn(), release()));
std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n";
s.shutdown();
std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n";
REQUIRE(s.empty());
}
TEST_CASE("io_scheduler self generating coroutine (stack overflow check)", "[io_scheduler]")
{
const constexpr std::size_t total{1'000'000};
uint64_t counter{0};
coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
std::vector<coro::task<void>> tasks;
tasks.reserve(total);
auto func = [&](auto f) -> coro::task<void> {
co_await s.schedule();
++counter;
if (counter % total == 0)
{
co_return;
}
        // co_await f(f) _will_ stack overflow since each coroutine frame links to its parent. By storing
        // each new invocation in the vector the frames are not linked, and we can make sure the scheduler
        // doesn't choke on this many tasks being scheduled. (See the sketch after this test.)
tasks.emplace_back(f(f));
tasks.back().resume();
co_return;
};
coro::sync_wait(func(func));
while (tasks.size() < total - 1)
{
std::this_thread::sleep_for(1ms);
}
REQUIRE(tasks.size() == total - 1);
std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n";
s.shutdown();
std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n";
REQUIRE(s.empty());
}
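
The comment in the test above is the whole point of its vector-of-tasks construction. As a minimal sketch of the pattern it is warning against (not part of the deleted test, and using only `coro::task`/`coro::sync_wait` as exercised throughout these tests):

```C++
#include <coro/coro.hpp>

// With co_await, each awaited frame is linked to its awaiting parent:
coro::task<void> chained(uint64_t depth)
{
    if (depth > 0)
    {
        // The frame of chained(depth) stays alive until the child finishes.
        co_await chained(depth - 1);
    }
    co_return;
}

int main()
{
    // Small depths are fine; the deleted test's point is that a chain a
    // million frames deep is what _would_ overflow, which is why it stores
    // each new task in a vector and resumes it instead of co_await-ing it.
    coro::sync_wait(chained(1'000));
    return 0;
}
```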
TEST_CASE("io_scheduler manual process events thread pool", "[io_scheduler]")
{
auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
coro::io_scheduler s{coro::io_scheduler::options{
.thread_strategy = coro::io_scheduler::thread_strategy_t::manual,
.pool = coro::thread_pool::options{
.thread_count = 1,
}}};
std::atomic<bool> polling{false};
auto make_poll_read_task = [&]() -> coro::task<void> {
std::cerr << "poll task start s.size() == " << s.size() << "\n";
co_await s.schedule();
polling = true;
std::cerr << "poll task polling s.size() == " << s.size() << "\n";
auto status = co_await s.poll(trigger_fd, coro::poll_op::read);
REQUIRE(status == coro::poll_status::event);
std::cerr << "poll task exiting s.size() == " << s.size() << "\n";
co_return;
};
auto make_poll_write_task = [&]() -> coro::task<void> {
std::cerr << "write task start s.size() == " << s.size() << "\n";
co_await s.schedule();
uint64_t value{42};
std::cerr << "write task writing s.size() == " << s.size() << "\n";
auto unused = write(trigger_fd, &value, sizeof(value));
(void)unused;
std::cerr << "write task exiting s.size() == " << s.size() << "\n";
co_return;
};
auto poll_task = make_poll_read_task();
auto write_task = make_poll_write_task();
poll_task.resume(); // get to co_await s.poll();
while (!polling)
{
std::this_thread::sleep_for(10ms);
}
write_task.resume();
while (s.process_events(100ms) > 0)
;
std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n";
s.shutdown();
std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n";
REQUIRE(s.empty());
close(trigger_fd);
}
TEST_CASE("io_scheduler manual process events inline", "[io_scheduler]")
{
auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
coro::io_scheduler s{coro::io_scheduler::options{
.thread_strategy = coro::io_scheduler::thread_strategy_t::manual,
.execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_inline}};
auto make_poll_read_task = [&]() -> coro::task<void> {
std::cerr << "poll task start s.size() == " << s.size() << "\n";
co_await s.schedule();
std::cerr << "poll task polling s.size() == " << s.size() << "\n";
auto status = co_await s.poll(trigger_fd, coro::poll_op::read);
REQUIRE(status == coro::poll_status::event);
std::cerr << "poll task exiting s.size() == " << s.size() << "\n";
co_return;
};
auto make_poll_write_task = [&]() -> coro::task<void> {
std::cerr << "write task start s.size() == " << s.size() << "\n";
co_await s.schedule();
uint64_t value{42};
std::cerr << "write task writing s.size() == " << s.size() << "\n";
auto unused = write(trigger_fd, &value, sizeof(value));
(void)unused;
std::cerr << "write task exiting s.size() == " << s.size() << "\n";
co_return;
};
auto poll_task = make_poll_read_task();
auto write_task = make_poll_write_task();
// Start the tasks by scheduling them into the io scheduler.
poll_task.resume();
write_task.resume();
// Now process them to completion.
while (true)
{
auto remaining = s.process_events(100ms);
std::cerr << "remaining " << remaining << "\n";
if (remaining == 0)
{
break;
}
};
std::cerr << "io_scheduler.size() before shutdown = " << s.size() << "\n";
s.shutdown();
std::cerr << "io_scheduler.size() after shutdown = " << s.size() << "\n";
REQUIRE(s.empty());
close(trigger_fd);
}
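
The two manual tests above drive the scheduler entirely from the calling thread. A condensed sketch of that driving loop, assuming the same `thread_strategy_t::manual` / `process_tasks_inline` options the tests use:

```C++
#include <coro/coro.hpp>
#include <chrono>

int main()
{
    // No background event thread; the caller pumps events explicitly.
    coro::io_scheduler s{coro::io_scheduler::options{
        .thread_strategy    = coro::io_scheduler::thread_strategy_t::manual,
        .execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_inline}};

    auto make_task = [&]() -> coro::task<void> {
        co_await s.schedule();
        co_return;
    };

    auto task = make_task();
    task.resume(); // suspends on schedule() until events are processed

    // Pump until the scheduler reports no more outstanding work.
    while (s.process_events(std::chrono::milliseconds{100}) > 0)
    {
    }
    return 0;
}
```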
TEST_CASE("io_scheduler task throws", "[io_scheduler]")
{
coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
auto func = [&]() -> coro::task<uint64_t> {
co_await s.schedule();
throw std::runtime_error{"I always throw."};
co_return 42;
};
REQUIRE_THROWS(coro::sync_wait(func()));
}
TEST_CASE("io_scheduler task throws after resume", "[io_scheduler]")
{
coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
auto make_thrower = [&]() -> coro::task<bool> {
co_await s.schedule();
std::cerr << "Throwing task is doing some work...\n";
co_await s.yield();
throw std::runtime_error{"I always throw."};
co_return true;
};
REQUIRE_THROWS(coro::sync_wait(make_thrower()));
}

View file

@ -1,110 +0,0 @@
#include "catch.hpp"
#include <coro/coro.hpp>
#include <chrono>
#include <thread>
TEST_CASE("latch count=0", "[latch]")
{
coro::latch l{0};
auto make_task = [&]() -> coro::task<uint64_t> {
co_await l;
co_return 42;
};
auto task = make_task();
task.resume();
REQUIRE(task.is_ready()); // The latch never waits due to zero count.
REQUIRE(task.promise().return_value() == 42);
}
TEST_CASE("latch count=1", "[latch]")
{
coro::latch l{1};
auto make_task = [&]() -> coro::task<uint64_t> {
auto workers = l.remaining();
co_await l;
co_return workers;
};
auto task = make_task();
task.resume();
REQUIRE_FALSE(task.is_ready());
l.count_down();
REQUIRE(task.is_ready());
REQUIRE(task.promise().return_value() == 1);
}
TEST_CASE("latch count=1 count_down=5", "[latch]")
{
coro::latch l{1};
auto make_task = [&]() -> coro::task<uint64_t> {
auto workers = l.remaining();
co_await l;
co_return workers;
};
auto task = make_task();
task.resume();
REQUIRE_FALSE(task.is_ready());
l.count_down(5);
REQUIRE(task.is_ready());
REQUIRE(task.promise().return_value() == 1);
}
TEST_CASE("latch count=5 count_down=1 x5", "[latch]")
{
coro::latch l{5};
auto make_task = [&]() -> coro::task<uint64_t> {
auto workers = l.remaining();
co_await l;
co_return workers;
};
auto task = make_task();
task.resume();
REQUIRE_FALSE(task.is_ready());
l.count_down(1);
REQUIRE_FALSE(task.is_ready());
l.count_down(1);
REQUIRE_FALSE(task.is_ready());
l.count_down(1);
REQUIRE_FALSE(task.is_ready());
l.count_down(1);
REQUIRE_FALSE(task.is_ready());
l.count_down(1);
REQUIRE(task.is_ready());
REQUIRE(task.promise().return_value() == 5);
}
TEST_CASE("latch count=5 count_down=5", "[latch]")
{
coro::latch l{5};
auto make_task = [&]() -> coro::task<uint64_t> {
auto workers = l.remaining();
co_await l;
co_return workers;
};
auto task = make_task();
task.resume();
REQUIRE_FALSE(task.is_ready());
l.count_down(5);
REQUIRE(task.is_ready());
REQUIRE(task.promise().return_value() == 5);
}
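
Taken together these cases pin down the latch contract: awaiting suspends until the internal count reaches zero, and counting down past zero is harmless. A condensed usage sketch, using only the API the tests above exercise:

```C++
#include <coro/coro.hpp>
#include <iostream>

int main()
{
    coro::latch l{3};

    auto waiter = [&]() -> coro::task<void> {
        co_await l; // suspends until count_down() has been called 3 times
        std::cout << "all workers finished\n";
        co_return;
    };

    auto task = waiter();
    task.resume();  // suspends on the latch
    l.count_down(); // 2 remaining
    l.count_down(); // 1 remaining
    l.count_down(); // 0 remaining -> waiter resumes
    return 0;
}
```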

View file

@ -1,113 +0,0 @@
#include "catch.hpp"
#include <coro/coro.hpp>
#include <chrono>
#include <thread>
TEST_CASE("mutex single waiter not locked", "[mutex]")
{
std::vector<uint64_t> output;
coro::mutex m;
auto make_emplace_task = [&](coro::mutex& m) -> coro::task<void> {
std::cerr << "Acquiring lock\n";
{
auto scoped_lock = co_await m.lock();
REQUIRE_FALSE(m.try_lock());
std::cerr << "lock acquired, emplacing back 1\n";
output.emplace_back(1);
std::cerr << "coroutine done\n";
}
// The scoped lock should release the lock upon destructing.
REQUIRE(m.try_lock());
REQUIRE_FALSE(m.try_lock());
m.unlock();
co_return;
};
coro::sync_wait(make_emplace_task(m));
REQUIRE(m.try_lock());
m.unlock();
REQUIRE(output.size() == 1);
REQUIRE(output[0] == 1);
}
TEST_CASE("mutex many waiters until event", "[mutex]")
{
std::atomic<uint64_t> value{0};
std::vector<coro::task<void>> tasks;
coro::thread_pool tp{coro::thread_pool::options{.thread_count = 1}};
coro::mutex m; // acquires and holds the lock until the event is triggered
coro::event e; // triggers the blocking thread to release the lock
auto make_task = [&](uint64_t id) -> coro::task<void> {
co_await tp.schedule();
std::cerr << "id = " << id << " waiting to acquire the lock\n";
auto scoped_lock = co_await m.lock();
// Should always be locked upon acquiring the locks.
REQUIRE_FALSE(m.try_lock());
std::cerr << "id = " << id << " lock acquired\n";
value.fetch_add(1, std::memory_order::relaxed);
std::cerr << "id = " << id << " coroutine done\n";
co_return;
};
auto make_block_task = [&]() -> coro::task<void> {
co_await tp.schedule();
std::cerr << "block task acquiring lock\n";
auto scoped_lock = co_await m.lock();
REQUIRE_FALSE(m.try_lock());
std::cerr << "block task acquired lock, waiting on event\n";
co_await e;
co_return;
};
auto make_set_task = [&]() -> coro::task<void> {
co_await tp.schedule();
std::cerr << "set task setting event\n";
e.set();
co_return;
};
// Grab mutex so all threads block.
tasks.emplace_back(make_block_task());
// Create N tasks that attempt to lock the mutex.
for (uint64_t i = 1; i <= 4; ++i)
{
tasks.emplace_back(make_task(i));
}
tasks.emplace_back(make_set_task());
coro::sync_wait(coro::when_all(std::move(tasks)));
REQUIRE(value == 4);
}
TEST_CASE("mutex scoped_lock unlock prior to scope exit", "[mutex]")
{
coro::mutex m;
auto make_task = [&]() -> coro::task<void> {
{
auto lk = co_await m.lock();
REQUIRE_FALSE(m.try_lock());
lk.unlock();
REQUIRE(m.try_lock());
}
co_return;
};
coro::sync_wait(make_task());
}
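
The scoped-lock behavior these tests check (release on scope exit, or earlier via `unlock()`) boils down to a short pattern. A minimal sketch:

```C++
#include <coro/coro.hpp>

int main()
{
    coro::mutex m;

    auto critical = [&]() -> coro::task<void> {
        auto lock = co_await m.lock(); // suspends instead of blocking when contended
        // ... guarded work ...
        // The scoped lock releases on destruction, or earlier via lock.unlock().
        co_return;
    };

    coro::sync_wait(critical());
    return 0;
}
```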

View file

@ -1,113 +0,0 @@
#include "catch.hpp"
#include <coro/coro.hpp>
#include <chrono>
#include <thread>
TEST_CASE("ring_buffer zero num_elements", "[ring_buffer]")
{
REQUIRE_THROWS(coro::ring_buffer<uint64_t, 0>{});
}
TEST_CASE("ring_buffer single element", "[ring_buffer]")
{
const size_t iterations = 10;
coro::ring_buffer<uint64_t, 1> rb{};
std::vector<uint64_t> output{};
auto make_producer_task = [&]() -> coro::task<void> {
for (size_t i = 1; i <= iterations; ++i)
{
std::cerr << "produce: " << i << "\n";
co_await rb.produce(i);
}
co_return;
};
auto make_consumer_task = [&]() -> coro::task<void> {
for (size_t i = 1; i <= iterations; ++i)
{
auto value = co_await rb.consume();
std::cerr << "consume: " << value << "\n";
output.emplace_back(value);
}
co_return;
};
coro::sync_wait(coro::when_all(make_producer_task(), make_consumer_task()));
for (size_t i = 1; i <= iterations; ++i)
{
REQUIRE(output[i - 1] == i);
}
REQUIRE(rb.empty());
}
TEST_CASE("ring_buffer many elements many producers many consumers", "[ring_buffer]")
{
const size_t iterations = 1'000'000;
const size_t consumers = 100;
const size_t producers = 100;
coro::thread_pool tp{coro::thread_pool::options{.thread_count = 4}};
coro::ring_buffer<uint64_t, 64> rb{};
auto make_producer_task = [&]() -> coro::task<void> {
co_await tp.schedule();
auto to_produce = iterations / producers;
for (size_t i = 1; i <= to_produce; ++i)
{
co_await rb.produce(i);
}
        // Wait for all the values to be consumed prior to sending the stop signal. (See the sketch after this test.)
while (!rb.empty())
{
co_await tp.yield();
}
rb.stop_signal_notify_waiters(); // signal to all consumers (or even producers) we are done/shutting down.
co_return;
};
auto make_consumer_task = [&]() -> coro::task<void> {
co_await tp.schedule();
try
{
while (true)
{
auto value = co_await rb.consume();
(void)value;
co_await tp.yield(); // mimic some work
}
}
catch (const coro::stop_signal&)
{
// requested to stop/shutdown.
}
co_return;
};
std::vector<coro::task<void>> tasks{};
    tasks.reserve(consumers + producers);
for (size_t i = 0; i < consumers; ++i)
{
tasks.emplace_back(make_consumer_task());
}
for (size_t i = 0; i < producers; ++i)
{
tasks.emplace_back(make_producer_task());
}
coro::sync_wait(coro::when_all(std::move(tasks)));
REQUIRE(rb.empty());
}
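
The producer in the test above encodes the shutdown protocol: drain the buffer, then notify waiters so blocked consumers unwind via `coro::stop_signal`. A condensed sketch of that pattern, using only calls the test itself makes:

```C++
#include <coro/coro.hpp>

int main()
{
    coro::ring_buffer<uint64_t, 16> rb{};
    coro::thread_pool tp{coro::thread_pool::options{.thread_count = 2}};

    auto producer = [&]() -> coro::task<void> {
        co_await tp.schedule();
        for (uint64_t i = 0; i < 100; ++i)
        {
            co_await rb.produce(i); // suspends while the buffer is full
        }
        while (!rb.empty()) // drain before signalling shutdown
        {
            co_await tp.yield();
        }
        rb.stop_signal_notify_waiters();
        co_return;
    };

    auto consumer = [&]() -> coro::task<void> {
        co_await tp.schedule();
        try
        {
            while (true)
            {
                auto value = co_await rb.consume(); // suspends while empty
                (void)value;
            }
        }
        catch (const coro::stop_signal&)
        {
            // shutdown requested
        }
        co_return;
    };

    coro::sync_wait(coro::when_all(producer(), consumer()));
    return 0;
}
```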

View file

@ -1,226 +0,0 @@
#include "catch.hpp"
#include <coro/coro.hpp>
#include <chrono>
#include <thread>
#include <vector>
TEST_CASE("semaphore binary", "[semaphore]")
{
std::vector<uint64_t> output;
coro::semaphore s{1};
auto make_emplace_task = [&](coro::semaphore& s) -> coro::task<void> {
std::cerr << "Acquiring semaphore\n";
co_await s.acquire();
REQUIRE_FALSE(s.try_acquire());
std::cerr << "semaphore acquired, emplacing back 1\n";
output.emplace_back(1);
std::cerr << "coroutine done with resource, releasing\n";
REQUIRE(s.value() == 0);
s.release();
REQUIRE(s.value() == 1);
REQUIRE(s.try_acquire());
s.release();
co_return;
};
coro::sync_wait(make_emplace_task(s));
REQUIRE(s.value() == 1);
REQUIRE(s.try_acquire());
REQUIRE(s.value() == 0);
s.release();
REQUIRE(s.value() == 1);
REQUIRE(output.size() == 1);
REQUIRE(output[0] == 1);
}
TEST_CASE("semaphore binary many waiters until event", "[semaphore]")
{
std::atomic<uint64_t> value{0};
std::vector<coro::task<void>> tasks;
coro::semaphore s{1}; // acquires and holds the semaphore until the event is triggered
coro::event e; // triggers the blocking thread to release the semaphore
auto make_task = [&](uint64_t id) -> coro::task<void> {
std::cerr << "id = " << id << " waiting to acquire the semaphore\n";
co_await s.acquire();
// Should always be locked upon acquiring the semaphore.
REQUIRE_FALSE(s.try_acquire());
std::cerr << "id = " << id << " semaphore acquired\n";
value.fetch_add(1, std::memory_order::relaxed);
std::cerr << "id = " << id << " semaphore release\n";
s.release();
co_return;
};
auto make_block_task = [&]() -> coro::task<void> {
std::cerr << "block task acquiring lock\n";
co_await s.acquire();
REQUIRE_FALSE(s.try_acquire());
std::cerr << "block task acquired semaphore, waiting on event\n";
co_await e;
std::cerr << "block task releasing semaphore\n";
s.release();
co_return;
};
auto make_set_task = [&]() -> coro::task<void> {
std::cerr << "set task setting event\n";
e.set();
co_return;
};
tasks.emplace_back(make_block_task());
// Create N tasks that attempt to acquire the semaphore.
for (uint64_t i = 1; i <= 4; ++i)
{
tasks.emplace_back(make_task(i));
}
tasks.emplace_back(make_set_task());
coro::sync_wait(coro::when_all(std::move(tasks)));
REQUIRE(value == 4);
}
TEST_CASE("semaphore ringbuffer", "[semaphore]")
{
const std::size_t iterations = 10;
// This test is run in the context of a thread pool so the producer task can yield. Otherwise
// the producer will just run wild!
coro::thread_pool tp{coro::thread_pool::options{.thread_count = 1}};
std::atomic<uint64_t> value{0};
std::vector<coro::task<void>> tasks;
coro::semaphore s{2, 2};
auto make_consumer_task = [&](uint64_t id) -> coro::task<void> {
co_await tp.schedule();
try
{
while (true)
{
std::cerr << "id = " << id << " waiting to acquire the semaphore\n";
co_await s.acquire();
std::cerr << "id = " << id << " semaphore acquired, consuming value\n";
value.fetch_add(1, std::memory_order::release);
                // In the ring buffer acquire is 'consuming'; we never release back into the buffer
}
}
catch (const coro::stop_signal&)
{
std::cerr << "id = " << id << " exiting\n";
}
co_return;
};
auto make_producer_task = [&]() -> coro::task<void> {
co_await tp.schedule();
for (size_t i = 2; i < iterations; ++i)
{
std::cerr << "producer: doing work\n";
// Do some work...
std::cerr << "producer: releasing\n";
s.release();
std::cerr << "producer: produced\n";
co_await tp.yield();
}
std::cerr << "producer exiting\n";
s.stop_signal_notify_waiters();
co_return;
};
tasks.emplace_back(make_producer_task());
tasks.emplace_back(make_consumer_task(1));
coro::sync_wait(coro::when_all(std::move(tasks)));
REQUIRE(value == iterations);
}
TEST_CASE("semaphore ringbuffer many producers and consumers", "[semaphore]")
{
const std::size_t consumers = 16;
const std::size_t producers = 1;
const std::size_t iterations = 100'000;
std::atomic<uint64_t> value{0};
coro::semaphore s{50, 0};
coro::io_scheduler tp{}; // let er rip
auto make_consumer_task = [&](uint64_t id) -> coro::task<void> {
co_await tp.schedule();
try
{
while (true)
{
co_await s.acquire();
co_await tp.schedule();
value.fetch_add(1, std::memory_order::relaxed);
}
}
catch (const coro::stop_signal&)
{
std::cerr << "consumer " << id << " exiting\n";
}
co_return;
};
auto make_producer_task = [&](uint64_t id) -> coro::task<void> {
co_await tp.schedule();
for (size_t i = 0; i < iterations; ++i)
{
s.release();
}
while (value.load(std::memory_order::relaxed) < iterations)
{
co_await tp.yield_for(std::chrono::milliseconds{1});
}
std::cerr << "producer " << id << " exiting\n";
s.stop_signal_notify_waiters();
co_return;
};
std::vector<coro::task<void>> tasks{};
for (size_t i = 0; i < consumers; ++i)
{
tasks.emplace_back(make_consumer_task(i));
}
for (size_t i = 0; i < producers; ++i)
{
tasks.emplace_back(make_producer_task(i));
}
coro::sync_wait(coro::when_all(std::move(tasks)));
REQUIRE(value >= iterations);
}
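
The two-argument constructor used above is `{max, initial}`. A minimal sketch of the counting behavior these tests rely on, under the assumption (implied by the binary test above) that `release()` resumes a suspended waiter inline:

```C++
#include <coro/coro.hpp>
#include <iostream>

int main()
{
    // Max value 8, starting at 0, so acquire() suspends until a release().
    coro::semaphore s{8, 0};

    auto consumer = [&]() -> coro::task<void> {
        co_await s.acquire(); // suspends until a release() arrives
        std::cout << "acquired, value now " << s.value() << "\n";
        co_return;
    };

    auto task = consumer();
    task.resume(); // suspends: the semaphore starts at 0
    s.release();   // hands the unit to the waiter, resuming it
    return 0;
}
```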

View file

@ -1,160 +0,0 @@
#include "catch.hpp"
#include <coro/coro.hpp>
#include <chrono>
#include <thread>
TEST_CASE("mutex single waiter not locked exclusive", "[shared_mutex]")
{
auto tp = std::make_shared<coro::thread_pool>(coro::thread_pool::options{.thread_count = 1});
std::vector<uint64_t> output;
coro::shared_mutex<coro::thread_pool> m{tp};
auto make_emplace_task = [&](coro::shared_mutex<coro::thread_pool>& m) -> coro::task<void> {
std::cerr << "Acquiring lock exclusive\n";
{
auto scoped_lock = co_await m.lock();
REQUIRE_FALSE(m.try_lock());
REQUIRE_FALSE(m.try_lock_shared());
std::cerr << "lock acquired, emplacing back 1\n";
output.emplace_back(1);
std::cerr << "coroutine done\n";
}
// The scoped lock should release the lock upon destructing.
REQUIRE(m.try_lock());
m.unlock();
co_return;
};
coro::sync_wait(make_emplace_task(m));
REQUIRE(m.try_lock());
m.unlock();
REQUIRE(output.size() == 1);
REQUIRE(output[0] == 1);
}
TEST_CASE("mutex single waiter not locked shared", "[shared_mutex]")
{
auto tp = std::make_shared<coro::thread_pool>(coro::thread_pool::options{.thread_count = 1});
std::vector<uint64_t> values{1, 2, 3};
coro::shared_mutex<coro::thread_pool> m{tp};
auto make_emplace_task = [&](coro::shared_mutex<coro::thread_pool>& m) -> coro::task<void> {
std::cerr << "Acquiring lock shared\n";
{
auto scoped_lock = co_await m.lock_shared();
REQUIRE_FALSE(m.try_lock());
REQUIRE(m.try_lock_shared());
std::cerr << "lock acquired, reading values\n";
for (const auto& v : values)
{
std::cerr << v << ",";
}
std::cerr << "\ncoroutine done\n";
m.unlock_shared(); // manually locked shared on a shared, unlock
}
// The scoped lock should release the lock upon destructing.
REQUIRE(m.try_lock());
m.unlock();
co_return;
};
coro::sync_wait(make_emplace_task(m));
REQUIRE(m.try_lock_shared());
m.unlock_shared();
REQUIRE(m.try_lock());
m.unlock();
}
TEST_CASE("mutex many shared and exclusive waiters interleaved", "[shared_mutex]")
{
auto tp = std::make_shared<coro::io_scheduler>(
coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 8}});
coro::shared_mutex<coro::io_scheduler> m{tp};
std::atomic<bool> read_value{false};
auto make_shared_task = [&]() -> coro::task<bool> {
co_await tp->schedule();
std::cerr << "make_shared_task shared lock acquiring\n";
auto scoped_lock = co_await m.lock_shared();
std::cerr << "make_shared_task shared lock acquired\n";
bool value = read_value.load(std::memory_order::acquire);
std::cerr << "make_shared_task shared lock releasing on thread_id = " << std::this_thread::get_id() << "\n";
co_return value;
};
auto make_exclusive_task = [&]() -> coro::task<void> {
// Let some readers get through.
co_await tp->yield_for(std::chrono::milliseconds{50});
{
std::cerr << "make_shared_task exclusive lock acquiring\n";
auto scoped_lock = co_await m.lock();
std::cerr << "make_shared_task exclusive lock acquired\n";
// Stack readers on the mutex
co_await tp->yield_for(std::chrono::milliseconds{50});
read_value.exchange(true, std::memory_order::release);
std::cerr << "make_shared_task exclusive lock releasing\n";
}
co_return;
};
auto make_shared_tasks_task = [&]() -> coro::task<void> {
co_await tp->schedule();
std::vector<coro::task<bool>> shared_tasks{};
bool stop{false};
while (!stop)
{
shared_tasks.emplace_back(make_shared_task());
shared_tasks.back().resume();
co_await tp->yield_for(std::chrono::milliseconds{1});
for (const auto& st : shared_tasks)
{
if (st.is_ready())
{
stop = st.promise().return_value();
}
}
}
while (true)
{
bool tasks_remaining{false};
for (const auto& st : shared_tasks)
{
if (!st.is_ready())
{
tasks_remaining = true;
break;
}
}
if (!tasks_remaining)
{
break;
}
}
co_return;
};
coro::sync_wait(coro::when_all(make_shared_tasks_task(), make_exclusive_task()));
}
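
These tests all construct the shared mutex with a shared executor, since waiters have to be resumed somewhere. A minimal reader/writer sketch using only the API exercised above:

```C++
#include <coro/coro.hpp>
#include <memory>

int main()
{
    // shared_mutex needs an executor to resume waiters on, hence the
    // shared_ptr thread_pool parameter used throughout the tests above.
    auto tp = std::make_shared<coro::thread_pool>(coro::thread_pool::options{.thread_count = 2});
    coro::shared_mutex<coro::thread_pool> m{tp};

    auto reader = [&]() -> coro::task<void> {
        co_await tp->schedule();
        auto lock = co_await m.lock_shared(); // many readers may hold this at once
        co_return;
    };

    auto writer = [&]() -> coro::task<void> {
        co_await tp->schedule();
        auto lock = co_await m.lock(); // exclusive: waits for readers to drain
        co_return;
    };

    coro::sync_wait(coro::when_all(reader(), reader(), writer()));
    return 0;
}
```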

View file

@ -1,58 +0,0 @@
#include "catch.hpp"
#include <coro/coro.hpp>
TEST_CASE("sync_wait simple integer return", "[sync_wait]")
{
auto func = []() -> coro::task<int> { co_return 11; };
auto result = coro::sync_wait(func());
REQUIRE(result == 11);
}
TEST_CASE("sync_wait void", "[sync_wait]")
{
std::string output;
auto func = [&]() -> coro::task<void> {
output = "hello from sync_wait<void>\n";
co_return;
};
coro::sync_wait(func());
REQUIRE(output == "hello from sync_wait<void>\n");
}
TEST_CASE("sync_wait task co_await single", "[sync_wait]")
{
auto answer = []() -> coro::task<int> {
std::cerr << "\tThinking deep thoughts...\n";
co_return 42;
};
auto await_answer = [&]() -> coro::task<int> {
std::cerr << "\tStarting to wait for answer.\n";
auto a = answer();
std::cerr << "\tGot the coroutine, getting the value.\n";
auto v = co_await a;
std::cerr << "\tCoroutine value is " << v << "\n";
REQUIRE(v == 42);
v = co_await a;
std::cerr << "\tValue is still " << v << "\n";
REQUIRE(v == 42);
co_return 1337;
};
auto output = coro::sync_wait(await_answer());
REQUIRE(output == 1337);
}
TEST_CASE("sync_wait task that throws", "[sync_wait]")
{
auto f = []() -> coro::task<uint64_t> {
throw std::runtime_error("I always throw!");
co_return 1;
};
REQUIRE_THROWS(coro::sync_wait(f()));
}

View file

@ -1,243 +0,0 @@
#include "catch.hpp"
#include <coro/coro.hpp>
#include <chrono>
#include <thread>
TEST_CASE("task hello world", "[task]")
{
using task_type = coro::task<std::string>;
auto h = []() -> task_type { co_return "Hello"; }();
auto w = []() -> task_type { co_return "World"; }();
REQUIRE(h.promise().return_value().empty());
REQUIRE(w.promise().return_value().empty());
h.resume(); // task suspends immediately
w.resume();
REQUIRE(h.is_ready());
REQUIRE(w.is_ready());
auto w_value = std::move(w).promise().return_value();
REQUIRE(h.promise().return_value() == "Hello");
REQUIRE(w_value == "World");
REQUIRE(w.promise().return_value().empty());
}
TEST_CASE("task void", "[task]")
{
using namespace std::chrono_literals;
using task_type = coro::task<>;
auto t = []() -> task_type {
std::this_thread::sleep_for(10ms);
co_return;
}();
t.resume();
REQUIRE(t.is_ready());
}
TEST_CASE("task exception thrown", "[task]")
{
using task_type = coro::task<std::string>;
std::string throw_msg = "I'll be reached";
auto task = [&]() -> task_type {
throw std::runtime_error(throw_msg);
co_return "I'll never be reached";
}();
task.resume();
REQUIRE(task.is_ready());
bool thrown{false};
try
{
auto value = task.promise().return_value();
}
catch (const std::exception& e)
{
thrown = true;
REQUIRE(e.what() == throw_msg);
}
REQUIRE(thrown);
}
TEST_CASE("task in a task", "[task]")
{
auto outer_task = []() -> coro::task<> {
auto inner_task = []() -> coro::task<int> {
std::cerr << "inner_task start\n";
std::cerr << "inner_task stop\n";
co_return 42;
};
std::cerr << "outer_task start\n";
auto v = co_await inner_task();
REQUIRE(v == 42);
std::cerr << "outer_task stop\n";
}();
    outer_task.resume(); // all tasks start suspended, kick it off.
REQUIRE(outer_task.is_ready());
}
TEST_CASE("task in a task in a task", "[task]")
{
auto task1 = []() -> coro::task<> {
std::cerr << "task1 start\n";
auto task2 = []() -> coro::task<int> {
std::cerr << "\ttask2 start\n";
auto task3 = []() -> coro::task<int> {
std::cerr << "\t\ttask3 start\n";
std::cerr << "\t\ttask3 stop\n";
co_return 3;
};
auto v2 = co_await task3();
REQUIRE(v2 == 3);
std::cerr << "\ttask2 stop\n";
co_return 2;
};
auto v1 = co_await task2();
REQUIRE(v1 == 2);
std::cerr << "task1 stop\n";
}();
task1.resume(); // all tasks start suspended, kick it off.
REQUIRE(task1.is_ready());
}
TEST_CASE("task multiple suspends return void", "[task]")
{
auto task = []() -> coro::task<void> {
co_await std::suspend_always{};
co_await std::suspend_never{};
co_await std::suspend_always{};
co_await std::suspend_always{};
co_return;
}();
task.resume(); // initial suspend
REQUIRE_FALSE(task.is_ready());
task.resume(); // first internal suspend
REQUIRE_FALSE(task.is_ready());
task.resume(); // second internal suspend
REQUIRE_FALSE(task.is_ready());
task.resume(); // third internal suspend
REQUIRE(task.is_ready());
}
TEST_CASE("task multiple suspends return integer", "[task]")
{
auto task = []() -> coro::task<int> {
co_await std::suspend_always{};
co_await std::suspend_always{};
co_await std::suspend_always{};
co_return 11;
}();
task.resume(); // initial suspend
REQUIRE_FALSE(task.is_ready());
task.resume(); // first internal suspend
REQUIRE_FALSE(task.is_ready());
task.resume(); // second internal suspend
REQUIRE_FALSE(task.is_ready());
task.resume(); // third internal suspend
REQUIRE(task.is_ready());
REQUIRE(task.promise().return_value() == 11);
}
TEST_CASE("task resume from promise to coroutine handles of different types", "[task]")
{
auto task1 = [&]() -> coro::task<int> {
std::cerr << "Task ran\n";
co_return 42;
}();
auto task2 = [&]() -> coro::task<void> {
std::cerr << "Task 2 ran\n";
co_return;
}();
    // task.resume(); is the normal method of resuming
std::vector<std::coroutine_handle<>> handles;
handles.emplace_back(std::coroutine_handle<coro::task<int>::promise_type>::from_promise(task1.promise()));
handles.emplace_back(std::coroutine_handle<coro::task<void>::promise_type>::from_promise(task2.promise()));
auto& coro_handle1 = handles[0];
coro_handle1.resume();
auto& coro_handle2 = handles[1];
coro_handle2.resume();
REQUIRE(task1.is_ready());
REQUIRE(coro_handle1.done());
REQUIRE(task1.promise().return_value() == 42);
REQUIRE(task2.is_ready());
REQUIRE(coro_handle2.done());
}
TEST_CASE("task throws void", "[task]")
{
auto task = []() -> coro::task<void> {
throw std::runtime_error{"I always throw."};
co_return;
}();
REQUIRE_NOTHROW(task.resume());
REQUIRE(task.is_ready());
REQUIRE_THROWS_AS(task.promise().return_void(), std::runtime_error);
}
TEST_CASE("task throws non-void l-value", "[task]")
{
auto task = []() -> coro::task<int> {
throw std::runtime_error{"I always throw."};
co_return 42;
}();
REQUIRE_NOTHROW(task.resume());
REQUIRE(task.is_ready());
REQUIRE_THROWS_AS(task.promise().return_value(), std::runtime_error);
}
TEST_CASE("task throws non-void r-value", "[task]")
{
struct type
{
int m_value;
};
auto task = []() -> coro::task<type> {
type return_value{42};
throw std::runtime_error{"I always throw."};
co_return std::move(return_value);
}();
task.resume();
REQUIRE(task.is_ready());
REQUIRE_THROWS_AS(task.promise().return_value(), std::runtime_error);
}
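
All of these cases drive tasks by hand rather than through `sync_wait`. The manual protocol they rely on, gathered into one short sketch:

```C++
#include <coro/coro.hpp>
#include <iostream>

int main()
{
    auto task = []() -> coro::task<int> { co_return 7; }();

    // Tasks start suspended at their initial suspend point.
    task.resume();
    if (task.is_ready())
    {
        // return_value() re-throws if the task ended by exception.
        std::cout << task.promise().return_value() << "\n";
    }
    return 0;
}
```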

View file

@ -1,193 +0,0 @@
#include "catch.hpp"
#include <coro/coro.hpp>
#include <iostream>
TEST_CASE("thread_pool one worker one task", "[thread_pool]")
{
coro::thread_pool tp{coro::thread_pool::options{1}};
auto func = [&tp]() -> coro::task<uint64_t> {
co_await tp.schedule(); // Schedule this coroutine on the scheduler.
co_return 42;
};
auto result = coro::sync_wait(func());
REQUIRE(result == 42);
}
TEST_CASE("thread_pool one worker many tasks tuple", "[thread_pool]")
{
coro::thread_pool tp{coro::thread_pool::options{1}};
auto f = [&tp]() -> coro::task<uint64_t> {
co_await tp.schedule(); // Schedule this coroutine on the scheduler.
co_return 50;
};
auto tasks = coro::sync_wait(coro::when_all(f(), f(), f(), f(), f()));
REQUIRE(std::tuple_size<decltype(tasks)>() == 5);
uint64_t counter{0};
std::apply([&counter](auto&&... t) -> void { ((counter += t.return_value()), ...); }, tasks);
REQUIRE(counter == 250);
}
TEST_CASE("thread_pool one worker many tasks vector", "[thread_pool]")
{
coro::thread_pool tp{coro::thread_pool::options{1}};
auto f = [&tp]() -> coro::task<uint64_t> {
co_await tp.schedule(); // Schedule this coroutine on the scheduler.
co_return 50;
};
std::vector<coro::task<uint64_t>> input_tasks;
input_tasks.emplace_back(f());
input_tasks.emplace_back(f());
input_tasks.emplace_back(f());
auto output_tasks = coro::sync_wait(coro::when_all(std::move(input_tasks)));
REQUIRE(output_tasks.size() == 3);
uint64_t counter{0};
for (const auto& task : output_tasks)
{
counter += task.return_value();
}
REQUIRE(counter == 150);
}
TEST_CASE("thread_pool N workers 100k tasks", "[thread_pool]")
{
constexpr const std::size_t iterations = 100'000;
coro::thread_pool tp{};
auto make_task = [](coro::thread_pool& tp) -> coro::task<uint64_t> {
co_await tp.schedule();
co_return 1;
};
std::vector<coro::task<uint64_t>> input_tasks{};
input_tasks.reserve(iterations);
for (std::size_t i = 0; i < iterations; ++i)
{
input_tasks.emplace_back(make_task(tp));
}
auto output_tasks = coro::sync_wait(coro::when_all(std::move(input_tasks)));
REQUIRE(output_tasks.size() == iterations);
uint64_t counter{0};
for (const auto& task : output_tasks)
{
counter += task.return_value();
}
REQUIRE(counter == iterations);
}
TEST_CASE("thread_pool 1 worker task spawns another task", "[thread_pool]")
{
coro::thread_pool tp{coro::thread_pool::options{1}};
auto f1 = [](coro::thread_pool& tp) -> coro::task<uint64_t> {
co_await tp.schedule();
auto f2 = [](coro::thread_pool& tp) -> coro::task<uint64_t> {
co_await tp.schedule();
co_return 5;
};
co_return 1 + co_await f2(tp);
};
REQUIRE(coro::sync_wait(f1(tp)) == 6);
}
TEST_CASE("thread_pool shutdown", "[thread_pool]")
{
coro::thread_pool tp{coro::thread_pool::options{1}};
auto f = [](coro::thread_pool& tp) -> coro::task<bool> {
try
{
co_await tp.schedule();
}
catch (...)
{
co_return true;
}
co_return false;
};
tp.shutdown();
REQUIRE(coro::sync_wait(f(tp)) == true);
}
TEST_CASE("thread_pool schedule functor", "[thread_pool]")
{
coro::thread_pool tp{coro::thread_pool::options{1}};
auto f = []() -> uint64_t { return 1; };
auto result = coro::sync_wait(tp.schedule(f));
REQUIRE(result == 1);
tp.shutdown();
REQUIRE_THROWS(coro::sync_wait(tp.schedule(f)));
}
TEST_CASE("thread_pool schedule functor return_type = void", "[thread_pool]")
{
coro::thread_pool tp{coro::thread_pool::options{1}};
std::atomic<uint64_t> counter{0};
auto f = [](std::atomic<uint64_t>& c) -> void { c++; };
coro::sync_wait(tp.schedule(f, std::ref(counter)));
REQUIRE(counter == 1);
tp.shutdown();
REQUIRE_THROWS(coro::sync_wait(tp.schedule(f, std::ref(counter))));
}
TEST_CASE("thread_pool event jump threads", "[thread_pool]")
{
// This test verifies that the thread that sets the event ends up executing every waiter on the event
coro::thread_pool tp1{coro::thread_pool::options{.thread_count = 1}};
coro::thread_pool tp2{coro::thread_pool::options{.thread_count = 1}};
coro::event e{};
auto make_tp1_task = [&]() -> coro::task<void> {
co_await tp1.schedule();
auto before_thread_id = std::this_thread::get_id();
std::cerr << "before event thread_id = " << before_thread_id << "\n";
co_await e;
auto after_thread_id = std::this_thread::get_id();
std::cerr << "after event thread_id = " << after_thread_id << "\n";
REQUIRE(before_thread_id != after_thread_id);
co_return;
};
auto make_tp2_task = [&]() -> coro::task<void> {
co_await tp2.schedule();
std::this_thread::sleep_for(std::chrono::milliseconds{10});
std::cerr << "setting event\n";
e.set();
co_return;
};
coro::sync_wait(coro::when_all(make_tp1_task(), make_tp2_task()));
}
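
The test above documents a subtle property: plain `e.set()` resumes every waiter on the thread that calls it. The io_scheduler tests earlier instead use `e.set(s)`, which hands waiters back to a scheduler. A condensed sketch contrasting the two, built only from calls those tests make:

```C++
#include <coro/coro.hpp>
#include <chrono>

int main()
{
    coro::io_scheduler s{};
    coro::event e{};

    auto waiter = [&]() -> coro::task<void> {
        co_await s.schedule();
        co_await e; // suspends until the event is set
        co_return;
    };

    auto setter = [&]() -> coro::task<void> {
        co_await s.schedule_after(std::chrono::milliseconds{10});
        // e.set() would resume the waiter inline on this thread;
        // e.set(s) re-schedules the waiter onto the io_scheduler instead.
        e.set(s);
        co_return;
    };

    coro::sync_wait(coro::when_all(waiter(), setter()));
    return 0;
}
```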

View file

@ -1,109 +0,0 @@
#include "catch.hpp"
#include <coro/coro.hpp>
#include <list>
#include <vector>
TEST_CASE("when_all single task with tuple container", "[when_all]")
{
auto make_task = [](uint64_t amount) -> coro::task<uint64_t> { co_return amount; };
auto output_tasks = coro::sync_wait(coro::when_all(make_task(100)));
REQUIRE(std::tuple_size<decltype(output_tasks)>() == 1);
uint64_t counter{0};
std::apply([&counter](auto&&... tasks) -> void { ((counter += tasks.return_value()), ...); }, output_tasks);
REQUIRE(counter == 100);
}
TEST_CASE("when_all single task with tuple container by move", "[when_all]")
{
auto make_task = [](uint64_t amount) -> coro::task<uint64_t> { co_return amount; };
auto t = make_task(100);
auto output_tasks = coro::sync_wait(coro::when_all(std::move(t)));
REQUIRE(std::tuple_size<decltype(output_tasks)>() == 1);
uint64_t counter{0};
std::apply([&counter](auto&&... tasks) -> void { ((counter += tasks.return_value()), ...); }, output_tasks);
REQUIRE(counter == 100);
}
TEST_CASE("when_all multiple tasks with tuple container", "[when_all]")
{
auto make_task = [](uint64_t amount) -> coro::task<uint64_t> { co_return amount; };
auto output_tasks = coro::sync_wait(coro::when_all(make_task(100), make_task(50), make_task(20)));
REQUIRE(std::tuple_size<decltype(output_tasks)>() == 3);
uint64_t counter{0};
std::apply([&counter](auto&&... tasks) -> void { ((counter += tasks.return_value()), ...); }, output_tasks);
REQUIRE(counter == 170);
}
TEST_CASE("when_all single task with vector container", "[when_all]")
{
auto make_task = [](uint64_t amount) -> coro::task<uint64_t> { co_return amount; };
std::vector<coro::task<uint64_t>> input_tasks;
input_tasks.emplace_back(make_task(100));
auto output_tasks = coro::sync_wait(coro::when_all(std::move(input_tasks)));
REQUIRE(output_tasks.size() == 1);
uint64_t counter{0};
for (const auto& task : output_tasks)
{
counter += task.return_value();
}
REQUIRE(counter == 100);
}
TEST_CASE("when_all multple task withs vector container", "[when_all]")
{
auto make_task = [](uint64_t amount) -> coro::task<uint64_t> { co_return amount; };
std::vector<coro::task<uint64_t>> input_tasks;
input_tasks.emplace_back(make_task(100));
input_tasks.emplace_back(make_task(200));
input_tasks.emplace_back(make_task(550));
input_tasks.emplace_back(make_task(1000));
auto output_tasks = coro::sync_wait(coro::when_all(std::move(input_tasks)));
REQUIRE(output_tasks.size() == 4);
uint64_t counter{0};
for (const auto& task : output_tasks)
{
counter += task.return_value();
}
REQUIRE(counter == 1850);
}
TEST_CASE("when_all multple task withs list container", "[when_all]")
{
auto make_task = [](uint64_t amount) -> coro::task<uint64_t> { co_return amount; };
std::list<coro::task<uint64_t>> input_tasks;
input_tasks.emplace_back(make_task(100));
input_tasks.emplace_back(make_task(200));
input_tasks.emplace_back(make_task(550));
input_tasks.emplace_back(make_task(1000));
auto output_tasks = coro::sync_wait(coro::when_all(std::move(input_tasks)));
REQUIRE(output_tasks.size() == 4);
uint64_t counter{0};
for (const auto& task : output_tasks)
{
counter += task.return_value();
}
REQUIRE(counter == 1850);
}

@ -1 +0,0 @@
Subproject commit 799e81d4ace75af7d530857d4f8b35913a27463e