mirror of https://gitlab.com/niansa/libcrosscoro.git
synced 2025-03-06 20:53:32 +01:00
Compare commits (20 commits)

db23d23637, cf6ef72276, 9d83519b29, 83341c7891, eb8aea39b2,
273d0a5bbb, 42f1212cde, 40cb369aab, 8dbad30d2c, e955a33c16,
c7612f5dba, 4c506391b0, 73d8424008, d1263aebd7, 285416bfe5,
5de38b2d60, 475bcf6d8b, 78b6e19927, 310abc18bc, e9b225e42f
82 changed files with 867 additions and 26079 deletions
.githooks/pre-commit
@@ -1,83 +0,0 @@

```sh
#!/usr/bin/env bash
# bash (not plain sh) is required for the [[ ]] tests and the
# ${var/pattern/replacement} substitutions used below.

FILE_EXTS=".c .h .cpp .hpp .cc .hh .cxx .tcc"

# Determines if a file has the right extension to be clang-format'ed.
should_clang_format() {
    local filename=$(basename "$1")
    local extension=".${filename##*.}"
    local ext

    local result=0

    # Ignore the test/catch.hpp file.
    if [[ "$1" != *"catch"* ]]; then
        for ext in $FILE_EXTS; do
            # If the extension is in the list of extensions to reformat, echo 1.
            [[ "$ext" == "$extension" ]] && result=1 && break
        done
    fi

    echo $result
}

# Run clang-format across the project's changed (staged) files.
for file in $(git diff-index --cached --name-only HEAD); do
    if [ -f "${file}" ] && [ "$(should_clang_format "${file}")" != "0" ]; then
        echo "clang-format ${file}"
        clang-format -i --style=file "${file}"
        git add "${file}"
    fi
done

# Update the README.md example code with the given macros.
# template_contents=$(cat '.githooks/readme-template.md')
cp .githooks/readme-template.md README.md

template_contents=$(cat 'README.md')
example_contents=$(cat 'examples/coro_task.cpp')
echo "${template_contents/\$\{EXAMPLE_CORO_TASK_CPP\}/$example_contents}" > README.md

template_contents=$(cat 'README.md')
example_contents=$(cat 'examples/coro_generator.cpp')
echo "${template_contents/\$\{EXAMPLE_CORO_GENERATOR_CPP\}/$example_contents}" > README.md

template_contents=$(cat 'README.md')
example_contents=$(cat 'examples/coro_event.cpp')
echo "${template_contents/\$\{EXAMPLE_CORO_EVENT_CPP\}/$example_contents}" > README.md

template_contents=$(cat 'README.md')
example_contents=$(cat 'examples/coro_latch.cpp')
echo "${template_contents/\$\{EXAMPLE_CORO_LATCH_CPP\}/$example_contents}" > README.md

template_contents=$(cat 'README.md')
example_contents=$(cat 'examples/coro_mutex.cpp')
echo "${template_contents/\$\{EXAMPLE_CORO_MUTEX_CPP\}/$example_contents}" > README.md

template_contents=$(cat 'README.md')
example_contents=$(cat 'examples/coro_thread_pool.cpp')
echo "${template_contents/\$\{EXAMPLE_CORO_THREAD_POOL_CPP\}/$example_contents}" > README.md

template_contents=$(cat 'README.md')
example_contents=$(cat 'examples/coro_io_scheduler.cpp')
echo "${template_contents/\$\{EXAMPLE_CORO_IO_SCHEDULER_CPP\}/$example_contents}" > README.md

template_contents=$(cat 'README.md')
example_contents=$(cat 'examples/coro_task_container.cpp')
echo "${template_contents/\$\{EXAMPLE_CORO_TASK_CONTAINER_CPP\}/$example_contents}" > README.md

template_contents=$(cat 'README.md')
example_contents=$(cat 'examples/coro_semaphore.cpp')
echo "${template_contents/\$\{EXAMPLE_CORO_SEMAPHORE_CPP\}/$example_contents}" > README.md

template_contents=$(cat 'README.md')
example_contents=$(cat 'examples/coro_ring_buffer.cpp')
echo "${template_contents/\$\{EXAMPLE_CORO_RING_BUFFER_CPP\}/$example_contents}" > README.md

template_contents=$(cat 'README.md')
example_contents=$(cat 'examples/coro_shared_mutex.cpp')
echo "${template_contents/\$\{EXAMPLE_CORO_SHARED_MUTEX_CPP\}/$example_contents}" > README.md

git add README.md
```
.githooks/readme-template.md
@@ -1,372 +0,0 @@

# libcoro C++20 linux coroutine library

![build](https://github.com/jbaldwin/libcoro/workflows/build/badge.svg)
[Coverage Status](https://coveralls.io/github/jbaldwin/libcoro?branch=master)
[Codacy Badge](https://www.codacy.com/gh/jbaldwin/libcoro/dashboard?utm_source=github.com&utm_medium=referral&utm_content=jbaldwin/libcoro&utm_campaign=Badge_Grade)
[![language][badge.language]][language]
[![license][badge.license]][license]

**libcoro** is licensed under the Apache 2.0 license.

**libcoro** is meant to provide low-level coroutine constructs for building larger applications; the current focus is high-performance networking coroutine support.

## Overview
* C++20 coroutines!
* Modern Safe C++20 API
* Higher level coroutine constructs
    - [coro::task<T>](#task)
    - [coro::generator<T>](#generator)
    - [coro::event](#event)
    - [coro::latch](#latch)
    - [coro::mutex](#mutex)
    - [coro::shared_mutex](#shared_mutex)
    - [coro::semaphore](#semaphore)
    - [coro::ring_buffer<element, num_elements>](#ring_buffer)
    - coro::sync_wait(awaitable)
    - coro::when_all(awaitable...) -> awaitable
* Schedulers
    - [coro::thread_pool](#thread_pool) for coroutine cooperative multitasking
    - [coro::io_scheduler](#io_scheduler) for driving i/o events; uses a thread_pool to execute coroutines upon triggered events
        - Currently uses an epoll driver
    - [coro::task_container](#task_container) for dynamic task lifetimes
* Coroutine Networking
    - coro::net::dns_resolver for async DNS
        - Uses libc-ares
    - [coro::net::tcp_client](#io_scheduler)
        - Supports SSL/TLS via OpenSSL
    - [coro::net::tcp_server](#io_scheduler)
        - Supports SSL/TLS via OpenSSL
    - coro::net::udp_peer

## Usage

### A note on co_await
It is important to note that, depending on the construct used, _any_ `co_await` has the potential to switch the thread that is executing the currently running coroutine. In general this shouldn't affect how a user of the library writes code, with one exception: `thread_local`. Use `thread_local` with extreme care, and _never_ across a `co_await` boundary, due to thread switching and work stealing on thread pools.
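
As a minimal illustration of the hazard (a sketch, not one of the library's shipped examples; it only composes the `coro::thread_pool` calls shown elsewhere in this README), the two accesses to the `thread_local` below may touch two different per-thread variables because the coroutine can resume on a different worker:

```C++
#include <coro/coro.hpp>
#include <iostream>

thread_local uint64_t tls_value{0}; // one independent instance per worker thread

int main()
{
    coro::thread_pool tp{coro::thread_pool::options{.thread_count = 2}};

    auto risky = [&]() -> coro::task<void> {
        co_await tp.schedule();
        tls_value = 42;      // written on whichever worker resumed us
        co_await tp.yield(); // may resume on a *different* worker
        // Not guaranteed to print 42: this may read another thread's tls_value.
        std::cout << "tls_value = " << tls_value << "\n";
        co_return;
    };

    coro::sync_wait(risky());
}
```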

### task
The `coro::task<T>` is the main coroutine building block within `libcoro`. Use task to create your coroutines, and `co_await` or `co_yield` tasks within tasks to perform asynchronous operations, lazy evaluation, or even to spread work out across a `coro::thread_pool`. Tasks are lightweight and only begin execution upon awaiting them. If their return type is not `void` then the value can be returned by const reference or by moving (r-value reference).

```C++
${EXAMPLE_CORO_TASK_CPP}
```

Expected output:
```bash
$ ./examples/coro_task
Task1 output = 9
expensive_struct() move constructor called
expensive_struct() move assignment called
expensive_struct() move constructor called
12345678-1234-5678-9012-123456781234 has 90000 records.
Answer to everything = 42
```

### generator
The `coro::generator<T>` construct is a coroutine which can generate one or more values.

```C++
${EXAMPLE_CORO_GENERATOR_CPP}
```

Expected output:
```bash
$ ./examples/coro_generator
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
```

### event
The `coro::event` is a thread-safe async tool that allows one or more waiters to suspend until the event is set, and only then proceed. The current implementation resumes all waiters on the thread that sets the event. If the event is already set when a waiter goes to wait, the waiter simply continues executing, with no suspend or wait time incurred.

```C++
${EXAMPLE_CORO_EVENT_CPP}
```

Expected output:
```bash
$ ./examples/coro_event
task 1 is waiting on the event...
task 2 is waiting on the event...
task 3 is waiting on the event...
set task is triggering the event
task 3 event triggered, now resuming.
task 2 event triggered, now resuming.
task 1 event triggered, now resuming.
```

### latch
The `coro::latch` is a thread-safe async tool that allows a single waiter to suspend until all outstanding events have completed, and only then proceed.

```C++
${EXAMPLE_CORO_LATCH_CPP}
```

Expected output:
```bash
$ ./examples/coro_latch
latch task is now waiting on all children tasks...
worker task 1 is working...
worker task 2 is working...
worker task 3 is working...
worker task 4 is working...
worker task 5 is working...
worker task 1 is done, counting down on the latch
worker task 2 is done, counting down on the latch
worker task 3 is done, counting down on the latch
worker task 4 is done, counting down on the latch
worker task 5 is done, counting down on the latch
latch task dependency tasks completed, resuming.
```

### mutex
The `coro::mutex` is a thread-safe async tool to protect critical sections, so that only a single thread executes the critical section at any given time. Acquiring an uncontended mutex is a simple CAS operation with an 'acquire' memory fence, behaving much like `std::mutex`. If the lock is contended, the coroutine adds itself to a LIFO queue of waiters and yields execution, allowing another coroutine to run on that thread while it waits to acquire the lock.

It is important to note that upon releasing the mutex, the unlocking thread immediately starts processing the next waiter in line for the `coro::mutex` (if there are any waiters); the mutex is only unlocked/released once all waiters have been processed. This guarantees fair execution in a reasonably FIFO manner, but it also means all coroutines that stack up in the waiter queue end up shifting to the single thread that is executing all waiting coroutines. If this is a concern in your use case, it is possible to manually reschedule onto a thread pool after the critical section to redistribute the work (a sketch of this pattern follows the example output below).

The suspended waiter queue is LIFO; however, the worker that currently holds the mutex will periodically 'acquire' the current LIFO waiter list to process those waiters when its internal list becomes empty. This effectively resets the suspended waiter list to empty, and the worker holding the mutex works through the newly acquired LIFO queue of waiters. It would be possible to reverse this list to be as fair as possible; however, not reversing the list should result in better throughput, at possibly the cost of some latency for the first suspended waiters on the 'current' LIFO queue. Reversing the list would introduce latency for all queued waiters, since it would be done every time the LIFO queue is swapped.

```C++
${EXAMPLE_CORO_MUTEX_CPP}
```

Expected output (it will vary from run to run based on how the thread pool workers are scheduled and in what order they acquire the mutex lock):
```bash
$ ./examples/coro_mutex
1, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 34, 33, 32, 31, 30, 29, 28, 27, 26, 25, 24, 23, 37, 36, 35, 40, 39, 38, 41, 42, 43, 44, 46, 47, 48, 45, 49, 50, 51, 52, 53, 54, 55, 57, 56, 59, 58, 61, 60, 62, 63, 65, 64, 67, 66, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 83, 82, 84, 85, 86, 87, 88, 89, 91, 90, 92, 93, 94, 95, 96, 97, 98, 99, 100,
```

It is easy to see the LIFO 'atomic' queue in action at the beginning, where tasks 22 through 2 are immediately suspended waiting to acquire the mutex.
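
As noted above, waiters that stack up in the mutex's queue all resume on the thread draining that queue. A minimal sketch of the manual rescheduling pattern (not one of the shipped examples; it assumes a `coro::thread_pool tp` and a `coro::mutex m` in scope, and uses only the `schedule()` and `lock()` calls shown in this README):

```C++
auto make_rescheduling_task = [&]() -> coro::task<void> {
    co_await tp.schedule();
    {
        // Critical section; the scoped lock releases the mutex at scope exit.
        auto scoped_lock = co_await m.lock();
        // ... mutate shared state ...
    }
    // Hop back onto the thread pool so expensive follow-up work is spread
    // across workers instead of piling onto the single thread that is
    // draining the mutex's waiter queue.
    co_await tp.schedule();
    // ... expensive non-critical work ...
    co_return;
};
```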

### shared_mutex
The `coro::shared_mutex` is a thread-safe async tool that allows multiple shared users at once, as well as exclusive access. The lock is acquired strictly in FIFO order: if the lock is currently held by shared users and an exclusive waiter attempts to lock, the exclusive waiter suspends until all of the _current_ shared users finish using the lock. Any new users that attempt to lock the mutex in a shared state once there is an exclusive waiter will wait behind the exclusive waiter. This prevents the exclusive waiter from being starved.

```C++
${EXAMPLE_CORO_SHARED_MUTEX_CPP}
```

Example output; notice how the shared tasks (4, 5, 6) attempt to acquire the lock in a shared state but are blocked behind the exclusive waiter until it completes:
```bash
$ ./examples/coro_shared_mutex
shared task 1 lock_shared()
shared task 1 lock_shared() acquired
shared task 2 lock_shared()
shared task 2 lock_shared() acquired
shared task 3 lock_shared()
shared task 3 lock_shared() acquired
exclusive task lock()
shared task 4 lock_shared()
shared task 5 lock_shared()
shared task 6 lock_shared()
shared task 1 unlock_shared()
shared task 2 unlock_shared()
shared task 3 unlock_shared()
exclusive task lock() acquired
exclusive task unlock()
shared task 4 lock_shared() acquired
shared task 5 lock_shared() acquired
shared task 6 lock_shared() acquired
shared task 4 unlock_shared()
shared task 5 unlock_shared()
shared task 6 unlock_shared()
```

### semaphore
The `coro::semaphore` is a thread-safe async tool that protects a limited number of resources by only allowing a fixed number of consumers to hold an acquired resource at any one time. The `coro::semaphore` also has a maximum number of resources, given in its constructor. This means that if a resource is produced or released when the semaphore is already at its maximum resource availability, the release operation will wait for space to become available. This is useful for ring-buffer-like situations where resources are produced and then consumed, but it has no effect on a semaphore's usage if there is a known fixed quantity of resources to start with, which are acquired and then released back.

```C++
${EXAMPLE_CORO_SEMAPHORE_CPP}
```

Expected output (there is no lock around `std::cout`, so some of the output is interleaved):
```bash
$ ./examples/coro_semaphore
1, 23, 25, 24, 22, 27, 28, 29, 21, 20, 19, 18, 17, 14, 31, 30, 33, 32, 41, 40, 37, 39, 38, 36, 35, 34, 43, 46, 47, 48, 45, 42, 44, 26, 16, 15, 13, 52, 54, 55, 53, 49, 51, 57, 58, 50, 62, 63, 61, 60, 59, 56, 12, 11, 8, 10, 9, 7, 6, 5, 4, 3, 642, , 66, 67, 6568, , 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
```

### ring_buffer
The `coro::ring_buffer<element, num_elements>` is a thread-safe async multi-producer, multi-consumer, statically sized ring buffer. Producers that try to produce a value when the ring buffer is full will suspend until space is available. Consumers that try to consume a value when the ring buffer is empty will suspend until an element is available. All waiters on the ring buffer, whether producing or consuming, are resumed in a LIFO manner when their respective operation becomes available.

The `coro::ring_buffer` also works with `coro::stop_signal`: if the ring buffer's `stop_signal_notify_waiters()` function is called, any producers or consumers that are suspended and waiting will be awoken by throwing a `coro::stop_signal`. This is useful for long-running daemons that should always suspend when data cannot be produced or consumed, but need to break out of the suspend upon shutdown.

```C++
${EXAMPLE_CORO_RING_BUFFER_CPP}
```

Expected output:
```bash
$ ./examples/coro_ring_buffer
(id=3, v=1), (id=2, v=2), (id=1, v=3), (id=0, v=4), (id=3, v=5), (id=2, v=6), (id=1, v=7), (id=0, v=8), (id=3, v=9), (id=2, v=10), (id=1, v=11), (id=0, v=12), (id=3, v=13), (id=2, v=14), (id=1, v=15), (id=0, v=16), (id=3, v=17), (id=2, v=18), (id=1, v=19), (id=0, v=20), (id=3, v=21), (id=2, v=22), (id=1, v=23), (id=0, v=24), (id=3, v=25), (id=2, v=26), (id=1, v=27), (id=0, v=28), (id=3, v=29), (id=2, v=30), (id=1, v=31), (id=0, v=32), (id=3, v=33), (id=2, v=34), (id=1, v=35), (id=0, v=36), (id=3, v=37), (id=2, v=38), (id=1, v=39), (id=0, v=40), (id=3, v=41), (id=2, v=42), (id=0, v=44), (id=1, v=43), (id=3, v=45), (id=2, v=46), (id=0, v=47), (id=3, v=48), (id=2, v=49), (id=0, v=50), (id=3, v=51), (id=2, v=52), (id=0, v=53), (id=3, v=54), (id=2, v=55), (id=0, v=56), (id=3, v=57), (id=2, v=58), (id=0, v=59), (id=3, v=60), (id=1, v=61), (id=2, v=62), (id=0, v=63), (id=3, v=64), (id=1, v=65), (id=2, v=66), (id=0, v=67), (id=3, v=68), (id=1, v=69), (id=2, v=70), (id=0, v=71), (id=3, v=72), (id=1, v=73), (id=2, v=74), (id=0, v=75), (id=3, v=76), (id=1, v=77), (id=2, v=78), (id=0, v=79), (id=3, v=80), (id=2, v=81), (id=1, v=82), (id=0, v=83), (id=3, v=84), (id=2, v=85), (id=1, v=86), (id=0, v=87), (id=3, v=88), (id=2, v=89), (id=1, v=90), (id=0, v=91), (id=3, v=92), (id=2, v=93), (id=1, v=94), (id=0, v=95), (id=3, v=96), (id=2, v=97), (id=1, v=98), (id=0, v=99), (id=3, v=100),
producer is sending stop signal
consumer 0 shutting down, stop signal received
consumer 1 shutting down, stop signal received
consumer 2 shutting down, stop signal received
consumer 3 shutting down, stop signal received
```

### thread_pool
`coro::thread_pool` is a statically sized pool of worker threads that executes scheduled coroutines from a FIFO queue. To schedule a coroutine on a thread pool, `co_await` the pool's `schedule()` function; this transfers execution from the current thread to a thread pool worker thread. It is important to note that scheduling first places the coroutine into the FIFO queue, where it will be picked up by the first available thread in the pool, e.g. there could be a delay if there is a lot of work queued up.

```C++
${EXAMPLE_CORO_THREAD_POOL_CPP}
```

Example output (will vary based on threads):
```bash
$ ./examples/coro_thread_pool
thread pool worker 0 is starting up.
thread pool worker 2 is starting up.
thread pool worker 3 is starting up.
thread pool worker 1 is starting up.
Task 2 is yielding()
Task 3 is yielding()
Task 0 is yielding()
Task 1 is yielding()
Task 4 is yielding()
Task 5 is yielding()
Task 6 is yielding()
Task 7 is yielding()
Task 8 is yielding()
Task 9 is yielding()
calculated thread pool result = 4999898
thread pool worker 1 is shutting down.
thread pool worker 2 is shutting down.
thread pool worker 3 is shutting down.
thread pool worker 0 is shutting down.
```

### io_scheduler
`coro::io_scheduler` is an i/o event scheduler that uses a statically sized pool (`coro::thread_pool`) to process events that are ready. The `coro::io_scheduler` can use a dedicated spawned thread to process ready events, or it can be manually driven via its `process_events()` function for integration into existing event loops. When using the dedicated thread, that thread does not execute any of the tasks itself; it simply schedules them to be executed on the next available worker thread in its embedded `coro::thread_pool`. Inline execution of tasks on the dedicated i/o thread is not supported, since it can introduce poor latency while an expensive task is executing.
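
A minimal sketch of the manual mode (assumptions flagged: the `thread_strategy_t::manual` enumerator is only hinted at by the example's comments, and the `process_events()` overload taking a poll timeout is assumed here; verify both against `inc/coro/io_scheduler.hpp`):

```C++
coro::io_scheduler scheduler{coro::io_scheduler::options{
    // Assumed enumerator: do not spawn a dedicated event thread; the
    // embedding application drives the scheduler itself.
    .thread_strategy = coro::io_scheduler::thread_strategy_t::manual}};

// ... schedule i/o tasks onto `scheduler` as usual ...

// Inside the application's own event loop; `running` is a hypothetical
// application shutdown flag.
while (running)
{
    // Assumed overload: process any events that become ready within the timeout.
    scheduler.process_events(std::chrono::milliseconds{8});
}
```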

The example provided here shows an i/o scheduler that spins up a basic `coro::net::tcp_server` and a `coro::net::tcp_client` that connect to each other, then send a request and a response.

```C++
${EXAMPLE_CORO_IO_SCHEDULER_CPP}
```

Example output:
```bash
$ ./examples/coro_io_scheduler
io_scheduler::thread_pool worker 0 starting
io_scheduler::process event thread start
io_scheduler::thread_pool worker 1 starting
server: Hello from client.
client: Hello from server.
io_scheduler::thread_pool worker 0 stopping
io_scheduler::thread_pool worker 1 stopping
io_scheduler::process event thread stop
```

### task_container
`coro::task_container` is a special container type that maintains the lifetime of tasks that do not have a known lifetime. This is extremely useful for tasks that hold open connections to clients and possibly process multiple requests from that client before shutting down. The task doesn't know how long it will be alive, but at some point in the future it will complete and need to have its resources cleaned up. The `coro::task_container` does this by wrapping the user's task in another coroutine task that marks itself for deletion upon completion within the parent task container. The task container should then run garbage collection periodically (by default, when a new task is added) to prune completed tasks from the container.

All tasks stored within a `coro::task_container` must have a `void` return type, since their results cannot be accessed: the task's lifetime is indeterminate.

```C++
${EXAMPLE_CORO_TASK_CONTAINER_CPP}
```

```bash
$ ./examples/coro_task_container
server: Hello from client 1
client: Hello from server 1
server: Hello from client 2
client: Hello from server 2
server: Hello from client 3
client: Hello from server 3
server: Hello from client 4
client: Hello from server 4
server: Hello from client 5
client: Hello from server 5
```

### Requirements
    C++20 Compiler with coroutine support
        g++10.2 is tested
    CMake
    make or ninja
    pthreads
    openssl
    gcov/lcov (for generating coverage only)

### Instructions

#### Cloning the project
This project uses git submodules; to properly check out this project use:

    git clone --recurse-submodules <libcoro-url>

This project depends on the following git submodules:
* [libc-ares](https://github.com/c-ares/c-ares) for the async DNS resolver.
* [catch2](https://github.com/catchorg/Catch2) for testing.

#### Building
    mkdir Release && cd Release
    cmake -DCMAKE_BUILD_TYPE=Release ..
    cmake --build .

CMake Options:

| Name                    | Default | Description                                                    |
|:------------------------|:--------|:---------------------------------------------------------------|
| LIBCORO_BUILD_TESTS     | ON      | Should the tests be built?                                     |
| LIBCORO_CODE_COVERAGE   | OFF     | Should code coverage be enabled? Requires tests to be enabled. |
| LIBCORO_BUILD_EXAMPLES  | ON      | Should the examples be built?                                  |

#### Adding to your project

##### add_subdirectory()

```cmake
# Include the checked out libcoro code in your CMakeLists.txt file
add_subdirectory(path/to/libcoro)

# Link the libcoro cmake target to your project(s).
target_link_libraries(${PROJECT_NAME} PUBLIC libcoro)
```

##### FetchContent
CMake can include the project directly by downloading the source, compiling, and linking to your project via FetchContent; below is an example of how you might do this within your project.

```cmake
cmake_minimum_required(VERSION 3.11)

# Fetch the project and make it available for use.
include(FetchContent)
FetchContent_Declare(
    libcoro
    GIT_REPOSITORY https://github.com/jbaldwin/libcoro.git
    GIT_TAG <TAG_OR_GIT_HASH>
)
FetchContent_MakeAvailable(libcoro)

# Link the libcoro cmake target to your project(s).
target_link_libraries(${PROJECT_NAME} PUBLIC libcoro)
```

#### Tests
The tests will automatically be run by GitHub Actions when a pull request is created. They can also be run locally:

    # Invoke via cmake with all output from the tests displayed to console:
    ctest -VV

    # Or invoke directly; you can pass the names of tests to execute. The framework used is catch2.
    # Tests are tagged with their group; below is how to run all of the coro::net::tcp_server tests:
    ./Debug/test/libcoro_test "[tcp_server]"

### Support

File bug reports, feature requests and questions using [GitHub libcoro Issues](https://github.com/jbaldwin/libcoro/issues)

Copyright © 2020-2021 Josh Baldwin

[badge.language]: https://img.shields.io/badge/language-C%2B%2B20-yellow.svg
[badge.license]: https://img.shields.io/badge/license-Apache--2.0-blue

[language]: https://en.wikipedia.org/wiki/C%2B%2B20
[license]: https://en.wikipedia.org/wiki/Apache_License
.github/workflows/ci.yml (vendored, 102 lines)
@@ -1,102 +0,0 @@

```yaml
name: build

on: [pull_request]

jobs:
  build-ubuntu-20-04:
    name: ubuntu-20.04
    runs-on: ubuntu-latest
    container:
      image: ubuntu:20.04
      env:
        TZ: America/New_York
        DEBIAN_FRONTEND: noninteractive
    steps:
      - name: apt
        run: |
          apt-get update
          apt-get -y upgrade
          apt install -y build-essential software-properties-common
          add-apt-repository ppa:ubuntu-toolchain-r/test
          apt-get install -y \
            cmake \
            git \
            ninja-build \
            g++-10 \
            libssl-dev
      - name: Checkout # recursive checkout requires git to be installed first
        uses: actions/checkout@v2
        with:
          submodules: recursive
      - name: build-release-g++
        run: |
          mkdir build-release-g++
          cd build-release-g++
          cmake \
            -GNinja \
            -DCMAKE_BUILD_TYPE=Release \
            -DCMAKE_C_COMPILER=gcc-10 \
            -DCMAKE_CXX_COMPILER=g++-10 \
            ..
          ninja
      - name: test-release-g++
        run: |
          cd build-release-g++
          ctest -VV
  build-fedora-32:
    name: fedora-32
    runs-on: ubuntu-latest
    container:
      image: fedora:32
    steps:
      - name: dnf
        run: |
          sudo dnf install -y \
            cmake \
            git \
            ninja-build \
            gcc-c++-10.2.1 \
            lcov \
            openssl-devel
      - name: Checkout # recursive checkout requires git to be installed first
        uses: actions/checkout@v2
        with:
          submodules: recursive
      - name: build-debug-g++
        run: |
          mkdir build-debug-g++
          cd build-debug-g++
          cmake \
            -GNinja \
            -DLIBCORO_CODE_COVERAGE=ON \
            -DCMAKE_BUILD_TYPE=Debug \
            -DCMAKE_C_COMPILER=gcc \
            -DCMAKE_CXX_COMPILER=g++ \
            ..
          ninja
      - name: build-release-g++
        run: |
          mkdir build-release-g++
          cd build-release-g++
          cmake \
            -GNinja \
            -DCMAKE_BUILD_TYPE=Release \
            -DCMAKE_C_COMPILER=gcc \
            -DCMAKE_CXX_COMPILER=g++ \
            ..
          ninja
      - name: test-release-g++
        run: |
          cd build-release-g++
          ctest -VV
      - name: Build coverage info
        run: |
          cd build-debug-g++
          ctest -VV
          gcov -o ./test/CMakeFiles/libcoro_tests.dir/main.cpp.o ./test/libcoro_tests
          lcov --include "*/inc/coro/*" --include "*/src/*" --exclude "test/*" -o libcoro_tests.info -c -d .
      - name: Coveralls GitHub Action
        uses: coverallsapp/github-action@v1.0.1
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          path-to-lcov: build-debug-g++/libcoro_tests.info
```
.gitmodules (vendored, 3 lines)
@@ -1,3 +0,0 @@

```ini
[submodule "vendor/c-ares/c-ares"]
    path = vendor/c-ares/c-ares
    url = git@github.com:c-ares/c-ares.git
```
CMakeLists.txt

```diff
@@ -1,92 +1,48 @@
 cmake_minimum_required(VERSION 3.0)
-project(libcoro CXX)
-
-# Set the githooks directory to auto format and update the readme.
-message("git config core.hooksPath .githooks")
-execute_process(
-    COMMAND git config core.hooksPath .githooks
-    WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
-)
-
-option(LIBCORO_BUILD_TESTS "Build the tests, Default=ON." ON)
-option(LIBCORO_CODE_COVERAGE "Enable code coverage, tests must also be enabled, Default=OFF" OFF)
-option(LIBCORO_BUILD_EXAMPLES "Build the examples, Default=ON." ON)
-
-message("${PROJECT_NAME} LIBCORO_BUILD_TESTS    = ${LIBCORO_BUILD_TESTS}")
-message("${PROJECT_NAME} LIBCORO_CODE_COVERAGE  = ${LIBCORO_CODE_COVERAGE}")
-message("${PROJECT_NAME} LIBCORO_BUILD_EXAMPLES = ${LIBCORO_BUILD_EXAMPLES}")
-
-set(CARES_STATIC ON CACHE INTERNAL "")
-set(CARES_SHARED OFF CACHE INTERNAL "")
-set(CARES_INSTALL OFF CACHE INTERNAL "")
-
-add_subdirectory(vendor/c-ares/c-ares)
+project(libcrosscoro CXX)
 
-set(LIBCORO_SOURCE_FILES
+set(LIBCROSSCORO_SOURCE_FILES
     inc/coro/concepts/awaitable.hpp
     inc/coro/concepts/buffer.hpp
     inc/coro/concepts/executor.hpp
     inc/coro/concepts/promise.hpp
     inc/coro/concepts/range_of.hpp
 
     inc/coro/detail/void_value.hpp
 
-    inc/coro/net/connect.hpp src/net/connect.cpp
-    inc/coro/net/dns_resolver.hpp src/net/dns_resolver.cpp
-    inc/coro/net/hostname.hpp
-    inc/coro/net/ip_address.hpp src/net/ip_address.cpp
-    inc/coro/net/recv_status.hpp src/net/recv_status.cpp
-    inc/coro/net/send_status.hpp src/net/send_status.cpp
-    inc/coro/net/socket.hpp src/net/socket.cpp
-    inc/coro/net/ssl_context.hpp src/net/ssl_context.cpp
-    inc/coro/net/ssl_handshake_status.hpp
-    inc/coro/net/tcp_client.hpp src/net/tcp_client.cpp
-    inc/coro/net/tcp_server.hpp src/net/tcp_server.cpp
-    inc/coro/net/udp_peer.hpp src/net/udp_peer.cpp
-
     inc/coro/coro.hpp
     inc/coro/event.hpp src/event.cpp
     inc/coro/fd.hpp
     inc/coro/generator.hpp
     inc/coro/io_scheduler.hpp src/io_scheduler.cpp
     inc/coro/latch.hpp
     inc/coro/mutex.hpp src/mutex.cpp
     inc/coro/poll.hpp
     inc/coro/ring_buffer.hpp
     inc/coro/semaphore.hpp src/semaphore.cpp
-    inc/coro/shared_mutex.hpp src/shared_mutex.cpp
     inc/coro/shutdown.hpp
+    inc/coro/shared_mutex.hpp
     inc/coro/stop_signal.hpp
     inc/coro/sync_wait.hpp src/sync_wait.cpp
-    inc/coro/task_container.hpp src/task_container.cpp
+    inc/coro/task_container.hpp
     inc/coro/task.hpp
     inc/coro/thread_pool.hpp src/thread_pool.cpp
     inc/coro/when_all.hpp
 )
 
-add_library(${PROJECT_NAME} STATIC ${LIBCORO_SOURCE_FILES})
-set_target_properties(${PROJECT_NAME} PROPERTIES LINKER_LANGUAGE CXX)
-target_compile_features(${PROJECT_NAME} PUBLIC cxx_std_20)
-target_include_directories(${PROJECT_NAME} PUBLIC inc)
-target_link_libraries(${PROJECT_NAME} PUBLIC pthread c-ares ssl crypto)
+add_library(crosscoro STATIC ${LIBCROSSCORO_SOURCE_FILES})
+set_target_properties(crosscoro PROPERTIES LINKER_LANGUAGE CXX)
+target_compile_features(crosscoro PUBLIC cxx_std_20)
+target_include_directories(crosscoro PUBLIC inc)
+target_link_libraries(crosscoro PUBLIC pthread)
 
 if(${CMAKE_CXX_COMPILER_ID} MATCHES "GNU")
     if(CMAKE_CXX_COMPILER_VERSION VERSION_LESS "10.2.0")
         message(FATAL_ERROR "gcc version ${CMAKE_CXX_COMPILER_VERSION} is unsupported, please upgrade to at least 10.2.0")
     endif()
 
-    target_compile_options(${PROJECT_NAME} PUBLIC -fcoroutines -Wall -Wextra -pipe)
+    target_compile_options(crosscoro PUBLIC -fcoroutines -Wall -Wextra -pipe)
 elseif(${CMAKE_CXX_COMPILER_ID} MATCHES "Clang")
     message(FATAL_ERROR "Clang is currently not supported.")
 endif()
-
-if(LIBCORO_BUILD_TESTS)
-    if(LIBCORO_CODE_COVERAGE)
-        target_compile_options(${PROJECT_NAME} PRIVATE --coverage)
-        target_link_libraries(${PROJECT_NAME} PRIVATE gcov)
-    endif()
-
-    enable_testing()
-    add_subdirectory(test)
-endif()
-
-if(LIBCORO_BUILD_EXAMPLES)
-    add_subdirectory(examples)
-endif()
```
examples/CMakeLists.txt
@@ -1,64 +0,0 @@

```cmake
cmake_minimum_required(VERSION 3.0)
project(libcoro_examples)

add_executable(coro_task coro_task.cpp)
target_compile_features(coro_task PUBLIC cxx_std_20)
target_link_libraries(coro_task PUBLIC libcoro)

add_executable(coro_generator coro_generator.cpp)
target_compile_features(coro_generator PUBLIC cxx_std_20)
target_link_libraries(coro_generator PUBLIC libcoro)

add_executable(coro_event coro_event.cpp)
target_compile_features(coro_event PUBLIC cxx_std_20)
target_link_libraries(coro_event PUBLIC libcoro)

add_executable(coro_latch coro_latch.cpp)
target_compile_features(coro_latch PUBLIC cxx_std_20)
target_link_libraries(coro_latch PUBLIC libcoro)

add_executable(coro_mutex coro_mutex.cpp)
target_compile_features(coro_mutex PUBLIC cxx_std_20)
target_link_libraries(coro_mutex PUBLIC libcoro)

add_executable(coro_thread_pool coro_thread_pool.cpp)
target_compile_features(coro_thread_pool PUBLIC cxx_std_20)
target_link_libraries(coro_thread_pool PUBLIC libcoro)

add_executable(coro_io_scheduler coro_io_scheduler.cpp)
target_compile_features(coro_io_scheduler PUBLIC cxx_std_20)
target_link_libraries(coro_io_scheduler PUBLIC libcoro)

add_executable(coro_task_container coro_task_container.cpp)
target_compile_features(coro_task_container PUBLIC cxx_std_20)
target_link_libraries(coro_task_container PUBLIC libcoro)

add_executable(coro_semaphore coro_semaphore.cpp)
target_compile_features(coro_semaphore PUBLIC cxx_std_20)
target_link_libraries(coro_semaphore PUBLIC libcoro)

add_executable(coro_ring_buffer coro_ring_buffer.cpp)
target_compile_features(coro_ring_buffer PUBLIC cxx_std_20)
target_link_libraries(coro_ring_buffer PUBLIC libcoro)

add_executable(coro_shared_mutex coro_shared_mutex.cpp)
target_compile_features(coro_shared_mutex PUBLIC cxx_std_20)
target_link_libraries(coro_shared_mutex PUBLIC libcoro)

if(${CMAKE_CXX_COMPILER_ID} MATCHES "GNU")
    target_compile_options(coro_task PUBLIC -fcoroutines -Wall -Wextra -pipe)
    target_compile_options(coro_generator PUBLIC -fcoroutines -Wall -Wextra -pipe)
    target_compile_options(coro_event PUBLIC -fcoroutines -Wall -Wextra -pipe)
    target_compile_options(coro_latch PUBLIC -fcoroutines -Wall -Wextra -pipe)
    target_compile_options(coro_mutex PUBLIC -fcoroutines -Wall -Wextra -pipe)
    target_compile_options(coro_thread_pool PUBLIC -fcoroutines -Wall -Wextra -pipe)
    target_compile_options(coro_io_scheduler PUBLIC -fcoroutines -Wall -Wextra -pipe)
    target_compile_options(coro_task_container PUBLIC -fcoroutines -Wall -Wextra -pipe)
    target_compile_options(coro_semaphore PUBLIC -fcoroutines -Wall -Wextra -pipe)
    target_compile_options(coro_ring_buffer PUBLIC -fcoroutines -Wall -Wextra -pipe)
    target_compile_options(coro_shared_mutex PUBLIC -fcoroutines -Wall -Wextra -pipe)
elseif(${CMAKE_CXX_COMPILER_ID} MATCHES "Clang")
    message(FATAL_ERROR "Clang is currently not supported.")
else()
    message(FATAL_ERROR "Unsupported compiler.")
endif()
```
examples/coro_event.cpp
@@ -1,26 +0,0 @@

```C++
#include <coro/coro.hpp>
#include <iostream>

int main()
{
    coro::event e;

    // These tasks will wait until the given event has been set before advancing.
    auto make_wait_task = [](const coro::event& e, uint64_t i) -> coro::task<void> {
        std::cout << "task " << i << " is waiting on the event...\n";
        co_await e;
        std::cout << "task " << i << " event triggered, now resuming.\n";
        co_return;
    };

    // This task will trigger the event allowing all waiting tasks to proceed.
    auto make_set_task = [](coro::event& e) -> coro::task<void> {
        std::cout << "set task is triggering the event\n";
        e.set();
        co_return;
    };

    // Given more than a single task to synchronously wait on, use when_all() to execute all the
    // tasks concurrently on this thread and then sync_wait() for them all to complete.
    coro::sync_wait(coro::when_all(make_wait_task(e, 1), make_wait_task(e, 2), make_wait_task(e, 3), make_set_task(e)));
}
```
examples/coro_generator.cpp
@@ -1,31 +0,0 @@

```C++
#include <coro/coro.hpp>
#include <iostream>

int main()
{
    auto task = [](uint64_t count_to) -> coro::task<void> {
        // Create a generator function that will yield an incrementing
        // number each time it is called.
        auto gen = []() -> coro::generator<uint64_t> {
            uint64_t i = 0;
            while (true)
            {
                co_yield i++;
            }
        };

        // Generate the next number until it is greater than count_to.
        for (auto val : gen())
        {
            std::cout << val << ", ";

            if (val >= count_to)
            {
                break;
            }
        }
        co_return;
    };

    coro::sync_wait(task(100));
}
```
examples/coro_io_scheduler.cpp
@@ -1,144 +0,0 @@

```C++
#include <coro/coro.hpp>
#include <iostream>

int main()
{
    coro::io_scheduler scheduler{coro::io_scheduler::options{
        // The scheduler will spawn a dedicated event processing thread. This is the default, but
        // it is possible to use 'manual' and call 'process_events()' to drive the scheduler yourself.
        .thread_strategy = coro::io_scheduler::thread_strategy_t::spawn,
        // If the scheduler is in spawn mode this functor is called upon starting the dedicated
        // event processor thread.
        .on_io_thread_start_functor = [] { std::cout << "io_scheduler::process event thread start\n"; },
        // If the scheduler is in spawn mode this functor is called upon stopping the dedicated
        // event processor thread.
        .on_io_thread_stop_functor = [] { std::cout << "io_scheduler::process event thread stop\n"; },
        // The io scheduler uses a coro::thread_pool to process the events or tasks it is given.
        // The tasks are not processed inline on the dedicated event processor thread, so events can
        // be received and handled as soon as a worker thread is available. See the coro::thread_pool
        // for the available options and their descriptions.
        .pool =
            coro::thread_pool::options{
                .thread_count = 2,
                .on_thread_start_functor =
                    [](size_t i) { std::cout << "io_scheduler::thread_pool worker " << i << " starting\n"; },
                .on_thread_stop_functor =
                    [](size_t i) { std::cout << "io_scheduler::thread_pool worker " << i << " stopping\n"; }}}};

    auto make_server_task = [&]() -> coro::task<void> {
        // Start by creating a tcp server; we'll do this before putting the task onto the scheduler
        // so the server is immediately available for the client to connect to, since construction
        // creates a socket, binds it, and starts listening on it. See tcp_server for more details on
        // how to specify the local address and port to bind to as well as enabling SSL/TLS.
        coro::net::tcp_server server{scheduler};

        // Now schedule this task onto the scheduler.
        co_await scheduler.schedule();

        // Wait for an incoming connection and accept it.
        auto poll_status = co_await server.poll();
        if (poll_status != coro::poll_status::event)
        {
            co_return; // Handle error, see poll_status for detailed error states.
        }

        // Accept the incoming client connection.
        auto client = server.accept();

        // Verify the incoming connection was accepted correctly.
        if (!client.socket().is_valid())
        {
            co_return; // Handle error.
        }

        // Now wait for the client message; this message is small enough that it should always
        // arrive with a single recv() call.
        poll_status = co_await client.poll(coro::poll_op::read);
        if (poll_status != coro::poll_status::event)
        {
            co_return; // Handle error.
        }

        // Prepare a buffer and recv() the client's message. This function returns the recv() status
        // as well as a span<char> that overlaps the given buffer for the bytes that were read. This
        // can be used to resize the buffer or work with the bytes without modifying the buffer at all.
        std::string request(256, '\0');
        auto [recv_status, recv_bytes] = client.recv(request);
        if (recv_status != coro::net::recv_status::ok)
        {
            co_return; // Handle error, see net::recv_status for detailed error states.
        }

        request.resize(recv_bytes.size());
        std::cout << "server: " << request << "\n";

        // Make sure the client socket can be written to.
        poll_status = co_await client.poll(coro::poll_op::write);
        if (poll_status != coro::poll_status::event)
        {
            co_return; // Handle error.
        }

        // Send the server response to the client.
        // This message is small enough that it will be sent in a single send() call, but to demonstrate
        // how to use the 'remaining' portion of the send() result this is wrapped in a loop until
        // all the bytes are sent.
        std::string           response  = "Hello from server.";
        std::span<const char> remaining = response;
        do
        {
            // Optimistically send() prior to polling.
            auto [send_status, r] = client.send(remaining);
            if (send_status != coro::net::send_status::ok)
            {
                co_return; // Handle error, see net::send_status for detailed error states.
            }

            if (r.empty())
            {
                break; // The entire message has been sent.
            }

            // Re-assign remaining bytes for the next loop iteration and poll for the socket to be
            // able to be written to again.
            remaining    = r;
            auto pstatus = co_await client.poll(coro::poll_op::write);
            if (pstatus != coro::poll_status::event)
            {
                co_return; // Handle error.
            }
        } while (true);

        co_return;
    };

    auto make_client_task = [&]() -> coro::task<void> {
        // Immediately schedule onto the scheduler.
        co_await scheduler.schedule();

        // Create the tcp_client with the default settings; see tcp_client for how to set the
        // ip address, port, and optionally enable SSL/TLS.
        coro::net::tcp_client client{scheduler};

        // Omitting error checking code for the client; each step should check the status and
        // verify the number of bytes sent or received.

        // Connect to the server.
        co_await client.connect();

        // Send the request data.
        client.send(std::string_view{"Hello from client."});

        // Wait for the response and receive it.
        co_await client.poll(coro::poll_op::read);
        std::string response(256, '\0');
        auto [recv_status, recv_bytes] = client.recv(response);
        response.resize(recv_bytes.size());

        std::cout << "client: " << response << "\n";
        co_return;
    };

    // Create and wait for the server and client tasks to complete.
    coro::sync_wait(coro::when_all(make_server_task(), make_client_task()));
}
```
examples/coro_latch.cpp
@@ -1,54 +0,0 @@

```C++
#include <coro/coro.hpp>
#include <iostream>

int main()
{
    // Complete worker tasks faster on a thread pool, using the io_scheduler version so the worker
    // tasks can yield for a specific amount of time to mimic difficult work. The pool is only
    // set up with a single thread to showcase yield_for().
    coro::io_scheduler tp{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};

    // This task will wait until the given latch setters have completed.
    auto make_latch_task = [](coro::latch& l) -> coro::task<void> {
        // It seems like the dependent worker tasks could be created here, but in that case it would
        // be superior to simply do: `co_await coro::when_all(tasks);`
        // It is also important to note that the last dependent task will resume the waiting latch
        // task prior to actually completing -- thus the dependent task's frame could be destroyed
        // by the latch task completing before it gets a chance to finish after calling resume() on
        // the latch task!

        std::cout << "latch task is now waiting on all children tasks...\n";
        co_await l;
        std::cout << "latch task dependency tasks completed, resuming.\n";
        co_return;
    };

    // This task does 'work' and counts down on the latch when completed. The final child task to
    // complete will end up resuming the latch task when the latch's count reaches zero.
    auto make_worker_task = [](coro::io_scheduler& tp, coro::latch& l, int64_t i) -> coro::task<void> {
        // Schedule the worker task onto the thread pool.
        co_await tp.schedule();
        std::cout << "worker task " << i << " is working...\n";
        // Do some expensive calculations, yield to mimic work...! It is also important to never use
        // std::this_thread::sleep_for() within the context of coroutines: it will block the thread
        // and other tasks that are ready to execute will be blocked.
        co_await tp.yield_for(std::chrono::milliseconds{i * 20});
        std::cout << "worker task " << i << " is done, counting down on the latch\n";
        l.count_down();
        co_return;
    };

    const int64_t num_tasks{5};
    coro::latch l{num_tasks};
    std::vector<coro::task<void>> tasks{};

    // Make the latch task first so it correctly waits for all worker tasks to count down.
    tasks.emplace_back(make_latch_task(l));
    for (int64_t i = 1; i <= num_tasks; ++i)
    {
        tasks.emplace_back(make_worker_task(tp, l, i));
    }

    // Wait for all tasks to complete.
    coro::sync_wait(coro::when_all(std::move(tasks)));
}
```
examples/coro_mutex.cpp
@@ -1,38 +0,0 @@

```C++
#include <coro/coro.hpp>
#include <iostream>

int main()
{
    coro::thread_pool tp{coro::thread_pool::options{.thread_count = 4}};
    std::vector<uint64_t> output{};
    coro::mutex mutex;

    auto make_critical_section_task = [&](uint64_t i) -> coro::task<void> {
        co_await tp.schedule();
        // To acquire a mutex lock co_await its lock() function. Upon acquiring the lock the
        // lock() function returns a coro::scoped_lock that holds the mutex and automatically
        // unlocks the mutex upon destruction. This behaves just like std::scoped_lock.
        {
            auto scoped_lock = co_await mutex.lock();
            output.emplace_back(i);
        } // <-- scoped lock unlocks the mutex here.
        co_return;
    };

    const size_t num_tasks{100};
    std::vector<coro::task<void>> tasks{};
    tasks.reserve(num_tasks);
    for (size_t i = 1; i <= num_tasks; ++i)
    {
        tasks.emplace_back(make_critical_section_task(i));
    }

    coro::sync_wait(coro::when_all(std::move(tasks)));

    // The output will be variable per run depending on how the tasks are picked up on the
    // thread pool workers.
    for (const auto& value : output)
    {
        std::cout << value << ", ";
    }
}
```
examples/coro_ring_buffer.cpp
@@ -1,74 +0,0 @@

```C++
#include <coro/coro.hpp>
#include <iostream>

int main()
{
    const size_t iterations = 100;
    const size_t consumers  = 4;
    coro::thread_pool tp{coro::thread_pool::options{.thread_count = 4}};
    coro::ring_buffer<uint64_t, 16> rb{};
    coro::mutex m{};

    std::vector<coro::task<void>> tasks{};

    auto make_producer_task = [&]() -> coro::task<void> {
        co_await tp.schedule();

        for (size_t i = 1; i <= iterations; ++i)
        {
            co_await rb.produce(i);
        }

        // Wait for the ring buffer to clear all items so it's a clean stop.
        while (!rb.empty())
        {
            co_await tp.yield();
        }

        // Now that the ring buffer is empty, signal to all the consumers that it's time to stop.
        // Note that the stop signal works on producers as well, but this example only uses 1 producer.
        {
            auto scoped_lock = co_await m.lock();
            std::cerr << "\nproducer is sending stop signal";
        }
        rb.stop_signal_notify_waiters();
        co_return;
    };

    auto make_consumer_task = [&](size_t id) -> coro::task<void> {
        co_await tp.schedule();

        try
        {
            while (true)
            {
                auto value = co_await rb.consume();
                {
                    auto scoped_lock = co_await m.lock();
                    std::cout << "(id=" << id << ", v=" << value << "), ";
                }

                // Mimic doing some work on the consumed value.
                co_await tp.yield();
            }
        }
        catch (const coro::stop_signal&)
        {
            auto scoped_lock = co_await m.lock();
            std::cerr << "\nconsumer " << id << " shutting down, stop signal received";
        }

        co_return;
    };

    // Create N consumers.
    for (size_t i = 0; i < consumers; ++i)
    {
        tasks.emplace_back(make_consumer_task(i));
    }
    // Create 1 producer.
    tasks.emplace_back(make_producer_task());

    // Wait for all the values to be produced and consumed through the ring buffer.
    coro::sync_wait(coro::when_all(std::move(tasks)));
}
```
examples/coro_semaphore.cpp
@@ -1,29 +0,0 @@

```C++
#include <coro/coro.hpp>
#include <iostream>

int main()
{
    // Have more threads/tasks than the semaphore will allow for at any given point in time.
    coro::thread_pool tp{coro::thread_pool::options{.thread_count = 8}};
    coro::semaphore semaphore{2};

    auto make_rate_limited_task = [&](uint64_t task_num) -> coro::task<void> {
        co_await tp.schedule();

        // This will only allow 2 tasks through at any given point in time; all other tasks will
        // await the resource to be available before proceeding.
        co_await semaphore.acquire();
        std::cout << task_num << ", ";
        semaphore.release();
        co_return;
    };

    const size_t num_tasks{100};
    std::vector<coro::task<void>> tasks{};
    for (size_t i = 1; i <= num_tasks; ++i)
    {
        tasks.emplace_back(make_rate_limited_task(i));
    }

    coro::sync_wait(coro::when_all(std::move(tasks)));
}
```
examples/coro_shared_mutex.cpp
@@ -1,55 +0,0 @@

```C++
#include <coro/coro.hpp>
#include <iostream>

int main()
{
    // Shared mutexes require a thread pool to be able to wake up multiple shared waiters when
    // there is an exclusive lock holder releasing the lock. This example uses a single thread
    // to also show the interleaving of coroutines acquiring the shared lock in shared and
    // exclusive mode as they resume and suspend in a linear manner. Ideally the thread pool
    // would have more than 1 thread to resume all shared waiters in parallel.
    coro::thread_pool tp{coro::thread_pool::options{.thread_count = 1}};
    coro::shared_mutex mutex{tp};

    auto make_shared_task = [&](uint64_t i) -> coro::task<void> {
        co_await tp.schedule();
        {
            std::cerr << "shared task " << i << " lock_shared()\n";
            auto scoped_lock = co_await mutex.lock_shared();
            std::cerr << "shared task " << i << " lock_shared() acquired\n";
            /// Immediately yield so the other shared tasks also acquire in shared state
            /// while this task currently holds the mutex in shared state.
            co_await tp.yield();
            std::cerr << "shared task " << i << " unlock_shared()\n";
        }
        co_return;
    };

    auto make_exclusive_task = [&]() -> coro::task<void> {
        co_await tp.schedule();

        std::cerr << "exclusive task lock()\n";
        auto scoped_lock = co_await mutex.lock();
        std::cerr << "exclusive task lock() acquired\n";
        // Do the exclusive work..
        std::cerr << "exclusive task unlock()\n";
        co_return;
    };

    // Create 3 shared tasks that will acquire the mutex in a shared state.
    const size_t num_tasks{3};
    std::vector<coro::task<void>> tasks{};
    for (size_t i = 1; i <= num_tasks; ++i)
    {
        tasks.emplace_back(make_shared_task(i));
    }
    // Create an exclusive task.
    tasks.emplace_back(make_exclusive_task());
    // Create 3 more shared tasks that will be blocked until the exclusive task completes.
    for (size_t i = num_tasks + 1; i <= num_tasks * 2; ++i)
    {
        tasks.emplace_back(make_shared_task(i));
    }

    coro::sync_wait(coro::when_all(std::move(tasks)));
}
```
examples/coro_task.cpp
@@ -1,76 +0,0 @@

```C++
#include <coro/coro.hpp>
#include <iostream>

int main()
{
    // Task that takes a value and doubles it.
    auto double_task = [](uint64_t x) -> coro::task<uint64_t> { co_return x * 2; };

    // Create a task that awaits the doubling of its given value and
    // then returns the result after adding 5.
    auto double_and_add_5_task = [&](uint64_t input) -> coro::task<uint64_t> {
        auto doubled = co_await double_task(input);
        co_return doubled + 5;
    };

    auto output = coro::sync_wait(double_and_add_5_task(2));
    std::cout << "Task1 output = " << output << "\n";

    struct expensive_struct
    {
        std::string id{};
        std::vector<std::string> records{};

        expensive_struct()  = default;
        ~expensive_struct() = default;

        // Explicitly delete copy constructor and copy assign, force only moves!
        // While the default move constructors will work for this struct the example
        // inserts explicit print statements to show the task is moving the value
        // out correctly.
        expensive_struct(const expensive_struct&) = delete;
        auto operator=(const expensive_struct&) -> expensive_struct& = delete;

        expensive_struct(expensive_struct&& other) : id(std::move(other.id)), records(std::move(other.records))
        {
            std::cout << "expensive_struct() move constructor called\n";
        }
        auto operator=(expensive_struct&& other) -> expensive_struct&
        {
            if (std::addressof(other) != this)
            {
                id      = std::move(other.id);
                records = std::move(other.records);
            }
            std::cout << "expensive_struct() move assignment called\n";
            return *this;
        }
    };

    // Create a very large object and return it by moving the value so the
    // contents do not have to be copied out.
    auto move_output_task = []() -> coro::task<expensive_struct> {
        expensive_struct data{};
        data.id = "12345678-1234-5678-9012-123456781234";
        for (size_t i = 10'000; i < 100'000; ++i)
        {
            data.records.emplace_back(std::to_string(i));
        }

        // Because the struct only has move constructors it will be forced to use
        // them; no need to explicitly std::move(data).
        co_return data;
    };

    auto data = coro::sync_wait(move_output_task());
    std::cout << data.id << " has " << data.records.size() << " records.\n";

    // std::unique_ptr<T> can also be used to return a larger object.
    auto unique_ptr_task = []() -> coro::task<std::unique_ptr<uint64_t>> { co_return std::make_unique<uint64_t>(42); };

    auto answer_to_everything = coro::sync_wait(unique_ptr_task());
    if (answer_to_everything != nullptr)
    {
        std::cout << "Answer to everything = " << *answer_to_everything << "\n";
    }
}
```
examples/coro_io_scheduler.cpp (deleted)
@@ -1,81 +0,0 @@
#include <coro/coro.hpp>
#include <iostream>

int main()
{
    coro::io_scheduler scheduler{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};

    auto make_server_task = [&]() -> coro::task<void> {
        // This is the task that will handle processing a client's requests.
        auto serve_client = [](coro::net::tcp_client client) -> coro::task<void> {
            size_t requests{1};

            while (true)
            {
                // Continue to accept more requests until the client closes the connection.
                co_await client.poll(coro::poll_op::read);

                std::string request(64, '\0');
                auto [recv_status, recv_bytes] = client.recv(request);
                if (recv_status == coro::net::recv_status::closed)
                {
                    break;
                }

                request.resize(recv_bytes.size());
                std::cout << "server: " << request << "\n";

                auto response = "Hello from server " + std::to_string(requests);
                client.send(response);

                ++requests;
            }

            co_return;
        };

        // Spin up the tcp_server and schedule it onto the io_scheduler.
        coro::net::tcp_server server{scheduler};
        co_await scheduler.schedule();

        // All incoming connections will be stored into the task container until they are completed.
        coro::task_container tc{scheduler};

        // Wait for an incoming connection and accept it, this example will only use 1 connection.
        co_await server.poll();
        auto client = server.accept();
        // Store the task that will serve the client into the container and immediately begin executing it
        // on the task container's thread pool, which is the same as the scheduler.
        tc.start(serve_client(std::move(client)));

        // Wait for all clients to complete before shutting down the tcp_server.
        co_await tc.garbage_collect_and_yield_until_empty();
        co_return;
    };

    auto make_client_task = [&](size_t request_count) -> coro::task<void> {
        co_await scheduler.schedule();
        coro::net::tcp_client client{scheduler};

        co_await client.connect();

        // Send N requests on the same connection and wait for the server response to each one.
        for (size_t i = 1; i <= request_count; ++i)
        {
            // Send the request data.
            auto request = "Hello from client " + std::to_string(i);
            client.send(request);

            co_await client.poll(coro::poll_op::read);
            std::string response(64, '\0');
            auto [recv_status, recv_bytes] = client.recv(response);
            response.resize(recv_bytes.size());

            std::cout << "client: " << response << "\n";
        }

        co_return; // Upon exiting the tcp_client will close its connection to the server.
    };

    coro::sync_wait(coro::when_all(make_server_task(), make_client_task(5)));
}
examples/coro_thread_pool.cpp (deleted)
@@ -1,78 +0,0 @@
#include <coro/coro.hpp>
#include <iostream>
#include <random>

int main()
{
    coro::thread_pool tp{coro::thread_pool::options{
        // By default all thread pools will create their worker threads using
        // std::thread::hardware_concurrency() as the number of worker threads in the pool,
        // but this can be changed via this thread_count option. This example will use 4.
        .thread_count = 4,
        // Upon starting each worker thread an optional lambda callback with the worker's
        // index can be called to make thread changes, perhaps priority or change the thread's
        // name.
        .on_thread_start_functor = [](std::size_t worker_idx) -> void {
            std::cout << "thread pool worker " << worker_idx << " is starting up.\n";
        },
        // Upon stopping each worker thread an optional lambda callback with the worker's
        // index can be called.
        .on_thread_stop_functor = [](std::size_t worker_idx) -> void {
            std::cout << "thread pool worker " << worker_idx << " is shutting down.\n";
        }}};

    auto offload_task = [&](uint64_t child_idx) -> coro::task<uint64_t> {
        // Start by scheduling this offload worker task onto the thread pool.
        co_await tp.schedule();
        // Now any code below this schedule() line will be executed on one of the thread pool's
        // worker threads.

        // Mimic some expensive task that should be run on a background thread...
        std::random_device rd;
        std::mt19937 gen{rd()};
        std::uniform_int_distribution<> d{0, 1};

        size_t calculation{0};
        for (size_t i = 0; i < 1'000'000; ++i)
        {
            calculation += d(gen);

            // Let's be nice and yield() to let other coroutines on the thread pool have some cpu
            // time. This isn't necessary but is illustrated to show how tasks can cooperatively
            // yield control at certain points of execution. It's important to never call
            // std::this_thread::sleep_for() within the context of a coroutine, since that will
            // block the thread and prevent other coroutines which are ready for execution from
            // starting; always use yield(), or within the context of a coro::io_scheduler you
            // can use yield_for(amount).
            if (i == 500'000)
            {
                std::cout << "Task " << child_idx << " is yielding()\n";
                co_await tp.yield();
            }
        }
        co_return calculation;
    };

    auto primary_task = [&]() -> coro::task<uint64_t> {
        const size_t num_children{10};
        std::vector<coro::task<uint64_t>> child_tasks{};
        child_tasks.reserve(num_children);
        for (size_t i = 0; i < num_children; ++i)
        {
            child_tasks.emplace_back(offload_task(i));
        }

        // Wait for the thread pool workers to process all child tasks.
        auto results = co_await coro::when_all(std::move(child_tasks));

        // Sum up the results of the completed child tasks.
        size_t calculation{0};
        for (const auto& task : results)
        {
            calculation += task.return_value();
        }
        co_return calculation;
    };

    auto result = coro::sync_wait(primary_task());
    std::cout << "calculated thread pool result = " << result << "\n";
}
inc/coro/concepts/awaitable.hpp
@@ -23,7 +23,7 @@ concept awaiter = requires(type t, std::coroutine_handle<> c)
    std::same_as<decltype(t.await_suspend(c)), void> ||
    std::same_as<decltype(t.await_suspend(c)), bool> ||
    std::same_as<decltype(t.await_suspend(c)), std::coroutine_handle<>>;
    {t.await_resume()};
    { t.await_resume() };
};

/**
inc/coro/concepts/executor.hpp
Normal file, 24 lines
@@ -0,0 +1,24 @@
#pragma once

#include "coro/concepts/awaitable.hpp"

#include <concepts>
#include <coroutine>

namespace coro::concepts
{
template<typename type>
concept executor = requires(type t, std::coroutine_handle<> c)
{
    {
        t.schedule()
    } -> coro::concepts::awaiter;
    {
        t.yield()
    } -> coro::concepts::awaiter;
    {
        t.resume(c)
    } -> std::same_as<void>;
};

} // namespace coro::concepts
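To make the three requirements concrete, here is a minimal sketch of a type satisfying the concept; inline_executor is a hypothetical illustration for this page, not part of the library:

// Hypothetical illustration only -- not part of libcoro.
#include "coro/concepts/executor.hpp"

#include <coroutine>

struct inline_executor
{
    // schedule() and yield() return awaiters that never suspend.
    auto schedule() { return std::suspend_never{}; }
    auto yield() { return std::suspend_never{}; }
    // resume() runs the handle immediately on the calling thread.
    auto resume(std::coroutine_handle<> c) -> void { c.resume(); }
};

static_assert(coro::concepts::executor<inline_executor>);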
inc/coro/concepts/range_of.hpp
Normal file, 20 lines
@@ -0,0 +1,20 @@
#pragma once

#include <concepts>
#include <ranges>

namespace coro::concepts
{
/**
 * Concept to require that the range contains a specific type of value.
 */
template<class T, class V>
concept range_of = std::ranges::range<T> && std::is_same_v<V, std::ranges::range_value_t<T>>;

/**
 * Concept to require that a sized range contains a specific type of value.
 */
template<class T, class V>
concept sized_range_of = std::ranges::sized_range<T> && std::is_same_v<V, std::ranges::range_value_t<T>>;

} // namespace coro::concepts
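A quick illustration of constraining a template with range_of; the sum() helper is hypothetical, not from the library:

// Hypothetical illustration only -- not part of libcoro.
#include "coro/concepts/range_of.hpp"

#include <list>
#include <vector>

template<coro::concepts::range_of<int> range_type>
auto sum(const range_type& r) -> int
{
    int total{0};
    for (const auto& v : r)
    {
        total += v;
    }
    return total;
}

// sum(std::vector<int>{1, 2, 3}) and sum(std::list<int>{4, 5}) compile;
// a std::vector<double> is rejected at compile time.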
inc/coro/coro.hpp
@@ -2,30 +2,17 @@
#include "coro/concepts/awaitable.hpp"
|
||||
#include "coro/concepts/buffer.hpp"
|
||||
#include "coro/concepts/executor.hpp"
|
||||
#include "coro/concepts/promise.hpp"
|
||||
|
||||
#include "coro/net/connect.hpp"
|
||||
#include "coro/net/dns_resolver.hpp"
|
||||
#include "coro/net/hostname.hpp"
|
||||
#include "coro/net/ip_address.hpp"
|
||||
#include "coro/net/recv_status.hpp"
|
||||
#include "coro/net/send_status.hpp"
|
||||
#include "coro/net/socket.hpp"
|
||||
#include "coro/net/ssl_context.hpp"
|
||||
#include "coro/net/tcp_client.hpp"
|
||||
#include "coro/net/tcp_server.hpp"
|
||||
#include "coro/net/udp_peer.hpp"
|
||||
#include "coro/concepts/range_of.hpp"
|
||||
|
||||
#include "coro/event.hpp"
|
||||
#include "coro/generator.hpp"
|
||||
#include "coro/io_scheduler.hpp"
|
||||
#include "coro/latch.hpp"
|
||||
#include "coro/mutex.hpp"
|
||||
#include "coro/poll.hpp"
|
||||
#include "coro/ring_buffer.hpp"
|
||||
#include "coro/semaphore.hpp"
|
||||
#include "coro/shared_mutex.hpp"
|
||||
#include "coro/shutdown.hpp"
|
||||
#include "coro/stop_signal.hpp"
|
||||
#include "coro/sync_wait.hpp"
|
||||
#include "coro/task.hpp"
|
||||
|
|
|
inc/coro/event.hpp
@@ -1,11 +1,21 @@
#pragma once

#include "coro/concepts/executor.hpp"

#include <atomic>
#include <coroutine>

namespace coro
{
class thread_pool;
enum class resume_order_policy
{
    /// Last in first out, this is the default policy and will execute the fastest
    /// if you do not need the first waiter to execute first upon the event being set.
    lifo,
    /// First in first out, this policy has an extra overhead to reverse the order of
    /// the waiters but will guarantee the ordering is fifo.
    fifo
};

/**
 * Event is a manually triggered thread safe signal that can be co_await()'ed by multiple awaiters.
@@ -27,36 +37,6 @@ t2: resume()
class event
{
public:
    /**
     * Creates an event with the given initial state of being set or not set.
     * @param initially_set By default all events start as not set, but if needed this parameter can
     *                      set the event to already be triggered.
     */
    explicit event(bool initially_set = false) noexcept;
    ~event() = default;

    event(const event&) = delete;
    event(event&&) = delete;
    auto operator=(const event&) -> event& = delete;
    auto operator=(event&&) -> event& = delete;

    /**
     * @return True if this event is currently in the set state.
     */
    auto is_set() const noexcept -> bool { return m_state.load(std::memory_order_acquire) == this; }

    /**
     * Sets this event and resumes all awaiters. Note that all waiters will be resumed onto this
     * thread of execution.
     */
    auto set() noexcept -> void;

    /**
     * Sets this event and resumes all awaiters onto the given thread pool. This will distribute
     * the waiters across the thread pool's threads.
     */
    auto set(coro::thread_pool& tp) noexcept -> void;

    struct awaiter
    {
        /**
@@ -90,6 +70,59 @@ public:
        awaiter* m_next{nullptr};
    };

    /**
     * Creates an event with the given initial state of being set or not set.
     * @param initially_set By default all events start as not set, but if needed this parameter can
     *                      set the event to already be triggered.
     */
    explicit event(bool initially_set = false) noexcept;
    ~event() = default;

    event(const event&) = delete;
    event(event&&) = delete;
    auto operator=(const event&) -> event& = delete;
    auto operator=(event&&) -> event& = delete;

    /**
     * @return True if this event is currently in the set state.
     */
    auto is_set() const noexcept -> bool { return m_state.load(std::memory_order_acquire) == this; }

    /**
     * Sets this event and resumes all awaiters. Note that all waiters will be resumed onto this
     * thread of execution.
     * @param policy The order in which the waiters should be resumed, defaults to LIFO since it
     *               is more efficient, FIFO requires reversing the order of the waiters first.
     */
    auto set(resume_order_policy policy = resume_order_policy::lifo) noexcept -> void;

    /**
     * Sets this event and resumes all awaiters onto the given executor. This will distribute
     * the waiters across the executor's threads.
     */
    template<concepts::executor executor_type>
    auto set(executor_type& e, resume_order_policy policy = resume_order_policy::lifo) noexcept -> void
    {
        void* old_value = m_state.exchange(this, std::memory_order::acq_rel);
        if (old_value != this)
        {
            // If FIFO has been requested then reverse the order upon resuming.
            if (policy == resume_order_policy::fifo)
            {
                old_value = reverse(static_cast<awaiter*>(old_value));
            }
            // else lifo nothing to do

            auto* waiters = static_cast<awaiter*>(old_value);
            while (waiters != nullptr)
            {
                auto* next = waiters->m_next;
                e.resume(waiters->m_awaiting_coroutine);
                waiters = next;
            }
        }
    }

    /**
     * @return An awaiter struct to suspend and resume this coroutine for when the event is set.
     */
@@ -110,6 +143,12 @@ protected:
    /// 2) awaiter* == linked list of awaiters waiting for the event to trigger.
    /// 3) this == The event is triggered and all awaiters are resumed.
    mutable std::atomic<void*> m_state;

private:
    /**
     * Reverses the set of waiters from LIFO->FIFO and returns the new head.
     */
    auto reverse(awaiter* head) -> awaiter*;
};

} // namespace coro
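The diff only declares reverse(); for orientation, a standard in-place reversal of the LIFO awaiter stack would look roughly like the sketch below. This is an inference from the declaration and the set() loop above, not the library's actual definition:

// Sketch only: plausible body for event::reverse(), assuming the awaiter
// list is a singly linked stack chained through m_next (as shown above).
auto event::reverse(awaiter* head) -> awaiter*
{
    awaiter* prev{nullptr};
    while (head != nullptr)
    {
        auto* next   = head->m_next; // remember the remainder of the list
        head->m_next = prev;         // re-point the current node backwards
        prev         = head;
        head         = next;
    }
    return prev; // the oldest waiter is now first, giving FIFO resumption order
}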
inc/coro/fd.hpp
Normal file, 7 lines
@@ -0,0 +1,7 @@
#pragma once

namespace coro
{
using fd_t = int;

} // namespace coro
inc/coro/generator.hpp
@@ -24,7 +24,7 @@ public:
    auto initial_suspend() const { return std::suspend_always{}; }

    auto final_suspend() const { return std::suspend_always{}; }
    auto final_suspend() const noexcept(true) { return std::suspend_always{}; }

    template<typename U = T, std::enable_if_t<!std::is_rvalue_reference<U>::value, int> = 0>
    auto yield_value(std::remove_reference_t<T>& value) noexcept
inc/coro/io_scheduler.hpp
@@ -1,42 +1,54 @@
#pragma once

#include "coro/event.hpp"
#include "coro/detail/poll_info.hpp"
#include "coro/fd.hpp"
#include "coro/net/socket.hpp"
#include "coro/poll.hpp"
#include "coro/task_container.hpp"
#include "coro/thread_pool.hpp"

#include <chrono>
#include <functional>
#include <map>
#include <memory>
#include <optional>
#include <sys/eventfd.h>
#include <thread>
#include <vector>

namespace coro
{
namespace detail
class io_scheduler
{
class poll_info;
} // namespace detail

class io_scheduler : public coro::thread_pool
{
    friend detail::poll_info;

    using clock = std::chrono::steady_clock;
    using time_point = clock::time_point;
    using timed_events = std::multimap<time_point, detail::poll_info*>;
    using clock = detail::poll_info::clock;
    using time_point = detail::poll_info::time_point;
    using timed_events = detail::poll_info::timed_events;

public:
    using fd_t = int;
    class schedule_operation;
    friend schedule_operation;

    enum class thread_strategy_t
    {
        /// Spawns a background thread for the scheduler to run on.
        /// Spawns a dedicated background thread for the scheduler to run on.
        spawn,
        /// Requires the user to call process_events() to drive the scheduler
        /// Requires the user to call process_events() to drive the scheduler.
        manual
    };

    enum class execution_strategy_t
    {
        /// Tasks will be FIFO queued to be executed on a thread pool. This is better for tasks that
        /// are long lived and will use lots of CPU because long lived tasks will block other i/o
        /// operations while they complete. This strategy is generally better for lower latency
        /// requirements at the cost of throughput.
        process_tasks_on_thread_pool,
        /// Tasks will be executed inline on the io scheduler thread. This is better for short tasks
        /// that can be quickly processed and not block other i/o operations for very long. This
        /// strategy is generally better for higher throughput at the cost of latency.
        process_tasks_inline
    };

    struct options
    {
        /// Should the io scheduler spawn a dedicated event processor?
@@ -50,6 +62,10 @@ public:
            .thread_count = ((std::thread::hardware_concurrency() > 1) ? (std::thread::hardware_concurrency() - 1) : 1),
            .on_thread_start_functor = nullptr,
            .on_thread_stop_functor = nullptr};

        /// If inline task processing is enabled then the io worker will resume tasks on its thread
        /// rather than scheduling them to be picked up by the thread pool.
        const execution_strategy_t execution_strategy{execution_strategy_t::process_tasks_on_thread_pool};
    };

    explicit io_scheduler(
@@ -57,18 +73,19 @@ public:
            .thread_strategy = thread_strategy_t::spawn,
            .on_io_thread_start_functor = nullptr,
            .on_io_thread_stop_functor = nullptr,
            .pool = {
                .thread_count =
                    ((std::thread::hardware_concurrency() > 1) ? (std::thread::hardware_concurrency() - 1) : 1),
                .on_thread_start_functor = nullptr,
                .on_thread_stop_functor = nullptr}});
            .pool =
                {.thread_count =
                     ((std::thread::hardware_concurrency() > 1) ? (std::thread::hardware_concurrency() - 1) : 1),
                 .on_thread_start_functor = nullptr,
                 .on_thread_stop_functor = nullptr},
            .execution_strategy = execution_strategy_t::process_tasks_on_thread_pool});

    io_scheduler(const io_scheduler&) = delete;
    io_scheduler(io_scheduler&&) = delete;
    auto operator=(const io_scheduler&) -> io_scheduler& = delete;
    auto operator=(io_scheduler&&) -> io_scheduler& = delete;

    virtual ~io_scheduler() override;
    ~io_scheduler();

    /**
     * Given a thread_strategy_t::manual this function should be called at regular intervals to
@@ -82,6 +99,74 @@ public:
     */
    auto process_events(std::chrono::milliseconds timeout = std::chrono::milliseconds{0}) -> std::size_t;

    class schedule_operation
    {
        friend class io_scheduler;
        explicit schedule_operation(io_scheduler& scheduler) noexcept : m_scheduler(scheduler) {}

    public:
        /**
         * Operations always pause so the executing thread can be switched.
         */
        auto await_ready() noexcept -> bool { return false; }

        /**
         * Suspending always returns to the caller (using void return of await_suspend()) and
         * stores the coroutine internally for the executing thread to resume from.
         */
        auto await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> void
        {
            if (m_scheduler.m_opts.execution_strategy == execution_strategy_t::process_tasks_inline)
            {
                m_scheduler.m_size.fetch_add(1, std::memory_order::release);
                {
                    std::scoped_lock lk{m_scheduler.m_scheduled_tasks_mutex};
                    m_scheduler.m_scheduled_tasks.emplace_back(awaiting_coroutine);
                }

                // Trigger the event to wake up the scheduler if this event isn't currently triggered.
                bool expected{false};
                if (m_scheduler.m_schedule_fd_triggered.compare_exchange_strong(
                        expected, true, std::memory_order::release, std::memory_order::relaxed))
                {
                    eventfd_t value{1};
                    eventfd_write(m_scheduler.m_schedule_fd, value);
                }
            }
            else
            {
                m_scheduler.m_thread_pool->resume(awaiting_coroutine);
            }
        }

        /**
         * no-op as this is the function called first by the executing thread upon resumption.
         */
        auto await_resume() noexcept -> void {}

    private:
        /// The io_scheduler that this operation will execute on.
        io_scheduler& m_scheduler;
    };

    /**
     * Schedules the current task onto this io_scheduler for execution.
     */
    auto schedule() -> schedule_operation { return schedule_operation{*this}; }

    /**
     * Schedules a task onto the io_scheduler and moves ownership of the task to the io_scheduler.
     * Only void return type tasks can be scheduled in this manner since the task submitter will no
     * longer have control over the scheduled task.
     * @param task The task to execute on this io_scheduler. Its lifetime ownership will be transferred
     *             to this io_scheduler.
     */
    auto schedule(coro::task<void>&& task) -> void
    {
        auto* ptr = static_cast<coro::task_container<coro::io_scheduler>*>(m_owned_tasks);
        ptr->start(std::move(task));
    }

    /**
     * Schedules the current task to run after the given amount of time has elapsed.
     * @param amount The amount of time to wait before resuming execution of this task.
@@ -96,6 +181,11 @@ public:
     */
    [[nodiscard]] auto schedule_at(time_point time) -> coro::task<void>;

    /**
     * Yields the current task to the end of the queue of waiting tasks.
     */
    [[nodiscard]] auto yield() -> schedule_operation { return schedule_operation{*this}; };

    /**
     * Yields the current task for the given amount of time.
     * @param amount The amount of time to yield for before resuming execution of this task.
@@ -137,14 +227,57 @@ public:
    }

    /**
     * Starts the shutdown of the io scheduler. All currently executing and pending tasks will complete
     * prior to shutting down.
     * @param wait_for_tasks Given shutdown_t::sync this function will block until all outstanding
     *                       tasks are completed. Given shutdown_t::async this function will trigger
     *                       the shutdown process but return immediately. In this case the io_scheduler's
     *                       destructor will block if any background threads haven't joined.
     * Resumes execution of a direct coroutine handle on this io scheduler.
     * @param handle The coroutine handle to resume execution.
     */
    auto shutdown(shutdown_t wait_for_tasks = shutdown_t::sync) noexcept -> void override;
    auto resume(std::coroutine_handle<> handle) -> void
    {
        if (m_opts.execution_strategy == execution_strategy_t::process_tasks_inline)
        {
            {
                std::scoped_lock lk{m_scheduled_tasks_mutex};
                m_scheduled_tasks.emplace_back(handle);
            }

            bool expected{false};
            if (m_schedule_fd_triggered.compare_exchange_strong(
                    expected, true, std::memory_order::release, std::memory_order::relaxed))
            {
                eventfd_t value{1};
                eventfd_write(m_schedule_fd, value);
            }
        }
        else
        {
            m_thread_pool->resume(handle);
        }
    }

    /**
     * @return The number of tasks waiting in the task queue + the executing tasks.
     */
    auto size() const noexcept -> std::size_t
    {
        if (m_opts.execution_strategy == execution_strategy_t::process_tasks_inline)
        {
            return m_size.load(std::memory_order::acquire);
        }
        else
        {
            return m_size.load(std::memory_order::acquire) + m_thread_pool->size();
        }
    }

    /**
     * @return True if the task queue is empty and zero tasks are currently executing.
     */
    auto empty() const noexcept -> bool { return size() == 0; }

    /**
     * Starts the shutdown of the io scheduler. All currently executing and pending tasks will complete
     * prior to shutting down. This call is blocking and will not return until all tasks complete.
     */
    auto shutdown() noexcept -> void;

private:
    /// The configuration options.
@@ -156,27 +289,40 @@ private:
    fd_t m_shutdown_fd{-1};
    /// The event loop timer fd for timed events, e.g. yield_for() or schedule_after().
    fd_t m_timer_fd{-1};
    /// The schedule file descriptor if the scheduler is in inline processing mode.
    fd_t m_schedule_fd{-1};
    std::atomic<bool> m_schedule_fd_triggered{false};

    /// The number of tasks executing or awaiting events in this io scheduler.
    std::atomic<std::size_t> m_size{0};

    /// The background io worker threads.
    std::thread m_io_thread;
    /// Thread pool for executing tasks when not in inline mode.
    std::unique_ptr<thread_pool> m_thread_pool{nullptr};

    std::mutex m_timed_events_mutex{};
    /// The map of time points to poll infos for tasks that are yielding for a period of time
    /// or for tasks that are polling with timeouts.
    timed_events m_timed_events{};

    /// Has the io_scheduler been requested to shut down?
    std::atomic<bool> m_shutdown_requested{false};

    std::atomic<bool> m_io_processing{false};
    auto process_events_manual(std::chrono::milliseconds timeout) -> void;
    auto process_events_dedicated_thread() -> void;
    auto process_events_execute(std::chrono::milliseconds timeout) -> void;
    static auto event_to_poll_status(uint32_t events) -> poll_status;

    auto process_event_execute(detail::poll_info* pi, poll_status status) -> void;
    auto process_timeout_execute() -> void;
    auto process_scheduled_execute_inline() -> void;
    std::mutex m_scheduled_tasks_mutex{};
    std::vector<std::coroutine_handle<>> m_scheduled_tasks{};

    auto add_timer_token(time_point tp, detail::poll_info& pi) -> timed_events::iterator;
    auto remove_timer_token(timed_events::iterator pos) -> void;
    auto update_timeout(time_point now) -> void;
    /// Tasks that have their ownership passed into the scheduler. This is a bit strange for now
    /// but the concept doesn't pass since io_scheduler isn't fully defined yet.
    /// The type is coro::task_container<coro::io_scheduler>*
    void* m_owned_tasks{nullptr};

    static constexpr const int m_shutdown_object{0};
    static constexpr const void* m_shutdown_ptr = &m_shutdown_object;
@@ -184,10 +330,21 @@ private:
    static constexpr const int m_timer_object{0};
    static constexpr const void* m_timer_ptr = &m_timer_object;

    static constexpr const int m_schedule_object{0};
    static constexpr const void* m_schedule_ptr = &m_schedule_object;

    static const constexpr std::chrono::milliseconds m_default_timeout{1000};
    static const constexpr std::chrono::milliseconds m_no_timeout{0};
    static const constexpr std::size_t m_max_events = 8;
    static const constexpr std::size_t m_max_events = 16;
    std::array<struct epoll_event, m_max_events> m_events{};
    std::vector<std::coroutine_handle<>> m_handles_to_resume{};

    auto process_event_execute(detail::poll_info* pi, poll_status status) -> void;
    auto process_timeout_execute() -> void;

    auto add_timer_token(time_point tp, detail::poll_info& pi) -> timed_events::iterator;
    auto remove_timer_token(timed_events::iterator pos) -> void;
    auto update_timeout(time_point now) -> void;
};

} // namespace coro
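Tying the new options together, a hedged sketch of opting in to the inline execution strategy shown in this diff (assumes the public names above and <coro/coro.hpp>):

// Sketch based on the options shown above.
#include <coro/coro.hpp>

#include <chrono>

int main()
{
    coro::io_scheduler scheduler{coro::io_scheduler::options{
        .thread_strategy    = coro::io_scheduler::thread_strategy_t::spawn,
        .execution_strategy = coro::io_scheduler::execution_strategy_t::process_tasks_inline}};

    auto task = [&]() -> coro::task<void> {
        co_await scheduler.schedule(); // resumed inline on the io thread itself
        co_await scheduler.yield_for(std::chrono::milliseconds{10});
        co_return;
    };

    coro::sync_wait(task());
}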
inc/coro/net/connect.hpp (deleted)
@@ -1,25 +0,0 @@
#pragma once

#include <string>

namespace coro::net
{
enum class connect_status
{
    /// The connection has been established.
    connected,
    /// The given ip address could not be parsed or is invalid.
    invalid_ip_address,
    /// The connection operation timed out.
    timeout,
    /// There was an error, use errno to get more information on the specific error.
    error
};

/**
 * @param status The connection status to convert.
 * @return String representation of the connection status.
 * @throw std::logic_error If provided an invalid connect_status enum value.
 */
auto to_string(const connect_status& status) -> const std::string&;

} // namespace coro::net
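For orientation, a small hedged sketch of consuming this enum via to_string(); client here is a coro::net::tcp_client as declared later in this diff:

// Sketch: inside a coroutine that owns a coro::net::tcp_client named client.
auto status = co_await client.connect(std::chrono::milliseconds{1000});
if (status != coro::net::connect_status::connected)
{
    std::cerr << "connect failed: " << coro::net::to_string(status) << "\n";
    co_return;
}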
inc/coro/net/dns_resolver.hpp (deleted)
@@ -1,99 +0,0 @@
#pragma once

#include "coro/io_scheduler.hpp"
#include "coro/net/hostname.hpp"
#include "coro/net/ip_address.hpp"
#include "coro/task.hpp"
#include "coro/task_container.hpp"

#include <ares.h>

#include <array>
#include <chrono>
#include <functional>
#include <memory>
#include <mutex>
#include <sys/epoll.h>
#include <unordered_set>
#include <vector>

namespace coro::net
{
class dns_resolver;

enum class dns_status
{
    complete,
    error
};

class dns_result
{
    friend dns_resolver;

public:
    dns_result(coro::io_scheduler& scheduler, coro::event& resume, uint64_t pending_dns_requests);
    ~dns_result() = default;

    /**
     * @return The status of the dns lookup.
     */
    auto status() const -> dns_status { return m_status; }

    /**
     * @return If the dns lookup was successful then the list of ip addresses that
     *         were resolved from the hostname.
     */
    auto ip_addresses() const -> const std::vector<coro::net::ip_address>& { return m_ip_addresses; }

private:
    coro::io_scheduler& m_io_scheduler;
    coro::event& m_resume;
    uint64_t m_pending_dns_requests{0};
    dns_status m_status{dns_status::complete};
    std::vector<coro::net::ip_address> m_ip_addresses{};

    friend auto ares_dns_callback(void* arg, int status, int timeouts, struct hostent* host) -> void;
};

class dns_resolver
{
public:
    explicit dns_resolver(io_scheduler& scheduler, std::chrono::milliseconds timeout);
    dns_resolver(const dns_resolver&) = delete;
    dns_resolver(dns_resolver&&) = delete;
    auto operator=(const dns_resolver&) noexcept -> dns_resolver& = delete;
    auto operator=(dns_resolver&&) noexcept -> dns_resolver& = delete;
    ~dns_resolver();

    /**
     * @param hn The hostname to resolve its ip addresses.
     */
    auto host_by_name(const net::hostname& hn) -> coro::task<std::unique_ptr<dns_result>>;

private:
    /// The io scheduler to drive the events for dns lookups.
    io_scheduler& m_io_scheduler;

    /// The global timeout per dns lookup request.
    std::chrono::milliseconds m_timeout{0};

    /// The c-ares channel for looking up dns entries.
    ares_channel m_ares_channel{nullptr};

    /// This is the set of sockets that are currently being actively polled so multiple poll tasks
    /// are not set up when ares_poll() is called.
    std::unordered_set<io_scheduler::fd_t> m_active_sockets{};

    task_container m_task_container;

    /// Global count to track if c-ares has been initialized or cleaned up.
    static uint64_t m_ares_count;
    /// Critical section around the c-ares global init/cleanup to prevent heap corruption.
    static std::mutex m_ares_mutex;

    auto ares_poll() -> void;
    auto make_poll_task(io_scheduler::fd_t fd, poll_op ops) -> coro::task<void>;
};

} // namespace coro::net
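A short hedged usage sketch of the resolver API above; the hostname and timeout are illustrative and the scheduler is assumed to be running:

// Sketch using the declarations above; assumes <iostream> and a live io_scheduler.
auto resolve_task = [](coro::io_scheduler& scheduler) -> coro::task<void> {
    co_await scheduler.schedule();
    coro::net::dns_resolver resolver{scheduler, std::chrono::milliseconds{5000}};

    auto result = co_await resolver.host_by_name(coro::net::hostname{"example.com"});
    if (result->status() == coro::net::dns_status::complete)
    {
        for (const auto& ip : result->ip_addresses())
        {
            std::cout << ip.to_string() << "\n";
        }
    }
};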
inc/coro/net/hostname.hpp (deleted)
@@ -1,26 +0,0 @@
#pragma once

#include <string>

namespace coro::net
{
class hostname
{
public:
    hostname() = default;
    explicit hostname(std::string hn) : m_hostname(std::move(hn)) {}
    hostname(const hostname&) = default;
    hostname(hostname&&) = default;
    auto operator=(const hostname&) noexcept -> hostname& = default;
    auto operator=(hostname&&) noexcept -> hostname& = default;
    ~hostname() = default;

    auto data() const -> const std::string& { return m_hostname; }

    auto operator<=>(const hostname& other) const { return m_hostname <=> other.m_hostname; }

private:
    std::string m_hostname;
};

} // namespace coro::net
inc/coro/net/ip_address.hpp (deleted)
@@ -1,106 +0,0 @@
#pragma once

#include <arpa/inet.h>
#include <array>
#include <cstring>
#include <span>
#include <stdexcept>
#include <string>

namespace coro::net
{
enum class domain_t : int
{
    ipv4 = AF_INET,
    ipv6 = AF_INET6
};

auto to_string(domain_t domain) -> const std::string&;

class ip_address
{
public:
    static const constexpr size_t ipv4_len{4};
    static const constexpr size_t ipv6_len{16};

    ip_address() = default;
    ip_address(std::span<const uint8_t> binary_address, domain_t domain = domain_t::ipv4) : m_domain(domain)
    {
        if (m_domain == domain_t::ipv4 && binary_address.size() > ipv4_len)
        {
            throw std::runtime_error{"coro::net::ip_address provided binary ip address is too long"};
        }
        else if (binary_address.size() > ipv6_len)
        {
            throw std::runtime_error{"coro::net::ip_address provided binary ip address is too long"};
        }

        std::copy(binary_address.begin(), binary_address.end(), m_data.begin());
    }
    ip_address(const ip_address&) = default;
    ip_address(ip_address&&) = default;
    auto operator=(const ip_address&) noexcept -> ip_address& = default;
    auto operator=(ip_address&&) noexcept -> ip_address& = default;
    ~ip_address() = default;

    auto domain() const -> domain_t { return m_domain; }
    auto data() const -> std::span<const uint8_t>
    {
        if (m_domain == domain_t::ipv4)
        {
            return std::span<const uint8_t>{m_data.data(), ipv4_len};
        }
        else
        {
            return std::span<const uint8_t>{m_data.data(), ipv6_len};
        }
    }

    static auto from_string(const std::string& address, domain_t domain = domain_t::ipv4) -> ip_address
    {
        ip_address addr{};
        addr.m_domain = domain;

        auto success = inet_pton(static_cast<int>(addr.m_domain), address.data(), addr.m_data.data());
        if (success != 1)
        {
            throw std::runtime_error{"coro::net::ip_address failed to convert from string"};
        }

        return addr;
    }

    auto to_string() const -> std::string
    {
        std::string output;
        if (m_domain == domain_t::ipv4)
        {
            output.resize(INET_ADDRSTRLEN, '\0');
        }
        else
        {
            output.resize(INET6_ADDRSTRLEN, '\0');
        }

        auto success = inet_ntop(static_cast<int>(m_domain), m_data.data(), output.data(), output.length());
        if (success != nullptr)
        {
            auto len = strnlen(success, output.length());
            output.resize(len);
        }
        else
        {
            throw std::runtime_error{"coro::net::ip_address failed to convert to string representation"};
        }

        return output;
    }

    auto operator<=>(const ip_address& other) const = default;

private:
    domain_t m_domain{domain_t::ipv4};
    std::array<uint8_t, ipv6_len> m_data{};
};

} // namespace coro::net
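A hedged round-trip sketch of the from_string()/to_string() helpers above; the printed values assume the usual dotted and colon notations:

// Round-trip sketch using the class above; assumes <iostream>.
auto v4 = coro::net::ip_address::from_string("127.0.0.1");
std::cout << v4.to_string() << "\n"; // expected: 127.0.0.1

auto v6 = coro::net::ip_address::from_string("::1", coro::net::domain_t::ipv6);
std::cout << v6.to_string() << "\n"; // expected: ::1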
inc/coro/net/recv_status.hpp (deleted)
@@ -1,32 +0,0 @@
#pragma once

#include <cstdint>
#include <errno.h>
#include <string>

namespace coro::net
{
enum class recv_status : int64_t
{
    ok = 0,
    /// The peer closed the socket.
    closed = -1,
    /// The udp socket has not been bind()'ed to a local port.
    udp_not_bound = -2,
    try_again = EAGAIN,
    would_block = EWOULDBLOCK,
    bad_file_descriptor = EBADF,
    connection_refused = ECONNREFUSED,
    memory_fault = EFAULT,
    interrupted = EINTR,
    invalid_argument = EINVAL,
    no_memory = ENOMEM,
    not_connected = ENOTCONN,
    not_a_socket = ENOTSOCK,

    ssl_error = -3
};

auto to_string(recv_status status) -> const std::string&;

} // namespace coro::net
inc/coro/net/send_status.hpp (deleted)
@@ -1,32 +0,0 @@
#pragma once

#include <cstdint>
#include <errno.h>

namespace coro::net
{
enum class send_status : int64_t
{
    ok = 0,
    permission_denied = EACCES,
    try_again = EAGAIN,
    would_block = EWOULDBLOCK,
    already_in_progress = EALREADY,
    bad_file_descriptor = EBADF,
    connection_reset = ECONNRESET,
    no_peer_address = EDESTADDRREQ,
    memory_fault = EFAULT,
    interrupted = EINTR,
    is_connection = EISCONN,
    message_size = EMSGSIZE,
    output_queue_full = ENOBUFS,
    no_memory = ENOMEM,
    not_connected = ENOTCONN,
    not_a_socket = ENOTSOCK,
    operationg_not_supported = EOPNOTSUPP,
    pipe_closed = EPIPE,

    ssl_error = -3
};

} // namespace coro::net
inc/coro/net/socket.hpp (deleted)
@@ -1,109 +0,0 @@
#pragma once

#include "coro/net/ip_address.hpp"
#include "coro/poll.hpp"

#include <arpa/inet.h>
#include <fcntl.h>
#include <span>
#include <unistd.h>
#include <utility>

#include <iostream>

namespace coro::net
{
class socket
{
public:
    enum class type_t
    {
        /// udp datagram socket
        udp,
        /// tcp streaming socket
        tcp
    };

    enum class blocking_t
    {
        /// This socket should block on system calls.
        yes,
        /// This socket should not block on system calls.
        no
    };

    struct options
    {
        /// The domain for the socket.
        domain_t domain;
        /// The type of socket.
        type_t type;
        /// If the socket should be blocking or non-blocking.
        blocking_t blocking;
    };

    static auto type_to_os(type_t type) -> int;

    socket() = default;
    explicit socket(int fd) : m_fd(fd) {}

    socket(const socket&) = delete;
    socket(socket&& other) : m_fd(std::exchange(other.m_fd, -1)) {}
    auto operator=(const socket&) -> socket& = delete;
    auto operator=(socket&& other) noexcept -> socket&;

    ~socket() { close(); }

    /**
     * This function returns true if the socket's file descriptor is a valid number, however it does
     * not imply if the socket is still usable.
     * @return True if the socket file descriptor is not -1.
     */
    auto is_valid() const -> bool { return m_fd != -1; }

    /**
     * @param block Sets the socket to the given blocking mode.
     */
    auto blocking(blocking_t block) -> bool;

    /**
     * @param how Shuts the socket down with the given operations.
     * @return True if the socket's given operations were shut down.
     */
    auto shutdown(poll_op how = poll_op::read_write) -> bool;

    /**
     * Closes the socket and sets this socket to an invalid state.
     */
    auto close() -> void;

    /**
     * @return The native handle (file descriptor) for this socket.
     */
    auto native_handle() const -> int { return m_fd; }

private:
    int m_fd{-1};
};

/**
 * Creates a socket with the given socket options, this typically is used for creating sockets to
 * use within client objects, e.g. tcp_client and udp_client.
 * @param opts See socket::options for more details.
 */
auto make_socket(const socket::options& opts) -> socket;

/**
 * Creates a socket that can accept connections or packets with the given socket options, address,
 * port and backlog. This is used for creating sockets to use within server objects, e.g.
 * tcp_server and udp_server.
 * @param opts See socket::options for more details.
 * @param address The ip address to bind to. If the type of socket is tcp then it will also listen.
 * @param port The port to bind to.
 * @param backlog If the type of socket is tcp then the backlog of connections to allow. Does nothing
 *                for udp types.
 */
auto make_accept_socket(
    const socket::options& opts, const net::ip_address& address, uint16_t port, int32_t backlog = 128) -> socket;

} // namespace coro::net
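For reference, a hedged sketch of the two factory functions declared above; the address and port values are illustrative:

// Sketch using the declarations above.
auto client_socket = coro::net::make_socket(coro::net::socket::options{
    .domain   = coro::net::domain_t::ipv4,
    .type     = coro::net::socket::type_t::tcp,
    .blocking = coro::net::socket::blocking_t::no});

auto listen_socket = coro::net::make_accept_socket(
    coro::net::socket::options{
        .domain   = coro::net::domain_t::ipv4,
        .type     = coro::net::socket::type_t::tcp,
        .blocking = coro::net::socket::blocking_t::no},
    coro::net::ip_address::from_string("0.0.0.0"),
    8080); // default backlog of 128 connections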
inc/coro/net/ssl_context.hpp (deleted)
@@ -1,61 +0,0 @@
#pragma once

#include <filesystem>
#include <mutex>

#include <openssl/bio.h>
#include <openssl/err.h>
#include <openssl/pem.h>
#include <openssl/ssl.h>

namespace coro::net
{
class tcp_client;

enum class ssl_file_type : int
{
    /// The file is of type ASN1
    asn1 = SSL_FILETYPE_ASN1,
    /// The file is of type PEM
    pem = SSL_FILETYPE_PEM
};

/**
 * SSL context, used with client or server types to provide secure connections.
 */
class ssl_context
{
public:
    /**
     * Creates a context with no certificate and no private key, may be useful for testing.
     */
    ssl_context();

    /**
     * Creates a context with the given certificate and the given private key.
     * @param certificate The location of the certificate file.
     * @param certificate_type See `ssl_file_type`.
     * @param private_key The location of the private key file.
     * @param private_key_type See `ssl_file_type`.
     */
    ssl_context(
        std::filesystem::path certificate,
        ssl_file_type certificate_type,
        std::filesystem::path private_key,
        ssl_file_type private_key_type);
    ~ssl_context();

private:
    static uint64_t m_ssl_context_count;
    static std::mutex m_ssl_context_mutex;

    SSL_CTX* m_ssl_ctx{nullptr};

    /// The following classes use the underlying SSL_CTX* object for performing SSL functions.
    friend tcp_client;

    auto native_handle() -> SSL_CTX* { return m_ssl_ctx; }
    auto native_handle() const -> const SSL_CTX* { return m_ssl_ctx; }
};

} // namespace coro::net
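A brief hedged sketch of constructing the two ssl_context variants declared above; the file paths are placeholders:

// Sketch using the constructors above; file paths are illustrative only.
coro::net::ssl_context insecure_ctx{}; // no certificate or key, e.g. for testing

coro::net::ssl_context server_ctx{
    "/etc/myapp/cert.pem", coro::net::ssl_file_type::pem,
    "/etc/myapp/key.pem",  coro::net::ssl_file_type::pem};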
inc/coro/net/ssl_handshake_status.hpp (deleted)
@@ -1,28 +0,0 @@
#pragma once

namespace coro::net
{
enum class ssl_handshake_status
{
    /// The ssl handshake was successful.
    ok,
    /// The connection hasn't been established yet, use connect() prior to the ssl_handshake().
    not_connected,
    /// The connection needs a coro::net::ssl_context to perform the handshake.
    ssl_context_required,
    /// The internal ssl memory allocation failed.
    ssl_resource_allocation_failed,
    /// Attempting to set the connection's ssl socket/file descriptor failed.
    ssl_set_fd_failure,
    /// The handshake had an error.
    handshake_failed,
    /// The handshake timed out.
    timeout,
    /// An error occurred while polling for read or write operations on the socket.
    poll_error,
    /// The socket was unexpectedly closed while attempting the handshake.
    unexpected_close

};

} // namespace coro::net
inc/coro/net/tcp_client.hpp (deleted)
@@ -1,304 +0,0 @@
#pragma once

#include "coro/concepts/buffer.hpp"
#include "coro/io_scheduler.hpp"
#include "coro/net/connect.hpp"
#include "coro/net/ip_address.hpp"
#include "coro/net/recv_status.hpp"
#include "coro/net/send_status.hpp"
#include "coro/net/socket.hpp"
#include "coro/net/ssl_context.hpp"
#include "coro/net/ssl_handshake_status.hpp"
#include "coro/poll.hpp"
#include "coro/task.hpp"

#include <chrono>
#include <memory>
#include <optional>

namespace coro::net
{
class tcp_server;

class tcp_client
{
public:
    struct options
    {
        /// The ip address to connect to. Use a dns_resolver to turn hostnames into ip addresses.
        net::ip_address address{net::ip_address::from_string("127.0.0.1")};
        /// The port to connect to.
        uint16_t port{8080};
        /// Should this tcp_client connect using a secure connection SSL/TLS?
        ssl_context* ssl_ctx{nullptr};
    };

    /**
     * Creates a new tcp client that can connect to an ip address + port. By default the socket
     * created will be in non-blocking mode, meaning that any sending or receiving of data should
     * poll for event readiness prior.
     * @param scheduler The io scheduler to drive the tcp client.
     * @param opts See tcp_client::options for more information.
     */
    tcp_client(
        io_scheduler& scheduler,
        options opts = options{
            .address = {net::ip_address::from_string("127.0.0.1")}, .port = 8080, .ssl_ctx = nullptr});
    tcp_client(const tcp_client&) = delete;
    tcp_client(tcp_client&& other);
    auto operator=(const tcp_client&) noexcept -> tcp_client& = delete;
    auto operator=(tcp_client&& other) noexcept -> tcp_client&;
    ~tcp_client();

    /**
     * @return The tcp socket this client is using.
     * @{
     **/
    auto socket() -> net::socket& { return m_socket; }
    auto socket() const -> const net::socket& { return m_socket; }
    /** @} */

    /**
     * Connects to the address+port with the given timeout. Once connected calling this function
     * only returns the connected status, it will not reconnect.
     * @param timeout How long to wait for the connection to establish? Timeout of zero is indefinite.
     * @return The result status of trying to connect.
     */
    auto connect(std::chrono::milliseconds timeout = std::chrono::milliseconds{0}) -> coro::task<net::connect_status>;
    /**
     * If this client is connected and the connection is SSL/TLS then perform the ssl handshake.
     * This must be done after a successful connect() call for clients that are initiating a
     * connection to a server. This must be done after a successful accept() call for clients that
     * have been accepted by a tcp_server. TCP server 'client's start in the connected state and
     * thus skip the connect() call.
     *
     * tcp_client initiating to a server:
     *     tcp_client client{...options...};
     *     co_await client.connect();
     *     co_await client.ssl_handshake(); // <-- only perform if ssl/tls connection
     *
     * tcp_server accepting a client connection:
     *     tcp_server server{...options...};
     *     co_await server.poll();
     *     auto client = server.accept();
     *     if(client.socket().is_valid())
     *     {
     *         co_await client.ssl_handshake(); // <-- only perform if ssl/tls connection
     *     }
     * @param timeout How long to allow for the ssl handshake to successfully complete?
     * @return The result of the ssl handshake.
     */
    auto ssl_handshake(std::chrono::milliseconds timeout = std::chrono::milliseconds{0})
        -> coro::task<ssl_handshake_status>;

    /**
     * Polls for the given operation on this client's tcp socket. This should be done prior to
     * calling recv and after a send that doesn't send the entire buffer.
     * @param op The poll operation to perform, use read for incoming data and write for outgoing.
     * @param timeout The amount of time to wait for the poll event to be ready. Use zero for infinite timeout.
     * @return The status result of the poll operation. When poll_status::event is returned then the
     *         event operation is ready.
     */
    auto poll(coro::poll_op op, std::chrono::milliseconds timeout = std::chrono::milliseconds{0})
        -> coro::task<poll_status>
    {
        return m_io_scheduler->poll(m_socket, op, timeout);
    }
    /**
     * Receives incoming data into the given buffer. By default since all tcp client sockets are set
     * to non-blocking use co_await poll() to determine when data is ready to be received.
     * @param buffer Received bytes are written into this buffer up to the buffer's size.
     * @return The status of the recv call and a span of the bytes received (if any). The span of
     *         bytes will be a subspan or full span of the given input buffer.
     */
    template<concepts::mutable_buffer buffer_type>
    auto recv(buffer_type&& buffer) -> std::pair<recv_status, std::span<char>>
    {
        // If the user requested zero bytes, just return.
        if (buffer.empty())
        {
            return {recv_status::ok, std::span<char>{}};
        }

        if (m_options.ssl_ctx == nullptr)
        {
            auto bytes_recv = ::recv(m_socket.native_handle(), buffer.data(), buffer.size(), 0);
            if (bytes_recv > 0)
            {
                // Ok, we've received some data.
                return {recv_status::ok, std::span<char>{buffer.data(), static_cast<size_t>(bytes_recv)}};
            }
            else if (bytes_recv == 0)
            {
                // On TCP stream sockets 0 indicates the connection has been closed by the peer.
                return {recv_status::closed, std::span<char>{}};
            }
            else
            {
                // Report the error to the user.
                return {static_cast<recv_status>(errno), std::span<char>{}};
            }
        }
        else
        {
            ERR_clear_error();
            size_t bytes_recv{0};
            int r = SSL_read_ex(m_ssl_info.m_ssl_ptr.get(), buffer.data(), buffer.size(), &bytes_recv);
            if (r == 0)
            {
                int err = SSL_get_error(m_ssl_info.m_ssl_ptr.get(), r);
                if (err == SSL_ERROR_WANT_READ)
                {
                    return {recv_status::would_block, std::span<char>{}};
                }
                else
                {
                    // TODO: Flesh out all possible ssl errors:
                    // https://www.openssl.org/docs/man1.1.1/man3/SSL_get_error.html
                    return {recv_status::ssl_error, std::span<char>{}};
                }
            }
            else
            {
                return {recv_status::ok, std::span<char>{buffer.data(), static_cast<size_t>(bytes_recv)}};
            }
        }
    }

    /**
     * Sends outgoing data from the given buffer. If a partial write occurs then use co_await poll()
     * to determine when the tcp client socket is ready to be written to again. On partial writes
     * the status will be 'ok' and the span returned will be non-empty, it will contain the buffer
     * span data that was not written to the client's socket.
     * @param buffer The data to write on the tcp socket.
     * @return The status of the send call and a span of any remaining bytes not sent. If all bytes
     *         were successfully sent the status will be 'ok' and the remaining span will be empty.
     */
    template<concepts::const_buffer buffer_type>
    auto send(const buffer_type& buffer) -> std::pair<send_status, std::span<const char>>
    {
        // If the user requested zero bytes, just return.
        if (buffer.empty())
        {
            return {send_status::ok, std::span<const char>{buffer.data(), buffer.size()}};
        }

        if (m_options.ssl_ctx == nullptr)
        {
            auto bytes_sent = ::send(m_socket.native_handle(), buffer.data(), buffer.size(), 0);
            if (bytes_sent >= 0)
            {
                // Some or all of the bytes were written.
                return {send_status::ok, std::span<const char>{buffer.data() + bytes_sent, buffer.size() - bytes_sent}};
            }
            else
            {
                // Due to the error none of the bytes were written.
                return {static_cast<send_status>(errno), std::span<const char>{buffer.data(), buffer.size()}};
            }
        }
        else
        {
            ERR_clear_error();
            size_t bytes_sent{0};
            int r = SSL_write_ex(m_ssl_info.m_ssl_ptr.get(), buffer.data(), buffer.size(), &bytes_sent);
            if (r == 0)
            {
                int err = SSL_get_error(m_ssl_info.m_ssl_ptr.get(), r);
                if (err == SSL_ERROR_WANT_WRITE)
                {
                    return {send_status::would_block, std::span<char>{}};
                }
                else
                {
                    // TODO: Flesh out all possible ssl errors:
                    // https://www.openssl.org/docs/man1.1.1/man3/SSL_get_error.html
                    return {send_status::ssl_error, std::span<char>{}};
                }
            }
            else
            {
                return {send_status::ok, std::span<const char>{buffer.data() + bytes_sent, buffer.size() - bytes_sent}};
            }
        }
    }
private:
|
||||
struct ssl_deleter
|
||||
{
|
||||
auto operator()(SSL* ssl) const -> void { SSL_free(ssl); }
|
||||
};
|
||||
|
||||
using ssl_unique_ptr = std::unique_ptr<SSL, ssl_deleter>;
|
||||
|
||||
enum class ssl_connection_type
|
||||
{
|
||||
/// This connection is a client connecting to a server.
|
||||
connect,
|
||||
/// This connection is an accepted connection on a sever.
|
||||
accept
|
||||
};
|
||||
|
||||
struct ssl_info
|
||||
{
|
||||
ssl_info() {}
|
||||
explicit ssl_info(ssl_connection_type type) : m_ssl_connection_type(type) {}
|
||||
ssl_info(const ssl_info&) noexcept = delete;
|
||||
ssl_info(ssl_info&& other) noexcept
|
||||
: m_ssl_connection_type(std::exchange(other.m_ssl_connection_type, ssl_connection_type::connect)),
|
||||
m_ssl_ptr(std::move(other.m_ssl_ptr)),
|
||||
m_ssl_error(std::exchange(other.m_ssl_error, false)),
|
||||
m_ssl_handshake_status(std::move(other.m_ssl_handshake_status))
|
||||
{
|
||||
}
|
||||
|
||||
auto operator=(const ssl_info&) noexcept -> ssl_info& = delete;
|
||||
|
||||
auto operator=(ssl_info&& other) noexcept -> ssl_info&
|
||||
{
|
||||
if (std::addressof(other) != this)
|
||||
{
|
||||
m_ssl_connection_type = std::exchange(other.m_ssl_connection_type, ssl_connection_type::connect);
|
||||
m_ssl_ptr = std::move(other.m_ssl_ptr);
|
||||
m_ssl_error = std::exchange(other.m_ssl_error, false);
|
||||
m_ssl_handshake_status = std::move(other.m_ssl_handshake_status);
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
/// What kind of connection is this, client initiated connect or server side accept?
|
||||
ssl_connection_type m_ssl_connection_type{ssl_connection_type::connect};
|
||||
/// OpenSSL ssl connection.
|
||||
ssl_unique_ptr m_ssl_ptr{nullptr};
|
||||
/// Was there an error with the SSL/TLS connection?
|
||||
bool m_ssl_error{false};
|
||||
/// The result of the ssl handshake.
|
||||
std::optional<ssl_handshake_status> m_ssl_handshake_status{std::nullopt};
|
||||
};
|
||||
|
||||
/// The tcp_server creates already connected clients and provides a tcp socket pre-built.
|
||||
friend tcp_server;
|
||||
tcp_client(io_scheduler& scheduler, net::socket socket, options opts);
|
||||
|
||||
/// The scheduler that will drive this tcp client.
|
||||
io_scheduler* m_io_scheduler{nullptr};
|
||||
/// Options for what server to connect to.
|
||||
options m_options{};
|
||||
/// The tcp socket.
|
||||
net::socket m_socket{-1};
|
||||
/// Cache the status of the connect in the event the user calls connect() again.
|
||||
std::optional<net::connect_status> m_connect_status{std::nullopt};
|
||||
/// SSL/TLS specific information if m_options.ssl_ctx != nullptr.
|
||||
ssl_info m_ssl_info{};
|
||||
|
||||
private:
|
||||
static auto ssl_shutdown_and_free(
|
||||
io_scheduler& io_scheduler,
|
||||
net::socket s,
|
||||
ssl_unique_ptr ssl_ptr,
|
||||
std::chrono::milliseconds timeout = std::chrono::milliseconds{0}) -> coro::task<void>;
|
||||
};
|
||||
|
||||
} // namespace coro::net
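A usage sketch, not part of this diff: a successful send() can still leave unsent bytes at the tail of the buffer, so callers are expected to loop. It assumes tcp_client exposes a poll(coro::poll_op) awaitable mirroring the ones shown below for tcp_server and udp_peer, and that a std::span<const char> satisfies the const_buffer concept.

auto send_all(coro::net::tcp_client& client, std::span<const char> remaining) -> coro::task<bool>
{
    while (!remaining.empty())
    {
        auto [status, unsent] = client.send(remaining);
        if (status == coro::net::send_status::ok)
        {
            remaining = unsent; // 'ok' can still leave bytes at the end of the buffer
        }
        else if (status == coro::net::send_status::would_block)
        {
            co_await client.poll(coro::poll_op::write); // wait until writeable again
        }
        else
        {
            co_return false; // ssl_error or an errno-based send_status
        }
    }
    co_return true;
}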
@@ -1,74 +0,0 @@
#pragma once

#include "coro/net/ip_address.hpp"
#include "coro/net/socket.hpp"
#include "coro/net/tcp_client.hpp"
#include "coro/task.hpp"

#include <fcntl.h>
#include <sys/socket.h>

namespace coro
{
class io_scheduler;
} // namespace coro

namespace coro::net
{
class ssl_context;

class tcp_server
{
public:
    struct options
    {
        /// The ip address for the tcp server to bind and listen on.
        net::ip_address address{net::ip_address::from_string("0.0.0.0")};
        /// The port for the tcp server to bind and listen on.
        uint16_t port{8080};
        /// The kernel backlog of connections to buffer.
        int32_t backlog{128};
        /// Should this tcp server use TLS/SSL? If provided, all accepted connections will use the
        /// given SSL certificate and private key to secure the connections.
        ssl_context* ssl_ctx{nullptr};
    };

    tcp_server(
        io_scheduler& scheduler,
        options opts = options{
            .address = net::ip_address::from_string("0.0.0.0"), .port = 8080, .backlog = 128, .ssl_ctx = nullptr});

    tcp_server(const tcp_server&) = delete;
    tcp_server(tcp_server&& other);
    auto operator=(const tcp_server&) -> tcp_server& = delete;
    auto operator=(tcp_server&& other) -> tcp_server&;
    ~tcp_server() = default;

    /**
     * Polls for new incoming tcp connections.
     * @param timeout How long to wait for a new connection before timing out, zero waits indefinitely.
     * @return The result of the poll, 'event' means the poll was successful and there is at least 1
     *         connection ready to be accepted.
     */
    auto poll(std::chrono::milliseconds timeout = std::chrono::milliseconds{0}) -> coro::task<coro::poll_status>
    {
        return m_io_scheduler->poll(m_accept_socket, coro::poll_op::read, timeout);
    }

    /**
     * Accepts an incoming tcp client connection. On failure the tcp client's socket will be set to
     * an invalid state; use socket.is_value() to verify the client was correctly accepted.
     * @return The newly connected tcp client connection.
     */
    auto accept() -> coro::net::tcp_client;

private:
    /// The io scheduler for awaiting new connections.
    io_scheduler* m_io_scheduler{nullptr};
    /// The bind and listen options for this server.
    options m_options;
    /// The socket for accepting new tcp connections on.
    net::socket m_accept_socket{-1};
};

} // namespace coro::net
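A hedged usage sketch, not part of this diff: the poll-then-accept loop this API implies. The socket validity check follows the accept() documentation above; the socket() accessor name is an assumption.

auto accept_loop(coro::net::tcp_server& server) -> coro::task<void>
{
    while (true)
    {
        auto pstatus = co_await server.poll(std::chrono::milliseconds{1000});
        if (pstatus != coro::poll_status::event)
        {
            continue; // timeout; a real server might break on error/closed
        }
        auto client = server.accept();
        if (client.socket().is_value()) // per the accept() docs; accessor name assumed
        {
            // hand the connected tcp_client off to a handler task here
        }
    }
}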
@@ -1,147 +0,0 @@
#pragma once

#include "coro/concepts/buffer.hpp"
#include "coro/io_scheduler.hpp"
#include "coro/net/ip_address.hpp"
#include "coro/net/recv_status.hpp"
#include "coro/net/send_status.hpp"
#include "coro/net/socket.hpp"
#include "coro/task.hpp"

#include <chrono>
#include <span>
#include <variant>

namespace coro
{
class io_scheduler;
} // namespace coro

namespace coro::net
{
class udp_peer
{
public:
    struct info
    {
        /// The ip address of the peer.
        net::ip_address address{net::ip_address::from_string("127.0.0.1")};
        /// The port of the peer.
        uint16_t port{8080};

        auto operator<=>(const info& other) const = default;
    };

    /**
     * Creates a udp peer that can send packets but not receive them. This udp peer will not explicitly
     * bind to a local ip+port.
     */
    explicit udp_peer(io_scheduler& scheduler, net::domain_t domain = net::domain_t::ipv4);

    /**
     * Creates a udp peer that can send and receive packets. This peer will bind to the given ip_port.
     */
    explicit udp_peer(io_scheduler& scheduler, const info& bind_info);

    udp_peer(const udp_peer&) = delete;
    udp_peer(udp_peer&&) = default;
    auto operator=(const udp_peer&) noexcept -> udp_peer& = delete;
    auto operator=(udp_peer&&) noexcept -> udp_peer& = default;
    ~udp_peer() = default;

    /**
     * @param op The poll operation to perform on the udp socket. Note that if this is a send-only
     *           udp socket (did not bind) then polling for read will not work.
     * @param timeout The timeout for the poll operation to be ready.
     * @return The result status of the poll operation.
     */
    auto poll(poll_op op, std::chrono::milliseconds timeout = std::chrono::milliseconds{0})
        -> coro::task<coro::poll_status>
    {
        co_return co_await m_io_scheduler.poll(m_socket, op, timeout);
    }

    /**
     * @param peer_info The peer to send the data to.
     * @param buffer The data to send.
     * @return The status of the send call and a span view of any data that wasn't sent; any unsent
     *         data will correspond to bytes at the end of the given buffer.
     */
    template<concepts::const_buffer buffer_type>
    auto sendto(const info& peer_info, const buffer_type& buffer) -> std::pair<send_status, std::span<const char>>
    {
        if (buffer.empty())
        {
            return {send_status::ok, std::span<const char>{}};
        }

        sockaddr_in peer{};
        peer.sin_family = static_cast<int>(peer_info.address.domain());
        peer.sin_port = htons(peer_info.port);
        peer.sin_addr = *reinterpret_cast<const in_addr*>(peer_info.address.data().data());

        socklen_t peer_len{sizeof(peer)};

        auto bytes_sent = ::sendto(
            m_socket.native_handle(), buffer.data(), buffer.size(), 0, reinterpret_cast<sockaddr*>(&peer), peer_len);

        if (bytes_sent >= 0)
        {
            return {send_status::ok, std::span<const char>{buffer.data() + bytes_sent, buffer.size() - bytes_sent}};
        }
        else
        {
            return {static_cast<send_status>(errno), std::span<const char>{}};
        }
    }

    /**
     * @param buffer The buffer to receive data into.
     * @return The receive status, and if ok then also the peer who sent the data and the data.
     *         The span view of the data will be set to the size of the received data; this will
     *         always start at the beginning of the buffer but depending on how large the data was
     *         it might not fill the entire buffer.
     */
    template<concepts::mutable_buffer buffer_type>
    auto recvfrom(buffer_type&& buffer) -> std::tuple<recv_status, udp_peer::info, std::span<char>>
    {
        // The user must bind locally to be able to receive packets.
        if (!m_bound)
        {
            return {recv_status::udp_not_bound, udp_peer::info{}, std::span<char>{}};
        }

        sockaddr_in peer{};
        socklen_t peer_len{sizeof(peer)};

        auto bytes_read = ::recvfrom(
            m_socket.native_handle(), buffer.data(), buffer.size(), 0, reinterpret_cast<sockaddr*>(&peer), &peer_len);

        if (bytes_read < 0)
        {
            return {static_cast<recv_status>(errno), udp_peer::info{}, std::span<char>{}};
        }

        std::span<const uint8_t> ip_addr_view{
            reinterpret_cast<uint8_t*>(&peer.sin_addr.s_addr),
            sizeof(peer.sin_addr.s_addr),
        };

        return {
            recv_status::ok,
            udp_peer::info{
                .address = net::ip_address{ip_addr_view, static_cast<net::domain_t>(peer.sin_family)},
                .port = ntohs(peer.sin_port)},
            std::span<char>{buffer.data(), static_cast<size_t>(bytes_read)}};
    }

private:
    /// The scheduler that will drive this udp client.
    io_scheduler& m_io_scheduler;
    /// The udp socket.
    net::socket m_socket{-1};
    /// Did the user request this udp socket be bound locally to receive packets?
    bool m_bound{false};
};

} // namespace coro::net
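A usage sketch, not part of this diff, built only from the poll(), recvfrom() and sendto() members above: a locally bound peer echoing one datagram back to its sender.

auto echo_once(coro::net::udp_peer& peer, std::span<char> buffer) -> coro::task<void>
{
    if (co_await peer.poll(coro::poll_op::read) == coro::poll_status::event)
    {
        auto [rstatus, from, data] = peer.recvfrom(buffer);
        if (rstatus == coro::net::recv_status::ok)
        {
            peer.sendto(from, data); // the unsent-tail span is ignored for brevity
        }
    }
}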
@@ -1,39 +0,0 @@
#pragma once

#include <sys/epoll.h>

namespace coro
{
enum class poll_op : uint64_t
{
    /// Poll for read operations.
    read = EPOLLIN,
    /// Poll for write operations.
    write = EPOLLOUT,
    /// Poll for read and write operations.
    read_write = EPOLLIN | EPOLLOUT
};

inline auto poll_op_readable(poll_op op) -> bool
{
    return (static_cast<uint64_t>(op) & EPOLLIN);
}

inline auto poll_op_writeable(poll_op op) -> bool
{
    return (static_cast<uint64_t>(op) & EPOLLOUT);
}

enum class poll_status
{
    /// The poll operation was successful.
    event,
    /// The poll operation timed out.
    timeout,
    /// The file descriptor had an error while polling.
    error,
    /// The file descriptor has been closed by the remote or an internal error/close.
    closed
};

} // namespace coro
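A small illustration, not part of this diff: because poll_op maps directly onto the epoll bits, a read_write result can be dispatched with the two helpers above.

auto handle_readiness(coro::poll_op op) -> void
{
    if (coro::poll_op_readable(op))
    {
        // drain pending reads
    }
    if (coro::poll_op_writeable(op))
    {
        // flush pending writes
    }
}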
@@ -1,28 +1,31 @@
#pragma once

#include "coro/concepts/executor.hpp"

#include <atomic>
#include <coroutine>
#include <mutex>

namespace coro
{
template<concepts::executor executor_type>
class shared_mutex;
class thread_pool;

/**
 * A scoped RAII lock holder for a coro::shared_mutex. It will call the appropriate unlock() or
 * unlock_shared() based on how the coro::shared_mutex was originally acquired, either shared or
 * exclusive modes.
 */
template<concepts::executor executor_type>
class shared_scoped_lock
{
public:
    shared_scoped_lock(shared_mutex& sm, bool exclusive) : m_shared_mutex(&sm), m_exclusive(exclusive) {}
    shared_scoped_lock(shared_mutex<executor_type>& sm, bool exclusive) : m_shared_mutex(&sm), m_exclusive(exclusive) {}

    /**
     * Unlocks the mutex upon this shared scoped lock destructing.
     */
    ~shared_scoped_lock();
    ~shared_scoped_lock() { unlock(); }

    shared_scoped_lock(const shared_scoped_lock&) = delete;
    shared_scoped_lock(shared_scoped_lock&& other)

@@ -45,22 +48,44 @@ public:
    /**
     * Unlocks the shared mutex prior to this lock going out of scope.
     */
    auto unlock() -> void;
    auto unlock() -> void
    {
        if (m_shared_mutex != nullptr)
        {
            if (m_exclusive)
            {
                m_shared_mutex->unlock();
            }
            else
            {
                m_shared_mutex->unlock_shared();
            }

            m_shared_mutex = nullptr;
        }
    }

private:
    shared_mutex* m_shared_mutex{nullptr};
    bool m_exclusive{false};
    shared_mutex<executor_type>* m_shared_mutex{nullptr};
    bool m_exclusive{false};
};

template<concepts::executor executor_type>
class shared_mutex
{
public:
    /**
     * @param tp The thread pool for when multiple shared waiters can be woken up at the same time,
     *           each shared waiter will be scheduled to immediately run on this thread pool in
     *           parralell.
     * @param e The executor for when multiple shared waiters can be woken up at the same time,
     *          each shared waiter will be scheduled to immediately run on this executor in
     *          parallel.
     */
    explicit shared_mutex(coro::thread_pool& tp);
    explicit shared_mutex(std::shared_ptr<executor_type> e) : m_executor(std::move(e))
    {
        if (m_executor == nullptr)
        {
            throw std::runtime_error{"shared_mutex cannot have a nullptr executor"};
        }
    }
    ~shared_mutex() = default;

    shared_mutex(const shared_mutex&) = delete;

@@ -72,9 +97,66 @@ public:
    {
        lock_operation(shared_mutex& sm, bool exclusive) : m_shared_mutex(sm), m_exclusive(exclusive) {}

        auto await_ready() const noexcept -> bool;
        auto await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> bool;
        auto await_resume() noexcept -> shared_scoped_lock { return shared_scoped_lock{m_shared_mutex, m_exclusive}; }
        auto await_ready() const noexcept -> bool
        {
            if (m_exclusive)
            {
                return m_shared_mutex.try_lock();
            }
            else
            {
                return m_shared_mutex.try_lock_shared();
            }
        }

        auto await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> bool
        {
            std::unique_lock lk{m_shared_mutex.m_mutex};
            // It's possible the lock has been released between await_ready() and await_suspend(); double
            // check and make sure we are not going to suspend when nobody holds the lock.
            if (m_exclusive)
            {
                if (m_shared_mutex.try_lock_locked(lk))
                {
                    return false;
                }
            }
            else
            {
                if (m_shared_mutex.try_lock_shared_locked(lk))
                {
                    return false;
                }
            }

            // For sure the lock is currently held in a manner that it cannot be acquired; suspend ourselves
            // at the end of the waiter list.

            if (m_shared_mutex.m_tail_waiter == nullptr)
            {
                m_shared_mutex.m_head_waiter = this;
                m_shared_mutex.m_tail_waiter = this;
            }
            else
            {
                m_shared_mutex.m_tail_waiter->m_next = this;
                m_shared_mutex.m_tail_waiter = this;
            }

            // If this is an exclusive lock acquire then mark it as such so that shared locks after this
            // exclusive one will also suspend so this exclusive lock doesn't get starved.
            if (m_exclusive)
            {
                ++m_shared_mutex.m_exclusive_waiters;
            }

            m_awaiting_coroutine = awaiting_coroutine;
            return true;
        }
        auto await_resume() noexcept -> shared_scoped_lock<executor_type>
        {
            return shared_scoped_lock{m_shared_mutex, m_exclusive};
        }

    private:
        friend class shared_mutex;

@@ -99,12 +181,27 @@ public:
    /**
     * @return True if the lock could immediately be acquired in a shared state.
     */
    auto try_lock_shared() -> bool;
    auto try_lock_shared() -> bool
    {
        // To acquire the shared lock the state must be one of two states:
        //     1) unlocked
        //     2) shared locked with zero exclusive waiters
        // Zero exclusive waiters prevents exclusive starvation if shared locks are
        // always continuously happening.

        std::unique_lock lk{m_mutex};
        return try_lock_shared_locked(lk);
    }

    /**
     * @return True if the lock could immediately be acquired in an exclusive state.
     */
    auto try_lock() -> bool;
    auto try_lock() -> bool
    {
        // To acquire the exclusive lock the state must be unlocked.
        std::unique_lock lk{m_mutex};
        return try_lock_locked(lk);
    }

    /**
     * Unlocks a single shared state user. *REQUIRES* that the lock was first acquired exactly once

@@ -114,7 +211,24 @@ public:
     * If the shared user count drops to zero and this lock has an exclusive waiter then the exclusive
     * waiter acquires the lock.
     */
    auto unlock_shared() -> void;
    auto unlock_shared() -> void
    {
        std::unique_lock lk{m_mutex};
        --m_shared_users;

        // Only wake waiters from shared state if all shared users have completed.
        if (m_shared_users == 0)
        {
            if (m_head_waiter != nullptr)
            {
                wake_waiters(lk);
            }
            else
            {
                m_state = state::unlocked;
            }
        }
    }

    /**
     * Unlocks the mutex from its exclusive state. If there is a following exclusive waiter then

@@ -122,7 +236,18 @@ public:
     * shared waiters acquire the lock in a shared state in parallel and are resumed on the original
     * thread pool this shared mutex was created with.
     */
    auto unlock() -> void;
    auto unlock() -> void
    {
        std::unique_lock lk{m_mutex};
        if (m_head_waiter != nullptr)
        {
            wake_waiters(lk);
        }
        else
        {
            m_state = state::unlocked;
        }
    }

private:
    friend class lock_operation;

@@ -134,8 +259,8 @@ private:
        locked_exclusive
    };

    /// This thread pool is for resuming multiple shared waiters.
    coro::thread_pool& m_thread_pool;
    /// This executor is for resuming multiple shared waiters.
    std::shared_ptr<executor_type> m_executor{nullptr};

    std::mutex m_mutex;

@@ -150,10 +275,87 @@ private:
    lock_operation* m_head_waiter{nullptr};
    lock_operation* m_tail_waiter{nullptr};

    auto try_lock_shared_locked(std::unique_lock<std::mutex>& lk) -> bool;
    auto try_lock_locked(std::unique_lock<std::mutex>& lk) -> bool;
    auto try_lock_shared_locked(std::unique_lock<std::mutex>& lk) -> bool
    {
        if (m_state == state::unlocked)
        {
            // If the shared mutex is unlocked put it into shared mode and add ourselves as using the lock.
            m_state = state::locked_shared;
            ++m_shared_users;
            lk.unlock();
            return true;
        }
        else if (m_state == state::locked_shared && m_exclusive_waiters == 0)
        {
            // If the shared mutex is in a shared locked state and there are no exclusive waiters
            // then add ourselves as using the lock.
            ++m_shared_users;
            lk.unlock();
            return true;
        }

    auto wake_waiters(std::unique_lock<std::mutex>& lk) -> void;
        // If the lock is in shared mode but there are exclusive waiters then we will also wait so
        // the writers are not starved.

        // If the lock is in exclusive mode already then we need to wait.

        return false;
    }

    auto try_lock_locked(std::unique_lock<std::mutex>& lk) -> bool
    {
        if (m_state == state::unlocked)
        {
            m_state = state::locked_exclusive;
            lk.unlock();
            return true;
        }
        return false;
    }

    auto wake_waiters(std::unique_lock<std::mutex>& lk) -> void
    {
        // First determine what the next lock state will be based on the first waiter.
        if (m_head_waiter->m_exclusive)
        {
            // If it's exclusive then only this waiter can be woken up.
            m_state = state::locked_exclusive;
            lock_operation* to_resume = m_head_waiter;
            m_head_waiter = m_head_waiter->m_next;
            --m_exclusive_waiters;
            if (m_head_waiter == nullptr)
            {
                m_tail_waiter = nullptr;
            }

            // Since this is an exclusive lock waiting we can resume it directly.
            lk.unlock();
            to_resume->m_awaiting_coroutine.resume();
        }
        else
        {
            // If it's shared then we will scan forward and wake all shared waiters onto the given
            // executor so they can run in parallel.
            m_state = state::locked_shared;
            do
            {
                lock_operation* to_resume = m_head_waiter;
                m_head_waiter = m_head_waiter->m_next;
                if (m_head_waiter == nullptr)
                {
                    m_tail_waiter = nullptr;
                }
                ++m_shared_users;

                m_executor->resume(to_resume->m_awaiting_coroutine);
            } while (m_head_waiter != nullptr && !m_head_waiter->m_exclusive);

            // Cannot unlock until the entire set of shared waiters has been traversed. I think this
            // makes more sense than allocating space for all the shared waiters, unlocking, and then
            // resuming in a batch?
            lk.unlock();
        }
    }
};

} // namespace coro
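A hedged usage sketch, not part of this diff. It assumes shared_mutex exposes lock()/lock_shared() acquire methods returning the lock_operation awaiter above; the shared container is hypothetical.

auto reader(coro::shared_mutex<coro::thread_pool>& m, const std::vector<int>& values) -> coro::task<std::size_t>
{
    auto lk = co_await m.lock_shared(); // many readers may hold the lock at once
    co_return values.size();
}                                       // ~shared_scoped_lock calls unlock_shared()

auto writer(coro::shared_mutex<coro::thread_pool>& m, std::vector<int>& values) -> coro::task<void>
{
    auto lk = co_await m.lock(); // exclusive; a queued writer also blocks new readers
    values.push_back(42);
}                                // ~shared_scoped_lock calls unlock()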
@@ -1,13 +0,0 @@
#pragma once

namespace coro
{
enum class shutdown_t
{
    /// Synchronously wait for all tasks to complete when calling shutdown.
    sync,
    /// Asynchronously let tasks finish on the background thread on shutdown.
    async
};

} // namespace coro
@@ -123,9 +123,7 @@
        return completion_notifier{};
    }

    auto return_void() noexcept -> void {}

    auto return_value()
    auto return_void() -> void
    {
        if (m_exception)
        {

@@ -170,7 +168,8 @@ public:
    {
        if constexpr (std::is_same_v<void, return_type>)
        {
            m_coroutine.promise().return_value();
            // Propagate exceptions.
            m_coroutine.promise().return_void();
            return;
        }
        else

@@ -183,6 +182,11 @@ private:
    coroutine_type m_coroutine;
};

template<
    concepts::awaitable awaitable_type,
    typename return_type = concepts::awaitable_traits<awaitable_type>::awaiter_return_type>
static auto make_sync_wait_task(awaitable_type&& a) -> sync_wait_task<return_type> __attribute__((used));

template<
    concepts::awaitable awaitable_type,
    typename return_type = concepts::awaitable_traits<awaitable_type>::awaiter_return_type>
@@ -46,7 +46,7 @@ struct promise_base

    auto initial_suspend() { return std::suspend_always{}; }

    auto final_suspend() { return final_awaitable{}; }
    auto final_suspend() noexcept(true) { return final_awaitable{}; }

    auto unhandled_exception() -> void { m_exception_ptr = std::current_exception(); }

@@ -105,9 +105,7 @@ struct promise<void> : public promise_base

    auto get_return_object() noexcept -> task_type;

    auto return_void() noexcept -> void {}

    auto return_value() const -> void
    auto return_void() -> void
    {
        if (m_exception_ptr)
        {

@@ -119,7 +117,7 @@ struct promise<void> : public promise_base
} // namespace detail

template<typename return_type>
class task
class [[nodiscard]] task
{
public:
    using task_type = task<return_type>;

@@ -202,7 +200,19 @@ public:
    {
        struct awaitable : public awaitable_base
        {
            auto await_resume() -> decltype(auto) { return this->m_coroutine.promise().return_value(); }
            auto await_resume() -> decltype(auto)
            {
                if constexpr (std::is_same_v<void, return_type>)
                {
                    // Propagate uncaught exceptions.
                    this->m_coroutine.promise().return_void();
                    return;
                }
                else
                {
                    return this->m_coroutine.promise().return_value();
                }
            }
        };

        return awaitable{m_coroutine};

@@ -212,7 +222,19 @@ public:
    {
        struct awaitable : public awaitable_base
        {
            auto await_resume() -> decltype(auto) { return std::move(this->m_coroutine.promise()).return_value(); }
            auto await_resume() -> decltype(auto)
            {
                if constexpr (std::is_same_v<void, return_type>)
                {
                    // Propagate uncaught exceptions.
                    this->m_coroutine.promise().return_void();
                    return;
                }
                else
                {
                    return std::move(this->m_coroutine.promise()).return_value();
                }
            }
        };

        return awaitable{m_coroutine};
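// Hedged illustration, not part of this diff: with the changes above a dropped
// task now warns ([[nodiscard]]) and co_await on a task<void> rethrows any
// stored exception through return_void().
//
//     auto failing() -> coro::task<void> { throw std::runtime_error{"boom"}; co_return; }
//
//     auto consumer() -> coro::task<bool>
//     {
//         try
//         {
//             co_await failing(); // await_resume() calls return_void(), which rethrows
//         }
//         catch (const std::runtime_error&)
//         {
//             co_return true;
//         }
//         co_return false;
//     }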
@@ -1,16 +1,20 @@
#pragma once

#include "coro/concepts/executor.hpp"
#include "coro/task.hpp"

#include <atomic>
#include <iostream>
#include <list>
#include <memory>
#include <mutex>
#include <vector>

namespace coro
{
class thread_pool;
class io_scheduler;

template<concepts::executor executor_type>
class task_container
{
public:

@@ -25,16 +29,35 @@ public:
    };

    /**
     * @param tp Tasks started in the container are scheduled onto this thread pool. For tasks created
     * @param e Tasks started in the container are scheduled onto this executor. For tasks created
     *          from a coro::io_scheduler, this would usually be that coro::io_scheduler instance.
     * @param opts Task container options.
     */
    task_container(thread_pool& tp, const options opts = options{.reserve_size = 8, .growth_factor = 2});
    task_container(
        std::shared_ptr<executor_type> e, const options opts = options{.reserve_size = 8, .growth_factor = 2})
        : m_growth_factor(opts.growth_factor),
          m_executor(std::move(e)),
          m_executor_ptr(m_executor.get())
    {
        if (m_executor == nullptr)
        {
            throw std::runtime_error{"task_container cannot have a nullptr executor"};
        }

        init(opts.reserve_size);
    }
    task_container(const task_container&) = delete;
    task_container(task_container&&) = delete;
    auto operator=(const task_container&) -> task_container& = delete;
    auto operator=(task_container&&) -> task_container& = delete;
    ~task_container();
    ~task_container()
    {
        // This will hang the current thread... but if tasks are not complete that's also pretty bad.
        while (!empty())
        {
            garbage_collect();
        }
    }

    enum class garbage_collect_t
    {

@@ -51,14 +74,44 @@ public:
     *                call? Calling at regular intervals will reduce memory usage of completed
     *                tasks and allow for the task container to re-use allocated space.
     */
    auto start(coro::task<void> user_task, garbage_collect_t cleanup = garbage_collect_t::yes) -> void;
    auto start(coro::task<void>&& user_task, garbage_collect_t cleanup = garbage_collect_t::yes) -> void
    {
        m_size.fetch_add(1, std::memory_order::relaxed);

        std::scoped_lock lk{m_mutex};

        if (cleanup == garbage_collect_t::yes)
        {
            gc_internal();
        }

        // Only grow if completely full and attempting to add more.
        if (m_free_pos == m_task_indexes.end())
        {
            m_free_pos = grow();
        }

        // Store the task inside a cleanup task for self deletion.
        auto index = *m_free_pos;
        m_tasks[index] = make_cleanup_task(std::move(user_task), m_free_pos);

        // Mark the current slot as used.
        std::advance(m_free_pos, 1);

        // Start executing from the cleanup task to schedule the user's task onto the executor.
        m_tasks[index].resume();
    }

    /**
     * Garbage collects any tasks that are marked as deleted. This frees up space to be re-used by
     * the task container for newly stored tasks.
     * @return The number of tasks that were deleted.
     */
    auto garbage_collect() -> std::size_t;
    auto garbage_collect() -> std::size_t __attribute__((used))
    {
        std::scoped_lock lk{m_mutex};
        return gc_internal();
    }

    /**
     * @return The number of tasks that are awaiting deletion.

@@ -104,19 +157,59 @@ public:
     * This does not shut down the task container, but can be used when shutting down, or if your
     * logic requires all the tasks contained within to complete, it is similar to coro::latch.
     */
    auto garbage_collect_and_yield_until_empty() -> coro::task<void>;
    auto garbage_collect_and_yield_until_empty() -> coro::task<void>
    {
        while (!empty())
        {
            garbage_collect();
            co_await m_executor_ptr->yield();
        }
    }

private:
    /**
     * Grows each task container by the growth factor.
     * @return The position of the free index after growing.
     */
    auto grow() -> task_position;
    auto grow() -> task_position
    {
        // Save an index at the current last item.
        auto last_pos = std::prev(m_task_indexes.end());
        std::size_t new_size = m_tasks.size() * m_growth_factor;
        for (std::size_t i = m_tasks.size(); i < new_size; ++i)
        {
            m_task_indexes.emplace_back(i);
        }
        m_tasks.resize(new_size);
        // Set the free pos to the item just after the previous last item.
        return std::next(last_pos);
    }

    /**
     * Internal GC call, expects the public function to lock.
     */
    auto gc_internal() -> std::size_t;
    auto gc_internal() -> std::size_t
    {
        std::size_t deleted{0};
        if (!m_tasks_to_delete.empty())
        {
            for (const auto& pos : m_tasks_to_delete)
            {
                // This doesn't actually 'delete' the task, it'll get overwritten when a
                // new user task claims the free space. It could be useful to actually
                // delete the tasks so the coroutine stack frames are destroyed. The advantage
                // of letting a new task replace an old one though is that it's a 1:1 exchange
                // on delete and create, rather than a large pause here to delete all the
                // completed tasks.

                // Put the deleted position at the end of the free indexes list.
                m_task_indexes.splice(m_task_indexes.end(), m_task_indexes, pos);
            }
            deleted = m_tasks_to_delete.size();
            m_tasks_to_delete.clear();
        }
        return deleted;
    }

    /**
     * Encapsulate the user's task in a cleanup task which marks itself for deletion upon

@@ -130,7 +223,37 @@ private:
     * @param pos The position where the task data will be stored in the task manager.
     * @return The user's task wrapped in a self-cleanup task.
     */
    auto make_cleanup_task(task<void> user_task, task_position pos) -> coro::task<void>;
    auto make_cleanup_task(task<void> user_task, task_position pos) -> coro::task<void>
    {
        // Immediately move the task onto the executor.
        co_await m_executor_ptr->schedule();

        try
        {
            // Await the user's task to complete.
            co_await user_task;
        }
        catch (const std::exception& e)
        {
            // TODO: what would be a good way to report this to the user...? Catching here is required
            // since the co_await will unwrap the unhandled exception on the task.
            // The user's task should ideally be wrapped in a catch-all and handle it themselves, but
            // that cannot be guaranteed.
            std::cerr << "coro::task_container user_task had an unhandled exception e.what()= " << e.what() << "\n";
        }
        catch (...)
        {
            // don't crash if they throw something that isn't derived from std::exception
            std::cerr << "coro::task_container user_task had an unhandled exception not derived from std::exception.\n";
        }

        std::scoped_lock lk{m_mutex};
        m_tasks_to_delete.push_back(pos);
        // This has to be done within the scoped lock to make sure this coroutine task completes before the
        // task container object destructs -- if it was waiting on .empty() to become true.
        m_size.fetch_sub(1, std::memory_order::relaxed);
        co_return;
    }

    /// Mutex for safely mutating the task containers across threads, expected usage is within
    /// thread pools for indeterminate lifetime requests.

@@ -148,8 +271,32 @@ private:
    task_position m_free_pos{};
    /// The amount to grow the containers by when all spaces are taken.
    double m_growth_factor{};
    /// The thread pool to schedule tasks that have just started.
    thread_pool& m_thread_pool;
    /// The executor to schedule tasks that have just started.
    std::shared_ptr<executor_type> m_executor{nullptr};
    /// This is used internally since io_scheduler cannot pass itself in as a shared_ptr.
    executor_type* m_executor_ptr{nullptr};

    /**
     * Special constructor for internal types to create their embedded task containers.
     */

    friend io_scheduler;
    task_container(executor_type& e, const options opts = options{.reserve_size = 8, .growth_factor = 2})
        : m_growth_factor(opts.growth_factor),
          m_executor_ptr(&e)
    {
        init(opts.reserve_size);
    }

    auto init(std::size_t reserve_size) -> void
    {
        m_tasks.resize(reserve_size);
        for (std::size_t i = 0; i < reserve_size; ++i)
        {
            m_task_indexes.emplace_back(i);
        }
        m_free_pos = m_task_indexes.begin();
    }
};

} // namespace coro
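A hedged usage sketch, not part of this diff: fire-and-forget tasks whose lifetime is owned by the container. make_handler_task() is a hypothetical user coroutine; the executor type is deduced from the shared_ptr constructor above.

auto tp = std::make_shared<coro::thread_pool>();
coro::task_container container{tp}; // executor_type deduced as coro::thread_pool

container.start(make_handler_task()); // detached; its slot is reclaimed by later gc passes
coro::sync_wait(container.garbage_collect_and_yield_until_empty()); // drain before destruction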
@@ -1,7 +1,7 @@
#pragma once

#include "coro/concepts/range_of.hpp"
#include "coro/event.hpp"
#include "coro/shutdown.hpp"
#include "coro/task.hpp"

#include <atomic>

@@ -11,15 +11,13 @@
#include <functional>
#include <mutex>
#include <optional>
#include <ranges>
#include <thread>
#include <variant>
#include <vector>

namespace coro
{
class event;
class shared_mutex;

/**
 * Creates a thread pool that executes arbitrary coroutine tasks in a FIFO scheduler policy.
 * The thread pool by default will create an execution thread per available core on the system.

@@ -133,6 +131,46 @@ public:
        }
    }

    /**
     * Schedules any coroutine handle that is ready to be resumed.
     * @param handle The coroutine handle to schedule.
     */
    auto resume(std::coroutine_handle<> handle) noexcept -> void;

    /**
     * Schedules the set of coroutine handles that are ready to be resumed.
     * @param handles The coroutine handles to schedule.
     */
    template<coro::concepts::range_of<std::coroutine_handle<>> range_type>
    auto resume(const range_type& handles) noexcept -> void
    {
        m_size.fetch_add(std::size(handles), std::memory_order::release);

        size_t null_handles{0};

        {
            std::scoped_lock lk{m_wait_mutex};
            for (const auto& handle : handles)
            {
                if (handle != nullptr) [[likely]]
                {
                    m_queue.emplace_back(handle);
                }
                else
                {
                    ++null_handles;
                }
            }
        }

        if (null_handles > 0)
        {
            m_size.fetch_sub(null_handles, std::memory_order::release);
        }

        m_wait_cv.notify_one();
    }

    /**
     * Immediately yields the current task and places it at the end of the queue of tasks waiting
     * to be processed. This will immediately be picked up again once it naturally goes through the

@@ -143,11 +181,10 @@ public:

    /**
     * Shuts down the thread pool. This will finish any tasks scheduled prior to calling this
     * function but will prevent the thread pool from scheduling any new tasks.
     * @param wait_for_tasks Should this function block until all remaining scheduled tasks have
     *                       completed? Pass in sync to wait, or async to not block.
     * function but will prevent the thread pool from scheduling any new tasks. This call is
     * blocking and will wait until all inflight tasks are completed before returning.
     */
    virtual auto shutdown(shutdown_t wait_for_tasks = shutdown_t::sync) noexcept -> void;
    auto shutdown() noexcept -> void;

    /**
     * @return The number of tasks waiting in the task queue + the executing tasks.

@@ -198,22 +235,10 @@ private:
     */
    auto schedule_impl(std::coroutine_handle<> handle) noexcept -> void;

protected:
    /// The number of tasks in the queue + currently executing.
    std::atomic<std::size_t> m_size{0};
    /// Has the thread pool been requested to shut down?
    std::atomic<bool> m_shutdown_requested{false};

    /// Required to resume all waiters of the event onto a thread_pool.
    friend event;
    friend shared_mutex;

    /**
     * Schedules any coroutine that is ready to be resumed.
     * @param handle The coroutine handle to schedule.
     */
    auto resume(std::coroutine_handle<> handle) noexcept -> void;
    auto resume(const std::vector<std::coroutine_handle<>>& handles) noexcept -> void;
};

} // namespace coro
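A hedged usage sketch, not part of this diff: scheduling work onto the pool and using the now-blocking shutdown(). It assumes the schedule() awaitable declared earlier in this header.

auto add(coro::thread_pool& tp, int a, int b) -> coro::task<int>
{
    co_await tp.schedule(); // resume on a pool thread
    co_return a + b;
}

auto run() -> void
{
    coro::thread_pool tp{};
    auto sum = coro::sync_wait(add(tp, 2, 3)); // sum == 5
    tp.shutdown();                             // blocks until inflight tasks finish
}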
@@ -340,15 +340,7 @@

    auto unhandled_exception() noexcept -> void { m_exception_ptr = std::current_exception(); }

    auto return_void() noexcept -> void {}

    auto start(when_all_latch& latch) -> void
    {
        m_latch = &latch;
        coroutine_handle_type::from_promise(*this).resume();
    }

    auto return_value() -> void
    auto return_void() noexcept -> void
    {
        if (m_exception_ptr)
        {

@@ -356,6 +348,12 @@ public:
        }
    }

    auto start(when_all_latch& latch) -> void
    {
        m_latch = &latch;
        coroutine_handle_type::from_promise(*this).resume();
    }

private:
    when_all_latch* m_latch{nullptr};
    std::exception_ptr m_exception_ptr;

@@ -436,6 +434,11 @@ private:
    coroutine_handle_type m_coroutine;
};

template<
    concepts::awaitable awaitable,
    typename return_type = concepts::awaitable_traits<awaitable&&>::awaiter_return_type>
static auto make_when_all_task(awaitable a) -> when_all_task<return_type> __attribute__((used));

template<
    concepts::awaitable awaitable,
    typename return_type = concepts::awaitable_traits<awaitable&&>::awaiter_return_type>
@@ -7,13 +7,20 @@ event::event(bool initially_set) noexcept : m_state((initially_set) ? static_cas
{
}

auto event::set() noexcept -> void
auto event::set(resume_order_policy policy) noexcept -> void
{
    // Exchange the state to this; if the state was previously not this, then traverse the list
    // of awaiters and resume their coroutines.
    void* old_value = m_state.exchange(this, std::memory_order::acq_rel);
    if (old_value != this)
    {
        // If FIFO has been requested then reverse the order upon resuming.
        if (policy == resume_order_policy::fifo)
        {
            old_value = reverse(static_cast<awaiter*>(old_value));
        }
        // else lifo, nothing to do

        auto* waiters = static_cast<awaiter*>(old_value);
        while (waiters != nullptr)
        {

@@ -24,21 +31,24 @@ auto event::set() noexcept -> void
    }
}

auto event::set(coro::thread_pool& tp) noexcept -> void
auto event::reverse(awaiter* curr) -> awaiter*
{
    // Exchange the state to this; if the state was previously not this, then traverse the list
    // of awaiters and resume their coroutines.
    void* old_value = m_state.exchange(this, std::memory_order::acq_rel);
    if (old_value != this)
    if (curr == nullptr || curr->m_next == nullptr)
    {
        auto* waiters = static_cast<awaiter*>(old_value);
        while (waiters != nullptr)
        {
            auto* next = waiters->m_next;
            tp.resume(waiters->m_awaiting_coroutine);
            waiters = next;
        }
        return curr;
    }

    awaiter* prev = nullptr;
    awaiter* next = nullptr;
    while (curr != nullptr)
    {
        next = curr->m_next;
        curr->m_next = prev;
        prev = curr;
        curr = next;
    }

    return prev;
}
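// Hedged illustration, not part of this diff: awaiters push onto m_state as a
// LIFO stack, so reversing the singly linked list before resuming yields
// first-in-first-out wake-up order.
//
//     coro::event e{};
//     // ... several tasks co_await e, stacking their awaiters ...
//     e.set(coro::resume_order_policy::fifo); // oldest waiter resumes first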

auto event::awaiter::await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> bool

@@ -1,462 +0,0 @@
#include "coro/io_scheduler.hpp"
|
||||
|
||||
#include <atomic>
|
||||
#include <cstring>
|
||||
#include <optional>
|
||||
#include <sys/epoll.h>
|
||||
#include <sys/eventfd.h>
|
||||
#include <sys/socket.h>
|
||||
#include <sys/timerfd.h>
|
||||
#include <sys/types.h>
|
||||
#include <unistd.h>
|
||||
|
||||
using namespace std::chrono_literals;
|
||||
|
||||
namespace coro
|
||||
{
|
||||
namespace detail
|
||||
{
|
||||
/**
|
||||
* Poll Info encapsulates everything about a poll operation for the event as well as its paired
|
||||
* timeout. This is important since coroutines that are waiting on an event or timeout do not
|
||||
* immediately execute, they are re-scheduled onto the thread pool, so its possible its pair
|
||||
* event or timeout also triggers while the coroutine is still waiting to resume. This means that
|
||||
* the first one to happen, the event itself or its timeout, needs to disable the other pair item
|
||||
* prior to resuming the coroutine.
|
||||
*
|
||||
* Finally, its also important to note that the event and its paired timeout could happen during
|
||||
* the same epoll_wait and possibly trigger the coroutine to start twice. Only one can win, so the
|
||||
* first one processed sets m_processed to true and any subsequent events in the same epoll batch
|
||||
* are effectively discarded.
|
||||
*/
|
||||
struct poll_info
|
||||
{
|
||||
poll_info() = default;
|
||||
~poll_info() = default;
|
||||
|
||||
poll_info(const poll_info&) = delete;
|
||||
poll_info(poll_info&&) = delete;
|
||||
auto operator=(const poll_info&) -> poll_info& = delete;
|
||||
auto operator=(poll_info&&) -> poll_info& = delete;
|
||||
|
||||
struct poll_awaiter
|
||||
{
|
||||
explicit poll_awaiter(poll_info& pi) noexcept : m_pi(pi) {}
|
||||
|
||||
auto await_ready() const noexcept -> bool { return false; }
|
||||
auto await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> void
|
||||
{
|
||||
m_pi.m_awaiting_coroutine = awaiting_coroutine;
|
||||
std::atomic_thread_fence(std::memory_order::release);
|
||||
}
|
||||
auto await_resume() noexcept -> coro::poll_status { return m_pi.m_poll_status; }
|
||||
|
||||
poll_info& m_pi;
|
||||
};
|
||||
|
||||
auto operator co_await() noexcept -> poll_awaiter { return poll_awaiter{*this}; }
|
||||
|
||||
/// The file descriptor being polled on. This is needed so that if the timeout occurs first then
|
||||
/// the event loop can immediately disable the event within epoll.
|
||||
io_scheduler::fd_t m_fd{-1};
|
||||
/// The timeout's position in the timeout map. A poll() with no timeout or yield() this is empty.
|
||||
/// This is needed so that if the event occurs first then the event loop can immediately disable
|
||||
/// the timeout within epoll.
|
||||
std::optional<io_scheduler::timed_events::iterator> m_timer_pos{std::nullopt};
|
||||
/// The awaiting coroutine for this poll info to resume upon event or timeout.
|
||||
std::coroutine_handle<> m_awaiting_coroutine;
|
||||
/// The status of the poll operation.
|
||||
coro::poll_status m_poll_status{coro::poll_status::error};
|
||||
/// Did the timeout and event trigger at the same time on the same epoll_wait call?
|
||||
/// Once this is set to true all future events on this poll info are null and void.
|
||||
bool m_processed{false};
|
||||
};
|
||||
|
||||
} // namespace detail
|
||||
|
||||
io_scheduler::io_scheduler(options opts)
|
||||
: thread_pool(std::move(opts.pool)),
|
||||
m_opts(std::move(opts)),
|
||||
m_epoll_fd(epoll_create1(EPOLL_CLOEXEC)),
|
||||
m_shutdown_fd(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK)),
|
||||
m_timer_fd(timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK | TFD_CLOEXEC))
|
||||
{
|
||||
epoll_event e{};
|
||||
e.events = EPOLLIN;
|
||||
|
||||
e.data.ptr = const_cast<void*>(m_shutdown_ptr);
|
||||
epoll_ctl(m_epoll_fd, EPOLL_CTL_ADD, m_shutdown_fd, &e);
|
||||
|
||||
e.data.ptr = const_cast<void*>(m_timer_ptr);
|
||||
epoll_ctl(m_epoll_fd, EPOLL_CTL_ADD, m_timer_fd, &e);
|
||||
|
||||
if (m_opts.thread_strategy == thread_strategy_t::spawn)
|
||||
{
|
||||
m_io_thread = std::thread([this]() { process_events_dedicated_thread(); });
|
||||
}
|
||||
// else manual mode, the user must call process_events.
|
||||
}
|
||||
|
||||
io_scheduler::~io_scheduler()
|
||||
{
|
||||
shutdown();
|
||||
|
||||
if (m_io_thread.joinable())
|
||||
{
|
||||
m_io_thread.join();
|
||||
}
|
||||
|
||||
if (m_epoll_fd != -1)
|
||||
{
|
||||
close(m_epoll_fd);
|
||||
m_epoll_fd = -1;
|
||||
}
|
||||
if (m_timer_fd != -1)
|
||||
{
|
||||
close(m_timer_fd);
|
||||
m_timer_fd = -1;
|
||||
}
|
||||
}
|
||||
|
||||
auto io_scheduler::process_events(std::chrono::milliseconds timeout) -> std::size_t
|
||||
{
|
||||
process_events_manual(timeout);
|
||||
return m_size.load(std::memory_order::relaxed);
|
||||
}
|
||||
|
||||
auto io_scheduler::schedule_after(std::chrono::milliseconds amount) -> coro::task<void>
|
||||
{
|
||||
return yield_for(amount);
|
||||
}
|
||||
|
||||
auto io_scheduler::schedule_at(time_point time) -> coro::task<void>
|
||||
{
|
||||
return yield_until(time);
|
||||
}
|
||||
|
||||
auto io_scheduler::yield_for(std::chrono::milliseconds amount) -> coro::task<void>
|
||||
{
|
||||
if (amount <= 0ms)
|
||||
{
|
||||
co_await schedule();
|
||||
}
|
||||
else
|
||||
{
|
||||
// Yielding does not requiring setting the timer position on the poll info since
|
||||
// it doesn't have a corresponding 'event' that can trigger, it always waits for
|
||||
// the timeout to occur before resuming.
|
||||
|
||||
detail::poll_info pi{};
|
||||
add_timer_token(clock::now() + amount, pi);
|
||||
co_await pi;
|
||||
}
|
||||
co_return;
|
||||
}
|
||||
|
||||
auto io_scheduler::yield_until(time_point time) -> coro::task<void>
|
||||
{
|
||||
auto now = clock::now();
|
||||
|
||||
// If the requested time is in the past (or now!) bail out!
|
||||
if (time <= now)
|
||||
{
|
||||
co_await schedule();
|
||||
}
|
||||
else
|
||||
{
|
||||
auto amount = std::chrono::duration_cast<std::chrono::milliseconds>(time - now);
|
||||
|
||||
detail::poll_info pi{};
|
||||
add_timer_token(now + amount, pi);
|
||||
co_await pi;
|
||||
}
|
||||
co_return;
|
||||
}
|
||||
|
||||
auto io_scheduler::poll(fd_t fd, coro::poll_op op, std::chrono::milliseconds timeout) -> coro::task<poll_status>
|
||||
{
|
||||
// Setup two events, a timeout event and the actual poll for op event.
|
||||
// Whichever triggers first will delete the other to guarantee only one wins.
|
||||
// The resume token will be set by the scheduler to what the event turned out to be.
|
||||
|
||||
bool timeout_requested = (timeout > 0ms);
|
||||
|
||||
detail::poll_info pi{};
|
||||
pi.m_fd = fd;
|
||||
|
||||
if (timeout_requested)
|
||||
{
|
||||
pi.m_timer_pos = add_timer_token(clock::now() + timeout, pi);
|
||||
}
|
||||
|
||||
epoll_event e{};
|
||||
e.events = static_cast<uint32_t>(op) | EPOLLONESHOT | EPOLLRDHUP;
|
||||
e.data.ptr = π
|
||||
if (epoll_ctl(m_epoll_fd, EPOLL_CTL_ADD, fd, &e) == -1)
|
||||
{
|
||||
std::cerr << "epoll ctl error on fd " << fd << "\n";
|
||||
}
|
||||
|
||||
// The event loop will 'clean-up' whichever event didn't win since the coroutine is scheduled
|
||||
// onto the thread poll its possible the other type of event could trigger while its waiting
|
||||
// to execute again, thus restarting the coroutine twice, that would be quite bad.
|
||||
co_return co_await pi;
|
||||
}
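// Hedged usage sketch, not part of this diff: awaiting readability on a raw fd
// with a timeout, then branching on which side won via poll_status.
//
//     auto status = co_await scheduler.poll(sock_fd, coro::poll_op::read, 250ms);
//     switch (status)
//     {
//         case coro::poll_status::event:   /* fd is ready, recv() now */ break;
//         case coro::poll_status::timeout: /* the 250ms timer won     */ break;
//         default:                         /* error or closed         */ break;
//     }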

auto io_scheduler::shutdown(shutdown_t wait_for_tasks) noexcept -> void
{
    thread_pool::shutdown(wait_for_tasks);

    // Signal the event loop to stop ASAP; triggering the event fd is safe.
    uint64_t value{1};
    ::write(m_shutdown_fd, &value, sizeof(value));
}

auto io_scheduler::process_events_manual(std::chrono::milliseconds timeout) -> void
{
    bool expected{false};
    if (m_io_processing.compare_exchange_strong(expected, true, std::memory_order::release, std::memory_order::relaxed))
    {
        process_events_execute(timeout);
        m_io_processing.exchange(false, std::memory_order::release);
    }
}

auto io_scheduler::process_events_dedicated_thread() -> void
{
    if (m_opts.on_io_thread_start_functor != nullptr)
    {
        m_opts.on_io_thread_start_functor();
    }

    m_io_processing.exchange(true, std::memory_order::release);
    // Execute tasks until stopped or there are no more tasks to complete.
    while (!m_shutdown_requested.load(std::memory_order::relaxed) || m_size.load(std::memory_order::relaxed) > 0)
    {
        process_events_execute(m_default_timeout);
    }
    m_io_processing.exchange(false, std::memory_order::release);

    if (m_opts.on_io_thread_stop_functor != nullptr)
    {
        m_opts.on_io_thread_stop_functor();
    }
}

auto io_scheduler::process_events_execute(std::chrono::milliseconds timeout) -> void
{
    auto event_count = epoll_wait(m_epoll_fd, m_events.data(), m_max_events, timeout.count());
    if (event_count > 0)
    {
        for (std::size_t i = 0; i < static_cast<std::size_t>(event_count); ++i)
        {
            epoll_event& event = m_events[i];
            void* handle_ptr = event.data.ptr;

            if (handle_ptr == m_timer_ptr)
            {
                // Process all events that have timed out.
                process_timeout_execute();
            }
            else if (handle_ptr == m_shutdown_ptr) [[unlikely]]
            {
                // Nothing to do, just needed to wake up and smell the flowers.
            }
            else
            {
                // Individual poll task wake-up; this will queue the coroutines waiting
                // on the resume token into the FIFO queue for processing.
                process_event_execute(static_cast<detail::poll_info*>(handle_ptr), event_to_poll_status(event.events));
            }
        }
    }
}

auto io_scheduler::event_to_poll_status(uint32_t events) -> poll_status
{
    if (events & EPOLLIN || events & EPOLLOUT)
    {
        return poll_status::event;
    }
    else if (events & EPOLLERR)
    {
        return poll_status::error;
    }
    else if (events & EPOLLRDHUP || events & EPOLLHUP)
    {
        return poll_status::closed;
    }

    throw std::runtime_error{"invalid epoll state"};
}

auto io_scheduler::process_event_execute(detail::poll_info* pi, poll_status status) -> void
{
    if (!pi->m_processed)
    {
        std::atomic_thread_fence(std::memory_order::acquire);
        // It's possible the event and the timeout occurred in the same epoll_wait; make sure only one
        // is ever processed, the other is discarded.
        pi->m_processed = true;

        // Given a valid fd always remove it from epoll so the next poll can blindly EPOLL_CTL_ADD.
        if (pi->m_fd != -1)
        {
            epoll_ctl(m_epoll_fd, EPOLL_CTL_DEL, pi->m_fd, nullptr);
        }

        // Since this event triggered, remove its corresponding timeout if it has one.
        if (pi->m_timer_pos.has_value())
        {
            remove_timer_token(pi->m_timer_pos.value());
        }

        pi->m_poll_status = status;

        while (pi->m_awaiting_coroutine == nullptr)
        {
            std::atomic_thread_fence(std::memory_order::acquire);
        }
        resume(pi->m_awaiting_coroutine);
    }
}

auto io_scheduler::process_timeout_execute() -> void
{
    std::vector<detail::poll_info*> poll_infos{};
    auto now = clock::now();

    {
        std::scoped_lock lk{m_timed_events_mutex};
        while (!m_timed_events.empty())
        {
            auto first = m_timed_events.begin();
            auto [tp, pi] = *first;

            if (tp <= now)
            {
                m_timed_events.erase(first);
                poll_infos.emplace_back(pi);
            }
            else
            {
                break;
            }
        }
    }

    std::vector<std::coroutine_handle<>> handles{};
    handles.reserve(poll_infos.size());
    for (auto pi : poll_infos)
    {
        if (!pi->m_processed)
        {
            // It's possible the event and the timeout occurred in the same epoll_wait; make sure only one
            // is ever processed, the other is discarded.
            pi->m_processed = true;

            // Since this timed out, remove its corresponding event if it has one.
            if (pi->m_fd != -1)
            {
                epoll_ctl(m_epoll_fd, EPOLL_CTL_DEL, pi->m_fd, nullptr);
            }

            while (pi->m_awaiting_coroutine == nullptr)
            {
                std::atomic_thread_fence(std::memory_order::acquire);
                // std::cerr << "process_event_execute() has a nullptr event\n";
            }

            handles.emplace_back(pi->m_awaiting_coroutine);
            pi->m_poll_status = coro::poll_status::timeout;
        }
    }

    // Resume all timed out coroutines.
    resume(handles);

    // Update the time to the next smallest time point; re-take the current now time
    // since updating and resuming tasks could shift the time.
    update_timeout(clock::now());
}

auto io_scheduler::add_timer_token(time_point tp, detail::poll_info& pi) -> timed_events::iterator
{
    std::scoped_lock lk{m_timed_events_mutex};
    auto pos = m_timed_events.emplace(tp, &pi);

    // If this item was inserted as the smallest time point, update the timeout.
    if (pos == m_timed_events.begin())
    {
        update_timeout(clock::now());
    }

    return pos;
}

auto io_scheduler::remove_timer_token(timed_events::iterator pos) -> void
{
    {
        std::scoped_lock lk{m_timed_events_mutex};
        auto is_first = (m_timed_events.begin() == pos);

        m_timed_events.erase(pos);

        // If this was the first item, update the timeout. It would be acceptable to just let it
        // also fire the timeout as the event loop will ignore it since nothing will have timed
        // out, but it feels like the right thing to do to update it to the correct timeout value.
        if (is_first)
        {
            update_timeout(clock::now());
        }
    }
}

auto io_scheduler::update_timeout(time_point now) -> void
{
    if (!m_timed_events.empty())
    {
        auto& [tp, pi] = *m_timed_events.begin();

        auto amount = tp - now;

        auto seconds = std::chrono::duration_cast<std::chrono::seconds>(amount);
        amount -= seconds;
        auto nanoseconds = std::chrono::duration_cast<std::chrono::nanoseconds>(amount);

        // As a safeguard if both values end up as zero (or negative) then trigger the timeout
        // immediately as zero disarms timerfd according to the man pages and negative values
        // will result in an error return value.
        if (seconds <= 0s)
        {
            seconds = 0s;
            if (nanoseconds <= 0ns)
            {
                // just trigger "immediately"!
                nanoseconds = 1ns;
            }
        }

        itimerspec ts{};
        ts.it_value.tv_sec = seconds.count();
        ts.it_value.tv_nsec = nanoseconds.count();

        if (timerfd_settime(m_timer_fd, 0, &ts, nullptr) == -1)
        {
            std::cerr << "Failed to set timerfd errno=[" << std::string{strerror(errno)} << "].";
        }
    }
    else
    {
        // Setting these values to zero disables the timer.
        itimerspec ts{};
        ts.it_value.tv_sec = 0;
        ts.it_value.tv_nsec = 0;
        if (timerfd_settime(m_timer_fd, 0, &ts, nullptr) == -1)
        {
            std::cerr << "Failed to set timerfd errno=[" << std::string{strerror(errno)} << "].";
        }
    }
}

} // namespace coro
@@ -1,29 +0,0 @@
#include "coro/net/connect.hpp"
|
||||
|
||||
#include <stdexcept>
|
||||
|
||||
namespace coro::net
|
||||
{
|
||||
const static std::string connect_status_connected{"connected"};
|
||||
const static std::string connect_status_invalid_ip_address{"invalid_ip_address"};
|
||||
const static std::string connect_status_timeout{"timeout"};
|
||||
const static std::string connect_status_error{"error"};
|
||||
|
||||
auto to_string(const connect_status& status) -> const std::string&
|
||||
{
|
||||
switch (status)
|
||||
{
|
||||
case connect_status::connected:
|
||||
return connect_status_connected;
|
||||
case connect_status::invalid_ip_address:
|
||||
return connect_status_invalid_ip_address;
|
||||
case connect_status::timeout:
|
||||
return connect_status_timeout;
|
||||
case connect_status::error:
|
||||
return connect_status_error;
|
||||
}
|
||||
|
||||
throw std::logic_error{"Invalid/unknown connect status."};
|
||||
}
|
||||
|
||||
} // namespace coro::net
|
|
@ -1,190 +0,0 @@
#include "coro/net/dns_resolver.hpp"

#include <arpa/inet.h>
#include <iostream>
#include <netdb.h>

namespace coro::net
{
uint64_t   dns_resolver::m_ares_count{0};
std::mutex dns_resolver::m_ares_mutex{};

auto ares_dns_callback(void* arg, int status, int /*timeouts*/, struct hostent* host) -> void
{
    auto& result = *static_cast<dns_result*>(arg);
    --result.m_pending_dns_requests;

    if (host == nullptr || status != ARES_SUCCESS)
    {
        result.m_status = dns_status::error;
    }
    else
    {
        result.m_status = dns_status::complete;

        for (size_t i = 0; host->h_addr_list[i] != nullptr; ++i)
        {
            size_t len = (host->h_addrtype == AF_INET) ? net::ip_address::ipv4_len : net::ip_address::ipv6_len;
            net::ip_address ip_addr{
                std::span<const uint8_t>{reinterpret_cast<const uint8_t*>(host->h_addr_list[i]), len},
                static_cast<net::domain_t>(host->h_addrtype)};

            result.m_ip_addresses.emplace_back(std::move(ip_addr));
        }
    }

    if (result.m_pending_dns_requests == 0)
    {
        result.m_resume.set(result.m_io_scheduler);
    }
}

dns_result::dns_result(coro::io_scheduler& scheduler, coro::event& resume, uint64_t pending_dns_requests)
    : m_io_scheduler(scheduler),
      m_resume(resume),
      m_pending_dns_requests(pending_dns_requests)
{
}

dns_resolver::dns_resolver(io_scheduler& scheduler, std::chrono::milliseconds timeout)
    : m_io_scheduler(scheduler),
      m_timeout(timeout),
      m_task_container(scheduler)
{
    {
        std::scoped_lock g{m_ares_mutex};
        if (m_ares_count == 0)
        {
            auto ares_status = ares_library_init(ARES_LIB_INIT_ALL);
            if (ares_status != ARES_SUCCESS)
            {
                throw std::runtime_error{ares_strerror(ares_status)};
            }
        }
        ++m_ares_count;
    }

    auto channel_init_status = ares_init(&m_ares_channel);
    if (channel_init_status != ARES_SUCCESS)
    {
        throw std::runtime_error{ares_strerror(channel_init_status)};
    }
}

dns_resolver::~dns_resolver()
{
    if (m_ares_channel != nullptr)
    {
        ares_destroy(m_ares_channel);
        m_ares_channel = nullptr;
    }

    {
        std::scoped_lock g{m_ares_mutex};
        --m_ares_count;
        if (m_ares_count == 0)
        {
            ares_library_cleanup();
        }
    }
}

auto dns_resolver::host_by_name(const net::hostname& hn) -> coro::task<std::unique_ptr<dns_result>>
{
    coro::event resume_event{};
    auto result_ptr = std::make_unique<dns_result>(m_io_scheduler, resume_event, 2);

    ares_gethostbyname(m_ares_channel, hn.data().data(), AF_INET, ares_dns_callback, result_ptr.get());
    ares_gethostbyname(m_ares_channel, hn.data().data(), AF_INET6, ares_dns_callback, result_ptr.get());

    // Add all required poll calls for ares to kick off the dns requests.
    ares_poll();

    // Suspend until this specific result is completed by ares.
    co_await resume_event;
    co_return result_ptr;
}

auto dns_resolver::ares_poll() -> void
{
    std::array<ares_socket_t, ARES_GETSOCK_MAXNUM> ares_sockets{};
    std::array<poll_op, ARES_GETSOCK_MAXNUM>       poll_ops{};

    int bitmask = ares_getsock(m_ares_channel, ares_sockets.data(), ARES_GETSOCK_MAXNUM);

    size_t new_sockets{0};

    for (size_t i = 0; i < ARES_GETSOCK_MAXNUM; ++i)
    {
        uint64_t ops{0};

        if (ARES_GETSOCK_READABLE(bitmask, i))
        {
            ops |= static_cast<uint64_t>(poll_op::read);
        }
        if (ARES_GETSOCK_WRITABLE(bitmask, i))
        {
            ops |= static_cast<uint64_t>(poll_op::write);
        }

        if (ops != 0)
        {
            poll_ops[i] = static_cast<poll_op>(ops);
            ++new_sockets;
        }
        else
        {
            // According to ares usage within curl, once the bitmask for a socket is zero the
            // rest of the bitmask will also be zero.
            break;
        }
    }

    std::vector<coro::task<void>> poll_tasks{};
    for (size_t i = 0; i < new_sockets; ++i)
    {
        auto fd = static_cast<io_scheduler::fd_t>(ares_sockets[i]);

        // If this socket is not currently actively polling, start polling!
        if (m_active_sockets.emplace(fd).second)
        {
            m_task_container.start(make_poll_task(fd, poll_ops[i]));
        }
    }
}

auto dns_resolver::make_poll_task(io_scheduler::fd_t fd, poll_op ops) -> coro::task<void>
{
    auto result = co_await m_io_scheduler.poll(fd, ops, m_timeout);
    switch (result)
    {
        case poll_status::event:
        {
            auto read_sock  = poll_op_readable(ops) ? fd : ARES_SOCKET_BAD;
            auto write_sock = poll_op_writeable(ops) ? fd : ARES_SOCKET_BAD;
            ares_process_fd(m_ares_channel, read_sock, write_sock);
        }
        break;
        case poll_status::timeout:
            ares_process_fd(m_ares_channel, ARES_SOCKET_BAD, ARES_SOCKET_BAD);
            break;
        case poll_status::closed:
            // Might need to do something like call with two ARES_SOCKET_BAD?
            break;
        case poll_status::error:
            // Might need to do something like call with two ARES_SOCKET_BAD?
            break;
    }

    // Remove from the list of actively polling sockets.
    m_active_sockets.erase(fd);

    // Re-initialize sockets/polls for ares since this one has now triggered.
    ares_poll();

    co_return;
}

} // namespace coro::net
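A hedged usage sketch for the resolver above: resolve a hostname from a scheduled task. The status() and ip_addresses() accessors are assumptions (only the private m_ members appear in this file), as is constructing hostname from a string literal.

auto resolve = [](coro::io_scheduler& s, coro::net::dns_resolver& dns) -> coro::task<void> {
    co_await s.schedule();
    // host_by_name() kicks off the A and AAAA lookups, then suspends on the resume event.
    auto result = co_await dns.host_by_name(coro::net::hostname{"example.com"});
    if (result->status() == coro::net::dns_status::complete) // accessor name assumed
    {
        for (const auto& ip : result->ip_addresses()) // accessor name assumed
        {
            // connect, log, etc.
        }
    }
    co_return;
};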
@ -1,20 +0,0 @@
#include "coro/net/ip_address.hpp"

namespace coro::net
{
static std::string domain_ipv4{"ipv4"};
static std::string domain_ipv6{"ipv6"};

auto to_string(domain_t domain) -> const std::string&
{
    switch (domain)
    {
        case domain_t::ipv4:
            return domain_ipv4;
        case domain_t::ipv6:
            return domain_ipv6;
    }
    throw std::runtime_error{"coro::net::to_string(domain_t) unknown domain"};
}

} // namespace coro::net
@ -1,59 +0,0 @@
#include "coro/net/recv_status.hpp"

namespace coro::net
{
static const std::string recv_status_ok{"ok"};
static const std::string recv_status_closed{"closed"};
static const std::string recv_status_udp_not_bound{"udp_not_bound"};
// static const std::string recv_status_try_again{"try_again"};
static const std::string recv_status_would_block{"would_block"};
static const std::string recv_status_bad_file_descriptor{"bad_file_descriptor"};
static const std::string recv_status_connection_refused{"connection_refused"};
static const std::string recv_status_memory_fault{"memory_fault"};
static const std::string recv_status_interrupted{"interrupted"};
static const std::string recv_status_invalid_argument{"invalid_argument"};
static const std::string recv_status_no_memory{"no_memory"};
static const std::string recv_status_not_connected{"not_connected"};
static const std::string recv_status_not_a_socket{"not_a_socket"};
static const std::string recv_status_unknown{"unknown"};

static const std::string recv_status_ssl_error{"ssl_error"};

auto to_string(recv_status status) -> const std::string&
{
    switch (status)
    {
        case recv_status::ok:
            return recv_status_ok;
        case recv_status::closed:
            return recv_status_closed;
        case recv_status::udp_not_bound:
            return recv_status_udp_not_bound;
        // case recv_status::try_again: return recv_status_try_again;
        case recv_status::would_block:
            return recv_status_would_block;
        case recv_status::bad_file_descriptor:
            return recv_status_bad_file_descriptor;
        case recv_status::connection_refused:
            return recv_status_connection_refused;
        case recv_status::memory_fault:
            return recv_status_memory_fault;
        case recv_status::interrupted:
            return recv_status_interrupted;
        case recv_status::invalid_argument:
            return recv_status_invalid_argument;
        case recv_status::no_memory:
            return recv_status_no_memory;
        case recv_status::not_connected:
            return recv_status_not_connected;
        case recv_status::not_a_socket:
            return recv_status_not_a_socket;

        case recv_status::ssl_error:
            return recv_status_ssl_error;
    }

    return recv_status_unknown;
}

} // namespace coro::net
@ -1,5 +0,0 @@
#include "coro/net/send_status.hpp"

namespace coro::net
{
} // namespace coro::net
@ -1,130 +0,0 @@
#include "coro/net/socket.hpp"

namespace coro::net
{
auto socket::type_to_os(type_t type) -> int
{
    switch (type)
    {
        case type_t::udp:
            return SOCK_DGRAM;
        case type_t::tcp:
            return SOCK_STREAM;
        default:
            throw std::runtime_error{"Unknown socket::type_t."};
    }
}

auto socket::operator=(socket&& other) noexcept -> socket&
{
    if (std::addressof(other) != this)
    {
        m_fd = std::exchange(other.m_fd, -1);
    }

    return *this;
}

auto socket::blocking(blocking_t block) -> bool
{
    if (m_fd < 0)
    {
        return false;
    }

    int flags = fcntl(m_fd, F_GETFL, 0);
    if (flags == -1)
    {
        return false;
    }

    // Set or clear the non-blocking flag.
    flags = (block == blocking_t::yes) ? flags & ~O_NONBLOCK : (flags | O_NONBLOCK);

    return (fcntl(m_fd, F_SETFL, flags) == 0);
}

auto socket::shutdown(poll_op how) -> bool
{
    if (m_fd != -1)
    {
        int h{0};
        switch (how)
        {
            case poll_op::read:
                h = SHUT_RD;
                break;
            case poll_op::write:
                h = SHUT_WR;
                break;
            case poll_op::read_write:
                h = SHUT_RDWR;
                break;
        }

        return (::shutdown(m_fd, h) == 0);
    }
    return false;
}

auto socket::close() -> void
{
    if (m_fd != -1)
    {
        ::close(m_fd);
        m_fd = -1;
    }
}

auto make_socket(const socket::options& opts) -> socket
{
    socket s{::socket(static_cast<int>(opts.domain), socket::type_to_os(opts.type), 0)};
    if (s.native_handle() < 0)
    {
        throw std::runtime_error{"Failed to create socket."};
    }

    if (opts.blocking == socket::blocking_t::no)
    {
        if (s.blocking(socket::blocking_t::no) == false)
        {
            throw std::runtime_error{"Failed to set socket to non-blocking mode."};
        }
    }

    return s;
}

auto make_accept_socket(const socket::options& opts, const net::ip_address& address, uint16_t port, int32_t backlog)
    -> socket
{
    socket s = make_socket(opts);

    int sock_opt{1};
    if (setsockopt(s.native_handle(), SOL_SOCKET, SO_REUSEADDR | SO_REUSEPORT, &sock_opt, sizeof(sock_opt)) < 0)
    {
        throw std::runtime_error{"Failed to setsockopt(SO_REUSEADDR | SO_REUSEPORT)"};
    }

    sockaddr_in server{};
    server.sin_family = static_cast<int>(opts.domain);
    server.sin_port   = htons(port);
    server.sin_addr   = *reinterpret_cast<const in_addr*>(address.data().data());

    if (bind(s.native_handle(), (struct sockaddr*)&server, sizeof(server)) < 0)
    {
        throw std::runtime_error{"Failed to bind."};
    }

    if (opts.type == socket::type_t::tcp)
    {
        if (listen(s.native_handle(), backlog) < 0)
        {
            throw std::runtime_error{"Failed to listen."};
        }
    }

    return s;
}

} // namespace coro::net
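A usage sketch for make_accept_socket() above: a non-blocking IPv4 TCP listener. The ip_address::from_string() factory is an assumption; it is not shown in this diff.

auto listener = coro::net::make_accept_socket(
    coro::net::socket::options{
        coro::net::domain_t::ipv4, coro::net::socket::type_t::tcp, coro::net::socket::blocking_t::no},
    coro::net::ip_address::from_string("127.0.0.1"), // assumed factory
    8080, // port
    128); // listen() backlog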
@ -1,65 +0,0 @@
#include "coro/net/ssl_context.hpp"

#include <iostream>

namespace coro::net
{
uint64_t   ssl_context::m_ssl_context_count{0};
std::mutex ssl_context::m_ssl_context_mutex{};

ssl_context::ssl_context()
{
    {
        std::scoped_lock g{m_ssl_context_mutex};
        if (m_ssl_context_count == 0)
        {
            OPENSSL_init_ssl(0, nullptr);
        }
        ++m_ssl_context_count;
    }

    m_ssl_ctx = SSL_CTX_new(TLS_method());
    if (m_ssl_ctx == nullptr)
    {
        throw std::runtime_error{"Failed to initialize OpenSSL Context object."};
    }

    // Disable SSLv3
    SSL_CTX_set_options(m_ssl_ctx, SSL_OP_ALL | SSL_OP_NO_SSLv3);
}

ssl_context::ssl_context(
    std::filesystem::path certificate,
    ssl_file_type         certificate_type,
    std::filesystem::path private_key,
    ssl_file_type         private_key_type)
    : ssl_context()
{
    if (auto r = SSL_CTX_use_certificate_file(m_ssl_ctx, certificate.c_str(), static_cast<int>(certificate_type));
        r != 1)
    {
        throw std::runtime_error{"Failed to load certificate file " + certificate.string()};
    }

    if (auto r = SSL_CTX_use_PrivateKey_file(m_ssl_ctx, private_key.c_str(), static_cast<int>(private_key_type));
        r != 1)
    {
        throw std::runtime_error{"Failed to load private key file " + private_key.string()};
    }

    if (auto r = SSL_CTX_check_private_key(m_ssl_ctx); r != 1)
    {
        throw std::runtime_error{"Certificate and private key do not match."};
    }
}

ssl_context::~ssl_context()
{
    if (m_ssl_ctx != nullptr)
    {
        SSL_CTX_free(m_ssl_ctx);
        m_ssl_ctx = nullptr;
    }
}

} // namespace coro::net
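A usage sketch for the certificate-loading constructor above, matching the cert.pem/key.pem pair the test harness later in this diff generates; the ssl_file_type::pem enumerator name is assumed.

coro::net::ssl_context ctx{
    "cert.pem", coro::net::ssl_file_type::pem, // enumerator name assumed
    "key.pem",  coro::net::ssl_file_type::pem};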
@ -1,254 +0,0 @@
#include "coro/net/tcp_client.hpp"

namespace coro::net
{
using namespace std::chrono_literals;

tcp_client::tcp_client(io_scheduler& scheduler, options opts)
    : m_io_scheduler(&scheduler),
      m_options(std::move(opts)),
      m_socket(net::make_socket(
          net::socket::options{m_options.address.domain(), net::socket::type_t::tcp, net::socket::blocking_t::no}))
{
}

tcp_client::tcp_client(io_scheduler& scheduler, net::socket socket, options opts)
    : m_io_scheduler(&scheduler),
      m_options(std::move(opts)),
      m_socket(std::move(socket)),
      m_connect_status(connect_status::connected),
      m_ssl_info(ssl_connection_type::accept)
{
    // Force the socket to be non-blocking.
    m_socket.blocking(coro::net::socket::blocking_t::no);
}

tcp_client::tcp_client(tcp_client&& other)
    : m_io_scheduler(std::exchange(other.m_io_scheduler, nullptr)),
      m_options(std::move(other.m_options)),
      m_socket(std::move(other.m_socket)),
      m_connect_status(std::exchange(other.m_connect_status, std::nullopt)),
      m_ssl_info(std::move(other.m_ssl_info))
{
}

tcp_client::~tcp_client()
{
    // If this tcp client is using SSL and the connection did not have an ssl error, schedule a
    // task to shut down the connection cleanly. This is done on a background scheduled task
    // since the tcp client's destructor cannot co_await the SSL_shutdown() read and write poll
    // operations.
    if (m_ssl_info.m_ssl_ptr != nullptr && !m_ssl_info.m_ssl_error)
    {
        // Should the shutdown timeout be configurable?
        ssl_shutdown_and_free(
            *m_io_scheduler, std::move(m_socket), std::move(m_ssl_info.m_ssl_ptr), std::chrono::seconds{30});
    }
}

auto tcp_client::operator=(tcp_client&& other) noexcept -> tcp_client&
{
    if (std::addressof(other) != this)
    {
        m_io_scheduler   = std::exchange(other.m_io_scheduler, nullptr);
        m_options        = std::move(other.m_options);
        m_socket         = std::move(other.m_socket);
        m_connect_status = std::exchange(other.m_connect_status, std::nullopt);
        m_ssl_info       = std::move(other.m_ssl_info);
    }
    return *this;
}

auto tcp_client::connect(std::chrono::milliseconds timeout) -> coro::task<connect_status>
{
    // Only allow the user to connect a tcp client once; to re-connect they should make a new
    // tcp_client.
    if (m_connect_status.has_value())
    {
        co_return m_connect_status.value();
    }

    // This ensures the connection status is always set on the client object upon returning.
    auto return_value = [this](connect_status s) -> connect_status {
        m_connect_status = s;
        return s;
    };

    sockaddr_in server{};
    server.sin_family = static_cast<int>(m_options.address.domain());
    server.sin_port   = htons(m_options.port);
    server.sin_addr   = *reinterpret_cast<const in_addr*>(m_options.address.data().data());

    auto cret = ::connect(m_socket.native_handle(), (struct sockaddr*)&server, sizeof(server));
    if (cret == 0)
    {
        co_return return_value(connect_status::connected);
    }
    else if (cret == -1)
    {
        // If the connect is happening in the background, poll for write on the socket to
        // trigger when the connection is established.
        if (errno == EAGAIN || errno == EINPROGRESS)
        {
            auto pstatus = co_await m_io_scheduler->poll(m_socket, poll_op::write, timeout);
            if (pstatus == poll_status::event)
            {
                int       result{0};
                socklen_t result_length{sizeof(result)};
                if (getsockopt(m_socket.native_handle(), SOL_SOCKET, SO_ERROR, &result, &result_length) < 0)
                {
                    std::cerr << "connect failed to getsockopt after write poll event\n";
                }

                if (result == 0)
                {
                    co_return return_value(connect_status::connected);
                }
            }
            else if (pstatus == poll_status::timeout)
            {
                co_return return_value(connect_status::timeout);
            }
        }
    }

    co_return return_value(connect_status::error);
}

auto tcp_client::ssl_handshake(std::chrono::milliseconds timeout) -> coro::task<ssl_handshake_status>
{
    if (!m_connect_status.has_value() || m_connect_status.value() != connect_status::connected)
    {
        // Can't ssl handshake if the connection isn't established.
        co_return ssl_handshake_status::not_connected;
    }

    if (m_options.ssl_ctx == nullptr)
    {
        // ssl isn't set up.
        co_return ssl_handshake_status::ssl_context_required;
    }

    if (m_ssl_info.m_ssl_handshake_status.has_value())
    {
        // The user has already called this function.
        co_return m_ssl_info.m_ssl_handshake_status.value();
    }

    // Ensure any return past this point sets the cached handshake status.
    auto return_value = [this](ssl_handshake_status s) -> ssl_handshake_status {
        m_ssl_info.m_ssl_handshake_status = s;
        return s;
    };

    m_ssl_info.m_ssl_ptr = ssl_unique_ptr{SSL_new(m_options.ssl_ctx->native_handle())};
    if (m_ssl_info.m_ssl_ptr == nullptr)
    {
        co_return return_value(ssl_handshake_status::ssl_resource_allocation_failed);
    }

    if (auto r = SSL_set_fd(m_ssl_info.m_ssl_ptr.get(), m_socket.native_handle()); r == 0)
    {
        co_return return_value(ssl_handshake_status::ssl_set_fd_failure);
    }

    if (m_ssl_info.m_ssl_connection_type == ssl_connection_type::connect)
    {
        SSL_set_connect_state(m_ssl_info.m_ssl_ptr.get());
    }
    else // ssl_connection_type::accept
    {
        SSL_set_accept_state(m_ssl_info.m_ssl_ptr.get());
    }

    int r{0};
    ERR_clear_error();
    while ((r = SSL_do_handshake(m_ssl_info.m_ssl_ptr.get())) != 1)
    {
        poll_op op{poll_op::read_write};
        int     err = SSL_get_error(m_ssl_info.m_ssl_ptr.get(), r);
        if (err == SSL_ERROR_WANT_WRITE)
        {
            op = poll_op::write;
        }
        else if (err == SSL_ERROR_WANT_READ)
        {
            op = poll_op::read;
        }
        else
        {
            // char error_buffer[256];
            // ERR_error_string(err, error_buffer);
            // std::cerr << "ssl_handshake error=[" << error_buffer << "]\n";
            co_return return_value(ssl_handshake_status::handshake_failed);
        }

        // TODO: adjust timeout based on elapsed time so far.
        auto pstatus = co_await m_io_scheduler->poll(m_socket, op, timeout);
        switch (pstatus)
        {
            case poll_status::timeout:
                co_return return_value(ssl_handshake_status::timeout);
            case poll_status::error:
                co_return return_value(ssl_handshake_status::poll_error);
            case poll_status::closed:
                co_return return_value(ssl_handshake_status::unexpected_close);
            default:
                // Event triggered, continue the handshake.
                break;
        }
    }

    co_return return_value(ssl_handshake_status::ok);
}

auto tcp_client::ssl_shutdown_and_free(
    io_scheduler& io_scheduler, net::socket s, ssl_unique_ptr ssl_ptr, std::chrono::milliseconds timeout)
    -> coro::task<void>
{
    // Immediately transfer onto the scheduler thread pool for background processing.
    co_await io_scheduler.schedule();

    while (true)
    {
        auto r = SSL_shutdown(ssl_ptr.get());
        if (r == 1) // shutdown complete
        {
            co_return;
        }
        else if (r == 0) // shutdown in progress
        {
            coro::poll_op op{coro::poll_op::read_write};
            auto          err = SSL_get_error(ssl_ptr.get(), r);
            if (err == SSL_ERROR_WANT_WRITE)
            {
                op = coro::poll_op::write;
            }
            else if (err == SSL_ERROR_WANT_READ)
            {
                op = coro::poll_op::read;
            }
            else
            {
                co_return;
            }

            auto pstatus = co_await io_scheduler.poll(s, op, timeout);
            switch (pstatus)
            {
                case poll_status::timeout:
                case poll_status::error:
                case poll_status::closed:
                    co_return;
                default:
                    // Continue the shutdown.
                    break;
            }
        }
        else // r < 0 error
        {
            co_return;
        }
    }
}

} // namespace coro::net
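A usage sketch of the connect-then-handshake flow above, from a task scheduled on the io_scheduler; the timeout values are illustrative.

auto run = [](coro::io_scheduler& s, coro::net::tcp_client& client) -> coro::task<void> {
    co_await s.schedule();
    if (co_await client.connect(std::chrono::seconds{5}) != coro::net::connect_status::connected)
    {
        co_return; // connect() caches and re-returns the failed status on a second call
    }
    // Only meaningful when options.ssl_ctx was provided at construction.
    if (co_await client.ssl_handshake(std::chrono::seconds{5}) != coro::net::ssl_handshake_status::ok)
    {
        co_return;
    }
    // send()/recv() traffic would go here.
    co_return;
};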
@ -1,56 +0,0 @@
#include "coro/net/tcp_server.hpp"

#include "coro/io_scheduler.hpp"

namespace coro::net
{
tcp_server::tcp_server(io_scheduler& scheduler, options opts)
    : m_io_scheduler(&scheduler),
      m_options(std::move(opts)),
      m_accept_socket(net::make_accept_socket(
          net::socket::options{net::domain_t::ipv4, net::socket::type_t::tcp, net::socket::blocking_t::no},
          m_options.address,
          m_options.port,
          m_options.backlog))
{
}

tcp_server::tcp_server(tcp_server&& other)
    : m_io_scheduler(std::exchange(other.m_io_scheduler, nullptr)),
      m_options(std::move(other.m_options)),
      m_accept_socket(std::move(other.m_accept_socket))
{
}

auto tcp_server::operator=(tcp_server&& other) -> tcp_server&
{
    if (std::addressof(other) != this)
    {
        m_io_scheduler  = std::exchange(other.m_io_scheduler, nullptr);
        m_options       = std::move(other.m_options);
        m_accept_socket = std::move(other.m_accept_socket);
    }
    return *this;
}

auto tcp_server::accept() -> coro::net::tcp_client
{
    sockaddr_in client{};
    socklen_t   len = sizeof(struct sockaddr_in);
    net::socket s{::accept(m_accept_socket.native_handle(), (struct sockaddr*)&client, &len)};

    std::span<const uint8_t> ip_addr_view{
        reinterpret_cast<uint8_t*>(&client.sin_addr.s_addr),
        sizeof(client.sin_addr.s_addr),
    };

    return tcp_client{
        *m_io_scheduler,
        std::move(s),
        tcp_client::options{
            .address = net::ip_address{ip_addr_view, static_cast<net::domain_t>(client.sin_family)},
            .port    = ntohs(client.sin_port),
            .ssl_ctx = m_options.ssl_ctx}};
}

} // namespace coro::net
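A usage sketch of the accept path above, mirroring how the echo benchmark later in this diff drives it: poll the accept socket, then call accept() once it is readable.

auto acceptor = [](coro::io_scheduler& s, coro::net::tcp_server& server) -> coro::task<void> {
    co_await s.schedule();
    while (true)
    {
        if (co_await server.poll(std::chrono::milliseconds{1}) == coro::poll_status::event)
        {
            auto client = server.accept();
            if (client.socket().is_valid())
            {
                // hand the tcp_client off to a per-connection task
            }
        }
    }
};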
@ -1,21 +0,0 @@
#include "coro/net/udp_peer.hpp"

namespace coro::net
{
udp_peer::udp_peer(io_scheduler& scheduler, net::domain_t domain)
    : m_io_scheduler(scheduler),
      m_socket(net::make_socket(net::socket::options{domain, net::socket::type_t::udp, net::socket::blocking_t::no}))
{
}

udp_peer::udp_peer(io_scheduler& scheduler, const info& bind_info)
    : m_io_scheduler(scheduler),
      m_socket(net::make_accept_socket(
          net::socket::options{bind_info.address.domain(), net::socket::type_t::udp, net::socket::blocking_t::no},
          bind_info.address,
          bind_info.port)),
      m_bound(true)
{
}

} // namespace coro::net
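A usage sketch of the two constructors above: an unbound sender and a bound receiver. The udp_peer::info field order follows the bind_info usage in the source, and ip_address::from_string() is again an assumption.

coro::net::udp_peer sender{scheduler, coro::net::domain_t::ipv4};
coro::net::udp_peer receiver{
    scheduler,
    coro::net::udp_peer::info{coro::net::ip_address::from_string("0.0.0.0"), 8080}}; // field order assumed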
@ -1,221 +0,0 @@
#include "coro/shared_mutex.hpp"
#include "coro/thread_pool.hpp"

namespace coro
{
shared_scoped_lock::~shared_scoped_lock()
{
    unlock();
}

auto shared_scoped_lock::unlock() -> void
{
    if (m_shared_mutex != nullptr)
    {
        if (m_exclusive)
        {
            m_shared_mutex->unlock();
        }
        else
        {
            m_shared_mutex->unlock_shared();
        }

        m_shared_mutex = nullptr;
    }
}

shared_mutex::shared_mutex(coro::thread_pool& tp) : m_thread_pool(tp)
{
}

auto shared_mutex::lock_operation::await_ready() const noexcept -> bool
{
    if (m_exclusive)
    {
        return m_shared_mutex.try_lock();
    }
    else
    {
        return m_shared_mutex.try_lock_shared();
    }
}

auto shared_mutex::lock_operation::await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> bool
{
    std::unique_lock lk{m_shared_mutex.m_mutex};
    // It's possible the lock has been released between await_ready() and await_suspend();
    // double check and make sure we are not going to suspend when nobody holds the lock.
    if (m_exclusive)
    {
        if (m_shared_mutex.try_lock_locked(lk))
        {
            return false;
        }
    }
    else
    {
        if (m_shared_mutex.try_lock_shared_locked(lk))
        {
            return false;
        }
    }

    // The lock is definitely held in a manner that prevents acquisition, so suspend ourselves
    // at the end of the waiter list.
    if (m_shared_mutex.m_tail_waiter == nullptr)
    {
        m_shared_mutex.m_head_waiter = this;
        m_shared_mutex.m_tail_waiter = this;
    }
    else
    {
        m_shared_mutex.m_tail_waiter->m_next = this;
        m_shared_mutex.m_tail_waiter         = this;
    }

    // If this is an exclusive lock acquire, mark it as such so that shared locks arriving after
    // this exclusive one also suspend and the exclusive lock doesn't get starved.
    if (m_exclusive)
    {
        ++m_shared_mutex.m_exclusive_waiters;
    }

    m_awaiting_coroutine = awaiting_coroutine;
    return true;
}

auto shared_mutex::try_lock_shared() -> bool
{
    // To acquire the shared lock the state must be one of two states:
    //     1) unlocked
    //     2) shared locked with zero exclusive waiters
    // Zero exclusive waiters prevents exclusive starvation if shared locks are
    // always continuously happening.
    std::unique_lock lk{m_mutex};
    return try_lock_shared_locked(lk);
}

auto shared_mutex::try_lock() -> bool
{
    // To acquire the exclusive lock the state must be unlocked.
    std::unique_lock lk{m_mutex};
    return try_lock_locked(lk);
}

auto shared_mutex::unlock_shared() -> void
{
    std::unique_lock lk{m_mutex};
    --m_shared_users;

    // Only wake waiters from the shared state if all shared users have completed.
    if (m_shared_users == 0)
    {
        if (m_head_waiter != nullptr)
        {
            wake_waiters(lk);
        }
        else
        {
            m_state = state::unlocked;
        }
    }
}

auto shared_mutex::unlock() -> void
{
    std::unique_lock lk{m_mutex};
    if (m_head_waiter != nullptr)
    {
        wake_waiters(lk);
    }
    else
    {
        m_state = state::unlocked;
    }
}

auto shared_mutex::try_lock_shared_locked(std::unique_lock<std::mutex>& lk) -> bool
{
    if (m_state == state::unlocked)
    {
        // If the shared mutex is unlocked, put it into shared mode and add ourselves as a user
        // of the lock.
        m_state = state::locked_shared;
        ++m_shared_users;
        lk.unlock();
        return true;
    }
    else if (m_state == state::locked_shared && m_exclusive_waiters == 0)
    {
        // If the shared mutex is in a shared locked state and there are no exclusive waiters,
        // then add ourselves as a user of the lock.
        ++m_shared_users;
        lk.unlock();
        return true;
    }

    // If the lock is in shared mode but there are exclusive waiters, then we will also wait so
    // the writers are not starved.

    // If the lock is in exclusive mode already, then we need to wait.

    return false;
}

auto shared_mutex::try_lock_locked(std::unique_lock<std::mutex>& lk) -> bool
{
    if (m_state == state::unlocked)
    {
        m_state = state::locked_exclusive;
        lk.unlock();
        return true;
    }
    return false;
}

auto shared_mutex::wake_waiters(std::unique_lock<std::mutex>& lk) -> void
{
    // First determine what the next lock state will be based on the first waiter.
    if (m_head_waiter->m_exclusive)
    {
        // If it's exclusive, then only this waiter can be woken up.
        m_state                   = state::locked_exclusive;
        lock_operation* to_resume = m_head_waiter;
        m_head_waiter             = m_head_waiter->m_next;
        --m_exclusive_waiters;
        if (m_head_waiter == nullptr)
        {
            m_tail_waiter = nullptr;
        }

        // Since this is an exclusive lock waiting we can resume it directly.
        lk.unlock();
        to_resume->m_awaiting_coroutine.resume();
    }
    else
    {
        // If it's shared, then scan forward and wake all shared waiters onto the given thread
        // pool so they can run in parallel.
        m_state = state::locked_shared;
        do
        {
            lock_operation* to_resume = m_head_waiter;
            m_head_waiter             = m_head_waiter->m_next;
            if (m_head_waiter == nullptr)
            {
                m_tail_waiter = nullptr;
            }
            ++m_shared_users;

            m_thread_pool.resume(to_resume->m_awaiting_coroutine);
        } while (m_head_waiter != nullptr && !m_head_waiter->m_exclusive);

        // Cannot unlock until the entire set of shared waiters has been traversed; this seems
        // preferable to allocating space for all the shared waiters, unlocking, and then
        // resuming them in a batch.
        lk.unlock();
    }
}

} // namespace coro
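A usage sketch for the waiter machinery above. The lock()/lock_shared() awaitable factories returning a shared_scoped_lock are assumptions; only the lock_operation awaiter itself appears in this file.

auto reader = [](coro::shared_mutex& m, coro::thread_pool& tp) -> coro::task<void> {
    co_await tp.schedule();
    auto guard = co_await m.lock_shared(); // assumed factory; many readers can hold this at once
    // read shared state ...
    co_return;                             // shared_scoped_lock unlocks on destruction
};

auto writer = [](coro::shared_mutex& m, coro::thread_pool& tp) -> coro::task<void> {
    co_await tp.schedule();
    auto guard = co_await m.lock();        // assumed factory; exclusive, and queued writers
    // mutate shared state ...             // block readers that arrive after them
    co_return;
};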
@ -1,141 +0,0 @@
#include "coro/task_container.hpp"
#include "coro/thread_pool.hpp"

#include <iostream>

namespace coro
{
task_container::task_container(thread_pool& tp, const options opts)
    : m_growth_factor(opts.growth_factor),
      m_thread_pool(tp)
{
    m_tasks.resize(opts.reserve_size);
    for (std::size_t i = 0; i < opts.reserve_size; ++i)
    {
        m_task_indexes.emplace_back(i);
    }
    m_free_pos = m_task_indexes.begin();
}

task_container::~task_container()
{
    // This will hang the current thread... but if tasks are not complete, that's also pretty bad.
    while (!empty())
    {
        garbage_collect();
    }
}

auto task_container::start(coro::task<void> user_task, garbage_collect_t cleanup) -> void
{
    m_size.fetch_add(1, std::memory_order::relaxed);

    std::scoped_lock lk{m_mutex};

    if (cleanup == garbage_collect_t::yes)
    {
        gc_internal();
    }

    // Only grow if completely full and attempting to add more.
    if (m_free_pos == m_task_indexes.end())
    {
        m_free_pos = grow();
    }

    // Store the task inside a cleanup task for self deletion.
    auto index     = *m_free_pos;
    m_tasks[index] = make_cleanup_task(std::move(user_task), m_free_pos);

    // Advance the free position to mark the current slot as used.
    std::advance(m_free_pos, 1);

    // Start executing from the cleanup task to schedule the user's task onto the thread pool.
    m_tasks[index].resume();
}

auto task_container::garbage_collect() -> std::size_t
{
    std::scoped_lock lk{m_mutex};
    return gc_internal();
}

auto task_container::garbage_collect_and_yield_until_empty() -> coro::task<void>
{
    while (!empty())
    {
        garbage_collect();
        co_await m_thread_pool.yield();
    }
}

auto task_container::grow() -> task_position
{
    // Save an index at the current last item.
    auto        last_pos = std::prev(m_task_indexes.end());
    std::size_t new_size = m_tasks.size() * m_growth_factor;
    for (std::size_t i = m_tasks.size(); i < new_size; ++i)
    {
        m_task_indexes.emplace_back(i);
    }
    m_tasks.resize(new_size);
    // Set the free pos to the item just after the previous last item.
    return std::next(last_pos);
}

auto task_container::gc_internal() -> std::size_t
{
    std::size_t deleted{0};
    if (!m_tasks_to_delete.empty())
    {
        for (const auto& pos : m_tasks_to_delete)
        {
            // This doesn't actually 'delete' the task; it will get overwritten when a new user
            // task claims the free space. It could be useful to actually delete the tasks so
            // the coroutine stack frames are destroyed. The advantage of letting a new task
            // replace an old one, though, is that it's a 1:1 exchange on delete and create,
            // rather than a large pause here to delete all the completed tasks.

            // Put the deleted position at the end of the free indexes list.
            m_task_indexes.splice(m_task_indexes.end(), m_task_indexes, pos);
        }
        deleted = m_tasks_to_delete.size();
        m_tasks_to_delete.clear();
    }
    return deleted;
}

auto task_container::make_cleanup_task(task<void> user_task, task_position pos) -> coro::task<void>
{
    // Immediately move the task onto the thread pool.
    co_await m_thread_pool.schedule();

    try
    {
        // Await the user's task to complete.
        co_await user_task;
    }
    catch (const std::exception& e)
    {
        // TODO: what would be a good way to report this to the user...? Catching here is
        // required since the co_await will unwrap the unhandled exception on the task. The
        // user's task should ideally be wrapped in a catch-all and handle it themselves, but
        // that cannot be guaranteed.
        std::cerr << "coro::task_container user_task had an unhandled exception e.what()= " << e.what() << "\n";
    }
    catch (...)
    {
        // Don't crash if they throw something that isn't derived from std::exception.
        std::cerr << "coro::task_container user_task had an unhandled exception not derived from std::exception.\n";
    }

    std::scoped_lock lk{m_mutex};
    m_tasks_to_delete.push_back(pos);
    // This has to be done within the scoped lock to make sure this coroutine task completes
    // before the task container object destructs -- if it was waiting on .empty() to become true.
    m_size.fetch_sub(1, std::memory_order::relaxed);
    co_return;
}

} // namespace coro
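A usage sketch for the container above: fire-and-forget tasks whose lifetimes the container owns. Default-constructible options are assumed.

auto make_background_task = [](coro::thread_pool& tp) -> coro::task<void> {
    co_await tp.schedule();
    // background work ...
    co_return;
};

coro::thread_pool    tp{};
coro::task_container tc{tp}; // assumes options has defaults
tc.start(make_background_task(tp));
// tc's destructor garbage collects until every started task has completed.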
@ -1,5 +1,7 @@
 #include "coro/thread_pool.hpp"
 
+#include <iostream>
+
 namespace coro
 {
 thread_pool::operation::operation(thread_pool& tp) noexcept : m_thread_pool(tp)
@ -35,30 +37,39 @@ auto thread_pool::schedule() -> operation
 {
     if (!m_shutdown_requested.load(std::memory_order::relaxed))
     {
-        m_size.fetch_add(1, std::memory_order::relaxed);
+        m_size.fetch_add(1, std::memory_order::release);
         return operation{*this};
     }
 
     throw std::runtime_error("coro::thread_pool is shutting down, unable to schedule new tasks.");
 }
 
-auto thread_pool::shutdown(shutdown_t wait_for_tasks) noexcept -> void
+auto thread_pool::resume(std::coroutine_handle<> handle) noexcept -> void
 {
-    if (!m_shutdown_requested.exchange(true, std::memory_order::release))
+    if (handle == nullptr)
     {
+        return;
+    }
+
+    m_size.fetch_add(1, std::memory_order::release);
+    schedule_impl(handle);
+}
+
+auto thread_pool::shutdown() noexcept -> void
+{
+    // Only allow shutdown to occur once.
+    if (m_shutdown_requested.exchange(true, std::memory_order::acq_rel) == false)
+    {
         for (auto& thread : m_threads)
         {
             thread.request_stop();
         }
 
-        if (wait_for_tasks == shutdown_t::sync)
+        for (auto& thread : m_threads)
         {
-            for (auto& thread : m_threads)
+            if (thread.joinable())
             {
-                if (thread.joinable())
-                {
-                    thread.join();
-                }
+                thread.join();
             }
         }
     }
@ -115,37 +126,4 @@ auto thread_pool::schedule_impl(std::coroutine_handle<> handle) noexcept -> void
     m_wait_cv.notify_one();
 }
 
-auto thread_pool::resume(std::coroutine_handle<> handle) noexcept -> void
-{
-    if (handle == nullptr)
-    {
-        return;
-    }
-
-    m_size.fetch_add(1, std::memory_order::relaxed);
-    schedule_impl(handle);
-}
-
-auto thread_pool::resume(const std::vector<std::coroutine_handle<>>& handles) noexcept -> void
-{
-    m_size.fetch_add(handles.size(), std::memory_order::relaxed);
-
-    {
-        std::scoped_lock lk{m_wait_mutex};
-        for (const auto& handle : handles)
-        {
-            if (handle != nullptr) [[likely]]
-            {
-                m_queue.emplace_back(handle);
-            }
-            else
-            {
-                m_size.fetch_sub(1, std::memory_order::release);
-            }
-        }
-    }
-
-    m_wait_cv.notify_one();
-}
-
 } // namespace coro
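A usage sketch against the post-change API above: schedule work, then call the new zero-argument shutdown(), which stops and joins the threads synchronously.

coro::thread_pool tp{coro::thread_pool::options{.thread_count = 4}};

auto job = [](coro::thread_pool& tp) -> coro::task<uint64_t> {
    co_await tp.schedule(); // suspends, then resumes on a pool thread
    co_return 42;
};

auto value = coro::sync_wait(job(tp));
tp.shutdown(); // no shutdown_t argument anymore; joins all threads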
@ -1,42 +0,0 @@
cmake_minimum_required(VERSION 3.0)
project(libcoro_test)

set(LIBCORO_TEST_SOURCE_FILES
    net/test_dns_resolver.cpp
    net/test_ip_address.cpp
    net/test_tcp_server.cpp
    net/test_udp_peers.cpp

    bench.cpp
    test_event.cpp
    test_generator.cpp
    test_io_scheduler.cpp
    test_latch.cpp
    test_mutex.cpp
    test_ring_buffer.cpp
    test_semaphore.cpp
    test_shared_mutex.cpp
    test_sync_wait.cpp
    test_task.cpp
    test_thread_pool.cpp
    test_when_all.cpp
)

add_executable(${PROJECT_NAME} main.cpp ${LIBCORO_TEST_SOURCE_FILES})
target_compile_features(${PROJECT_NAME} PUBLIC cxx_std_20)
target_include_directories(${PROJECT_NAME} PRIVATE ${CMAKE_CURRENT_SOURCE_DIR})
target_link_libraries(${PROJECT_NAME} PRIVATE libcoro)
target_compile_options(${PROJECT_NAME} PUBLIC -fcoroutines)

if(LIBCORO_CODE_COVERAGE)
    target_compile_options(${PROJECT_NAME} PRIVATE --coverage)
    target_link_libraries(${PROJECT_NAME} PRIVATE gcov)
endif()

if(${CMAKE_CXX_COMPILER_ID} MATCHES "GNU")
    target_compile_options(${PROJECT_NAME} PUBLIC -fcoroutines -Wall -Wextra -pipe)
elseif(${CMAKE_CXX_COMPILER_ID} MATCHES "Clang")
    message(FATAL_ERROR "Clang is currently not supported.")
endif()

add_test(NAME libcoro_tests COMMAND ${PROJECT_NAME})
535 test/bench.cpp
@ -1,535 +0,0 @@
|
|||
#include "catch.hpp"
|
||||
|
||||
#include <coro/coro.hpp>
|
||||
|
||||
#include <atomic>
|
||||
#include <chrono>
|
||||
#include <iomanip>
|
||||
#include <iostream>
|
||||
|
||||
using namespace std::chrono_literals;
|
||||
using sc = std::chrono::steady_clock;
|
||||
|
||||
constexpr std::size_t default_iterations = 5'000'000;
|
||||
|
||||
static auto print_stats(const std::string& bench_name, uint64_t operations, sc::time_point start, sc::time_point stop)
|
||||
-> void
|
||||
{
|
||||
auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start);
|
||||
auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(duration);
|
||||
|
||||
std::cout << bench_name << "\n";
|
||||
std::cout << " " << operations << " ops in " << ms.count() << "ms\n";
|
||||
|
||||
double seconds = duration.count() / 1'000'000'000.0;
|
||||
double ops_per_sec = static_cast<uint64_t>(operations / seconds);
|
||||
|
||||
std::cout << " ops/sec: " << std::fixed << ops_per_sec << "\n";
|
||||
}
|
||||
|
||||
TEST_CASE("benchmark counter func direct call", "[benchmark]")
|
||||
{
|
||||
constexpr std::size_t iterations = default_iterations;
|
||||
std::atomic<uint64_t> counter{0};
|
||||
auto func = [&]() -> void {
|
||||
counter.fetch_add(1, std::memory_order::relaxed);
|
||||
return;
|
||||
};
|
||||
|
||||
auto start = sc::now();
|
||||
|
||||
for (std::size_t i = 0; i < iterations; ++i)
|
||||
{
|
||||
func();
|
||||
}
|
||||
|
||||
print_stats("benchmark counter func direct call", iterations, start, sc::now());
|
||||
REQUIRE(counter == iterations);
|
||||
}
|
||||
|
||||
TEST_CASE("benchmark counter func coro::sync_wait(awaitable)", "[benchmark]")
|
||||
{
|
||||
constexpr std::size_t iterations = default_iterations;
|
||||
uint64_t counter{0};
|
||||
auto func = []() -> coro::task<uint64_t> { co_return 1; };
|
||||
|
||||
auto start = sc::now();
|
||||
|
||||
for (std::size_t i = 0; i < iterations; ++i)
|
||||
{
|
||||
counter += coro::sync_wait(func());
|
||||
}
|
||||
|
||||
print_stats("benchmark counter func coro::sync_wait(awaitable)", iterations, start, sc::now());
|
||||
REQUIRE(counter == iterations);
|
||||
}
|
||||
|
||||
TEST_CASE("benchmark counter func coro::sync_wait(coro::when_all(awaitable)) x10", "[benchmark]")
|
||||
{
|
||||
constexpr std::size_t iterations = default_iterations;
|
||||
uint64_t counter{0};
|
||||
auto f = []() -> coro::task<uint64_t> { co_return 1; };
|
||||
|
||||
auto start = sc::now();
|
||||
|
||||
for (std::size_t i = 0; i < iterations; i += 10)
|
||||
{
|
||||
auto tasks = coro::sync_wait(coro::when_all(f(), f(), f(), f(), f(), f(), f(), f(), f(), f()));
|
||||
|
||||
std::apply([&counter](auto&&... t) { ((counter += t.return_value()), ...); }, tasks);
|
||||
}
|
||||
|
||||
print_stats("benchmark counter func coro::sync_wait(coro::when_all(awaitable))", iterations, start, sc::now());
|
||||
REQUIRE(counter == iterations);
|
||||
}
|
||||
|
||||
TEST_CASE("benchmark counter func coro::sync_wait(coro::when_all(vector<awaitable>)) x10", "[benchmark]")
|
||||
{
|
||||
constexpr std::size_t iterations = default_iterations;
|
||||
uint64_t counter{0};
|
||||
auto f = []() -> coro::task<uint64_t> { co_return 1; };
|
||||
|
||||
auto start = sc::now();
|
||||
|
||||
for (std::size_t i = 0; i < iterations; i += 10)
|
||||
{
|
||||
std::vector<coro::task<uint64_t>> tasks{};
|
||||
tasks.reserve(10);
|
||||
for (size_t j = 0; j < 10; ++j)
|
||||
{
|
||||
tasks.emplace_back(f());
|
||||
}
|
||||
|
||||
auto results = coro::sync_wait(coro::when_all(std::move(tasks)));
|
||||
|
||||
for (const auto& r : results)
|
||||
{
|
||||
counter += r.return_value();
|
||||
}
|
||||
}
|
||||
|
||||
print_stats("benchmark counter func coro::sync_wait(coro::when_all(awaitable))", iterations, start, sc::now());
|
||||
REQUIRE(counter == iterations);
|
||||
}
|
||||
|
||||
TEST_CASE("benchmark thread_pool{1} counter task", "[benchmark]")
|
||||
{
|
||||
constexpr std::size_t iterations = default_iterations;
|
||||
|
||||
coro::thread_pool tp{coro::thread_pool::options{1}};
|
||||
std::atomic<uint64_t> counter{0};
|
||||
|
||||
auto make_task = [](coro::thread_pool& tp, std::atomic<uint64_t>& c) -> coro::task<void> {
|
||||
co_await tp.schedule();
|
||||
c.fetch_add(1, std::memory_order::relaxed);
|
||||
co_return;
|
||||
};
|
||||
|
||||
std::vector<coro::task<void>> tasks;
|
||||
tasks.reserve(iterations);
|
||||
|
||||
auto start = sc::now();
|
||||
|
||||
for (std::size_t i = 0; i < iterations; ++i)
|
||||
{
|
||||
tasks.emplace_back(make_task(tp, counter));
|
||||
tasks.back().resume();
|
||||
}
|
||||
|
||||
// This will fail in valgrind since it runs in a single 'thread', and thus is shutsdown prior
|
||||
// to any coroutine actually getting properly scheduled onto the background thread pool.
|
||||
// Inject a sleep here so it forces a thread context switch within valgrind.
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds{10});
|
||||
tp.shutdown();
|
||||
|
||||
print_stats("benchmark thread_pool{1} counter task", iterations, start, sc::now());
|
||||
REQUIRE(counter == iterations);
|
||||
REQUIRE(tp.empty());
|
||||
}
|
||||
|
||||
TEST_CASE("benchmark thread_pool{2} counter task", "[benchmark]")
|
||||
{
|
||||
constexpr std::size_t iterations = default_iterations;
|
||||
|
||||
coro::thread_pool tp{coro::thread_pool::options{2}};
|
||||
std::atomic<uint64_t> counter{0};
|
||||
|
||||
auto make_task = [](coro::thread_pool& tp, std::atomic<uint64_t>& c) -> coro::task<void> {
|
||||
co_await tp.schedule();
|
||||
c.fetch_add(1, std::memory_order::relaxed);
|
||||
co_return;
|
||||
};
|
||||
|
||||
std::vector<coro::task<void>> tasks;
|
||||
tasks.reserve(iterations);
|
||||
|
||||
auto start = sc::now();
|
||||
|
||||
for (std::size_t i = 0; i < iterations; ++i)
|
||||
{
|
||||
tasks.emplace_back(make_task(tp, counter));
|
||||
tasks.back().resume();
|
||||
}
|
||||
|
||||
// This will fail in valgrind since it runs in a single 'thread', and thus is shutsdown prior
|
||||
// to any coroutine actually getting properly scheduled onto the background thread pool.
|
||||
// Inject a sleep here so it forces a thread context switch within valgrind.
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds{10});
|
||||
tp.shutdown();
|
||||
|
||||
print_stats("benchmark thread_pool{2} counter task", iterations, start, sc::now());
|
||||
REQUIRE(counter == iterations);
|
||||
REQUIRE(tp.empty());
|
||||
}
|
||||
|
||||
TEST_CASE("benchmark thread_pool{N} counter task", "[benchmark]")
|
||||
{
|
||||
constexpr std::size_t iterations = default_iterations;
|
||||
|
||||
coro::thread_pool tp{};
|
||||
std::atomic<uint64_t> counter{0};
|
||||
|
||||
auto make_task = [](coro::thread_pool& tp, std::atomic<uint64_t>& c) -> coro::task<void> {
|
||||
co_await tp.schedule();
|
||||
c.fetch_add(1, std::memory_order::relaxed);
|
||||
co_return;
|
||||
};
|
||||
|
||||
std::vector<coro::task<void>> tasks;
|
||||
tasks.reserve(iterations);
|
||||
|
||||
auto start = sc::now();
|
||||
|
||||
for (std::size_t i = 0; i < iterations; ++i)
|
||||
{
|
||||
tasks.emplace_back(make_task(tp, counter));
|
||||
tasks.back().resume();
|
||||
}
|
||||
|
||||
// This will fail in valgrind since it runs in a single 'thread', and thus is shutsdown prior
|
||||
// to any coroutine actually getting properly scheduled onto the background thread pool.
|
||||
// Inject a sleep here so it forces a thread context switch within valgrind.
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds{10});
|
||||
tp.shutdown();
|
||||
|
||||
print_stats("benchmark thread_pool{N} counter task", iterations, start, sc::now());
|
||||
REQUIRE(counter == iterations);
|
||||
REQUIRE(tp.empty());
|
||||
}
|
||||
|
||||
TEST_CASE("benchmark counter task scheduler{1} yield", "[benchmark]")
|
||||
{
|
||||
constexpr std::size_t iterations = default_iterations;
|
||||
constexpr std::size_t ops = iterations * 2; // the external resume is still a resume op
|
||||
|
||||
coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
|
||||
|
||||
std::atomic<uint64_t> counter{0};
|
||||
std::vector<coro::task<void>> tasks{};
|
||||
tasks.reserve(iterations);
|
||||
|
||||
auto make_task = [&]() -> coro::task<void> {
|
||||
co_await s.schedule();
|
||||
co_await s.yield();
|
||||
counter.fetch_add(1, std::memory_order::relaxed);
|
||||
co_return;
|
||||
};
|
||||
|
||||
auto start = sc::now();
|
||||
|
||||
for (std::size_t i = 0; i < iterations; ++i)
|
||||
{
|
||||
tasks.emplace_back(make_task());
|
||||
}
|
||||
|
||||
coro::sync_wait(coro::when_all(std::move(tasks)));
|
||||
|
||||
auto stop = sc::now();
|
||||
print_stats("benchmark counter task scheduler{1} yield", ops, start, stop);
|
||||
REQUIRE(s.empty());
|
||||
REQUIRE(counter == iterations);
|
||||
}
|
||||
|
||||
TEST_CASE("benchmark counter task scheduler{1} yield_for", "[benchmark]")
|
||||
{
|
||||
constexpr std::size_t iterations = default_iterations;
|
||||
constexpr std::size_t ops = iterations * 2; // the external resume is still a resume op
|
||||
|
||||
coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
|
||||
|
||||
std::atomic<uint64_t> counter{0};
|
||||
std::vector<coro::task<void>> tasks{};
|
||||
tasks.reserve(iterations);
|
||||
|
||||
auto make_task = [&]() -> coro::task<void> {
|
||||
co_await s.schedule();
|
||||
co_await s.yield_for(std::chrono::milliseconds{1});
|
||||
counter.fetch_add(1, std::memory_order::relaxed);
|
||||
co_return;
|
||||
};
|
||||
|
||||
auto start = sc::now();
|
||||
|
||||
for (std::size_t i = 0; i < iterations; ++i)
|
||||
{
|
||||
tasks.emplace_back(make_task());
|
||||
}
|
||||
|
||||
coro::sync_wait(coro::when_all(std::move(tasks)));
|
||||
|
||||
auto stop = sc::now();
|
||||
print_stats("benchmark counter task scheduler{1} yield", ops, start, stop);
|
||||
REQUIRE(s.empty());
|
||||
REQUIRE(counter == iterations);
|
||||
}
|
||||
|
||||
TEST_CASE("benchmark counter task scheduler await event from another coroutine", "[benchmark]")
|
||||
{
|
||||
constexpr std::size_t iterations = default_iterations;
|
||||
constexpr std::size_t ops = iterations * 3; // two tasks + event resume
|
||||
|
||||
coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
|
||||
|
||||
std::vector<std::unique_ptr<coro::event>> events{};
|
||||
events.reserve(iterations);
|
||||
for (std::size_t i = 0; i < iterations; ++i)
|
||||
{
|
||||
events.emplace_back(std::make_unique<coro::event>());
|
||||
}
|
||||
|
||||
std::vector<coro::task<void>> tasks{};
|
||||
tasks.reserve(iterations * 2); // one for wait, one for resume
|
||||
|
||||
std::atomic<uint64_t> counter{0};
|
||||
|
||||
auto wait_func = [&](std::size_t index) -> coro::task<void> {
|
||||
co_await s.schedule();
|
||||
co_await* events[index];
|
||||
counter.fetch_add(1, std::memory_order::relaxed);
|
||||
co_return;
|
||||
};
|
||||
|
||||
auto resume_func = [&](std::size_t index) -> coro::task<void> {
|
||||
co_await s.schedule();
|
||||
events[index]->set();
|
||||
co_return;
|
||||
};
|
||||
|
||||
auto start = sc::now();
|
||||
|
||||
for (std::size_t i = 0; i < iterations; ++i)
|
||||
{
|
||||
tasks.emplace_back(wait_func(i));
|
||||
tasks.emplace_back(resume_func(i));
|
||||
}
|
||||
|
||||
coro::sync_wait(coro::when_all(std::move(tasks)));
|
||||
|
||||
auto stop = sc::now();
|
||||
print_stats("benchmark counter task scheduler await event from another coroutine", ops, start, stop);
|
||||
REQUIRE(counter == iterations);
|
||||
|
||||
// valgrind workaround
|
||||
while (!s.empty())
|
||||
{
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds{1});
|
||||
}
|
||||
REQUIRE(s.empty());
|
||||
}
|
||||
|
||||
TEST_CASE("benchmark tcp_server echo server", "[benchmark]")
|
||||
{
|
||||
const constexpr std::size_t connections = 16;
|
||||
const constexpr std::size_t messages_per_connection = 10'000;
|
||||
const constexpr std::size_t ops = connections * messages_per_connection;
|
||||
|
||||
const std::string msg = "im a data point in a stream of bytes";
|
||||
|
||||
const constexpr std::size_t server_count = 1;
|
||||
    const constexpr std::size_t client_count = 1;

    const constexpr std::size_t server_thread_count = 1;
    const constexpr std::size_t client_thread_count = 1;

    std::atomic<uint64_t> listening{0};
    std::atomic<uint64_t> accepted{0};
    std::atomic<uint64_t> clients_completed{0};

    std::atomic<uint64_t> server_id{0};

    struct server
    {
        uint64_t id;
        coro::io_scheduler scheduler{
            coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = server_thread_count}}};
        coro::task_container task_container{scheduler};
        uint64_t live_clients{0};
        coro::event wait_for_clients{};
    };

    struct client
    {
        coro::io_scheduler scheduler{
            coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = client_thread_count}}};
        std::vector<coro::task<void>> tasks{};
    };

    auto make_on_connection_task = [&](server& s, coro::net::tcp_client client) -> coro::task<void> {
        std::string in(64, '\0');

        // Echo the messages until the socket is closed.
        while (true)
        {
            auto pstatus = co_await client.poll(coro::poll_op::read);
            REQUIRE(pstatus == coro::poll_status::event);

            auto [rstatus, rspan] = client.recv(in);
            if (rstatus == coro::net::recv_status::closed)
            {
                REQUIRE(rspan.empty());
                break;
            }
            REQUIRE(rstatus == coro::net::recv_status::ok);

            in.resize(rspan.size());

            auto [sstatus, remaining] = client.send(in);
            REQUIRE(sstatus == coro::net::send_status::ok);
            REQUIRE(remaining.empty());
        }

        s.live_clients--;
        if (s.live_clients == 0)
        {
            s.wait_for_clients.set();
        }
        co_return;
    };

    auto make_server_task = [&](server& s) -> coro::task<void> {
        co_await s.scheduler.schedule();

        coro::net::tcp_server server{s.scheduler};

        listening++;

        while (accepted.load(std::memory_order::acquire) < connections)
        {
            auto pstatus = co_await server.poll(std::chrono::milliseconds{1});
            if (pstatus == coro::poll_status::event)
            {
                auto c = server.accept();
                if (c.socket().is_valid())
                {
                    accepted.fetch_add(1, std::memory_order::release);

                    s.live_clients++;
                    s.task_container.start(make_on_connection_task(s, std::move(c)));
                }
            }
        }

        co_await s.wait_for_clients;
        co_return;
    };

    std::mutex g_histogram_mutex;
    std::map<std::chrono::milliseconds, uint64_t> g_histogram;

    auto make_client_task = [&](client& c) -> coro::task<void> {
        co_await c.scheduler.schedule();
        std::map<std::chrono::milliseconds, uint64_t> histogram;
        coro::net::tcp_client client{c.scheduler};

        auto cstatus = co_await client.connect(); // std::chrono::seconds{1});
        REQUIRE(cstatus == coro::net::connect_status::connected);

        for (size_t i = 1; i <= messages_per_connection; ++i)
        {
            auto req_start = std::chrono::steady_clock::now();
            auto [sstatus, remaining] = client.send(msg);
            REQUIRE(sstatus == coro::net::send_status::ok);
            REQUIRE(remaining.empty());

            auto pstatus = co_await client.poll(coro::poll_op::read);
            REQUIRE(pstatus == coro::poll_status::event);

            std::string response(64, '\0');
            auto [rstatus, rspan] = client.recv(response);
            REQUIRE(rstatus == coro::net::recv_status::ok);
            REQUIRE(rspan.size() == msg.size());
            response.resize(rspan.size());
            REQUIRE(response == msg);

            auto req_stop = std::chrono::steady_clock::now();
            histogram[std::chrono::duration_cast<std::chrono::milliseconds>(req_stop - req_start)]++;
        }

        {
            std::scoped_lock lk{g_histogram_mutex};
            for (auto [ms, count] : histogram)
            {
                g_histogram[ms] += count;
            }
        }

        clients_completed.fetch_add(1);

        co_return;
    };

    auto start = sc::now();

    // Create the server to accept incoming tcp connections.
    std::vector<std::thread> server_threads{};
    for (size_t i = 0; i < server_count; ++i)
    {
        server_threads.emplace_back(std::thread{[&]() {
            server s{};
            s.id = server_id++;
            coro::sync_wait(make_server_task(s));
            s.scheduler.shutdown();
        }});
    }

    // The server can take a small bit of time to start up; if we don't wait for it to notify us,
    // the first few connections can easily fail to connect, causing this test to fail.
    while (listening != server_count)
    {
        std::this_thread::sleep_for(std::chrono::milliseconds{1});
    }

    // Spawn N client connections across a set number of clients.
    std::vector<std::thread> client_threads{};
    std::vector<client> clients{};
    for (size_t i = 0; i < client_count; ++i)
    {
        client_threads.emplace_back(std::thread{[&]() {
            client c{};
            for (size_t i = 0; i < connections / client_count; ++i)
            {
                c.tasks.emplace_back(make_client_task(c));
            }
            coro::sync_wait(coro::when_all(std::move(c.tasks)));
            c.scheduler.shutdown();
        }});
    }

    for (auto& ct : client_threads)
    {
        ct.join();
    }

    for (auto& st : server_threads)
    {
        st.join();
    }

    auto stop = sc::now();
    print_stats("benchmark tcp_client and tcp_server", ops, start, stop);

    for (const auto& [ms, count] : g_histogram)
    {
        std::cerr << ms.count() << " : " << count << "\n";
    }
}
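For reference, print_stats and the sc clock alias are defined earlier in this benchmark file, outside the lines shown here. Below is a minimal hedged sketch of what a helper with that call shape could look like; the name matches the call site above, but the body and output format are illustrative assumptions, not the repo's actual helper.

// Hypothetical print_stats sketch: derives ops/sec from an operation count
// and two steady_clock time points, matching the call site above.
#include <chrono>
#include <cstdint>
#include <iostream>
#include <string>

using sc = std::chrono::steady_clock;

inline auto print_stats(const std::string& name, uint64_t ops, sc::time_point start, sc::time_point stop) -> void
{
    auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);
    // Guard against a sub-millisecond run to avoid dividing by zero.
    auto ops_per_sec = (duration.count() > 0) ? ops * 1000 / static_cast<uint64_t>(duration.count()) : ops;

    std::cerr << name << ":\n"
              << "    ops:      " << ops << "\n"
              << "    duration: " << duration.count() << "ms\n"
              << "    ops/sec:  " << ops_per_sec << "\n";
}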
test/catch.hpp: 17615 deletions
File diff suppressed because it is too large
@@ -1,30 +0,0 @@
#define CATCH_CONFIG_MAIN
#include "catch.hpp"

#include <signal.h>

/**
 * This structure invokes a constructor to set up some global test settings that are needed prior
 * to executing the tests.
 */
struct test_setup
{
    test_setup()
    {
        // Ignore SIGPIPE, the library should be handling these gracefully.
        signal(SIGPIPE, SIG_IGN);

        // For SSL/TLS tests create a localhost cert.pem and key.pem; the tests expect these files
        // to be generated into the same directory that the tests are running in.
        system(
            "openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -days 365 -subj '/CN=localhost' -nodes");
    }

    ~test_setup()
    {
        // Cleanup the temporary key.pem and cert.pem files.
        system("rm key.pem cert.pem");
    }
};

static test_setup g_test_setup{};
@@ -1,31 +0,0 @@
#include "catch.hpp"

#include <coro/coro.hpp>

#include <chrono>

TEST_CASE("dns_resolver basic")
{
    coro::io_scheduler scheduler{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
    coro::net::dns_resolver dns_resolver{scheduler, std::chrono::milliseconds{5000}};

    auto make_host_by_name_task = [&](coro::net::hostname hn) -> coro::task<void> {
        co_await scheduler.schedule();
        auto result_ptr = co_await std::move(dns_resolver.host_by_name(hn));

        if (result_ptr->status() == coro::net::dns_status::complete)
        {
            for (const auto& ip_addr : result_ptr->ip_addresses())
            {
                std::cerr << coro::net::to_string(ip_addr.domain()) << " " << ip_addr.to_string() << "\n";
            }
        }

        co_return;
    };

    coro::sync_wait(make_host_by_name_task(coro::net::hostname{"www.example.com"}));

    scheduler.shutdown();
    REQUIRE(scheduler.empty());
}
@@ -1,73 +0,0 @@
#include "catch.hpp"

#include <coro/coro.hpp>

#include <chrono>
#include <iomanip>

TEST_CASE("net::ip_address from_string() ipv4")
{
    {
        auto ip_addr = coro::net::ip_address::from_string("127.0.0.1");
        REQUIRE(ip_addr.to_string() == "127.0.0.1");
        REQUIRE(ip_addr.domain() == coro::net::domain_t::ipv4);
        std::array<uint8_t, coro::net::ip_address::ipv4_len> expected{127, 0, 0, 1};
        REQUIRE(std::equal(expected.begin(), expected.end(), ip_addr.data().begin()));
    }

    {
        auto ip_addr = coro::net::ip_address::from_string("255.255.0.0");
        REQUIRE(ip_addr.to_string() == "255.255.0.0");
        REQUIRE(ip_addr.domain() == coro::net::domain_t::ipv4);
        std::array<uint8_t, coro::net::ip_address::ipv4_len> expected{255, 255, 0, 0};
        REQUIRE(std::equal(expected.begin(), expected.end(), ip_addr.data().begin()));
    }
}

TEST_CASE("net::ip_address from_string() ipv6")
{
    {
        auto ip_addr =
            coro::net::ip_address::from_string("0123:4567:89ab:cdef:0123:4567:89ab:cdef", coro::net::domain_t::ipv6);
        REQUIRE(ip_addr.to_string() == "123:4567:89ab:cdef:123:4567:89ab:cdef");
        REQUIRE(ip_addr.domain() == coro::net::domain_t::ipv6);
        std::array<uint8_t, coro::net::ip_address::ipv6_len> expected{
            0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef, 0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef};
        REQUIRE(std::equal(expected.begin(), expected.end(), ip_addr.data().begin()));
    }

    {
        auto ip_addr = coro::net::ip_address::from_string("::", coro::net::domain_t::ipv6);
        REQUIRE(ip_addr.to_string() == "::");
        REQUIRE(ip_addr.domain() == coro::net::domain_t::ipv6);
        std::array<uint8_t, coro::net::ip_address::ipv6_len> expected{};
        REQUIRE(std::equal(expected.begin(), expected.end(), ip_addr.data().begin()));
    }

    {
        auto ip_addr = coro::net::ip_address::from_string("::1", coro::net::domain_t::ipv6);
        REQUIRE(ip_addr.to_string() == "::1");
        REQUIRE(ip_addr.domain() == coro::net::domain_t::ipv6);
        std::array<uint8_t, coro::net::ip_address::ipv6_len> expected{
            0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
        REQUIRE(std::equal(expected.begin(), expected.end(), ip_addr.data().begin()));
    }

    {
        auto ip_addr = coro::net::ip_address::from_string("1::1", coro::net::domain_t::ipv6);
        REQUIRE(ip_addr.to_string() == "1::1");
        REQUIRE(ip_addr.domain() == coro::net::domain_t::ipv6);
        std::array<uint8_t, coro::net::ip_address::ipv6_len> expected{
            0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01};
        REQUIRE(std::equal(expected.begin(), expected.end(), ip_addr.data().begin()));
    }

    {
        auto ip_addr = coro::net::ip_address::from_string("1::", coro::net::domain_t::ipv6);
        REQUIRE(ip_addr.to_string() == "1::");
        REQUIRE(ip_addr.domain() == coro::net::domain_t::ipv6);
        std::array<uint8_t, coro::net::ip_address::ipv6_len> expected{
            0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
        REQUIRE(std::equal(expected.begin(), expected.end(), ip_addr.data().begin()));
    }
}
@@ -1,197 +0,0 @@
#include "catch.hpp"

#include <coro/coro.hpp>

#include <iostream>

TEST_CASE("tcp_server ping server", "[tcp_server]")
{
    const std::string client_msg{"Hello from client"};
    const std::string server_msg{"Reply from server!"};

    coro::io_scheduler scheduler{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};

    auto make_client_task = [&]() -> coro::task<void> {
        co_await scheduler.schedule();
        coro::net::tcp_client client{scheduler};

        std::cerr << "client connect\n";
        auto cstatus = co_await client.connect();
        REQUIRE(cstatus == coro::net::connect_status::connected);

        // Skip polling for write; we should really only poll if the write is partial, which
        // shouldn't be required for this test.
        std::cerr << "client send()\n";
        auto [sstatus, remaining] = client.send(client_msg);
        REQUIRE(sstatus == coro::net::send_status::ok);
        REQUIRE(remaining.empty());

        // Poll for the server's response.
        std::cerr << "client poll(read)\n";
        auto pstatus = co_await client.poll(coro::poll_op::read);
        REQUIRE(pstatus == coro::poll_status::event);

        std::string buffer(256, '\0');
        std::cerr << "client recv()\n";
        auto [rstatus, rspan] = client.recv(buffer);
        REQUIRE(rstatus == coro::net::recv_status::ok);
        REQUIRE(rspan.size() == server_msg.length());
        buffer.resize(rspan.size());
        REQUIRE(buffer == server_msg);

        std::cerr << "client return\n";
        co_return;
    };

    auto make_server_task = [&]() -> coro::task<void> {
        co_await scheduler.schedule();
        coro::net::tcp_server server{scheduler};

        // Poll for client connection.
        std::cerr << "server poll(accept)\n";
        auto pstatus = co_await server.poll();
        REQUIRE(pstatus == coro::poll_status::event);
        std::cerr << "server accept()\n";
        auto client = server.accept();
        REQUIRE(client.socket().is_valid());

        // Poll for client request.
        std::cerr << "server poll(read)\n";
        pstatus = co_await client.poll(coro::poll_op::read);
        REQUIRE(pstatus == coro::poll_status::event);

        std::string buffer(256, '\0');
        std::cerr << "server recv()\n";
        auto [rstatus, rspan] = client.recv(buffer);
        REQUIRE(rstatus == coro::net::recv_status::ok);
        REQUIRE(rspan.size() == client_msg.size());
        buffer.resize(rspan.size());
        REQUIRE(buffer == client_msg);

        // Respond to client.
        std::cerr << "server send()\n";
        auto [sstatus, remaining] = client.send(server_msg);
        REQUIRE(sstatus == coro::net::send_status::ok);
        REQUIRE(remaining.empty());

        std::cerr << "server return\n";
        co_return;
    };

    coro::sync_wait(coro::when_all(make_server_task(), make_client_task()));
}

TEST_CASE("tcp_server with ssl", "[tcp_server]")
{
    coro::io_scheduler scheduler{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};

    coro::net::ssl_context client_ssl_context{};

    coro::net::ssl_context server_ssl_context{
        "cert.pem", coro::net::ssl_file_type::pem, "key.pem", coro::net::ssl_file_type::pem};

    std::string client_msg = "Hello world from SSL client!";
    std::string server_msg = "Hello world from SSL server!!";

    auto make_client_task = [&]() -> coro::task<void> {
        co_await scheduler.schedule();

        coro::net::tcp_client client{scheduler, coro::net::tcp_client::options{.ssl_ctx = &client_ssl_context}};

        std::cerr << "client.connect()\n";
        auto cstatus = co_await client.connect();
        REQUIRE(cstatus == coro::net::connect_status::connected);
        std::cerr << "client.connected\n";

        std::cerr << "client.ssl_handshake()\n";
        auto hstatus = co_await client.ssl_handshake();
        REQUIRE(hstatus == coro::net::ssl_handshake_status::ok);

        std::cerr << "client.poll(write)\n";
        auto pstatus = co_await client.poll(coro::poll_op::write);
        REQUIRE(pstatus == coro::poll_status::event);

        std::cerr << "client.send()\n";
        auto [sstatus, remaining] = client.send(client_msg);
        REQUIRE(sstatus == coro::net::send_status::ok);
        REQUIRE(remaining.empty());

        std::string response;
        response.resize(256, '\0');

        while (true)
        {
            std::cerr << "client.poll(read)\n";
            pstatus = co_await client.poll(coro::poll_op::read);
            REQUIRE(pstatus == coro::poll_status::event);

            std::cerr << "client.recv()\n";
            auto [rstatus, rspan] = client.recv(response);
            if (rstatus == coro::net::recv_status::would_block)
            {
                std::cerr << coro::net::to_string(rstatus) << "\n";
                continue;
            }
            else
            {
                std::cerr << coro::net::to_string(rstatus) << "\n";
                REQUIRE(rstatus == coro::net::recv_status::ok);
                REQUIRE(rspan.size() == server_msg.size());
                response.resize(rspan.size());
                break;
            }
        }

        REQUIRE(response == server_msg);
        std::cerr << "client received message: " << response << "\n";

        std::cerr << "client finished\n";
        co_return;
    };

    auto make_server_task = [&]() -> coro::task<void> {
        co_await scheduler.schedule();

        coro::net::tcp_server server{scheduler, coro::net::tcp_server::options{.ssl_ctx = &server_ssl_context}};

        std::cerr << "server.poll()\n";
        auto pstatus = co_await server.poll();
        REQUIRE(pstatus == coro::poll_status::event);

        std::cerr << "server.accept()\n";
        auto client = server.accept();
        REQUIRE(client.socket().is_valid());

        std::cerr << "server client.handshake()\n";
        auto hstatus = co_await client.ssl_handshake();
        REQUIRE(hstatus == coro::net::ssl_handshake_status::ok);

        std::cerr << "server client.poll(read)\n";
        pstatus = co_await client.poll(coro::poll_op::read);
        REQUIRE(pstatus == coro::poll_status::event);

        std::string buffer;
        buffer.resize(256, '\0');
        std::cerr << "server client.recv()\n";
        auto [rstatus, rspan] = client.recv(buffer);
        REQUIRE(rstatus == coro::net::recv_status::ok);
        REQUIRE(rspan.size() == client_msg.size());
        buffer.resize(rspan.size());
        REQUIRE(buffer == client_msg);
        std::cerr << "server received message: " << buffer << "\n";

        std::cerr << "server client.poll(write)\n";
        pstatus = co_await client.poll(coro::poll_op::write);
        REQUIRE(pstatus == coro::poll_status::event);

        std::cerr << "server client.send()\n";
        auto [sstatus, remaining] = client.send(server_msg);
        REQUIRE(sstatus == coro::net::send_status::ok);
        REQUIRE(remaining.empty());

        std::cerr << "server finished\n";
        co_return;
    };

    coro::sync_wait(coro::when_all(make_server_task(), make_client_task()));
}
@@ -1,116 +0,0 @@
#include "catch.hpp"

#include <coro/coro.hpp>

TEST_CASE("udp one way")
{
    const std::string msg{"aaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbcccccccccccccccccc"};

    coro::io_scheduler scheduler{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};

    auto make_send_task = [&]() -> coro::task<void> {
        co_await scheduler.schedule();
        coro::net::udp_peer peer{scheduler};
        coro::net::udp_peer::info peer_info{};

        auto [sstatus, remaining] = peer.sendto(peer_info, msg);
        REQUIRE(sstatus == coro::net::send_status::ok);
        REQUIRE(remaining.empty());

        co_return;
    };

    auto make_recv_task = [&]() -> coro::task<void> {
        co_await scheduler.schedule();
        coro::net::udp_peer::info self_info{.address = coro::net::ip_address::from_string("0.0.0.0")};

        coro::net::udp_peer self{scheduler, self_info};

        auto pstatus = co_await self.poll(coro::poll_op::read);
        REQUIRE(pstatus == coro::poll_status::event);

        std::string buffer(64, '\0');
        auto [rstatus, peer_info, rspan] = self.recvfrom(buffer);
        REQUIRE(rstatus == coro::net::recv_status::ok);
        REQUIRE(peer_info.address == coro::net::ip_address::from_string("127.0.0.1"));
        // The peer's port will be randomly picked by the kernel since it wasn't bound.
        REQUIRE(rspan.size() == msg.size());
        buffer.resize(rspan.size());
        REQUIRE(buffer == msg);

        co_return;
    };

    coro::sync_wait(coro::when_all(make_recv_task(), make_send_task()));
}

TEST_CASE("udp echo peers")
{
    const std::string peer1_msg{"Hello from peer1!"};
    const std::string peer2_msg{"Hello from peer2!!"};

    coro::io_scheduler scheduler{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};

    auto make_peer_task = [&scheduler](
                              uint16_t my_port,
                              uint16_t peer_port,
                              bool send_first,
                              const std::string my_msg,
                              const std::string peer_msg) -> coro::task<void> {
        co_await scheduler.schedule();
        coro::net::udp_peer::info my_info{.address = coro::net::ip_address::from_string("0.0.0.0"), .port = my_port};
        coro::net::udp_peer::info peer_info{
            .address = coro::net::ip_address::from_string("127.0.0.1"), .port = peer_port};

        coro::net::udp_peer me{scheduler, my_info};

        if (send_first)
        {
            // Send my message to my peer first.
            auto [sstatus, remaining] = me.sendto(peer_info, my_msg);
            REQUIRE(sstatus == coro::net::send_status::ok);
            REQUIRE(remaining.empty());
        }
        else
        {
            // Poll for my peer's message first.
            auto pstatus = co_await me.poll(coro::poll_op::read);
            REQUIRE(pstatus == coro::poll_status::event);

            std::string buffer(64, '\0');
            auto [rstatus, recv_peer_info, rspan] = me.recvfrom(buffer);
            REQUIRE(rstatus == coro::net::recv_status::ok);
            REQUIRE(recv_peer_info == peer_info);
            REQUIRE(rspan.size() == peer_msg.size());
            buffer.resize(rspan.size());
            REQUIRE(buffer == peer_msg);
        }

        if (send_first)
        {
            // I sent first so now I need to await my peer's message.
            auto pstatus = co_await me.poll(coro::poll_op::read);
            REQUIRE(pstatus == coro::poll_status::event);

            std::string buffer(64, '\0');
            auto [rstatus, recv_peer_info, rspan] = me.recvfrom(buffer);
            REQUIRE(rstatus == coro::net::recv_status::ok);
            REQUIRE(recv_peer_info == peer_info);
            REQUIRE(rspan.size() == peer_msg.size());
            buffer.resize(rspan.size());
            REQUIRE(buffer == peer_msg);
        }
        else
        {
            auto [sstatus, remaining] = me.sendto(peer_info, my_msg);
            REQUIRE(sstatus == coro::net::send_status::ok);
            REQUIRE(remaining.empty());
        }

        co_return;
    };

    coro::sync_wait(coro::when_all(
        make_peer_task(8081, 8080, false, peer2_msg, peer1_msg),
        make_peer_task(8080, 8081, true, peer1_msg, peer2_msg)));
}
@@ -1,96 +0,0 @@
#include "catch.hpp"

#include <coro/coro.hpp>

#include <chrono>
#include <thread>

TEST_CASE("event single awaiter", "[event]")
{
    coro::event e{};

    auto func = [&]() -> coro::task<uint64_t> {
        co_await e;
        co_return 42;
    };

    auto task = func();

    task.resume();
    REQUIRE_FALSE(task.is_ready());
    e.set(); // this will automatically resume the task that is awaiting the event.
    REQUIRE(task.is_ready());
    REQUIRE(task.promise().return_value() == 42);
}

auto producer(coro::event& event) -> void
{
    // Long running task that consumers are waiting for goes here...
    event.set();
}

auto consumer(const coro::event& event) -> coro::task<uint64_t>
{
    co_await event;
    // Normally consume from some object which has the stored result from the producer.
    co_return 42;
}

TEST_CASE("event one watcher", "[event]")
{
    coro::event e{};

    auto value = consumer(e);
    value.resume(); // start co_awaiting event
    REQUIRE_FALSE(value.is_ready());

    producer(e);

    REQUIRE(value.promise().return_value() == 42);
}

TEST_CASE("event multiple watchers", "[event]")
{
    coro::event e{};

    auto value1 = consumer(e);
    auto value2 = consumer(e);
    auto value3 = consumer(e);
    value1.resume(); // start co_awaiting event
    value2.resume();
    value3.resume();
    REQUIRE_FALSE(value1.is_ready());
    REQUIRE_FALSE(value2.is_ready());
    REQUIRE_FALSE(value3.is_ready());

    producer(e);

    REQUIRE(value1.promise().return_value() == 42);
    REQUIRE(value2.promise().return_value() == 42);
    REQUIRE(value3.promise().return_value() == 42);
}

TEST_CASE("event reset", "[event]")
{
    coro::event e{};

    e.reset();
    REQUIRE_FALSE(e.is_set());

    auto value1 = consumer(e);
    value1.resume(); // start co_awaiting event
    REQUIRE_FALSE(value1.is_ready());

    producer(e);
    REQUIRE(value1.promise().return_value() == 42);

    e.reset();

    auto value2 = consumer(e);
    value2.resume();
    REQUIRE_FALSE(value2.is_ready());

    producer(e);

    REQUIRE(value2.promise().return_value() == 42);
}
@@ -1,40 +0,0 @@
#include "catch.hpp"

#include <coro/coro.hpp>

TEST_CASE("generator single yield", "[generator]")
{
    std::string msg{"Hello World Generator!"};
    auto func = [&]() -> coro::generator<std::string> { co_yield msg; };

    for (const auto& v : func())
    {
        REQUIRE(v == msg);
    }
}

TEST_CASE("generator infinite incrementing integer yield", "[generator]")
{
    constexpr const int64_t max = 1024;

    auto func = []() -> coro::generator<int64_t> {
        int64_t i{0};
        while (true)
        {
            ++i;
            co_yield i;
        }
    };

    int64_t v{1};
    for (const auto& v_1 : func())
    {
        REQUIRE(v == v_1);
        ++v;

        if (v > max)
        {
            break;
        }
    }
}
@@ -1,616 +0,0 @@
#include "catch.hpp"

#include <coro/coro.hpp>

#include <atomic>
#include <chrono>
#include <thread>

#include <cstring>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <sys/socket.h>
#include <sys/timerfd.h>
#include <sys/types.h>
#include <unistd.h>

using namespace std::chrono_literals;

TEST_CASE("io_scheduler schedule single task", "[io_scheduler]")
{
    coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};

    auto make_task = [&]() -> coro::task<uint64_t> {
        co_await s.schedule();
        co_return 42;
    };

    auto value = coro::sync_wait(make_task());
    REQUIRE(value == 42);
    s.shutdown();
    REQUIRE(s.empty());
}

TEST_CASE("io_scheduler submit multiple tasks", "[io_scheduler]")
{
    constexpr std::size_t n = 1000;
    std::atomic<uint64_t> counter{0};
    std::vector<coro::task<void>> tasks{};
    tasks.reserve(n);
    coro::io_scheduler s{};

    auto make_task = [&]() -> coro::task<void> {
        co_await s.schedule();
        counter++;
        co_return;
    };
    for (std::size_t i = 0; i < n; ++i)
    {
        tasks.emplace_back(make_task());
    }

    coro::sync_wait(coro::when_all(std::move(tasks)));

    REQUIRE(counter == n);
}

TEST_CASE("io_scheduler task with multiple events", "[io_scheduler]")
{
    std::atomic<uint64_t> counter{0};
    coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};

    coro::event e1;
    coro::event e2;
    coro::event e3;

    auto make_wait_task = [&]() -> coro::task<void> {
        co_await s.schedule();
        co_await e1;
        counter++;
        co_await e2;
        counter++;
        co_await e3;
        counter++;
        co_return;
    };

    auto make_set_task = [&](coro::event& e) -> coro::task<void> {
        co_await s.schedule();
        e.set();
    };

    coro::sync_wait(coro::when_all(make_wait_task(), make_set_task(e1), make_set_task(e2), make_set_task(e3)));

    REQUIRE(counter == 3);

    s.shutdown();
    REQUIRE(s.empty());
}

TEST_CASE("io_scheduler task with read poll", "[io_scheduler]")
{
    auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
    coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};

    auto make_poll_read_task = [&]() -> coro::task<void> {
        co_await s.schedule();
        auto status = co_await s.poll(trigger_fd, coro::poll_op::read);
        REQUIRE(status == coro::poll_status::event);
        co_return;
    };

    auto make_poll_write_task = [&]() -> coro::task<void> {
        co_await s.schedule();
        uint64_t value{42};
        write(trigger_fd, &value, sizeof(value));
        co_return;
    };

    coro::sync_wait(coro::when_all(make_poll_read_task(), make_poll_write_task()));

    s.shutdown();
    REQUIRE(s.empty());
    close(trigger_fd);
}

TEST_CASE("io_scheduler task with read poll with timeout", "[io_scheduler]")
{
    auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
    coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};

    auto make_poll_read_task = [&]() -> coro::task<void> {
        co_await s.schedule();
        // Poll with a timeout but don't time out.
        auto status = co_await s.poll(trigger_fd, coro::poll_op::read, 50ms);
        REQUIRE(status == coro::poll_status::event);
        co_return;
    };

    auto make_poll_write_task = [&]() -> coro::task<void> {
        co_await s.schedule();
        uint64_t value{42};
        write(trigger_fd, &value, sizeof(value));
        co_return;
    };

    coro::sync_wait(coro::when_all(make_poll_read_task(), make_poll_write_task()));

    s.shutdown();
    REQUIRE(s.empty());
    close(trigger_fd);
}

TEST_CASE("io_scheduler task with read poll timeout", "[io_scheduler]")
{
    auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
    coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};

    auto make_task = [&]() -> coro::task<void> {
        co_await s.schedule();
        // Poll with a timeout and time out.
        auto status = co_await s.poll(trigger_fd, coro::poll_op::read, 10ms);
        REQUIRE(status == coro::poll_status::timeout);
        co_return;
    };

    coro::sync_wait(make_task());

    s.shutdown();
    REQUIRE(s.empty());
    close(trigger_fd);
}

// TODO: This probably requires a TCP socket?
// TEST_CASE("io_scheduler task with read poll closed socket", "[io_scheduler]")
// {
//     auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
//     coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options { .thread_count = 1 }}};

//     auto make_poll_task = [&]() -> coro::task<void> {
//         co_await s.schedule();
//         auto status = co_await s.poll(trigger_fd, coro::poll_op::read, 1000ms);
//         REQUIRE(status == coro::poll_status::closed);
//         co_return;
//     };

//     auto make_close_task = [&]() -> coro::task<void> {
//         co_await s.schedule();
//         std::this_thread::sleep_for(100ms);
//         // shutdown(trigger_fd, SHUT_RDWR);
//         close(trigger_fd);
//         co_return;
//     };

//     coro::sync_wait(coro::when_all(make_poll_task(), make_close_task()));

//     s.shutdown();
//     REQUIRE(s.empty());
// }
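The TODO above is right that an eventfd never reports a closed peer. Below is a hedged sketch of one way to produce a closed-peer event without a full TCP server, using an AF_UNIX socketpair and plain epoll; whether the scheduler maps EPOLLHUP/EPOLLRDHUP to coro::poll_status::closed is an assumption here, not documented behavior.

// Hedged sketch, not part of the test suite: closing one end of a socketpair
// makes epoll report EPOLLHUP/EPOLLRDHUP on the surviving end, which is the
// kernel-level signal a poll_status::closed mapping would presumably key off.
#include <cassert>
#include <sys/epoll.h>
#include <sys/socket.h>
#include <unistd.h>

int main()
{
    int fds[2];
    assert(socketpair(AF_UNIX, SOCK_STREAM, 0, fds) == 0);
    close(fds[1]); // the "peer" goes away

    int ep = epoll_create1(EPOLL_CLOEXEC);
    epoll_event ev{};
    ev.events  = EPOLLIN | EPOLLRDHUP;
    ev.data.fd = fds[0];
    epoll_ctl(ep, EPOLL_CTL_ADD, fds[0], &ev);

    epoll_event out{};
    int n = epoll_wait(ep, &out, 1, 1000);
    assert(n == 1);
    assert(out.events & (EPOLLHUP | EPOLLRDHUP)); // closed peer surfaces here

    close(fds[0]);
    close(ep);
    return 0;
}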

TEST_CASE("io_scheduler separate thread resume", "[io_scheduler]")
{
    coro::io_scheduler s1{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
    coro::io_scheduler s2{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};

    coro::event e{};

    auto make_s1_task = [&]() -> coro::task<void> {
        co_await s1.schedule();
        auto tid = std::this_thread::get_id();
        co_await e;

        // This coroutine will hop to the other scheduler's single thread upon resuming.
        REQUIRE_FALSE(tid == std::this_thread::get_id());

        co_return;
    };

    auto make_s2_task = [&]() -> coro::task<void> {
        co_await s2.schedule();
        // Wait a bit to be sure the wait on 'e' in the other scheduler is done first.
        std::this_thread::sleep_for(10ms);
        e.set();
        co_return;
    };

    coro::sync_wait(coro::when_all(make_s1_task(), make_s2_task()));

    s1.shutdown();
    REQUIRE(s1.empty());
    s2.shutdown();
    REQUIRE(s2.empty());
}

TEST_CASE("io_scheduler separate thread resume spawned thread", "[io_scheduler]")
{
    coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};

    auto make_task = [&]() -> coro::task<void> {
        co_await s.schedule();
        coro::event e{};

        auto tid = std::this_thread::get_id();

        // Normally this thread would already be running in a real world use case. In general a
        // 3rd party api's on-complete callback will take "user data" via a void* or let variables
        // be captured in a lambda; here we mimic an on-complete callback by capturing the handle.
        std::thread third_party_thread([&e, &s]() -> void {
            // mimic some expensive computation
            // Resume the coroutine back onto the scheduler, not this background thread.
            e.set(s);
        });
        third_party_thread.detach();

        // Wait on the handle until the 3rd party service is completed.
        co_await e;
        REQUIRE(tid == std::this_thread::get_id());
    };

    coro::sync_wait(make_task());

    s.shutdown();
    REQUIRE(s.empty());
}

TEST_CASE("io_scheduler separate thread resume with return", "[io_scheduler]")
{
    constexpr uint64_t expected_value{1337};
    coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};

    coro::event start_service{};
    coro::event service_done{};
    std::atomic<uint64_t> output;

    std::thread service{[&]() -> void {
        while (!start_service.is_set())
        {
            std::this_thread::sleep_for(1ms);
        }

        output = expected_value;
        service_done.set(s);
    }};

    auto third_party_service = [&](int multiplier) -> coro::task<uint64_t> {
        start_service.set();
        co_await service_done;
        co_return output * multiplier;
    };

    auto make_task = [&]() -> coro::task<void> {
        co_await s.schedule();

        int multiplier{5};
        uint64_t value = co_await third_party_service(multiplier);
        REQUIRE(value == (expected_value * multiplier));
    };

    coro::sync_wait(make_task());

    service.join();
    s.shutdown();
    REQUIRE(s.empty());
}

TEST_CASE("io_scheduler with basic task", "[io_scheduler]")
{
    constexpr std::size_t expected_value{5};
    coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};

    auto add_data = [&](uint64_t val) -> coro::task<int> {
        co_await s.schedule();
        co_return val;
    };

    auto func = [&]() -> coro::task<int> {
        co_await s.schedule();

        auto output_tasks = co_await coro::when_all(add_data(1), add_data(1), add_data(1), add_data(1), add_data(1));

        int counter{0};
        std::apply([&counter](auto&&... tasks) -> void { ((counter += tasks.return_value()), ...); }, output_tasks);

        co_return counter;
    };

    auto counter = coro::sync_wait(func());

    REQUIRE(counter == expected_value);
}

TEST_CASE("io_scheduler schedule_after", "[io_scheduler]")
{
    constexpr std::chrono::milliseconds wait_for{50};
    std::atomic<uint64_t> counter{0};
    std::thread::id tid;

    auto func = [&](coro::io_scheduler& s, std::chrono::milliseconds amount) -> coro::task<void> {
        co_await s.schedule_after(amount);
        ++counter;
        // Make sure schedule_after() context switches into the worker thread.
        REQUIRE(tid == std::this_thread::get_id());
        co_return;
    };

    {
        coro::io_scheduler s{coro::io_scheduler::options{
            .pool = coro::thread_pool::options{
                .thread_count = 1, .on_thread_start_functor = [&](std::size_t) { tid = std::this_thread::get_id(); }}}};
        auto start = std::chrono::steady_clock::now();
        coro::sync_wait(func(s, 0ms));
        auto stop = std::chrono::steady_clock::now();
        auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);

        REQUIRE(counter == 1);
        REQUIRE(duration < wait_for);
        s.shutdown();
        REQUIRE(s.empty());
    }

    {
        coro::io_scheduler s{coro::io_scheduler::options{
            .pool = coro::thread_pool::options{
                .thread_count = 1, .on_thread_start_functor = [&](std::size_t) { tid = std::this_thread::get_id(); }}}};

        auto start = std::chrono::steady_clock::now();
        coro::sync_wait(func(s, wait_for));
        auto stop = std::chrono::steady_clock::now();
        auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);

        REQUIRE(counter == 2);
        REQUIRE(duration >= wait_for);
        s.shutdown();
        REQUIRE(s.empty());
    }
}

TEST_CASE("io_scheduler schedule_at", "[io_scheduler]")
{
    // Because schedule_at() takes its own time internally, the wait_for might be off by a bit.
    constexpr std::chrono::milliseconds epsilon{3};
    constexpr std::chrono::milliseconds wait_for{50};
    std::atomic<uint64_t> counter{0};
    std::thread::id tid;

    coro::io_scheduler s{coro::io_scheduler::options{
        .pool = coro::thread_pool::options{
            .thread_count = 1, .on_thread_start_functor = [&](std::size_t) { tid = std::this_thread::get_id(); }}}};

    auto func = [&](std::chrono::steady_clock::time_point time) -> coro::task<void> {
        co_await s.schedule_at(time);
        ++counter;
        REQUIRE(tid == std::this_thread::get_id());
        co_return;
    };

    {
        auto start = std::chrono::steady_clock::now();
        coro::sync_wait(func(std::chrono::steady_clock::now() + wait_for));
        auto stop = std::chrono::steady_clock::now();
        auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);

        REQUIRE(counter == 1);
        REQUIRE(duration >= (wait_for - epsilon));
    }

    {
        auto start = std::chrono::steady_clock::now();
        coro::sync_wait(func(std::chrono::steady_clock::now()));
        auto stop = std::chrono::steady_clock::now();
        auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);

        REQUIRE(counter == 2);
        REQUIRE(duration <= 10ms); // Just verify it's less than the wait_for time period.
    }

    {
        auto start = std::chrono::steady_clock::now();
        coro::sync_wait(func(std::chrono::steady_clock::now() - 1s));
        auto stop = std::chrono::steady_clock::now();
        auto duration = std::chrono::duration_cast<std::chrono::milliseconds>(stop - start);

        REQUIRE(counter == 3);
        REQUIRE(duration <= 10ms);
    }
}

TEST_CASE("io_scheduler yield", "[io_scheduler]")
{
    std::thread::id tid;
    coro::io_scheduler s{coro::io_scheduler::options{
        .pool = coro::thread_pool::options{
            .thread_count = 1, .on_thread_start_functor = [&](std::size_t) { tid = std::this_thread::get_id(); }}}};

    auto func = [&]() -> coro::task<void> {
        REQUIRE(tid != std::this_thread::get_id());
        co_await s.schedule();
        REQUIRE(tid == std::this_thread::get_id());
        co_await s.yield(); // this is really a thread pool function but /shrug
        REQUIRE(tid == std::this_thread::get_id());
        co_return;
    };

    coro::sync_wait(func());
}

TEST_CASE("io_scheduler yield_for", "[io_scheduler]")
{
    coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};

    const std::chrono::milliseconds wait_for{50};

    auto make_task = [&]() -> coro::task<std::chrono::milliseconds> {
        co_await s.schedule();
        auto start = std::chrono::steady_clock::now();
        co_await s.yield_for(wait_for);
        co_return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - start);
    };

    auto duration = coro::sync_wait(make_task());
    REQUIRE(duration >= wait_for);
}

TEST_CASE("io_scheduler yield_until", "[io_scheduler]")
{
    coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};

    // Because yield_until() takes its own time internally, the wait_for might be off by a bit.
    const std::chrono::milliseconds epsilon{3};
    const std::chrono::milliseconds wait_for{50};

    auto make_task = [&]() -> coro::task<std::chrono::milliseconds> {
        co_await s.schedule();
        auto start = std::chrono::steady_clock::now();
        co_await s.yield_until(start + wait_for);
        co_return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - start);
    };

    auto duration = coro::sync_wait(make_task());
    REQUIRE(duration >= (wait_for - epsilon));
}

TEST_CASE("io_scheduler multiple event waiters", "[io_scheduler]")
{
    const constexpr std::size_t total{10};
    coro::event e{};
    coro::io_scheduler s{};

    auto func = [&]() -> coro::task<uint64_t> {
        co_await e;
        co_return 1;
    };

    auto spawn = [&]() -> coro::task<void> {
        co_await s.schedule();
        std::vector<coro::task<uint64_t>> tasks;
        for (size_t i = 0; i < total; ++i)
        {
            tasks.emplace_back(func());
        }

        auto results = co_await coro::when_all(std::move(tasks));

        uint64_t counter{0};
        for (const auto& task : results)
        {
            counter += task.return_value();
        }
        REQUIRE(counter == total);
    };

    auto release = [&]() -> coro::task<void> {
        co_await s.schedule_after(10ms);
        e.set(s);
    };

    coro::sync_wait(coro::when_all(spawn(), release()));
}

TEST_CASE("io_scheduler self generating coroutine (stack overflow check)", "[io_scheduler]")
{
    const constexpr std::size_t total{1'000'000};
    uint64_t counter{0};
    coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};

    std::vector<coro::task<void>> tasks;
    tasks.reserve(total);

    auto func = [&](auto f) -> coro::task<void> {
        co_await s.schedule();
        ++counter;

        if (counter % total == 0)
        {
            co_return;
        }

        // co_await f(f) _will_ stack overflow since each coroutine links to its parent; by storing
        // each new invocation into the vector they are not linked, and we can make sure the scheduler
        // doesn't choke on this many tasks being scheduled.
        tasks.emplace_back(f(f));
        tasks.back().resume();
        co_return;
    };

    coro::sync_wait(func(func));

    while (tasks.size() < total - 1)
    {
        std::this_thread::sleep_for(1ms);
    }

    REQUIRE(tasks.size() == total - 1);
}

TEST_CASE("io_scheduler manual process events", "[io_scheduler]")
{
    auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
    coro::io_scheduler s{coro::io_scheduler::options{
        .thread_strategy = coro::io_scheduler::thread_strategy_t::manual,
        .pool = coro::thread_pool::options{
            .thread_count = 1,
        }}};

    std::atomic<bool> polling{false};

    auto make_poll_read_task = [&]() -> coro::task<void> {
        co_await s.schedule();
        polling = true;
        auto status = co_await s.poll(trigger_fd, coro::poll_op::read);
        REQUIRE(status == coro::poll_status::event);
        co_return;
    };

    auto make_poll_write_task = [&]() -> coro::task<void> {
        co_await s.schedule();
        uint64_t value{42};
        write(trigger_fd, &value, sizeof(value));
        co_return;
    };

    auto poll_task = make_poll_read_task();
    auto write_task = make_poll_write_task();

    poll_task.resume(); // get to co_await s.poll();
    while (!polling)
    {
        std::this_thread::sleep_for(10ms);
    }

    write_task.resume();

    REQUIRE(s.process_events(100ms) == 1);

    s.shutdown();
    REQUIRE(s.empty());
    close(trigger_fd);
}

TEST_CASE("io_scheduler task throws", "[io_scheduler]")
{
    coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};

    auto func = [&]() -> coro::task<uint64_t> {
        co_await s.schedule();
        throw std::runtime_error{"I always throw."};
        co_return 42;
    };

    REQUIRE_THROWS(coro::sync_wait(func()));
}

TEST_CASE("io_scheduler task throws after resume", "[io_scheduler]")
{
    coro::io_scheduler s{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};

    auto make_thrower = [&]() -> coro::task<bool> {
        co_await s.schedule();
        std::cerr << "Throwing task is doing some work...\n";
        co_await s.yield();
        throw std::runtime_error{"I always throw."};
        co_return true;
    };

    REQUIRE_THROWS(coro::sync_wait(make_thrower()));
}
@@ -1,100 +0,0 @@
#include "catch.hpp"

#include <coro/coro.hpp>

#include <chrono>
#include <thread>

TEST_CASE("latch count=0", "[latch]")
{
    coro::latch l{0};

    auto task = [&]() -> coro::task<uint64_t> {
        co_await l;
        co_return 42;
    }();

    task.resume();
    REQUIRE(task.is_ready()); // The latch never waits due to zero count.
    REQUIRE(task.promise().return_value() == 42);
}

TEST_CASE("latch count=1", "[latch]")
{
    coro::latch l{1};

    auto task = [&]() -> coro::task<uint64_t> {
        auto workers = l.remaining();
        co_await l;
        co_return workers;
    }();

    task.resume();
    REQUIRE_FALSE(task.is_ready());

    l.count_down();
    REQUIRE(task.is_ready());
    REQUIRE(task.promise().return_value() == 1);
}

TEST_CASE("latch count=1 count_down=5", "[latch]")
{
    coro::latch l{1};

    auto task = [&]() -> coro::task<uint64_t> {
        auto workers = l.remaining();
        co_await l;
        co_return workers;
    }();

    task.resume();
    REQUIRE_FALSE(task.is_ready());

    l.count_down(5);
    REQUIRE(task.is_ready());
    REQUIRE(task.promise().return_value() == 1);
}

TEST_CASE("latch count=5 count_down=1 x5", "[latch]")
{
    coro::latch l{5};

    auto task = [&]() -> coro::task<uint64_t> {
        auto workers = l.remaining();
        co_await l;
        co_return workers;
    }();

    task.resume();
    REQUIRE_FALSE(task.is_ready());

    l.count_down(1);
    REQUIRE_FALSE(task.is_ready());
    l.count_down(1);
    REQUIRE_FALSE(task.is_ready());
    l.count_down(1);
    REQUIRE_FALSE(task.is_ready());
    l.count_down(1);
    REQUIRE_FALSE(task.is_ready());
    l.count_down(1);
    REQUIRE(task.is_ready());
    REQUIRE(task.promise().return_value() == 5);
}

TEST_CASE("latch count=5 count_down=5", "[latch]")
{
    coro::latch l{5};

    auto task = [&]() -> coro::task<uint64_t> {
        auto workers = l.remaining();
        co_await l;
        co_return workers;
    }();

    task.resume();
    REQUIRE_FALSE(task.is_ready());

    l.count_down(5);
    REQUIRE(task.is_ready());
    REQUIRE(task.promise().return_value() == 5);
}
@@ -1,113 +0,0 @@
#include "catch.hpp"

#include <coro/coro.hpp>

#include <chrono>
#include <thread>

TEST_CASE("mutex single waiter not locked", "[mutex]")
{
    std::vector<uint64_t> output;

    coro::mutex m;

    auto make_emplace_task = [&](coro::mutex& m) -> coro::task<void> {
        std::cerr << "Acquiring lock\n";
        {
            auto scoped_lock = co_await m.lock();
            REQUIRE_FALSE(m.try_lock());
            std::cerr << "lock acquired, emplacing back 1\n";
            output.emplace_back(1);
            std::cerr << "coroutine done\n";
        }

        // The scoped lock should release the lock upon destructing.
        REQUIRE(m.try_lock());
        REQUIRE_FALSE(m.try_lock());
        m.unlock();

        co_return;
    };

    coro::sync_wait(make_emplace_task(m));

    REQUIRE(m.try_lock());
    m.unlock();

    REQUIRE(output.size() == 1);
    REQUIRE(output[0] == 1);
}

TEST_CASE("mutex many waiters until event", "[mutex]")
{
    std::atomic<uint64_t> value{0};
    std::vector<coro::task<void>> tasks;

    coro::thread_pool tp{coro::thread_pool::options{.thread_count = 1}};

    coro::mutex m; // acquires and holds the lock until the event is triggered
    coro::event e; // triggers the blocking thread to release the lock

    auto make_task = [&](uint64_t id) -> coro::task<void> {
        co_await tp.schedule();
        std::cerr << "id = " << id << " waiting to acquire the lock\n";
        auto scoped_lock = co_await m.lock();

        // Should always be locked upon acquiring the lock.
        REQUIRE_FALSE(m.try_lock());

        std::cerr << "id = " << id << " lock acquired\n";
        value.fetch_add(1, std::memory_order::relaxed);
        std::cerr << "id = " << id << " coroutine done\n";
        co_return;
    };

    auto make_block_task = [&]() -> coro::task<void> {
        co_await tp.schedule();
        std::cerr << "block task acquiring lock\n";
        auto scoped_lock = co_await m.lock();
        REQUIRE_FALSE(m.try_lock());
        std::cerr << "block task acquired lock, waiting on event\n";
        co_await e;
        co_return;
    };

    auto make_set_task = [&]() -> coro::task<void> {
        co_await tp.schedule();
        std::cerr << "set task setting event\n";
        e.set();
        co_return;
    };

    // Grab mutex so all threads block.
    tasks.emplace_back(make_block_task());

    // Create N tasks that attempt to lock the mutex.
    for (uint64_t i = 1; i <= 4; ++i)
    {
        tasks.emplace_back(make_task(i));
    }

    tasks.emplace_back(make_set_task());

    coro::sync_wait(coro::when_all(std::move(tasks)));

    REQUIRE(value == 4);
}

TEST_CASE("mutex scoped_lock unlock prior to scope exit", "[mutex]")
{
    coro::mutex m;

    auto make_task = [&]() -> coro::task<void> {
        {
            auto lk = co_await m.lock();
            REQUIRE_FALSE(m.try_lock());
            lk.unlock();
            REQUIRE(m.try_lock());
        }
        co_return;
    };

    coro::sync_wait(make_task());
}
@@ -1,113 +0,0 @@
#include "catch.hpp"

#include <coro/coro.hpp>

#include <chrono>
#include <thread>

TEST_CASE("ring_buffer zero num_elements", "[ring_buffer]")
{
    REQUIRE_THROWS(coro::ring_buffer<uint64_t, 0>{});
}

TEST_CASE("ring_buffer single element", "[ring_buffer]")
{
    const size_t iterations = 10;
    coro::ring_buffer<uint64_t, 1> rb{};

    std::vector<uint64_t> output{};

    auto make_producer_task = [&]() -> coro::task<void> {
        for (size_t i = 1; i <= iterations; ++i)
        {
            std::cerr << "produce: " << i << "\n";
            co_await rb.produce(i);
        }
        co_return;
    };

    auto make_consumer_task = [&]() -> coro::task<void> {
        for (size_t i = 1; i <= iterations; ++i)
        {
            auto value = co_await rb.consume();
            std::cerr << "consume: " << value << "\n";
            output.emplace_back(value);
        }
        co_return;
    };

    coro::sync_wait(coro::when_all(make_producer_task(), make_consumer_task()));

    for (size_t i = 1; i <= iterations; ++i)
    {
        REQUIRE(output[i - 1] == i);
    }

    REQUIRE(rb.empty());
}

TEST_CASE("ring_buffer many elements many producers many consumers", "[ring_buffer]")
{
    const size_t iterations = 1'000'000;
    const size_t consumers = 100;
    const size_t producers = 100;

    coro::thread_pool tp{coro::thread_pool::options{.thread_count = 4}};
    coro::ring_buffer<uint64_t, 64> rb{};

    auto make_producer_task = [&]() -> coro::task<void> {
        co_await tp.schedule();
        auto to_produce = iterations / producers;

        for (size_t i = 1; i <= to_produce; ++i)
        {
            co_await rb.produce(i);
        }

        // Wait for all the values to be consumed prior to sending the stop signal.
        while (!rb.empty())
        {
            co_await tp.yield();
        }

        rb.stop_signal_notify_waiters(); // signal to all consumers (or even producers) we are done/shutting down.

        co_return;
    };

    auto make_consumer_task = [&]() -> coro::task<void> {
        co_await tp.schedule();

        try
        {
            while (true)
            {
                auto value = co_await rb.consume();
                (void)value;
                co_await tp.yield(); // mimic some work
            }
        }
        catch (const coro::stop_signal&)
        {
            // requested to stop/shutdown.
        }

        co_return;
    };

    std::vector<coro::task<void>> tasks{};
    tasks.reserve(consumers * producers);

    for (size_t i = 0; i < consumers; ++i)
    {
        tasks.emplace_back(make_consumer_task());
    }
    for (size_t i = 0; i < producers; ++i)
    {
        tasks.emplace_back(make_producer_task());
    }

    coro::sync_wait(coro::when_all(std::move(tasks)));

    REQUIRE(rb.empty());
}
@ -1,226 +0,0 @@
|
|||
#include "catch.hpp"
|
||||
|
||||
#include <coro/coro.hpp>
|
||||
|
||||
#include <chrono>
|
||||
#include <thread>
|
||||
#include <vector>
|
||||
|
||||
TEST_CASE("semaphore binary", "[semaphore]")
|
||||
{
|
||||
std::vector<uint64_t> output;
|
||||
|
||||
coro::semaphore s{1};
|
||||
|
||||
auto make_emplace_task = [&](coro::semaphore& s) -> coro::task<void> {
|
||||
std::cerr << "Acquiring semaphore\n";
|
||||
co_await s.acquire();
|
||||
REQUIRE_FALSE(s.try_acquire());
|
||||
std::cerr << "semaphore acquired, emplacing back 1\n";
|
||||
output.emplace_back(1);
|
||||
std::cerr << "coroutine done with resource, releasing\n";
|
||||
REQUIRE(s.value() == 0);
|
||||
s.release();
|
||||
|
||||
REQUIRE(s.value() == 1);
|
||||
|
||||
REQUIRE(s.try_acquire());
|
||||
s.release();
|
||||
|
||||
co_return;
|
||||
};
|
||||
|
||||
coro::sync_wait(make_emplace_task(s));
|
||||
|
||||
REQUIRE(s.value() == 1);
|
||||
REQUIRE(s.try_acquire());
|
||||
REQUIRE(s.value() == 0);
|
||||
s.release();
|
||||
REQUIRE(s.value() == 1);
|
||||
|
||||
REQUIRE(output.size() == 1);
|
||||
REQUIRE(output[0] == 1);
|
||||
}
|
||||
|
||||
TEST_CASE("semaphore binary many waiters until event", "[semaphore]")
|
||||
{
|
||||
std::atomic<uint64_t> value{0};
|
||||
std::vector<coro::task<void>> tasks;
|
||||
|
||||
coro::semaphore s{1}; // acquires and holds the semaphore until the event is triggered
|
||||
coro::event e; // triggers the blocking thread to release the semaphore
|
||||
|
||||
auto make_task = [&](uint64_t id) -> coro::task<void> {
|
||||
std::cerr << "id = " << id << " waiting to acquire the semaphore\n";
|
||||
co_await s.acquire();
|
||||
|
||||
// Should always be locked upon acquiring the semaphore.
|
||||
REQUIRE_FALSE(s.try_acquire());
|
||||
|
||||
std::cerr << "id = " << id << " semaphore acquired\n";
|
||||
value.fetch_add(1, std::memory_order::relaxed);
|
||||
std::cerr << "id = " << id << " semaphore release\n";
|
||||
s.release();
|
||||
co_return;
|
||||
};
|
||||
|
||||
auto make_block_task = [&]() -> coro::task<void> {
|
||||
std::cerr << "block task acquiring lock\n";
|
||||
co_await s.acquire();
|
||||
REQUIRE_FALSE(s.try_acquire());
|
||||
std::cerr << "block task acquired semaphore, waiting on event\n";
|
||||
co_await e;
|
||||
std::cerr << "block task releasing semaphore\n";
|
||||
s.release();
|
||||
co_return;
|
||||
};
|
||||
|
||||
auto make_set_task = [&]() -> coro::task<void> {
|
||||
std::cerr << "set task setting event\n";
|
||||
e.set();
|
||||
co_return;
|
||||
};
|
||||
|
||||
tasks.emplace_back(make_block_task());
|
||||
|
||||
// Create N tasks that attempt to acquire the semaphore.
|
||||
for (uint64_t i = 1; i <= 4; ++i)
|
||||
{
|
||||
tasks.emplace_back(make_task(i));
|
||||
}
|
||||
|
||||
tasks.emplace_back(make_set_task());
|
||||
|
||||
coro::sync_wait(coro::when_all(std::move(tasks)));
|
||||
|
||||
REQUIRE(value == 4);
|
||||
}
|
||||
|
||||
TEST_CASE("semaphore ringbuffer", "[semaphore]")
|
||||
{
|
||||
const std::size_t iterations = 10;
|
||||
|
||||
// This test is run in the context of a thread pool so the producer task can yield. Otherwise
|
||||
// the producer will just run wild!
|
||||
coro::thread_pool tp{coro::thread_pool::options{.thread_count = 1}};
|
||||
std::atomic<uint64_t> value{0};
|
||||
std::vector<coro::task<void>> tasks;
|
||||
|
||||
coro::semaphore s{2, 2};
|
||||
|
||||
auto make_consumer_task = [&](uint64_t id) -> coro::task<void> {
|
||||
co_await tp.schedule();
|
||||
|
||||
try
|
||||
{
|
||||
while (true)
|
||||
{
|
||||
std::cerr << "id = " << id << " waiting to acquire the semaphore\n";
|
||||
co_await s.acquire();
|
||||
std::cerr << "id = " << id << " semaphore acquired, consuming value\n";
|
||||
|
||||
value.fetch_add(1, std::memory_order::release);
|
||||
// In the ringbfuffer acquire is 'consuming', we never release back into the buffer
|
||||
}
|
||||
}
|
||||
catch (const coro::stop_signal&)
|
||||
{
|
||||
std::cerr << "id = " << id << " exiting\n";
|
||||
}
|
||||
|
||||
co_return;
|
||||
};
|
||||
|
||||
auto make_producer_task = [&]() -> coro::task<void> {
|
||||
co_await tp.schedule();
|
||||
|
||||
for (size_t i = 2; i < iterations; ++i)
|
||||
{
|
||||
std::cerr << "producer: doing work\n";
|
||||
// Do some work...
|
||||
|
||||
std::cerr << "producer: releasing\n";
|
||||
s.release();
|
||||
std::cerr << "producer: produced\n";
|
||||
co_await tp.yield();
|
||||
}
|
||||
|
||||
std::cerr << "producer exiting\n";
|
||||
s.stop_signal_notify_waiters();
|
||||
co_return;
|
||||
};
|
||||
|
||||
tasks.emplace_back(make_producer_task());
|
||||
tasks.emplace_back(make_consumer_task(1));
|
||||
|
||||
coro::sync_wait(coro::when_all(std::move(tasks)));
|
||||
|
||||
REQUIRE(value == iterations);
|
||||
}
|
||||
|
||||
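// Editor's note (added): judging by the two ringbuffer tests, coro::semaphore's
// two-argument constructor appears to take (max, initial) counts — s{2, 2} starts
// with both permits available (hence the producer loop starting at i = 2), while
// s{50, 0} below starts empty so consumers block until the producer releases.
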
TEST_CASE("semaphore ringbuffer many producers and consumers", "[semaphore]")
|
||||
{
|
||||
const std::size_t consumers = 16;
|
||||
const std::size_t producers = 1;
|
||||
const std::size_t iterations = 100'000;
|
||||
|
||||
std::atomic<uint64_t> value{0};
|
||||
|
||||
coro::semaphore s{50, 0};
|
||||
|
||||
coro::io_scheduler tp{}; // let er rip
|
||||
|
||||
auto make_consumer_task = [&](uint64_t id) -> coro::task<void> {
|
||||
co_await tp.schedule();
|
||||
|
||||
try
|
||||
{
|
||||
while (true)
|
||||
{
|
||||
co_await s.acquire();
|
||||
co_await tp.schedule();
|
||||
value.fetch_add(1, std::memory_order::relaxed);
|
||||
}
|
||||
}
|
||||
catch (const coro::stop_signal&)
|
||||
{
|
||||
std::cerr << "consumer " << id << " exiting\n";
|
||||
}
|
||||
|
||||
co_return;
|
||||
};
|
||||
|
||||
auto make_producer_task = [&](uint64_t id) -> coro::task<void> {
|
||||
co_await tp.schedule();
|
||||
|
||||
for (size_t i = 0; i < iterations; ++i)
|
||||
{
|
||||
s.release();
|
||||
}
|
||||
|
||||
while (value.load(std::memory_order::relaxed) < iterations)
|
||||
{
|
||||
co_await tp.yield_for(std::chrono::milliseconds{1});
|
||||
}
|
||||
|
||||
std::cerr << "producer " << id << " exiting\n";
|
||||
|
||||
s.stop_signal_notify_waiters();
|
||||
|
||||
co_return;
|
||||
};
|
||||
|
||||
std::vector<coro::task<void>> tasks{};
|
||||
for (size_t i = 0; i < consumers; ++i)
|
||||
{
|
||||
tasks.emplace_back(make_consumer_task(i));
|
||||
}
|
||||
for (size_t i = 0; i < producers; ++i)
|
||||
{
|
||||
tasks.emplace_back(make_producer_task(i));
|
||||
}
|
||||
|
||||
coro::sync_wait(coro::when_all(std::move(tasks)));
|
||||
|
||||
REQUIRE(value >= iterations);
|
||||
}
|
|
@@ -1,159 +0,0 @@
#include "catch.hpp"

#include <coro/coro.hpp>

#include <chrono>
#include <thread>

TEST_CASE("mutex single waiter not locked exclusive", "[shared_mutex]")
|
||||
{
|
||||
coro::thread_pool tp{coro::thread_pool::options{.thread_count = 1}};
|
||||
std::vector<uint64_t> output;
|
||||
|
||||
coro::shared_mutex m{tp};
|
||||
|
||||
auto make_emplace_task = [&](coro::shared_mutex& m) -> coro::task<void> {
|
||||
std::cerr << "Acquiring lock exclusive\n";
|
||||
{
|
||||
auto scoped_lock = co_await m.lock();
|
||||
REQUIRE_FALSE(m.try_lock());
|
||||
REQUIRE_FALSE(m.try_lock_shared());
|
||||
std::cerr << "lock acquired, emplacing back 1\n";
|
||||
output.emplace_back(1);
|
||||
std::cerr << "coroutine done\n";
|
||||
}
|
||||
|
||||
// The scoped lock should release the lock upon destructing.
|
||||
REQUIRE(m.try_lock());
|
||||
m.unlock();
|
||||
|
||||
co_return;
|
||||
};
|
||||
|
||||
coro::sync_wait(make_emplace_task(m));
|
||||
|
||||
REQUIRE(m.try_lock());
|
||||
m.unlock();
|
||||
|
||||
REQUIRE(output.size() == 1);
|
||||
REQUIRE(output[0] == 1);
|
||||
}
|
||||
|
||||
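// Editor's sketch (added; not from the original file): the RAII contract the
// tests above depend on — the scoped lock returned by co_await m.lock()
// releases the mutex when it goes out of scope. Uses only APIs shown above.
TEST_CASE("shared_mutex sketch scoped exclusive lock", "[shared_mutex]")
{
    coro::thread_pool tp{coro::thread_pool::options{.thread_count = 1}};
    coro::shared_mutex m{tp};

    auto task = [&]() -> coro::task<void> {
        {
            auto guard = co_await m.lock(); // exclusive; released at scope exit
            REQUIRE_FALSE(m.try_lock());
        }
        REQUIRE(m.try_lock()); // free again once the guard is destroyed
        m.unlock();
        co_return;
    };

    coro::sync_wait(task());
}
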
TEST_CASE("mutex single waiter not locked shared", "[shared_mutex]")
|
||||
{
|
||||
coro::thread_pool tp{coro::thread_pool::options{.thread_count = 1}};
|
||||
std::vector<uint64_t> values{1, 2, 3};
|
||||
|
||||
coro::shared_mutex m{tp};
|
||||
|
||||
auto make_emplace_task = [&](coro::shared_mutex& m) -> coro::task<void> {
|
||||
std::cerr << "Acquiring lock shared\n";
|
||||
{
|
||||
auto scoped_lock = co_await m.lock_shared();
|
||||
REQUIRE_FALSE(m.try_lock());
|
||||
REQUIRE(m.try_lock_shared());
|
||||
std::cerr << "lock acquired, reading values\n";
|
||||
for (const auto& v : values)
|
||||
{
|
||||
std::cerr << v << ",";
|
||||
}
|
||||
std::cerr << "\ncoroutine done\n";
|
||||
|
||||
m.unlock_shared(); // manually locked shared on a shared, unlock
|
||||
}
|
||||
|
||||
// The scoped lock should release the lock upon destructing.
|
||||
REQUIRE(m.try_lock());
|
||||
m.unlock();
|
||||
|
||||
co_return;
|
||||
};
|
||||
|
||||
coro::sync_wait(make_emplace_task(m));
|
||||
|
||||
REQUIRE(m.try_lock_shared());
|
||||
m.unlock_shared();
|
||||
|
||||
REQUIRE(m.try_lock());
|
||||
m.unlock();
|
||||
}
|
||||
|
||||
TEST_CASE("mutex many shared and exclusive waiters interleaved", "[shared_mutex]")
|
||||
{
|
||||
coro::io_scheduler tp{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 8}}};
|
||||
coro::shared_mutex m{tp};
|
||||
|
||||
std::atomic<bool> read_value{false};
|
||||
|
||||
auto make_shared_task = [&]() -> coro::task<bool> {
|
||||
co_await tp.schedule();
|
||||
std::cerr << "make_shared_task shared lock acquiring\n";
|
||||
auto scoped_lock = co_await m.lock_shared();
|
||||
std::cerr << "make_shared_task shared lock acquired\n";
|
||||
bool value = read_value.load(std::memory_order::acquire);
|
||||
std::cerr << "make_shared_task shared lock releasing on thread_id = " << std::this_thread::get_id() << "\n";
|
||||
co_return value;
|
||||
};
|
||||
|
||||
auto make_exclusive_task = [&]() -> coro::task<void> {
|
||||
// Let some readers get through.
|
||||
co_await tp.yield_for(std::chrono::milliseconds{50});
|
||||
|
||||
{
|
||||
std::cerr << "make_shared_task exclusive lock acquiring\n";
|
||||
auto scoped_lock = co_await m.lock();
|
||||
std::cerr << "make_shared_task exclusive lock acquired\n";
|
||||
// Stack readers on the mutex
|
||||
co_await tp.yield_for(std::chrono::milliseconds{50});
|
||||
read_value.exchange(true, std::memory_order::release);
|
||||
std::cerr << "make_shared_task exclusive lock releasing\n";
|
||||
}
|
||||
|
||||
co_return;
|
||||
};
|
||||
|
||||
auto make_shared_tasks_task = [&]() -> coro::task<void> {
|
||||
co_await tp.schedule();
|
||||
|
||||
std::vector<coro::task<bool>> shared_tasks{};
|
||||
|
||||
bool stop{false};
|
||||
while (!stop)
|
||||
{
|
||||
shared_tasks.emplace_back(make_shared_task());
|
||||
shared_tasks.back().resume();
|
||||
|
||||
co_await tp.yield_for(std::chrono::milliseconds{1});
|
||||
|
||||
for (const auto& st : shared_tasks)
|
||||
{
|
||||
if (st.is_ready())
|
||||
{
|
||||
stop = st.promise().return_value();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
while (true)
|
||||
{
|
||||
bool tasks_remaining{false};
|
||||
for (const auto& st : shared_tasks)
|
||||
{
|
||||
if (!st.is_ready())
|
||||
{
|
||||
tasks_remaining = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!tasks_remaining)
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
co_return;
|
||||
};
|
||||
|
||||
coro::sync_wait(coro::when_all(make_shared_tasks_task(), make_exclusive_task()));
|
||||
}
|
|
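// Editor's note (added): the interleaving above demonstrates writer exclusion —
// readers already holding shared locks finish first, the writer then holds the
// mutex alone while new readers stack up behind it, and once it publishes
// read_value and releases, the queued readers observe the new value and the
// spawning loop terminates.
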
@@ -1,58 +0,0 @@
#include "catch.hpp"

#include <coro/coro.hpp>

TEST_CASE("sync_wait simple integer return", "[sync_wait]")
|
||||
{
|
||||
auto func = []() -> coro::task<int> { co_return 11; };
|
||||
|
||||
auto result = coro::sync_wait(func());
|
||||
REQUIRE(result == 11);
|
||||
}
|
||||
|
||||
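// Editor's sketch (added; not from the original file): sync_wait is the bridge
// from synchronous code into coroutine land — it starts the lazily-suspended
// task and blocks the calling thread until it completes, returning its value.
// Uses only the API exercised in this file.
TEST_CASE("sync_wait sketch composing values", "[sync_wait]")
{
    auto add = [](int a, int b) -> coro::task<int> { co_return a + b; };
    REQUIRE(coro::sync_wait(add(2, 3)) == 5);
}
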
TEST_CASE("sync_wait void", "[sync_wait]")
|
||||
{
|
||||
std::string output;
|
||||
|
||||
auto func = [&]() -> coro::task<void> {
|
||||
output = "hello from sync_wait<void>\n";
|
||||
co_return;
|
||||
};
|
||||
|
||||
coro::sync_wait(func());
|
||||
REQUIRE(output == "hello from sync_wait<void>\n");
|
||||
}
|
||||
|
||||
TEST_CASE("sync_wait task co_await single", "[sync_wait]")
|
||||
{
|
||||
auto answer = []() -> coro::task<int> {
|
||||
std::cerr << "\tThinking deep thoughts...\n";
|
||||
co_return 42;
|
||||
};
|
||||
|
||||
auto await_answer = [&]() -> coro::task<int> {
|
||||
std::cerr << "\tStarting to wait for answer.\n";
|
||||
auto a = answer();
|
||||
std::cerr << "\tGot the coroutine, getting the value.\n";
|
||||
auto v = co_await a;
|
||||
std::cerr << "\tCoroutine value is " << v << "\n";
|
||||
REQUIRE(v == 42);
|
||||
v = co_await a;
|
||||
std::cerr << "\tValue is still " << v << "\n";
|
||||
REQUIRE(v == 42);
|
||||
co_return 1337;
|
||||
};
|
||||
|
||||
auto output = coro::sync_wait(await_answer());
|
||||
REQUIRE(output == 1337);
|
||||
}
|
||||
|
||||
TEST_CASE("sync_wait task that throws", "[sync_wait]")
|
||||
{
|
||||
auto f = []() -> coro::task<uint64_t> {
|
||||
throw std::runtime_error("I always throw!");
|
||||
co_return 1;
|
||||
};
|
||||
|
||||
REQUIRE_THROWS(coro::sync_wait(f()));
|
||||
}
|
|
@@ -1,243 +0,0 @@
#include "catch.hpp"

#include <coro/coro.hpp>

#include <chrono>
#include <thread>

TEST_CASE("task hello world", "[task]")
|
||||
{
|
||||
using task_type = coro::task<std::string>;
|
||||
|
||||
auto h = []() -> task_type { co_return "Hello"; }();
|
||||
auto w = []() -> task_type { co_return "World"; }();
|
||||
|
||||
REQUIRE(h.promise().return_value().empty());
|
||||
REQUIRE(w.promise().return_value().empty());
|
||||
|
||||
h.resume(); // task suspends immediately
|
||||
w.resume();
|
||||
|
||||
REQUIRE(h.is_ready());
|
||||
REQUIRE(w.is_ready());
|
||||
|
||||
auto w_value = std::move(w).promise().return_value();
|
||||
|
||||
REQUIRE(h.promise().return_value() == "Hello");
|
||||
REQUIRE(w_value == "World");
|
||||
REQUIRE(w.promise().return_value().empty());
|
||||
}
|
||||
|
||||
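// Editor's note (added): tasks are lazy — nothing runs until resume() (or a
// co_await) kicks them off, which is why both return values above are empty
// before h.resume()/w.resume(). Moving the promise out via
// std::move(w).promise().return_value() transfers the stored string, leaving
// the promise's copy empty afterwards, as the final REQUIRE checks.
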
TEST_CASE("task void", "[task]")
|
||||
{
|
||||
using namespace std::chrono_literals;
|
||||
using task_type = coro::task<>;
|
||||
|
||||
auto t = []() -> task_type {
|
||||
std::this_thread::sleep_for(10ms);
|
||||
co_return;
|
||||
}();
|
||||
t.resume();
|
||||
|
||||
REQUIRE(t.is_ready());
|
||||
}
|
||||
|
||||
TEST_CASE("task exception thrown", "[task]")
|
||||
{
|
||||
using task_type = coro::task<std::string>;
|
||||
|
||||
std::string throw_msg = "I'll be reached";
|
||||
|
||||
auto task = [&]() -> task_type {
|
||||
throw std::runtime_error(throw_msg);
|
||||
co_return "I'll never be reached";
|
||||
}();
|
||||
|
||||
task.resume();
|
||||
|
||||
REQUIRE(task.is_ready());
|
||||
|
||||
bool thrown{false};
|
||||
try
|
||||
{
|
||||
auto value = task.promise().return_value();
|
||||
}
|
||||
catch (const std::exception& e)
|
||||
{
|
||||
thrown = true;
|
||||
REQUIRE(e.what() == throw_msg);
|
||||
}
|
||||
|
||||
REQUIRE(thrown);
|
||||
}
|
||||
|
||||
TEST_CASE("task in a task", "[task]")
|
||||
{
|
||||
auto outer_task = []() -> coro::task<> {
|
||||
auto inner_task = []() -> coro::task<int> {
|
||||
std::cerr << "inner_task start\n";
|
||||
std::cerr << "inner_task stop\n";
|
||||
co_return 42;
|
||||
};
|
||||
|
||||
std::cerr << "outer_task start\n";
|
||||
auto v = co_await inner_task();
|
||||
REQUIRE(v == 42);
|
||||
std::cerr << "outer_task stop\n";
|
||||
}();
|
||||
|
||||
outer_task.resume(); // all tasks start suspend, kick it off.
|
||||
|
||||
REQUIRE(outer_task.is_ready());
|
||||
}
|
||||
|
||||
TEST_CASE("task in a task in a task", "[task]")
|
||||
{
|
||||
auto task1 = []() -> coro::task<> {
|
||||
std::cerr << "task1 start\n";
|
||||
auto task2 = []() -> coro::task<int> {
|
||||
std::cerr << "\ttask2 start\n";
|
||||
auto task3 = []() -> coro::task<int> {
|
||||
std::cerr << "\t\ttask3 start\n";
|
||||
std::cerr << "\t\ttask3 stop\n";
|
||||
co_return 3;
|
||||
};
|
||||
|
||||
auto v2 = co_await task3();
|
||||
REQUIRE(v2 == 3);
|
||||
|
||||
std::cerr << "\ttask2 stop\n";
|
||||
co_return 2;
|
||||
};
|
||||
|
||||
auto v1 = co_await task2();
|
||||
REQUIRE(v1 == 2);
|
||||
|
||||
std::cerr << "task1 stop\n";
|
||||
}();
|
||||
|
||||
task1.resume(); // all tasks start suspended, kick it off.
|
||||
|
||||
REQUIRE(task1.is_ready());
|
||||
}
|
||||
|
||||
TEST_CASE("task multiple suspends return void", "[task]")
|
||||
{
|
||||
auto task = []() -> coro::task<void> {
|
||||
co_await std::suspend_always{};
|
||||
co_await std::suspend_never{};
|
||||
co_await std::suspend_always{};
|
||||
co_await std::suspend_always{};
|
||||
co_return;
|
||||
}();
|
||||
|
||||
task.resume(); // initial suspend
|
||||
REQUIRE_FALSE(task.is_ready());
|
||||
|
||||
task.resume(); // first internal suspend
|
||||
REQUIRE_FALSE(task.is_ready());
|
||||
|
||||
task.resume(); // second internal suspend
|
||||
REQUIRE_FALSE(task.is_ready());
|
||||
|
||||
task.resume(); // third internal suspend
|
||||
REQUIRE(task.is_ready());
|
||||
}
|
||||
|
||||
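// Editor's note (added): four co_awaits appear above but only three extra
// resumes are needed — std::suspend_never does not actually suspend, so the
// coroutine runs straight through it to the next std::suspend_always.
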
TEST_CASE("task multiple suspends return integer", "[task]")
|
||||
{
|
||||
auto task = []() -> coro::task<int> {
|
||||
co_await std::suspend_always{};
|
||||
co_await std::suspend_always{};
|
||||
co_await std::suspend_always{};
|
||||
co_return 11;
|
||||
}();
|
||||
|
||||
task.resume(); // initial suspend
|
||||
REQUIRE_FALSE(task.is_ready());
|
||||
|
||||
task.resume(); // first internal suspend
|
||||
REQUIRE_FALSE(task.is_ready());
|
||||
|
||||
task.resume(); // second internal suspend
|
||||
REQUIRE_FALSE(task.is_ready());
|
||||
|
||||
task.resume(); // third internal suspend
|
||||
REQUIRE(task.is_ready());
|
||||
REQUIRE(task.promise().return_value() == 11);
|
||||
}
|
||||
|
||||
TEST_CASE("task resume from promise to coroutine handles of different types", "[task]")
|
||||
{
|
||||
auto task1 = [&]() -> coro::task<int> {
|
||||
std::cerr << "Task ran\n";
|
||||
co_return 42;
|
||||
}();
|
||||
|
||||
auto task2 = [&]() -> coro::task<void> {
|
||||
std::cerr << "Task 2 ran\n";
|
||||
co_return;
|
||||
}();
|
||||
|
||||
// task.resume(); normal method of resuming
|
||||
|
||||
std::vector<std::coroutine_handle<>> handles;
|
||||
|
||||
handles.emplace_back(std::coroutine_handle<coro::task<int>::promise_type>::from_promise(task1.promise()));
|
||||
handles.emplace_back(std::coroutine_handle<coro::task<void>::promise_type>::from_promise(task2.promise()));
|
||||
|
||||
auto& coro_handle1 = handles[0];
|
||||
coro_handle1.resume();
|
||||
auto& coro_handle2 = handles[1];
|
||||
coro_handle2.resume();
|
||||
|
||||
REQUIRE(task1.is_ready());
|
||||
REQUIRE(coro_handle1.done());
|
||||
REQUIRE(task1.promise().return_value() == 42);
|
||||
|
||||
REQUIRE(task2.is_ready());
|
||||
REQUIRE(coro_handle2.done());
|
||||
}
|
||||
|
||||
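// Editor's note (added): the point of the test above is type erasure — handles
// created from differently-typed promises (task<int> vs task<void>) can be
// stored in one container and resumed uniformly as std::coroutine_handle<>.
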
TEST_CASE("task throws void", "[task]")
|
||||
{
|
||||
auto task = []() -> coro::task<void> {
|
||||
throw std::runtime_error{"I always throw."};
|
||||
co_return;
|
||||
}();
|
||||
|
||||
task.resume();
|
||||
REQUIRE(task.is_ready());
|
||||
REQUIRE_THROWS_AS(task.promise().return_value(), std::runtime_error);
|
||||
}
|
||||
|
||||
TEST_CASE("task throws non-void l-value", "[task]")
|
||||
{
|
||||
auto task = []() -> coro::task<int> {
|
||||
throw std::runtime_error{"I always throw."};
|
||||
co_return 42;
|
||||
}();
|
||||
|
||||
task.resume();
|
||||
REQUIRE(task.is_ready());
|
||||
REQUIRE_THROWS_AS(task.promise().return_value(), std::runtime_error);
|
||||
}
|
||||
|
||||
TEST_CASE("task throws non-void r-value", "[task]")
|
||||
{
|
||||
struct type
|
||||
{
|
||||
int m_value;
|
||||
};
|
||||
|
||||
auto task = []() -> coro::task<type> {
|
||||
type return_value{42};
|
||||
|
||||
throw std::runtime_error{"I always throw."};
|
||||
co_return std::move(return_value);
|
||||
}();
|
||||
|
||||
task.resume();
|
||||
REQUIRE(task.is_ready());
|
||||
REQUIRE_THROWS_AS(task.promise().return_value(), std::runtime_error);
|
||||
}
|
|
@@ -1,193 +0,0 @@
#include "catch.hpp"

#include <coro/coro.hpp>

#include <iostream>

TEST_CASE("thread_pool one worker one task", "[thread_pool]")
|
||||
{
|
||||
coro::thread_pool tp{coro::thread_pool::options{1}};
|
||||
|
||||
auto func = [&tp]() -> coro::task<uint64_t> {
|
||||
co_await tp.schedule(); // Schedule this coroutine on the scheduler.
|
||||
co_return 42;
|
||||
};
|
||||
|
||||
auto result = coro::sync_wait(func());
|
||||
REQUIRE(result == 42);
|
||||
}
|
||||
|
||||
TEST_CASE("thread_pool one worker many tasks tuple", "[thread_pool]")
|
||||
{
|
||||
coro::thread_pool tp{coro::thread_pool::options{1}};
|
||||
|
||||
auto f = [&tp]() -> coro::task<uint64_t> {
|
||||
co_await tp.schedule(); // Schedule this coroutine on the scheduler.
|
||||
co_return 50;
|
||||
};
|
||||
|
||||
auto tasks = coro::sync_wait(coro::when_all(f(), f(), f(), f(), f()));
|
||||
REQUIRE(std::tuple_size<decltype(tasks)>() == 5);
|
||||
|
||||
uint64_t counter{0};
|
||||
std::apply([&counter](auto&&... t) -> void { ((counter += t.return_value()), ...); }, tasks);
|
||||
|
||||
REQUIRE(counter == 250);
|
||||
}
|
||||
|
||||
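// Editor's note (added): when_all over a fixed argument list yields a tuple of
// completed tasks; the std::apply + fold expression above sums each task's
// return_value() without naming the tuple elements individually.
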
TEST_CASE("thread_pool one worker many tasks vector", "[thread_pool]")
|
||||
{
|
||||
coro::thread_pool tp{coro::thread_pool::options{1}};
|
||||
|
||||
auto f = [&tp]() -> coro::task<uint64_t> {
|
||||
co_await tp.schedule(); // Schedule this coroutine on the scheduler.
|
||||
co_return 50;
|
||||
};
|
||||
|
||||
std::vector<coro::task<uint64_t>> input_tasks;
|
||||
input_tasks.emplace_back(f());
|
||||
input_tasks.emplace_back(f());
|
||||
input_tasks.emplace_back(f());
|
||||
|
||||
auto output_tasks = coro::sync_wait(coro::when_all(std::move(input_tasks)));
|
||||
|
||||
REQUIRE(output_tasks.size() == 3);
|
||||
|
||||
uint64_t counter{0};
|
||||
for (const auto& task : output_tasks)
|
||||
{
|
||||
counter += task.return_value();
|
||||
}
|
||||
|
||||
REQUIRE(counter == 150);
|
||||
}
|
||||
|
||||
TEST_CASE("thread_pool N workers 100k tasks", "[thread_pool]")
|
||||
{
|
||||
constexpr const std::size_t iterations = 100'000;
|
||||
coro::thread_pool tp{};
|
||||
|
||||
auto make_task = [](coro::thread_pool& tp) -> coro::task<uint64_t> {
|
||||
co_await tp.schedule();
|
||||
co_return 1;
|
||||
};
|
||||
|
||||
std::vector<coro::task<uint64_t>> input_tasks{};
|
||||
input_tasks.reserve(iterations);
|
||||
for (std::size_t i = 0; i < iterations; ++i)
|
||||
{
|
||||
input_tasks.emplace_back(make_task(tp));
|
||||
}
|
||||
|
||||
auto output_tasks = coro::sync_wait(coro::when_all(std::move(input_tasks)));
|
||||
REQUIRE(output_tasks.size() == iterations);
|
||||
|
||||
uint64_t counter{0};
|
||||
for (const auto& task : output_tasks)
|
||||
{
|
||||
counter += task.return_value();
|
||||
}
|
||||
|
||||
REQUIRE(counter == iterations);
|
||||
}
|
||||
|
||||
TEST_CASE("thread_pool 1 worker task spawns another task", "[thread_pool]")
|
||||
{
|
||||
coro::thread_pool tp{coro::thread_pool::options{1}};
|
||||
|
||||
auto f1 = [](coro::thread_pool& tp) -> coro::task<uint64_t> {
|
||||
co_await tp.schedule();
|
||||
|
||||
auto f2 = [](coro::thread_pool& tp) -> coro::task<uint64_t> {
|
||||
co_await tp.schedule();
|
||||
co_return 5;
|
||||
};
|
||||
|
||||
co_return 1 + co_await f2(tp);
|
||||
};
|
||||
|
||||
REQUIRE(coro::sync_wait(f1(tp)) == 6);
|
||||
}
|
||||
|
||||
TEST_CASE("thread_pool shutdown", "[thread_pool]")
|
||||
{
|
||||
coro::thread_pool tp{coro::thread_pool::options{1}};
|
||||
|
||||
auto f = [](coro::thread_pool& tp) -> coro::task<bool> {
|
||||
try
|
||||
{
|
||||
co_await tp.schedule();
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
co_return true;
|
||||
}
|
||||
co_return false;
|
||||
};
|
||||
|
||||
tp.shutdown(coro::shutdown_t::async);
|
||||
|
||||
REQUIRE(coro::sync_wait(f(tp)) == true);
|
||||
}
|
||||
|
||||
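// Editor's note (added): once shutdown has been requested the pool rejects new
// work, so the co_await tp.schedule() above is expected to throw, which the
// task converts into co_return true. shutdown_t::async presumably requests
// shutdown without waiting for in-flight work (judging by the name); the
// functor tests below call the no-argument shutdown() overload instead.
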
TEST_CASE("thread_pool schedule functor", "[thread_pool]")
|
||||
{
|
||||
coro::thread_pool tp{coro::thread_pool::options{1}};
|
||||
|
||||
auto f = []() -> uint64_t { return 1; };
|
||||
|
||||
auto result = coro::sync_wait(tp.schedule(f));
|
||||
REQUIRE(result == 1);
|
||||
|
||||
tp.shutdown();
|
||||
|
||||
REQUIRE_THROWS(coro::sync_wait(tp.schedule(f)));
|
||||
}
|
||||
|
||||
TEST_CASE("thread_pool schedule functor return_type = void", "[thread_pool]")
|
||||
{
|
||||
coro::thread_pool tp{coro::thread_pool::options{1}};
|
||||
|
||||
std::atomic<uint64_t> counter{0};
|
||||
auto f = [](std::atomic<uint64_t>& c) -> void { c++; };
|
||||
|
||||
coro::sync_wait(tp.schedule(f, std::ref(counter)));
|
||||
REQUIRE(counter == 1);
|
||||
|
||||
tp.shutdown();
|
||||
|
||||
REQUIRE_THROWS(coro::sync_wait(tp.schedule(f, std::ref(counter))));
|
||||
}
|
||||
|
||||
TEST_CASE("thread_pool event jump threads", "[thread_pool]")
|
||||
{
|
||||
// This test verifies that the thread that sets the event ends up executing every waiter on the event
|
||||
|
||||
coro::thread_pool tp1{coro::thread_pool::options{.thread_count = 1}};
|
||||
coro::thread_pool tp2{coro::thread_pool::options{.thread_count = 1}};
|
||||
|
||||
coro::event e{};
|
||||
|
||||
auto make_tp1_task = [&]() -> coro::task<void> {
|
||||
co_await tp1.schedule();
|
||||
auto before_thread_id = std::this_thread::get_id();
|
||||
std::cerr << "before event thread_id = " << before_thread_id << "\n";
|
||||
co_await e;
|
||||
auto after_thread_id = std::this_thread::get_id();
|
||||
std::cerr << "after event thread_id = " << after_thread_id << "\n";
|
||||
|
||||
REQUIRE(before_thread_id != after_thread_id);
|
||||
|
||||
co_return;
|
||||
};
|
||||
|
||||
auto make_tp2_task = [&]() -> coro::task<void> {
|
||||
co_await tp2.schedule();
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds{10});
|
||||
std::cerr << "setting event\n";
|
||||
e.set();
|
||||
co_return;
|
||||
};
|
||||
|
||||
coro::sync_wait(coro::when_all(make_tp1_task(), make_tp2_task()));
|
||||
}
|
|
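// Editor's note (added): the REQUIRE above pins down the scheduling contract —
// the waiter resumes inline on the thread that called e.set() (tp2's worker),
// not on the thread where it originally suspended (tp1's worker).
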
@@ -1,109 +0,0 @@
#include "catch.hpp"

#include <coro/coro.hpp>

#include <list>
#include <vector>

TEST_CASE("when_all single task with tuple container", "[when_all]")
|
||||
{
|
||||
auto make_task = [](uint64_t amount) -> coro::task<uint64_t> { co_return amount; };
|
||||
|
||||
auto output_tasks = coro::sync_wait(coro::when_all(make_task(100)));
|
||||
REQUIRE(std::tuple_size<decltype(output_tasks)>() == 1);
|
||||
|
||||
uint64_t counter{0};
|
||||
std::apply([&counter](auto&&... tasks) -> void { ((counter += tasks.return_value()), ...); }, output_tasks);
|
||||
|
||||
REQUIRE(counter == 100);
|
||||
}
|
||||
|
||||
TEST_CASE("when_all single task with tuple container by move", "[when_all]")
|
||||
{
|
||||
auto make_task = [](uint64_t amount) -> coro::task<uint64_t> { co_return amount; };
|
||||
|
||||
auto t = make_task(100);
|
||||
auto output_tasks = coro::sync_wait(coro::when_all(std::move(t)));
|
||||
REQUIRE(std::tuple_size<decltype(output_tasks)>() == 1);
|
||||
|
||||
uint64_t counter{0};
|
||||
std::apply([&counter](auto&&... tasks) -> void { ((counter += tasks.return_value()), ...); }, output_tasks);
|
||||
|
||||
REQUIRE(counter == 100);
|
||||
}
|
||||
|
||||
TEST_CASE("when_all multiple tasks with tuple container", "[when_all]")
|
||||
{
|
||||
auto make_task = [](uint64_t amount) -> coro::task<uint64_t> { co_return amount; };
|
||||
|
||||
auto output_tasks = coro::sync_wait(coro::when_all(make_task(100), make_task(50), make_task(20)));
|
||||
REQUIRE(std::tuple_size<decltype(output_tasks)>() == 3);
|
||||
|
||||
uint64_t counter{0};
|
||||
std::apply([&counter](auto&&... tasks) -> void { ((counter += tasks.return_value()), ...); }, output_tasks);
|
||||
|
||||
REQUIRE(counter == 170);
|
||||
}
|
||||
|
||||
TEST_CASE("when_all single task with vector container", "[when_all]")
|
||||
{
|
||||
auto make_task = [](uint64_t amount) -> coro::task<uint64_t> { co_return amount; };
|
||||
|
||||
std::vector<coro::task<uint64_t>> input_tasks;
|
||||
input_tasks.emplace_back(make_task(100));
|
||||
|
||||
auto output_tasks = coro::sync_wait(coro::when_all(std::move(input_tasks)));
|
||||
REQUIRE(output_tasks.size() == 1);
|
||||
|
||||
uint64_t counter{0};
|
||||
for (const auto& task : output_tasks)
|
||||
{
|
||||
counter += task.return_value();
|
||||
}
|
||||
|
||||
REQUIRE(counter == 100);
|
||||
}
|
||||
|
||||
TEST_CASE("when_all multple task withs vector container", "[when_all]")
|
||||
{
|
||||
auto make_task = [](uint64_t amount) -> coro::task<uint64_t> { co_return amount; };
|
||||
|
||||
std::vector<coro::task<uint64_t>> input_tasks;
|
||||
input_tasks.emplace_back(make_task(100));
|
||||
input_tasks.emplace_back(make_task(200));
|
||||
input_tasks.emplace_back(make_task(550));
|
||||
input_tasks.emplace_back(make_task(1000));
|
||||
|
||||
auto output_tasks = coro::sync_wait(coro::when_all(std::move(input_tasks)));
|
||||
REQUIRE(output_tasks.size() == 4);
|
||||
|
||||
uint64_t counter{0};
|
||||
for (const auto& task : output_tasks)
|
||||
{
|
||||
counter += task.return_value();
|
||||
}
|
||||
|
||||
REQUIRE(counter == 1850);
|
||||
}
|
||||
|
||||
TEST_CASE("when_all multple task withs list container", "[when_all]")
|
||||
{
|
||||
auto make_task = [](uint64_t amount) -> coro::task<uint64_t> { co_return amount; };
|
||||
|
||||
std::list<coro::task<uint64_t>> input_tasks;
|
||||
input_tasks.emplace_back(make_task(100));
|
||||
input_tasks.emplace_back(make_task(200));
|
||||
input_tasks.emplace_back(make_task(550));
|
||||
input_tasks.emplace_back(make_task(1000));
|
||||
|
||||
auto output_tasks = coro::sync_wait(coro::when_all(std::move(input_tasks)));
|
||||
REQUIRE(output_tasks.size() == 4);
|
||||
|
||||
uint64_t counter{0};
|
||||
for (const auto& task : output_tasks)
|
||||
{
|
||||
counter += task.return_value();
|
||||
}
|
||||
|
||||
REQUIRE(counter == 1850);
|
||||
}
|
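// Editor's sketch (added; not from the original file): the vector/list pattern
// above in its smallest form — build tasks in a loop, move the container into
// when_all, then harvest each return_value().
TEST_CASE("when_all sketch loop-built tasks", "[when_all]")
{
    auto make_task = [](uint64_t amount) -> coro::task<uint64_t> { co_return amount; };

    std::vector<coro::task<uint64_t>> input_tasks;
    for (uint64_t i = 1; i <= 3; ++i)
    {
        input_tasks.emplace_back(make_task(i));
    }

    auto output_tasks = coro::sync_wait(coro::when_all(std::move(input_tasks)));

    uint64_t counter{0};
    for (const auto& task : output_tasks)
    {
        counter += task.return_value();
    }
    REQUIRE(counter == 6); // 1 + 2 + 3
}
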
vendor/c-ares/c-ares (vendored submodule, 1 line removed)
@@ -1 +0,0 @@
Subproject commit 799e81d4ace75af7d530857d4f8b35913a27463e