Mirror of https://gitlab.com/niansa/libcrosscoro.git (synced 2025-03-06 20:53:32 +01:00)
Add coro::mutex example to readme (#45)
* Add coro::mutex example to readme
* explicit lock_operation ctor
* lock_operation await_ready() uses try_lock

This allows for the lock operation to skip await_suspend() entirely if the lock was unlocked.
Parent: 80fea9c49a
Commit: 60994334fe
19 changed files with 429 additions and 199 deletions
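For context on the third bullet: under the C++20 awaiter protocol, returning true from await_ready() makes the compiler skip await_suspend() and continue the coroutine without suspending, so probing the mutex with try_lock() there gives an uncontended co_await a suspension-free fast path. A deliberately toy sketch of that shape (a hypothetical tiny_lock, not the library's code; libcoro parks contended waiters on a list instead of spinning):

```C++
#include <atomic>
#include <coroutine>

// Hypothetical stand-in to illustrate the awaiter fast path named in the
// commit message; not the library's implementation.
struct tiny_lock
{
    std::atomic<bool> locked{false};

    bool try_lock() { return !locked.exchange(true, std::memory_order::acquire); }
    void unlock() { locked.store(false, std::memory_order::release); }

    struct lock_operation
    {
        tiny_lock& l;

        // Fast path: if try_lock() succeeds the coroutine never suspends and
        // await_suspend() below is skipped entirely.
        bool await_ready() const noexcept { return l.try_lock(); }

        // Contended path. A real implementation (see src/mutex.cpp in this
        // diff) enqueues the coroutine handle as a waiter; this toy just
        // spins and then declines to suspend.
        bool await_suspend(std::coroutine_handle<>) noexcept
        {
            while (!l.try_lock()) {}
            return false; // resume immediately, now holding the lock
        }

        void await_resume() const noexcept {}
    };

    lock_operation lock() { return lock_operation{*this}; }
};
```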
@@ -35,11 +35,15 @@ done
 cp .githooks/readme-template.md README.md
 
 template_contents=$(cat 'README.md')
-coro_event_cpp_contents=$(cat 'examples/coro_event.cpp')
-echo "${template_contents/\$\{EXAMPLE_CORO_EVENT_CPP\}/$coro_event_cpp_contents}" > README.md
+example_contents=$(cat 'examples/coro_event.cpp')
+echo "${template_contents/\$\{EXAMPLE_CORO_EVENT_CPP\}/$example_contents}" > README.md
 
 template_contents=$(cat 'README.md')
-coro_latch_cpp_contents=$(cat 'examples/coro_latch.cpp')
-echo "${template_contents/\$\{EXAMPLE_CORO_LATCH_CPP\}/$coro_latch_cpp_contents}" > README.md
+example_contents=$(cat 'examples/coro_latch.cpp')
+echo "${template_contents/\$\{EXAMPLE_CORO_LATCH_CPP\}/$example_contents}" > README.md
 
+template_contents=$(cat 'README.md')
+example_contents=$(cat 'examples/coro_mutex.cpp')
+echo "${template_contents/\$\{EXAMPLE_CORO_MUTEX_CPP\}/$example_contents}" > README.md
+
 git add README.md
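(The substitution in the hook is bash's `${parameter/pattern/string}` expansion: the backslash-escaped `\$\{EXAMPLE_..._CPP\}` makes the placeholder match literally rather than expand as a variable, and the matched text is replaced with the contents of `$example_contents` before the result is written back to README.md.)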
@@ -20,8 +20,8 @@
 - coro::latch
 - coro::mutex
 - coro::sync_wait(awaitable)
-- coro::when_all_awaitabe(awaitable...) -> coro::task<T>...
-- coro::when_all(awaitable...) -> T... (Future)
+- coro::when_all(awaitable...) -> coro::task<T>...
+- coro::when_all_results(awaitable...) -> T... (Future)
 * Schedulers
     - coro::thread_pool for coroutine cooperative multitasking
     - coro::io_scheduler for driving i/o events, uses thread_pool for coroutine execution
@@ -73,17 +73,30 @@ Expected output:
 ```bash
 $ ./examples/coro_latch
 latch task is now waiting on all children tasks...
-work task 1 is working...
-work task 1 is done, counting down on the latch
-work task 2 is working...
-work task 2 is done, counting down on the latch
-work task 3 is working...
-work task 3 is done, counting down on the latch
-work task 4 is working...
-work task 4 is done, counting down on the latch
-work task 5 is working...
-work task 5 is done, counting down on the latch
-latch task children tasks completed, resuming.
+worker task 1 is working...
+worker task 2 is working...
+worker task 3 is working...
+worker task 4 is working...
+worker task 5 is working...
+worker task 1 is done, counting down on the latch
+worker task 2 is done, counting down on the latch
+worker task 3 is done, counting down on the latch
+worker task 4 is done, counting down on the latch
+worker task 5 is done, counting down on the latch
+latch task dependency tasks completed, resuming.
+```
+
+### coro::mutex
+
+```C++
+${EXAMPLE_CORO_MUTEX_CPP}
+```
+
+Expected output, note that the output will vary from run to run based on how the thread pool workers
+are scheduled and in what order they acquire the mutex lock:
+```bash
+$ ./examples/coro_mutex
+1, 2, 3, 4, 5, 6, 7, 8, 10, 9, 12, 11, 13, 14, 15, 16, 17, 18, 19, 21, 22, 20, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 47, 48, 49, 46, 50, 51, 52, 53, 54, 55, 57, 58, 59, 56, 60, 62, 61, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
 ```
 
 ## Usage
README.md | 126
@@ -20,8 +20,8 @@
 - coro::latch
 - coro::mutex
 - coro::sync_wait(awaitable)
-- coro::when_all_awaitabe(awaitable...) -> coro::task<T>...
-- coro::when_all(awaitable...) -> T... (Future)
+- coro::when_all(awaitable...) -> coro::task<T>...
+- coro::when_all_results(awaitable...) -> T... (Future)
 * Schedulers
     - coro::thread_pool for coroutine cooperative multitasking
     - coro::io_scheduler for driving i/o events, uses thread_pool for coroutine execution
@@ -68,11 +68,9 @@ int main()
         co_return;
     };
 
-    // Synchronously wait until all the tasks are completed, this is intentionally
-    // starting the first 3 wait tasks prior to the final set task so the waiters suspend
-    // their coroutine before being resumed.
-    coro::sync_wait(
-        coro::when_all_awaitable(make_wait_task(e, 1), make_wait_task(e, 2), make_wait_task(e, 3), make_set_task(e)));
+    // Given more than a single task to synchronously wait on, use when_all() to execute all the
+    // tasks concurrently on this thread and then sync_wait() for them all to complete.
+    coro::sync_wait(coro::when_all(make_wait_task(e, 1), make_wait_task(e, 2), make_wait_task(e, 3), make_set_task(e)));
 }
 ```
 
@@ -98,35 +96,41 @@ have completed before proceeding.
 
 int main()
 {
+    // Complete worker tasks faster on a thread pool, using the io_scheduler version so the worker
+    // tasks can yield for a specific amount of time to mimic difficult work. The pool is only
+    // setup with a single thread to showcase yield_for().
+    coro::io_scheduler tp{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
+
     // This task will wait until the given latch setters have completed.
     auto make_latch_task = [](coro::latch& l) -> coro::task<void> {
+        // It seems like the dependent worker tasks could be created here, but in that case it would
+        // be superior to simply do: `co_await coro::when_all(tasks);`
+        // It is also important to note that the last dependent task will resume the waiting latch
+        // task prior to actually completing -- thus the dependent task's frame could be destroyed
+        // by the latch task completing before it gets a chance to finish after calling resume() on
+        // the latch task!
+
         std::cout << "latch task is now waiting on all children tasks...\n";
         co_await l;
-        std::cout << "latch task children tasks completed, resuming.\n";
+        std::cout << "latch task dependency tasks completed, resuming.\n";
         co_return;
     };
 
     // This task does 'work' and counts down on the latch when completed. The final child task to
     // complete will end up resuming the latch task when the latch's count reaches zero.
-    auto make_worker_task = [](coro::latch& l, int64_t i) -> coro::task<void> {
-        std::cout << "work task " << i << " is working...\n";
-        std::cout << "work task " << i << " is done, counting down on the latch\n";
+    auto make_worker_task = [](coro::io_scheduler& tp, coro::latch& l, int64_t i) -> coro::task<void> {
+        // Schedule the worker task onto the thread pool.
+        co_await tp.schedule();
+        std::cout << "worker task " << i << " is working...\n";
+        // Do some expensive calculations, yield to mimic work...! Its also important to never use
+        // std::this_thread::sleep_for() within the context of coroutines, it will block the thread
+        // and other tasks that are ready to execute will be blocked.
+        co_await tp.yield_for(std::chrono::milliseconds{i * 20});
+        std::cout << "worker task " << i << " is done, counting down on the latch\n";
         l.count_down();
         co_return;
     };
 
-    // It is important to note that the latch task must not 'own' the worker tasks within its
-    // coroutine stack frame because the final worker task thread will execute the latch task upon
-    // setting the latch counter to zero. This means that:
-    // 1) final worker task calls count_down() => 0
-    // 2) resume execution of latch task to its next suspend point or completion, IF completed
-    //    then this coroutine's stack frame is destroyed!
-    // 3) final worker task continues exection
-    // If the latch task 'own's the worker task objects then they will destruct prior to step (3)
-    // if the latch task completes on that resume, and it will be attempting to execute an already
-    // destructed coroutine frame.
-    // This example correctly has the latch task and all its waiting tasks on the same scope/frame
-    // to avoid this issue.
     const int64_t num_tasks{5};
     coro::latch l{num_tasks};
     std::vector<coro::task<void>> tasks{};
@@ -135,11 +139,11 @@ int main()
     tasks.emplace_back(make_latch_task(l));
     for (int64_t i = 1; i <= num_tasks; ++i)
     {
-        tasks.emplace_back(make_worker_task(l, i));
+        tasks.emplace_back(make_worker_task(tp, l, i));
     }
 
     // Wait for all tasks to complete.
-    coro::sync_wait(coro::when_all_awaitable(tasks));
+    coro::sync_wait(coro::when_all(tasks));
 }
 ```
 
@@ -147,17 +151,67 @@ Expected output:
 ```bash
 $ ./examples/coro_latch
 latch task is now waiting on all children tasks...
-work task 1 is working...
-work task 1 is done, counting down on the latch
-work task 2 is working...
-work task 2 is done, counting down on the latch
-work task 3 is working...
-work task 3 is done, counting down on the latch
-work task 4 is working...
-work task 4 is done, counting down on the latch
-work task 5 is working...
-work task 5 is done, counting down on the latch
-latch task children tasks completed, resuming.
+worker task 1 is working...
+worker task 2 is working...
+worker task 3 is working...
+worker task 4 is working...
+worker task 5 is working...
+worker task 1 is done, counting down on the latch
+worker task 2 is done, counting down on the latch
+worker task 3 is done, counting down on the latch
+worker task 4 is done, counting down on the latch
+worker task 5 is done, counting down on the latch
+latch task dependency tasks completed, resuming.
+```
+
+### coro::mutex
+
+```C++
+#include <coro/coro.hpp>
+#include <iostream>
+
+int main()
+{
+    coro::thread_pool tp{coro::thread_pool::options{.thread_count = 4}};
+    std::vector<uint64_t> output{};
+    coro::mutex mutex;
+
+    auto make_critical_section_task = [&](uint64_t i) -> coro::task<void> {
+        co_await tp.schedule();
+        // To acquire a mutex lock co_await its lock() function. Upon acquiring the lock the
+        // lock() function returns a coro::scoped_lock that holds the mutex and automatically
+        // unlocks the mutex upon destruction. This behaves just like std::scoped_lock.
+        {
+            auto scoped_lock = co_await mutex.lock();
+            output.emplace_back(i);
+        } // <-- scoped lock unlocks the mutex here.
+        co_return;
+    };
+
+    const size_t num_tasks{100};
+    std::vector<coro::task<void>> tasks{};
+    tasks.reserve(num_tasks);
+    for (size_t i = 1; i <= num_tasks; ++i)
+    {
+        tasks.emplace_back(make_critical_section_task(i));
+    }
+
+    coro::sync_wait(coro::when_all(tasks));
+
+    // The output will be variable per run depending on how the tasks are picked up on the
+    // thread pool workers.
+    for (const auto& value : output)
+    {
+        std::cout << value << ", ";
+    }
+}
+```
+
+Expected output, note that the output will vary from run to run based on how the thread pool workers
+are scheduled and in what order they acquire the mutex lock:
+```bash
+$ ./examples/coro_mutex
+1, 2, 3, 4, 5, 6, 7, 8, 10, 9, 12, 11, 13, 14, 15, 16, 17, 18, 19, 21, 22, 20, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 47, 48, 49, 46, 50, 51, 52, 53, 54, 55, 57, 58, 59, 56, 60, 62, 61, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100,
 ```
 
 ## Usage
@@ -9,9 +9,14 @@ add_executable(coro_latch coro_latch.cpp)
 target_compile_features(coro_latch PUBLIC cxx_std_20)
 target_link_libraries(coro_latch PUBLIC libcoro)
 
+add_executable(coro_mutex coro_mutex.cpp)
+target_compile_features(coro_mutex PUBLIC cxx_std_20)
+target_link_libraries(coro_mutex PUBLIC libcoro)
+
 if(${CMAKE_CXX_COMPILER_ID} MATCHES "GNU")
     target_compile_options(coro_event PUBLIC -fcoroutines -Wall -Wextra -pipe)
     target_compile_options(coro_latch PUBLIC -fcoroutines -Wall -Wextra -pipe)
+    target_compile_options(coro_mutex PUBLIC -fcoroutines -Wall -Wextra -pipe)
 elseif(${CMAKE_CXX_COMPILER_ID} MATCHES "Clang")
     message(FATAL_ERROR "Clang is currently not supported.")
 else()
@@ -20,9 +20,7 @@ int main()
         co_return;
     };
 
-    // Synchronously wait until all the tasks are completed, this is intentionally
-    // starting the first 3 wait tasks prior to the final set task so the waiters suspend
-    // their coroutine before being resumed.
-    coro::sync_wait(
-        coro::when_all_awaitable(make_wait_task(e, 1), make_wait_task(e, 2), make_wait_task(e, 3), make_set_task(e)));
+    // Given more than a single task to synchronously wait on, use when_all() to execute all the
+    // tasks concurrently on this thread and then sync_wait() for them all to complete.
+    coro::sync_wait(coro::when_all(make_wait_task(e, 1), make_wait_task(e, 2), make_wait_task(e, 3), make_set_task(e)));
 }
@@ -3,35 +3,41 @@
 
 int main()
 {
+    // Complete worker tasks faster on a thread pool, using the io_scheduler version so the worker
+    // tasks can yield for a specific amount of time to mimic difficult work. The pool is only
+    // setup with a single thread to showcase yield_for().
+    coro::io_scheduler tp{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 1}}};
+
     // This task will wait until the given latch setters have completed.
     auto make_latch_task = [](coro::latch& l) -> coro::task<void> {
+        // It seems like the dependent worker tasks could be created here, but in that case it would
+        // be superior to simply do: `co_await coro::when_all(tasks);`
+        // It is also important to note that the last dependent task will resume the waiting latch
+        // task prior to actually completing -- thus the dependent task's frame could be destroyed
+        // by the latch task completing before it gets a chance to finish after calling resume() on
+        // the latch task!
+
         std::cout << "latch task is now waiting on all children tasks...\n";
         co_await l;
-        std::cout << "latch task children tasks completed, resuming.\n";
+        std::cout << "latch task dependency tasks completed, resuming.\n";
         co_return;
     };
 
     // This task does 'work' and counts down on the latch when completed. The final child task to
     // complete will end up resuming the latch task when the latch's count reaches zero.
-    auto make_worker_task = [](coro::latch& l, int64_t i) -> coro::task<void> {
-        std::cout << "work task " << i << " is working...\n";
-        std::cout << "work task " << i << " is done, counting down on the latch\n";
+    auto make_worker_task = [](coro::io_scheduler& tp, coro::latch& l, int64_t i) -> coro::task<void> {
+        // Schedule the worker task onto the thread pool.
+        co_await tp.schedule();
+        std::cout << "worker task " << i << " is working...\n";
+        // Do some expensive calculations, yield to mimic work...! Its also important to never use
+        // std::this_thread::sleep_for() within the context of coroutines, it will block the thread
+        // and other tasks that are ready to execute will be blocked.
+        co_await tp.yield_for(std::chrono::milliseconds{i * 20});
+        std::cout << "worker task " << i << " is done, counting down on the latch\n";
         l.count_down();
         co_return;
     };
 
-    // It is important to note that the latch task must not 'own' the worker tasks within its
-    // coroutine stack frame because the final worker task thread will execute the latch task upon
-    // setting the latch counter to zero. This means that:
-    // 1) final worker task calls count_down() => 0
-    // 2) resume execution of latch task to its next suspend point or completion, IF completed
-    //    then this coroutine's stack frame is destroyed!
-    // 3) final worker task continues exection
-    // If the latch task 'own's the worker task objects then they will destruct prior to step (3)
-    // if the latch task completes on that resume, and it will be attempting to execute an already
-    // destructed coroutine frame.
-    // This example correctly has the latch task and all its waiting tasks on the same scope/frame
-    // to avoid this issue.
     const int64_t num_tasks{5};
     coro::latch l{num_tasks};
     std::vector<coro::task<void>> tasks{};
@@ -40,9 +46,9 @@ int main()
     tasks.emplace_back(make_latch_task(l));
     for (int64_t i = 1; i <= num_tasks; ++i)
     {
-        tasks.emplace_back(make_worker_task(l, i));
+        tasks.emplace_back(make_worker_task(tp, l, i));
     }
 
     // Wait for all tasks to complete.
-    coro::sync_wait(coro::when_all_awaitable(tasks));
+    coro::sync_wait(coro::when_all(tasks));
 }
examples/coro_mutex.cpp | 38 (new file)
@@ -0,0 +1,38 @@
+#include <coro/coro.hpp>
+#include <iostream>
+
+int main()
+{
+    coro::thread_pool tp{coro::thread_pool::options{.thread_count = 4}};
+    std::vector<uint64_t> output{};
+    coro::mutex mutex;
+
+    auto make_critical_section_task = [&](uint64_t i) -> coro::task<void> {
+        co_await tp.schedule();
+        // To acquire a mutex lock co_await its lock() function. Upon acquiring the lock the
+        // lock() function returns a coro::scoped_lock that holds the mutex and automatically
+        // unlocks the mutex upon destruction. This behaves just like std::scoped_lock.
+        {
+            auto scoped_lock = co_await mutex.lock();
+            output.emplace_back(i);
+        } // <-- scoped lock unlocks the mutex here.
+        co_return;
+    };
+
+    const size_t num_tasks{100};
+    std::vector<coro::task<void>> tasks{};
+    tasks.reserve(num_tasks);
+    for (size_t i = 1; i <= num_tasks; ++i)
+    {
+        tasks.emplace_back(make_critical_section_task(i));
+    }
+
+    coro::sync_wait(coro::when_all(tasks));
+
+    // The output will be variable per run depending on how the tasks are picked up on the
+    // thread pool workers.
+    for (const auto& value : output)
+    {
+        std::cout << value << ", ";
+    }
+}
@@ -1,6 +1,7 @@
 #pragma once
 
 #include "coro/event.hpp"
+#include "coro/thread_pool.hpp"
 
 #include <atomic>
 
@@ -41,6 +42,7 @@
     auto remaining() const noexcept -> std::size_t { return m_count.load(std::memory_order::acquire); }
 
     /**
+     * If the latch counter goes to zero then the task awaiting the latch is resumed.
      * @param n The number of tasks to complete towards the latch, defaults to 1.
      */
     auto count_down(std::ptrdiff_t n = 1) noexcept -> void
@@ -51,6 +53,20 @@
         }
     }
 
+    /**
+     * If the latch counter goes to then the task awaiting the latch is resumed on the given
+     * thread pool.
+     * @param tp The thread pool to schedule the task that is waiting on the latch on.
+     * @param n The number of tasks to complete towards the latch, defaults to 1.
+     */
+    auto count_down(coro::thread_pool& tp, std::ptrdiff_t n = 1) noexcept -> void
+    {
+        if (m_count.fetch_sub(n, std::memory_order::acq_rel) <= n)
+        {
+            m_event.set(tp);
+        }
+    }
+
     auto operator co_await() const noexcept -> event::awaiter { return m_event.operator co_await(); }
 
 private:
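The new overload resumes the latch waiter via the event's thread-pool set() instead of inline on the thread that performed the final decrement. A minimal usage sketch against the patched header (names other than the coro:: APIs are illustrative):

```C++
#include <coro/coro.hpp>

// Sketch: the final count_down(tp) hands the waiting task to the pool, so the
// worker's own thread is not borrowed to run the waiter's continuation.
coro::task<void> worker(coro::thread_pool& tp, coro::latch& l)
{
    co_await tp.schedule();
    // ... do some work ...
    l.count_down(tp); // waiter resumes on a pool thread, not inline here
    co_return;
}
```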
@@ -2,36 +2,55 @@
 
 #include <atomic>
 #include <coroutine>
-#include <deque>
 #include <mutex>
 
 namespace coro
 {
+class mutex;
+
+class scoped_lock
+{
+    friend class mutex;
+
+public:
+    enum class lock_strategy
+    {
+        /// The lock is already acquired, adopt it as the new owner.
+        adopt
+    };
+
+    explicit scoped_lock(mutex& m, lock_strategy strategy = lock_strategy::adopt) : m_mutex(&m)
+    {
+        // Future -> support acquiring the lock? Not sure how to do that without being able to
+        // co_await in the constructor.
+        (void)strategy;
+    }
+    ~scoped_lock();
+
+    scoped_lock(const scoped_lock&) = delete;
+    scoped_lock(scoped_lock&& other) : m_mutex(std::exchange(other.m_mutex, nullptr)) {}
+    auto operator=(const scoped_lock&) -> scoped_lock& = delete;
+    auto operator=(scoped_lock&& other) -> scoped_lock&
+    {
+        if (std::addressof(other) != this)
+        {
+            m_mutex = std::exchange(other.m_mutex, nullptr);
+        }
+        return *this;
+    }
+
+    /**
+     * Unlocks the scoped lock prior to it going out of scope.
+     */
+    auto unlock() -> void;
+
+private:
+    mutex* m_mutex{nullptr};
+};
+
 class mutex
 {
 public:
-    struct scoped_lock
-    {
-        friend class mutex;
-
-        scoped_lock(mutex& m) : m_mutex(m) {}
-        ~scoped_lock() { m_mutex.unlock(); }
-
-        mutex& m_mutex;
-    };
-
-    struct awaiter
-    {
-        awaiter(mutex& m) noexcept : m_mutex(m) {}
-
-        auto await_ready() const noexcept -> bool;
-        auto await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> bool;
-        auto await_resume() noexcept -> scoped_lock;
-
-        mutex& m_mutex;
-        std::coroutine_handle<> m_awaiting_coroutine;
-    };
-
     explicit mutex() noexcept = default;
     ~mutex() = default;
 
@@ -40,16 +59,45 @@ public:
     auto operator=(const mutex&) -> mutex& = delete;
     auto operator=(mutex&&) -> mutex& = delete;
 
-    auto lock() -> awaiter;
+    struct lock_operation
+    {
+        explicit lock_operation(mutex& m) : m_mutex(m) {}
+
+        auto await_ready() const noexcept -> bool { return m_mutex.try_lock(); }
+        auto await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> bool;
+        auto await_resume() noexcept -> scoped_lock { return scoped_lock{m_mutex}; }
+
+        mutex& m_mutex;
+        std::coroutine_handle<> m_awaiting_coroutine;
+        lock_operation* m_next{nullptr};
+    };
+
+    /**
+     * To acquire the mutex's lock co_await this function. Upon acquiring the lock it returns
+     * a coro::scoped_lock which will hold the mutex until the coro::scoped_lock destructs.
+     * @return A co_await'able operation to acquire the mutex.
+     */
+    [[nodiscard]] auto lock() -> lock_operation { return lock_operation{*this}; };
+
+    /**
+     * Attempts to lock the mutex.
+     * @return True if the mutex lock was acquired, otherwise false.
+     */
     auto try_lock() -> bool;
 
+    /**
+     * Releases the mutex's lock.
+     */
     auto unlock() -> void;
 
 private:
-    friend class scoped_lock;
+    // friend class scoped_lock;
+    friend class lock_operation;
+
     std::atomic<bool> m_state{false};
     std::mutex m_waiter_mutex{};
-    std::deque<awaiter*> m_waiter_list{};
+    lock_operation* m_head_waiter{nullptr};
+    lock_operation* m_tail_waiter{nullptr};
 };
 
 } // namespace coro
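The net effect of the header change: lock() now returns an awaitable lock_operation whose await_ready() is the try_lock() fast path from the commit message, and the scoped_lock it yields is movable and can be released early. A minimal usage sketch, assuming the patched library:

```C++
#include <coro/coro.hpp>

#include <cstdint>
#include <vector>

// Sketch only, using the patched API above: an uncontended co_await m.lock()
// never suspends, and the returned scoped_lock supports early release.
coro::task<void> touch_shared(coro::mutex& m, std::vector<uint64_t>& shared, uint64_t i)
{
    auto lk = co_await m.lock();
    shared.emplace_back(i);
    lk.unlock(); // optional early release; lk's destructor is a no-op afterwards
    co_return;
}
```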
@@ -1,6 +1,7 @@
 #pragma once
 
 #include "coro/concepts/awaitable.hpp"
+#include "coro/when_all.hpp"
 
 #include <condition_variable>
 #include <mutex>
@@ -183,28 +184,28 @@ private:
 };
 
 template<
-    concepts::awaitable awaitable,
-    typename return_type = concepts::awaitable_traits<awaitable>::awaiter_return_type>
-static auto make_sync_wait_task(awaitable&& a) -> sync_wait_task<return_type>
+    concepts::awaitable awaitable_type,
+    typename return_type = concepts::awaitable_traits<awaitable_type>::awaiter_return_type>
+static auto make_sync_wait_task(awaitable_type&& a) -> sync_wait_task<return_type>
 {
     if constexpr (std::is_void_v<return_type>)
     {
-        co_await std::forward<awaitable>(a);
+        co_await std::forward<awaitable_type>(a);
         co_return;
     }
     else
     {
-        co_yield co_await std::forward<awaitable>(a);
+        co_yield co_await std::forward<awaitable_type>(a);
     }
 }
 
 } // namespace detail
 
-template<concepts::awaitable awaitable>
-auto sync_wait(awaitable&& a) -> decltype(auto)
+template<concepts::awaitable awaitable_type>
+auto sync_wait(awaitable_type&& a) -> decltype(auto)
 {
     detail::sync_wait_event e{};
-    auto task = detail::make_sync_wait_task(std::forward<awaitable>(a));
+    auto task = detail::make_sync_wait_task(std::forward<awaitable_type>(a));
     task.start(e);
     e.wait();
 
@@ -6,6 +6,7 @@
 #include <atomic>
 #include <coroutine>
 #include <tuple>
+#include <vector>
 
 namespace coro
 {
@@ -453,7 +454,7 @@ static auto make_when_all_task(awaitable&& a) -> when_all_task<return_type>
 } // namespace detail
 
 template<concepts::awaitable... awaitables_type>
-[[nodiscard]] auto when_all_awaitable(awaitables_type&&... awaitables)
+[[nodiscard]] auto when_all(awaitables_type&&... awaitables)
 {
     return detail::when_all_ready_awaitable<std::tuple<
         detail::when_all_task<typename concepts::awaitable_traits<awaitables_type>::awaiter_return_type>...>>(
@@ -461,9 +462,10 @@ template<concepts::awaitable... awaitables_type>
 }
 
 template<
-    concepts::awaitable awaitable,
-    typename return_type = concepts::awaitable_traits<awaitable>::awaiter_return_type>
-[[nodiscard]] auto when_all_awaitable(std::vector<awaitable>& awaitables)
+    concepts::awaitable awaitable_type,
+    typename return_type = concepts::awaitable_traits<awaitable_type>::awaiter_return_type,
+    typename allocator_type = std::allocator<awaitable_type>>
+[[nodiscard]] auto when_all(std::vector<awaitable_type, allocator_type>& awaitables)
     -> detail::when_all_ready_awaitable<std::vector<detail::when_all_task<return_type>>>
 {
     std::vector<detail::when_all_task<return_type>> tasks;
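After the rename, both forms share the name when_all(): the variadic overload returns a std::tuple of completed tasks, while the vector overload (with its new allocator parameter) handles runtime-sized batches. A short usage sketch, assuming the patched headers and modeled on the tests later in this diff:

```C++
#include <coro/coro.hpp>

#include <tuple>
#include <vector>

coro::task<int> one() { co_return 1; }

int main()
{
    // Fixed arity: when_all(...) yields a std::tuple of completed tasks.
    auto tup = coro::sync_wait(coro::when_all(one(), one()));

    // Runtime arity: the std::vector overload added in this hunk.
    std::vector<coro::task<int>> tasks;
    tasks.emplace_back(one());
    tasks.emplace_back(one());
    auto done = coro::sync_wait(coro::when_all(tasks));

    int sum{0};
    std::apply([&](auto&&... t) { ((sum += t.return_value()), ...); }, tup);
    for (auto& t : done) { sum += t.return_value(); }
    return sum == 4 ? 0 : 1;
}
```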
src/mutex.cpp | 106
@@ -2,9 +2,49 @@
 
 namespace coro
 {
-auto mutex::lock() -> awaiter
+scoped_lock::~scoped_lock()
 {
-    return awaiter(*this);
+    if (m_mutex != nullptr)
+    {
+        m_mutex->unlock();
+    }
+}
+
+auto scoped_lock::unlock() -> void
+{
+    if (m_mutex != nullptr)
+    {
+        m_mutex->unlock();
+        m_mutex = nullptr;
+    }
+}
+
+auto mutex::lock_operation::await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> bool
+{
+    std::scoped_lock lk{m_mutex.m_waiter_mutex};
+    if (m_mutex.try_lock())
+    {
+        // If we just straight up acquire the lock, don't suspend.
+        return false;
+    }
+
+    // The lock is currently held, so append ourself to the waiter list.
+    if (m_mutex.m_tail_waiter == nullptr)
+    {
+        // If there are no current waiters this lock operation is the head and tail.
+        m_mutex.m_head_waiter = this;
+        m_mutex.m_tail_waiter = this;
+    }
+    else
+    {
+        // Update the current tail pointer to ourself.
+        m_mutex.m_tail_waiter->m_next = this;
+        // Update the tail pointer on the mutex to ourself.
+        m_mutex.m_tail_waiter = this;
+    }
+
+    m_awaiting_coroutine = awaiting_coroutine;
+    return true;
 }
 
 auto mutex::try_lock() -> bool
@@ -15,58 +55,36 @@ auto mutex::try_lock() -> bool
 
 auto mutex::unlock() -> void
 {
-    // Get the next waiter before releasing the lock.
-    awaiter* next{nullptr};
+    // Acquire the next waiter before releasing _or_ moving ownship of the lock.
+    lock_operation* next{nullptr};
     {
         std::scoped_lock lk{m_waiter_mutex};
-        if (!m_waiter_list.empty())
+        if (m_head_waiter != nullptr)
         {
-            next = m_waiter_list.front();
-            m_waiter_list.pop_front();
+            next = m_head_waiter;
+            m_head_waiter = m_head_waiter->m_next;
+
+            // Null out the tail waiter if this was the last waiter.
+            if (m_head_waiter == nullptr)
+            {
+                m_tail_waiter = nullptr;
+            }
+        }
+        else
+        {
+            // If there were no waiters, release the lock. This is done under the waiter list being
+            // locked so another thread doesn't add themselves to the waiter list before the lock
+            // is actually released.
+            m_state.exchange(false, std::memory_order::release);
         }
     }
 
-    // Unlock the mutex
-    m_state.exchange(false, std::memory_order::release);
-
-    // If there was a awaiter, resume it. Here would be good place to _resume_ the waiter onto
-    // the thread pool to distribute the work, this currently implementation will end up having
-    // every waiter on the mutex jump onto a single thread.
+    // If there were any waiters resume the next in line, this will pass ownership of the mutex to
+    // that waiter, only the final waiter in the list actually unlocks the mutex.
     if (next != nullptr)
     {
         next->m_awaiting_coroutine.resume();
     }
 }
 
-auto mutex::awaiter::await_ready() const noexcept -> bool
-{
-    return m_mutex.try_lock();
-}
-
-auto mutex::awaiter::await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> bool
-{
-    m_awaiting_coroutine = awaiting_coroutine;
-
-    {
-        // Its possible between await_ready() and await_suspend() the lock was released,
-        // if thats the case acquire it immediately.
-        std::scoped_lock lk{m_mutex.m_waiter_mutex};
-        if (m_mutex.m_waiter_list.empty() && m_mutex.try_lock())
-        {
-            return false;
-        }
-
-        // Ok its still held, add ourself to the waiter list.
-        m_mutex.m_waiter_list.emplace_back(this);
-    }
-
-    // The mutex is still locked and we've added this to the waiter list, suspend now.
-    return true;
-}
-
-auto mutex::awaiter::await_resume() noexcept -> scoped_lock
-{
-    return scoped_lock{m_mutex};
-}
-
 } // namespace coro
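A design note on the rewritten unlock(): when a waiter is queued, m_state is never cleared; ownership passes directly to the resumed waiter, and only an unlock() that finds the waiter list empty actually releases m_state. Since both the emptiness check and the release happen under m_waiter_mutex, a new lock_operation cannot enqueue itself between the check and the release, which is exactly the window the old deque-based awaiter had to re-check for in its await_suspend().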
@@ -64,7 +64,7 @@ TEST_CASE("benchmark counter func coro::sync_wait(awaitable)", "[benchmark]")
     REQUIRE(counter == iterations);
 }
 
-TEST_CASE("benchmark counter func coro::sync_wait(coro::when_all_awaitable(awaitable)) x10", "[benchmark]")
+TEST_CASE("benchmark counter func coro::sync_wait(coro::when_all(awaitable)) x10", "[benchmark]")
 {
     constexpr std::size_t iterations = default_iterations;
     uint64_t counter{0};

@@ -74,13 +74,12 @@ TEST_CASE("benchmark counter func coro::sync_wait(coro::when_all_awaitable(await
 
     for (std::size_t i = 0; i < iterations; i += 10)
     {
-        auto tasks = coro::sync_wait(coro::when_all_awaitable(f(), f(), f(), f(), f(), f(), f(), f(), f(), f()));
+        auto tasks = coro::sync_wait(coro::when_all(f(), f(), f(), f(), f(), f(), f(), f(), f(), f()));
 
         std::apply([&counter](auto&&... t) { ((counter += t.return_value()), ...); }, tasks);
     }
 
-    print_stats(
-        "benchmark counter func coro::sync_wait(coro::when_all_awaitable(awaitable))", iterations, start, sc::now());
+    print_stats("benchmark counter func coro::sync_wait(coro::when_all(awaitable))", iterations, start, sc::now());
     REQUIRE(counter == iterations);
 }
 

@@ -171,7 +170,7 @@ TEST_CASE("benchmark counter task scheduler{1} yield", "[benchmark]")
         tasks.emplace_back(make_task());
     }
 
-    coro::sync_wait(coro::when_all_awaitable(tasks));
+    coro::sync_wait(coro::when_all(tasks));
 
     auto stop = sc::now();
     print_stats("benchmark counter task scheduler{1} yield", ops, start, stop);

@@ -204,7 +203,7 @@ TEST_CASE("benchmark counter task scheduler{1} yield_for", "[benchmark]")
         tasks.emplace_back(make_task());
     }
 
-    coro::sync_wait(coro::when_all_awaitable(tasks));
+    coro::sync_wait(coro::when_all(tasks));
 
     auto stop = sc::now();
     print_stats("benchmark counter task scheduler{1} yield", ops, start, stop);

@@ -252,7 +251,7 @@ TEST_CASE("benchmark counter task scheduler await event from another coroutine",
         tasks.emplace_back(resume_func(i));
     }
 
-    coro::sync_wait(coro::when_all_awaitable(tasks));
+    coro::sync_wait(coro::when_all(tasks));
 
     auto stop = sc::now();
     print_stats("benchmark counter task scheduler await event from another coroutine", ops, start, stop);

@@ -433,7 +432,7 @@ TEST_CASE("benchmark tcp_server echo server", "[benchmark]")
         {
             c.tasks.emplace_back(make_client_task(c));
         }
-        coro::sync_wait(coro::when_all_awaitable(c.tasks));
+        coro::sync_wait(coro::when_all(c.tasks));
         c.scheduler.shutdown();
     }});
 }
@@ -78,5 +78,5 @@ TEST_CASE("tcp_server ping server", "[tcp_server]")
         co_return;
     };
 
-    coro::sync_wait(coro::when_all_awaitable(make_server_task(), make_client_task()));
+    coro::sync_wait(coro::when_all(make_server_task(), make_client_task()));
 }
@@ -41,7 +41,7 @@ TEST_CASE("udp one way")
         co_return;
     };
 
-    coro::sync_wait(coro::when_all_awaitable(make_recv_task(), make_send_task()));
+    coro::sync_wait(coro::when_all(make_recv_task(), make_send_task()));
 }
 
 TEST_CASE("udp echo peers")

@@ -110,7 +110,7 @@ TEST_CASE("udp echo peers")
         co_return;
     };
 
-    coro::sync_wait(coro::when_all_awaitable(
+    coro::sync_wait(coro::when_all(
         make_peer_task(8081, 8080, false, peer2_msg, peer1_msg),
         make_peer_task(8080, 8081, true, peer1_msg, peer2_msg)));
 }
@@ -49,7 +49,7 @@ TEST_CASE("io_scheduler submit mutiple tasks", "[io_scheduler]")
         tasks.emplace_back(make_task());
     }
 
-    coro::sync_wait(coro::when_all_awaitable(tasks));
+    coro::sync_wait(coro::when_all(tasks));
 
     REQUIRE(counter == n);
 }

@@ -79,8 +79,7 @@ TEST_CASE("io_scheduler task with multiple events", "[io_scheduler]")
         e.set();
     };
 
-    coro::sync_wait(
-        coro::when_all_awaitable(make_wait_task(), make_set_task(e1), make_set_task(e2), make_set_task(e3)));
+    coro::sync_wait(coro::when_all(make_wait_task(), make_set_task(e1), make_set_task(e2), make_set_task(e3)));
 
     REQUIRE(counter == 3);
 

@@ -107,7 +106,7 @@ TEST_CASE("io_scheduler task with read poll", "[io_scheduler]")
         co_return;
     };
 
-    coro::sync_wait(coro::when_all_awaitable(make_poll_read_task(), make_poll_write_task()));
+    coro::sync_wait(coro::when_all(make_poll_read_task(), make_poll_write_task()));
 
     s.shutdown();
     REQUIRE(s.empty());

@@ -134,7 +133,7 @@ TEST_CASE("io_scheduler task with read poll with timeout", "[io_scheduler]")
         co_return;
     };
 
-    coro::sync_wait(coro::when_all_awaitable(make_poll_read_task(), make_poll_write_task()));
+    coro::sync_wait(coro::when_all(make_poll_read_task(), make_poll_write_task()));
 
     s.shutdown();
     REQUIRE(s.empty());

@@ -182,7 +181,7 @@ TEST_CASE("io_scheduler task with read poll timeout", "[io_scheduler]")
     //     co_return;
     // };
 
-    // coro::sync_wait(coro::when_all_awaitable(make_poll_task(), make_close_task()));
+    // coro::sync_wait(coro::when_all(make_poll_task(), make_close_task()));
 
     // s.shutdown();
     // REQUIRE(s.empty());

@@ -214,7 +213,7 @@ TEST_CASE("io_scheduler separate thread resume", "[io_scheduler]")
         co_return;
     };
 
-    coro::sync_wait(coro::when_all_awaitable(make_s1_task(), make_s2_task()));
+    coro::sync_wait(coro::when_all(make_s1_task(), make_s2_task()));
 
     s1.shutdown();
     REQUIRE(s1.empty());

@@ -307,8 +306,7 @@ TEST_CASE("io_scheduler with basic task", "[io_scheduler]")
     auto func = [&]() -> coro::task<int> {
         co_await s.schedule();
 
-        auto output_tasks =
-            co_await coro::when_all_awaitable(add_data(1), add_data(1), add_data(1), add_data(1), add_data(1));
+        auto output_tasks = co_await coro::when_all(add_data(1), add_data(1), add_data(1), add_data(1), add_data(1));
 
         int counter{0};
         std::apply([&counter](auto&&... tasks) -> void { ((counter += tasks.return_value()), ...); }, output_tasks);

@@ -491,7 +489,7 @@ TEST_CASE("io_scheduler multipler event waiters", "[io_scheduler]")
             tasks.emplace_back(func());
         }
 
-        auto results = co_await coro::when_all_awaitable(tasks);
+        auto results = co_await coro::when_all(tasks);
 
         uint64_t counter{0};
         for (const auto& task : results)

@@ -506,7 +504,7 @@ TEST_CASE("io_scheduler multipler event waiters", "[io_scheduler]")
         e.set(s);
     };
 
-    coro::sync_wait(coro::when_all_awaitable(spawn(), release()));
+    coro::sync_wait(coro::when_all(spawn(), release()));
 }
 
 TEST_CASE("io_scheduler self generating coroutine (stack overflow check)", "[io_scheduler]")
@@ -13,10 +13,18 @@ TEST_CASE("mutex single waiter not locked", "[mutex]")
 
     auto make_emplace_task = [&](coro::mutex& m) -> coro::task<void> {
         std::cerr << "Acquiring lock\n";
-        auto scoped_lock = co_await m.lock();
-        std::cerr << "lock acquired, emplacing back 1\n";
-        output.emplace_back(1);
-        std::cerr << "coroutine done\n";
+        {
+            auto scoped_lock = co_await m.lock();
+            REQUIRE_FALSE(m.try_lock());
+            std::cerr << "lock acquired, emplacing back 1\n";
+            output.emplace_back(1);
+            std::cerr << "coroutine done\n";
+        }
+
+        // The scoped lock should release the lock upon destructing.
+        REQUIRE(m.try_lock());
+        m.unlock();
+
         co_return;
     };
 

@@ -43,6 +51,10 @@ TEST_CASE("mutex many waiters until event", "[mutex]")
         co_await tp.schedule();
         std::cerr << "id = " << id << " waiting to acquire the lock\n";
         auto scoped_lock = co_await m.lock();
+
+        // Should always be locked upon acquiring the locks.
+        REQUIRE_FALSE(m.try_lock());
+
         std::cerr << "id = " << id << " lock acquired\n";
         value.fetch_add(1, std::memory_order::relaxed);
         std::cerr << "id = " << id << " coroutine done\n";

@@ -53,6 +65,7 @@ TEST_CASE("mutex many waiters until event", "[mutex]")
         co_await tp.schedule();
         std::cerr << "block task acquiring lock\n";
         auto scoped_lock = co_await m.lock();
+        REQUIRE_FALSE(m.try_lock());
         std::cerr << "block task acquired lock, waiting on event\n";
         co_await e;
         co_return;

@@ -76,7 +89,24 @@ TEST_CASE("mutex many waiters until event", "[mutex]")
 
     tasks.emplace_back(make_set_task());
 
-    coro::sync_wait(coro::when_all_awaitable(tasks));
+    coro::sync_wait(coro::when_all(tasks));
 
     REQUIRE(value == 4);
+}
+
+TEST_CASE("mutex scoped_lock unlock prior to scope exit", "[mutex]")
+{
+    coro::mutex m;
+
+    auto make_task = [&]() -> coro::task<void> {
+        {
+            auto lk = co_await m.lock();
+            REQUIRE_FALSE(m.try_lock());
+            lk.unlock();
+            REQUIRE(m.try_lock());
+        }
+        co_return;
+    };
+
+    coro::sync_wait(make_task());
 }
@@ -26,7 +26,7 @@ TEST_CASE("thread_pool one worker many tasks tuple", "[thread_pool]")
         co_return 50;
     };
 
-    auto tasks = coro::sync_wait(coro::when_all_awaitable(f(), f(), f(), f(), f()));
+    auto tasks = coro::sync_wait(coro::when_all(f(), f(), f(), f(), f()));
     REQUIRE(std::tuple_size<decltype(tasks)>() == 5);
 
     uint64_t counter{0};

@@ -49,7 +49,7 @@ TEST_CASE("thread_pool one worker many tasks vector", "[thread_pool]")
     input_tasks.emplace_back(f());
     input_tasks.emplace_back(f());
 
-    auto output_tasks = coro::sync_wait(coro::when_all_awaitable(input_tasks));
+    auto output_tasks = coro::sync_wait(coro::when_all(input_tasks));
 
     REQUIRE(output_tasks.size() == 3);
 

@@ -79,7 +79,7 @@ TEST_CASE("thread_pool N workers 100k tasks", "[thread_pool]")
         input_tasks.emplace_back(make_task(tp));
     }
 
-    auto output_tasks = coro::sync_wait(coro::when_all_awaitable(input_tasks));
+    auto output_tasks = coro::sync_wait(coro::when_all(input_tasks));
     REQUIRE(output_tasks.size() == iterations);
 
     uint64_t counter{0};

@@ -189,5 +189,5 @@ TEST_CASE("thread_pool event jump threads", "[thread_pool]")
         co_return;
     };
 
-    coro::sync_wait(coro::when_all_awaitable(make_tp1_task(), make_tp2_task()));
+    coro::sync_wait(coro::when_all(make_tp1_task(), make_tp2_task()));
 }
@@ -2,11 +2,11 @@
 
 #include <coro/coro.hpp>
 
-TEST_CASE("when_all_awaitable single task with tuple container", "[when_all]")
+TEST_CASE("when_all single task with tuple container", "[when_all]")
 {
     auto make_task = [](uint64_t amount) -> coro::task<uint64_t> { co_return amount; };
 
-    auto output_tasks = coro::sync_wait(coro::when_all_awaitable(make_task(100)));
+    auto output_tasks = coro::sync_wait(coro::when_all(make_task(100)));
     REQUIRE(std::tuple_size<decltype(output_tasks)>() == 1);
 
     uint64_t counter{0};

@@ -15,11 +15,11 @@ TEST_CASE("when_all_awaitable single task with tuple container", "[when_all]")
     REQUIRE(counter == 100);
 }
 
-TEST_CASE("when_all_awaitable multiple tasks with tuple container", "[when_all]")
+TEST_CASE("when_all multiple tasks with tuple container", "[when_all]")
 {
     auto make_task = [](uint64_t amount) -> coro::task<uint64_t> { co_return amount; };
 
-    auto output_tasks = coro::sync_wait(coro::when_all_awaitable(make_task(100), make_task(50), make_task(20)));
+    auto output_tasks = coro::sync_wait(coro::when_all(make_task(100), make_task(50), make_task(20)));
     REQUIRE(std::tuple_size<decltype(output_tasks)>() == 3);
 
     uint64_t counter{0};

@@ -28,14 +28,14 @@ TEST_CASE("when_all_awaitable multiple tasks with tuple container", "[when_all]"
     REQUIRE(counter == 170);
 }
 
-TEST_CASE("when_all_awaitable single task with vector container", "[when_all]")
+TEST_CASE("when_all single task with vector container", "[when_all]")
 {
     auto make_task = [](uint64_t amount) -> coro::task<uint64_t> { co_return amount; };
 
     std::vector<coro::task<uint64_t>> input_tasks;
     input_tasks.emplace_back(make_task(100));
 
-    auto output_tasks = coro::sync_wait(coro::when_all_awaitable(input_tasks));
+    auto output_tasks = coro::sync_wait(coro::when_all(input_tasks));
     REQUIRE(output_tasks.size() == 1);
 
     uint64_t counter{0};

@@ -47,7 +47,7 @@ TEST_CASE("when_all_awaitable single task with vector container", "[when_all]")
     REQUIRE(counter == 100);
 }
 
-TEST_CASE("when_all_ready multple task withs vector container", "[when_all]")
+TEST_CASE("when_all multple task withs vector container", "[when_all]")
 {
     auto make_task = [](uint64_t amount) -> coro::task<uint64_t> { co_return amount; };
 

@@ -57,7 +57,7 @@ TEST_CASE("when_all_ready multple task withs vector container", "[when_all]")
     input_tasks.emplace_back(make_task(550));
     input_tasks.emplace_back(make_task(1000));
 
-    auto output_tasks = coro::sync_wait(coro::when_all_awaitable(input_tasks));
+    auto output_tasks = coro::sync_wait(coro::when_all(input_tasks));
     REQUIRE(output_tasks.size() == 4);
 
     uint64_t counter{0};