
Issue 5/clang format (#6)

* clang-format all existing files

* Add detailed comments for event
Josh Baldwin 2020-10-14 08:53:00 -06:00 committed by GitHub
parent 1a2ec073ca
commit 303cc3384c
16 changed files with 513 additions and 712 deletions

.clang-format

@ -1,4 +1,5 @@
---
AccessModifierOffset: -4
AlignAfterOpenBracket: AlwaysBreak
AlignConsecutiveMacros: 'true'
AlignConsecutiveAssignments: 'true'

CMakeLists.txt

@ -9,7 +9,7 @@ message("${PROJECT_NAME} CORO_CODE_COVERAGE = ${CORO_CODE_COVERAGE}")
set(LIBCORO_SOURCE_FILES
inc/coro/coro.hpp
inc/coro/event.hpp
inc/coro/event.hpp src/event.cpp
inc/coro/generator.hpp
inc/coro/latch.hpp
inc/coro/scheduler.hpp

inc/coro/event.hpp

@ -1,108 +1,105 @@
#pragma once
#include <coroutine>
#include <optional>
#include <atomic>
#include <coroutine>
namespace coro
{
/**
* Event is a manually triggered thread-safe signal that can be co_await()'ed by multiple awaiters.
* Each awaiter should co_await the event, and upon the event being set, each awaiter will have its
* coroutine resumed.
*
* The event can be manually reset to the un-set state to be re-used.
* \code
t1: coro::event e;
...
t2: func(coro::event& e) { ... co_await e; ... }
...
t1: do_work();
t1: e.set();
...
t2: resume()
* \endcode
*/
class event
{
public:
event(bool initially_set = false) noexcept
: m_state((initially_set) ? static_cast<void*>(this) : nullptr)
{
}
virtual ~event() = default;
/**
* Creates an event with the given initial state of being set or not set.
* @param initially_set By default all events start as not set, but if needed this parameter can
* set the event to already be triggered.
*/
explicit event(bool initially_set = false) noexcept;
~event() = default;
event(const event&) = delete;
event(event&&) = delete;
auto operator=(const event&) -> event& = delete;
auto operator=(event &&) -> event& = delete;
bool is_set() const noexcept
{
return m_state.load(std::memory_order_acquire) == this;
}
/**
* @return True if this event is currently in the set state.
*/
auto is_set() const noexcept -> bool { return m_state.load(std::memory_order_acquire) == this; }
auto set() noexcept -> void
{
void* old_value = m_state.exchange(this, std::memory_order_acq_rel);
if(old_value != this)
{
auto* waiters = static_cast<awaiter*>(old_value);
while(waiters != nullptr)
{
auto* next = waiters->m_next;
waiters->m_awaiting_coroutine.resume();
waiters = next;
}
}
}
/**
* Sets this event and resumes all awaiters.
*/
auto set() noexcept -> void;
struct awaiter
{
awaiter(const event& event) noexcept
: m_event(event)
{
}
/**
* @param e The event to wait for it to be set.
*/
awaiter(const event& e) noexcept : m_event(e) {}
/**
* @return True if the event is already set, otherwise false to suspend this coroutine.
*/
auto await_ready() const noexcept -> bool { return m_event.is_set(); }
auto await_ready() const noexcept -> bool
{
return m_event.is_set();
}
/**
* Adds this coroutine to the list of awaiters in a thread safe fashion. If the event
* is set while attempting to add this coroutine to the awaiters then this will return false
* to resume execution immediately.
* @return False if the event is already set, otherwise true to suspend this coroutine.
*/
auto await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> bool;
auto await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> bool
{
const void* const set_state = &m_event;
m_awaiting_coroutine = awaiting_coroutine;
// This value will update if other threads write to it via acquire.
void* old_value = m_event.m_state.load(std::memory_order_acquire);
do
{
// Resume immediately if already in the set state.
if(old_value == set_state)
{
return false;
}
m_next = static_cast<awaiter*>(old_value);
} while(!m_event.m_state.compare_exchange_weak(
old_value,
this,
std::memory_order_release,
std::memory_order_acquire));
return true;
}
auto await_resume() noexcept
{
}
/**
* Nothing to do on resume.
*/
auto await_resume() noexcept {}
/// Reference to the event that this awaiter is waiting on.
const event& m_event;
/// The awaiting continuation coroutine handle.
std::coroutine_handle<> m_awaiting_coroutine;
/// The next awaiter in line for this event, nullptr if this is the end.
awaiter* m_next{nullptr};
};
auto operator co_await() const noexcept -> awaiter
{
return awaiter(*this);
}
/**
* @return An awaiter struct to suspend and resume this coroutine for when the event is set.
*/
auto operator co_await() const noexcept -> awaiter { return awaiter(*this); }
auto reset() noexcept -> void
{
void* old_value = this;
m_state.compare_exchange_strong(old_value, nullptr, std::memory_order_acquire);
}
/**
* Resets the event from set to not set so it can be re-used. If the event is not currently
* set then this function has no effect.
*/
auto reset() noexcept -> void;
protected:
/// For access to m_state.
friend struct awaiter;
/// The state of the event: nullptr means not set with zero awaiters. Set to an awaiter* when there are
/// coroutines awaiting the event to be set, and set to this when the event has triggered.
/// 1) nullptr == not set
/// 2) awaiter* == linked list of awaiters waiting for the event to trigger.
/// 3) this == The event is triggered and all awaiters are resumed.
mutable std::atomic<void*> m_state;
};
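
For reference, a minimal usage sketch of the event type above, mirroring the consumer/producer pattern used in the tests later in this commit. The consumer function name and the concrete value 42 are illustrative, not part of the library:

#include <coro/coro.hpp>
#include <cstdint>

auto consumer(const coro::event& e) -> coro::task<uint64_t>
{
    co_await e;   // suspends here until e.set() is called, or continues immediately if already set
    co_return 42;
}

int main()
{
    coro::event e{};
    auto task = consumer(e);
    task.resume();   // runs the coroutine until it suspends on the un-set event
    e.set();         // resumes every awaiter registered on the event
    return task.promise().return_value() == 42 ? 0 : 1;
}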

inc/coro/generator.hpp

@ -5,13 +5,11 @@
namespace coro
{
template<typename T>
class generator;
namespace detail
{
template<typename T>
class generator_promise
{
@ -24,47 +22,28 @@ public:
auto get_return_object() noexcept -> generator<T>;
auto initial_suspend() const
{
return std::suspend_always{};
}
auto initial_suspend() const { return std::suspend_always{}; }
auto final_suspend() const
{
return std::suspend_always{};
}
auto final_suspend() const { return std::suspend_always{}; }
template<
typename U = T,
std::enable_if_t<!std::is_rvalue_reference<U>::value, int> = 0>
template<typename U = T, std::enable_if_t<!std::is_rvalue_reference<U>::value, int> = 0>
auto yield_value(std::remove_reference_t<T>& value) noexcept
{
m_value = std::addressof(value);
return std::suspend_always{};
}
auto yield_value(std::remove_reference_t<T>&& value) noexcept
{
m_value = std::addressof(value);
return std::suspend_always{};
}
auto unhandled_exception() -> void
{
m_exception = std::current_exception();
}
auto unhandled_exception() -> void { m_exception = std::current_exception(); }
auto return_void() -> void
{
}
auto return_void() -> void {}
auto value() const noexcept -> reference_type
{
return static_cast<reference_type>(*m_value);
}
auto value() const noexcept -> reference_type { return static_cast<reference_type>(*m_value); }
template<typename U>
auto await_transform(U&& value) -> std::suspend_never = delete;
@ -76,17 +55,21 @@ public:
std::rethrow_exception(m_exception);
}
}
private:
pointer_type m_value{nullptr};
std::exception_ptr m_exception;
};
struct generator_sentinel {};
struct generator_sentinel
{
};
template<typename T>
class generator_iterator
{
using coroutine_handle = std::coroutine_handle<generator_promise<T>>;
public:
using iterator_category = std::input_iterator_tag;
using difference_type = std::ptrdiff_t;
@ -94,36 +77,20 @@ public:
using reference = typename generator_promise<T>::reference_type;
using pointer = typename generator_promise<T>::pointer_type;
generator_iterator() noexcept
{
}
generator_iterator() noexcept {}
explicit generator_iterator(coroutine_handle coroutine) noexcept
: m_coroutine(coroutine)
{
}
explicit generator_iterator(coroutine_handle coroutine) noexcept : m_coroutine(coroutine) {}
friend auto operator==(const generator_iterator& it, generator_sentinel) noexcept -> bool
{
return it.m_coroutine == nullptr || it.m_coroutine.done();
}
friend auto operator!=(const generator_iterator& it, generator_sentinel s) noexcept -> bool
{
return !(it == s);
}
friend auto operator!=(const generator_iterator& it, generator_sentinel s) noexcept -> bool { return !(it == s); }
friend auto operator==(generator_sentinel s, const generator_iterator& it) noexcept -> bool
{
return (it == s);
}
friend auto operator==(generator_sentinel s, const generator_iterator& it) noexcept -> bool { return (it == s); }
friend auto operator!=(generator_sentinel s, const generator_iterator& it) noexcept -> bool
{
return it != s;
}
friend auto operator!=(generator_sentinel s, const generator_iterator& it) noexcept -> bool { return it != s; }
generator_iterator& operator++()
{
@ -136,20 +103,12 @@ public:
return *this;
}
auto operator++(int) -> void
{
(void)operator++();
}
auto operator++(int) -> void { (void)operator++(); }
reference operator*() const noexcept
{
return m_coroutine.promise().value();
}
reference operator*() const noexcept { return m_coroutine.promise().value(); }
pointer operator->() const noexcept { return std::addressof(operator*()); }
pointer operator->() const noexcept
{
return std::addressof(operator*());
}
private:
coroutine_handle m_coroutine{nullptr};
};
@ -164,18 +123,10 @@ public:
using iterator = detail::generator_iterator<T>;
using sentinel = detail::generator_sentinel;
generator() noexcept
: m_coroutine(nullptr)
{
}
generator() noexcept : m_coroutine(nullptr) {}
generator(const generator&) = delete;
generator(generator&& other) noexcept
: m_coroutine(other.m_coroutine)
{
other.m_coroutine = nullptr;
}
generator(generator&& other) noexcept : m_coroutine(other.m_coroutine) { other.m_coroutine = nullptr; }
auto operator=(const generator&) = delete;
auto operator =(generator&& other) noexcept -> generator&
@ -208,27 +159,18 @@ public:
return iterator{m_coroutine};
}
auto end() noexcept -> sentinel
{
return sentinel{};
}
auto end() noexcept -> sentinel { return sentinel{}; }
private:
friend class detail::generator_promise<T>;
explicit generator(std::coroutine_handle<promise_type> coroutine) noexcept
: m_coroutine(coroutine)
{
}
explicit generator(std::coroutine_handle<promise_type> coroutine) noexcept : m_coroutine(coroutine) {}
std::coroutine_handle<promise_type> m_coroutine;
};
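
As an aside, a minimal usage sketch of coro::generator, based on the "infinite incrementing integer yield" pattern in the generator tests; the counter function and the bound of 5 are illustrative:

#include <coro/coro.hpp>
#include <cstdint>
#include <iostream>

auto counter() -> coro::generator<int64_t>
{
    int64_t i{0};
    while (true)
    {
        co_yield i;   // suspends after producing each value; the iterator's operator++ resumes it
        ++i;
    }
}

int main()
{
    for (const auto& v : counter())
    {
        if (v >= 5) { break; }   // the generator (and its coroutine) is destroyed when the range-for ends
        std::cout << v << "\n";
    }
}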
namespace detail
{
template<typename T>
auto generator_promise<T>::get_return_object() noexcept -> generator<T>
{

inc/coro/latch.hpp

@ -6,31 +6,19 @@
namespace coro
{
class latch
{
public:
latch(std::ptrdiff_t count) noexcept
: m_count(count),
m_event(count <= 0)
{
}
latch(std::ptrdiff_t count) noexcept : m_count(count), m_event(count <= 0) {}
latch(const latch&) = delete;
latch(latch&&) = delete;
auto operator=(const latch&) -> latch& = delete;
auto operator=(latch &&) -> latch& = delete;
auto is_ready() const noexcept -> bool
{
return m_event.is_set();
}
auto is_ready() const noexcept -> bool { return m_event.is_set(); }
auto remaining() const noexcept -> std::size_t
{
return m_count.load(std::memory_order::acquire);
}
auto remaining() const noexcept -> std::size_t { return m_count.load(std::memory_order::acquire); }
auto count_down(std::ptrdiff_t n = 1) noexcept -> void
{
@ -40,10 +28,7 @@ public:
}
}
auto operator co_await() const noexcept -> event::awaiter
{
return m_event.operator co_await();
}
auto operator co_await() const noexcept -> event::awaiter { return m_event.operator co_await(); }
private:
std::atomic<std::ptrdiff_t> m_count;
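
A short sketch of how the latch excerpt above is typically used, following the count_down pattern from the latch tests; the lambda and the counts are illustrative:

#include <coro/coro.hpp>
#include <cstdint>

int main()
{
    coro::latch l{3};

    auto waiter = [&]() -> coro::task<uint64_t> {
        auto workers = l.remaining();   // 3, captured before waiting, as in the latch tests
        co_await l;                     // suspends until the internal count reaches zero
        co_return workers;
    }();

    waiter.resume();    // runs until it suspends on the latch
    l.count_down();     // 3 -> 2
    l.count_down(2);    // 2 -> 0, sets the internal event and resumes the waiting coroutine
    return waiter.promise().return_value() == 3 ? 0 : 1;
}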

inc/coro/scheduler.hpp

@ -3,31 +3,30 @@
#include "coro/task.hpp"
#include <atomic>
#include <vector>
#include <coroutine>
#include <list>
#include <map>
#include <memory>
#include <mutex>
#include <thread>
#include <span>
#include <list>
#include <queue>
#include <variant>
#include <coroutine>
#include <optional>
#include <queue>
#include <span>
#include <thread>
#include <variant>
#include <vector>
#include <cstring>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <sys/socket.h>
#include <sys/timerfd.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <unistd.h>
#include <cstring>
#include <iostream>
namespace coro
{
class scheduler;
namespace detail
@ -35,11 +34,7 @@ namespace detail
class resume_token_base
{
public:
resume_token_base(scheduler* eng) noexcept
: m_scheduler(eng),
m_state(nullptr)
{
}
resume_token_base(scheduler* eng) noexcept : m_scheduler(eng), m_state(nullptr) {}
virtual ~resume_token_base() = default;
@ -50,7 +45,6 @@ public:
m_state = other.m_state.exchange(0);
other.m_scheduler = nullptr;
}
auto operator=(const resume_token_base&) -> resume_token_base& = delete;
auto operator =(resume_token_base&& other) -> resume_token_base&
@ -66,23 +60,13 @@ public:
return *this;
}
bool is_set() const noexcept
{
return m_state.load(std::memory_order::acquire) == this;
}
bool is_set() const noexcept { return m_state.load(std::memory_order::acquire) == this; }
struct awaiter
{
awaiter(const resume_token_base& token) noexcept
: m_token(token)
{
}
awaiter(const resume_token_base& token) noexcept : m_token(token) {}
auto await_ready() const noexcept -> bool
{
return m_token.is_set();
}
auto await_ready() const noexcept -> bool { return m_token.is_set(); }
auto await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> bool
{
@ -102,10 +86,7 @@ public:
m_next = static_cast<awaiter*>(old_value);
} while (!m_token.m_state.compare_exchange_weak(
old_value,
this,
std::memory_order::release,
std::memory_order::acquire));
old_value, this, std::memory_order::release, std::memory_order::acquire));
return true;
}
@ -120,10 +101,7 @@ public:
awaiter* m_next{nullptr};
};
auto operator co_await() const noexcept -> awaiter
{
return awaiter{*this};
}
auto operator co_await() const noexcept -> awaiter { return awaiter{*this}; }
auto reset() noexcept -> void
{
@ -143,18 +121,10 @@ template<typename return_type>
class resume_token final : public detail::resume_token_base
{
friend scheduler;
resume_token()
: detail::resume_token_base(nullptr)
{
}
resume_token(scheduler& s)
: detail::resume_token_base(&s)
{
}
resume_token() : detail::resume_token_base(nullptr) {}
resume_token(scheduler& s) : detail::resume_token_base(&s) {}
public:
~resume_token() override = default;
resume_token(const resume_token&) = delete;
@ -164,15 +134,10 @@ public:
auto resume(return_type value) noexcept -> void;
auto return_value() const & -> const return_type&
{
return m_return_value;
}
auto return_value() const& -> const return_type& { return m_return_value; }
auto return_value() && -> return_type&& { return std::move(m_return_value); }
auto return_value() && -> return_type&&
{
return std::move(m_return_value);
}
private:
return_type m_return_value;
};
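
A rough sketch of how a resume_token pairs with the scheduler's yield, combining the wait/resume pattern from the benchmarks with the manual thread strategy and process_events() loop used elsewhere in this commit; the lambda names and the value 42 are illustrative:

#include <coro/coro.hpp>
#include <cstdint>

int main()
{
    coro::scheduler s{coro::scheduler::options{.thread_strategy = coro::scheduler::thread_strategy_t::manual}};
    auto token = s.generate_resume_token<uint64_t>();

    auto wait_func = [&]() -> coro::task<void> {
        co_await s.yield(token);   // suspends until token.resume(value) is called
        co_return;
    };
    auto resume_func = [&]() -> coro::task<void> {
        token.resume(42);          // stores 42 in the token and wakes the yielded coroutine
        co_return;
    };

    s.schedule(wait_func());
    s.schedule(resume_func());
    while (s.process_events() > 0)
        ;

    return token.return_value() == 42 ? 0 : 1;
}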
@ -181,16 +146,9 @@ template<>
class resume_token<void> final : public detail::resume_token_base
{
friend scheduler;
resume_token()
: detail::resume_token_base(nullptr)
{
}
resume_token(scheduler& s)
: detail::resume_token_base(&s)
{
}
resume_token() : detail::resume_token_base(nullptr) {}
resume_token(scheduler& s) : detail::resume_token_base(&s) {}
public:
~resume_token() override = default;
@ -235,8 +193,7 @@ private:
public:
using task_position = std::list<std::size_t>::iterator;
task_manager(const std::size_t reserve_size, const double growth_factor)
: m_growth_factor(growth_factor)
task_manager(const std::size_t reserve_size, const double growth_factor) : m_growth_factor(growth_factor)
{
m_tasks.resize(reserve_size);
for (std::size_t i = 0; i < reserve_size; ++i)
@ -397,15 +354,15 @@ public:
/**
* @param options Various scheduler options to tune how it behaves.
*/
scheduler(
const options opts = options{8, 2, thread_strategy_t::spawn}
)
scheduler(const options opts = options{8, 2, thread_strategy_t::spawn})
: m_epoll_fd(epoll_create1(EPOLL_CLOEXEC)),
m_accept_fd(eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK)),
m_thread_strategy(opts.thread_strategy),
m_task_manager(opts.reserve_size, opts.growth_factor)
{
struct epoll_event e{};
struct epoll_event e
{
};
e.events = EPOLLIN;
e.data.ptr = const_cast<void*>(m_accept_ptr);
@ -470,11 +427,7 @@ public:
// Send an event if one isn't already set. We use strong here to avoid spurious failures
// but if it fails due to it actually being set we don't want to retry.
bool expected{false};
if(m_event_set.compare_exchange_strong(
expected,
true,
std::memory_order::release,
std::memory_order::relaxed))
if (m_event_set.compare_exchange_strong(expected, true, std::memory_order::release, std::memory_order::relaxed))
{
uint64_t value{1};
::write(m_accept_fd, &value, sizeof(value));
@ -506,15 +459,14 @@ public:
*/
auto poll(fd_t fd, poll_op op) -> coro::task<void>
{
co_await unsafe_yield<void>(
[&](resume_token<void>& token)
co_await unsafe_yield<void>([&](resume_token<void>& token) {
struct epoll_event e
{
struct epoll_event e{};
};
e.events = static_cast<uint32_t>(op) | EPOLLONESHOT | EPOLLET;
e.data.ptr = &token;
epoll_ctl(m_epoll_fd, EPOLL_CTL_ADD, fd, &e);
}
);
});
epoll_ctl(m_epoll_fd, EPOLL_CTL_DEL, fd, nullptr);
}
@ -543,7 +495,8 @@ public:
auto write(fd_t fd, const std::span<const char> buffer) -> coro::task<ssize_t>
{
co_await poll(fd, poll_op::write);
co_return ::write(fd, buffer.data(), buffer.size());;
co_return ::write(fd, buffer.data(), buffer.size());
;
}
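
A rough sketch of the poll/read/write coroutine API above, used against an eventfd as in the "read and write same fd" scheduler test; the manual thread strategy and the process_events() loop are borrowed from other parts of this commit, and the value 42 is illustrative:

#include <coro/coro.hpp>

#include <sys/eventfd.h>
#include <unistd.h>

#include <cstdint>
#include <span>

int main()
{
    coro::scheduler s{coro::scheduler::options{.thread_strategy = coro::scheduler::thread_strategy_t::manual}};
    auto efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);

    const uint64_t out{42};
    auto func = [&]() -> coro::task<void> {
        // write() and read() each co_await poll() internally before issuing the syscall.
        co_await s.write(efd, std::span<const char>{reinterpret_cast<const char*>(&out), sizeof(out)});
        uint64_t in{0};
        co_await s.read(efd, std::span<char>{reinterpret_cast<char*>(&in), sizeof(in)});
        co_return;
    };

    s.schedule(func());
    while (s.process_events() > 0)
        ;

    close(efd);
    return 0;
}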
/**
@ -609,7 +562,9 @@ public:
throw std::runtime_error(msg.data());
}
struct itimerspec ts{};
struct itimerspec ts
{
};
auto seconds = std::chrono::duration_cast<std::chrono::seconds>(amount);
amount -= seconds;
@ -807,7 +762,6 @@ private:
}
}
inline auto process_task_variant(task_variant& tv) -> void
{
if (std::holds_alternative<coro::task<void>>(tv))
@ -879,10 +833,8 @@ private:
// This needs to succeed so best practice is to loop compare exchange weak.
bool expected{true};
while (!m_event_set.compare_exchange_weak(
expected,
false,
std::memory_order::release,
std::memory_order::relaxed)) { }
expected, false, std::memory_order::release, std::memory_order::relaxed))
{}
tasks_ready = true;
}
@ -911,11 +863,7 @@ private:
{
// Do not allow two threads to process events at the same time.
bool expected{false};
if(m_running.compare_exchange_strong(
expected,
true,
std::memory_order::release,
std::memory_order::relaxed))
if (m_running.compare_exchange_strong(expected, true, std::memory_order::release, std::memory_order::relaxed))
{
process_events_poll_execute(user_timeout);
m_running.exchange(false, std::memory_order::release);
@ -989,4 +937,3 @@ inline auto resume_token<void>::resume() noexcept -> void
}
} // namespace coro

inc/coro/sync_wait.hpp

@ -1,11 +1,10 @@
#pragma once
#include "coro/task.hpp"
#include "coro/scheduler.hpp"
#include "coro/task.hpp"
namespace coro
{
template<typename task_type>
auto sync_wait(task_type&& task) -> decltype(auto)
{
@ -20,13 +19,12 @@ template<typename ... tasks>
auto sync_wait_all(tasks&&... awaitables) -> void
{
scheduler s{scheduler::options{
.reserve_size = sizeof...(awaitables),
.thread_strategy = scheduler::thread_strategy_t::manual }
};
.reserve_size = sizeof...(awaitables), .thread_strategy = scheduler::thread_strategy_t::manual}};
(s.schedule(std::move(awaitables)), ...);
while(s.process_events() > 0) ;
while (s.process_events() > 0)
;
}
} // namespace coro
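
A brief usage sketch of sync_wait and sync_wait_all, following the patterns in the sync_wait tests; the lambdas and values are illustrative:

#include <coro/coro.hpp>
#include <atomic>
#include <cstdint>

int main()
{
    // sync_wait drives a single task to completion and returns its value.
    auto answer = []() -> coro::task<int> { co_return 42; };
    int result = coro::sync_wait(answer());

    // sync_wait_all schedules every task on a manual scheduler and pumps process_events()
    // until they have all finished.
    std::atomic<uint64_t> counter{0};
    auto accumulate = [&](uint64_t amount) -> coro::task<void> {
        counter += amount;
        co_return;
    };
    coro::sync_wait_all(accumulate(1), accumulate(2), accumulate(3));

    return (result == 42 && counter == 6) ? 0 : 1;
}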

inc/coro/task.hpp

@ -4,27 +4,22 @@
namespace coro
{
template<typename return_type = void>
class task;
namespace detail
{
struct promise_base
{
friend struct final_awaitable;
struct final_awaitable
{
auto await_ready() const noexcept -> bool
{
return false;
}
auto await_ready() const noexcept -> bool { return false; }
template<typename promise_type>
auto await_suspend(std::coroutine_handle<promise_type> coroutine) noexcept -> std::coroutine_handle<>
{
// // If there is a continuation call it, otherwise this is the end of the line.
// If there is a continuation call it, otherwise this is the end of the line.
auto& promise = coroutine.promise();
if (promise.m_continuation != nullptr)
{
@ -45,25 +40,13 @@ struct promise_base
promise_base() noexcept = default;
~promise_base() = default;
auto initial_suspend()
{
return std::suspend_always{};
}
auto initial_suspend() { return std::suspend_always{}; }
auto final_suspend()
{
return final_awaitable{};
}
auto final_suspend() { return final_awaitable{}; }
auto unhandled_exception() -> void
{
m_exception_ptr = std::current_exception();
}
auto unhandled_exception() -> void { m_exception_ptr = std::current_exception(); }
auto continuation(std::coroutine_handle<> continuation) noexcept -> void
{
m_continuation = continuation;
}
auto continuation(std::coroutine_handle<> continuation) noexcept -> void { m_continuation = continuation; }
protected:
std::coroutine_handle<> m_continuation{nullptr};
@ -81,10 +64,7 @@ struct promise final : public promise_base
auto get_return_object() noexcept -> task_type;
auto return_value(return_type value) -> void
{
m_return_value = std::move(value);
}
auto return_value(return_type value) -> void { m_return_value = std::move(value); }
auto return_value() const& -> const return_type&
{
@ -121,10 +101,7 @@ struct promise<void> : public promise_base
auto get_return_object() noexcept -> task_type;
auto return_void() noexcept -> void
{
}
auto return_void() noexcept -> void {}
auto return_value() const -> void
{
@ -147,16 +124,9 @@ public:
struct awaitable_base
{
awaitable_base(coroutine_handle coroutine) noexcept
: m_coroutine(coroutine)
{
}
awaitable_base(coroutine_handle coroutine) noexcept : m_coroutine(coroutine) {}
auto await_ready() const noexcept -> bool
{
return !m_coroutine || m_coroutine.done();
}
auto await_ready() const noexcept -> bool { return !m_coroutine || m_coroutine.done(); }
auto await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> std::coroutine_handle<>
{
@ -167,23 +137,11 @@ public:
std::coroutine_handle<promise_type> m_coroutine{nullptr};
};
task() noexcept
: m_coroutine(nullptr)
{
}
task() noexcept : m_coroutine(nullptr) {}
task(coroutine_handle handle)
: m_coroutine(handle)
{
}
task(coroutine_handle handle) : m_coroutine(handle) {}
task(const task&) = delete;
task(task&& other) noexcept
: m_coroutine(other.m_coroutine)
{
other.m_coroutine = nullptr;
}
task(task&& other) noexcept : m_coroutine(other.m_coroutine) { other.m_coroutine = nullptr; }
~task()
{
@ -213,10 +171,7 @@ public:
/**
* @return True if the task is in its final suspend or if the task has been destroyed.
*/
auto is_ready() const noexcept -> bool
{
return m_coroutine == nullptr || m_coroutine.done();
}
auto is_ready() const noexcept -> bool { return m_coroutine == nullptr || m_coroutine.done(); }
auto resume() -> bool
{
@ -243,33 +198,18 @@ public:
{
struct awaitable : public awaitable_base
{
auto await_resume() noexcept -> decltype(auto)
{
return this->m_coroutine.promise().return_value();
}
auto await_resume() noexcept -> decltype(auto) { return this->m_coroutine.promise().return_value(); }
};
return awaitable{m_coroutine};
}
auto promise() & -> promise_type&
{
return m_coroutine.promise();
}
auto promise() & -> promise_type& { return m_coroutine.promise(); }
auto promise() const & -> const promise_type&
{
return m_coroutine.promise();
}
auto promise() && -> promise_type&&
{
return std::move(m_coroutine.promise());
}
auto promise() const& -> const promise_type& { return m_coroutine.promise(); }
auto promise() && -> promise_type&& { return std::move(m_coroutine.promise()); }
auto handle() -> coroutine_handle
{
return m_coroutine;
}
auto handle() -> coroutine_handle { return m_coroutine; }
private:
coroutine_handle m_coroutine{nullptr};
@ -277,7 +217,6 @@ private:
namespace detail
{
template<typename return_type>
inline auto promise<return_type>::get_return_object() noexcept -> task<return_type>
{

src/event.cpp (new file, 55 lines added)

@ -0,0 +1,55 @@
#include "coro/event.hpp"
namespace coro
{
event::event(bool initially_set) noexcept : m_state((initially_set) ? static_cast<void*>(this) : nullptr)
{
}
auto event::set() noexcept -> void
{
// Exchange the state to this, if the state was previously not this, then traverse the list
// of awaiters and resume their coroutines.
void* old_value = m_state.exchange(this, std::memory_order::acq_rel);
if (old_value != this)
{
auto* waiters = static_cast<awaiter*>(old_value);
while (waiters != nullptr)
{
auto* next = waiters->m_next;
waiters->m_awaiting_coroutine.resume();
waiters = next;
}
}
}
auto event::awaiter::await_suspend(std::coroutine_handle<> awaiting_coroutine) noexcept -> bool
{
const void* const set_state = &m_event;
m_awaiting_coroutine = awaiting_coroutine;
// This value will update if other threads write to it via acquire.
void* old_value = m_event.m_state.load(std::memory_order::acquire);
do
{
// Resume immediately if already in the set state.
if (old_value == set_state)
{
return false;
}
m_next = static_cast<awaiter*>(old_value);
} while (!m_event.m_state.compare_exchange_weak(
old_value, this, std::memory_order::release, std::memory_order::acquire));
return true;
}
auto event::reset() noexcept -> void
{
void* old_value = this;
m_state.compare_exchange_strong(old_value, nullptr, std::memory_order::acquire);
}
} // namespace coro

test/bench.cpp

@ -2,22 +2,18 @@
#include <coro/coro.hpp>
#include <chrono>
#include <iostream>
#include <atomic>
#include <chrono>
#include <iomanip>
#include <iostream>
using namespace std::chrono_literals;
using sc = std::chrono::steady_clock;
constexpr std::size_t default_iterations = 5'000'000;
static auto print_stats(
const std::string& bench_name,
uint64_t operations,
sc::time_point start,
sc::time_point stop
) -> void
static auto print_stats(const std::string& bench_name, uint64_t operations, sc::time_point start, sc::time_point stop)
-> void
{
auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start);
auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(duration);
@ -35,8 +31,7 @@ TEST_CASE("benchmark counter func direct call")
{
constexpr std::size_t iterations = default_iterations;
std::atomic<uint64_t> counter{0};
auto func = [&]() -> void
{
auto func = [&]() -> void {
counter.fetch_add(1, std::memory_order::relaxed);
return;
};
@ -56,8 +51,7 @@ TEST_CASE("benchmark counter func coro::sync_wait(awaitable)")
{
constexpr std::size_t iterations = default_iterations;
std::atomic<uint64_t> counter{0};
auto func = [&]() -> coro::task<void>
{
auto func = [&]() -> coro::task<void> {
counter.fetch_add(1, std::memory_order::relaxed);
co_return;
};
@ -66,7 +60,6 @@ TEST_CASE("benchmark counter func coro::sync_wait(awaitable)")
for (std::size_t i = 0; i < iterations; ++i)
{
coro::sync_wait(func());
}
@ -78,8 +71,7 @@ TEST_CASE("benchmark counter func coro::sync_wait_all(awaitable)")
{
constexpr std::size_t iterations = default_iterations;
std::atomic<uint64_t> counter{0};
auto func = [&]() -> coro::task<void>
{
auto func = [&]() -> coro::task<void> {
counter.fetch_add(1, std::memory_order::relaxed);
co_return;
};
@ -101,8 +93,7 @@ TEST_CASE("benchmark counter task scheduler")
coro::scheduler s1{};
std::atomic<uint64_t> counter{0};
auto func = [&]() -> coro::task<void>
{
auto func = [&]() -> coro::task<void> {
counter.fetch_add(1, std::memory_order::relaxed);
co_return;
};
@ -134,8 +125,7 @@ TEST_CASE("benchmark counter task scheduler yield -> resume from main")
std::atomic<uint64_t> counter{0};
auto wait_func = [&](std::size_t index) -> coro::task<void>
{
auto wait_func = [&](std::size_t index) -> coro::task<void> {
co_await s.yield<void>(tokens[index]);
counter.fetch_add(1, std::memory_order::relaxed);
co_return;
@ -175,15 +165,13 @@ TEST_CASE("benchmark counter task scheduler yield -> resume from coroutine")
std::atomic<uint64_t> counter{0};
auto wait_func = [&](std::size_t index) -> coro::task<void>
{
auto wait_func = [&](std::size_t index) -> coro::task<void> {
co_await s.yield<void>(tokens[index]);
counter.fetch_add(1, std::memory_order::relaxed);
co_return;
};
auto resume_func = [&](std::size_t index) -> coro::task<void>
{
auto resume_func = [&](std::size_t index) -> coro::task<void> {
tokens[index].resume();
co_return;
};
@ -218,15 +206,13 @@ TEST_CASE("benchmark counter task scheduler resume from coroutine -> yield")
std::atomic<uint64_t> counter{0};
auto wait_func = [&](std::size_t index) -> coro::task<void>
{
auto wait_func = [&](std::size_t index) -> coro::task<void> {
co_await s.yield<void>(tokens[index]);
counter.fetch_add(1, std::memory_order::relaxed);
co_return;
};
auto resume_func = [&](std::size_t index) -> coro::task<void>
{
auto resume_func = [&](std::size_t index) -> coro::task<void> {
tokens[index].resume();
co_return;
};
@ -261,15 +247,13 @@ TEST_CASE("benchmark counter task scheduler yield (all) -> resume (all) from cor
std::atomic<uint64_t> counter{0};
auto wait_func = [&](std::size_t index) -> coro::task<void>
{
auto wait_func = [&](std::size_t index) -> coro::task<void> {
co_await s.yield<void>(tokens[index]);
counter.fetch_add(1, std::memory_order::relaxed);
co_return;
};
auto resume_func = [&](std::size_t index) -> coro::task<void>
{
auto resume_func = [&](std::size_t index) -> coro::task<void> {
tokens[index].resume();
co_return;
};

test/test_event.cpp

@ -23,7 +23,6 @@ TEST_CASE("event single awaiter")
REQUIRE(task.promise().return_value() == 42);
}
auto producer(coro::event& event) -> void
{
// Long running task that consumers are waiting for goes here...
@ -70,3 +69,28 @@ TEST_CASE("event multiple watchers")
REQUIRE(value2.promise().return_value() == 42);
REQUIRE(value3.promise().return_value() == 42);
}
TEST_CASE("event reset")
{
coro::event e{};
e.reset();
REQUIRE_FALSE(e.is_set());
auto value1 = consumer(e);
value1.resume(); // start co_awaiting event
REQUIRE_FALSE(value1.is_ready());
producer(e);
REQUIRE(value1.promise().return_value() == 42);
e.reset();
auto value2 = consumer(e);
value2.resume();
REQUIRE_FALSE(value2.is_ready());
producer(e);
REQUIRE(value2.promise().return_value() == 42);
}

test/test_generator.cpp

@ -5,10 +5,7 @@
TEST_CASE("generator single yield")
{
std::string msg{"Hello World Generator!"};
auto func = [&]() -> coro::generator<std::string>
{
co_yield msg;
};
auto func = [&]() -> coro::generator<std::string> { co_yield msg; };
for (const auto& v : func())
{
@ -20,8 +17,7 @@ TEST_CASE("generator infinite incrementing integer yield")
{
constexpr const int64_t max = 1024;
auto func = []() -> coro::generator<int64_t>
{
auto func = []() -> coro::generator<int64_t> {
int64_t i{0};
while (true)
{

test/test_latch.cpp

@ -5,13 +5,11 @@
#include <chrono>
#include <thread>
TEST_CASE("latch count=0")
{
coro::latch l{0};
auto task = [&]() -> coro::task<uint64_t>
{
auto task = [&]() -> coro::task<uint64_t> {
co_await l;
co_return 42;
}();
@ -25,8 +23,7 @@ TEST_CASE("latch count=1")
{
coro::latch l{1};
auto task = [&]() -> coro::task<uint64_t>
{
auto task = [&]() -> coro::task<uint64_t> {
auto workers = l.remaining();
co_await l;
co_return workers;
@ -44,8 +41,7 @@ TEST_CASE("latch count=1 count_down=5")
{
coro::latch l{1};
auto task = [&]() -> coro::task<uint64_t>
{
auto task = [&]() -> coro::task<uint64_t> {
auto workers = l.remaining();
co_await l;
co_return workers;
@ -63,8 +59,7 @@ TEST_CASE("latch count=5 count_down=1 x5")
{
coro::latch l{5};
auto task = [&]() -> coro::task<uint64_t>
{
auto task = [&]() -> coro::task<uint64_t> {
auto workers = l.remaining();
co_await l;
co_return workers;
@ -90,8 +85,7 @@ TEST_CASE("latch count=5 count_down=5")
{
coro::latch l{5};
auto task = [&]() -> coro::task<uint64_t>
{
auto task = [&]() -> coro::task<uint64_t> {
auto workers = l.remaining();
co_await l;
co_return workers;

test/test_scheduler.cpp

@ -2,11 +2,11 @@
#include <coro/coro.hpp>
#include <thread>
#include <chrono>
#include <sys/eventfd.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/eventfd.h>
#include <thread>
#include <unistd.h>
using namespace std::chrono_literals;
@ -16,7 +16,8 @@ TEST_CASE("scheduler sizeof()")
std::cerr << "sizeof(coro:task<void>)=[" << sizeof(coro::task<void>) << "]\n";
std::cerr << "sizeof(std::coroutine_handle<>)=[" << sizeof(std::coroutine_handle<>) << "]\n";
std::cerr << "sizeof(std::variant<std::coroutine_handle<>>)=[" << sizeof(std::variant<std::coroutine_handle<>>) << "]\n";
std::cerr << "sizeof(std::variant<std::coroutine_handle<>>)=[" << sizeof(std::variant<std::coroutine_handle<>>)
<< "]\n";
REQUIRE(true);
}
@ -31,8 +32,7 @@ TEST_CASE("scheduler submit single task")
// and thus will always outlive the coroutines, but in a real application this is dangerous
// and coroutine 'captures' should be passed in via parameters to the function to be copied
// into the coroutine's stack frame.
auto func = [&]() -> coro::task<void>
{
auto func = [&]() -> coro::task<void> {
std::cerr << "Hello world from scheduler task!\n";
counter++;
co_return;
@ -55,8 +55,7 @@ TEST_CASE("scheduler submit single task with move and auto initializing lambda")
std::atomic<uint64_t> counter{0};
coro::scheduler s{};
auto task = [](std::atomic<uint64_t>& counter) -> coro::task<void>
{
auto task = [](std::atomic<uint64_t>& counter) -> coro::task<void> {
std::cerr << "Hello world from scheduler task!\n";
counter++;
co_return;
@ -75,7 +74,10 @@ TEST_CASE("scheduler submit mutiple tasks")
std::atomic<uint64_t> counter{0};
coro::scheduler s{};
auto func = [&]() -> coro::task<void> { counter++; co_return; };
auto func = [&]() -> coro::task<void> {
counter++;
co_return;
};
for (std::size_t i = 0; i < n; ++i)
{
s.schedule(func());
@ -92,8 +94,7 @@ TEST_CASE("scheduler task with multiple yields on event")
auto token = s.generate_resume_token<uint64_t>();
// coro::resume_token<uint64_t> token{s};
auto func = [&]() -> coro::task<void>
{
auto func = [&]() -> coro::task<void> {
std::cerr << "1st suspend\n";
co_await s.yield(token);
std::cerr << "1st resume\n";
@ -138,8 +139,7 @@ TEST_CASE("scheduler task with read poll")
auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
coro::scheduler s{};
auto func = [&]() -> coro::task<void>
{
auto func = [&]() -> coro::task<void> {
// Poll will block until there is data to read.
co_await s.poll(trigger_fd, coro::poll_op::read);
REQUIRE(true);
@ -161,13 +161,9 @@ TEST_CASE("scheduler task with read")
auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
coro::scheduler s{};
auto func = [&]() -> coro::task<void>
{
auto func = [&]() -> coro::task<void> {
uint64_t val{0};
auto bytes_read = co_await s.read(
trigger_fd,
std::span<char>(reinterpret_cast<char*>(&val), sizeof(val))
);
auto bytes_read = co_await s.read(trigger_fd, std::span<char>(reinterpret_cast<char*>(&val), sizeof(val)));
REQUIRE(bytes_read == sizeof(uint64_t));
REQUIRE(val == expected_value);
@ -194,20 +190,14 @@ TEST_CASE("scheduler task with read and write same fd")
auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
coro::scheduler s{};
auto func = [&]() -> coro::task<void>
{
auto func = [&]() -> coro::task<void> {
auto bytes_written = co_await s.write(
trigger_fd,
std::span<const char>(reinterpret_cast<const char*>(&expected_value), sizeof(expected_value))
);
trigger_fd, std::span<const char>(reinterpret_cast<const char*>(&expected_value), sizeof(expected_value)));
REQUIRE(bytes_written == sizeof(uint64_t));
uint64_t val{0};
auto bytes_read = co_await s.read(
trigger_fd,
std::span<char>(reinterpret_cast<char*>(&val), sizeof(val))
);
auto bytes_read = co_await s.read(trigger_fd, std::span<char>(reinterpret_cast<char*>(&val), sizeof(val)));
REQUIRE(bytes_read == sizeof(uint64_t));
REQUIRE(val == expected_value);
@ -228,8 +218,7 @@ TEST_CASE("scheduler task with read and write pipe")
coro::scheduler s{};
auto read_func = [&]() -> coro::task<void>
{
auto read_func = [&]() -> coro::task<void> {
std::string buffer(4096, '0');
std::span<char> view{buffer.data(), buffer.size()};
auto bytes_read = co_await s.read(pipe_fd[0], view);
@ -238,8 +227,7 @@ TEST_CASE("scheduler task with read and write pipe")
REQUIRE(buffer == msg);
};
auto write_func = [&]() -> coro::task<void>
{
auto write_func = [&]() -> coro::task<void> {
std::span<const char> view{msg.data(), msg.size()};
auto bytes_written = co_await s.write(pipe_fd[1], view);
REQUIRE(bytes_written == msg.size());
@ -253,11 +241,8 @@ TEST_CASE("scheduler task with read and write pipe")
close(pipe_fd[1]);
}
static auto standalone_read(
coro::scheduler& s,
coro::scheduler::fd_t socket,
std::span<char> buffer
) -> coro::task<ssize_t>
static auto standalone_read(coro::scheduler& s, coro::scheduler::fd_t socket, std::span<char> buffer)
-> coro::task<ssize_t>
{
// do other stuff in larger function
co_return co_await s.read(socket, buffer);
@ -270,10 +255,10 @@ TEST_CASE("scheduler standalone read task")
auto trigger_fd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
coro::scheduler s{};
auto func = [&]() -> coro::task<void>
{
auto func = [&]() -> coro::task<void> {
ssize_t v{0};
auto bytes_read = co_await standalone_read(s, trigger_fd, std::span<char>(reinterpret_cast<char*>(&v), sizeof(v)));
auto bytes_read =
co_await standalone_read(s, trigger_fd, std::span<char>(reinterpret_cast<char*>(&v), sizeof(v)));
REQUIRE(bytes_read == sizeof(ssize_t));
REQUIRE(v == expected_value);
@ -292,8 +277,7 @@ TEST_CASE("scheduler separate thread resume")
{
coro::scheduler s{};
auto func = [&]() -> coro::task<void>
{
auto func = [&]() -> coro::task<void> {
// User manual resume token, create one specifically for each task being generated
// coro::resume_token<void> token{s};
auto token = s.generate_resume_token<void>();
@ -325,28 +309,21 @@ TEST_CASE("scheduler separate thread resume with return")
std::atomic<coro::resume_token<uint64_t>*> token{};
std::thread service{
[&]() -> void
{
std::thread service{[&]() -> void {
while (token == nullptr)
{
std::this_thread::sleep_for(1ms);
}
token.load()->resume(expected_value);
}
};
}};
auto third_party_service = [&](int multiplier) -> coro::task<uint64_t>
{
auto output = co_await s.yield<uint64_t>([&](coro::resume_token<uint64_t>& t) {
token = &t;
});
auto third_party_service = [&](int multiplier) -> coro::task<uint64_t> {
auto output = co_await s.yield<uint64_t>([&](coro::resume_token<uint64_t>& t) { token = &t; });
co_return output* multiplier;
};
auto func = [&]() -> coro::task<void>
{
auto func = [&]() -> coro::task<void> {
int multiplier{5};
uint64_t value = co_await third_party_service(multiplier);
REQUIRE(value == (expected_value * multiplier));
@ -364,13 +341,9 @@ TEST_CASE("scheduler with basic task")
std::atomic<uint64_t> counter{0};
coro::scheduler s{};
auto add_data = [&](uint64_t val) -> coro::task<int>
{
co_return val;
};
auto add_data = [&](uint64_t val) -> coro::task<int> { co_return val; };
auto func = [&]() -> coro::task<void>
{
auto func = [&]() -> coro::task<void> {
counter += co_await add_data(expected_value);
co_return;
};
@ -387,8 +360,7 @@ TEST_CASE("schedule yield for")
std::atomic<uint64_t> counter{0};
coro::scheduler s{};
auto func = [&]() -> coro::task<void>
{
auto func = [&]() -> coro::task<void> {
++counter;
co_return;
};
@ -409,8 +381,7 @@ TEST_CASE("scheduler trigger growth of internal tasks storage")
constexpr std::size_t iterations{512};
coro::scheduler s{coro::scheduler::options{.reserve_size = 1}};
auto wait_func = [&](std::chrono::milliseconds wait_time) -> coro::task<void>
{
auto wait_func = [&](std::chrono::milliseconds wait_time) -> coro::task<void> {
co_await s.yield_for(wait_time);
++counter;
co_return;
@ -431,14 +402,8 @@ TEST_CASE("scheduler yield with scheduler event void")
std::atomic<uint64_t> counter{0};
coro::scheduler s{};
auto func = [&]() -> coro::task<void>
{
co_await s.yield<void>(
[&](coro::resume_token<void>& token) -> void
{
token.resume();
}
);
auto func = [&]() -> coro::task<void> {
co_await s.yield<void>([&](coro::resume_token<void>& token) -> void { token.resume(); });
counter += 42;
co_return;
@ -456,14 +421,8 @@ TEST_CASE("scheduler yield with scheduler event uint64_t")
std::atomic<uint64_t> counter{0};
coro::scheduler s{};
auto func = [&]() -> coro::task<void>
{
counter += co_await s.yield<uint64_t>(
[&](coro::resume_token<uint64_t>& token) -> void
{
token.resume(42);
}
);
auto func = [&]() -> coro::task<void> {
counter += co_await s.yield<uint64_t>([&](coro::resume_token<uint64_t>& token) -> void { token.resume(42); });
co_return;
};
@ -482,8 +441,7 @@ TEST_CASE("scheduler yield user event")
auto token = s.generate_resume_token<std::string>();
// coro::resume_token<std::string> token{s};
auto func = [&]() -> coro::task<void>
{
auto func = [&]() -> coro::task<void> {
co_await s.yield(token);
REQUIRE(token.return_value() == expected_result);
co_return;
@ -502,8 +460,7 @@ TEST_CASE("scheduler yield user event multiple waiters")
coro::scheduler s{};
auto token = s.generate_resume_token<void>();
auto func = [&](int amount) -> coro::task<void>
{
auto func = [&](int amount) -> coro::task<void> {
co_await token;
std::cerr << "amount=" << amount << "\n";
counter += amount;
@ -532,8 +489,7 @@ TEST_CASE("scheduler manual process events with self generating coroutine (stack
uint64_t counter{0};
coro::scheduler s{coro::scheduler::options{.thread_strategy = coro::scheduler::thread_strategy_t::manual}};
auto func = [&](auto f) -> coro::task<void>
{
auto func = [&](auto f) -> coro::task<void> {
++counter;
// this should detect stack overflows well enough
@ -549,7 +505,8 @@ TEST_CASE("scheduler manual process events with self generating coroutine (stack
std::cerr << "Scheduling recursive function.\n";
s.schedule(func(func));
while(s.process_events()) ;
while (s.process_events())
;
std::cerr << "Recursive test done.\n";
}
@ -557,8 +514,7 @@ TEST_CASE("scheduler task throws")
{
coro::scheduler s{};
auto func = []() -> coro::task<void>
{
auto func = []() -> coro::task<void> {
// Is it possible to actually notify the user when running a task in a scheduler?
// Seems like the user will need to manually catch.
throw std::runtime_error{"I always throw."};

test/test_sync_wait.cpp

@ -4,8 +4,7 @@
TEST_CASE("sync_wait task multiple suspends return integer with sync_wait")
{
auto func = []() -> coro::task<int>
{
auto func = []() -> coro::task<int> {
co_await std::suspend_always{};
co_await std::suspend_always{};
co_await std::suspend_always{};
@ -18,14 +17,12 @@ TEST_CASE("sync_wait task multiple suspends return integer with sync_wait")
TEST_CASE("sync_wait task co_await single")
{
auto answer = []() -> coro::task<int>
{
auto answer = []() -> coro::task<int> {
std::cerr << "\tThinking deep thoughts...\n";
co_return 42;
};
auto await_answer = [&]() -> coro::task<int>
{
auto await_answer = [&]() -> coro::task<int> {
std::cerr << "\tStarting to wait for answer.\n";
auto a = answer();
std::cerr << "\tGot the coroutine, getting the value.\n";
@ -45,8 +42,7 @@ TEST_CASE("sync_wait task co_await single")
TEST_CASE("sync_wait_all accumulate")
{
std::atomic<uint64_t> counter{0};
auto func = [&](uint64_t amount) -> coro::task<void>
{
auto func = [&](uint64_t amount) -> coro::task<void> {
std::cerr << "amount=" << amount << "\n";
counter += amount;
co_return;

test/test_task.cpp

@ -5,7 +5,6 @@
#include <chrono>
#include <thread>
TEST_CASE("task hello world")
{
using task_type = coro::task<std::string>;
@ -74,10 +73,8 @@ TEST_CASE("task exception thrown")
TEST_CASE("task in a task")
{
auto outer_task = []() -> coro::task<>
{
auto inner_task = []() -> coro::task<int>
{
auto outer_task = []() -> coro::task<> {
auto inner_task = []() -> coro::task<int> {
std::cerr << "inner_task start\n";
std::cerr << "inner_task stop\n";
co_return 42;
@ -96,14 +93,11 @@ TEST_CASE("task in a task")
TEST_CASE("task in a task in a task")
{
auto task1 = []() -> coro::task<>
{
auto task1 = []() -> coro::task<> {
std::cerr << "task1 start\n";
auto task2 = []() -> coro::task<int>
{
auto task2 = []() -> coro::task<int> {
std::cerr << "\ttask2 start\n";
auto task3 = []() -> coro::task<int>
{
auto task3 = []() -> coro::task<int> {
std::cerr << "\t\ttask3 start\n";
std::cerr << "\t\ttask3 stop\n";
co_return 3;
@ -129,8 +123,7 @@ TEST_CASE("task in a task in a task")
TEST_CASE("task multiple suspends return void")
{
auto task = []() -> coro::task<void>
{
auto task = []() -> coro::task<void> {
co_await std::suspend_always{};
co_await std::suspend_never{};
co_await std::suspend_always{};
@ -153,8 +146,7 @@ TEST_CASE("task multiple suspends return void")
TEST_CASE("task multiple suspends return integer")
{
auto task = []() -> coro::task<int>
{
auto task = []() -> coro::task<int> {
co_await std::suspend_always{};
co_await std::suspend_always{};
co_await std::suspend_always{};
@ -177,14 +169,12 @@ TEST_CASE("task multiple suspends return integer")
TEST_CASE("task resume from promise to coroutine handles of different types")
{
auto task1 = [&]() -> coro::task<int>
{
auto task1 = [&]() -> coro::task<int> {
std::cerr << "Task ran\n";
co_return 42;
}();
auto task2 = [&]() -> coro::task<void>
{
auto task2 = [&]() -> coro::task<void> {
std::cerr << "Task 2 ran\n";
co_return;
}();
@ -211,8 +201,7 @@ TEST_CASE("task resume from promise to coroutine handles of different types")
TEST_CASE("task throws void")
{
auto task = []() -> coro::task<void>
{
auto task = []() -> coro::task<void> {
throw std::runtime_error{"I always throw."};
co_return;
}();
@ -224,8 +213,7 @@ TEST_CASE("task throws void")
TEST_CASE("task throws non-void l-value")
{
auto task = []() -> coro::task<int>
{
auto task = []() -> coro::task<int> {
throw std::runtime_error{"I always throw."};
co_return 42;
}();
@ -242,8 +230,7 @@ TEST_CASE("task throws non-void r-value")
int m_value;
};
auto task = []() -> coro::task<type>
{
auto task = []() -> coro::task<type> {
type return_value{42};
throw std::runtime_error{"I always throw."};