Mirror of https://gitlab.com/niansa/libcrosscoro.git, synced 2025-03-06 20:53:32 +01:00
* io_scheduler inline support
* add debug info for io_scheduler size issue
* move poll info into its own file
* cleanup for feature
* Fix valgrind-detected use-after-free with inline processing

Running the coroutines inline with event processing caused a use-after-free bug, detected by valgrind in the inline tcp server/client benchmark code. If an event and a timeout occurred in the same time period, inline processing would resume each coroutine as its event or timeout was handled; when both occurred in the same epoll_wait() call, the second one's coroutine stack frame could already be destroyed by the time it was resumed, so the poll_info->processed check would read already-freed memory.

The solution was to introduce a vector of coroutine handles which is appended to on each epoll_wait() iteration for both events and timeouts; only after the events and timeouts have been deduplicated are the coroutine handles resumed. This new vector has elided a malloc in the timeout function, but there is still a malloc to extract the poll infos from the timeout multimap data structure. The vector is a class member and is only ever cleared; with a monster set of timeouts it could grow extremely large, but that is worth the price of not re-allocating it.
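
The fix described above is a collect-then-resume pattern. Below is a minimal sketch of the idea; the names (scheduler, on_event, on_timeout, resume_batch, m_handles_to_resume) are illustrative assumptions, not libcoro's actual io_scheduler API.

#include <algorithm>
#include <coroutine>
#include <vector>

class scheduler
{
public:
    // While draining one epoll_wait() batch, events and timeouts only
    // record the handle to resume; nothing is resumed inline.
    void on_event(std::coroutine_handle<> handle) { m_handles_to_resume.push_back(handle); }
    void on_timeout(std::coroutine_handle<> handle) { m_handles_to_resume.push_back(handle); }

    // After the whole batch has been gathered, deduplicate so a coroutine
    // whose event and timeout both fired in the same batch is resumed only
    // once, then resume everything.  The vector is cleared rather than
    // destroyed, so its capacity is reused across iterations.
    void resume_batch()
    {
        std::sort(m_handles_to_resume.begin(), m_handles_to_resume.end());
        auto last = std::unique(m_handles_to_resume.begin(), m_handles_to_resume.end());
        m_handles_to_resume.erase(last, m_handles_to_resume.end());

        for (auto handle : m_handles_to_resume)
        {
            handle.resume();
        }
        m_handles_to_resume.clear();
    }

private:
    std::vector<std::coroutine_handle<>> m_handles_to_resume{};
};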
159 lines · 4.6 KiB · C++
#include "catch.hpp"
|
|
|
|
#include <coro/coro.hpp>
|
|
|
|
#include <chrono>
|
|
#include <thread>
|
|
|
|
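
// Exclusive lock: while held, neither try_lock() nor try_lock_shared()
// may succeed, and the scoped lock must release on destruction.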
TEST_CASE("mutex single waiter not locked exclusive", "[shared_mutex]")
|
|
{
|
|
coro::thread_pool tp{coro::thread_pool::options{.thread_count = 1}};
|
|
std::vector<uint64_t> output;
|
|
|
|
coro::shared_mutex<coro::thread_pool> m{tp};
|
|
|
|
auto make_emplace_task = [&](coro::shared_mutex<coro::thread_pool>& m) -> coro::task<void> {
|
|
std::cerr << "Acquiring lock exclusive\n";
|
|
{
|
|
auto scoped_lock = co_await m.lock();
|
|
REQUIRE_FALSE(m.try_lock());
|
|
REQUIRE_FALSE(m.try_lock_shared());
|
|
std::cerr << "lock acquired, emplacing back 1\n";
|
|
output.emplace_back(1);
|
|
std::cerr << "coroutine done\n";
|
|
}
|
|
|
|
// The scoped lock should release the lock upon destructing.
|
|
REQUIRE(m.try_lock());
|
|
m.unlock();
|
|
|
|
co_return;
|
|
};
|
|
|
|
coro::sync_wait(make_emplace_task(m));
|
|
|
|
REQUIRE(m.try_lock());
|
|
m.unlock();
|
|
|
|
REQUIRE(output.size() == 1);
|
|
REQUIRE(output[0] == 1);
|
|
}
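
// Shared lock: other readers may still acquire the lock while a writer may
// not; a manual extra try_lock_shared()/unlock_shared() pair is exercised too.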
TEST_CASE("mutex single waiter not locked shared", "[shared_mutex]")
{
    coro::thread_pool tp{coro::thread_pool::options{.thread_count = 1}};
    std::vector<uint64_t> values{1, 2, 3};

    coro::shared_mutex<coro::thread_pool> m{tp};

    auto make_emplace_task = [&](coro::shared_mutex<coro::thread_pool>& m) -> coro::task<void> {
        std::cerr << "Acquiring lock shared\n";
        {
            auto scoped_lock = co_await m.lock_shared();
            REQUIRE_FALSE(m.try_lock());
            REQUIRE(m.try_lock_shared());
            std::cerr << "lock acquired, reading values\n";
            for (const auto& v : values)
            {
                std::cerr << v << ",";
            }
            std::cerr << "\ncoroutine done\n";

            m.unlock_shared(); // manually locked shared on a shared, unlock
        }

        // The scoped lock should release the lock upon destructing.
        REQUIRE(m.try_lock());
        m.unlock();

        co_return;
    };

    coro::sync_wait(make_emplace_task(m));

    REQUIRE(m.try_lock_shared());
    m.unlock_shared();

    REQUIRE(m.try_lock());
    m.unlock();
}
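
// Interleave many shared lockers with one exclusive locker on an
// io_scheduler and verify the writer's update becomes visible to readers.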
TEST_CASE("mutex many shared and exclusive waiters interleaved", "[shared_mutex]")
{
    coro::io_scheduler tp{coro::io_scheduler::options{.pool = coro::thread_pool::options{.thread_count = 8}}};
    coro::shared_mutex<coro::io_scheduler> m{tp};

    std::atomic<bool> read_value{false};

    auto make_shared_task = [&]() -> coro::task<bool> {
        co_await tp.schedule();
        std::cerr << "make_shared_task shared lock acquiring\n";
        auto scoped_lock = co_await m.lock_shared();
        std::cerr << "make_shared_task shared lock acquired\n";
        bool value = read_value.load(std::memory_order::acquire);
        std::cerr << "make_shared_task shared lock releasing on thread_id = " << std::this_thread::get_id() << "\n";
        co_return value;
    };

    auto make_exclusive_task = [&]() -> coro::task<void> {
        // Let some readers get through.
        co_await tp.yield_for(std::chrono::milliseconds{50});

        {
            std::cerr << "make_exclusive_task exclusive lock acquiring\n";
            auto scoped_lock = co_await m.lock();
            std::cerr << "make_exclusive_task exclusive lock acquired\n";
            // Stack readers on the mutex.
            co_await tp.yield_for(std::chrono::milliseconds{50});
            read_value.exchange(true, std::memory_order::release);
            std::cerr << "make_exclusive_task exclusive lock releasing\n";
        }

        co_return;
    };

    auto make_shared_tasks_task = [&]() -> coro::task<void> {
        co_await tp.schedule();

        std::vector<coro::task<bool>> shared_tasks{};

        // Keep spawning readers until one of them observes the writer's update.
        bool stop{false};
        while (!stop)
        {
            shared_tasks.emplace_back(make_shared_task());
            shared_tasks.back().resume();

            co_await tp.yield_for(std::chrono::milliseconds{1});

            for (const auto& st : shared_tasks)
            {
                if (st.is_ready())
                {
                    stop = st.promise().return_value();
                }
            }
        }

        // Spin until every spawned reader task has completed.
        while (true)
        {
            bool tasks_remaining{false};
            for (const auto& st : shared_tasks)
            {
                if (!st.is_ready())
                {
                    tasks_remaining = true;
                    break;
                }
            }

            if (!tasks_remaining)
            {
                break;
            }
        }

        co_return;
    };

    coro::sync_wait(coro::when_all(make_shared_tasks_task(), make_exclusive_task()));
}