Initial Commit

commit 0f7191ad54
2025-11-06 15:51:56 +01:00
648 changed files with 170981 additions and 0 deletions


@@ -0,0 +1,30 @@
//
// detail/array.hpp
// ~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_ARRAY_HPP
#define ASIO_DETAIL_ARRAY_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <array>
namespace asio {
namespace detail {
using std::array;
} // namespace detail
} // namespace asio
#endif // ASIO_DETAIL_ARRAY_HPP


@@ -0,0 +1,32 @@
//
// detail/array_fwd.hpp
// ~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_ARRAY_FWD_HPP
#define ASIO_DETAIL_ARRAY_FWD_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
namespace boost {
template<class T, std::size_t N>
class array;
} // namespace boost
// Standard library components can't be forward declared, so we'll have to
// include the array header. Fortunately, it's fairly lightweight and doesn't
// add significantly to the compile time.
#include <array>
#endif // ASIO_DETAIL_ARRAY_FWD_HPP


@@ -0,0 +1,32 @@
//
// detail/assert.hpp
// ~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_ASSERT_HPP
#define ASIO_DETAIL_ASSERT_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_BOOST_ASSERT)
# include <boost/assert.hpp>
#else // defined(ASIO_HAS_BOOST_ASSERT)
# include <cassert>
#endif // defined(ASIO_HAS_BOOST_ASSERT)
#if defined(ASIO_HAS_BOOST_ASSERT)
# define ASIO_ASSERT(expr) BOOST_ASSERT(expr)
#else // defined(ASIO_HAS_BOOST_ASSERT)
# define ASIO_ASSERT(expr) assert(expr)
#endif // defined(ASIO_HAS_BOOST_ASSERT)
#endif // ASIO_DETAIL_ASSERT_HPP
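
ASIO_ASSERT simply forwards to BOOST_ASSERT when Boost's assert is enabled and to the standard assert otherwise. A minimal usage sketch (the function is hypothetical):

#include <cstddef>
#include "asio/detail/assert.hpp"

void set_capacity(std::size_t n)
{
  // Checked in debug builds; compiled out under NDEBUG (or whatever
  // policy a customised BOOST_ASSERT applies).
  ASIO_ASSERT(n > 0);
}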


@@ -0,0 +1,59 @@
//
// detail/atomic_count.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_ATOMIC_COUNT_HPP
#define ASIO_DETAIL_ATOMIC_COUNT_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if !defined(ASIO_HAS_THREADS)
// Nothing to include.
#else // !defined(ASIO_HAS_THREADS)
# include <atomic>
#endif // !defined(ASIO_HAS_THREADS)
namespace asio {
namespace detail {
#if !defined(ASIO_HAS_THREADS)
typedef long atomic_count;
inline void increment(atomic_count& a, long b) { a += b; }
inline void decrement(atomic_count& a, long b) { a -= b; }
inline void ref_count_up(atomic_count& a) { ++a; }
inline bool ref_count_down(atomic_count& a) { return --a == 0; }
#else // !defined(ASIO_HAS_THREADS)
typedef std::atomic<long> atomic_count;
inline void increment(atomic_count& a, long b) { a += b; }
inline void decrement(atomic_count& a, long b) { a -= b; }
inline void ref_count_up(atomic_count& a)
{
a.fetch_add(1, std::memory_order_relaxed);
}
inline bool ref_count_down(atomic_count& a)
{
if (a.fetch_sub(1, std::memory_order_release) == 1)
{
std::atomic_thread_fence(std::memory_order_acquire);
return true;
}
return false;
}
#endif // !defined(ASIO_HAS_THREADS)
} // namespace detail
} // namespace asio
#endif // ASIO_DETAIL_ATOMIC_COUNT_HPP
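
The threaded ref_count_down implements the usual release-decrement plus acquire-fence idiom: every owner's writes are released by its decrement, and the final owner's acquire fence makes them visible before destruction. A minimal sketch of intrusive reference counting built on these helpers (ref_counted is hypothetical, not part of asio):

#include "asio/detail/atomic_count.hpp"

class ref_counted
{
public:
  ref_counted() : count_(1) {} // starts owned by one reference
  void add_ref() { asio::detail::ref_count_up(count_); }
  void release()
  {
    // True only for the final decrement, after which it is safe to
    // delete: the acquire fence synchronises with earlier releases.
    if (asio::detail::ref_count_down(count_))
      delete this;
  }
private:
  asio::detail::atomic_count count_;
};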


@@ -0,0 +1,164 @@
//
// detail/base_from_cancellation_state.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_BASE_FROM_CANCELLATION_STATE_HPP
#define ASIO_DETAIL_BASE_FROM_CANCELLATION_STATE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/associated_cancellation_slot.hpp"
#include "asio/cancellation_state.hpp"
#include "asio/detail/type_traits.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename Handler, typename = void>
class base_from_cancellation_state
{
public:
typedef cancellation_slot cancellation_slot_type;
cancellation_slot_type get_cancellation_slot() const noexcept
{
return cancellation_state_.slot();
}
cancellation_state get_cancellation_state() const noexcept
{
return cancellation_state_;
}
protected:
explicit base_from_cancellation_state(const Handler& handler)
: cancellation_state_(
asio::get_associated_cancellation_slot(handler))
{
}
template <typename Filter>
base_from_cancellation_state(const Handler& handler, Filter filter)
: cancellation_state_(
asio::get_associated_cancellation_slot(handler), filter, filter)
{
}
template <typename InFilter, typename OutFilter>
base_from_cancellation_state(const Handler& handler,
InFilter&& in_filter,
OutFilter&& out_filter)
: cancellation_state_(
asio::get_associated_cancellation_slot(handler),
static_cast<InFilter&&>(in_filter),
static_cast<OutFilter&&>(out_filter))
{
}
void reset_cancellation_state(const Handler& handler)
{
cancellation_state_ = cancellation_state(
asio::get_associated_cancellation_slot(handler));
}
template <typename Filter>
void reset_cancellation_state(const Handler& handler, Filter filter)
{
cancellation_state_ = cancellation_state(
asio::get_associated_cancellation_slot(handler), filter, filter);
}
template <typename InFilter, typename OutFilter>
void reset_cancellation_state(const Handler& handler,
InFilter&& in_filter,
OutFilter&& out_filter)
{
cancellation_state_ = cancellation_state(
asio::get_associated_cancellation_slot(handler),
static_cast<InFilter&&>(in_filter),
static_cast<OutFilter&&>(out_filter));
}
cancellation_type_t cancelled() const noexcept
{
return cancellation_state_.cancelled();
}
private:
cancellation_state cancellation_state_;
};
template <typename Handler>
class base_from_cancellation_state<Handler,
enable_if_t<
is_same<
typename associated_cancellation_slot<
Handler, cancellation_slot
>::asio_associated_cancellation_slot_is_unspecialised,
void
>::value
>
>
{
public:
cancellation_state get_cancellation_state() const noexcept
{
return cancellation_state();
}
protected:
explicit base_from_cancellation_state(const Handler&)
{
}
template <typename Filter>
base_from_cancellation_state(const Handler&, Filter)
{
}
template <typename InFilter, typename OutFilter>
base_from_cancellation_state(const Handler&,
InFilter&&,
OutFilter&&)
{
}
void reset_cancellation_state(const Handler&)
{
}
template <typename Filter>
void reset_cancellation_state(const Handler&, Filter)
{
}
template <typename InFilter, typename OutFilter>
void reset_cancellation_state(const Handler&,
InFilter&&,
OutFilter&&)
{
}
constexpr cancellation_type_t cancelled() const noexcept
{
return cancellation_type::none;
}
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_BASE_FROM_CANCELLATION_STATE_HPP
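
Composed operations privately inherit from this base so that the cancellation slot associated with the user's handler is wired up automatically; handlers whose associated slot is unspecialised get the empty second specialisation at zero storage cost. A sketch of the intended use (my_op is hypothetical; enable_terminal_cancellation is asio's stock filter):

#include "asio/cancellation_state.hpp"
#include "asio/detail/base_from_cancellation_state.hpp"

template <typename Handler>
class my_op :
  private asio::detail::base_from_cancellation_state<Handler>
{
public:
  explicit my_op(Handler& h)
    : asio::detail::base_from_cancellation_state<Handler>(
        h, asio::enable_terminal_cancellation())
  {
  }
  void on_intermediate_step()
  {
    // Check for a terminal cancellation request before continuing.
    if (!!(this->cancelled() & asio::cancellation_type::terminal))
    {
      // ... complete with asio::error::operation_aborted ...
    }
  }
};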


@@ -0,0 +1,69 @@
//
// detail/base_from_completion_cond.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_BASE_FROM_COMPLETION_COND_HPP
#define ASIO_DETAIL_BASE_FROM_COMPLETION_COND_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/completion_condition.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename CompletionCondition>
class base_from_completion_cond
{
protected:
explicit base_from_completion_cond(CompletionCondition& completion_condition)
: completion_condition_(
static_cast<CompletionCondition&&>(completion_condition))
{
}
std::size_t check_for_completion(
const asio::error_code& ec,
std::size_t total_transferred)
{
return detail::adapt_completion_condition_result(
completion_condition_(ec, total_transferred));
}
private:
CompletionCondition completion_condition_;
};
template <>
class base_from_completion_cond<transfer_all_t>
{
protected:
explicit base_from_completion_cond(transfer_all_t)
{
}
static std::size_t check_for_completion(
const asio::error_code& ec,
std::size_t total_transferred)
{
return transfer_all_t()(ec, total_transferred);
}
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_BASE_FROM_COMPLETION_COND_HPP
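
Composed transfer operations inherit from this base and, after each partial transfer, call check_for_completion to ask the user's completion condition how many more bytes may be transferred (zero meaning "done"); the transfer_all_t specialisation keeps the common case stateless. A hypothetical fragment showing the call pattern:

#include <cstddef>
#include "asio/detail/base_from_completion_cond.hpp"
#include "asio/error_code.hpp"

template <typename CompletionCondition>
struct read_op :
  asio::detail::base_from_completion_cond<CompletionCondition>
{
  explicit read_op(CompletionCondition& cond)
    : asio::detail::base_from_completion_cond<CompletionCondition>(cond)
  {
  }
  void on_bytes_transferred(const asio::error_code& ec, std::size_t total)
  {
    // Zero means the operation is complete; otherwise the result caps
    // the size of the next individual read.
    std::size_t max_bytes = this->check_for_completion(ec, total);
    (void)max_bytes;
  }
};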


@@ -0,0 +1,711 @@
//
// detail/bind_handler.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_BIND_HANDLER_HPP
#define ASIO_DETAIL_BIND_HANDLER_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/associator.hpp"
#include "asio/detail/handler_cont_helpers.hpp"
#include "asio/detail/type_traits.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename Handler>
class binder0
{
public:
template <typename T>
binder0(int, T&& handler)
: handler_(static_cast<T&&>(handler))
{
}
binder0(Handler& handler)
: handler_(static_cast<Handler&&>(handler))
{
}
binder0(const binder0& other)
: handler_(other.handler_)
{
}
binder0(binder0&& other)
: handler_(static_cast<Handler&&>(other.handler_))
{
}
void operator()()
{
static_cast<Handler&&>(handler_)();
}
void operator()() const
{
handler_();
}
//private:
Handler handler_;
};
template <typename Handler>
inline bool asio_handler_is_continuation(
binder0<Handler>* this_handler)
{
return asio_handler_cont_helpers::is_continuation(
this_handler->handler_);
}
template <typename Handler>
inline binder0<decay_t<Handler>> bind_handler(
Handler&& handler)
{
return binder0<decay_t<Handler>>(
0, static_cast<Handler&&>(handler));
}
template <typename Handler, typename Arg1>
class binder1
{
public:
template <typename T>
binder1(int, T&& handler, const Arg1& arg1)
: handler_(static_cast<T&&>(handler)),
arg1_(arg1)
{
}
binder1(Handler& handler, const Arg1& arg1)
: handler_(static_cast<Handler&&>(handler)),
arg1_(arg1)
{
}
binder1(const binder1& other)
: handler_(other.handler_),
arg1_(other.arg1_)
{
}
binder1(binder1&& other)
: handler_(static_cast<Handler&&>(other.handler_)),
arg1_(static_cast<Arg1&&>(other.arg1_))
{
}
void operator()()
{
static_cast<Handler&&>(handler_)(
static_cast<const Arg1&>(arg1_));
}
void operator()() const
{
handler_(arg1_);
}
//private:
Handler handler_;
Arg1 arg1_;
};
template <typename Handler, typename Arg1>
inline bool asio_handler_is_continuation(
binder1<Handler, Arg1>* this_handler)
{
return asio_handler_cont_helpers::is_continuation(
this_handler->handler_);
}
template <typename Handler, typename Arg1>
inline binder1<decay_t<Handler>, Arg1> bind_handler(
Handler&& handler, const Arg1& arg1)
{
return binder1<decay_t<Handler>, Arg1>(0,
static_cast<Handler&&>(handler), arg1);
}
template <typename Handler, typename Arg1, typename Arg2>
class binder2
{
public:
template <typename T>
binder2(int, T&& handler,
const Arg1& arg1, const Arg2& arg2)
: handler_(static_cast<T&&>(handler)),
arg1_(arg1),
arg2_(arg2)
{
}
binder2(Handler& handler, const Arg1& arg1, const Arg2& arg2)
: handler_(static_cast<Handler&&>(handler)),
arg1_(arg1),
arg2_(arg2)
{
}
binder2(const binder2& other)
: handler_(other.handler_),
arg1_(other.arg1_),
arg2_(other.arg2_)
{
}
binder2(binder2&& other)
: handler_(static_cast<Handler&&>(other.handler_)),
arg1_(static_cast<Arg1&&>(other.arg1_)),
arg2_(static_cast<Arg2&&>(other.arg2_))
{
}
void operator()()
{
static_cast<Handler&&>(handler_)(
static_cast<const Arg1&>(arg1_),
static_cast<const Arg2&>(arg2_));
}
void operator()() const
{
handler_(arg1_, arg2_);
}
//private:
Handler handler_;
Arg1 arg1_;
Arg2 arg2_;
};
template <typename Handler, typename Arg1, typename Arg2>
inline bool asio_handler_is_continuation(
binder2<Handler, Arg1, Arg2>* this_handler)
{
return asio_handler_cont_helpers::is_continuation(
this_handler->handler_);
}
template <typename Handler, typename Arg1, typename Arg2>
inline binder2<decay_t<Handler>, Arg1, Arg2> bind_handler(
Handler&& handler, const Arg1& arg1, const Arg2& arg2)
{
return binder2<decay_t<Handler>, Arg1, Arg2>(0,
static_cast<Handler&&>(handler), arg1, arg2);
}
template <typename Handler, typename Arg1, typename Arg2, typename Arg3>
class binder3
{
public:
template <typename T>
binder3(int, T&& handler, const Arg1& arg1,
const Arg2& arg2, const Arg3& arg3)
: handler_(static_cast<T&&>(handler)),
arg1_(arg1),
arg2_(arg2),
arg3_(arg3)
{
}
binder3(Handler& handler, const Arg1& arg1,
const Arg2& arg2, const Arg3& arg3)
: handler_(static_cast<Handler&&>(handler)),
arg1_(arg1),
arg2_(arg2),
arg3_(arg3)
{
}
binder3(const binder3& other)
: handler_(other.handler_),
arg1_(other.arg1_),
arg2_(other.arg2_),
arg3_(other.arg3_)
{
}
binder3(binder3&& other)
: handler_(static_cast<Handler&&>(other.handler_)),
arg1_(static_cast<Arg1&&>(other.arg1_)),
arg2_(static_cast<Arg2&&>(other.arg2_)),
arg3_(static_cast<Arg3&&>(other.arg3_))
{
}
void operator()()
{
static_cast<Handler&&>(handler_)(
static_cast<const Arg1&>(arg1_),
static_cast<const Arg2&>(arg2_),
static_cast<const Arg3&>(arg3_));
}
void operator()() const
{
handler_(arg1_, arg2_, arg3_);
}
//private:
Handler handler_;
Arg1 arg1_;
Arg2 arg2_;
Arg3 arg3_;
};
template <typename Handler, typename Arg1, typename Arg2, typename Arg3>
inline bool asio_handler_is_continuation(
binder3<Handler, Arg1, Arg2, Arg3>* this_handler)
{
return asio_handler_cont_helpers::is_continuation(
this_handler->handler_);
}
template <typename Handler, typename Arg1, typename Arg2, typename Arg3>
inline binder3<decay_t<Handler>, Arg1, Arg2, Arg3> bind_handler(
Handler&& handler, const Arg1& arg1, const Arg2& arg2,
const Arg3& arg3)
{
return binder3<decay_t<Handler>, Arg1, Arg2, Arg3>(0,
static_cast<Handler&&>(handler), arg1, arg2, arg3);
}
template <typename Handler, typename Arg1,
typename Arg2, typename Arg3, typename Arg4>
class binder4
{
public:
template <typename T>
binder4(int, T&& handler, const Arg1& arg1,
const Arg2& arg2, const Arg3& arg3, const Arg4& arg4)
: handler_(static_cast<T&&>(handler)),
arg1_(arg1),
arg2_(arg2),
arg3_(arg3),
arg4_(arg4)
{
}
binder4(Handler& handler, const Arg1& arg1,
const Arg2& arg2, const Arg3& arg3, const Arg4& arg4)
: handler_(static_cast<Handler&&>(handler)),
arg1_(arg1),
arg2_(arg2),
arg3_(arg3),
arg4_(arg4)
{
}
binder4(const binder4& other)
: handler_(other.handler_),
arg1_(other.arg1_),
arg2_(other.arg2_),
arg3_(other.arg3_),
arg4_(other.arg4_)
{
}
binder4(binder4&& other)
: handler_(static_cast<Handler&&>(other.handler_)),
arg1_(static_cast<Arg1&&>(other.arg1_)),
arg2_(static_cast<Arg2&&>(other.arg2_)),
arg3_(static_cast<Arg3&&>(other.arg3_)),
arg4_(static_cast<Arg4&&>(other.arg4_))
{
}
void operator()()
{
static_cast<Handler&&>(handler_)(
static_cast<const Arg1&>(arg1_),
static_cast<const Arg2&>(arg2_),
static_cast<const Arg3&>(arg3_),
static_cast<const Arg4&>(arg4_));
}
void operator()() const
{
handler_(arg1_, arg2_, arg3_, arg4_);
}
//private:
Handler handler_;
Arg1 arg1_;
Arg2 arg2_;
Arg3 arg3_;
Arg4 arg4_;
};
template <typename Handler, typename Arg1,
typename Arg2, typename Arg3, typename Arg4>
inline bool asio_handler_is_continuation(
binder4<Handler, Arg1, Arg2, Arg3, Arg4>* this_handler)
{
return asio_handler_cont_helpers::is_continuation(
this_handler->handler_);
}
template <typename Handler, typename Arg1,
typename Arg2, typename Arg3, typename Arg4>
inline binder4<decay_t<Handler>, Arg1, Arg2, Arg3, Arg4>
bind_handler(Handler&& handler, const Arg1& arg1,
const Arg2& arg2, const Arg3& arg3, const Arg4& arg4)
{
return binder4<decay_t<Handler>, Arg1, Arg2, Arg3, Arg4>(0,
static_cast<Handler&&>(handler), arg1, arg2, arg3, arg4);
}
template <typename Handler, typename Arg1, typename Arg2,
typename Arg3, typename Arg4, typename Arg5>
class binder5
{
public:
template <typename T>
binder5(int, T&& handler, const Arg1& arg1,
const Arg2& arg2, const Arg3& arg3, const Arg4& arg4, const Arg5& arg5)
: handler_(static_cast<T&&>(handler)),
arg1_(arg1),
arg2_(arg2),
arg3_(arg3),
arg4_(arg4),
arg5_(arg5)
{
}
binder5(Handler& handler, const Arg1& arg1, const Arg2& arg2,
const Arg3& arg3, const Arg4& arg4, const Arg5& arg5)
: handler_(static_cast<Handler&&>(handler)),
arg1_(arg1),
arg2_(arg2),
arg3_(arg3),
arg4_(arg4),
arg5_(arg5)
{
}
binder5(const binder5& other)
: handler_(other.handler_),
arg1_(other.arg1_),
arg2_(other.arg2_),
arg3_(other.arg3_),
arg4_(other.arg4_),
arg5_(other.arg5_)
{
}
binder5(binder5&& other)
: handler_(static_cast<Handler&&>(other.handler_)),
arg1_(static_cast<Arg1&&>(other.arg1_)),
arg2_(static_cast<Arg2&&>(other.arg2_)),
arg3_(static_cast<Arg3&&>(other.arg3_)),
arg4_(static_cast<Arg4&&>(other.arg4_)),
arg5_(static_cast<Arg5&&>(other.arg5_))
{
}
void operator()()
{
static_cast<Handler&&>(handler_)(
static_cast<const Arg1&>(arg1_),
static_cast<const Arg2&>(arg2_),
static_cast<const Arg3&>(arg3_),
static_cast<const Arg4&>(arg4_),
static_cast<const Arg5&>(arg5_));
}
void operator()() const
{
handler_(arg1_, arg2_, arg3_, arg4_, arg5_);
}
//private:
Handler handler_;
Arg1 arg1_;
Arg2 arg2_;
Arg3 arg3_;
Arg4 arg4_;
Arg5 arg5_;
};
template <typename Handler, typename Arg1, typename Arg2,
typename Arg3, typename Arg4, typename Arg5>
inline bool asio_handler_is_continuation(
binder5<Handler, Arg1, Arg2, Arg3, Arg4, Arg5>* this_handler)
{
return asio_handler_cont_helpers::is_continuation(
this_handler->handler_);
}
template <typename Handler, typename Arg1, typename Arg2,
typename Arg3, typename Arg4, typename Arg5>
inline binder5<decay_t<Handler>, Arg1, Arg2, Arg3, Arg4, Arg5>
bind_handler(Handler&& handler, const Arg1& arg1,
const Arg2& arg2, const Arg3& arg3, const Arg4& arg4, const Arg5& arg5)
{
return binder5<decay_t<Handler>, Arg1, Arg2, Arg3, Arg4, Arg5>(0,
static_cast<Handler&&>(handler), arg1, arg2, arg3, arg4, arg5);
}
template <typename Handler, typename Arg1>
class move_binder1
{
public:
move_binder1(int, Handler&& handler,
Arg1&& arg1)
: handler_(static_cast<Handler&&>(handler)),
arg1_(static_cast<Arg1&&>(arg1))
{
}
move_binder1(move_binder1&& other)
: handler_(static_cast<Handler&&>(other.handler_)),
arg1_(static_cast<Arg1&&>(other.arg1_))
{
}
void operator()()
{
static_cast<Handler&&>(handler_)(
static_cast<Arg1&&>(arg1_));
}
//private:
Handler handler_;
Arg1 arg1_;
};
template <typename Handler, typename Arg1>
inline bool asio_handler_is_continuation(
move_binder1<Handler, Arg1>* this_handler)
{
return asio_handler_cont_helpers::is_continuation(
this_handler->handler_);
}
template <typename Handler, typename Arg1, typename Arg2>
class move_binder2
{
public:
move_binder2(int, Handler&& handler,
const Arg1& arg1, Arg2&& arg2)
: handler_(static_cast<Handler&&>(handler)),
arg1_(arg1),
arg2_(static_cast<Arg2&&>(arg2))
{
}
move_binder2(move_binder2&& other)
: handler_(static_cast<Handler&&>(other.handler_)),
arg1_(static_cast<Arg1&&>(other.arg1_)),
arg2_(static_cast<Arg2&&>(other.arg2_))
{
}
void operator()()
{
static_cast<Handler&&>(handler_)(
static_cast<const Arg1&>(arg1_),
static_cast<Arg2&&>(arg2_));
}
//private:
Handler handler_;
Arg1 arg1_;
Arg2 arg2_;
};
template <typename Handler, typename Arg1, typename Arg2>
inline bool asio_handler_is_continuation(
move_binder2<Handler, Arg1, Arg2>* this_handler)
{
return asio_handler_cont_helpers::is_continuation(
this_handler->handler_);
}
} // namespace detail
template <template <typename, typename> class Associator,
typename Handler, typename DefaultCandidate>
struct associator<Associator,
detail::binder0<Handler>, DefaultCandidate>
: Associator<Handler, DefaultCandidate>
{
static typename Associator<Handler, DefaultCandidate>::type get(
const detail::binder0<Handler>& h) noexcept
{
return Associator<Handler, DefaultCandidate>::get(h.handler_);
}
static auto get(const detail::binder0<Handler>& h,
const DefaultCandidate& c) noexcept
-> decltype(Associator<Handler, DefaultCandidate>::get(h.handler_, c))
{
return Associator<Handler, DefaultCandidate>::get(h.handler_, c);
}
};
template <template <typename, typename> class Associator,
typename Handler, typename Arg1, typename DefaultCandidate>
struct associator<Associator,
detail::binder1<Handler, Arg1>, DefaultCandidate>
: Associator<Handler, DefaultCandidate>
{
static typename Associator<Handler, DefaultCandidate>::type get(
const detail::binder1<Handler, Arg1>& h) noexcept
{
return Associator<Handler, DefaultCandidate>::get(h.handler_);
}
static auto get(const detail::binder1<Handler, Arg1>& h,
const DefaultCandidate& c) noexcept
-> decltype(Associator<Handler, DefaultCandidate>::get(h.handler_, c))
{
return Associator<Handler, DefaultCandidate>::get(h.handler_, c);
}
};
template <template <typename, typename> class Associator,
typename Handler, typename Arg1, typename Arg2,
typename DefaultCandidate>
struct associator<Associator,
detail::binder2<Handler, Arg1, Arg2>, DefaultCandidate>
: Associator<Handler, DefaultCandidate>
{
static typename Associator<Handler, DefaultCandidate>::type get(
const detail::binder2<Handler, Arg1, Arg2>& h) noexcept
{
return Associator<Handler, DefaultCandidate>::get(h.handler_);
}
static auto get(const detail::binder2<Handler, Arg1, Arg2>& h,
const DefaultCandidate& c) noexcept
-> decltype(Associator<Handler, DefaultCandidate>::get(h.handler_, c))
{
return Associator<Handler, DefaultCandidate>::get(h.handler_, c);
}
};
template <template <typename, typename> class Associator,
typename Handler, typename Arg1, typename Arg2, typename Arg3,
typename DefaultCandidate>
struct associator<Associator,
detail::binder3<Handler, Arg1, Arg2, Arg3>, DefaultCandidate>
: Associator<Handler, DefaultCandidate>
{
static typename Associator<Handler, DefaultCandidate>::type get(
const detail::binder3<Handler, Arg1, Arg2, Arg3>& h) noexcept
{
return Associator<Handler, DefaultCandidate>::get(h.handler_);
}
static auto get(const detail::binder3<Handler, Arg1, Arg2, Arg3>& h,
const DefaultCandidate& c) noexcept
-> decltype(Associator<Handler, DefaultCandidate>::get(h.handler_, c))
{
return Associator<Handler, DefaultCandidate>::get(h.handler_, c);
}
};
template <template <typename, typename> class Associator,
typename Handler, typename Arg1, typename Arg2, typename Arg3,
typename Arg4, typename DefaultCandidate>
struct associator<Associator,
detail::binder4<Handler, Arg1, Arg2, Arg3, Arg4>, DefaultCandidate>
: Associator<Handler, DefaultCandidate>
{
static typename Associator<Handler, DefaultCandidate>::type get(
const detail::binder4<Handler, Arg1, Arg2, Arg3, Arg4>& h) noexcept
{
return Associator<Handler, DefaultCandidate>::get(h.handler_);
}
static auto get(const detail::binder4<Handler, Arg1, Arg2, Arg3, Arg4>& h,
const DefaultCandidate& c) noexcept
-> decltype(Associator<Handler, DefaultCandidate>::get(h.handler_, c))
{
return Associator<Handler, DefaultCandidate>::get(h.handler_, c);
}
};
template <template <typename, typename> class Associator,
typename Handler, typename Arg1, typename Arg2, typename Arg3,
typename Arg4, typename Arg5, typename DefaultCandidate>
struct associator<Associator,
detail::binder5<Handler, Arg1, Arg2, Arg3, Arg4, Arg5>, DefaultCandidate>
: Associator<Handler, DefaultCandidate>
{
static typename Associator<Handler, DefaultCandidate>::type get(
const detail::binder5<Handler, Arg1, Arg2, Arg3, Arg4, Arg5>& h) noexcept
{
return Associator<Handler, DefaultCandidate>::get(h.handler_);
}
static auto get(
const detail::binder5<Handler, Arg1, Arg2, Arg3, Arg4, Arg5>& h,
const DefaultCandidate& c) noexcept
-> decltype(Associator<Handler, DefaultCandidate>::get(h.handler_, c))
{
return Associator<Handler, DefaultCandidate>::get(h.handler_, c);
}
};
template <template <typename, typename> class Associator,
typename Handler, typename Arg1, typename DefaultCandidate>
struct associator<Associator,
detail::move_binder1<Handler, Arg1>, DefaultCandidate>
: Associator<Handler, DefaultCandidate>
{
static typename Associator<Handler, DefaultCandidate>::type get(
const detail::move_binder1<Handler, Arg1>& h) noexcept
{
return Associator<Handler, DefaultCandidate>::get(h.handler_);
}
static auto get(const detail::move_binder1<Handler, Arg1>& h,
const DefaultCandidate& c) noexcept
-> decltype(Associator<Handler, DefaultCandidate>::get(h.handler_, c))
{
return Associator<Handler, DefaultCandidate>::get(h.handler_, c);
}
};
template <template <typename, typename> class Associator,
typename Handler, typename Arg1, typename Arg2, typename DefaultCandidate>
struct associator<Associator,
detail::move_binder2<Handler, Arg1, Arg2>, DefaultCandidate>
: Associator<Handler, DefaultCandidate>
{
static typename Associator<Handler, DefaultCandidate>::type get(
const detail::move_binder2<Handler, Arg1, Arg2>& h) noexcept
{
return Associator<Handler, DefaultCandidate>::get(h.handler_);
}
static auto get(const detail::move_binder2<Handler, Arg1, Arg2>& h,
const DefaultCandidate& c) noexcept
-> decltype(Associator<Handler, DefaultCandidate>::get(h.handler_, c))
{
return Associator<Handler, DefaultCandidate>::get(h.handler_, c);
}
};
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_BIND_HANDLER_HPP
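
bind_handler packages a handler together with its pre-bound arguments into a nullary function object, and the associator specialisations above ensure the binder's associated allocator, executor and cancellation slot still come from the wrapped handler. A minimal sketch of deferred invocation through an executor:

#include <cstddef>
#include "asio/detail/bind_handler.hpp"
#include "asio/error_code.hpp"
#include "asio/io_context.hpp"
#include "asio/post.hpp"

int main()
{
  asio::io_context ctx;
  auto handler = [](const asio::error_code&, std::size_t) {};
  // binder2 stores the handler plus both arguments and is itself a
  // nullary callable, so it can be posted and invoked later.
  asio::post(ctx, asio::detail::bind_handler(
      handler, asio::error_code(), std::size_t(42)));
  ctx.run();
}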


@@ -0,0 +1,107 @@
//
// detail/blocking_executor_op.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_BLOCKING_EXECUTOR_OP_HPP
#define ASIO_DETAIL_BLOCKING_EXECUTOR_OP_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/event.hpp"
#include "asio/detail/fenced_block.hpp"
#include "asio/detail/mutex.hpp"
#include "asio/detail/scheduler_operation.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename Operation = scheduler_operation>
class blocking_executor_op_base : public Operation
{
public:
blocking_executor_op_base(typename Operation::func_type complete_func)
: Operation(complete_func),
is_complete_(false)
{
}
void wait()
{
asio::detail::mutex::scoped_lock lock(mutex_);
while (!is_complete_)
event_.wait(lock);
}
protected:
struct do_complete_cleanup
{
~do_complete_cleanup()
{
asio::detail::mutex::scoped_lock lock(op_->mutex_);
op_->is_complete_ = true;
op_->event_.unlock_and_signal_one_for_destruction(lock);
}
blocking_executor_op_base* op_;
};
private:
asio::detail::mutex mutex_;
asio::detail::event event_;
bool is_complete_;
};
template <typename Handler, typename Operation = scheduler_operation>
class blocking_executor_op : public blocking_executor_op_base<Operation>
{
public:
blocking_executor_op(Handler& h)
: blocking_executor_op_base<Operation>(&blocking_executor_op::do_complete),
handler_(h)
{
}
static void do_complete(void* owner, Operation* base,
const asio::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
ASIO_ASSUME(base != 0);
blocking_executor_op* o(static_cast<blocking_executor_op*>(base));
typename blocking_executor_op_base<Operation>::do_complete_cleanup
on_exit = { o };
(void)on_exit;
ASIO_HANDLER_COMPLETION((*o));
// Make the upcall if required.
if (owner)
{
fenced_block b(fenced_block::half);
ASIO_HANDLER_INVOCATION_BEGIN(());
static_cast<Handler&&>(o->handler_)();
ASIO_HANDLER_INVOCATION_END;
}
}
private:
Handler& handler_;
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_BLOCKING_EXECUTOR_OP_HPP
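
The base class implements a simple blocking handshake: the submitting thread parks in wait() until do_complete has run the handler and flipped is_complete_ under the mutex, which is what lets a blocking dispatch return only after the handler has finished on another thread. The same handshake expressed with standard primitives (a conceptual stand-in, not asio's event implementation):

#include <condition_variable>
#include <mutex>

struct blocking_handshake
{
  void wait()
  {
    std::unique_lock<std::mutex> lock(m_);
    cv_.wait(lock, [this]{ return done_; });
  }
  void signal_completion()
  {
    { std::lock_guard<std::mutex> lock(m_); done_ = true; }
    cv_.notify_one();
  }
  std::mutex m_;
  std::condition_variable cv_;
  bool done_ = false;
};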


@@ -0,0 +1,66 @@
//
// detail/buffer_resize_guard.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_BUFFER_RESIZE_GUARD_HPP
#define ASIO_DETAIL_BUFFER_RESIZE_GUARD_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/limits.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
// Helper class to manage buffer resizing in an exception safe way.
template <typename Buffer>
class buffer_resize_guard
{
public:
// Constructor.
buffer_resize_guard(Buffer& buffer)
: buffer_(buffer),
old_size_(buffer.size())
{
}
// Destructor rolls back the buffer resize unless commit was called.
~buffer_resize_guard()
{
if (old_size_ != (std::numeric_limits<size_t>::max)())
{
buffer_.resize(old_size_);
}
}
// Commit the resize transaction.
void commit()
{
old_size_ = (std::numeric_limits<size_t>::max)();
}
private:
// The buffer being managed.
Buffer& buffer_;
// The size of the buffer at the time the guard was constructed.
size_t old_size_;
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_BUFFER_RESIZE_GUARD_HPP
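
The guard gives container resizes commit-or-rollback semantics: grow the buffer, attempt work that may throw, and call commit() only on success so the destructor leaves the new size in place. A minimal sketch using std::string as the Buffer type:

#include <string>
#include "asio/detail/buffer_resize_guard.hpp"

void append_block(std::string& buf)
{
  asio::detail::buffer_resize_guard<std::string> guard(buf);
  buf.resize(buf.size() + 512);
  // ... fill the newly added space; this may throw ...
  guard.commit(); // keep the new size; omitting this rolls back
}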


@@ -0,0 +1,697 @@
//
// detail/buffer_sequence_adapter.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_BUFFER_SEQUENCE_ADAPTER_HPP
#define ASIO_DETAIL_BUFFER_SEQUENCE_ADAPTER_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/buffer.hpp"
#include "asio/detail/array_fwd.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/registered_buffer.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
class buffer_sequence_adapter_base
{
#if defined(ASIO_WINDOWS_RUNTIME)
public:
// The maximum number of buffers to support in a single operation.
enum { max_buffers = 1 };
protected:
typedef Windows::Storage::Streams::IBuffer^ native_buffer_type;
ASIO_DECL static void init_native_buffer(
native_buffer_type& buf,
const asio::mutable_buffer& buffer);
ASIO_DECL static void init_native_buffer(
native_buffer_type& buf,
const asio::const_buffer& buffer);
#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)
public:
// The maximum number of buffers to support in a single operation.
enum { max_buffers = 64 < max_iov_len ? 64 : max_iov_len };
protected:
typedef WSABUF native_buffer_type;
static void init_native_buffer(WSABUF& buf,
const asio::mutable_buffer& buffer)
{
buf.buf = static_cast<char*>(buffer.data());
buf.len = static_cast<ULONG>(buffer.size());
}
static void init_native_buffer(WSABUF& buf,
const asio::const_buffer& buffer)
{
buf.buf = const_cast<char*>(static_cast<const char*>(buffer.data()));
buf.len = static_cast<ULONG>(buffer.size());
}
#else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
public:
// The maximum number of buffers to support in a single operation.
enum { max_buffers = 64 < max_iov_len ? 64 : max_iov_len };
protected:
typedef iovec native_buffer_type;
static void init_iov_base(void*& base, void* addr)
{
base = addr;
}
template <typename T>
static void init_iov_base(T& base, void* addr)
{
base = static_cast<T>(addr);
}
static void init_native_buffer(iovec& iov,
const asio::mutable_buffer& buffer)
{
init_iov_base(iov.iov_base, buffer.data());
iov.iov_len = buffer.size();
}
static void init_native_buffer(iovec& iov,
const asio::const_buffer& buffer)
{
init_iov_base(iov.iov_base, const_cast<void*>(buffer.data()));
iov.iov_len = buffer.size();
}
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
};
// Helper class to translate buffers into the native buffer representation.
template <typename Buffer, typename Buffers>
class buffer_sequence_adapter
: buffer_sequence_adapter_base
{
public:
enum { is_single_buffer = false };
enum { is_registered_buffer = false };
explicit buffer_sequence_adapter(const Buffers& buffer_sequence)
: count_(0), total_buffer_size_(0)
{
buffer_sequence_adapter::init(
asio::buffer_sequence_begin(buffer_sequence),
asio::buffer_sequence_end(buffer_sequence));
}
native_buffer_type* buffers()
{
return buffers_;
}
std::size_t count() const
{
return count_;
}
std::size_t total_size() const
{
return total_buffer_size_;
}
registered_buffer_id registered_id() const
{
return registered_buffer_id();
}
bool all_empty() const
{
return total_buffer_size_ == 0;
}
static bool all_empty(const Buffers& buffer_sequence)
{
return buffer_sequence_adapter::all_empty(
asio::buffer_sequence_begin(buffer_sequence),
asio::buffer_sequence_end(buffer_sequence));
}
static void validate(const Buffers& buffer_sequence)
{
buffer_sequence_adapter::validate(
asio::buffer_sequence_begin(buffer_sequence),
asio::buffer_sequence_end(buffer_sequence));
}
static Buffer first(const Buffers& buffer_sequence)
{
return buffer_sequence_adapter::first(
asio::buffer_sequence_begin(buffer_sequence),
asio::buffer_sequence_end(buffer_sequence));
}
enum { linearisation_storage_size = 8192 };
static Buffer linearise(const Buffers& buffer_sequence,
const asio::mutable_buffer& storage)
{
return buffer_sequence_adapter::linearise(
asio::buffer_sequence_begin(buffer_sequence),
asio::buffer_sequence_end(buffer_sequence), storage);
}
private:
template <typename Iterator>
void init(Iterator begin, Iterator end)
{
Iterator iter = begin;
for (; iter != end && count_ < max_buffers; ++iter, ++count_)
{
Buffer buffer(*iter);
init_native_buffer(buffers_[count_], buffer);
total_buffer_size_ += buffer.size();
}
}
template <typename Iterator>
static bool all_empty(Iterator begin, Iterator end)
{
Iterator iter = begin;
std::size_t i = 0;
for (; iter != end && i < max_buffers; ++iter, ++i)
if (Buffer(*iter).size() > 0)
return false;
return true;
}
template <typename Iterator>
static void validate(Iterator begin, Iterator end)
{
Iterator iter = begin;
for (; iter != end; ++iter)
{
Buffer buffer(*iter);
buffer.data();
}
}
template <typename Iterator>
static Buffer first(Iterator begin, Iterator end)
{
Iterator iter = begin;
for (; iter != end; ++iter)
{
Buffer buffer(*iter);
if (buffer.size() != 0)
return buffer;
}
return Buffer();
}
template <typename Iterator>
static Buffer linearise(Iterator begin, Iterator end,
const asio::mutable_buffer& storage)
{
asio::mutable_buffer unused_storage = storage;
Iterator iter = begin;
while (iter != end && unused_storage.size() != 0)
{
Buffer buffer(*iter);
++iter;
if (buffer.size() == 0)
continue;
if (unused_storage.size() == storage.size())
{
if (iter == end)
return buffer;
if (buffer.size() >= unused_storage.size())
return buffer;
}
unused_storage += asio::buffer_copy(unused_storage, buffer);
}
return Buffer(storage.data(), storage.size() - unused_storage.size());
}
native_buffer_type buffers_[max_buffers];
std::size_t count_;
std::size_t total_buffer_size_;
};
template <typename Buffer>
class buffer_sequence_adapter<Buffer, asio::mutable_buffer>
: buffer_sequence_adapter_base
{
public:
enum { is_single_buffer = true };
enum { is_registered_buffer = false };
explicit buffer_sequence_adapter(
const asio::mutable_buffer& buffer_sequence)
{
init_native_buffer(buffer_, Buffer(buffer_sequence));
total_buffer_size_ = buffer_sequence.size();
}
native_buffer_type* buffers()
{
return &buffer_;
}
std::size_t count() const
{
return 1;
}
std::size_t total_size() const
{
return total_buffer_size_;
}
registered_buffer_id registered_id() const
{
return registered_buffer_id();
}
bool all_empty() const
{
return total_buffer_size_ == 0;
}
static bool all_empty(const asio::mutable_buffer& buffer_sequence)
{
return buffer_sequence.size() == 0;
}
static void validate(const asio::mutable_buffer& buffer_sequence)
{
buffer_sequence.data();
}
static Buffer first(const asio::mutable_buffer& buffer_sequence)
{
return Buffer(buffer_sequence);
}
enum { linearisation_storage_size = 1 };
static Buffer linearise(const asio::mutable_buffer& buffer_sequence,
const Buffer&)
{
return Buffer(buffer_sequence);
}
private:
native_buffer_type buffer_;
std::size_t total_buffer_size_;
};
template <typename Buffer>
class buffer_sequence_adapter<Buffer, asio::const_buffer>
: buffer_sequence_adapter_base
{
public:
enum { is_single_buffer = true };
enum { is_registered_buffer = false };
explicit buffer_sequence_adapter(
const asio::const_buffer& buffer_sequence)
{
init_native_buffer(buffer_, Buffer(buffer_sequence));
total_buffer_size_ = buffer_sequence.size();
}
native_buffer_type* buffers()
{
return &buffer_;
}
std::size_t count() const
{
return 1;
}
std::size_t total_size() const
{
return total_buffer_size_;
}
registered_buffer_id registered_id() const
{
return registered_buffer_id();
}
bool all_empty() const
{
return total_buffer_size_ == 0;
}
static bool all_empty(const asio::const_buffer& buffer_sequence)
{
return buffer_sequence.size() == 0;
}
static void validate(const asio::const_buffer& buffer_sequence)
{
buffer_sequence.data();
}
static Buffer first(const asio::const_buffer& buffer_sequence)
{
return Buffer(buffer_sequence);
}
enum { linearisation_storage_size = 1 };
static Buffer linearise(const asio::const_buffer& buffer_sequence,
const Buffer&)
{
return Buffer(buffer_sequence);
}
private:
native_buffer_type buffer_;
std::size_t total_buffer_size_;
};
template <typename Buffer>
class buffer_sequence_adapter<Buffer, asio::mutable_registered_buffer>
: buffer_sequence_adapter_base
{
public:
enum { is_single_buffer = true };
enum { is_registered_buffer = true };
explicit buffer_sequence_adapter(
const asio::mutable_registered_buffer& buffer_sequence)
{
init_native_buffer(buffer_, buffer_sequence.buffer());
total_buffer_size_ = buffer_sequence.size();
registered_id_ = buffer_sequence.id();
}
native_buffer_type* buffers()
{
return &buffer_;
}
std::size_t count() const
{
return 1;
}
std::size_t total_size() const
{
return total_buffer_size_;
}
registered_buffer_id registered_id() const
{
return registered_id_;
}
bool all_empty() const
{
return total_buffer_size_ == 0;
}
static bool all_empty(
const asio::mutable_registered_buffer& buffer_sequence)
{
return buffer_sequence.size() == 0;
}
static void validate(
const asio::mutable_registered_buffer& buffer_sequence)
{
buffer_sequence.data();
}
static Buffer first(
const asio::mutable_registered_buffer& buffer_sequence)
{
return Buffer(buffer_sequence.buffer());
}
enum { linearisation_storage_size = 1 };
static Buffer linearise(
const asio::mutable_registered_buffer& buffer_sequence,
const Buffer&)
{
return Buffer(buffer_sequence.buffer());
}
private:
native_buffer_type buffer_;
std::size_t total_buffer_size_;
registered_buffer_id registered_id_;
};
template <typename Buffer>
class buffer_sequence_adapter<Buffer, asio::const_registered_buffer>
: buffer_sequence_adapter_base
{
public:
enum { is_single_buffer = true };
enum { is_registered_buffer = true };
explicit buffer_sequence_adapter(
const asio::const_registered_buffer& buffer_sequence)
{
init_native_buffer(buffer_, buffer_sequence.buffer());
total_buffer_size_ = buffer_sequence.size();
registered_id_ = buffer_sequence.id();
}
native_buffer_type* buffers()
{
return &buffer_;
}
std::size_t count() const
{
return 1;
}
std::size_t total_size() const
{
return total_buffer_size_;
}
registered_buffer_id registered_id() const
{
return registered_id_;
}
bool all_empty() const
{
return total_buffer_size_ == 0;
}
static bool all_empty(
const asio::const_registered_buffer& buffer_sequence)
{
return buffer_sequence.size() == 0;
}
static void validate(
const asio::const_registered_buffer& buffer_sequence)
{
buffer_sequence.data();
}
static Buffer first(
const asio::const_registered_buffer& buffer_sequence)
{
return Buffer(buffer_sequence.buffer());
}
enum { linearisation_storage_size = 1 };
static Buffer linearise(
const asio::const_registered_buffer& buffer_sequence,
const Buffer&)
{
return Buffer(buffer_sequence.buffer());
}
private:
native_buffer_type buffer_;
std::size_t total_buffer_size_;
registered_buffer_id registered_id_;
};
template <typename Buffer, typename Elem>
class buffer_sequence_adapter<Buffer, boost::array<Elem, 2>>
: buffer_sequence_adapter_base
{
public:
enum { is_single_buffer = false };
enum { is_registered_buffer = false };
explicit buffer_sequence_adapter(
const boost::array<Elem, 2>& buffer_sequence)
{
init_native_buffer(buffers_[0], Buffer(buffer_sequence[0]));
init_native_buffer(buffers_[1], Buffer(buffer_sequence[1]));
total_buffer_size_ = buffer_sequence[0].size() + buffer_sequence[1].size();
}
native_buffer_type* buffers()
{
return buffers_;
}
std::size_t count() const
{
return 2;
}
std::size_t total_size() const
{
return total_buffer_size_;
}
registered_buffer_id registered_id() const
{
return registered_buffer_id();
}
bool all_empty() const
{
return total_buffer_size_ == 0;
}
static bool all_empty(const boost::array<Elem, 2>& buffer_sequence)
{
return buffer_sequence[0].size() == 0 && buffer_sequence[1].size() == 0;
}
static void validate(const boost::array<Elem, 2>& buffer_sequence)
{
buffer_sequence[0].data();
buffer_sequence[1].data();
}
static Buffer first(const boost::array<Elem, 2>& buffer_sequence)
{
return Buffer(buffer_sequence[0].size() != 0
? buffer_sequence[0] : buffer_sequence[1]);
}
enum { linearisation_storage_size = 8192 };
static Buffer linearise(const boost::array<Elem, 2>& buffer_sequence,
const asio::mutable_buffer& storage)
{
if (buffer_sequence[0].size() == 0)
return Buffer(buffer_sequence[1]);
if (buffer_sequence[1].size() == 0)
return Buffer(buffer_sequence[0]);
return Buffer(storage.data(),
asio::buffer_copy(storage, buffer_sequence));
}
private:
native_buffer_type buffers_[2];
std::size_t total_buffer_size_;
};
template <typename Buffer, typename Elem>
class buffer_sequence_adapter<Buffer, std::array<Elem, 2>>
: buffer_sequence_adapter_base
{
public:
enum { is_single_buffer = false };
enum { is_registered_buffer = false };
explicit buffer_sequence_adapter(
const std::array<Elem, 2>& buffer_sequence)
{
init_native_buffer(buffers_[0], Buffer(buffer_sequence[0]));
init_native_buffer(buffers_[1], Buffer(buffer_sequence[1]));
total_buffer_size_ = buffer_sequence[0].size() + buffer_sequence[1].size();
}
native_buffer_type* buffers()
{
return buffers_;
}
std::size_t count() const
{
return 2;
}
std::size_t total_size() const
{
return total_buffer_size_;
}
registered_buffer_id registered_id() const
{
return registered_buffer_id();
}
bool all_empty() const
{
return total_buffer_size_ == 0;
}
static bool all_empty(const std::array<Elem, 2>& buffer_sequence)
{
return buffer_sequence[0].size() == 0 && buffer_sequence[1].size() == 0;
}
static void validate(const std::array<Elem, 2>& buffer_sequence)
{
buffer_sequence[0].data();
buffer_sequence[1].data();
}
static Buffer first(const std::array<Elem, 2>& buffer_sequence)
{
return Buffer(buffer_sequence[0].size() != 0
? buffer_sequence[0] : buffer_sequence[1]);
}
enum { linearisation_storage_size = 8192 };
static Buffer linearise(const std::array<Elem, 2>& buffer_sequence,
const asio::mutable_buffer& storage)
{
if (buffer_sequence[0].size() == 0)
return Buffer(buffer_sequence[1]);
if (buffer_sequence[1].size() == 0)
return Buffer(buffer_sequence[0]);
return Buffer(storage.data(),
asio::buffer_copy(storage, buffer_sequence));
}
private:
native_buffer_type buffers_[2];
std::size_t total_buffer_size_;
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#if defined(ASIO_HEADER_ONLY)
# include "asio/detail/impl/buffer_sequence_adapter.ipp"
#endif // defined(ASIO_HEADER_ONLY)
#endif // ASIO_DETAIL_BUFFER_SEQUENCE_ADAPTER_HPP
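
The adapter walks a buffer sequence once, filling a fixed-size array of native descriptors (WSABUF on Windows, iovec elsewhere) capped at max_buffers and accumulating the total size, so a scatter-gather syscall can consume it directly. A small sketch using the two-element std::array specialisation above:

#include <array>
#include "asio/buffer.hpp"
#include "asio/detail/buffer_sequence_adapter.hpp"

void example()
{
  char a[64], b[64];
  std::array<asio::mutable_buffer, 2> seq =
    {{ asio::buffer(a), asio::buffer(b) }};
  asio::detail::buffer_sequence_adapter<
      asio::mutable_buffer, std::array<asio::mutable_buffer, 2>>
    adapter(seq);
  // adapter.buffers() now points at two native iovec/WSABUF entries,
  // adapter.count() == 2, adapter.total_size() == 128.
  (void)adapter;
}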


@@ -0,0 +1,126 @@
//
// detail/buffered_stream_storage.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_BUFFERED_STREAM_STORAGE_HPP
#define ASIO_DETAIL_BUFFERED_STREAM_STORAGE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/buffer.hpp"
#include "asio/detail/assert.hpp"
#include <cstddef>
#include <cstring>
#include <vector>
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
class buffered_stream_storage
{
public:
// The type of the bytes stored in the buffer.
typedef unsigned char byte_type;
// The type used for offsets into the buffer.
typedef std::size_t size_type;
// Constructor.
explicit buffered_stream_storage(std::size_t buffer_capacity)
: begin_offset_(0),
end_offset_(0),
buffer_(buffer_capacity)
{
}
// Clear the buffer.
void clear()
{
begin_offset_ = 0;
end_offset_ = 0;
}
// Return a pointer to the beginning of the unread data.
mutable_buffer data()
{
return asio::buffer(buffer_) + begin_offset_;
}
// Return a pointer to the beginning of the unread data.
const_buffer data() const
{
return asio::buffer(buffer_) + begin_offset_;
}
// Return whether there is no unread data in the buffer.
bool empty() const
{
return begin_offset_ == end_offset_;
}
// Return the amount of unread data that is in the buffer.
size_type size() const
{
return end_offset_ - begin_offset_;
}
// Resize the buffer to the specified length.
void resize(size_type length)
{
ASIO_ASSERT(length <= capacity());
if (begin_offset_ + length <= capacity())
{
end_offset_ = begin_offset_ + length;
}
else
{
using namespace std; // For memmove.
memmove(&buffer_[0], &buffer_[0] + begin_offset_, size());
end_offset_ = length;
begin_offset_ = 0;
}
}
// Return the maximum size for data in the buffer.
size_type capacity() const
{
return buffer_.size();
}
// Consume multiple bytes from the beginning of the buffer.
void consume(size_type count)
{
ASIO_ASSERT(begin_offset_ + count <= end_offset_);
begin_offset_ += count;
if (empty())
clear();
}
private:
// The offset to the beginning of the unread data.
size_type begin_offset_;
// The offset to the end of the unread data.
size_type end_offset_;
// The data in the buffer.
std::vector<byte_type> buffer_;
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_BUFFERED_STREAM_STORAGE_HPP
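
The storage is a flat byte vector with begin/end offsets marking the unread region: resize() extends the readable region (compacting unread bytes to the front if the tail would overflow capacity) and consume() advances the read offset, with both offsets snapping back to zero once everything is consumed. A short sketch:

#include <cstddef>
#include "asio/detail/buffered_stream_storage.hpp"

void example()
{
  asio::detail::buffered_stream_storage storage(1024);
  storage.resize(128);  // pretend 128 bytes were read into data()
  storage.consume(100); // the caller took the first 100 of them
  std::size_t remaining = storage.size(); // == 28 unread bytes
  (void)remaining;
}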


@@ -0,0 +1,125 @@
//
// detail/call_stack.hpp
// ~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_CALL_STACK_HPP
#define ASIO_DETAIL_CALL_STACK_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/noncopyable.hpp"
#include "asio/detail/tss_ptr.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
// Helper class to determine whether or not the current thread is inside an
// invocation of io_context::run() for a specified io_context object.
template <typename Key, typename Value = unsigned char>
class call_stack
{
public:
// Context class automatically pushes the key/value pair on to the stack.
class context
: private noncopyable
{
public:
// Push the key on to the stack.
explicit context(Key* k)
: key_(k),
next_(call_stack<Key, Value>::top_)
{
value_ = reinterpret_cast<unsigned char*>(this);
call_stack<Key, Value>::top_ = this;
}
// Push the key/value pair on to the stack.
context(Key* k, Value& v)
: key_(k),
value_(&v),
next_(call_stack<Key, Value>::top_)
{
call_stack<Key, Value>::top_ = this;
}
// Pop the key/value pair from the stack.
~context()
{
call_stack<Key, Value>::top_ = next_;
}
// Find the next context with the same key.
Value* next_by_key() const
{
context* elem = next_;
while (elem)
{
if (elem->key_ == key_)
return elem->value_;
elem = elem->next_;
}
return 0;
}
private:
friend class call_stack<Key, Value>;
// The key associated with the context.
Key* key_;
// The value associated with the context.
Value* value_;
// The next element in the stack.
context* next_;
};
friend class context;
// Determine whether the specified owner is on the stack. Returns the
// address of the associated value if present, 0 otherwise.
static Value* contains(Key* k)
{
context* elem = top_;
while (elem)
{
if (elem->key_ == k)
return elem->value_;
elem = elem->next_;
}
return 0;
}
// Obtain the value at the top of the stack.
static Value* top()
{
context* elem = top_;
return elem ? elem->value_ : 0;
}
private:
// The top of the stack of calls for the current thread.
static tss_ptr<context> top_;
};
template <typename Key, typename Value>
tss_ptr<typename call_stack<Key, Value>::context>
call_stack<Key, Value>::top_;
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_CALL_STACK_HPP
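
Each context links itself into a thread-specific singly linked list on construction and unlinks on destruction, so contains() can answer "is the current thread inside run() for this object?" without locking. A sketch of the pattern (my_scheduler and the free functions are hypothetical):

#include "asio/detail/call_stack.hpp"

struct my_scheduler {};

void run(my_scheduler& s)
{
  // Mark this thread as being inside s's run loop for our lifetime.
  asio::detail::call_stack<my_scheduler>::context ctx(&s);
  // ... dispatch handlers ...
}

bool running_in_this_thread(my_scheduler& s)
{
  // Non-zero while a frame on this thread holds a context for s.
  return asio::detail::call_stack<my_scheduler>::contains(&s) != 0;
}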


@@ -0,0 +1,45 @@
//
// detail/chrono.hpp
// ~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_CHRONO_HPP
#define ASIO_DETAIL_CHRONO_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <chrono>
namespace asio {
namespace chrono {
using std::chrono::duration;
using std::chrono::time_point;
using std::chrono::duration_cast;
using std::chrono::nanoseconds;
using std::chrono::microseconds;
using std::chrono::milliseconds;
using std::chrono::seconds;
using std::chrono::minutes;
using std::chrono::hours;
using std::chrono::time_point_cast;
#if defined(ASIO_HAS_STD_CHRONO_MONOTONIC_CLOCK)
typedef std::chrono::monotonic_clock steady_clock;
#else // defined(ASIO_HAS_STD_CHRONO_MONOTONIC_CLOCK)
using std::chrono::steady_clock;
#endif // defined(ASIO_HAS_STD_CHRONO_MONOTONIC_CLOCK)
using std::chrono::system_clock;
using std::chrono::high_resolution_clock;
} // namespace chrono
} // namespace asio
#endif // ASIO_DETAIL_CHRONO_HPP


@@ -0,0 +1,190 @@
//
// detail/chrono_time_traits.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_CHRONO_TIME_TRAITS_HPP
#define ASIO_DETAIL_CHRONO_TIME_TRAITS_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/cstdint.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
// Helper template to compute the greatest common divisor.
template <int64_t v1, int64_t v2>
struct gcd { enum { value = gcd<v2, v1 % v2>::value }; };
template <int64_t v1>
struct gcd<v1, 0> { enum { value = v1 }; };
// Adapts std::chrono clocks for use with a deadline timer.
template <typename Clock, typename WaitTraits>
struct chrono_time_traits
{
// The clock type.
typedef Clock clock_type;
// The duration type of the clock.
typedef typename clock_type::duration duration_type;
// The time point type of the clock.
typedef typename clock_type::time_point time_type;
// The period of the clock.
typedef typename duration_type::period period_type;
// Get the current time.
static time_type now()
{
return clock_type::now();
}
// Add a duration to a time.
static time_type add(const time_type& t, const duration_type& d)
{
const time_type epoch;
if (t >= epoch)
{
if ((time_type::max)() - t < d)
return (time_type::max)();
}
else // t < epoch
{
if (-(t - (time_type::min)()) > d)
return (time_type::min)();
}
return t + d;
}
// Subtract one time from another.
static duration_type subtract(const time_type& t1, const time_type& t2)
{
const time_type epoch;
if (t1 >= epoch)
{
if (t2 >= epoch)
{
return t1 - t2;
}
else if (t2 == (time_type::min)())
{
return (duration_type::max)();
}
else if ((time_type::max)() - t1 < epoch - t2)
{
return (duration_type::max)();
}
else
{
return t1 - t2;
}
}
else // t1 < epoch
{
if (t2 < epoch)
{
return t1 - t2;
}
else if (t1 == (time_type::min)())
{
return (duration_type::min)();
}
else if ((time_type::max)() - t2 < epoch - t1)
{
return (duration_type::min)();
}
else
{
return -(t2 - t1);
}
}
}
// Test whether one time is less than another.
static bool less_than(const time_type& t1, const time_type& t2)
{
return t1 < t2;
}
// Implement just enough of the posix_time::time_duration interface to supply
// what the timer_queue requires.
class posix_time_duration
{
public:
explicit posix_time_duration(const duration_type& d)
: d_(d)
{
}
int64_t ticks() const
{
return d_.count();
}
int64_t total_seconds() const
{
return duration_cast<1, 1>();
}
int64_t total_milliseconds() const
{
return duration_cast<1, 1000>();
}
int64_t total_microseconds() const
{
return duration_cast<1, 1000000>();
}
private:
template <int64_t Num, int64_t Den>
int64_t duration_cast() const
{
const int64_t num1 = period_type::num / gcd<period_type::num, Num>::value;
const int64_t num2 = Num / gcd<period_type::num, Num>::value;
const int64_t den1 = period_type::den / gcd<period_type::den, Den>::value;
const int64_t den2 = Den / gcd<period_type::den, Den>::value;
const int64_t num = num1 * den2;
const int64_t den = num2 * den1;
if (num == 1 && den == 1)
return ticks();
else if (num != 1 && den == 1)
return ticks() * num;
else if (num == 1 && period_type::den != 1)
return ticks() / den;
else
return ticks() * num / den;
}
duration_type d_;
};
// Convert to POSIX duration type.
static posix_time_duration to_posix_duration(const duration_type& d)
{
return posix_time_duration(WaitTraits::to_wait_duration(d));
}
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_CHRONO_TIME_TRAITS_HPP
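
duration_cast reduces the conversion ratio at compile time with the gcd helper and then picks the cheapest arithmetic: pass-through, a single multiply, a single divide, or multiply-then-divide. A worked instance of the reduction (an illustration, not asio code): converting millisecond ticks (period 1/1000) via total_microseconds (Num/Den = 1/1000000) gives num1 = num2 = 1, den1 = 1000/1000 = 1, den2 = 1000000/1000 = 1000, hence num = 1000 and den = 1, and the whole cast collapses to a single multiply:

#include <cstdint>

// ticks in milliseconds -> microseconds: the num != 1, den == 1 branch.
std::int64_t ms_to_us(std::int64_t ticks) { return ticks * 1000; }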


@@ -0,0 +1,88 @@
//
// detail/completion_handler.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_COMPLETION_HANDLER_HPP
#define ASIO_DETAIL_COMPLETION_HANDLER_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/fenced_block.hpp"
#include "asio/detail/handler_alloc_helpers.hpp"
#include "asio/detail/handler_work.hpp"
#include "asio/detail/memory.hpp"
#include "asio/detail/operation.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename Handler, typename IoExecutor>
class completion_handler : public operation
{
public:
ASIO_DEFINE_HANDLER_PTR(completion_handler);
completion_handler(Handler& h, const IoExecutor& io_ex)
: operation(&completion_handler::do_complete),
handler_(static_cast<Handler&&>(h)),
work_(handler_, io_ex)
{
}
static void do_complete(void* owner, operation* base,
const asio::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
completion_handler* h(static_cast<completion_handler*>(base));
ptr p = { asio::detail::addressof(h->handler_), h, h };
ASIO_HANDLER_COMPLETION((*h));
// Take ownership of the operation's outstanding work.
handler_work<Handler, IoExecutor> w(
static_cast<handler_work<Handler, IoExecutor>&&>(
h->work_));
// Make a copy of the handler so that the memory can be deallocated before
// the upcall is made. Even if we're not about to make an upcall, a
// sub-object of the handler may be the true owner of the memory associated
// with the handler. Consequently, a local copy of the handler is required
// to ensure that any owning sub-object remains valid until after we have
// deallocated the memory here.
Handler handler(static_cast<Handler&&>(h->handler_));
p.h = asio::detail::addressof(handler);
p.reset();
// Make the upcall if required.
if (owner)
{
fenced_block b(fenced_block::half);
ASIO_HANDLER_INVOCATION_BEGIN(());
w.complete(handler, handler);
ASIO_HANDLER_INVOCATION_END;
}
}
private:
Handler handler_;
handler_work<Handler, IoExecutor> work_;
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_COMPLETION_HANDLER_HPP
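
do_complete's ordering (move the work, move the handler to the stack, deallocate, then invoke) is the memory-recycling pattern used by every asio operation. A standalone sketch of just that ordering, with std::function standing in for an arbitrary handler (hypothetical names, not asio's):

#include <functional>
#include <iostream>
#include <memory>
#include <utility>

template <typename Handler>
void complete_op(std::unique_ptr<Handler>& op_storage)
{
  // 1. Move the handler to the stack while the operation still owns it.
  Handler local(std::move(*op_storage));

  // 2. Release the operation's memory. A sub-object of the handler may be the
  //    true owner of that memory, so this must happen before the upcall.
  op_storage.reset();

  // 3. Only now make the upcall.
  local();
}

int main()
{
  auto op = std::make_unique<std::function<void()>>(
      [] { std::cout << "handler invoked after deallocation\n"; });
  complete_op(op);
}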

View File

@@ -0,0 +1,127 @@
//
// detail/completion_message.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_COMPLETION_MESSAGE_HPP
#define ASIO_DETAIL_COMPLETION_MESSAGE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <tuple>
#include "asio/detail/type_traits.hpp"
#include "asio/detail/utility.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename Signature>
class completion_message;
template <typename R>
class completion_message<R()>
{
public:
completion_message(int)
{
}
template <typename Handler>
void receive(Handler& handler)
{
static_cast<Handler&&>(handler)();
}
};
template <typename R, typename Arg0>
class completion_message<R(Arg0)>
{
public:
template <typename T0>
completion_message(int, T0&& t0)
: arg0_(static_cast<T0&&>(t0))
{
}
template <typename Handler>
void receive(Handler& handler)
{
static_cast<Handler&&>(handler)(
static_cast<arg0_type&&>(arg0_));
}
private:
typedef decay_t<Arg0> arg0_type;
arg0_type arg0_;
};
template <typename R, typename Arg0, typename Arg1>
class completion_message<R(Arg0, Arg1)>
{
public:
template <typename T0, typename T1>
completion_message(int, T0&& t0, T1&& t1)
: arg0_(static_cast<T0&&>(t0)),
arg1_(static_cast<T1&&>(t1))
{
}
template <typename Handler>
void receive(Handler& handler)
{
static_cast<Handler&&>(handler)(
static_cast<arg0_type&&>(arg0_),
static_cast<arg1_type&&>(arg1_));
}
private:
typedef decay_t<Arg0> arg0_type;
arg0_type arg0_;
typedef decay_t<Arg1> arg1_type;
arg1_type arg1_;
};
template <typename R, typename... Args>
class completion_message<R(Args...)>
{
public:
template <typename... T>
completion_message(int, T&&... t)
: args_(static_cast<T&&>(t)...)
{
}
template <typename Handler>
void receive(Handler& h)
{
this->do_receive(h, asio::detail::index_sequence_for<Args...>());
}
private:
template <typename Handler, std::size_t... I>
void do_receive(Handler& h, asio::detail::index_sequence<I...>)
{
static_cast<Handler&&>(h)(
std::get<I>(static_cast<args_type&&>(args_))...);
}
typedef std::tuple<decay_t<Args>...> args_type;
args_type args_;
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_COMPLETION_MESSAGE_HPP
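
The variadic specialisation stores decayed copies of the arguments in a std::tuple and replays them into the handler through an index_sequence; std::apply expresses the same replay directly. A standalone equivalent (sketch):

#include <iostream>
#include <string>
#include <tuple>
#include <utility>

template <typename... Args>
class message
{
public:
  template <typename... T>
  explicit message(T&&... t) : args_(std::forward<T>(t)...) {}

  template <typename Handler>
  void receive(Handler&& h)
  {
    // Moves each stored argument into the invocation, matching the
    // static_cast<args_type&&> in the original do_receive.
    std::apply(std::forward<Handler>(h), std::move(args_));
  }

private:
  std::tuple<std::decay_t<Args>...> args_;
};

int main()
{
  message<std::string, int> m("bytes transferred", 42);
  m.receive([](std::string what, int n) { std::cout << what << ": " << n << "\n"; });
}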

View File

@@ -0,0 +1,220 @@
//
// detail/completion_payload.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_COMPLETION_PAYLOAD_HPP
#define ASIO_DETAIL_COMPLETION_PAYLOAD_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/type_traits.hpp"
#include "asio/error_code.hpp"
#include "asio/detail/completion_message.hpp"
#if defined(ASIO_HAS_STD_VARIANT)
# include <variant>
#else // defined(ASIO_HAS_STD_VARIANT)
# include <new>
#endif // defined(ASIO_HAS_STD_VARIANT)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename... Signatures>
class completion_payload;
template <typename R>
class completion_payload<R()>
{
public:
explicit completion_payload(completion_message<R()>)
{
}
template <typename Handler>
void receive(Handler& handler)
{
static_cast<Handler&&>(handler)();
}
};
template <typename Signature>
class completion_payload<Signature>
{
public:
completion_payload(completion_message<Signature>&& m)
: message_(static_cast<completion_message<Signature>&&>(m))
{
}
template <typename Handler>
void receive(Handler& handler)
{
message_.receive(handler);
}
private:
completion_message<Signature> message_;
};
#if defined(ASIO_HAS_STD_VARIANT)
template <typename... Signatures>
class completion_payload
{
public:
template <typename Signature>
completion_payload(completion_message<Signature>&& m)
: message_(static_cast<completion_message<Signature>&&>(m))
{
}
template <typename Handler>
void receive(Handler& handler)
{
std::visit(
[&](auto& message)
{
message.receive(handler);
}, message_);
}
private:
std::variant<completion_message<Signatures>...> message_;
};
#else // defined(ASIO_HAS_STD_VARIANT)
template <typename R1, typename R2>
class completion_payload<R1(), R2(asio::error_code)>
{
public:
typedef completion_message<R1()> void_message_type;
typedef completion_message<R2(asio::error_code)> error_message_type;
completion_payload(void_message_type&&)
: message_(0, asio::error_code()),
empty_(true)
{
}
completion_payload(error_message_type&& m)
: message_(static_cast<error_message_type&&>(m)),
empty_(false)
{
}
template <typename Handler>
void receive(Handler& handler)
{
if (empty_)
completion_message<R1()>(0).receive(handler);
else
message_.receive(handler);
}
private:
error_message_type message_;
bool empty_;
};
template <typename Sig1, typename Sig2>
class completion_payload<Sig1, Sig2>
{
public:
typedef completion_message<Sig1> message_1_type;
typedef completion_message<Sig2> message_2_type;
completion_payload(message_1_type&& m)
: index_(1)
{
new (&storage_.message_1_) message_1_type(static_cast<message_1_type&&>(m));
}
completion_payload(message_2_type&& m)
: index_(2)
{
new (&storage_.message_2_) message_2_type(static_cast<message_2_type&&>(m));
}
completion_payload(completion_payload&& other)
: index_(other.index_)
{
switch (index_)
{
case 1:
new (&storage_.message_1_) message_1_type(
static_cast<message_1_type&&>(other.storage_.message_1_));
break;
case 2:
new (&storage_.message_2_) message_2_type(
static_cast<message_2_type&&>(other.storage_.message_2_));
break;
default:
break;
}
}
~completion_payload()
{
switch (index_)
{
case 1:
storage_.message_1_.~message_1_type();
break;
case 2:
storage_.message_2_.~message_2_type();
break;
default:
break;
}
}
template <typename Handler>
void receive(Handler& handler)
{
switch (index_)
{
case 1:
storage_.message_1_.receive(handler);
break;
case 2:
storage_.message_2_.receive(handler);
break;
default:
break;
}
}
private:
union storage
{
storage() {}
~storage() {}
char dummy_;
message_1_type message_1_;
message_2_type message_2_;
} storage_;
unsigned char index_;
};
#endif // defined(ASIO_HAS_STD_VARIANT)
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_COMPLETION_PAYLOAD_HPP
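
When std::variant is available the payload is simply a variant of messages dispatched with std::visit; the manual union above reproduces that storage by hand for older toolchains. A cut-down sketch of the variant formulation with hypothetical message types:

#include <iostream>
#include <variant>

struct ok_msg
{
  template <typename Handler> void receive(Handler& h) { h(0); }
};

struct err_msg
{
  int code;
  template <typename Handler> void receive(Handler& h) { h(code); }
};

class payload
{
public:
  payload(ok_msg m) : message_(m) {}
  payload(err_msg m) : message_(m) {}

  template <typename Handler>
  void receive(Handler& handler)
  {
    // Same shape as the ASIO_HAS_STD_VARIANT branch above.
    std::visit([&](auto& message) { message.receive(handler); }, message_);
  }

private:
  std::variant<ok_msg, err_msg> message_;
};

int main()
{
  payload p(err_msg{111});
  p.receive([](int code) { std::cout << "completed with code " << code << "\n"; });
}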

View File

@@ -0,0 +1,79 @@
//
// detail/completion_payload_handler.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_COMPLETION_PAYLOAD_HANDLER_HPP
#define ASIO_DETAIL_COMPLETION_PAYLOAD_HANDLER_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/associator.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename Payload, typename Handler>
class completion_payload_handler
{
public:
completion_payload_handler(Payload&& p, Handler& h)
: payload_(static_cast<Payload&&>(p)),
handler_(static_cast<Handler&&>(h))
{
}
void operator()()
{
payload_.receive(handler_);
}
Handler& handler()
{
return handler_;
}
//private: (members left public so the associator specialisation below can access handler_)
Payload payload_;
Handler handler_;
};
} // namespace detail
template <template <typename, typename> class Associator,
typename Payload, typename Handler, typename DefaultCandidate>
struct associator<Associator,
detail::completion_payload_handler<Payload, Handler>,
DefaultCandidate>
: Associator<Handler, DefaultCandidate>
{
static typename Associator<Handler, DefaultCandidate>::type get(
const detail::completion_payload_handler<Payload, Handler>& h) noexcept
{
return Associator<Handler, DefaultCandidate>::get(h.handler_);
}
static auto get(
const detail::completion_payload_handler<Payload, Handler>& h,
const DefaultCandidate& c) noexcept
-> decltype(Associator<Handler, DefaultCandidate>::get(h.handler_, c))
{
return Associator<Handler, DefaultCandidate>::get(h.handler_, c);
}
};
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_COMPLETION_PAYLOAD_HANDLER_HPP
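
The associator specialisation above is what lets asio's association queries see through the wrapper to the wrapped handler. The public-API analogue, sketched with bind_executor (any wrapper with a forwarding associator behaves the same way; assumes standalone Asio):

#include <asio.hpp>
#include <iostream>

int main()
{
  asio::io_context ioc;

  // A wrapper around a plain lambda, carrying an associated executor.
  auto h = asio::bind_executor(ioc.get_executor(), [] {});

  // get_associated_executor pierces the wrapper, just as the
  // completion_payload_handler associator forwards queries to handler_.
  auto ex = asio::get_associated_executor(h);

  asio::post(ex, [] { std::cout << "ran on the associated executor\n"; });
  ioc.run();
}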

View File

@@ -0,0 +1,252 @@
//
// detail/composed_work.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_COMPOSED_WORK_HPP
#define ASIO_DETAIL_COMPOSED_WORK_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/type_traits.hpp"
#include "asio/execution/executor.hpp"
#include "asio/execution/outstanding_work.hpp"
#include "asio/executor_work_guard.hpp"
#include "asio/is_executor.hpp"
#include "asio/system_executor.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename Executor, typename = void>
class composed_work_guard
{
public:
typedef decay_t<
prefer_result_t<Executor, execution::outstanding_work_t::tracked_t>
> executor_type;
composed_work_guard(const Executor& ex)
: executor_(asio::prefer(ex, execution::outstanding_work.tracked))
{
}
void reset()
{
}
executor_type get_executor() const noexcept
{
return executor_;
}
private:
executor_type executor_;
};
template <>
struct composed_work_guard<system_executor>
{
public:
typedef system_executor executor_type;
composed_work_guard(const system_executor&)
{
}
void reset()
{
}
executor_type get_executor() const noexcept
{
return system_executor();
}
};
#if !defined(ASIO_NO_TS_EXECUTORS)
template <typename Executor>
struct composed_work_guard<Executor,
enable_if_t<
!execution::is_executor<Executor>::value
>
> : executor_work_guard<Executor>
{
composed_work_guard(const Executor& ex)
: executor_work_guard<Executor>(ex)
{
}
};
#endif // !defined(ASIO_NO_TS_EXECUTORS)
template <typename>
struct composed_io_executors;
template <>
struct composed_io_executors<void()>
{
composed_io_executors() noexcept
: head_(system_executor())
{
}
typedef system_executor head_type;
system_executor head_;
};
inline composed_io_executors<void()> make_composed_io_executors()
{
return composed_io_executors<void()>();
}
template <typename Head>
struct composed_io_executors<void(Head)>
{
explicit composed_io_executors(const Head& ex) noexcept
: head_(ex)
{
}
typedef Head head_type;
Head head_;
};
template <typename Head>
inline composed_io_executors<void(Head)>
make_composed_io_executors(const Head& head)
{
return composed_io_executors<void(Head)>(head);
}
template <typename Head, typename... Tail>
struct composed_io_executors<void(Head, Tail...)>
{
explicit composed_io_executors(const Head& head,
const Tail&... tail) noexcept
: head_(head),
tail_(tail...)
{
}
void reset()
{
head_.reset();
tail_.reset();
}
typedef Head head_type;
Head head_;
composed_io_executors<void(Tail...)> tail_;
};
template <typename Head, typename... Tail>
inline composed_io_executors<void(Head, Tail...)>
make_composed_io_executors(const Head& head, const Tail&... tail)
{
return composed_io_executors<void(Head, Tail...)>(head, tail...);
}
template <typename>
struct composed_work;
template <>
struct composed_work<void()>
{
typedef composed_io_executors<void()> executors_type;
composed_work(const executors_type&) noexcept
: head_(system_executor())
{
}
void reset()
{
head_.reset();
}
typedef system_executor head_type;
composed_work_guard<system_executor> head_;
};
template <typename Head>
struct composed_work<void(Head)>
{
typedef composed_io_executors<void(Head)> executors_type;
explicit composed_work(const executors_type& ex) noexcept
: head_(ex.head_)
{
}
void reset()
{
head_.reset();
}
typedef Head head_type;
composed_work_guard<Head> head_;
};
template <typename Head, typename... Tail>
struct composed_work<void(Head, Tail...)>
{
typedef composed_io_executors<void(Head, Tail...)> executors_type;
explicit composed_work(const executors_type& ex) noexcept
: head_(ex.head_),
tail_(ex.tail_)
{
}
void reset()
{
head_.reset();
tail_.reset();
}
typedef Head head_type;
composed_work_guard<Head> head_;
composed_work<void(Tail...)> tail_;
};
template <typename IoObject>
inline typename IoObject::executor_type
get_composed_io_executor(IoObject& io_object,
enable_if_t<
!is_executor<IoObject>::value
>* = 0,
enable_if_t<
!execution::is_executor<IoObject>::value
>* = 0)
{
return io_object.get_executor();
}
template <typename Executor>
inline const Executor& get_composed_io_executor(const Executor& ex,
enable_if_t<
is_executor<Executor>::value
|| execution::is_executor<Executor>::value
>* = 0)
{
return ex;
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_COMPOSED_WORK_HPP
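
composed_work_guard exists to keep each I/O executor's event loop alive while a composed operation is outstanding. The nearest public-API equivalent is an executor work guard; in this sketch make_work_guard stands in for the prefer(outstanding_work.tracked) the guard performs internally (assumes standalone Asio):

#include <asio.hpp>
#include <iostream>
#include <thread>

int main()
{
  asio::io_context ioc;

  // Comparable to composed_work_guard<io_context::executor_type>.
  auto work = asio::make_work_guard(ioc);

  std::thread t([&] { ioc.run(); }); // run() blocks while work is outstanding

  asio::post(ioc, [] { std::cout << "posted handler ran\n"; });

  work.reset(); // the equivalent of composed_work_guard::reset(): run() may now finish
  t.join();
}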

View File

@@ -0,0 +1,94 @@
//
// detail/concurrency_hint.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_CONCURRENCY_HINT_HPP
#define ASIO_DETAIL_CONCURRENCY_HINT_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/noncopyable.hpp"
// The concurrency hint ID and mask are used to identify when a "well-known"
// concurrency hint value has been passed to the io_context.
#define ASIO_CONCURRENCY_HINT_ID 0xA5100000u
#define ASIO_CONCURRENCY_HINT_ID_MASK 0xFFFF0000u
// If set, this bit indicates that the scheduler should perform locking.
#define ASIO_CONCURRENCY_HINT_LOCKING_SCHEDULER 0x1u
// If set, this bit indicates that the reactor should perform locking when
// managing descriptor registrations.
#define ASIO_CONCURRENCY_HINT_LOCKING_REACTOR_REGISTRATION 0x2u
// If set, this bit indicates that the reactor should perform locking for I/O.
#define ASIO_CONCURRENCY_HINT_LOCKING_REACTOR_IO 0x4u
// Helper macro to determine if we have a special concurrency hint.
#define ASIO_CONCURRENCY_HINT_IS_SPECIAL(hint) \
((static_cast<unsigned>(hint) \
& ASIO_CONCURRENCY_HINT_ID_MASK) \
== ASIO_CONCURRENCY_HINT_ID)
// Helper macro to determine if locking is enabled for a given facility.
#define ASIO_CONCURRENCY_HINT_IS_LOCKING(facility, hint) \
(((static_cast<unsigned>(hint) \
& (ASIO_CONCURRENCY_HINT_ID_MASK \
| ASIO_CONCURRENCY_HINT_LOCKING_ ## facility)) \
^ ASIO_CONCURRENCY_HINT_ID) != 0)
// This special concurrency hint disables locking in both the scheduler and
// reactor I/O. This hint has the following restrictions:
//
// - Care must be taken to ensure that all operations on the io_context and any
// of its associated I/O objects (such as sockets and timers) occur in only
// one thread at a time.
//
// - Asynchronous resolve operations fail with operation_not_supported.
//
// - If a signal_set is used with the io_context, signal_set objects cannot be
// used with any other io_context in the program.
#define ASIO_CONCURRENCY_HINT_UNSAFE \
static_cast<int>(ASIO_CONCURRENCY_HINT_ID)
// This special concurrency hint disables locking in the reactor I/O. This hint
// has the following restrictions:
//
// - Care must be taken to ensure that run functions on the io_context, and all
// operations on the io_context's associated I/O objects (such as sockets and
// timers), occur in only one thread at a time.
#define ASIO_CONCURRENCY_HINT_UNSAFE_IO \
static_cast<int>(ASIO_CONCURRENCY_HINT_ID \
| ASIO_CONCURRENCY_HINT_LOCKING_SCHEDULER \
| ASIO_CONCURRENCY_HINT_LOCKING_REACTOR_REGISTRATION)
// This special concurrency hint provides full thread safety.
#define ASIO_CONCURRENCY_HINT_SAFE \
static_cast<int>(ASIO_CONCURRENCY_HINT_ID \
| ASIO_CONCURRENCY_HINT_LOCKING_SCHEDULER \
| ASIO_CONCURRENCY_HINT_LOCKING_REACTOR_REGISTRATION \
| ASIO_CONCURRENCY_HINT_LOCKING_REACTOR_IO)
// This #define may be overridden at compile time to specify a program-wide
// default concurrency hint, used by the zero-argument io_context constructor.
#if !defined(ASIO_CONCURRENCY_HINT_DEFAULT)
# define ASIO_CONCURRENCY_HINT_DEFAULT -1
#endif // !defined(ASIO_CONCURRENCY_HINT_DEFAULT)
// This #define may be overridden at compile time to specify a program-wide
// concurrency hint, used by the one-argument io_context constructor when
// passed a value of 1.
#if !defined(ASIO_CONCURRENCY_HINT_1)
# define ASIO_CONCURRENCY_HINT_1 1
#endif // !defined(ASIO_CONCURRENCY_HINT_1)
#endif // ASIO_DETAIL_CONCURRENCY_HINT_HPP
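
The IS_LOCKING macro masks the hint down to the ID bits plus one facility bit, then XORs with the ID: the result is nonzero (locking enabled) unless the ID matches and the facility bit is clear. A worked check of that arithmetic, plus the documented way of handing a hint to io_context (sketch; assumes standalone Asio):

#include <asio.hpp>
#include <iostream>

int main()
{
  // Single-threaded program: disable all internal locking.
  asio::io_context ctx(ASIO_CONCURRENCY_HINT_UNSAFE);

  // UNSAFE is just the ID (0xA5100000), so masking with
  // (ID_MASK | LOCKING_SCHEDULER) yields the ID, and XOR with the ID gives 0:
  std::cout << ASIO_CONCURRENCY_HINT_IS_LOCKING(
      SCHEDULER, ASIO_CONCURRENCY_HINT_UNSAFE) << "\n"; // prints 0

  // SAFE is ID | 0x7; masking with (ID_MASK | 0x4) leaves the I/O bit set,
  // so the XOR is nonzero:
  std::cout << ASIO_CONCURRENCY_HINT_IS_LOCKING(
      REACTOR_IO, ASIO_CONCURRENCY_HINT_SAFE) << "\n";  // prints 1

  asio::post(ctx, [] { std::cout << "ran without internal locking\n"; });
  ctx.run();
}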

View File

@@ -0,0 +1,120 @@
//
// detail/conditionally_enabled_event.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_CONDITIONALLY_ENABLED_EVENT_HPP
#define ASIO_DETAIL_CONDITIONALLY_ENABLED_EVENT_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/conditionally_enabled_mutex.hpp"
#include "asio/detail/event.hpp"
#include "asio/detail/noncopyable.hpp"
#include "asio/detail/null_event.hpp"
#include "asio/detail/scoped_lock.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
// Event adapter used to conditionally enable or disable signalling and
// waiting, depending on whether locking is enabled for the associated mutex.
class conditionally_enabled_event
: private noncopyable
{
public:
// Constructor.
conditionally_enabled_event()
{
}
// Destructor.
~conditionally_enabled_event()
{
}
// Signal the event. (Retained for backward compatibility.)
void signal(conditionally_enabled_mutex::scoped_lock& lock)
{
if (lock.mutex_.enabled_)
event_.signal(lock);
}
// Signal all waiters.
void signal_all(conditionally_enabled_mutex::scoped_lock& lock)
{
if (lock.mutex_.enabled_)
event_.signal_all(lock);
}
// Unlock the mutex and signal one waiter.
void unlock_and_signal_one(
conditionally_enabled_mutex::scoped_lock& lock)
{
if (lock.mutex_.enabled_)
event_.unlock_and_signal_one(lock);
}
// Unlock the mutex and signal one waiter who may destroy us.
void unlock_and_signal_one_for_destruction(
conditionally_enabled_mutex::scoped_lock& lock)
{
if (lock.mutex_.enabled_)
event_.unlock_and_signal_one(lock);
}
// If there's a waiter, unlock the mutex and signal it.
bool maybe_unlock_and_signal_one(
conditionally_enabled_mutex::scoped_lock& lock)
{
if (lock.mutex_.enabled_)
return event_.maybe_unlock_and_signal_one(lock);
else
return false;
}
// Reset the event.
void clear(conditionally_enabled_mutex::scoped_lock& lock)
{
if (lock.mutex_.enabled_)
event_.clear(lock);
}
// Wait for the event to become signalled.
void wait(conditionally_enabled_mutex::scoped_lock& lock)
{
if (lock.mutex_.enabled_)
event_.wait(lock);
else
null_event().wait(lock);
}
// Timed wait for the event to become signalled.
bool wait_for_usec(
conditionally_enabled_mutex::scoped_lock& lock, long usec)
{
if (lock.mutex_.enabled_)
return event_.wait_for_usec(lock, usec);
else
return null_event().wait_for_usec(lock, usec);
}
private:
asio::detail::event event_;
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_CONDITIONALLY_ENABLED_EVENT_HPP

View File

@@ -0,0 +1,170 @@
//
// detail/conditionally_enabled_mutex.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_CONDITIONALLY_ENABLED_MUTEX_HPP
#define ASIO_DETAIL_CONDITIONALLY_ENABLED_MUTEX_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/mutex.hpp"
#include "asio/detail/noncopyable.hpp"
#include "asio/detail/scoped_lock.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
// Mutex adapter used to conditionally enable or disable locking.
class conditionally_enabled_mutex
: private noncopyable
{
public:
// Helper class to lock and unlock a mutex automatically.
class scoped_lock
: private noncopyable
{
public:
// Tag type used to distinguish constructors.
enum adopt_lock_t { adopt_lock };
// Constructor adopts a lock that is already held.
scoped_lock(conditionally_enabled_mutex& m, adopt_lock_t)
: mutex_(m),
locked_(m.enabled_)
{
}
// Constructor acquires the lock.
explicit scoped_lock(conditionally_enabled_mutex& m)
: mutex_(m)
{
if (m.enabled_)
{
mutex_.mutex_.lock();
locked_ = true;
}
else
locked_ = false;
}
// Destructor releases the lock.
~scoped_lock()
{
if (locked_)
mutex_.mutex_.unlock();
}
// Explicitly acquire the lock.
void lock()
{
if (mutex_.enabled_ && !locked_)
{
for (int n = mutex_.spin_count_; n != 0; n -= (n > 0) ? 1 : 0)
{
if (mutex_.mutex_.try_lock())
{
locked_ = true;
return;
}
}
mutex_.mutex_.lock();
locked_ = true;
}
}
// Explicitly release the lock.
void unlock()
{
if (locked_)
{
mutex_.unlock();
locked_ = false;
}
}
// Test whether the lock is held.
bool locked() const
{
return locked_;
}
// Get the underlying mutex.
asio::detail::mutex& mutex()
{
return mutex_.mutex_;
}
private:
friend class conditionally_enabled_event;
conditionally_enabled_mutex& mutex_;
bool locked_;
};
// Constructor.
explicit conditionally_enabled_mutex(bool enabled, int spin_count = 0)
: spin_count_(spin_count),
enabled_(enabled)
{
}
// Destructor.
~conditionally_enabled_mutex()
{
}
// Determine whether locking is enabled.
bool enabled() const
{
return enabled_;
}
// Get the spin count.
int spin_count() const
{
return spin_count_;
}
// Lock the mutex.
void lock()
{
if (enabled_)
{
for (int n = spin_count_; n != 0; n -= (n > 0) ? 1 : 0)
if (mutex_.try_lock())
return;
mutex_.lock();
}
}
// Unlock the mutex.
void unlock()
{
if (enabled_)
mutex_.unlock();
}
private:
friend class scoped_lock;
friend class conditionally_enabled_event;
asio::detail::mutex mutex_;
const int spin_count_;
const bool enabled_;
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_CONDITIONALLY_ENABLED_MUTEX_HPP
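
The adapter trades an uncontended mutex acquisition for a single branch on a constant flag when the concurrency hint says locking is unnecessary. A standalone sketch of the same idea (hypothetical maybe_mutex, not asio code):

#include <iostream>
#include <mutex>

class maybe_mutex
{
public:
  explicit maybe_mutex(bool enabled) : enabled_(enabled) {}
  void lock()   { if (enabled_) mutex_.lock(); }   // one branch when disabled
  void unlock() { if (enabled_) mutex_.unlock(); }
private:
  std::mutex mutex_;
  const bool enabled_;
};

int main()
{
  maybe_mutex m(false); // e.g. the concurrency hint said "single-threaded"
  {
    std::lock_guard<maybe_mutex> lock(m); // satisfies BasicLockable
    std::cout << "critical section entered without taking a real lock\n";
  }
}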

File diff suppressed because it is too large

View File

@@ -0,0 +1,405 @@
//
// detail/consuming_buffers.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_CONSUMING_BUFFERS_HPP
#define ASIO_DETAIL_CONSUMING_BUFFERS_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <cstddef>
#include "asio/buffer.hpp"
#include "asio/detail/buffer_sequence_adapter.hpp"
#include "asio/detail/limits.hpp"
#include "asio/registered_buffer.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
// Helper template to determine the maximum number of prepared buffers.
template <typename Buffers>
struct prepared_buffers_max
{
enum { value = buffer_sequence_adapter_base::max_buffers };
};
template <typename Elem, std::size_t N>
struct prepared_buffers_max<boost::array<Elem, N>>
{
enum { value = N };
};
template <typename Elem, std::size_t N>
struct prepared_buffers_max<std::array<Elem, N>>
{
enum { value = N };
};
// A buffer sequence used to represent a subsequence of the buffers.
template <typename Buffer, std::size_t MaxBuffers>
struct prepared_buffers
{
typedef Buffer value_type;
typedef const Buffer* const_iterator;
enum { max_buffers = MaxBuffers < 16 ? MaxBuffers : 16 };
prepared_buffers() : count(0) {}
const_iterator begin() const { return elems; }
const_iterator end() const { return elems + count; }
Buffer elems[max_buffers];
std::size_t count;
};
// A proxy for a sub-range in a list of buffers.
template <typename Buffer, typename Buffers, typename Buffer_Iterator>
class consuming_buffers
{
public:
typedef prepared_buffers<Buffer, prepared_buffers_max<Buffers>::value>
prepared_buffers_type;
// Construct to represent the entire list of buffers.
explicit consuming_buffers(const Buffers& buffers)
: buffers_(buffers),
total_consumed_(0),
next_elem_(0),
next_elem_offset_(0)
{
using asio::buffer_size;
total_size_ = buffer_size(buffers);
}
// Determine if we are at the end of the buffers.
bool empty() const
{
return total_consumed_ >= total_size_;
}
// Get the buffer for a single transfer, with a size.
prepared_buffers_type prepare(std::size_t max_size)
{
prepared_buffers_type result;
Buffer_Iterator next = asio::buffer_sequence_begin(buffers_);
Buffer_Iterator end = asio::buffer_sequence_end(buffers_);
std::advance(next, next_elem_);
std::size_t elem_offset = next_elem_offset_;
while (next != end && max_size > 0 && result.count < result.max_buffers)
{
Buffer next_buf = Buffer(*next) + elem_offset;
result.elems[result.count] = asio::buffer(next_buf, max_size);
max_size -= result.elems[result.count].size();
elem_offset = 0;
if (result.elems[result.count].size() > 0)
++result.count;
++next;
}
return result;
}
// Consume the specified number of bytes from the buffers.
void consume(std::size_t size)
{
total_consumed_ += size;
Buffer_Iterator next = asio::buffer_sequence_begin(buffers_);
Buffer_Iterator end = asio::buffer_sequence_end(buffers_);
std::advance(next, next_elem_);
while (next != end && size > 0)
{
Buffer next_buf = Buffer(*next) + next_elem_offset_;
if (size < next_buf.size())
{
next_elem_offset_ += size;
size = 0;
}
else
{
size -= next_buf.size();
next_elem_offset_ = 0;
++next_elem_;
++next;
}
}
}
// Get the total number of bytes consumed from the buffers.
std::size_t total_consumed() const
{
return total_consumed_;
}
private:
Buffers buffers_;
std::size_t total_size_;
std::size_t total_consumed_;
std::size_t next_elem_;
std::size_t next_elem_offset_;
};
// Base class of all consuming_buffers specialisations for single buffers.
template <typename Buffer>
class consuming_single_buffer
{
public:
// Construct to represent the entire list of buffers.
template <typename Buffer1>
explicit consuming_single_buffer(const Buffer1& buffer)
: buffer_(buffer),
total_consumed_(0)
{
}
// Determine if we are at the end of the buffers.
bool empty() const
{
return total_consumed_ >= buffer_.size();
}
// Get the buffer for a single transfer, with a size.
Buffer prepare(std::size_t max_size)
{
return asio::buffer(buffer_ + total_consumed_, max_size);
}
// Consume the specified number of bytes from the buffers.
void consume(std::size_t size)
{
total_consumed_ += size;
}
// Get the total number of bytes consumed from the buffers.
std::size_t total_consumed() const
{
return total_consumed_;
}
private:
Buffer buffer_;
std::size_t total_consumed_;
};
template <>
class consuming_buffers<mutable_buffer, mutable_buffer, const mutable_buffer*>
: public consuming_single_buffer<mutable_buffer>
{
public:
explicit consuming_buffers(const mutable_buffer& buffer)
: consuming_single_buffer<mutable_buffer>(buffer)
{
}
};
template <>
class consuming_buffers<const_buffer, mutable_buffer, const mutable_buffer*>
: public consuming_single_buffer<const_buffer>
{
public:
explicit consuming_buffers(const mutable_buffer& buffer)
: consuming_single_buffer<const_buffer>(buffer)
{
}
};
template <>
class consuming_buffers<const_buffer, const_buffer, const const_buffer*>
: public consuming_single_buffer<const_buffer>
{
public:
explicit consuming_buffers(const const_buffer& buffer)
: consuming_single_buffer<const_buffer>(buffer)
{
}
};
template <>
class consuming_buffers<mutable_buffer,
mutable_registered_buffer, const mutable_buffer*>
: public consuming_single_buffer<mutable_registered_buffer>
{
public:
explicit consuming_buffers(const mutable_registered_buffer& buffer)
: consuming_single_buffer<mutable_registered_buffer>(buffer)
{
}
};
template <>
class consuming_buffers<const_buffer,
mutable_registered_buffer, const mutable_buffer*>
: public consuming_single_buffer<mutable_registered_buffer>
{
public:
explicit consuming_buffers(const mutable_registered_buffer& buffer)
: consuming_single_buffer<mutable_registered_buffer>(buffer)
{
}
};
template <>
class consuming_buffers<const_buffer,
const_registered_buffer, const const_buffer*>
: public consuming_single_buffer<const_registered_buffer>
{
public:
explicit consuming_buffers(const const_registered_buffer& buffer)
: consuming_single_buffer<const_registered_buffer>(buffer)
{
}
};
template <typename Buffer, typename Elem>
class consuming_buffers<Buffer, boost::array<Elem, 2>,
typename boost::array<Elem, 2>::const_iterator>
{
public:
// Construct to represent the entire list of buffers.
explicit consuming_buffers(const boost::array<Elem, 2>& buffers)
: buffers_(buffers),
total_consumed_(0)
{
}
// Determine if we are at the end of the buffers.
bool empty() const
{
return total_consumed_ >=
Buffer(buffers_[0]).size() + Buffer(buffers_[1]).size();
}
// Get the buffer for a single transfer, with a size.
boost::array<Buffer, 2> prepare(std::size_t max_size)
{
boost::array<Buffer, 2> result = {{
Buffer(buffers_[0]), Buffer(buffers_[1]) }};
std::size_t buffer0_size = result[0].size();
result[0] = asio::buffer(result[0] + total_consumed_, max_size);
result[1] = asio::buffer(
result[1] + (total_consumed_ < buffer0_size
? 0 : total_consumed_ - buffer0_size),
max_size - result[0].size());
return result;
}
// Consume the specified number of bytes from the buffers.
void consume(std::size_t size)
{
total_consumed_ += size;
}
// Get the total number of bytes consumed from the buffers.
std::size_t total_consumed() const
{
return total_consumed_;
}
private:
boost::array<Elem, 2> buffers_;
std::size_t total_consumed_;
};
template <typename Buffer, typename Elem>
class consuming_buffers<Buffer, std::array<Elem, 2>,
typename std::array<Elem, 2>::const_iterator>
{
public:
// Construct to represent the entire list of buffers.
explicit consuming_buffers(const std::array<Elem, 2>& buffers)
: buffers_(buffers),
total_consumed_(0)
{
}
// Determine if we are at the end of the buffers.
bool empty() const
{
return total_consumed_ >=
Buffer(buffers_[0]).size() + Buffer(buffers_[1]).size();
}
// Get the buffer for a single transfer, with a size.
std::array<Buffer, 2> prepare(std::size_t max_size)
{
std::array<Buffer, 2> result = {{
Buffer(buffers_[0]), Buffer(buffers_[1]) }};
std::size_t buffer0_size = result[0].size();
result[0] = asio::buffer(result[0] + total_consumed_, max_size);
result[1] = asio::buffer(
result[1] + (total_consumed_ < buffer0_size
? 0 : total_consumed_ - buffer0_size),
max_size - result[0].size());
return result;
}
// Consume the specified number of bytes from the buffers.
void consume(std::size_t size)
{
total_consumed_ += size;
}
// Get the total number of bytes consumed from the buffers.
std::size_t total_consumed() const
{
return total_consumed_;
}
private:
std::array<Elem, 2> buffers_;
std::size_t total_consumed_;
};
// Specialisation for null_buffers to ensure that the null_buffers type is
// always passed through to the underlying read or write operation.
template <typename Buffer>
class consuming_buffers<Buffer, null_buffers, const mutable_buffer*>
: public asio::null_buffers
{
public:
consuming_buffers(const null_buffers&)
{
// No-op.
}
bool empty()
{
return false;
}
null_buffers prepare(std::size_t)
{
return null_buffers();
}
void consume(std::size_t)
{
// No-op.
}
std::size_t total_consumed() const
{
return 0;
}
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_CONSUMING_BUFFERS_HPP
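
prepare() returns the unconsumed tail of the sequence capped at max_size, while consume() advances the (element, offset) cursor. A worked walkthrough of the two-element specialisation's arithmetic using only public buffer operations (illustrative sketch):

#include <asio.hpp>
#include <array>
#include <iostream>

int main()
{
  char a[6] = "hello";
  char b[6] = "world";
  std::array<asio::const_buffer, 2> bufs =
      {{ asio::buffer(a, 5), asio::buffer(b, 5) }};

  std::size_t total_consumed = 3; // pretend a short write consumed "hel"
  std::size_t max_size = 4;       // next transfer limited to 4 bytes

  // Equivalent of prepare(): skip the consumed bytes, then cap what remains.
  std::array<asio::const_buffer, 2> out = {{ bufs[0], bufs[1] }};
  std::size_t buffer0_size = out[0].size();
  out[0] = asio::buffer(out[0] + total_consumed, max_size);
  out[1] = asio::buffer(
      out[1] + (total_consumed < buffer0_size
        ? 0 : total_consumed - buffer0_size),
      max_size - out[0].size());

  // out now covers "lo" then "wo": the next 4 unconsumed bytes.
  std::cout << out[0].size() << " + " << out[1].size() << " bytes\n"; // 2 + 2
}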

View File

@@ -0,0 +1,27 @@
//
// detail/cstddef.hpp
// ~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_CSTDDEF_HPP
#define ASIO_DETAIL_CSTDDEF_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <cstddef>
namespace asio {
using std::nullptr_t;
} // namespace asio
#endif // ASIO_DETAIL_CSTDDEF_HPP

View File

@@ -0,0 +1,40 @@
//
// detail/cstdint.hpp
// ~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_CSTDINT_HPP
#define ASIO_DETAIL_CSTDINT_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <cstdint>
namespace asio {
using std::int16_t;
using std::int_least16_t;
using std::uint16_t;
using std::uint_least16_t;
using std::int32_t;
using std::int_least32_t;
using std::uint32_t;
using std::uint_least32_t;
using std::int64_t;
using std::int_least64_t;
using std::uint64_t;
using std::uint_least64_t;
using std::uintptr_t;
using std::uintmax_t;
} // namespace asio
#endif // ASIO_DETAIL_CSTDINT_HPP

View File

@@ -0,0 +1,34 @@
//
// detail/date_time_fwd.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_DATE_TIME_FWD_HPP
#define ASIO_DETAIL_DATE_TIME_FWD_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
namespace boost {
namespace date_time {
template<class T, class TimeSystem>
class base_time;
} // namespace date_time
namespace posix_time {
class ptime;
} // namespace posix_time
} // namespace boost
#endif // ASIO_DETAIL_DATE_TIME_FWD_HPP

View File

@@ -0,0 +1,345 @@
//
// detail/deadline_timer_service.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_DEADLINE_TIMER_SERVICE_HPP
#define ASIO_DETAIL_DEADLINE_TIMER_SERVICE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <cstddef>
#include "asio/associated_cancellation_slot.hpp"
#include "asio/cancellation_type.hpp"
#include "asio/config.hpp"
#include "asio/error.hpp"
#include "asio/execution_context.hpp"
#include "asio/detail/bind_handler.hpp"
#include "asio/detail/fenced_block.hpp"
#include "asio/detail/memory.hpp"
#include "asio/detail/noncopyable.hpp"
#include "asio/detail/socket_ops.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/detail/timer_queue.hpp"
#include "asio/detail/timer_scheduler.hpp"
#include "asio/detail/wait_handler.hpp"
#include "asio/detail/wait_op.hpp"
#if defined(ASIO_WINDOWS_RUNTIME)
# include <chrono>
# include <thread>
#endif // defined(ASIO_WINDOWS_RUNTIME)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename TimeTraits>
class deadline_timer_service
: public execution_context_service_base<deadline_timer_service<TimeTraits>>
{
public:
// The time type.
typedef typename TimeTraits::time_type time_type;
// The duration type.
typedef typename TimeTraits::duration_type duration_type;
// The allocator type.
typedef execution_context::allocator<void> allocator_type;
// The implementation type of the timer. This type is dependent on the
// underlying implementation of the timer service.
struct implementation_type
: private asio::detail::noncopyable
{
time_type expiry;
bool might_have_pending_waits;
typename timer_queue<TimeTraits, allocator_type>::per_timer_data timer_data;
};
// Constructor.
deadline_timer_service(execution_context& context)
: execution_context_service_base<
deadline_timer_service<TimeTraits>>(context),
timer_queue_(allocator_type(context),
config(context).get("timer", "heap_reserve", 0U)),
scheduler_(asio::use_service<timer_scheduler>(context))
{
scheduler_.init_task();
scheduler_.add_timer_queue(timer_queue_);
}
// Destructor.
~deadline_timer_service()
{
scheduler_.remove_timer_queue(timer_queue_);
}
// Destroy all user-defined handler objects owned by the service.
void shutdown()
{
}
// Construct a new timer implementation.
void construct(implementation_type& impl)
{
impl.expiry = time_type();
impl.might_have_pending_waits = false;
}
// Destroy a timer implementation.
void destroy(implementation_type& impl)
{
asio::error_code ec;
cancel(impl, ec);
}
// Move-construct a new timer implementation.
void move_construct(implementation_type& impl,
implementation_type& other_impl)
{
if (other_impl.might_have_pending_waits)
{
scheduler_.move_timer(timer_queue_,
impl.timer_data, other_impl.timer_data);
}
impl.expiry = other_impl.expiry;
other_impl.expiry = time_type();
impl.might_have_pending_waits = other_impl.might_have_pending_waits;
other_impl.might_have_pending_waits = false;
}
// Move-assign from another timer implementation.
void move_assign(implementation_type& impl,
deadline_timer_service& other_service,
implementation_type& other_impl)
{
if (this != &other_service)
if (impl.might_have_pending_waits)
scheduler_.cancel_timer(timer_queue_, impl.timer_data);
other_service.scheduler_.move_timer(other_service.timer_queue_,
impl.timer_data, other_impl.timer_data);
impl.expiry = other_impl.expiry;
other_impl.expiry = time_type();
impl.might_have_pending_waits = other_impl.might_have_pending_waits;
other_impl.might_have_pending_waits = false;
}
// Move-construct a new timer implementation.
void converting_move_construct(implementation_type& impl,
deadline_timer_service&, implementation_type& other_impl)
{
move_construct(impl, other_impl);
}
// Move-assign from another timer implementation.
void converting_move_assign(implementation_type& impl,
deadline_timer_service& other_service,
implementation_type& other_impl)
{
move_assign(impl, other_service, other_impl);
}
// Cancel any asynchronous wait operations associated with the timer.
std::size_t cancel(implementation_type& impl, asio::error_code& ec)
{
if (!impl.might_have_pending_waits)
{
ec = asio::error_code();
return 0;
}
ASIO_HANDLER_OPERATION((scheduler_.context(),
"deadline_timer", &impl, 0, "cancel"));
std::size_t count = scheduler_.cancel_timer(timer_queue_, impl.timer_data);
impl.might_have_pending_waits = false;
ec = asio::error_code();
return count;
}
// Cancels one asynchronous wait operation associated with the timer.
std::size_t cancel_one(implementation_type& impl,
asio::error_code& ec)
{
if (!impl.might_have_pending_waits)
{
ec = asio::error_code();
return 0;
}
ASIO_HANDLER_OPERATION((scheduler_.context(),
"deadline_timer", &impl, 0, "cancel_one"));
std::size_t count = scheduler_.cancel_timer(
timer_queue_, impl.timer_data, 1);
if (count == 0)
impl.might_have_pending_waits = false;
ec = asio::error_code();
return count;
}
// Get the expiry time for the timer as an absolute time.
time_type expiry(const implementation_type& impl) const
{
return impl.expiry;
}
// Get the expiry time for the timer as an absolute time.
time_type expires_at(const implementation_type& impl) const
{
return impl.expiry;
}
// Get the expiry time for the timer relative to now.
duration_type expires_from_now(const implementation_type& impl) const
{
return TimeTraits::subtract(this->expiry(impl), TimeTraits::now());
}
// Set the expiry time for the timer as an absolute time.
std::size_t expires_at(implementation_type& impl,
const time_type& expiry_time, asio::error_code& ec)
{
std::size_t count = cancel(impl, ec);
impl.expiry = expiry_time;
ec = asio::error_code();
return count;
}
// Set the expiry time for the timer relative to now.
std::size_t expires_after(implementation_type& impl,
const duration_type& expiry_time, asio::error_code& ec)
{
return expires_at(impl,
TimeTraits::add(TimeTraits::now(), expiry_time), ec);
}
// Set the expiry time for the timer relative to now.
std::size_t expires_from_now(implementation_type& impl,
const duration_type& expiry_time, asio::error_code& ec)
{
return expires_at(impl,
TimeTraits::add(TimeTraits::now(), expiry_time), ec);
}
// Perform a blocking wait on the timer.
void wait(implementation_type& impl, asio::error_code& ec)
{
time_type now = TimeTraits::now();
ec = asio::error_code();
while (TimeTraits::less_than(now, impl.expiry) && !ec)
{
this->do_wait(TimeTraits::to_posix_duration(
TimeTraits::subtract(impl.expiry, now)), ec);
now = TimeTraits::now();
}
}
// Start an asynchronous wait on the timer.
template <typename Handler, typename IoExecutor>
void async_wait(implementation_type& impl,
Handler& handler, const IoExecutor& io_ex)
{
associated_cancellation_slot_t<Handler> slot
= asio::get_associated_cancellation_slot(handler);
// Allocate and construct an operation to wrap the handler.
typedef wait_handler<Handler, IoExecutor> op;
typename op::ptr p = { asio::detail::addressof(handler),
op::ptr::allocate(handler), 0 };
p.p = new (p.v) op(handler, io_ex);
// Optionally register for per-operation cancellation.
if (slot.is_connected())
{
p.p->cancellation_key_ =
&slot.template emplace<op_cancellation>(this, &impl.timer_data);
}
impl.might_have_pending_waits = true;
ASIO_HANDLER_CREATION((scheduler_.context(),
*p.p, "deadline_timer", &impl, 0, "async_wait"));
scheduler_.schedule_timer(timer_queue_, impl.expiry, impl.timer_data, p.p);
p.v = p.p = 0;
}
private:
// Helper function to wait given a duration type. The duration type should
// either be of type boost::posix_time::time_duration, or implement the
// required subset of its interface.
template <typename Duration>
void do_wait(const Duration& timeout, asio::error_code& ec)
{
#if defined(ASIO_WINDOWS_RUNTIME)
std::this_thread::sleep_for(
std::chrono::seconds(timeout.total_seconds())
+ std::chrono::microseconds(timeout.total_microseconds()));
ec = asio::error_code();
#else // defined(ASIO_WINDOWS_RUNTIME)
::timeval tv;
tv.tv_sec = timeout.total_seconds();
tv.tv_usec = timeout.total_microseconds() % 1000000;
socket_ops::select(0, 0, 0, 0, &tv, ec);
#endif // defined(ASIO_WINDOWS_RUNTIME)
}
// Helper class used to implement per-operation cancellation.
class op_cancellation
{
public:
op_cancellation(deadline_timer_service* s,
typename timer_queue<TimeTraits, allocator_type>::per_timer_data* p)
: service_(s),
timer_data_(p)
{
}
void operator()(cancellation_type_t type)
{
if (!!(type &
(cancellation_type::terminal
| cancellation_type::partial
| cancellation_type::total)))
{
service_->scheduler_.cancel_timer_by_key(
service_->timer_queue_, timer_data_, this);
}
}
private:
deadline_timer_service* service_;
typename timer_queue<TimeTraits, allocator_type>::per_timer_data*
timer_data_;
};
// The queue of timers.
timer_queue<TimeTraits, allocator_type> timer_queue_;
// The object that schedules and executes timers. Usually a reactor.
timer_scheduler& scheduler_;
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_DEADLINE_TIMER_SERVICE_HPP
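
This service underlies basic_deadline_timer and, via chrono_time_traits, basic_waitable_timer; each public timer method maps onto a service call above. A minimal usage sketch with steady_timer (assumes standalone Asio):

#include <asio.hpp>
#include <chrono>
#include <iostream>

int main()
{
  asio::io_context ioc;
  asio::steady_timer timer(ioc);

  timer.expires_after(std::chrono::milliseconds(50)); // -> expires_after() above
  timer.async_wait([](const asio::error_code& ec)     // -> async_wait() above
  {
    if (ec == asio::error::operation_aborted)
      std::cout << "wait cancelled\n";
    else
      std::cout << "timer fired\n";
  });

  // timer.cancel() would map onto the service's cancel(), completing the
  // pending wait with operation_aborted.

  ioc.run();
}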

View File

@@ -0,0 +1,36 @@
//
// detail/dependent_type.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_DEPENDENT_TYPE_HPP
#define ASIO_DETAIL_DEPENDENT_TYPE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename DependsOn, typename T>
struct dependent_type
{
typedef T type;
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_DEPENDENT_TYPE_HPP

View File

@@ -0,0 +1,179 @@
//
// detail/descriptor_ops.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_DESCRIPTOR_OPS_HPP
#define ASIO_DETAIL_DESCRIPTOR_OPS_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
#include <cstddef>
#include "asio/error.hpp"
#include "asio/error_code.hpp"
#include "asio/detail/cstdint.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
namespace descriptor_ops {
// Descriptor state bits.
enum
{
// The user wants a non-blocking descriptor.
user_set_non_blocking = 1,
// The descriptor has been set non-blocking.
internal_non_blocking = 2,
// Helper "state" used to determine whether the descriptor is non-blocking.
non_blocking = user_set_non_blocking | internal_non_blocking,
// The descriptor may have been dup()-ed.
possible_dup = 4
};
typedef unsigned char state_type;
inline void get_last_error(
asio::error_code& ec, bool is_error_condition)
{
if (!is_error_condition)
{
asio::error::clear(ec);
}
else
{
ec = asio::error_code(errno,
asio::error::get_system_category());
}
}
ASIO_DECL int open(const char* path, int flags,
asio::error_code& ec);
ASIO_DECL int open(const char* path, int flags, unsigned mode,
asio::error_code& ec);
ASIO_DECL int close(int d, state_type& state,
asio::error_code& ec);
ASIO_DECL bool set_user_non_blocking(int d,
state_type& state, bool value, asio::error_code& ec);
ASIO_DECL bool set_internal_non_blocking(int d,
state_type& state, bool value, asio::error_code& ec);
typedef iovec buf;
ASIO_DECL std::size_t sync_read(int d, state_type state, buf* bufs,
std::size_t count, bool all_empty, asio::error_code& ec);
ASIO_DECL std::size_t sync_read1(int d, state_type state, void* data,
std::size_t size, asio::error_code& ec);
ASIO_DECL bool non_blocking_read(int d, buf* bufs, std::size_t count,
asio::error_code& ec, std::size_t& bytes_transferred);
ASIO_DECL bool non_blocking_read1(int d, void* data, std::size_t size,
asio::error_code& ec, std::size_t& bytes_transferred);
ASIO_DECL std::size_t sync_write(int d, state_type state,
const buf* bufs, std::size_t count, bool all_empty,
asio::error_code& ec);
ASIO_DECL std::size_t sync_write1(int d, state_type state,
const void* data, std::size_t size, asio::error_code& ec);
ASIO_DECL bool non_blocking_write(int d,
const buf* bufs, std::size_t count,
asio::error_code& ec, std::size_t& bytes_transferred);
ASIO_DECL bool non_blocking_write1(int d,
const void* data, std::size_t size,
asio::error_code& ec, std::size_t& bytes_transferred);
#if defined(ASIO_HAS_FILE)
ASIO_DECL std::size_t sync_read_at(int d, state_type state,
uint64_t offset, buf* bufs, std::size_t count, bool all_empty,
asio::error_code& ec);
ASIO_DECL std::size_t sync_read_at1(int d, state_type state,
uint64_t offset, void* data, std::size_t size,
asio::error_code& ec);
ASIO_DECL bool non_blocking_read_at(int d, uint64_t offset,
buf* bufs, std::size_t count, asio::error_code& ec,
std::size_t& bytes_transferred);
ASIO_DECL bool non_blocking_read_at1(int d, uint64_t offset,
void* data, std::size_t size, asio::error_code& ec,
std::size_t& bytes_transferred);
ASIO_DECL std::size_t sync_write_at(int d, state_type state,
uint64_t offset, const buf* bufs, std::size_t count, bool all_empty,
asio::error_code& ec);
ASIO_DECL std::size_t sync_write_at1(int d, state_type state,
uint64_t offset, const void* data, std::size_t size,
asio::error_code& ec);
ASIO_DECL bool non_blocking_write_at(int d,
uint64_t offset, const buf* bufs, std::size_t count,
asio::error_code& ec, std::size_t& bytes_transferred);
ASIO_DECL bool non_blocking_write_at1(int d,
uint64_t offset, const void* data, std::size_t size,
asio::error_code& ec, std::size_t& bytes_transferred);
#endif // defined(ASIO_HAS_FILE)
ASIO_DECL int ioctl(int d, state_type& state, long cmd,
ioctl_arg_type* arg, asio::error_code& ec);
ASIO_DECL int fcntl(int d, int cmd, asio::error_code& ec);
ASIO_DECL int fcntl(int d, int cmd,
long arg, asio::error_code& ec);
ASIO_DECL int poll_read(int d,
state_type state, asio::error_code& ec);
ASIO_DECL int poll_write(int d,
state_type state, asio::error_code& ec);
ASIO_DECL int poll_error(int d,
state_type state, asio::error_code& ec);
} // namespace descriptor_ops
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#if defined(ASIO_HEADER_ONLY)
# include "asio/detail/impl/descriptor_ops.ipp"
#endif // defined(ASIO_HEADER_ONLY)
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
#endif // ASIO_DETAIL_DESCRIPTOR_OPS_HPP
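
The non_blocking_* functions follow the usual POSIX pattern: attempt the call, retry on EINTR, and report "not ready" for EAGAIN/EWOULDBLOCK so the reactor can wait for readiness. A standalone sketch of that pattern (simplified; end-of-file handling for stream reads is omitted):

#include <cerrno>
#include <cstddef>
#include <fcntl.h>
#include <iostream>
#include <system_error>
#include <unistd.h>

// Returns true if the operation completed (success or hard error),
// false if the descriptor was simply not ready yet.
bool non_blocking_read(int fd, void* data, std::size_t size,
    std::error_code& ec, std::size_t& bytes_transferred)
{
  for (;;)
  {
    ssize_t n = ::read(fd, data, size);
    if (n >= 0)
    {
      ec.clear();
      bytes_transferred = static_cast<std::size_t>(n);
      return true;
    }
    if (errno == EINTR)
      continue;                       // interrupted: retry immediately
    if (errno == EAGAIN || errno == EWOULDBLOCK)
      return false;                   // not ready: the reactor should wait again
    ec = std::error_code(errno, std::system_category());
    return true;                      // hard error: complete with ec set
  }
}

int main()
{
  int fds[2];
  if (::pipe(fds) != 0)
    return 1;
  ::fcntl(fds[0], F_SETFL, O_NONBLOCK);

  std::error_code ec;
  std::size_t n = 0;
  char buf[8];
  if (!non_blocking_read(fds[0], buf, sizeof(buf), ec, n))
    std::cout << "not ready yet (EAGAIN), as expected on an empty pipe\n";
}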

View File

@@ -0,0 +1,188 @@
//
// detail/descriptor_read_op.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_DESCRIPTOR_READ_OP_HPP
#define ASIO_DETAIL_DESCRIPTOR_READ_OP_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
#include "asio/detail/bind_handler.hpp"
#include "asio/detail/buffer_sequence_adapter.hpp"
#include "asio/detail/descriptor_ops.hpp"
#include "asio/detail/fenced_block.hpp"
#include "asio/detail/handler_work.hpp"
#include "asio/detail/memory.hpp"
#include "asio/detail/reactor_op.hpp"
#include "asio/dispatch.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename MutableBufferSequence>
class descriptor_read_op_base : public reactor_op
{
public:
descriptor_read_op_base(const asio::error_code& success_ec,
int descriptor, const MutableBufferSequence& buffers,
func_type complete_func)
: reactor_op(success_ec,
&descriptor_read_op_base::do_perform, complete_func),
descriptor_(descriptor),
buffers_(buffers)
{
}
static status do_perform(reactor_op* base)
{
ASIO_ASSUME(base != 0);
descriptor_read_op_base* o(static_cast<descriptor_read_op_base*>(base));
typedef buffer_sequence_adapter<asio::mutable_buffer,
MutableBufferSequence> bufs_type;
status result;
if (bufs_type::is_single_buffer)
{
result = descriptor_ops::non_blocking_read1(o->descriptor_,
bufs_type::first(o->buffers_).data(),
bufs_type::first(o->buffers_).size(),
o->ec_, o->bytes_transferred_) ? done : not_done;
}
else
{
bufs_type bufs(o->buffers_);
result = descriptor_ops::non_blocking_read(o->descriptor_,
bufs.buffers(), bufs.count(), o->ec_, o->bytes_transferred_)
? done : not_done;
}
ASIO_HANDLER_REACTOR_OPERATION((*o, "non_blocking_read",
o->ec_, o->bytes_transferred_));
return result;
}
private:
int descriptor_;
MutableBufferSequence buffers_;
};
template <typename MutableBufferSequence, typename Handler, typename IoExecutor>
class descriptor_read_op
: public descriptor_read_op_base<MutableBufferSequence>
{
public:
typedef Handler handler_type;
typedef IoExecutor io_executor_type;
ASIO_DEFINE_HANDLER_PTR(descriptor_read_op);
descriptor_read_op(const asio::error_code& success_ec,
int descriptor, const MutableBufferSequence& buffers,
Handler& handler, const IoExecutor& io_ex)
: descriptor_read_op_base<MutableBufferSequence>(success_ec,
descriptor, buffers, &descriptor_read_op::do_complete),
handler_(static_cast<Handler&&>(handler)),
work_(handler_, io_ex)
{
}
static void do_complete(void* owner, operation* base,
const asio::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
ASIO_ASSUME(base != 0);
descriptor_read_op* o(static_cast<descriptor_read_op*>(base));
ptr p = { asio::detail::addressof(o->handler_), o, o };
ASIO_HANDLER_COMPLETION((*o));
// Take ownership of the operation's outstanding work.
handler_work<Handler, IoExecutor> w(
static_cast<handler_work<Handler, IoExecutor>&&>(
o->work_));
ASIO_ERROR_LOCATION(o->ec_);
// Make a copy of the handler so that the memory can be deallocated before
// the upcall is made. Even if we're not about to make an upcall, a
// sub-object of the handler may be the true owner of the memory associated
// with the handler. Consequently, a local copy of the handler is required
// to ensure that any owning sub-object remains valid until after we have
// deallocated the memory here.
detail::binder2<Handler, asio::error_code, std::size_t>
handler(o->handler_, o->ec_, o->bytes_transferred_);
p.h = asio::detail::addressof(handler.handler_);
p.reset();
// Make the upcall if required.
if (owner)
{
fenced_block b(fenced_block::half);
ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
w.complete(handler, handler.handler_);
ASIO_HANDLER_INVOCATION_END;
}
}
static void do_immediate(operation* base, bool, const void* io_ex)
{
// Take ownership of the handler object.
ASIO_ASSUME(base != 0);
descriptor_read_op* o(static_cast<descriptor_read_op*>(base));
ptr p = { asio::detail::addressof(o->handler_), o, o };
ASIO_HANDLER_COMPLETION((*o));
// Take ownership of the operation's outstanding work.
immediate_handler_work<Handler, IoExecutor> w(
static_cast<handler_work<Handler, IoExecutor>&&>(
o->work_));
ASIO_ERROR_LOCATION(o->ec_);
// Make a copy of the handler so that the memory can be deallocated before
// the upcall is made. Even if we're not about to make an upcall, a
// sub-object of the handler may be the true owner of the memory associated
// with the handler. Consequently, a local copy of the handler is required
// to ensure that any owning sub-object remains valid until after we have
// deallocated the memory here.
detail::binder2<Handler, asio::error_code, std::size_t>
handler(o->handler_, o->ec_, o->bytes_transferred_);
p.h = asio::detail::addressof(handler.handler_);
p.reset();
ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
w.complete(handler, handler.handler_, io_ex);
ASIO_HANDLER_INVOCATION_END;
}
private:
Handler handler_;
handler_work<Handler, IoExecutor> work_;
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
#endif // ASIO_DETAIL_DESCRIPTOR_READ_OP_HPP
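
do_perform above is a single step of a reactor state machine: attempt the non-blocking transfer, complete if done, otherwise stay registered until the descriptor is ready again. A standalone sketch of that loop using poll() in place of the reactor (illustrative only):

#include <cerrno>
#include <cstddef>
#include <poll.h>
#include <unistd.h>

enum class status { done, not_done };

// One "do_perform" step: try the transfer without blocking.
status try_read(int fd, char* data, std::size_t size, std::size_t& n)
{
  ssize_t r = ::read(fd, data, size);
  if (r >= 0) { n = static_cast<std::size_t>(r); return status::done; }
  return (errno == EAGAIN || errno == EWOULDBLOCK)
      ? status::not_done : status::done; // hard errors also complete
}

// The reactor's job, reduced to a single operation on a single descriptor.
void run_read(int fd, char* data, std::size_t size, std::size_t& n)
{
  while (try_read(fd, data, size, n) == status::not_done)
  {
    ::pollfd p{fd, POLLIN, 0};
    ::poll(&p, 1, -1); // a real reactor multiplexes many operations here
  }
}

int main()
{
  int fds[2];
  if (::pipe(fds) != 0)
    return 1;
  ::write(fds[1], "hi", 2);
  char buf[4];
  std::size_t n = 0;
  run_read(fds[0], buf, sizeof(buf), n); // data is ready, so done on first try
}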

View File

@@ -0,0 +1,187 @@
//
// detail/descriptor_write_op.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_DESCRIPTOR_WRITE_OP_HPP
#define ASIO_DETAIL_DESCRIPTOR_WRITE_OP_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
#include "asio/detail/bind_handler.hpp"
#include "asio/detail/buffer_sequence_adapter.hpp"
#include "asio/detail/descriptor_ops.hpp"
#include "asio/detail/fenced_block.hpp"
#include "asio/detail/handler_work.hpp"
#include "asio/detail/memory.hpp"
#include "asio/detail/reactor_op.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename ConstBufferSequence>
class descriptor_write_op_base : public reactor_op
{
public:
descriptor_write_op_base(const asio::error_code& success_ec,
int descriptor, const ConstBufferSequence& buffers,
func_type complete_func)
: reactor_op(success_ec,
&descriptor_write_op_base::do_perform, complete_func),
descriptor_(descriptor),
buffers_(buffers)
{
}
static status do_perform(reactor_op* base)
{
ASIO_ASSUME(base != 0);
descriptor_write_op_base* o(static_cast<descriptor_write_op_base*>(base));
typedef buffer_sequence_adapter<asio::const_buffer,
ConstBufferSequence> bufs_type;
status result;
if (bufs_type::is_single_buffer)
{
result = descriptor_ops::non_blocking_write1(o->descriptor_,
bufs_type::first(o->buffers_).data(),
bufs_type::first(o->buffers_).size(),
o->ec_, o->bytes_transferred_) ? done : not_done;
}
else
{
bufs_type bufs(o->buffers_);
result = descriptor_ops::non_blocking_write(o->descriptor_,
bufs.buffers(), bufs.count(), o->ec_, o->bytes_transferred_)
? done : not_done;
}
ASIO_HANDLER_REACTOR_OPERATION((*o, "non_blocking_write",
o->ec_, o->bytes_transferred_));
return result;
}
private:
int descriptor_;
ConstBufferSequence buffers_;
};
template <typename ConstBufferSequence, typename Handler, typename IoExecutor>
class descriptor_write_op
: public descriptor_write_op_base<ConstBufferSequence>
{
public:
typedef Handler handler_type;
typedef IoExecutor io_executor_type;
ASIO_DEFINE_HANDLER_PTR(descriptor_write_op);
descriptor_write_op(const asio::error_code& success_ec,
int descriptor, const ConstBufferSequence& buffers,
Handler& handler, const IoExecutor& io_ex)
: descriptor_write_op_base<ConstBufferSequence>(success_ec,
descriptor, buffers, &descriptor_write_op::do_complete),
handler_(static_cast<Handler&&>(handler)),
work_(handler_, io_ex)
{
}
static void do_complete(void* owner, operation* base,
const asio::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
ASIO_ASSUME(base != 0);
descriptor_write_op* o(static_cast<descriptor_write_op*>(base));
ptr p = { asio::detail::addressof(o->handler_), o, o };
ASIO_HANDLER_COMPLETION((*o));
// Take ownership of the operation's outstanding work.
handler_work<Handler, IoExecutor> w(
static_cast<handler_work<Handler, IoExecutor>&&>(
o->work_));
ASIO_ERROR_LOCATION(o->ec_);
// Make a copy of the handler so that the memory can be deallocated before
// the upcall is made. Even if we're not about to make an upcall, a
// sub-object of the handler may be the true owner of the memory associated
// with the handler. Consequently, a local copy of the handler is required
// to ensure that any owning sub-object remains valid until after we have
// deallocated the memory here.
detail::binder2<Handler, asio::error_code, std::size_t>
handler(o->handler_, o->ec_, o->bytes_transferred_);
p.h = asio::detail::addressof(handler.handler_);
p.reset();
// Make the upcall if required.
if (owner)
{
fenced_block b(fenced_block::half);
ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
w.complete(handler, handler.handler_);
ASIO_HANDLER_INVOCATION_END;
}
}
static void do_immediate(operation* base, bool, const void* io_ex)
{
// Take ownership of the handler object.
ASIO_ASSUME(base != 0);
descriptor_write_op* o(static_cast<descriptor_write_op*>(base));
ptr p = { asio::detail::addressof(o->handler_), o, o };
ASIO_HANDLER_COMPLETION((*o));
// Take ownership of the operation's outstanding work.
immediate_handler_work<Handler, IoExecutor> w(
static_cast<handler_work<Handler, IoExecutor>&&>(
o->work_));
ASIO_ERROR_LOCATION(o->ec_);
// Make a copy of the handler so that the memory can be deallocated before
// the upcall is made. Even if we're not about to make an upcall, a
// sub-object of the handler may be the true owner of the memory associated
// with the handler. Consequently, a local copy of the handler is required
// to ensure that any owning sub-object remains valid until after we have
// deallocated the memory here.
detail::binder2<Handler, asio::error_code, std::size_t>
handler(o->handler_, o->ec_, o->bytes_transferred_);
p.h = asio::detail::addressof(handler.handler_);
p.reset();
ASIO_HANDLER_INVOCATION_BEGIN((handler.arg1_, handler.arg2_));
w.complete(handler, handler.handler_, io_ex);
ASIO_HANDLER_INVOCATION_END;
}
private:
Handler handler_;
handler_work<Handler, IoExecutor> work_;
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
#endif // ASIO_DETAIL_DESCRIPTOR_WRITE_OP_HPP
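
The do_perform function above relies on descriptor_ops::non_blocking_write returning true once the operation is finished (success or hard error) and false when the descriptor is simply not ready yet. A hedged sketch of that contract in plain POSIX terms; the helper name and signature here are illustrative only.

#include <cerrno>
#include <cstddef>
#include <unistd.h>

bool non_blocking_write_sketch(int fd, const void* data, std::size_t size,
    int& error, std::size_t& bytes_transferred)
{
  for (;;)
  {
    ssize_t n = ::write(fd, data, size);
    if (n >= 0)
    {
      error = 0;
      bytes_transferred = static_cast<std::size_t>(n);
      return true;  // done: the reactor may complete the operation
    }
    if (errno == EINTR)
      continue;     // interrupted by a signal: retry immediately
    if (errno == EAGAIN || errno == EWOULDBLOCK)
      return false; // not ready: leave the op queued with the reactor
    error = errno;
    bytes_transferred = 0;
    return true;    // hard error: complete the operation with an error
  }
}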

View File

@@ -0,0 +1,248 @@
//
// detail/dev_poll_reactor.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_DEV_POLL_REACTOR_HPP
#define ASIO_DETAIL_DEV_POLL_REACTOR_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_DEV_POLL)
#include <cstddef>
#include <vector>
#include <sys/devpoll.h>
#include "asio/detail/hash_map.hpp"
#include "asio/detail/limits.hpp"
#include "asio/detail/mutex.hpp"
#include "asio/detail/op_queue.hpp"
#include "asio/detail/reactor_op.hpp"
#include "asio/detail/reactor_op_queue.hpp"
#include "asio/detail/scheduler_task.hpp"
#include "asio/detail/select_interrupter.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/detail/timer_queue_base.hpp"
#include "asio/detail/timer_queue_set.hpp"
#include "asio/detail/wait_op.hpp"
#include "asio/execution_context.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
class dev_poll_reactor
: public execution_context_service_base<dev_poll_reactor>,
public scheduler_task
{
public:
enum op_types { read_op = 0, write_op = 1,
connect_op = 1, except_op = 2, max_ops = 3 };
// Per-descriptor data.
struct per_descriptor_data
{
};
// Constructor.
ASIO_DECL dev_poll_reactor(asio::execution_context& ctx);
// Destructor.
ASIO_DECL ~dev_poll_reactor();
// Destroy all user-defined handler objects owned by the service.
ASIO_DECL void shutdown();
// Recreate internal descriptors following a fork.
ASIO_DECL void notify_fork(
asio::execution_context::fork_event fork_ev);
// Initialise the task.
ASIO_DECL void init_task();
// Register a socket with the reactor. Returns 0 on success, system error
// code on failure.
ASIO_DECL int register_descriptor(socket_type, per_descriptor_data&);
// Register a descriptor with an associated single operation. Returns 0 on
// success, system error code on failure.
ASIO_DECL int register_internal_descriptor(
int op_type, socket_type descriptor,
per_descriptor_data& descriptor_data, reactor_op* op);
// Move descriptor registration from one descriptor_data object to another.
ASIO_DECL void move_descriptor(socket_type descriptor,
per_descriptor_data& target_descriptor_data,
per_descriptor_data& source_descriptor_data);
// Post a reactor operation for immediate completion.
void post_immediate_completion(operation* op, bool is_continuation) const;
// Post a reactor operation for immediate completion.
ASIO_DECL static void call_post_immediate_completion(
operation* op, bool is_continuation, const void* self);
// Start a new operation. The reactor operation will be performed when the
// given descriptor is flagged as ready, or an error has occurred.
ASIO_DECL void start_op(int op_type, socket_type descriptor,
per_descriptor_data&, reactor_op* op,
bool is_continuation, bool allow_speculative,
void (*on_immediate)(operation*, bool, const void*),
const void* immediate_arg);
// Start a new operation. The reactor operation will be performed when the
// given descriptor is flagged as ready, or an error has occurred.
void start_op(int op_type, socket_type descriptor,
per_descriptor_data& descriptor_data, reactor_op* op,
bool is_continuation, bool allow_speculative)
{
start_op(op_type, descriptor, descriptor_data,
op, is_continuation, allow_speculative,
&dev_poll_reactor::call_post_immediate_completion, this);
}
// Cancel all operations associated with the given descriptor. The
// handlers associated with the descriptor will be invoked with the
// operation_aborted error.
ASIO_DECL void cancel_ops(socket_type descriptor, per_descriptor_data&);
// Cancel all operations associated with the given descriptor and key. The
// handlers associated with the descriptor will be invoked with the
// operation_aborted error.
ASIO_DECL void cancel_ops_by_key(socket_type descriptor,
per_descriptor_data& descriptor_data,
int op_type, void* cancellation_key);
// Cancel any operations that are running against the descriptor and remove
// its registration from the reactor. The reactor resources associated with
// the descriptor must be released by calling cleanup_descriptor_data.
ASIO_DECL void deregister_descriptor(socket_type descriptor,
per_descriptor_data&, bool closing);
// Remove the descriptor's registration from the reactor. The reactor
// resources associated with the descriptor must be released by calling
// cleanup_descriptor_data.
ASIO_DECL void deregister_internal_descriptor(
socket_type descriptor, per_descriptor_data&);
// Perform any post-deregistration cleanup tasks associated with the
// descriptor data.
ASIO_DECL void cleanup_descriptor_data(per_descriptor_data&);
// Add a new timer queue to the reactor.
template <typename TimeTraits, typename Allocator>
void add_timer_queue(timer_queue<TimeTraits, Allocator>& queue);
// Remove a timer queue from the reactor.
template <typename TimeTraits, typename Allocator>
void remove_timer_queue(timer_queue<TimeTraits, Allocator>& queue);
// Schedule a new operation in the given timer queue to expire at the
// specified absolute time.
template <typename TimeTraits, typename Allocator>
void schedule_timer(timer_queue<TimeTraits, Allocator>& queue,
const typename TimeTraits::time_type& time,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& timer,
wait_op* op);
// Cancel the timer operations associated with the given token. Returns the
// number of operations that have been posted or dispatched.
template <typename TimeTraits, typename Allocator>
std::size_t cancel_timer(timer_queue<TimeTraits, Allocator>& queue,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& timer,
std::size_t max_cancelled = (std::numeric_limits<std::size_t>::max)());
// Cancel the timer operations associated with the given key.
template <typename TimeTraits, typename Allocator>
void cancel_timer_by_key(timer_queue<TimeTraits, Allocator>& queue,
typename timer_queue<TimeTraits, Allocator>::per_timer_data* timer,
void* cancellation_key);
// Move the timer operations associated with the given timer.
template <typename TimeTraits, typename Allocator>
void move_timer(timer_queue<TimeTraits, Allocator>& queue,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& target,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& source);
// Run /dev/poll once until interrupted or events are ready to be dispatched.
ASIO_DECL void run(long usec, op_queue<operation>& ops);
// Interrupt the select loop.
ASIO_DECL void interrupt();
private:
// Create the /dev/poll file descriptor. Throws an exception if the descriptor
// cannot be created.
ASIO_DECL static int do_dev_poll_create();
// Helper function to add a new timer queue.
ASIO_DECL void do_add_timer_queue(timer_queue_base& queue);
// Helper function to remove a timer queue.
ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue);
// Get the timeout value for the /dev/poll DP_POLL operation. The timeout
// value is returned as a number of milliseconds. A return value of -1
// indicates that the poll should block indefinitely.
ASIO_DECL int get_timeout(int msec);
// Cancel all operations associated with the given descriptor. The do_cancel
// function of the handler objects will be invoked. This function does not
// acquire the dev_poll_reactor's mutex.
ASIO_DECL void cancel_ops_unlocked(socket_type descriptor,
const asio::error_code& ec);
// Add a pending event entry for the given descriptor.
ASIO_DECL ::pollfd& add_pending_event_change(int descriptor);
// The scheduler implementation used to post completions.
scheduler& scheduler_;
// Mutex to protect access to internal data.
asio::detail::mutex mutex_;
// The /dev/poll file descriptor.
int dev_poll_fd_;
// Vector of /dev/poll events waiting to be written to the descriptor.
std::vector< ::pollfd> pending_event_changes_;
// Hash map to associate a descriptor with a pending event change index.
hash_map<int, std::size_t> pending_event_change_index_;
// The interrupter is used to break a blocking DP_POLL operation.
select_interrupter interrupter_;
// The queues of read, write and except operations.
reactor_op_queue<socket_type> op_queue_[max_ops];
// The timer queues.
timer_queue_set timer_queues_;
// Whether the service has been shut down.
bool shutdown_;
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#include "asio/detail/impl/dev_poll_reactor.hpp"
#if defined(ASIO_HEADER_ONLY)
# include "asio/detail/impl/dev_poll_reactor.ipp"
#endif // defined(ASIO_HEADER_ONLY)
#endif // defined(ASIO_HAS_DEV_POLL)
#endif // ASIO_DETAIL_DEV_POLL_REACTOR_HPP
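
For readers unfamiliar with Solaris /dev/poll: descriptors are registered by write()ing pollfd records to the device, and readiness is harvested with the DP_POLL ioctl, which is what the pending_event_changes_ vector and run() above revolve around. A hedged sketch of those primitives; error handling is elided.

#include <fcntl.h>
#include <poll.h>
#include <sys/devpoll.h>
#include <sys/ioctl.h>
#include <unistd.h>

void dev_poll_sketch(int socket_fd)
{
  int dp = ::open("/dev/poll", O_RDWR);

  // Register interest by writing a pollfd record to the device.
  ::pollfd reg = { socket_fd, POLLIN, 0 };
  ::write(dp, &reg, sizeof(reg));

  // Harvest up to 128 ready descriptors, blocking indefinitely (-1).
  ::pollfd ready[128];
  ::dvpoll dvp = { ready, 128, -1 };
  int num = ::ioctl(dp, DP_POLL, &dvp);
  for (int i = 0; i < num; ++i)
  {
    // ready[i].fd is now readable; this is where the reactor would run
    // the corresponding queued reactor_op.
  }

  ::close(dp);
}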

View File

@@ -0,0 +1,300 @@
//
// detail/epoll_reactor.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_EPOLL_REACTOR_HPP
#define ASIO_DETAIL_EPOLL_REACTOR_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_EPOLL)
#include "asio/detail/atomic_count.hpp"
#include "asio/detail/conditionally_enabled_mutex.hpp"
#include "asio/detail/limits.hpp"
#include "asio/detail/object_pool.hpp"
#include "asio/detail/op_queue.hpp"
#include "asio/detail/reactor_op.hpp"
#include "asio/detail/scheduler_task.hpp"
#include "asio/detail/select_interrupter.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/detail/timer_queue_base.hpp"
#include "asio/detail/timer_queue_set.hpp"
#include "asio/detail/wait_op.hpp"
#include "asio/execution_context.hpp"
#if defined(ASIO_HAS_TIMERFD)
# include <sys/timerfd.h>
#endif // defined(ASIO_HAS_TIMERFD)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
class epoll_reactor
: public execution_context_service_base<epoll_reactor>,
public scheduler_task
{
private:
// The mutex type used by this reactor.
typedef conditionally_enabled_mutex mutex;
public:
enum op_types { read_op = 0, write_op = 1,
connect_op = 1, except_op = 2, max_ops = 3 };
// Per-descriptor queues.
struct descriptor_state : operation
{
descriptor_state* next_;
descriptor_state* prev_;
mutex mutex_;
epoll_reactor* reactor_;
int descriptor_;
uint32_t registered_events_;
op_queue<reactor_op> op_queue_[max_ops];
bool try_speculative_[max_ops];
bool shutdown_;
ASIO_DECL descriptor_state(bool locking, int spin_count);
void set_ready_events(uint32_t events) { task_result_ = events; }
void add_ready_events(uint32_t events) { task_result_ |= events; }
ASIO_DECL operation* perform_io(uint32_t events);
ASIO_DECL static void do_complete(
void* owner, operation* base,
const asio::error_code& ec, std::size_t bytes_transferred);
};
// Per-descriptor data.
typedef descriptor_state* per_descriptor_data;
// Constructor.
ASIO_DECL epoll_reactor(asio::execution_context& ctx);
// Destructor.
ASIO_DECL ~epoll_reactor();
// Destroy all user-defined handler objects owned by the service.
ASIO_DECL void shutdown();
// Recreate internal descriptors following a fork.
ASIO_DECL void notify_fork(
asio::execution_context::fork_event fork_ev);
// Initialise the task.
ASIO_DECL void init_task();
// Register a socket with the reactor. Returns 0 on success, system error
// code on failure.
ASIO_DECL int register_descriptor(socket_type descriptor,
per_descriptor_data& descriptor_data);
// Register a descriptor with an associated single operation. Returns 0 on
// success, system error code on failure.
ASIO_DECL int register_internal_descriptor(
int op_type, socket_type descriptor,
per_descriptor_data& descriptor_data, reactor_op* op);
// Move descriptor registration from one descriptor_data object to another.
ASIO_DECL void move_descriptor(socket_type descriptor,
per_descriptor_data& target_descriptor_data,
per_descriptor_data& source_descriptor_data);
// Post a reactor operation for immediate completion.
void post_immediate_completion(operation* op, bool is_continuation) const;
// Post a reactor operation for immediate completion.
ASIO_DECL static void call_post_immediate_completion(
operation* op, bool is_continuation, const void* self);
// Start a new operation. The reactor operation will be performed when the
// given descriptor is flagged as ready, or an error has occurred.
ASIO_DECL void start_op(int op_type, socket_type descriptor,
per_descriptor_data& descriptor_data, reactor_op* op,
bool is_continuation, bool allow_speculative,
void (*on_immediate)(operation*, bool, const void*),
const void* immediate_arg);
// Start a new operation. The reactor operation will be performed when the
// given descriptor is flagged as ready, or an error has occurred.
void start_op(int op_type, socket_type descriptor,
per_descriptor_data& descriptor_data, reactor_op* op,
bool is_continuation, bool allow_speculative)
{
start_op(op_type, descriptor, descriptor_data,
op, is_continuation, allow_speculative,
&epoll_reactor::call_post_immediate_completion, this);
}
// Cancel all operations associated with the given descriptor. The
// handlers associated with the descriptor will be invoked with the
// operation_aborted error.
ASIO_DECL void cancel_ops(socket_type descriptor,
per_descriptor_data& descriptor_data);
// Cancel all operations associated with the given descriptor and key. The
// handlers associated with the descriptor will be invoked with the
// operation_aborted error.
ASIO_DECL void cancel_ops_by_key(socket_type descriptor,
per_descriptor_data& descriptor_data,
int op_type, void* cancellation_key);
// Cancel any operations that are running against the descriptor and remove
// its registration from the reactor. The reactor resources associated with
// the descriptor must be released by calling cleanup_descriptor_data.
ASIO_DECL void deregister_descriptor(socket_type descriptor,
per_descriptor_data& descriptor_data, bool closing);
// Remove the descriptor's registration from the reactor. The reactor
// resources associated with the descriptor must be released by calling
// cleanup_descriptor_data.
ASIO_DECL void deregister_internal_descriptor(
socket_type descriptor, per_descriptor_data& descriptor_data);
// Perform any post-deregistration cleanup tasks associated with the
// descriptor data.
ASIO_DECL void cleanup_descriptor_data(
per_descriptor_data& descriptor_data);
// Add a new timer queue to the reactor.
template <typename TimeTraits, typename Allocator>
void add_timer_queue(timer_queue<TimeTraits, Allocator>& timer_queue);
// Remove a timer queue from the reactor.
template <typename TimeTraits, typename Allocator>
void remove_timer_queue(timer_queue<TimeTraits, Allocator>& timer_queue);
// Schedule a new operation in the given timer queue to expire at the
// specified absolute time.
template <typename TimeTraits, typename Allocator>
void schedule_timer(timer_queue<TimeTraits, Allocator>& queue,
const typename TimeTraits::time_type& time,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& timer,
wait_op* op);
// Cancel the timer operations associated with the given token. Returns the
// number of operations that have been posted or dispatched.
template <typename TimeTraits, typename Allocator>
std::size_t cancel_timer(timer_queue<TimeTraits, Allocator>& queue,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& timer,
std::size_t max_cancelled = (std::numeric_limits<std::size_t>::max)());
// Cancel the timer operations associated with the given key.
template <typename TimeTraits, typename Allocator>
void cancel_timer_by_key(timer_queue<TimeTraits, Allocator>& queue,
typename timer_queue<TimeTraits, Allocator>::per_timer_data* timer,
void* cancellation_key);
// Move the timer operations associated with the given timer.
template <typename TimeTraits, typename Allocator>
void move_timer(timer_queue<TimeTraits, Allocator>& queue,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& target,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& source);
// Run epoll once until interrupted or events are ready to be dispatched.
ASIO_DECL void run(long usec, op_queue<operation>& ops);
// Interrupt the select loop.
ASIO_DECL void interrupt();
private:
// The hint to pass to epoll_create to size its data structures.
enum { epoll_size = 20000 };
// Create the epoll file descriptor. Throws an exception if the descriptor
// cannot be created.
ASIO_DECL static int do_epoll_create();
// Create the timerfd file descriptor. Does not throw.
ASIO_DECL static int do_timerfd_create();
// Allocate a new descriptor state object.
ASIO_DECL descriptor_state* allocate_descriptor_state();
// Free an existing descriptor state object.
ASIO_DECL void free_descriptor_state(descriptor_state* s);
// Helper function to add a new timer queue.
ASIO_DECL void do_add_timer_queue(timer_queue_base& queue);
// Helper function to remove a timer queue.
ASIO_DECL void do_remove_timer_queue(timer_queue_base& queue);
// Called to recalculate and update the timeout.
ASIO_DECL void update_timeout();
// Get the timeout value for the epoll_wait call. The timeout value is
// returned as a number of milliseconds. A return value of -1 indicates
// that epoll_wait should block indefinitely.
ASIO_DECL int get_timeout(int msec);
#if defined(ASIO_HAS_TIMERFD)
// Get the timeout value for the timer descriptor. The return value is the
// flag argument to be used when calling timerfd_settime.
ASIO_DECL int get_timeout(itimerspec& ts);
#endif // defined(ASIO_HAS_TIMERFD)
// The scheduler implementation used to post completions.
scheduler& scheduler_;
// Mutex to protect access to internal data.
mutex mutex_;
// The interrupter is used to break a blocking epoll_wait call.
select_interrupter interrupter_;
// The epoll file descriptor.
int epoll_fd_;
// The timer file descriptor.
int timer_fd_;
// The timer queues.
timer_queue_set timer_queues_;
// Whether the service has been shut down.
bool shutdown_;
// Whether I/O locking is enabled.
const bool io_locking_;
  // How many times to spin waiting for the I/O mutex.
const int io_locking_spin_count_;
// Mutex to protect access to the registered descriptors.
mutex registered_descriptors_mutex_;
// Keep track of all registered descriptors.
object_pool<descriptor_state, execution_context::allocator<void>>
registered_descriptors_;
// Helper class to do post-perform_io cleanup.
struct perform_io_cleanup_on_block_exit;
friend struct perform_io_cleanup_on_block_exit;
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#include "asio/detail/impl/epoll_reactor.hpp"
#if defined(ASIO_HEADER_ONLY)
# include "asio/detail/impl/epoll_reactor.ipp"
#endif // defined(ASIO_HEADER_ONLY)
#endif // defined(ASIO_HAS_EPOLL)
#endif // ASIO_DETAIL_EPOLL_REACTOR_HPP
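
A hedged sketch of the Linux primitives this reactor wraps: one epoll instance, descriptors registered edge-triggered, and a wait loop that hands ready events back for dispatch. Illustrative only; the real reactor also multiplexes the timerfd and the interrupter through the same epoll set.

#include <sys/epoll.h>
#include <unistd.h>

void epoll_sketch(int socket_fd)
{
  int ep = ::epoll_create1(EPOLL_CLOEXEC);

  // Register for read/write readiness, edge-triggered.
  epoll_event ev = {};
  ev.events = EPOLLIN | EPOLLOUT | EPOLLERR | EPOLLHUP | EPOLLET;
  ev.data.fd = socket_fd;
  ::epoll_ctl(ep, EPOLL_CTL_ADD, socket_fd, &ev);

  // One pass of the event loop; -1 blocks until something is ready.
  epoll_event events[128];
  int num = ::epoll_wait(ep, events, 128, -1);
  for (int i = 0; i < num; ++i)
  {
    // events[i].events carries the ready mask; the reactor would perform
    // the queued reactor_ops for this descriptor here.
  }

  ::close(ep);
}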

View File

@@ -0,0 +1,46 @@
//
// detail/event.hpp
// ~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_EVENT_HPP
#define ASIO_DETAIL_EVENT_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if !defined(ASIO_HAS_THREADS)
# include "asio/detail/null_event.hpp"
#elif defined(ASIO_WINDOWS)
# include "asio/detail/win_event.hpp"
#elif defined(ASIO_HAS_PTHREADS)
# include "asio/detail/posix_event.hpp"
#else
# include "asio/detail/std_event.hpp"
#endif
namespace asio {
namespace detail {
#if !defined(ASIO_HAS_THREADS)
typedef null_event event;
#elif defined(ASIO_WINDOWS)
typedef win_event event;
#elif defined(ASIO_HAS_PTHREADS)
typedef posix_event event;
#else
typedef std_event event;
#endif
} // namespace detail
} // namespace asio
#endif // ASIO_DETAIL_EVENT_HPP

View File

@@ -0,0 +1,83 @@
//
// detail/eventfd_select_interrupter.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2008 Roelof Naude (roelof.naude at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_EVENTFD_SELECT_INTERRUPTER_HPP
#define ASIO_DETAIL_EVENTFD_SELECT_INTERRUPTER_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_EVENTFD)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
class eventfd_select_interrupter
{
public:
// Constructor.
ASIO_DECL eventfd_select_interrupter();
// Destructor.
ASIO_DECL ~eventfd_select_interrupter();
// Recreate the interrupter's descriptors. Used after a fork.
ASIO_DECL void recreate();
// Interrupt the select call.
ASIO_DECL void interrupt();
// Reset the select interrupter. Returns true if the reset was successful.
ASIO_DECL bool reset();
// Get the read descriptor to be passed to select.
int read_descriptor() const
{
return read_descriptor_;
}
private:
// Open the descriptors. Throws on error.
ASIO_DECL void open_descriptors();
// Close the descriptors.
ASIO_DECL void close_descriptors();
// The read end of a connection used to interrupt the select call. This file
// descriptor is passed to select such that when it is time to stop, a single
  // 64-bit value will be written on the other end of the connection and this
// descriptor will become readable.
int read_descriptor_;
// The write end of a connection used to interrupt the select call. A single
  // 64-bit non-zero value may be written to this to wake up the select which is
// waiting for the other end to become readable. This descriptor will only
// differ from the read descriptor when a pipe is used.
int write_descriptor_;
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#if defined(ASIO_HEADER_ONLY)
# include "asio/detail/impl/eventfd_select_interrupter.ipp"
#endif // defined(ASIO_HEADER_ONLY)
#endif // defined(ASIO_HAS_EVENTFD)
#endif // ASIO_DETAIL_EVENTFD_SELECT_INTERRUPTER_HPP
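
A hedged sketch of the eventfd mechanics described in the comments above: interrupt() makes the descriptor readable by adding to the 64-bit counter, and reset() drains the counter back to zero. Illustrative only; per the comments, the two descriptors only differ when a pipe is used instead.

#include <stdint.h>
#include <sys/eventfd.h>
#include <unistd.h>

int make_interrupter()
{
  // One descriptor serves as both read and write end.
  return ::eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
}

void interrupt(int fd)
{
  uint64_t counter = 1;
  ::write(fd, &counter, sizeof(counter)); // descriptor becomes readable
}

bool reset(int fd)
{
  uint64_t counter = 0;
  // Reading returns and clears the counter, so the descriptor stops
  // polling as readable until the next interrupt.
  return ::read(fd, &counter, sizeof(counter)) == sizeof(counter);
}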

View File

@@ -0,0 +1,29 @@
//
// detail/exception.hpp
// ~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_EXCEPTION_HPP
#define ASIO_DETAIL_EXCEPTION_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <exception>
namespace asio {
using std::exception_ptr;
using std::current_exception;
using std::rethrow_exception;
} // namespace asio
#endif // ASIO_DETAIL_EXCEPTION_HPP
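
A small usage sketch of the aliases above: capture an exception in one context and rethrow it in another, which is what code built on these aliases typically needs when moving errors between threads or resuming callers.

#include <exception>
#include <stdexcept>

void transport_exception()
{
  std::exception_ptr ep;
  try
  {
    throw std::runtime_error("handler failed");
  }
  catch (...)
  {
    ep = std::current_exception(); // capture without further unwinding
  }

  // ... potentially on another thread ...
  if (ep)
    std::rethrow_exception(ep);    // resurfaces the original exception
}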

View File

@@ -0,0 +1,152 @@
//
// detail/executor_function.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_EXECUTOR_FUNCTION_HPP
#define ASIO_DETAIL_EXECUTOR_FUNCTION_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/handler_alloc_helpers.hpp"
#include "asio/detail/memory.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
// Lightweight, move-only function object wrapper.
class executor_function
{
public:
template <typename F, typename Alloc>
explicit executor_function(F f, const Alloc& a)
{
// Allocate and construct an object to wrap the function.
typedef impl<F, Alloc> impl_type;
typename impl_type::ptr p = {
detail::addressof(a), impl_type::ptr::allocate(a), 0 };
impl_ = new (p.v) impl_type(static_cast<F&&>(f), a);
p.v = 0;
}
executor_function(executor_function&& other) noexcept
: impl_(other.impl_)
{
other.impl_ = 0;
}
~executor_function()
{
if (impl_)
impl_->complete_(impl_, false);
}
void operator()()
{
if (impl_)
{
impl_base* i = impl_;
impl_ = 0;
i->complete_(i, true);
}
}
private:
// Base class for polymorphic function implementations.
struct impl_base
{
void (*complete_)(impl_base*, bool);
};
// Polymorphic function implementation.
template <typename Function, typename Alloc>
struct impl : impl_base
{
ASIO_DEFINE_TAGGED_HANDLER_ALLOCATOR_PTR(
thread_info_base::executor_function_tag, impl);
template <typename F>
impl(F&& f, const Alloc& a)
: function_(static_cast<F&&>(f)),
allocator_(a)
{
complete_ = &executor_function::complete<Function, Alloc>;
}
Function function_;
Alloc allocator_;
};
// Helper to complete function invocation.
template <typename Function, typename Alloc>
static void complete(impl_base* base, bool call)
{
// Take ownership of the function object.
impl<Function, Alloc>* i(static_cast<impl<Function, Alloc>*>(base));
Alloc allocator(i->allocator_);
typename impl<Function, Alloc>::ptr p = {
detail::addressof(allocator), i, i };
// Make a copy of the function so that the memory can be deallocated before
// the upcall is made. Even if we're not about to make an upcall, a
// sub-object of the function may be the true owner of the memory
// associated with the function. Consequently, a local copy of the function
// is required to ensure that any owning sub-object remains valid until
// after we have deallocated the memory here.
Function function(static_cast<Function&&>(i->function_));
p.reset();
// Make the upcall if required.
if (call)
{
static_cast<Function&&>(function)();
}
}
impl_base* impl_;
};
// Lightweight, non-owning, copyable function object wrapper.
class executor_function_view
{
public:
template <typename F>
explicit executor_function_view(F& f) noexcept
: complete_(&executor_function_view::complete<F>),
function_(&f)
{
}
void operator()()
{
complete_(function_);
}
private:
// Helper to complete function invocation.
template <typename F>
static void complete(void* f)
{
(*static_cast<F*>(f))();
}
void (*complete_)(void*);
void* function_;
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_EXECUTOR_FUNCTION_HPP
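
The impl_base scheme above is manual type erasure: a single function pointer stands in for a vtable, acting as both "invoke" and "destroy" depending on its bool argument, so the wrapper stays one pointer wide. A standalone sketch of the same idea with the allocator plumbing omitted; tiny_function is illustrative.

#include <utility>

class tiny_function
{
public:
  template <typename F>
  explicit tiny_function(F f)
    : impl_(new impl<F>(std::move(f)))
  {
  }

  tiny_function(tiny_function&& other) noexcept
    : impl_(other.impl_)
  {
    other.impl_ = nullptr;
  }

  ~tiny_function()
  {
    if (impl_)
      impl_->complete_(impl_, false); // destroy without invoking
  }

  void operator()()
  {
    if (impl_)
    {
      impl_base* i = impl_;
      impl_ = nullptr;
      i->complete_(i, true); // invoke, then destroy
    }
  }

private:
  struct impl_base
  {
    void (*complete_)(impl_base*, bool);
  };

  template <typename F>
  struct impl : impl_base
  {
    explicit impl(F f)
      : function_(std::move(f))
    {
      this->complete_ = &tiny_function::do_complete<F>;
    }
    F function_;
  };

  // One entry point covers both paths, mirroring complete() above.
  template <typename F>
  static void do_complete(impl_base* base, bool call)
  {
    impl<F>* i = static_cast<impl<F>*>(base);
    F function(std::move(i->function_)); // move out before freeing
    delete i;
    if (call)
      std::move(function)();
  }

  impl_base* impl_;
};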

View File

@@ -0,0 +1,84 @@
//
// detail/executor_op.hpp
// ~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_EXECUTOR_OP_HPP
#define ASIO_DETAIL_EXECUTOR_OP_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/fenced_block.hpp"
#include "asio/detail/handler_alloc_helpers.hpp"
#include "asio/detail/scheduler_operation.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename Handler, typename Alloc,
typename Operation = scheduler_operation>
class executor_op : public Operation
{
public:
ASIO_DEFINE_HANDLER_ALLOCATOR_PTR(executor_op);
template <typename H>
executor_op(H&& h, const Alloc& allocator)
: Operation(&executor_op::do_complete),
handler_(static_cast<H&&>(h)),
allocator_(allocator)
{
}
static void do_complete(void* owner, Operation* base,
const asio::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
// Take ownership of the handler object.
ASIO_ASSUME(base != 0);
executor_op* o(static_cast<executor_op*>(base));
Alloc allocator(o->allocator_);
ptr p = { detail::addressof(allocator), o, o };
ASIO_HANDLER_COMPLETION((*o));
// Make a copy of the handler so that the memory can be deallocated before
// the upcall is made. Even if we're not about to make an upcall, a
// sub-object of the handler may be the true owner of the memory associated
// with the handler. Consequently, a local copy of the handler is required
// to ensure that any owning sub-object remains valid until after we have
// deallocated the memory here.
Handler handler(static_cast<Handler&&>(o->handler_));
p.reset();
// Make the upcall if required.
if (owner)
{
fenced_block b(fenced_block::half);
ASIO_HANDLER_INVOCATION_BEGIN(());
static_cast<Handler&&>(handler)();
ASIO_HANDLER_INVOCATION_END;
}
}
private:
Handler handler_;
Alloc allocator_;
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_EXECUTOR_OP_HPP

View File

@@ -0,0 +1,39 @@
//
// detail/fd_set_adapter.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_FD_SET_ADAPTER_HPP
#define ASIO_DETAIL_FD_SET_ADAPTER_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if !defined(ASIO_WINDOWS_RUNTIME)
#include "asio/detail/posix_fd_set_adapter.hpp"
#include "asio/detail/win_fd_set_adapter.hpp"
namespace asio {
namespace detail {
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
typedef win_fd_set_adapter fd_set_adapter;
#else
typedef posix_fd_set_adapter fd_set_adapter;
#endif
} // namespace detail
} // namespace asio
#endif // !defined(ASIO_WINDOWS_RUNTIME)
#endif // ASIO_DETAIL_FD_SET_ADAPTER_HPP

View File

@@ -0,0 +1,40 @@
//
// detail/fenced_block.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_FENCED_BLOCK_HPP
#define ASIO_DETAIL_FENCED_BLOCK_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if !defined(ASIO_HAS_THREADS) \
|| defined(ASIO_DISABLE_FENCED_BLOCK)
# include "asio/detail/null_fenced_block.hpp"
#else
# include "asio/detail/std_fenced_block.hpp"
#endif
namespace asio {
namespace detail {
#if !defined(ASIO_HAS_THREADS) \
|| defined(ASIO_DISABLE_FENCED_BLOCK)
typedef null_fenced_block fenced_block;
#else
typedef std_fenced_block fenced_block;
#endif
} // namespace detail
} // namespace asio
#endif // ASIO_DETAIL_FENCED_BLOCK_HPP
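
A hedged sketch of what the std-based fenced block plausibly looks like, inferred from the fenced_block(fenced_block::half) usage in the operations earlier in this commit: the destructor's release fence publishes a handler's side effects before the next handler can run, while a full block also acquires on entry.

#include <atomic>

class fenced_block_sketch
{
public:
  enum half_t { half };
  enum full_t { full };

  // Half block: release-only, for when the caller has already
  // synchronised on entry.
  explicit fenced_block_sketch(half_t)
  {
  }

  // Full block: acquire on entry as well.
  explicit fenced_block_sketch(full_t)
  {
    std::atomic_thread_fence(std::memory_order_acquire);
  }

  ~fenced_block_sketch()
  {
    std::atomic_thread_fence(std::memory_order_release);
  }
};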

View File

@@ -0,0 +1,33 @@
//
// detail/functional.hpp
// ~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_FUNCTIONAL_HPP
#define ASIO_DETAIL_FUNCTIONAL_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <functional>
namespace asio {
namespace detail {
using std::function;
} // namespace detail
using std::ref;
using std::reference_wrapper;
} // namespace asio
#endif // ASIO_DETAIL_FUNCTIONAL_HPP

View File

@@ -0,0 +1,32 @@
//
// detail/future.hpp
// ~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_FUTURE_HPP
#define ASIO_DETAIL_FUTURE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <future>
// Even though the future header is available, libstdc++ may not implement the
// std::future class itself. However, we need to have already included the
// future header to reliably test for _GLIBCXX_HAS_GTHREADS.
#if defined(__GNUC__) && !defined(ASIO_HAS_CLANG_LIBCXX)
# if defined(_GLIBCXX_HAS_GTHREADS)
# define ASIO_HAS_STD_FUTURE_CLASS 1
# endif // defined(_GLIBCXX_HAS_GTHREADS)
#else // defined(__GNUC__) && !defined(ASIO_HAS_CLANG_LIBCXX)
# define ASIO_HAS_STD_FUTURE_CLASS 1
#endif // defined(__GNUC__) && !defined(ASIO_HAS_CLANG_LIBCXX)
#endif // ASIO_DETAIL_FUTURE_HPP

View File

@@ -0,0 +1,50 @@
//
// detail/global.hpp
// ~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_GLOBAL_HPP
#define ASIO_DETAIL_GLOBAL_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if !defined(ASIO_HAS_THREADS)
# include "asio/detail/null_global.hpp"
#elif defined(ASIO_WINDOWS)
# include "asio/detail/win_global.hpp"
#elif defined(ASIO_HAS_PTHREADS)
# include "asio/detail/posix_global.hpp"
#else
# include "asio/detail/std_global.hpp"
#endif
namespace asio {
namespace detail {
template <typename T>
inline T& global()
{
#if !defined(ASIO_HAS_THREADS)
return null_global<T>();
#elif defined(ASIO_WINDOWS)
return win_global<T>();
#elif defined(ASIO_HAS_PTHREADS)
return posix_global<T>();
#else
return std_global<T>();
#endif
}
} // namespace detail
} // namespace asio
#endif // ASIO_DETAIL_GLOBAL_HPP
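
A hedged sketch of the likely std_global shape: under C++11 rules a function-local static is initialised exactly once and thread-safely, giving one lazily constructed instance per type across the program (my_registry below is illustrative).

template <typename T>
T& std_global_sketch()
{
  static T t; // "magic static": thread-safe one-time initialisation
  return t;
}

// Every call with the same T yields the same object:
//   my_registry& r = std_global_sketch<my_registry>();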

View File

@@ -0,0 +1,123 @@
//
// detail/handler_alloc_helpers.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_HANDLER_ALLOC_HELPERS_HPP
#define ASIO_DETAIL_HANDLER_ALLOC_HELPERS_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/memory.hpp"
#include "asio/detail/recycling_allocator.hpp"
#include "asio/associated_allocator.hpp"
#include "asio/detail/push_options.hpp"
#define ASIO_DEFINE_TAGGED_HANDLER_PTR(purpose, op) \
struct ptr \
{ \
Handler* h; \
op* v; \
op* p; \
~ptr() \
{ \
reset(); \
} \
static op* allocate(Handler& handler) \
{ \
typedef typename ::asio::associated_allocator< \
Handler>::type associated_allocator_type; \
typedef typename ::asio::detail::get_recycling_allocator< \
associated_allocator_type, purpose>::type default_allocator_type; \
ASIO_REBIND_ALLOC(default_allocator_type, op) a( \
::asio::detail::get_recycling_allocator< \
associated_allocator_type, purpose>::get( \
::asio::get_associated_allocator(handler))); \
return a.allocate(1); \
} \
void reset() \
{ \
if (p) \
{ \
p->~op(); \
p = 0; \
} \
if (v) \
{ \
typedef typename ::asio::associated_allocator< \
Handler>::type associated_allocator_type; \
typedef typename ::asio::detail::get_recycling_allocator< \
associated_allocator_type, purpose>::type default_allocator_type; \
ASIO_REBIND_ALLOC(default_allocator_type, op) a( \
::asio::detail::get_recycling_allocator< \
associated_allocator_type, purpose>::get( \
::asio::get_associated_allocator(*h))); \
a.deallocate(static_cast<op*>(v), 1); \
v = 0; \
} \
} \
} \
/**/
#define ASIO_DEFINE_HANDLER_PTR(op) \
ASIO_DEFINE_TAGGED_HANDLER_PTR( \
::asio::detail::thread_info_base::default_tag, op ) \
/**/
#define ASIO_DEFINE_TAGGED_HANDLER_ALLOCATOR_PTR(purpose, op) \
struct ptr \
{ \
const Alloc* a; \
void* v; \
op* p; \
~ptr() \
{ \
reset(); \
} \
static op* allocate(const Alloc& a) \
{ \
typedef typename ::asio::detail::get_recycling_allocator< \
Alloc, purpose>::type recycling_allocator_type; \
ASIO_REBIND_ALLOC(recycling_allocator_type, op) a1( \
::asio::detail::get_recycling_allocator< \
Alloc, purpose>::get(a)); \
return a1.allocate(1); \
} \
void reset() \
{ \
if (p) \
{ \
p->~op(); \
p = 0; \
} \
if (v) \
{ \
typedef typename ::asio::detail::get_recycling_allocator< \
Alloc, purpose>::type recycling_allocator_type; \
ASIO_REBIND_ALLOC(recycling_allocator_type, op) a1( \
::asio::detail::get_recycling_allocator< \
Alloc, purpose>::get(*a)); \
a1.deallocate(static_cast<op*>(v), 1); \
v = 0; \
} \
} \
} \
/**/
#define ASIO_DEFINE_HANDLER_ALLOCATOR_PTR(op) \
ASIO_DEFINE_TAGGED_HANDLER_ALLOCATOR_PTR( \
::asio::detail::thread_info_base::default_tag, op ) \
/**/
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_HANDLER_ALLOC_HELPERS_HPP
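
The generated ptr struct is used in one fixed idiom throughout the operations in this commit: allocate raw storage from the handler's associated allocator, placement-new the operation into it, hand the pointer off, then zero the fields so the destructor's reset() becomes a no-op. If construction throws instead, ~ptr() still frees the block. A sketch of that idiom; my_op is hypothetical.

#include "asio/detail/handler_alloc_helpers.hpp"
#include <new>

template <typename Handler>
struct my_op // hypothetical operation type
{
  ASIO_DEFINE_HANDLER_PTR(my_op);

  explicit my_op(Handler& h) : handler_(static_cast<Handler&&>(h)) {}

  Handler handler_;
};

template <typename Handler>
void allocate_op_sketch(Handler& handler)
{
  typedef my_op<Handler> op;
  typename op::ptr p = { asio::detail::addressof(handler),
    op::ptr::allocate(handler), 0 };
  p.p = new (p.v) op(handler);  // construct in the allocated block
  // ... hand p.p to a scheduler or reactor here ...
  p.v = p.p = 0;                // ownership transferred; ~ptr() is a no-op
}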

View File

@@ -0,0 +1,45 @@
//
// detail/handler_cont_helpers.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_HANDLER_CONT_HELPERS_HPP
#define ASIO_DETAIL_HANDLER_CONT_HELPERS_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/memory.hpp"
#include "asio/handler_continuation_hook.hpp"
#include "asio/detail/push_options.hpp"
// Calls to asio_handler_is_continuation must be made from a namespace that
// does not contain overloads of this function. This namespace is defined here
// for that purpose.
namespace asio_handler_cont_helpers {
template <typename Context>
inline bool is_continuation(Context& context)
{
#if !defined(ASIO_HAS_HANDLER_HOOKS)
return false;
#else
using asio::asio_handler_is_continuation;
return asio_handler_is_continuation(
asio::detail::addressof(context));
#endif
}
} // namespace asio_handler_cont_helpers
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_HANDLER_CONT_HELPERS_HPP
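
A usage sketch of the hook above: where handler hooks are enabled, a handler type can report that it represents a continuation of the current call by supplying an asio_handler_is_continuation overload that the unqualified call finds via ADL. my_handler is illustrative.

namespace myapp {

struct my_handler
{
  bool is_continuation_;

  void operator()() {}

  // Found through argument-dependent lookup on the pointer argument.
  friend bool asio_handler_is_continuation(my_handler* h)
  {
    return h->is_continuation_;
  }
};

} // namespace myapp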

View File

@@ -0,0 +1,264 @@
//
// detail/handler_tracking.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_HANDLER_TRACKING_HPP
#define ASIO_DETAIL_HANDLER_TRACKING_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
namespace asio {
class execution_context;
} // namespace asio
#if defined(ASIO_CUSTOM_HANDLER_TRACKING)
# include ASIO_CUSTOM_HANDLER_TRACKING
#elif defined(ASIO_ENABLE_HANDLER_TRACKING)
# include "asio/error_code.hpp"
# include "asio/detail/cstdint.hpp"
# include "asio/detail/static_mutex.hpp"
# include "asio/detail/tss_ptr.hpp"
#endif // defined(ASIO_ENABLE_HANDLER_TRACKING)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
#if defined(ASIO_CUSTOM_HANDLER_TRACKING)
// The user-specified header must define the following macros:
// - ASIO_INHERIT_TRACKED_HANDLER
// - ASIO_ALSO_INHERIT_TRACKED_HANDLER
// - ASIO_HANDLER_TRACKING_INIT
// - ASIO_HANDLER_CREATION(args)
// - ASIO_HANDLER_COMPLETION(args)
// - ASIO_HANDLER_INVOCATION_BEGIN(args)
// - ASIO_HANDLER_INVOCATION_END
// - ASIO_HANDLER_OPERATION(args)
// - ASIO_HANDLER_REACTOR_REGISTRATION(args)
// - ASIO_HANDLER_REACTOR_DEREGISTRATION(args)
// - ASIO_HANDLER_REACTOR_READ_EVENT
// - ASIO_HANDLER_REACTOR_WRITE_EVENT
// - ASIO_HANDLER_REACTOR_ERROR_EVENT
// - ASIO_HANDLER_REACTOR_EVENTS(args)
// - ASIO_HANDLER_REACTOR_OPERATION(args)
# if !defined(ASIO_ENABLE_HANDLER_TRACKING)
# define ASIO_ENABLE_HANDLER_TRACKING 1
# endif // !defined(ASIO_ENABLE_HANDLER_TRACKING)
#elif defined(ASIO_ENABLE_HANDLER_TRACKING)
class handler_tracking
{
public:
class completion;
// Base class for objects containing tracked handlers.
class tracked_handler
{
private:
// Only the handler_tracking class will have access to the id.
friend class handler_tracking;
friend class completion;
uint64_t id_;
protected:
// Constructor initialises with no id.
tracked_handler() : id_(0) {}
// Prevent deletion through this type.
~tracked_handler() {}
};
// Initialise the tracking system.
ASIO_DECL static void init();
class location
{
public:
// Constructor adds a location to the stack.
ASIO_DECL explicit location(const char* file,
int line, const char* func);
// Destructor removes a location from the stack.
ASIO_DECL ~location();
private:
// Disallow copying and assignment.
location(const location&) = delete;
location& operator=(const location&) = delete;
friend class handler_tracking;
const char* file_;
int line_;
const char* func_;
location* next_;
};
// Record the creation of a tracked handler.
ASIO_DECL static void creation(
execution_context& context, tracked_handler& h,
const char* object_type, void* object,
uintmax_t native_handle, const char* op_name);
class completion
{
public:
// Constructor records that handler is to be invoked with no arguments.
ASIO_DECL explicit completion(const tracked_handler& h);
// Destructor records only when an exception is thrown from the handler, or
// if the memory is being freed without the handler having been invoked.
ASIO_DECL ~completion();
// Records that handler is to be invoked with no arguments.
ASIO_DECL void invocation_begin();
  // Records that handler is to be invoked with one argument.
ASIO_DECL void invocation_begin(const asio::error_code& ec);
  // Records that handler is to be invoked with two arguments.
ASIO_DECL void invocation_begin(
const asio::error_code& ec, std::size_t bytes_transferred);
  // Records that handler is to be invoked with two arguments.
ASIO_DECL void invocation_begin(
const asio::error_code& ec, int signal_number);
  // Records that handler is to be invoked with two arguments.
ASIO_DECL void invocation_begin(
const asio::error_code& ec, const char* arg);
// Record that handler invocation has ended.
ASIO_DECL void invocation_end();
private:
friend class handler_tracking;
uint64_t id_;
bool invoked_;
completion* next_;
};
// Record an operation that is not directly associated with a handler.
ASIO_DECL static void operation(execution_context& context,
const char* object_type, void* object,
uintmax_t native_handle, const char* op_name);
// Record that a descriptor has been registered with the reactor.
ASIO_DECL static void reactor_registration(execution_context& context,
uintmax_t native_handle, uintmax_t registration);
// Record that a descriptor has been deregistered from the reactor.
ASIO_DECL static void reactor_deregistration(execution_context& context,
uintmax_t native_handle, uintmax_t registration);
// Record a reactor-based operation that is associated with a handler.
ASIO_DECL static void reactor_events(execution_context& context,
uintmax_t registration, unsigned events);
// Record a reactor-based operation that is associated with a handler.
ASIO_DECL static void reactor_operation(
const tracked_handler& h, const char* op_name,
const asio::error_code& ec);
// Record a reactor-based operation that is associated with a handler.
ASIO_DECL static void reactor_operation(
const tracked_handler& h, const char* op_name,
const asio::error_code& ec, std::size_t bytes_transferred);
// Write a line of output.
ASIO_DECL static void write_line(const char* format, ...);
private:
struct tracking_state;
ASIO_DECL static tracking_state* get_state();
};
# define ASIO_INHERIT_TRACKED_HANDLER \
: public asio::detail::handler_tracking::tracked_handler
# define ASIO_ALSO_INHERIT_TRACKED_HANDLER \
, public asio::detail::handler_tracking::tracked_handler
# define ASIO_HANDLER_TRACKING_INIT \
asio::detail::handler_tracking::init()
# define ASIO_HANDLER_LOCATION(args) \
asio::detail::handler_tracking::location tracked_location args
# define ASIO_HANDLER_CREATION(args) \
asio::detail::handler_tracking::creation args
# define ASIO_HANDLER_COMPLETION(args) \
asio::detail::handler_tracking::completion tracked_completion args
# define ASIO_HANDLER_INVOCATION_BEGIN(args) \
tracked_completion.invocation_begin args
# define ASIO_HANDLER_INVOCATION_END \
tracked_completion.invocation_end()
# define ASIO_HANDLER_OPERATION(args) \
asio::detail::handler_tracking::operation args
# define ASIO_HANDLER_REACTOR_REGISTRATION(args) \
asio::detail::handler_tracking::reactor_registration args
# define ASIO_HANDLER_REACTOR_DEREGISTRATION(args) \
asio::detail::handler_tracking::reactor_deregistration args
# define ASIO_HANDLER_REACTOR_READ_EVENT 1
# define ASIO_HANDLER_REACTOR_WRITE_EVENT 2
# define ASIO_HANDLER_REACTOR_ERROR_EVENT 4
# define ASIO_HANDLER_REACTOR_EVENTS(args) \
asio::detail::handler_tracking::reactor_events args
# define ASIO_HANDLER_REACTOR_OPERATION(args) \
asio::detail::handler_tracking::reactor_operation args
#else // defined(ASIO_ENABLE_HANDLER_TRACKING)
# define ASIO_INHERIT_TRACKED_HANDLER
# define ASIO_ALSO_INHERIT_TRACKED_HANDLER
# define ASIO_HANDLER_TRACKING_INIT (void)0
# define ASIO_HANDLER_LOCATION(loc) (void)0
# define ASIO_HANDLER_CREATION(args) (void)0
# define ASIO_HANDLER_COMPLETION(args) (void)0
# define ASIO_HANDLER_INVOCATION_BEGIN(args) (void)0
# define ASIO_HANDLER_INVOCATION_END (void)0
# define ASIO_HANDLER_OPERATION(args) (void)0
# define ASIO_HANDLER_REACTOR_REGISTRATION(args) (void)0
# define ASIO_HANDLER_REACTOR_DEREGISTRATION(args) (void)0
# define ASIO_HANDLER_REACTOR_READ_EVENT 0
# define ASIO_HANDLER_REACTOR_WRITE_EVENT 0
# define ASIO_HANDLER_REACTOR_ERROR_EVENT 0
# define ASIO_HANDLER_REACTOR_EVENTS(args) (void)0
# define ASIO_HANDLER_REACTOR_OPERATION(args) (void)0
#endif // defined(ASIO_ENABLE_HANDLER_TRACKING)
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#if defined(ASIO_HEADER_ONLY)
# include "asio/detail/impl/handler_tracking.ipp"
#endif // defined(ASIO_HEADER_ONLY)
#endif // ASIO_DETAIL_HANDLER_TRACKING_HPP
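
A hedged sketch of a custom tracking header selected via ASIO_CUSTOM_HANDLER_TRACKING. It must define the macros enumerated in the comment near the top of this file; here most expand to no-ops, mirroring the disabled defaults above, with completion events crudely logged to stderr.

// my_tracking.hpp (illustrative), enabled with e.g.
//   -DASIO_CUSTOM_HANDLER_TRACKING='"my_tracking.hpp"'
#include <cstdio>

#define ASIO_INHERIT_TRACKED_HANDLER
#define ASIO_ALSO_INHERIT_TRACKED_HANDLER
#define ASIO_HANDLER_TRACKING_INIT (void)0
#define ASIO_HANDLER_CREATION(args) (void)0
#define ASIO_HANDLER_COMPLETION(args) \
  std::fprintf(stderr, "handler completion\n")
#define ASIO_HANDLER_INVOCATION_BEGIN(args) (void)0
#define ASIO_HANDLER_INVOCATION_END (void)0
#define ASIO_HANDLER_OPERATION(args) (void)0
#define ASIO_HANDLER_REACTOR_REGISTRATION(args) (void)0
#define ASIO_HANDLER_REACTOR_DEREGISTRATION(args) (void)0
#define ASIO_HANDLER_REACTOR_READ_EVENT 0
#define ASIO_HANDLER_REACTOR_WRITE_EVENT 0
#define ASIO_HANDLER_REACTOR_ERROR_EVENT 0
#define ASIO_HANDLER_REACTOR_EVENTS(args) (void)0
#define ASIO_HANDLER_REACTOR_OPERATION(args) (void)0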

View File

@@ -0,0 +1,531 @@
//
// detail/handler_type_requirements.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_HANDLER_TYPE_REQUIREMENTS_HPP
#define ASIO_DETAIL_HANDLER_TYPE_REQUIREMENTS_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
// Older versions of gcc have difficulty compiling the sizeof expressions where
// we test the handler type requirements. We'll disable checking of handler type
// requirements for those compilers, but otherwise enable it by default.
#if !defined(ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS)
# if !defined(__GNUC__) || (__GNUC__ >= 4)
# define ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS 1
# endif // !defined(__GNUC__) || (__GNUC__ >= 4)
#endif // !defined(ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS)
// With C++0x we can use a combination of enhanced SFINAE and static_assert to
// generate better template error messages. As this technique is not yet widely
// portable, we'll only enable it for tested compilers.
#if !defined(ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT)
# if defined(__GNUC__)
# if ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4)
# if defined(__GXX_EXPERIMENTAL_CXX0X__)
# define ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT 1
# endif // defined(__GXX_EXPERIMENTAL_CXX0X__)
# endif // ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 5)) || (__GNUC__ > 4)
# endif // defined(__GNUC__)
# if defined(ASIO_MSVC)
# if (_MSC_VER >= 1600)
# define ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT 1
# endif // (_MSC_VER >= 1600)
# endif // defined(ASIO_MSVC)
# if defined(__clang__)
# if __has_feature(__cxx_static_assert__)
# define ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT 1
# endif // __has_feature(__cxx_static_assert__)
# endif // defined(__clang__)
#endif // !defined(ASIO_DISABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT)
#if defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS)
# include "asio/async_result.hpp"
#endif // defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS)
namespace asio {
namespace detail {
#if defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS)
# if defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT)
template <typename Handler>
auto zero_arg_copyable_handler_test(Handler h, void*)
-> decltype(
sizeof(Handler(static_cast<const Handler&>(h))),
(static_cast<Handler&&>(h)()),
char(0));
template <typename Handler>
char (&zero_arg_copyable_handler_test(Handler, ...))[2];
template <typename Handler, typename Arg1>
auto one_arg_handler_test(Handler h, Arg1* a1)
-> decltype(
sizeof(Handler(static_cast<Handler&&>(h))),
(static_cast<Handler&&>(h)(*a1)),
char(0));
template <typename Handler>
char (&one_arg_handler_test(Handler h, ...))[2];
template <typename Handler, typename Arg1, typename Arg2>
auto two_arg_handler_test(Handler h, Arg1* a1, Arg2* a2)
-> decltype(
sizeof(Handler(static_cast<Handler&&>(h))),
(static_cast<Handler&&>(h)(*a1, *a2)),
char(0));
template <typename Handler>
char (&two_arg_handler_test(Handler, ...))[2];
template <typename Handler, typename Arg1, typename Arg2>
auto two_arg_move_handler_test(Handler h, Arg1* a1, Arg2* a2)
-> decltype(
sizeof(Handler(static_cast<Handler&&>(h))),
(static_cast<Handler&&>(h)(
*a1, static_cast<Arg2&&>(*a2))),
char(0));
template <typename Handler>
char (&two_arg_move_handler_test(Handler, ...))[2];
# define ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT(expr, msg) \
static_assert(expr, msg);
# else // defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT)
# define ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT(expr, msg)
# endif // defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS_ASSERT)
template <typename T> T& lvref();
template <typename T> T& lvref(T);
template <typename T> const T& clvref();
template <typename T> const T& clvref(T);
template <typename T> T rvref();
template <typename T> T rvref(T);
template <typename T> T rorlvref();
template <typename T> char argbyv(T);
template <int>
struct handler_type_requirements
{
};
#define ASIO_READ_HANDLER_CHECK( \
handler_type, handler) \
\
typedef ASIO_HANDLER_TYPE(handler_type, \
void(asio::error_code, std::size_t)) \
asio_true_handler_type; \
\
ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \
sizeof(asio::detail::two_arg_handler_test( \
asio::detail::rvref< \
asio_true_handler_type>(), \
static_cast<const asio::error_code*>(0), \
static_cast<const std::size_t*>(0))) == 1, \
"ReadHandler type requirements not met") \
\
typedef asio::detail::handler_type_requirements< \
sizeof( \
asio::detail::argbyv( \
asio::detail::rvref< \
asio_true_handler_type>())) + \
sizeof( \
asio::detail::rorlvref< \
asio_true_handler_type>()( \
asio::detail::lvref<const asio::error_code>(), \
asio::detail::lvref<const std::size_t>()), \
char(0))> ASIO_UNUSED_TYPEDEF
#define ASIO_WRITE_HANDLER_CHECK( \
handler_type, handler) \
\
typedef ASIO_HANDLER_TYPE(handler_type, \
void(asio::error_code, std::size_t)) \
asio_true_handler_type; \
\
ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \
sizeof(asio::detail::two_arg_handler_test( \
asio::detail::rvref< \
asio_true_handler_type>(), \
static_cast<const asio::error_code*>(0), \
static_cast<const std::size_t*>(0))) == 1, \
"WriteHandler type requirements not met") \
\
typedef asio::detail::handler_type_requirements< \
sizeof( \
asio::detail::argbyv( \
asio::detail::rvref< \
asio_true_handler_type>())) + \
sizeof( \
asio::detail::rorlvref< \
asio_true_handler_type>()( \
asio::detail::lvref<const asio::error_code>(), \
asio::detail::lvref<const std::size_t>()), \
char(0))> ASIO_UNUSED_TYPEDEF
#define ASIO_ACCEPT_HANDLER_CHECK( \
handler_type, handler) \
\
typedef ASIO_HANDLER_TYPE(handler_type, \
void(asio::error_code)) \
asio_true_handler_type; \
\
ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \
sizeof(asio::detail::one_arg_handler_test( \
asio::detail::rvref< \
asio_true_handler_type>(), \
static_cast<const asio::error_code*>(0))) == 1, \
"AcceptHandler type requirements not met") \
\
typedef asio::detail::handler_type_requirements< \
sizeof( \
asio::detail::argbyv( \
asio::detail::rvref< \
asio_true_handler_type>())) + \
sizeof( \
asio::detail::rorlvref< \
asio_true_handler_type>()( \
asio::detail::lvref<const asio::error_code>()), \
char(0))> ASIO_UNUSED_TYPEDEF
#define ASIO_MOVE_ACCEPT_HANDLER_CHECK( \
handler_type, handler, socket_type) \
\
typedef ASIO_HANDLER_TYPE(handler_type, \
void(asio::error_code, socket_type)) \
asio_true_handler_type; \
\
ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \
sizeof(asio::detail::two_arg_move_handler_test( \
asio::detail::rvref< \
asio_true_handler_type>(), \
static_cast<const asio::error_code*>(0), \
static_cast<socket_type*>(0))) == 1, \
"MoveAcceptHandler type requirements not met") \
\
typedef asio::detail::handler_type_requirements< \
sizeof( \
asio::detail::argbyv( \
asio::detail::rvref< \
asio_true_handler_type>())) + \
sizeof( \
asio::detail::rorlvref< \
asio_true_handler_type>()( \
asio::detail::lvref<const asio::error_code>(), \
asio::detail::rvref<socket_type>()), \
char(0))> ASIO_UNUSED_TYPEDEF
#define ASIO_CONNECT_HANDLER_CHECK( \
handler_type, handler) \
\
typedef ASIO_HANDLER_TYPE(handler_type, \
void(asio::error_code)) \
asio_true_handler_type; \
\
ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \
sizeof(asio::detail::one_arg_handler_test( \
asio::detail::rvref< \
asio_true_handler_type>(), \
static_cast<const asio::error_code*>(0))) == 1, \
"ConnectHandler type requirements not met") \
\
typedef asio::detail::handler_type_requirements< \
sizeof( \
asio::detail::argbyv( \
asio::detail::rvref< \
asio_true_handler_type>())) + \
sizeof( \
asio::detail::rorlvref< \
asio_true_handler_type>()( \
asio::detail::lvref<const asio::error_code>()), \
char(0))> ASIO_UNUSED_TYPEDEF
#define ASIO_RANGE_CONNECT_HANDLER_CHECK( \
handler_type, handler, endpoint_type) \
\
typedef ASIO_HANDLER_TYPE(handler_type, \
void(asio::error_code, endpoint_type)) \
asio_true_handler_type; \
\
ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \
sizeof(asio::detail::two_arg_handler_test( \
asio::detail::rvref< \
asio_true_handler_type>(), \
static_cast<const asio::error_code*>(0), \
static_cast<const endpoint_type*>(0))) == 1, \
"RangeConnectHandler type requirements not met") \
\
typedef asio::detail::handler_type_requirements< \
sizeof( \
asio::detail::argbyv( \
asio::detail::rvref< \
asio_true_handler_type>())) + \
sizeof( \
asio::detail::rorlvref< \
asio_true_handler_type>()( \
asio::detail::lvref<const asio::error_code>(), \
asio::detail::lvref<const endpoint_type>()), \
char(0))> ASIO_UNUSED_TYPEDEF
#define ASIO_ITERATOR_CONNECT_HANDLER_CHECK( \
handler_type, handler, iter_type) \
\
typedef ASIO_HANDLER_TYPE(handler_type, \
void(asio::error_code, iter_type)) \
asio_true_handler_type; \
\
ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \
sizeof(asio::detail::two_arg_handler_test( \
asio::detail::rvref< \
asio_true_handler_type>(), \
static_cast<const asio::error_code*>(0), \
static_cast<const iter_type*>(0))) == 1, \
"IteratorConnectHandler type requirements not met") \
\
typedef asio::detail::handler_type_requirements< \
sizeof( \
asio::detail::argbyv( \
asio::detail::rvref< \
asio_true_handler_type>())) + \
sizeof( \
asio::detail::rorlvref< \
asio_true_handler_type>()( \
asio::detail::lvref<const asio::error_code>(), \
asio::detail::lvref<const iter_type>()), \
char(0))> ASIO_UNUSED_TYPEDEF
#define ASIO_RESOLVE_HANDLER_CHECK( \
handler_type, handler, range_type) \
\
typedef ASIO_HANDLER_TYPE(handler_type, \
void(asio::error_code, range_type)) \
asio_true_handler_type; \
\
ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \
sizeof(asio::detail::two_arg_handler_test( \
asio::detail::rvref< \
asio_true_handler_type>(), \
static_cast<const asio::error_code*>(0), \
static_cast<const range_type*>(0))) == 1, \
"ResolveHandler type requirements not met") \
\
typedef asio::detail::handler_type_requirements< \
sizeof( \
asio::detail::argbyv( \
asio::detail::rvref< \
asio_true_handler_type>())) + \
sizeof( \
asio::detail::rorlvref< \
asio_true_handler_type>()( \
asio::detail::lvref<const asio::error_code>(), \
asio::detail::lvref<const range_type>()), \
char(0))> ASIO_UNUSED_TYPEDEF
#define ASIO_WAIT_HANDLER_CHECK( \
handler_type, handler) \
\
typedef ASIO_HANDLER_TYPE(handler_type, \
void(asio::error_code)) \
asio_true_handler_type; \
\
ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \
sizeof(asio::detail::one_arg_handler_test( \
asio::detail::rvref< \
asio_true_handler_type>(), \
static_cast<const asio::error_code*>(0))) == 1, \
"WaitHandler type requirements not met") \
\
typedef asio::detail::handler_type_requirements< \
sizeof( \
asio::detail::argbyv( \
asio::detail::rvref< \
asio_true_handler_type>())) + \
sizeof( \
asio::detail::rorlvref< \
asio_true_handler_type>()( \
asio::detail::lvref<const asio::error_code>()), \
char(0))> ASIO_UNUSED_TYPEDEF
#define ASIO_SIGNAL_HANDLER_CHECK( \
handler_type, handler) \
\
typedef ASIO_HANDLER_TYPE(handler_type, \
void(asio::error_code, int)) \
asio_true_handler_type; \
\
ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \
sizeof(asio::detail::two_arg_handler_test( \
asio::detail::rvref< \
asio_true_handler_type>(), \
static_cast<const asio::error_code*>(0), \
static_cast<const int*>(0))) == 1, \
"SignalHandler type requirements not met") \
\
typedef asio::detail::handler_type_requirements< \
sizeof( \
asio::detail::argbyv( \
asio::detail::rvref< \
asio_true_handler_type>())) + \
sizeof( \
asio::detail::rorlvref< \
asio_true_handler_type>()( \
asio::detail::lvref<const asio::error_code>(), \
asio::detail::lvref<const int>()), \
char(0))> ASIO_UNUSED_TYPEDEF
#define ASIO_HANDSHAKE_HANDLER_CHECK( \
handler_type, handler) \
\
typedef ASIO_HANDLER_TYPE(handler_type, \
void(asio::error_code)) \
asio_true_handler_type; \
\
ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \
sizeof(asio::detail::one_arg_handler_test( \
asio::detail::rvref< \
asio_true_handler_type>(), \
static_cast<const asio::error_code*>(0))) == 1, \
"HandshakeHandler type requirements not met") \
\
typedef asio::detail::handler_type_requirements< \
sizeof( \
asio::detail::argbyv( \
asio::detail::rvref< \
asio_true_handler_type>())) + \
sizeof( \
asio::detail::rorlvref< \
asio_true_handler_type>()( \
asio::detail::lvref<const asio::error_code>()), \
char(0))> ASIO_UNUSED_TYPEDEF
#define ASIO_BUFFERED_HANDSHAKE_HANDLER_CHECK( \
handler_type, handler) \
\
typedef ASIO_HANDLER_TYPE(handler_type, \
void(asio::error_code, std::size_t)) \
asio_true_handler_type; \
\
ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \
sizeof(asio::detail::two_arg_handler_test( \
asio::detail::rvref< \
asio_true_handler_type>(), \
static_cast<const asio::error_code*>(0), \
static_cast<const std::size_t*>(0))) == 1, \
"BufferedHandshakeHandler type requirements not met") \
\
typedef asio::detail::handler_type_requirements< \
sizeof( \
asio::detail::argbyv( \
asio::detail::rvref< \
asio_true_handler_type>())) + \
sizeof( \
asio::detail::rorlvref< \
asio_true_handler_type>()( \
asio::detail::lvref<const asio::error_code>(), \
asio::detail::lvref<const std::size_t>()), \
char(0))> ASIO_UNUSED_TYPEDEF
#define ASIO_SHUTDOWN_HANDLER_CHECK( \
handler_type, handler) \
\
typedef ASIO_HANDLER_TYPE(handler_type, \
void(asio::error_code)) \
asio_true_handler_type; \
\
ASIO_HANDLER_TYPE_REQUIREMENTS_ASSERT( \
sizeof(asio::detail::one_arg_handler_test( \
asio::detail::rvref< \
asio_true_handler_type>(), \
static_cast<const asio::error_code*>(0))) == 1, \
"ShutdownHandler type requirements not met") \
\
typedef asio::detail::handler_type_requirements< \
sizeof( \
asio::detail::argbyv( \
asio::detail::rvref< \
asio_true_handler_type>())) + \
sizeof( \
asio::detail::rorlvref< \
asio_true_handler_type>()( \
asio::detail::lvref<const asio::error_code>()), \
char(0))> ASIO_UNUSED_TYPEDEF
#else // defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS)
#define ASIO_LEGACY_COMPLETION_HANDLER_CHECK( \
handler_type, handler) \
typedef int ASIO_UNUSED_TYPEDEF
#define ASIO_READ_HANDLER_CHECK( \
handler_type, handler) \
typedef int ASIO_UNUSED_TYPEDEF
#define ASIO_WRITE_HANDLER_CHECK( \
handler_type, handler) \
typedef int ASIO_UNUSED_TYPEDEF
#define ASIO_ACCEPT_HANDLER_CHECK( \
handler_type, handler) \
typedef int ASIO_UNUSED_TYPEDEF
#define ASIO_MOVE_ACCEPT_HANDLER_CHECK( \
handler_type, handler, socket_type) \
typedef int ASIO_UNUSED_TYPEDEF
#define ASIO_CONNECT_HANDLER_CHECK( \
handler_type, handler) \
typedef int ASIO_UNUSED_TYPEDEF
#define ASIO_RANGE_CONNECT_HANDLER_CHECK( \
handler_type, handler, iter_type) \
typedef int ASIO_UNUSED_TYPEDEF
#define ASIO_ITERATOR_CONNECT_HANDLER_CHECK( \
handler_type, handler, iter_type) \
typedef int ASIO_UNUSED_TYPEDEF
#define ASIO_RESOLVE_HANDLER_CHECK( \
handler_type, handler, iter_type) \
typedef int ASIO_UNUSED_TYPEDEF
#define ASIO_WAIT_HANDLER_CHECK( \
handler_type, handler) \
typedef int ASIO_UNUSED_TYPEDEF
#define ASIO_SIGNAL_HANDLER_CHECK( \
handler_type, handler) \
typedef int ASIO_UNUSED_TYPEDEF
#define ASIO_HANDSHAKE_HANDLER_CHECK( \
handler_type, handler) \
typedef int ASIO_UNUSED_TYPEDEF
#define ASIO_BUFFERED_HANDSHAKE_HANDLER_CHECK( \
handler_type, handler) \
typedef int ASIO_UNUSED_TYPEDEF
#define ASIO_SHUTDOWN_HANDLER_CHECK( \
handler_type, handler) \
typedef int ASIO_UNUSED_TYPEDEF
#endif // defined(ASIO_ENABLE_HANDLER_TYPE_REQUIREMENTS)
} // namespace detail
} // namespace asio
#endif // ASIO_DETAIL_HANDLER_TYPE_REQUIREMENTS_HPP

View File

@@ -0,0 +1,511 @@
//
// detail/handler_work.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_HANDLER_WORK_HPP
#define ASIO_DETAIL_HANDLER_WORK_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/associated_allocator.hpp"
#include "asio/associated_executor.hpp"
#include "asio/associated_immediate_executor.hpp"
#include "asio/detail/initiate_dispatch.hpp"
#include "asio/detail/type_traits.hpp"
#include "asio/detail/work_dispatcher.hpp"
#include "asio/execution/allocator.hpp"
#include "asio/execution/blocking.hpp"
#include "asio/execution/executor.hpp"
#include "asio/execution/outstanding_work.hpp"
#include "asio/executor_work_guard.hpp"
#include "asio/prefer.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
class executor;
class io_context;
#if !defined(ASIO_USE_TS_EXECUTOR_AS_DEFAULT)
class any_completion_executor;
class any_io_executor;
#endif // !defined(ASIO_USE_TS_EXECUTOR_AS_DEFAULT)
namespace execution {
template <typename...> class any_executor;
} // namespace execution
namespace detail {
template <typename Executor, typename CandidateExecutor = void,
typename IoContext = io_context,
typename PolymorphicExecutor = executor, typename = void>
class handler_work_base
{
public:
explicit handler_work_base(int, int, const Executor& ex) noexcept
: executor_(asio::prefer(ex, execution::outstanding_work.tracked))
{
}
template <typename OtherExecutor>
handler_work_base(bool /*base1_owns_work*/, const Executor& ex,
const OtherExecutor& /*candidate*/) noexcept
: executor_(asio::prefer(ex, execution::outstanding_work.tracked))
{
}
handler_work_base(const handler_work_base& other) noexcept
: executor_(other.executor_)
{
}
handler_work_base(handler_work_base&& other) noexcept
: executor_(static_cast<executor_type&&>(other.executor_))
{
}
bool owns_work() const noexcept
{
return true;
}
template <typename Function, typename Handler>
void dispatch(Function& function, Handler& handler)
{
asio::prefer(executor_,
execution::allocator((get_associated_allocator)(handler))
).execute(static_cast<Function&&>(function));
}
private:
typedef decay_t<
prefer_result_t<Executor, execution::outstanding_work_t::tracked_t>
> executor_type;
executor_type executor_;
};
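// A rough, hand-written equivalent of what the primary template above does
// (sketch only; `ex` is any standard-executors-style executor, `fn` the
// completion function and `alloc` the handler's associated allocator):
//
//   auto tracked = asio::prefer(ex, execution::outstanding_work.tracked);
//   // ... outstanding work is counted while `tracked` is alive ...
//   asio::prefer(tracked,
//       execution::allocator(alloc)).execute(std::move(fn));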
template <typename Executor, typename CandidateExecutor,
typename IoContext, typename PolymorphicExecutor>
class handler_work_base<Executor, CandidateExecutor,
IoContext, PolymorphicExecutor,
enable_if_t<
!execution::is_executor<Executor>::value
&& (!is_same<Executor, PolymorphicExecutor>::value
|| !is_same<CandidateExecutor, void>::value)
>
>
{
public:
explicit handler_work_base(int, int, const Executor& ex) noexcept
: executor_(ex),
owns_work_(true)
{
executor_.on_work_started();
}
handler_work_base(bool /*base1_owns_work*/, const Executor& ex,
const Executor& candidate) noexcept
: executor_(ex),
owns_work_(ex != candidate)
{
if (owns_work_)
executor_.on_work_started();
}
template <typename OtherExecutor>
handler_work_base(bool /*base1_owns_work*/, const Executor& ex,
const OtherExecutor& /*candidate*/) noexcept
: executor_(ex),
owns_work_(true)
{
executor_.on_work_started();
}
handler_work_base(const handler_work_base& other) noexcept
: executor_(other.executor_),
owns_work_(other.owns_work_)
{
if (owns_work_)
executor_.on_work_started();
}
handler_work_base(handler_work_base&& other) noexcept
: executor_(static_cast<Executor&&>(other.executor_)),
owns_work_(other.owns_work_)
{
other.owns_work_ = false;
}
~handler_work_base()
{
if (owns_work_)
executor_.on_work_finished();
}
bool owns_work() const noexcept
{
return owns_work_;
}
template <typename Function, typename Handler>
void dispatch(Function& function, Handler& handler)
{
executor_.dispatch(static_cast<Function&&>(function),
asio::get_associated_allocator(handler));
}
private:
Executor executor_;
bool owns_work_;
};
template <typename Executor, typename IoContext, typename PolymorphicExecutor>
class handler_work_base<Executor, void, IoContext, PolymorphicExecutor,
enable_if_t<
is_same<
Executor,
typename IoContext::executor_type
>::value
>
>
{
public:
explicit handler_work_base(int, int, const Executor&)
{
}
bool owns_work() const noexcept
{
return false;
}
template <typename Function, typename Handler>
void dispatch(Function& function, Handler&)
{
// When using a native implementation, I/O completion handlers are
// already dispatched according to the execution context's executor's
// rules. We can call the function directly.
static_cast<Function&&>(function)();
}
};
template <typename Executor, typename IoContext>
class handler_work_base<Executor, void, IoContext, Executor>
{
public:
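  // If the polymorphic wrapper's target is the io_context's own executor
  // type, a default-constructed (null) executor is stored instead: the
  // native implementation already accounts for its own work, so no extra
  // work counting or dispatch is required on completion.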
explicit handler_work_base(int, int, const Executor& ex) noexcept
#if !defined(ASIO_NO_TYPEID)
: executor_(
ex.target_type() == typeid(typename IoContext::executor_type)
? Executor() : ex)
#else // !defined(ASIO_NO_TYPEID)
: executor_(ex)
#endif // !defined(ASIO_NO_TYPEID)
{
if (executor_)
executor_.on_work_started();
}
handler_work_base(bool /*base1_owns_work*/, const Executor& ex,
const Executor& candidate) noexcept
: executor_(ex != candidate ? ex : Executor())
{
if (executor_)
executor_.on_work_started();
}
template <typename OtherExecutor>
handler_work_base(bool /*base1_owns_work*/, const Executor& ex,
    const OtherExecutor& /*candidate*/) noexcept
: executor_(ex)
{
executor_.on_work_started();
}
handler_work_base(const handler_work_base& other) noexcept
: executor_(other.executor_)
{
if (executor_)
executor_.on_work_started();
}
handler_work_base(handler_work_base&& other) noexcept
: executor_(static_cast<Executor&&>(other.executor_))
{
}
~handler_work_base()
{
if (executor_)
executor_.on_work_finished();
}
bool owns_work() const noexcept
{
return !!executor_;
}
template <typename Function, typename Handler>
void dispatch(Function& function, Handler& handler)
{
executor_.dispatch(static_cast<Function&&>(function),
asio::get_associated_allocator(handler));
}
private:
Executor executor_;
};
template <typename... SupportableProperties, typename CandidateExecutor,
typename IoContext, typename PolymorphicExecutor>
class handler_work_base<execution::any_executor<SupportableProperties...>,
CandidateExecutor, IoContext, PolymorphicExecutor>
{
public:
typedef execution::any_executor<SupportableProperties...> executor_type;
explicit handler_work_base(int, int, const executor_type& ex) noexcept
#if !defined(ASIO_NO_TYPEID)
: executor_(
ex.target_type() == typeid(typename IoContext::executor_type)
? executor_type()
: asio::prefer(ex, execution::outstanding_work.tracked))
#else // !defined(ASIO_NO_TYPEID)
: executor_(asio::prefer(ex, execution::outstanding_work.tracked))
#endif // !defined(ASIO_NO_TYPEID)
{
}
handler_work_base(bool base1_owns_work, const executor_type& ex,
const executor_type& candidate) noexcept
: executor_(
!base1_owns_work && ex == candidate
? executor_type()
: asio::prefer(ex, execution::outstanding_work.tracked))
{
}
template <typename OtherExecutor>
handler_work_base(bool /*base1_owns_work*/, const executor_type& ex,
const OtherExecutor& /*candidate*/) noexcept
: executor_(asio::prefer(ex, execution::outstanding_work.tracked))
{
}
handler_work_base(const handler_work_base& other) noexcept
: executor_(other.executor_)
{
}
handler_work_base(handler_work_base&& other) noexcept
: executor_(static_cast<executor_type&&>(other.executor_))
{
}
bool owns_work() const noexcept
{
return !!executor_;
}
template <typename Function, typename Handler>
void dispatch(Function& function, Handler&)
{
executor_.execute(static_cast<Function&&>(function));
}
private:
executor_type executor_;
};
#if !defined(ASIO_USE_TS_EXECUTOR_AS_DEFAULT)
template <typename Executor, typename CandidateExecutor,
typename IoContext, typename PolymorphicExecutor>
class handler_work_base<
Executor, CandidateExecutor,
IoContext, PolymorphicExecutor,
enable_if_t<
is_same<Executor, any_completion_executor>::value
|| is_same<Executor, any_io_executor>::value
>
>
{
public:
typedef Executor executor_type;
explicit handler_work_base(int, int,
const executor_type& ex) noexcept
#if !defined(ASIO_NO_TYPEID)
: executor_(
ex.target_type() == typeid(typename IoContext::executor_type)
? executor_type()
: asio::prefer(ex, execution::outstanding_work.tracked))
#else // !defined(ASIO_NO_TYPEID)
: executor_(asio::prefer(ex, execution::outstanding_work.tracked))
#endif // !defined(ASIO_NO_TYPEID)
{
}
handler_work_base(bool base1_owns_work, const executor_type& ex,
const executor_type& candidate) noexcept
: executor_(
!base1_owns_work && ex == candidate
? executor_type()
: asio::prefer(ex, execution::outstanding_work.tracked))
{
}
template <typename OtherExecutor>
handler_work_base(bool /*base1_owns_work*/, const executor_type& ex,
const OtherExecutor& /*candidate*/) noexcept
: executor_(asio::prefer(ex, execution::outstanding_work.tracked))
{
}
handler_work_base(const handler_work_base& other) noexcept
: executor_(other.executor_)
{
}
handler_work_base(handler_work_base&& other) noexcept
: executor_(static_cast<executor_type&&>(other.executor_))
{
}
bool owns_work() const noexcept
{
return !!executor_;
}
template <typename Function, typename Handler>
void dispatch(Function& function, Handler&)
{
executor_.execute(static_cast<Function&&>(function));
}
private:
executor_type executor_;
};
#endif // !defined(ASIO_USE_TS_EXECUTOR_AS_DEFAULT)
template <typename Handler, typename IoExecutor, typename = void>
class handler_work :
handler_work_base<IoExecutor>,
handler_work_base<associated_executor_t<Handler, IoExecutor>, IoExecutor>
{
public:
typedef handler_work_base<IoExecutor> base1_type;
typedef handler_work_base<associated_executor_t<Handler, IoExecutor>,
IoExecutor> base2_type;
handler_work(Handler& handler, const IoExecutor& io_ex) noexcept
: base1_type(0, 0, io_ex),
base2_type(base1_type::owns_work(),
asio::get_associated_executor(handler, io_ex), io_ex)
{
}
template <typename Function>
void complete(Function& function, Handler& handler)
{
if (!base1_type::owns_work() && !base2_type::owns_work())
{
// When using a native implementation, I/O completion handlers are
// already dispatched according to the execution context's executor's
// rules. We can call the function directly.
static_cast<Function&&>(function)();
}
else
{
base2_type::dispatch(function, handler);
}
}
};
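// Typical lifetime of handler_work (sketch): an asynchronous operation
// constructs it at initiation time, keeping work on both the I/O executor
// and the handler's associated executor alive, then calls complete() exactly
// once when the result is ready:
//
//   handler_work<Handler, IoExecutor> w(handler, io_ex); // at initiation
//   // ... operation executes ...
//   w.complete(function, handler); // at completion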
template <typename Handler, typename IoExecutor>
class handler_work<
Handler, IoExecutor,
enable_if_t<
is_same<
typename associated_executor<Handler,
IoExecutor>::asio_associated_executor_is_unspecialised,
void
>::value
>
> : handler_work_base<IoExecutor>
{
public:
typedef handler_work_base<IoExecutor> base1_type;
handler_work(Handler&, const IoExecutor& io_ex) noexcept
: base1_type(0, 0, io_ex)
{
}
template <typename Function>
void complete(Function& function, Handler& handler)
{
if (!base1_type::owns_work())
{
// When using a native implementation, I/O completion handlers are
// already dispatched according to the execution context's executor's
// rules. We can call the function directly.
static_cast<Function&&>(function)();
}
else
{
base1_type::dispatch(function, handler);
}
}
};
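// immediate_handler_work (below) is used for operations that can complete
// without a trip through the reactor: the completion is delivered via the
// handler's associated immediate executor, which by default forwards to the
// I/O executor with blocking disallowed (i.e. behaves like a post).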
template <typename Handler, typename IoExecutor>
class immediate_handler_work
{
public:
typedef handler_work<Handler, IoExecutor> handler_work_type;
explicit immediate_handler_work(handler_work_type&& w)
: handler_work_(static_cast<handler_work_type&&>(w))
{
}
template <typename Function>
void complete(Function& function, Handler& handler, const void* io_ex)
{
typedef associated_immediate_executor_t<Handler, IoExecutor>
immediate_ex_type;
immediate_ex_type immediate_ex = (get_associated_immediate_executor)(
handler, *static_cast<const IoExecutor*>(io_ex));
(initiate_dispatch_with_executor<immediate_ex_type>(immediate_ex))(
static_cast<Function&&>(function));
}
private:
handler_work_type handler_work_;
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_HANDLER_WORK_HPP

View File

@@ -0,0 +1,331 @@
//
// detail/hash_map.hpp
// ~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_HASH_MAP_HPP
#define ASIO_DETAIL_HASH_MAP_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <list>
#include <utility>
#include "asio/detail/assert.hpp"
#include "asio/detail/noncopyable.hpp"
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
# include "asio/detail/socket_types.hpp"
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
inline std::size_t calculate_hash_value(int i)
{
return static_cast<std::size_t>(i);
}
inline std::size_t calculate_hash_value(void* p)
{
return reinterpret_cast<std::size_t>(p)
+ (reinterpret_cast<std::size_t>(p) >> 3);
}
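// Heap pointers are typically at least 8-byte aligned, so their low bits
// carry no information; folding in the value shifted right by three spreads
// consecutive allocations across different buckets.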
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
inline std::size_t calculate_hash_value(SOCKET s)
{
return static_cast<std::size_t>(s);
}
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
// Note: assumes K and V are POD types.
template <typename K, typename V>
class hash_map
: private noncopyable
{
public:
// The type of a value in the map.
typedef std::pair<K, V> value_type;
// The type of a non-const iterator over the hash map.
typedef typename std::list<value_type>::iterator iterator;
// The type of a const iterator over the hash map.
typedef typename std::list<value_type>::const_iterator const_iterator;
// Constructor.
hash_map()
: size_(0),
buckets_(0),
num_buckets_(0)
{
}
// Destructor.
~hash_map()
{
delete[] buckets_;
}
// Get an iterator for the beginning of the map.
iterator begin()
{
return values_.begin();
}
// Get an iterator for the beginning of the map.
const_iterator begin() const
{
return values_.begin();
}
// Get an iterator for the end of the map.
iterator end()
{
return values_.end();
}
// Get an iterator for the end of the map.
const_iterator end() const
{
return values_.end();
}
// Check whether the map is empty.
bool empty() const
{
return values_.empty();
}
// Find an entry in the map.
iterator find(const K& k)
{
if (num_buckets_)
{
size_t bucket = calculate_hash_value(k) % num_buckets_;
iterator it = buckets_[bucket].first;
if (it == values_.end())
return values_.end();
iterator end_it = buckets_[bucket].last;
++end_it;
while (it != end_it)
{
if (it->first == k)
return it;
++it;
}
}
return values_.end();
}
// Find an entry in the map.
const_iterator find(const K& k) const
{
if (num_buckets_)
{
size_t bucket = calculate_hash_value(k) % num_buckets_;
const_iterator it = buckets_[bucket].first;
if (it == values_.end())
return it;
const_iterator end_it = buckets_[bucket].last;
++end_it;
while (it != end_it)
{
if (it->first == k)
return it;
++it;
}
}
return values_.end();
}
// Insert a new entry into the map.
std::pair<iterator, bool> insert(const value_type& v)
{
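    // Grow whenever the load factor would reach one element per bucket;
    // hash_size() picks the next size from a table of primes.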
if (size_ + 1 >= num_buckets_)
rehash(hash_size(size_ + 1));
size_t bucket = calculate_hash_value(v.first) % num_buckets_;
iterator it = buckets_[bucket].first;
if (it == values_.end())
{
buckets_[bucket].first = buckets_[bucket].last =
values_insert(values_.end(), v);
++size_;
return std::pair<iterator, bool>(buckets_[bucket].last, true);
}
iterator end_it = buckets_[bucket].last;
++end_it;
while (it != end_it)
{
if (it->first == v.first)
return std::pair<iterator, bool>(it, false);
++it;
}
buckets_[bucket].last = values_insert(end_it, v);
++size_;
return std::pair<iterator, bool>(buckets_[bucket].last, true);
}
// Erase an entry from the map.
void erase(iterator it)
{
ASIO_ASSERT(it != values_.end());
ASIO_ASSERT(num_buckets_ != 0);
size_t bucket = calculate_hash_value(it->first) % num_buckets_;
bool is_first = (it == buckets_[bucket].first);
bool is_last = (it == buckets_[bucket].last);
if (is_first && is_last)
buckets_[bucket].first = buckets_[bucket].last = values_.end();
else if (is_first)
++buckets_[bucket].first;
else if (is_last)
--buckets_[bucket].last;
values_erase(it);
--size_;
}
// Erase a key from the map.
void erase(const K& k)
{
iterator it = find(k);
if (it != values_.end())
erase(it);
}
// Remove all entries from the map.
void clear()
{
// Clear the values.
values_.clear();
size_ = 0;
// Initialise all buckets to empty.
iterator end_it = values_.end();
for (size_t i = 0; i < num_buckets_; ++i)
buckets_[i].first = buckets_[i].last = end_it;
}
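  // Usage sketch (illustrative only; K and V must be POD):
  //
  //   hash_map<int, void*> map;
  //   map.insert(std::make_pair(42, static_cast<void*>(0)));
  //   hash_map<int, void*>::iterator it = map.find(42);
  //   if (it != map.end())
  //     map.erase(it);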
private:
// Calculate the hash size for the specified number of elements.
static std::size_t hash_size(std::size_t num_elems)
{
static std::size_t sizes[] =
{
#if defined(ASIO_HASH_MAP_BUCKETS)
ASIO_HASH_MAP_BUCKETS
#else // ASIO_HASH_MAP_BUCKETS
3, 13, 23, 53, 97, 193, 389, 769, 1543, 3079, 6151, 12289, 24593,
49157, 98317, 196613, 393241, 786433, 1572869, 3145739, 6291469,
12582917, 25165843
#endif // ASIO_HASH_MAP_BUCKETS
};
const std::size_t nth_size = sizeof(sizes) / sizeof(std::size_t) - 1;
for (std::size_t i = 0; i < nth_size; ++i)
if (num_elems < sizes[i])
return sizes[i];
return sizes[nth_size];
}
// Re-initialise the hash from the values already contained in the list.
void rehash(std::size_t num_buckets)
{
if (num_buckets == num_buckets_)
return;
ASIO_ASSERT(num_buckets != 0);
iterator end_iter = values_.end();
// Update number of buckets and initialise all buckets to empty.
bucket_type* tmp = new bucket_type[num_buckets];
delete[] buckets_;
buckets_ = tmp;
num_buckets_ = num_buckets;
for (std::size_t i = 0; i < num_buckets_; ++i)
buckets_[i].first = buckets_[i].last = end_iter;
// Put all values back into the hash.
iterator iter = values_.begin();
while (iter != end_iter)
{
std::size_t bucket = calculate_hash_value(iter->first) % num_buckets_;
if (buckets_[bucket].last == end_iter)
{
buckets_[bucket].first = buckets_[bucket].last = iter++;
}
else if (++buckets_[bucket].last == iter)
{
++iter;
}
else
{
values_.splice(buckets_[bucket].last, values_, iter++);
--buckets_[bucket].last;
}
}
}
// Insert an element into the values list by splicing from the spares list,
// if a spare is available, and otherwise by inserting a new element.
iterator values_insert(iterator it, const value_type& v)
{
if (spares_.empty())
{
return values_.insert(it, v);
}
else
{
spares_.front() = v;
values_.splice(it, spares_, spares_.begin());
return --it;
}
}
// Erase an element from the values list by splicing it to the spares list.
void values_erase(iterator it)
{
*it = value_type();
spares_.splice(spares_.begin(), values_, it);
}
// The number of elements in the hash.
std::size_t size_;
// The list of all values in the hash map.
std::list<value_type> values_;
// The list of spare nodes waiting to be recycled. Assumes that only POD
// types are stored in the hash map.
std::list<value_type> spares_;
// The type for a bucket in the hash table.
struct bucket_type
{
iterator first;
iterator last;
};
// The buckets in the hash.
bucket_type* buckets_;
// The number of buckets in the hash.
std::size_t num_buckets_;
};
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_HASH_MAP_HPP

View File

@@ -0,0 +1,118 @@
//
// detail/impl/buffer_sequence_adapter.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_BUFFER_SEQUENCE_ADAPTER_IPP
#define ASIO_DETAIL_IMPL_BUFFER_SEQUENCE_ADAPTER_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_WINDOWS_RUNTIME)
#include <robuffer.h>
#include <windows.storage.streams.h>
#include <wrl/implements.h>
#include "asio/detail/buffer_sequence_adapter.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
class winrt_buffer_impl :
public Microsoft::WRL::RuntimeClass<
Microsoft::WRL::RuntimeClassFlags<
Microsoft::WRL::RuntimeClassType::WinRtClassicComMix>,
ABI::Windows::Storage::Streams::IBuffer,
Windows::Storage::Streams::IBufferByteAccess>
{
public:
explicit winrt_buffer_impl(const asio::const_buffer& b)
{
bytes_ = const_cast<byte*>(static_cast<const byte*>(b.data()));
length_ = b.size();
capacity_ = b.size();
}
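  // For a mutable (receive) buffer the initial Length is zero: the WinRT
  // stream reports how much it has written via put_Length(), while Capacity
  // fixes the upper bound at the buffer's size.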
explicit winrt_buffer_impl(const asio::mutable_buffer& b)
{
bytes_ = static_cast<byte*>(b.data());
length_ = 0;
capacity_ = b.size();
}
~winrt_buffer_impl()
{
}
STDMETHODIMP Buffer(byte** value)
{
*value = bytes_;
return S_OK;
}
STDMETHODIMP get_Capacity(UINT32* value)
{
*value = capacity_;
return S_OK;
}
STDMETHODIMP get_Length(UINT32 *value)
{
*value = length_;
return S_OK;
}
STDMETHODIMP put_Length(UINT32 value)
{
if (value > capacity_)
return E_INVALIDARG;
length_ = value;
return S_OK;
}
private:
byte* bytes_;
UINT32 length_;
UINT32 capacity_;
};
void buffer_sequence_adapter_base::init_native_buffer(
buffer_sequence_adapter_base::native_buffer_type& buf,
const asio::mutable_buffer& buffer)
{
std::memset(&buf, 0, sizeof(native_buffer_type));
Microsoft::WRL::ComPtr<IInspectable> insp
= Microsoft::WRL::Make<winrt_buffer_impl>(buffer);
buf = reinterpret_cast<Windows::Storage::Streams::IBuffer^>(insp.Get());
}
void buffer_sequence_adapter_base::init_native_buffer(
buffer_sequence_adapter_base::native_buffer_type& buf,
const asio::const_buffer& buffer)
{
std::memset(&buf, 0, sizeof(native_buffer_type));
Microsoft::WRL::ComPtr<IInspectable> insp
= Microsoft::WRL::Make<winrt_buffer_impl>(buffer);
Platform::Object^ buf_obj = reinterpret_cast<Platform::Object^>(insp.Get());
buf = reinterpret_cast<Windows::Storage::Streams::IBuffer^>(insp.Get());
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_WINDOWS_RUNTIME)
#endif // ASIO_DETAIL_IMPL_BUFFER_SEQUENCE_ADAPTER_IPP

View File

@@ -0,0 +1,994 @@
//
// detail/impl/descriptor_ops.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP
#define ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <cerrno>
#include "asio/detail/descriptor_ops.hpp"
#include "asio/error.hpp"
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
namespace descriptor_ops {
int open(const char* path, int flags, asio::error_code& ec)
{
int result = ::open(path, flags);
get_last_error(ec, result < 0);
return result;
}
int open(const char* path, int flags,
unsigned mode, asio::error_code& ec)
{
int result = ::open(path, flags, mode);
get_last_error(ec, result < 0);
return result;
}
int close(int d, state_type& state, asio::error_code& ec)
{
int result = 0;
if (d != -1)
{
result = ::close(d);
get_last_error(ec, result < 0);
if (result != 0
&& (ec == asio::error::would_block
|| ec == asio::error::try_again))
{
// According to UNIX Network Programming Vol. 1, it is possible for
// close() to fail with EWOULDBLOCK under certain circumstances. What
// isn't clear is the state of the descriptor after this error. The one
// current OS where this behaviour is seen, Windows, says that the socket
// remains open. Therefore we'll put the descriptor back into blocking
// mode and have another attempt at closing it.
#if defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
int flags = ::fcntl(d, F_GETFL, 0);
if (flags >= 0)
::fcntl(d, F_SETFL, flags & ~O_NONBLOCK);
#else // defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
ioctl_arg_type arg = 0;
if ((state & possible_dup) == 0)
{
result = ::ioctl(d, FIONBIO, &arg);
get_last_error(ec, result < 0);
}
if ((state & possible_dup) != 0
# if defined(ENOTTY)
|| ec.value() == ENOTTY
# endif // defined(ENOTTY)
# if defined(ENOTCAPABLE)
|| ec.value() == ENOTCAPABLE
# endif // defined(ENOTCAPABLE)
)
{
int flags = ::fcntl(d, F_GETFL, 0);
if (flags >= 0)
::fcntl(d, F_SETFL, flags & ~O_NONBLOCK);
}
#endif // defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
state &= ~non_blocking;
result = ::close(d);
get_last_error(ec, result < 0);
}
}
return result;
}
bool set_user_non_blocking(int d, state_type& state,
bool value, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return false;
}
#if defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
int result = ::fcntl(d, F_GETFL, 0);
get_last_error(ec, result < 0);
if (result >= 0)
{
int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));
result = (flag != result) ? ::fcntl(d, F_SETFL, flag) : 0;
get_last_error(ec, result < 0);
}
#else // defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
ioctl_arg_type arg = (value ? 1 : 0);
int result = 0;
if ((state & possible_dup) == 0)
{
result = ::ioctl(d, FIONBIO, &arg);
get_last_error(ec, result < 0);
}
if ((state & possible_dup) != 0
# if defined(ENOTTY)
|| ec.value() == ENOTTY
# endif // defined(ENOTTY)
# if defined(ENOTCAPABLE)
|| ec.value() == ENOTCAPABLE
# endif // defined(ENOTCAPABLE)
)
{
result = ::fcntl(d, F_GETFL, 0);
get_last_error(ec, result < 0);
if (result >= 0)
{
int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));
result = (flag != result) ? ::fcntl(d, F_SETFL, flag) : 0;
get_last_error(ec, result < 0);
}
}
#endif // defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
if (result >= 0)
{
if (value)
state |= user_set_non_blocking;
else
{
// Clearing the user-set non-blocking mode always overrides any
// internally-set non-blocking flag. Any subsequent asynchronous
// operations will need to re-enable non-blocking I/O.
state &= ~(user_set_non_blocking | internal_non_blocking);
}
return true;
}
return false;
}
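// Note on the two code paths above: FIONBIO is tried first as it is a single
// syscall, but when the descriptor may be a dup() of another (possible_dup),
// or when ioctl() is unsupported for the descriptor type (ENOTTY or
// ENOTCAPABLE), the fcntl(F_GETFL)/fcntl(F_SETFL) pair is used instead.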
bool set_internal_non_blocking(int d, state_type& state,
bool value, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return false;
}
if (!value && (state & user_set_non_blocking))
{
// It does not make sense to clear the internal non-blocking flag if the
// user still wants non-blocking behaviour. Return an error and let the
// caller figure out whether to update the user-set non-blocking flag.
ec = asio::error::invalid_argument;
return false;
}
#if defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
int result = ::fcntl(d, F_GETFL, 0);
get_last_error(ec, result < 0);
if (result >= 0)
{
int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));
result = (flag != result) ? ::fcntl(d, F_SETFL, flag) : 0;
get_last_error(ec, result < 0);
}
#else // defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
ioctl_arg_type arg = (value ? 1 : 0);
int result = 0;
if ((state & possible_dup) == 0)
{
result = ::ioctl(d, FIONBIO, &arg);
get_last_error(ec, result < 0);
}
if ((state & possible_dup) != 0
# if defined(ENOTTY)
|| ec.value() == ENOTTY
# endif // defined(ENOTTY)
# if defined(ENOTCAPABLE)
|| ec.value() == ENOTCAPABLE
# endif // defined(ENOTCAPABLE)
)
{
result = ::fcntl(d, F_GETFL, 0);
get_last_error(ec, result < 0);
if (result >= 0)
{
int flag = (value ? (result | O_NONBLOCK) : (result & ~O_NONBLOCK));
result = (flag != result) ? ::fcntl(d, F_SETFL, flag) : 0;
get_last_error(ec, result < 0);
}
}
#endif // defined(__SYMBIAN32__) || defined(__EMSCRIPTEN__)
if (result >= 0)
{
if (value)
state |= internal_non_blocking;
else
state &= ~internal_non_blocking;
return true;
}
return false;
}
std::size_t sync_read(int d, state_type state, buf* bufs,
std::size_t count, bool all_empty, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to read 0 bytes on a stream is a no-op.
if (all_empty)
{
asio::error::clear(ec);
return 0;
}
// Read some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = ::readv(d, bufs, static_cast<int>(count));
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes > 0)
return bytes;
// Check for EOF.
if (bytes == 0)
{
ec = asio::error::eof;
return 0;
}
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for descriptor to become ready.
if (descriptor_ops::poll_read(d, 0, ec) < 0)
return 0;
}
}
std::size_t sync_read1(int d, state_type state, void* data,
std::size_t size, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to read 0 bytes on a stream is a no-op.
if (size == 0)
{
asio::error::clear(ec);
return 0;
}
// Read some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = ::read(d, data, size);
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes > 0)
return bytes;
// Check for EOF.
if (bytes == 0)
{
ec = asio::error::eof;
return 0;
}
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for descriptor to become ready.
if (descriptor_ops::poll_read(d, 0, ec) < 0)
return 0;
}
}
bool non_blocking_read(int d, buf* bufs, std::size_t count,
asio::error_code& ec, std::size_t& bytes_transferred)
{
for (;;)
{
// Read some data.
signed_size_type bytes = ::readv(d, bufs, static_cast<int>(count));
get_last_error(ec, bytes < 0);
// Check for end of stream.
if (bytes == 0)
{
ec = asio::error::eof;
return true;
}
// Check if operation succeeded.
if (bytes > 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
bool non_blocking_read1(int d, void* data, std::size_t size,
asio::error_code& ec, std::size_t& bytes_transferred)
{
for (;;)
{
// Read some data.
signed_size_type bytes = ::read(d, data, size);
get_last_error(ec, bytes < 0);
// Check for end of stream.
if (bytes == 0)
{
ec = asio::error::eof;
return true;
}
// Check if operation succeeded.
if (bytes > 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
std::size_t sync_write(int d, state_type state, const buf* bufs,
std::size_t count, bool all_empty, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to write 0 bytes on a stream is a no-op.
if (all_empty)
{
asio::error::clear(ec);
return 0;
}
// Write some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = ::writev(d, bufs, static_cast<int>(count));
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes > 0)
return bytes;
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for descriptor to become ready.
if (descriptor_ops::poll_write(d, 0, ec) < 0)
return 0;
}
}
std::size_t sync_write1(int d, state_type state, const void* data,
std::size_t size, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to write 0 bytes on a stream is a no-op.
if (size == 0)
{
asio::error::clear(ec);
return 0;
}
// Write some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = ::write(d, data, size);
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes > 0)
return bytes;
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for descriptor to become ready.
if (descriptor_ops::poll_write(d, 0, ec) < 0)
return 0;
}
}
bool non_blocking_write(int d, const buf* bufs, std::size_t count,
asio::error_code& ec, std::size_t& bytes_transferred)
{
for (;;)
{
// Write some data.
signed_size_type bytes = ::writev(d, bufs, static_cast<int>(count));
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes >= 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
bool non_blocking_write1(int d, const void* data, std::size_t size,
asio::error_code& ec, std::size_t& bytes_transferred)
{
for (;;)
{
// Write some data.
signed_size_type bytes = ::write(d, data, size);
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes >= 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
#if defined(ASIO_HAS_FILE)
std::size_t sync_read_at(int d, state_type state, uint64_t offset,
buf* bufs, std::size_t count, bool all_empty, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to read 0 bytes on a stream is a no-op.
if (all_empty)
{
asio::error::clear(ec);
return 0;
}
// Read some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = ::preadv(d, bufs, static_cast<int>(count), offset);
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes > 0)
return bytes;
// Check for EOF.
if (bytes == 0)
{
ec = asio::error::eof;
return 0;
}
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for descriptor to become ready.
if (descriptor_ops::poll_read(d, 0, ec) < 0)
return 0;
}
}
std::size_t sync_read_at1(int d, state_type state, uint64_t offset,
void* data, std::size_t size, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to read 0 bytes on a stream is a no-op.
if (size == 0)
{
asio::error::clear(ec);
return 0;
}
// Read some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = ::pread(d, data, size, offset);
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes > 0)
return bytes;
// Check for EOF.
if (bytes == 0)
{
ec = asio::error::eof;
return 0;
}
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for descriptor to become ready.
if (descriptor_ops::poll_read(d, 0, ec) < 0)
return 0;
}
}
bool non_blocking_read_at(int d, uint64_t offset, buf* bufs, std::size_t count,
asio::error_code& ec, std::size_t& bytes_transferred)
{
for (;;)
{
// Read some data.
signed_size_type bytes = ::preadv(d, bufs, static_cast<int>(count), offset);
get_last_error(ec, bytes < 0);
// Check for EOF.
if (bytes == 0)
{
ec = asio::error::eof;
return true;
}
// Check if operation succeeded.
if (bytes > 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
bool non_blocking_read_at1(int d, uint64_t offset, void* data, std::size_t size,
asio::error_code& ec, std::size_t& bytes_transferred)
{
for (;;)
{
// Read some data.
signed_size_type bytes = ::pread(d, data, size, offset);
get_last_error(ec, bytes < 0);
// Check for EOF.
if (bytes == 0)
{
ec = asio::error::eof;
return true;
}
// Check if operation succeeded.
if (bytes > 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
std::size_t sync_write_at(int d, state_type state, uint64_t offset,
const buf* bufs, std::size_t count, bool all_empty,
asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to write 0 bytes on a stream is a no-op.
if (all_empty)
{
asio::error::clear(ec);
return 0;
}
// Write some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = ::pwritev(d,
bufs, static_cast<int>(count), offset);
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes > 0)
return bytes;
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for descriptor to become ready.
if (descriptor_ops::poll_write(d, 0, ec) < 0)
return 0;
}
}
std::size_t sync_write_at1(int d, state_type state, uint64_t offset,
const void* data, std::size_t size, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return 0;
}
// A request to write 0 bytes on a stream is a no-op.
if (size == 0)
{
asio::error::clear(ec);
return 0;
}
// Write some data.
for (;;)
{
// Try to complete the operation without blocking.
signed_size_type bytes = ::pwrite(d, data, size, offset);
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes > 0)
return bytes;
// Operation failed.
if ((state & user_set_non_blocking)
|| (ec != asio::error::would_block
&& ec != asio::error::try_again))
return 0;
// Wait for descriptor to become ready.
if (descriptor_ops::poll_write(d, 0, ec) < 0)
return 0;
}
}
bool non_blocking_write_at(int d, uint64_t offset,
const buf* bufs, std::size_t count,
asio::error_code& ec, std::size_t& bytes_transferred)
{
for (;;)
{
// Write some data.
signed_size_type bytes = ::pwritev(d,
bufs, static_cast<int>(count), offset);
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes >= 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
bool non_blocking_write_at1(int d, uint64_t offset,
const void* data, std::size_t size,
asio::error_code& ec, std::size_t& bytes_transferred)
{
for (;;)
{
// Write some data.
signed_size_type bytes = ::pwrite(d, data, size, offset);
get_last_error(ec, bytes < 0);
// Check if operation succeeded.
if (bytes >= 0)
{
bytes_transferred = bytes;
return true;
}
// Retry operation if interrupted by signal.
if (ec == asio::error::interrupted)
continue;
// Check if we need to run the operation again.
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return false;
// Operation failed.
bytes_transferred = 0;
return true;
}
}
#endif // defined(ASIO_HAS_FILE)
int ioctl(int d, state_type& state, long cmd,
ioctl_arg_type* arg, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return -1;
}
int result = ::ioctl(d, cmd, arg);
get_last_error(ec, result < 0);
if (result >= 0)
{
// When updating the non-blocking mode we always perform the ioctl syscall,
// even if the flags would otherwise indicate that the descriptor is
// already in the correct state. This ensures that the underlying
// descriptor is put into the state that has been requested by the user. If
// the ioctl syscall was successful then we need to update the flags to
// match.
if (cmd == static_cast<long>(FIONBIO))
{
if (*arg)
{
state |= user_set_non_blocking;
}
else
{
// Clearing the non-blocking mode always overrides any internally-set
// non-blocking flag. Any subsequent asynchronous operations will need
// to re-enable non-blocking I/O.
state &= ~(user_set_non_blocking | internal_non_blocking);
}
}
}
return result;
}
int fcntl(int d, int cmd, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return -1;
}
int result = ::fcntl(d, cmd);
get_last_error(ec, result < 0);
return result;
}
int fcntl(int d, int cmd, long arg, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return -1;
}
int result = ::fcntl(d, cmd, arg);
get_last_error(ec, result < 0);
return result;
}
int poll_read(int d, state_type state, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return -1;
}
pollfd fds;
fds.fd = d;
fds.events = POLLIN;
fds.revents = 0;
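  // Block indefinitely for a blocking descriptor; for a user-set
  // non-blocking descriptor poll with a zero timeout, mapping a zero result
  // to would_block below.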
int timeout = (state & user_set_non_blocking) ? 0 : -1;
int result = ::poll(&fds, 1, timeout);
get_last_error(ec, result < 0);
if (result == 0)
if (state & user_set_non_blocking)
ec = asio::error::would_block;
return result;
}
int poll_write(int d, state_type state, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return -1;
}
pollfd fds;
fds.fd = d;
fds.events = POLLOUT;
fds.revents = 0;
int timeout = (state & user_set_non_blocking) ? 0 : -1;
int result = ::poll(&fds, 1, timeout);
get_last_error(ec, result < 0);
if (result == 0)
if (state & user_set_non_blocking)
ec = asio::error::would_block;
return result;
}
int poll_error(int d, state_type state, asio::error_code& ec)
{
if (d == -1)
{
ec = asio::error::bad_descriptor;
return -1;
}
pollfd fds;
fds.fd = d;
fds.events = POLLPRI | POLLERR | POLLHUP;
fds.revents = 0;
int timeout = (state & user_set_non_blocking) ? 0 : -1;
int result = ::poll(&fds, 1, timeout);
get_last_error(ec, result < 0);
if (result == 0)
if (state & user_set_non_blocking)
ec = asio::error::would_block;
return result;
}
} // namespace descriptor_ops
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
#endif // ASIO_DETAIL_IMPL_DESCRIPTOR_OPS_IPP

View File

@@ -0,0 +1,117 @@
//
// detail/impl/dev_poll_reactor.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP
#define ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_DEV_POLL)
#include "asio/detail/scheduler.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
inline void dev_poll_reactor::post_immediate_completion(
operation* op, bool is_continuation) const
{
scheduler_.post_immediate_completion(op, is_continuation);
}
template <typename TimeTraits, typename Allocator>
void dev_poll_reactor::add_timer_queue(
timer_queue<TimeTraits, Allocator>& queue)
{
do_add_timer_queue(queue);
}
template <typename TimeTraits, typename Allocator>
void dev_poll_reactor::remove_timer_queue(
timer_queue<TimeTraits, Allocator>& queue)
{
do_remove_timer_queue(queue);
}
template <typename TimeTraits, typename Allocator>
void dev_poll_reactor::schedule_timer(
timer_queue<TimeTraits, Allocator>& queue,
const typename TimeTraits::time_type& time,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& timer,
wait_op* op)
{
asio::detail::mutex::scoped_lock lock(mutex_);
if (shutdown_)
{
scheduler_.post_immediate_completion(op, false);
return;
}
bool earliest = queue.enqueue_timer(time, timer, op);
scheduler_.work_started();
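  // If this timer is the new earliest deadline, the reactor may be blocked
  // in /dev/poll with a longer timeout, so wake it to recompute the timeout.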
if (earliest)
interrupter_.interrupt();
}
template <typename TimeTraits, typename Allocator>
std::size_t dev_poll_reactor::cancel_timer(
timer_queue<TimeTraits, Allocator>& queue,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& timer,
std::size_t max_cancelled)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
scheduler_.post_deferred_completions(ops);
return n;
}
template <typename TimeTraits, typename Allocator>
void dev_poll_reactor::cancel_timer_by_key(
timer_queue<TimeTraits, Allocator>& queue,
typename timer_queue<TimeTraits, Allocator>::per_timer_data* timer,
void* cancellation_key)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer_by_key(timer, ops, cancellation_key);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
template <typename TimeTraits, typename Allocator>
void dev_poll_reactor::move_timer(timer_queue<TimeTraits, Allocator>& queue,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& target,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& source)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer(target, ops);
queue.move_timer(target, source);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_DEV_POLL)
#endif // ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_HPP

View File

@@ -0,0 +1,469 @@
//
// detail/impl/dev_poll_reactor.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP
#define ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_DEV_POLL)
#include "asio/detail/dev_poll_reactor.hpp"
#include "asio/detail/assert.hpp"
#include "asio/detail/scheduler.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
dev_poll_reactor::dev_poll_reactor(asio::execution_context& ctx)
: asio::detail::execution_context_service_base<dev_poll_reactor>(ctx),
scheduler_(use_service<scheduler>(ctx)),
mutex_(),
dev_poll_fd_(do_dev_poll_create()),
interrupter_(),
shutdown_(false)
{
// Add the interrupter's descriptor to /dev/poll.
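  // (Registration with /dev/poll is performed by write()ing pollfd records
  // to the special descriptor; ready events are later retrieved with the
  // DP_POLL ioctl.)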
::pollfd ev = { 0, 0, 0 };
ev.fd = interrupter_.read_descriptor();
ev.events = POLLIN | POLLERR;
ev.revents = 0;
::write(dev_poll_fd_, &ev, sizeof(ev));
}
dev_poll_reactor::~dev_poll_reactor()
{
shutdown();
::close(dev_poll_fd_);
}
void dev_poll_reactor::shutdown()
{
asio::detail::mutex::scoped_lock lock(mutex_);
shutdown_ = true;
lock.unlock();
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
op_queue_[i].get_all_operations(ops);
timer_queues_.get_all_timers(ops);
scheduler_.abandon_operations(ops);
}
void dev_poll_reactor::notify_fork(
asio::execution_context::fork_event fork_ev)
{
if (fork_ev == asio::execution_context::fork_child)
{
detail::mutex::scoped_lock lock(mutex_);
if (dev_poll_fd_ != -1)
::close(dev_poll_fd_);
dev_poll_fd_ = -1;
dev_poll_fd_ = do_dev_poll_create();
interrupter_.recreate();
// Add the interrupter's descriptor to /dev/poll.
::pollfd ev = { 0, 0, 0 };
ev.fd = interrupter_.read_descriptor();
ev.events = POLLIN | POLLERR;
ev.revents = 0;
::write(dev_poll_fd_, &ev, sizeof(ev));
// Re-register all descriptors with /dev/poll. The changes will be written
// to the /dev/poll descriptor the next time the reactor is run.
for (int i = 0; i < max_ops; ++i)
{
reactor_op_queue<socket_type>::iterator iter = op_queue_[i].begin();
reactor_op_queue<socket_type>::iterator end = op_queue_[i].end();
for (; iter != end; ++iter)
{
::pollfd& pending_ev = add_pending_event_change(iter->first);
pending_ev.events |= POLLERR | POLLHUP;
switch (i)
{
case read_op: pending_ev.events |= POLLIN; break;
case write_op: pending_ev.events |= POLLOUT; break;
case except_op: pending_ev.events |= POLLPRI; break;
default: break;
}
}
}
interrupter_.interrupt();
}
}
void dev_poll_reactor::init_task()
{
scheduler_.init_task();
}
int dev_poll_reactor::register_descriptor(socket_type, per_descriptor_data&)
{
return 0;
}
int dev_poll_reactor::register_internal_descriptor(int op_type,
socket_type descriptor, per_descriptor_data&, reactor_op* op)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue_[op_type].enqueue_operation(descriptor, op);
::pollfd& ev = add_pending_event_change(descriptor);
ev.events = POLLERR | POLLHUP;
switch (op_type)
{
case read_op: ev.events |= POLLIN; break;
case write_op: ev.events |= POLLOUT; break;
case except_op: ev.events |= POLLPRI; break;
default: break;
}
interrupter_.interrupt();
return 0;
}
void dev_poll_reactor::move_descriptor(socket_type,
dev_poll_reactor::per_descriptor_data&,
dev_poll_reactor::per_descriptor_data&)
{
}
void dev_poll_reactor::call_post_immediate_completion(
operation* op, bool is_continuation, const void* self)
{
static_cast<const dev_poll_reactor*>(self)->post_immediate_completion(
op, is_continuation);
}
void dev_poll_reactor::start_op(int op_type, socket_type descriptor,
dev_poll_reactor::per_descriptor_data&, reactor_op* op,
bool is_continuation, bool allow_speculative,
void (*on_immediate)(operation*, bool, const void*),
const void* immediate_arg)
{
asio::detail::mutex::scoped_lock lock(mutex_);
if (shutdown_)
{
on_immediate(op, is_continuation, immediate_arg);
return;
}
if (allow_speculative)
{
if (op_type != read_op || !op_queue_[except_op].has_operation(descriptor))
{
if (!op_queue_[op_type].has_operation(descriptor))
{
if (op->perform())
{
lock.unlock();
on_immediate(op, is_continuation, immediate_arg);
return;
}
}
}
}
bool first = op_queue_[op_type].enqueue_operation(descriptor, op);
scheduler_.work_started();
if (first)
{
::pollfd& ev = add_pending_event_change(descriptor);
ev.events = POLLERR | POLLHUP;
if (op_type == read_op
|| op_queue_[read_op].has_operation(descriptor))
ev.events |= POLLIN;
if (op_type == write_op
|| op_queue_[write_op].has_operation(descriptor))
ev.events |= POLLOUT;
if (op_type == except_op
|| op_queue_[except_op].has_operation(descriptor))
ev.events |= POLLPRI;
interrupter_.interrupt();
}
}
void dev_poll_reactor::cancel_ops(socket_type descriptor,
dev_poll_reactor::per_descriptor_data&)
{
asio::detail::mutex::scoped_lock lock(mutex_);
cancel_ops_unlocked(descriptor, asio::error::operation_aborted);
}
void dev_poll_reactor::cancel_ops_by_key(socket_type descriptor,
dev_poll_reactor::per_descriptor_data&,
int op_type, void* cancellation_key)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
bool need_interrupt = op_queue_[op_type].cancel_operations_by_key(
descriptor, ops, cancellation_key, asio::error::operation_aborted);
scheduler_.post_deferred_completions(ops);
if (need_interrupt)
interrupter_.interrupt();
}
void dev_poll_reactor::deregister_descriptor(socket_type descriptor,
dev_poll_reactor::per_descriptor_data&, bool)
{
asio::detail::mutex::scoped_lock lock(mutex_);
// Remove the descriptor from /dev/poll.
::pollfd& ev = add_pending_event_change(descriptor);
ev.events = POLLREMOVE;
interrupter_.interrupt();
// Cancel any outstanding operations associated with the descriptor.
cancel_ops_unlocked(descriptor, asio::error::operation_aborted);
}
void dev_poll_reactor::deregister_internal_descriptor(
socket_type descriptor, dev_poll_reactor::per_descriptor_data&)
{
asio::detail::mutex::scoped_lock lock(mutex_);
// Remove the descriptor from /dev/poll. Since this function is only called
// during a fork, we can apply the change immediately.
::pollfd ev = { 0, 0, 0 };
ev.fd = descriptor;
ev.events = POLLREMOVE;
ev.revents = 0;
::write(dev_poll_fd_, &ev, sizeof(ev));
// Destroy all operations associated with the descriptor.
op_queue<operation> ops;
asio::error_code ec;
for (int i = 0; i < max_ops; ++i)
op_queue_[i].cancel_operations(descriptor, ops, ec);
}
void dev_poll_reactor::cleanup_descriptor_data(
dev_poll_reactor::per_descriptor_data&)
{
}
void dev_poll_reactor::run(long usec, op_queue<operation>& ops)
{
asio::detail::mutex::scoped_lock lock(mutex_);
// We can return immediately if there's no work to do and the reactor is
// not supposed to block.
if (usec == 0 && op_queue_[read_op].empty() && op_queue_[write_op].empty()
&& op_queue_[except_op].empty() && timer_queues_.all_empty())
return;
// Write the pending event registration changes to the /dev/poll descriptor.
std::size_t events_size = sizeof(::pollfd) * pending_event_changes_.size();
if (events_size > 0)
{
errno = 0;
int result = ::write(dev_poll_fd_,
&pending_event_changes_[0], events_size);
if (result != static_cast<int>(events_size))
{
asio::error_code ec = asio::error_code(
errno, asio::error::get_system_category());
for (std::size_t i = 0; i < pending_event_changes_.size(); ++i)
{
int descriptor = pending_event_changes_[i].fd;
for (int j = 0; j < max_ops; ++j)
op_queue_[j].cancel_operations(descriptor, ops, ec);
}
}
pending_event_changes_.clear();
pending_event_change_index_.clear();
}
// Calculate timeout.
int timeout;
if (usec == 0)
timeout = 0;
else
{
timeout = (usec < 0) ? -1 : ((usec - 1) / 1000 + 1);
timeout = get_timeout(timeout);
}
lock.unlock();
// Block on the /dev/poll descriptor.
::pollfd events[128] = { { 0, 0, 0 } };
::dvpoll dp = { 0, 0, 0 };
dp.dp_fds = events;
dp.dp_nfds = 128;
dp.dp_timeout = timeout;
int num_events = ::ioctl(dev_poll_fd_, DP_POLL, &dp);
lock.lock();
// Dispatch the waiting events.
for (int i = 0; i < num_events; ++i)
{
int descriptor = events[i].fd;
if (descriptor == interrupter_.read_descriptor())
{
interrupter_.reset();
}
else
{
bool more_reads = false;
bool more_writes = false;
bool more_except = false;
// Exception operations must be processed first to ensure that any
// out-of-band data is read before normal data.
if (events[i].events & (POLLPRI | POLLERR | POLLHUP))
more_except =
op_queue_[except_op].perform_operations(descriptor, ops);
else
more_except = op_queue_[except_op].has_operation(descriptor);
if (events[i].events & (POLLIN | POLLERR | POLLHUP))
more_reads = op_queue_[read_op].perform_operations(descriptor, ops);
else
more_reads = op_queue_[read_op].has_operation(descriptor);
if (events[i].events & (POLLOUT | POLLERR | POLLHUP))
more_writes = op_queue_[write_op].perform_operations(descriptor, ops);
else
more_writes = op_queue_[write_op].has_operation(descriptor);
if ((events[i].events & (POLLERR | POLLHUP)) != 0
&& !more_except && !more_reads && !more_writes)
{
// If we have an event and no operations associated with the
// descriptor then we need to delete the descriptor from /dev/poll.
// The poll operation can produce POLLHUP or POLLERR events when there
// is no operation pending, so if we do not remove the descriptor we
// can end up in a tight polling loop.
::pollfd ev = { 0, 0, 0 };
ev.fd = descriptor;
ev.events = POLLREMOVE;
ev.revents = 0;
::write(dev_poll_fd_, &ev, sizeof(ev));
}
else
{
::pollfd ev = { 0, 0, 0 };
ev.fd = descriptor;
ev.events = POLLERR | POLLHUP;
if (more_reads)
ev.events |= POLLIN;
if (more_writes)
ev.events |= POLLOUT;
if (more_except)
ev.events |= POLLPRI;
ev.revents = 0;
int result = ::write(dev_poll_fd_, &ev, sizeof(ev));
if (result != sizeof(ev))
{
asio::error_code ec(errno,
asio::error::get_system_category());
for (int j = 0; j < max_ops; ++j)
op_queue_[j].cancel_operations(descriptor, ops, ec);
}
}
}
}
timer_queues_.get_ready_timers(ops);
}
void dev_poll_reactor::interrupt()
{
interrupter_.interrupt();
}
int dev_poll_reactor::do_dev_poll_create()
{
int fd = ::open("/dev/poll", O_RDWR);
if (fd == -1)
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "/dev/poll");
}
return fd;
}
void dev_poll_reactor::do_add_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.insert(&queue);
}
void dev_poll_reactor::do_remove_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.erase(&queue);
}
int dev_poll_reactor::get_timeout(int msec)
{
// By default we will wait no longer than 5 minutes. This ensures that any
// changes to the system clock are detected after no longer than this
// interval.
const int max_msec = 5 * 60 * 1000;
return timer_queues_.wait_duration_msec(
(msec < 0 || max_msec < msec) ? max_msec : msec);
}
void dev_poll_reactor::cancel_ops_unlocked(socket_type descriptor,
const asio::error_code& ec)
{
bool need_interrupt = false;
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
need_interrupt = op_queue_[i].cancel_operations(
descriptor, ops, ec) || need_interrupt;
scheduler_.post_deferred_completions(ops);
if (need_interrupt)
interrupter_.interrupt();
}
::pollfd& dev_poll_reactor::add_pending_event_change(int descriptor)
{
hash_map<int, std::size_t>::iterator iter
= pending_event_change_index_.find(descriptor);
if (iter == pending_event_change_index_.end())
{
std::size_t index = pending_event_changes_.size();
pending_event_changes_.reserve(pending_event_changes_.size() + 1);
pending_event_change_index_.insert(std::make_pair(descriptor, index));
pending_event_changes_.push_back(::pollfd());
pending_event_changes_[index].fd = descriptor;
pending_event_changes_[index].revents = 0;
return pending_event_changes_[index];
}
else
{
return pending_event_changes_[iter->second];
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_DEV_POLL)
#endif // ASIO_DETAIL_IMPL_DEV_POLL_REACTOR_IPP
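// Standalone sketch of the raw /dev/poll protocol driven above (Solaris-style
// API, assuming <sys/devpoll.h> is available): interest is registered by
// writing pollfd records to the device, POLLREMOVE drops a registration, and
// the DP_POLL ioctl blocks until a registered descriptor becomes ready.

#include <fcntl.h>
#include <poll.h>
#include <sys/devpoll.h>
#include <sys/ioctl.h>
#include <unistd.h>

int wait_for_readable(int watched_fd)
{
  int dp = ::open("/dev/poll", O_RDWR);

  ::pollfd reg = { 0, 0, 0 };
  reg.fd = watched_fd;
  reg.events = POLLIN;            // interest set, as built in start_op()
  ::write(dp, &reg, sizeof(reg)); // registration is a plain write

  ::pollfd ready[1];
  ::dvpoll dvp = { ready, 1, -1 };    // dp_fds, dp_nfds, dp_timeout
  int n = ::ioctl(dp, DP_POLL, &dvp); // returns the number of ready fds

  reg.events = POLLREMOVE;        // as in deregister_descriptor() above
  ::write(dp, &reg, sizeof(reg));
  ::close(dp);
  return n == 1 ? ready[0].revents : -1;
}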

View File

@@ -0,0 +1,114 @@
//
// detail/impl/epoll_reactor.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP
#define ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#if defined(ASIO_HAS_EPOLL)
#include "asio/detail/scheduler.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
inline void epoll_reactor::post_immediate_completion(
operation* op, bool is_continuation) const
{
scheduler_.post_immediate_completion(op, is_continuation);
}
template <typename TimeTraits, typename Allocator>
void epoll_reactor::add_timer_queue(
timer_queue<TimeTraits, Allocator>& queue)
{
do_add_timer_queue(queue);
}
template <typename TimeTraits, typename Allocator>
void epoll_reactor::remove_timer_queue(
timer_queue<TimeTraits, Allocator>& queue)
{
do_remove_timer_queue(queue);
}
template <typename TimeTraits, typename Allocator>
void epoll_reactor::schedule_timer(timer_queue<TimeTraits, Allocator>& queue,
const typename TimeTraits::time_type& time,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& timer,
wait_op* op)
{
mutex::scoped_lock lock(mutex_);
if (shutdown_)
{
scheduler_.post_immediate_completion(op, false);
return;
}
bool earliest = queue.enqueue_timer(time, timer, op);
scheduler_.work_started();
if (earliest)
update_timeout();
}
template <typename TimeTraits, typename Allocator>
std::size_t epoll_reactor::cancel_timer(
timer_queue<TimeTraits, Allocator>& queue,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& timer,
std::size_t max_cancelled)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
scheduler_.post_deferred_completions(ops);
return n;
}
template <typename TimeTraits, typename Allocator>
void epoll_reactor::cancel_timer_by_key(
timer_queue<TimeTraits, Allocator>& queue,
typename timer_queue<TimeTraits, Allocator>::per_timer_data* timer,
void* cancellation_key)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer_by_key(timer, ops, cancellation_key);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
template <typename TimeTraits, typename Allocator>
void epoll_reactor::move_timer(timer_queue<TimeTraits, Allocator>& queue,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& target,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& source)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer(target, ops);
queue.move_timer(target, source);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_EPOLL)
#endif // ASIO_DETAIL_IMPL_EPOLL_REACTOR_HPP
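// Sketch of what update_timeout() amounts to when timerfd support is present
// (see the accompanying .ipp): the timer descriptor already sits in the epoll
// set, so re-arming it with timerfd_settime() is by itself enough to wake a
// thread blocked in epoll_wait() -- no explicit interrupt is required.

#include <sys/timerfd.h>

void rearm_timerfd(int timer_fd, long usec_from_now)
{
  itimerspec ts = {};
  ts.it_value.tv_sec = usec_from_now / 1000000;
  ts.it_value.tv_nsec = (usec_from_now % 1000000) * 1000;
  // Flags of 0 mean a relative expiry; note that an all-zero it_value would
  // disarm the timer rather than fire it immediately.
  ::timerfd_settime(timer_fd, 0, &ts, 0);
}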

View File

@@ -0,0 +1,840 @@
//
// detail/impl/epoll_reactor.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP
#define ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_EPOLL)
#include <cstddef>
#include <sys/epoll.h>
#include "asio/config.hpp"
#include "asio/detail/epoll_reactor.hpp"
#include "asio/detail/scheduler.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#if defined(ASIO_HAS_TIMERFD)
# include <sys/timerfd.h>
#endif // defined(ASIO_HAS_TIMERFD)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
epoll_reactor::epoll_reactor(asio::execution_context& ctx)
: execution_context_service_base<epoll_reactor>(ctx),
scheduler_(use_service<scheduler>(ctx)),
mutex_(config(ctx).get("reactor", "registration_locking", true),
config(ctx).get("reactor", "registration_locking_spin_count", 0)),
interrupter_(),
epoll_fd_(do_epoll_create()),
timer_fd_(do_timerfd_create()),
shutdown_(false),
io_locking_(config(ctx).get("reactor", "io_locking", true)),
io_locking_spin_count_(
config(ctx).get("reactor", "io_locking_spin_count", 0)),
registered_descriptors_mutex_(mutex_.enabled(), mutex_.spin_count()),
registered_descriptors_(execution_context::allocator<void>(ctx),
config(ctx).get("reactor", "preallocated_io_objects", 0U),
io_locking_, io_locking_spin_count_)
{
// Add the interrupter's descriptor to epoll.
epoll_event ev = { 0, { 0 } };
ev.events = EPOLLIN | EPOLLERR | EPOLLET;
ev.data.ptr = &interrupter_;
epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, interrupter_.read_descriptor(), &ev);
interrupter_.interrupt();
// Add the timer descriptor to epoll.
if (timer_fd_ != -1)
{
ev.events = EPOLLIN | EPOLLERR;
ev.data.ptr = &timer_fd_;
epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, timer_fd_, &ev);
}
}
epoll_reactor::~epoll_reactor()
{
if (epoll_fd_ != -1)
close(epoll_fd_);
if (timer_fd_ != -1)
close(timer_fd_);
}
void epoll_reactor::shutdown()
{
mutex::scoped_lock lock(mutex_);
shutdown_ = true;
lock.unlock();
op_queue<operation> ops;
while (descriptor_state* state = registered_descriptors_.first())
{
for (int i = 0; i < max_ops; ++i)
ops.push(state->op_queue_[i]);
state->shutdown_ = true;
registered_descriptors_.free(state);
}
timer_queues_.get_all_timers(ops);
scheduler_.abandon_operations(ops);
}
void epoll_reactor::notify_fork(
asio::execution_context::fork_event fork_ev)
{
if (fork_ev == asio::execution_context::fork_child)
{
if (epoll_fd_ != -1)
::close(epoll_fd_);
epoll_fd_ = -1;
epoll_fd_ = do_epoll_create();
if (timer_fd_ != -1)
::close(timer_fd_);
timer_fd_ = -1;
timer_fd_ = do_timerfd_create();
interrupter_.recreate();
// Add the interrupter's descriptor to epoll.
epoll_event ev = { 0, { 0 } };
ev.events = EPOLLIN | EPOLLERR | EPOLLET;
ev.data.ptr = &interrupter_;
epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, interrupter_.read_descriptor(), &ev);
interrupter_.interrupt();
// Add the timer descriptor to epoll.
if (timer_fd_ != -1)
{
ev.events = EPOLLIN | EPOLLERR;
ev.data.ptr = &timer_fd_;
epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, timer_fd_, &ev);
}
update_timeout();
// Re-register all descriptors with epoll.
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
for (descriptor_state* state = registered_descriptors_.first();
state != 0; state = state->next_)
{
if (state->registered_events_ != 0)
{
ev.events = state->registered_events_;
ev.data.ptr = state;
int result = epoll_ctl(epoll_fd_,
EPOLL_CTL_ADD, state->descriptor_, &ev);
if (result != 0)
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "epoll re-registration");
}
}
}
}
}
void epoll_reactor::init_task()
{
scheduler_.init_task();
}
int epoll_reactor::register_descriptor(socket_type descriptor,
epoll_reactor::per_descriptor_data& descriptor_data)
{
descriptor_data = allocate_descriptor_state();
ASIO_HANDLER_REACTOR_REGISTRATION((
context(), static_cast<uintmax_t>(descriptor),
reinterpret_cast<uintmax_t>(descriptor_data)));
{
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
descriptor_data->reactor_ = this;
descriptor_data->descriptor_ = descriptor;
descriptor_data->shutdown_ = false;
for (int i = 0; i < max_ops; ++i)
descriptor_data->try_speculative_[i] = true;
}
epoll_event ev = { 0, { 0 } };
ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLPRI | EPOLLET;
descriptor_data->registered_events_ = ev.events;
ev.data.ptr = descriptor_data;
int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev);
if (result != 0)
{
if (errno == EPERM)
{
// This file descriptor type is not supported by epoll. However, if it is
// a regular file then operations on it will not block. We will allow
// this descriptor to be used and fail later if an operation on it would
// otherwise require a trip through the reactor.
descriptor_data->registered_events_ = 0;
return 0;
}
return errno;
}
return 0;
}
int epoll_reactor::register_internal_descriptor(
int op_type, socket_type descriptor,
epoll_reactor::per_descriptor_data& descriptor_data, reactor_op* op)
{
descriptor_data = allocate_descriptor_state();
ASIO_HANDLER_REACTOR_REGISTRATION((
context(), static_cast<uintmax_t>(descriptor),
reinterpret_cast<uintmax_t>(descriptor_data)));
{
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
descriptor_data->reactor_ = this;
descriptor_data->descriptor_ = descriptor;
descriptor_data->shutdown_ = false;
descriptor_data->op_queue_[op_type].push(op);
for (int i = 0; i < max_ops; ++i)
descriptor_data->try_speculative_[i] = true;
}
epoll_event ev = { 0, { 0 } };
ev.events = EPOLLIN | EPOLLERR | EPOLLHUP | EPOLLPRI | EPOLLET;
descriptor_data->registered_events_ = ev.events;
ev.data.ptr = descriptor_data;
int result = epoll_ctl(epoll_fd_, EPOLL_CTL_ADD, descriptor, &ev);
if (result != 0)
{
// Don't try to re-register internal descriptor after fork().
descriptor_data->registered_events_ = 0;
return errno;
}
return 0;
}
void epoll_reactor::move_descriptor(socket_type,
epoll_reactor::per_descriptor_data& target_descriptor_data,
epoll_reactor::per_descriptor_data& source_descriptor_data)
{
target_descriptor_data = source_descriptor_data;
source_descriptor_data = 0;
}
void epoll_reactor::call_post_immediate_completion(
operation* op, bool is_continuation, const void* self)
{
static_cast<const epoll_reactor*>(self)->post_immediate_completion(
op, is_continuation);
}
void epoll_reactor::start_op(int op_type, socket_type descriptor,
epoll_reactor::per_descriptor_data& descriptor_data, reactor_op* op,
bool is_continuation, bool allow_speculative,
void (*on_immediate)(operation*, bool, const void*),
const void* immediate_arg)
{
if (!descriptor_data)
{
op->ec_ = asio::error::bad_descriptor;
on_immediate(op, is_continuation, immediate_arg);
return;
}
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (descriptor_data->shutdown_)
{
on_immediate(op, is_continuation, immediate_arg);
return;
}
if (descriptor_data->op_queue_[op_type].empty())
{
if (allow_speculative
&& (op_type != read_op
|| descriptor_data->op_queue_[except_op].empty()))
{
if (descriptor_data->try_speculative_[op_type])
{
if (reactor_op::status status = op->perform())
{
if (status == reactor_op::done_and_exhausted)
if (descriptor_data->registered_events_ != 0)
descriptor_data->try_speculative_[op_type] = false;
descriptor_lock.unlock();
on_immediate(op, is_continuation, immediate_arg);
return;
}
}
if (descriptor_data->registered_events_ == 0)
{
op->ec_ = asio::error::operation_not_supported;
on_immediate(op, is_continuation, immediate_arg);
return;
}
if (op_type == write_op)
{
if ((descriptor_data->registered_events_ & EPOLLOUT) == 0)
{
epoll_event ev = { 0, { 0 } };
ev.events = descriptor_data->registered_events_ | EPOLLOUT;
ev.data.ptr = descriptor_data;
if (epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev) == 0)
{
descriptor_data->registered_events_ |= ev.events;
}
else
{
op->ec_ = asio::error_code(errno,
asio::error::get_system_category());
on_immediate(op, is_continuation, immediate_arg);
return;
}
}
}
}
else if (descriptor_data->registered_events_ == 0)
{
op->ec_ = asio::error::operation_not_supported;
on_immediate(op, is_continuation, immediate_arg);
return;
}
else
{
if (op_type == write_op)
{
descriptor_data->registered_events_ |= EPOLLOUT;
}
epoll_event ev = { 0, { 0 } };
ev.events = descriptor_data->registered_events_;
ev.data.ptr = descriptor_data;
epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, descriptor, &ev);
}
}
descriptor_data->op_queue_[op_type].push(op);
scheduler_.work_started();
}
void epoll_reactor::cancel_ops(socket_type,
epoll_reactor::per_descriptor_data& descriptor_data)
{
if (!descriptor_data)
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
{
while (reactor_op* op = descriptor_data->op_queue_[i].front())
{
op->ec_ = asio::error::operation_aborted;
descriptor_data->op_queue_[i].pop();
ops.push(op);
}
}
descriptor_lock.unlock();
scheduler_.post_deferred_completions(ops);
}
void epoll_reactor::cancel_ops_by_key(socket_type,
epoll_reactor::per_descriptor_data& descriptor_data,
int op_type, void* cancellation_key)
{
if (!descriptor_data)
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
op_queue<operation> ops;
op_queue<reactor_op> other_ops;
while (reactor_op* op = descriptor_data->op_queue_[op_type].front())
{
descriptor_data->op_queue_[op_type].pop();
if (op->cancellation_key_ == cancellation_key)
{
op->ec_ = asio::error::operation_aborted;
ops.push(op);
}
else
other_ops.push(op);
}
descriptor_data->op_queue_[op_type].push(other_ops);
descriptor_lock.unlock();
scheduler_.post_deferred_completions(ops);
}
void epoll_reactor::deregister_descriptor(socket_type descriptor,
epoll_reactor::per_descriptor_data& descriptor_data, bool closing)
{
if (!descriptor_data)
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (!descriptor_data->shutdown_)
{
if (closing)
{
// The descriptor will be automatically removed from the epoll set when
// it is closed.
}
else if (descriptor_data->registered_events_ != 0)
{
epoll_event ev = { 0, { 0 } };
epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, descriptor, &ev);
}
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
{
while (reactor_op* op = descriptor_data->op_queue_[i].front())
{
op->ec_ = asio::error::operation_aborted;
descriptor_data->op_queue_[i].pop();
ops.push(op);
}
}
descriptor_data->descriptor_ = -1;
descriptor_data->shutdown_ = true;
descriptor_lock.unlock();
ASIO_HANDLER_REACTOR_DEREGISTRATION((
context(), static_cast<uintmax_t>(descriptor),
reinterpret_cast<uintmax_t>(descriptor_data)));
scheduler_.post_deferred_completions(ops);
// Leave descriptor_data set so that it will be freed by the subsequent
// call to cleanup_descriptor_data.
}
else
{
// We are shutting down, so prevent cleanup_descriptor_data from freeing
// the descriptor_data object and let the destructor free it instead.
descriptor_data = 0;
}
}
void epoll_reactor::deregister_internal_descriptor(socket_type descriptor,
epoll_reactor::per_descriptor_data& descriptor_data)
{
if (!descriptor_data)
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (!descriptor_data->shutdown_)
{
epoll_event ev = { 0, { 0 } };
epoll_ctl(epoll_fd_, EPOLL_CTL_DEL, descriptor, &ev);
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
ops.push(descriptor_data->op_queue_[i]);
descriptor_data->descriptor_ = -1;
descriptor_data->shutdown_ = true;
descriptor_lock.unlock();
ASIO_HANDLER_REACTOR_DEREGISTRATION((
context(), static_cast<uintmax_t>(descriptor),
reinterpret_cast<uintmax_t>(descriptor_data)));
// Leave descriptor_data set so that it will be freed by the subsequent
// call to cleanup_descriptor_data.
}
else
{
// We are shutting down, so prevent cleanup_descriptor_data from freeing
// the descriptor_data object and let the destructor free it instead.
descriptor_data = 0;
}
}
void epoll_reactor::cleanup_descriptor_data(
per_descriptor_data& descriptor_data)
{
if (descriptor_data)
{
free_descriptor_state(descriptor_data);
descriptor_data = 0;
}
}
void epoll_reactor::run(long usec, op_queue<operation>& ops)
{
// This code relies on the fact that the scheduler queues the reactor task
// behind all descriptor operations generated by this function. This means
// that by the time we reach this point, any previously returned descriptor
// operations have already been dequeued. Therefore it is now safe for us to
// reuse and return them for the scheduler to queue again.
// Calculate timeout. Check the timer queues only if timerfd is not in use.
int timeout;
if (usec == 0)
timeout = 0;
else
{
timeout = (usec < 0) ? -1 : ((usec - 1) / 1000 + 1);
if (timer_fd_ == -1)
{
mutex::scoped_lock lock(mutex_);
timeout = get_timeout(timeout);
}
}
// Block on the epoll descriptor.
epoll_event events[128];
int num_events = epoll_wait(epoll_fd_, events, 128, timeout);
#if defined(ASIO_ENABLE_HANDLER_TRACKING)
// Trace the waiting events.
for (int i = 0; i < num_events; ++i)
{
void* ptr = events[i].data.ptr;
if (ptr == &interrupter_)
{
// Ignore.
}
# if defined(ASIO_HAS_TIMERFD)
else if (ptr == &timer_fd_)
{
// Ignore.
}
# endif // defined(ASIO_HAS_TIMERFD)
else
{
unsigned event_mask = 0;
if ((events[i].events & EPOLLIN) != 0)
event_mask |= ASIO_HANDLER_REACTOR_READ_EVENT;
if ((events[i].events & EPOLLOUT))
event_mask |= ASIO_HANDLER_REACTOR_WRITE_EVENT;
if ((events[i].events & (EPOLLERR | EPOLLHUP)) != 0)
event_mask |= ASIO_HANDLER_REACTOR_ERROR_EVENT;
ASIO_HANDLER_REACTOR_EVENTS((context(),
reinterpret_cast<uintmax_t>(ptr), event_mask));
}
}
#endif // defined(ASIO_ENABLE_HANDLER_TRACKING)
#if defined(ASIO_HAS_TIMERFD)
bool check_timers = (timer_fd_ == -1);
#else // defined(ASIO_HAS_TIMERFD)
bool check_timers = true;
#endif // defined(ASIO_HAS_TIMERFD)
// Dispatch the waiting events.
for (int i = 0; i < num_events; ++i)
{
void* ptr = events[i].data.ptr;
if (ptr == &interrupter_)
{
// No need to reset the interrupter since we're leaving the descriptor
// in a ready-to-read state and relying on edge-triggered notifications
// to make it so that we only get woken up when the descriptor's epoll
// registration is updated.
#if defined(ASIO_HAS_TIMERFD)
if (timer_fd_ == -1)
check_timers = true;
#else // defined(ASIO_HAS_TIMERFD)
check_timers = true;
#endif // defined(ASIO_HAS_TIMERFD)
}
#if defined(ASIO_HAS_TIMERFD)
else if (ptr == &timer_fd_)
{
check_timers = true;
}
#endif // defined(ASIO_HAS_TIMERFD)
else
{
// The descriptor operation doesn't count as work in and of itself, so we
// don't call work_started() here. This still allows the scheduler to
// stop if the only remaining operations are descriptor operations.
descriptor_state* descriptor_data = static_cast<descriptor_state*>(ptr);
if (!ops.is_enqueued(descriptor_data))
{
descriptor_data->set_ready_events(events[i].events);
ops.push(descriptor_data);
}
else
{
descriptor_data->add_ready_events(events[i].events);
}
}
}
if (check_timers)
{
mutex::scoped_lock common_lock(mutex_);
timer_queues_.get_ready_timers(ops);
#if defined(ASIO_HAS_TIMERFD)
if (timer_fd_ != -1)
{
itimerspec new_timeout;
itimerspec old_timeout;
int flags = get_timeout(new_timeout);
timerfd_settime(timer_fd_, flags, &new_timeout, &old_timeout);
}
#endif // defined(ASIO_HAS_TIMERFD)
}
}
void epoll_reactor::interrupt()
{
epoll_event ev = { 0, { 0 } };
ev.events = EPOLLIN | EPOLLERR | EPOLLET;
ev.data.ptr = &interrupter_;
epoll_ctl(epoll_fd_, EPOLL_CTL_MOD, interrupter_.read_descriptor(), &ev);
}
int epoll_reactor::do_epoll_create()
{
#if defined(EPOLL_CLOEXEC)
int fd = epoll_create1(EPOLL_CLOEXEC);
#else // defined(EPOLL_CLOEXEC)
int fd = -1;
errno = EINVAL;
#endif // defined(EPOLL_CLOEXEC)
if (fd == -1 && (errno == EINVAL || errno == ENOSYS))
{
fd = epoll_create(epoll_size);
if (fd != -1)
::fcntl(fd, F_SETFD, FD_CLOEXEC);
}
if (fd == -1)
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "epoll");
}
return fd;
}
int epoll_reactor::do_timerfd_create()
{
#if defined(ASIO_HAS_TIMERFD)
# if defined(TFD_CLOEXEC)
int fd = timerfd_create(CLOCK_MONOTONIC, TFD_CLOEXEC);
# else // defined(TFD_CLOEXEC)
int fd = -1;
errno = EINVAL;
# endif // defined(TFD_CLOEXEC)
if (fd == -1 && errno == EINVAL)
{
fd = timerfd_create(CLOCK_MONOTONIC, 0);
if (fd != -1)
::fcntl(fd, F_SETFD, FD_CLOEXEC);
}
return fd;
#else // defined(ASIO_HAS_TIMERFD)
return -1;
#endif // defined(ASIO_HAS_TIMERFD)
}
epoll_reactor::descriptor_state* epoll_reactor::allocate_descriptor_state()
{
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
return registered_descriptors_.alloc(io_locking_, io_locking_spin_count_);
}
void epoll_reactor::free_descriptor_state(epoll_reactor::descriptor_state* s)
{
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
registered_descriptors_.free(s);
}
void epoll_reactor::do_add_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.insert(&queue);
}
void epoll_reactor::do_remove_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.erase(&queue);
}
void epoll_reactor::update_timeout()
{
#if defined(ASIO_HAS_TIMERFD)
if (timer_fd_ != -1)
{
itimerspec new_timeout;
itimerspec old_timeout;
int flags = get_timeout(new_timeout);
timerfd_settime(timer_fd_, flags, &new_timeout, &old_timeout);
return;
}
#endif // defined(ASIO_HAS_TIMERFD)
interrupt();
}
int epoll_reactor::get_timeout(int msec)
{
// By default we will wait no longer than 5 minutes. This ensures that any
// changes to the system clock are detected after no longer than this
// interval.
const int max_msec = 5 * 60 * 1000;
return timer_queues_.wait_duration_msec(
(msec < 0 || max_msec < msec) ? max_msec : msec);
}
#if defined(ASIO_HAS_TIMERFD)
int epoll_reactor::get_timeout(itimerspec& ts)
{
ts.it_interval.tv_sec = 0;
ts.it_interval.tv_nsec = 0;
long usec = timer_queues_.wait_duration_usec(5 * 60 * 1000 * 1000);
ts.it_value.tv_sec = usec / 1000000;
// An all-zero it_value would disarm the timerfd, so when a timer is already
// due (usec == 0) we arm an absolute expiry of 1ns, which lies in the past
// and therefore fires immediately.
ts.it_value.tv_nsec = usec ? (usec % 1000000) * 1000 : 1;
return usec ? 0 : TFD_TIMER_ABSTIME;
}
#endif // defined(ASIO_HAS_TIMERFD)
struct epoll_reactor::perform_io_cleanup_on_block_exit
{
explicit perform_io_cleanup_on_block_exit(epoll_reactor* r)
: reactor_(r), first_op_(0)
{
}
~perform_io_cleanup_on_block_exit()
{
if (first_op_)
{
// Post the remaining completed operations for invocation.
if (!ops_.empty())
reactor_->scheduler_.post_deferred_completions(ops_);
// A user-initiated operation has completed, but there's no need to
// explicitly call work_finished() here. Instead, we'll take advantage of
// the fact that the scheduler will call work_finished() once we return.
}
else
{
// No user-initiated operations have completed, so we need to compensate
// for the work_finished() call that the scheduler will make once this
// operation returns.
reactor_->scheduler_.compensating_work_started();
}
}
epoll_reactor* reactor_;
op_queue<operation> ops_;
operation* first_op_;
};
epoll_reactor::descriptor_state::descriptor_state(bool locking, int spin_count)
: operation(&epoll_reactor::descriptor_state::do_complete),
mutex_(locking, spin_count)
{
}
operation* epoll_reactor::descriptor_state::perform_io(uint32_t events)
{
mutex_.lock();
perform_io_cleanup_on_block_exit io_cleanup(reactor_);
mutex::scoped_lock descriptor_lock(mutex_, mutex::scoped_lock::adopt_lock);
// Exception operations must be processed first to ensure that any
// out-of-band data is read before normal data.
static const int flag[max_ops] = { EPOLLIN, EPOLLOUT, EPOLLPRI };
for (int j = max_ops - 1; j >= 0; --j)
{
if (events & (flag[j] | EPOLLERR | EPOLLHUP))
{
try_speculative_[j] = true;
while (reactor_op* op = op_queue_[j].front())
{
if (reactor_op::status status = op->perform())
{
op_queue_[j].pop();
io_cleanup.ops_.push(op);
if (status == reactor_op::done_and_exhausted)
{
try_speculative_[j] = false;
break;
}
}
else
break;
}
}
}
// The first operation will be returned for completion now. The others will
// be posted for later by the io_cleanup object's destructor.
io_cleanup.first_op_ = io_cleanup.ops_.front();
io_cleanup.ops_.pop();
return io_cleanup.first_op_;
}
void epoll_reactor::descriptor_state::do_complete(
void* owner, operation* base,
const asio::error_code& ec, std::size_t bytes_transferred)
{
if (owner)
{
descriptor_state* descriptor_data = static_cast<descriptor_state*>(base);
uint32_t events = static_cast<uint32_t>(bytes_transferred);
if (operation* op = descriptor_data->perform_io(events))
{
op->complete(owner, ec, 0);
}
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_EPOLL)
#endif // ASIO_DETAIL_IMPL_EPOLL_REACTOR_IPP
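// Sketch of the edge-triggered interrupter trick relied on above: the eventfd
// is made readable once and never drained, so under level-triggered epoll it
// would cause a busy loop. With EPOLLET the ready state is reported only
// once -- until EPOLL_CTL_MOD re-arms the registration, which is exactly how
// interrupt() wakes epoll_wait() without any read or write.

#include <sys/epoll.h>
#include <sys/eventfd.h>

struct edge_triggered_interrupter_sketch
{
  int epoll_fd;
  int event_fd;

  explicit edge_triggered_interrupter_sketch(int epfd)
    : epoll_fd(epfd),
      event_fd(::eventfd(1, EFD_NONBLOCK)) // counter starts non-zero: readable
  {
    epoll_event ev = { 0, { 0 } };
    ev.events = EPOLLIN | EPOLLET;
    ev.data.fd = event_fd;
    ::epoll_ctl(epoll_fd, EPOLL_CTL_ADD, event_fd, &ev); // one edge delivered
  }

  void interrupt() // re-arming a still-ready descriptor produces a new edge
  {
    epoll_event ev = { 0, { 0 } };
    ev.events = EPOLLIN | EPOLLET;
    ev.data.fd = event_fd;
    ::epoll_ctl(epoll_fd, EPOLL_CTL_MOD, event_fd, &ev);
  }
};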

View File

@@ -0,0 +1,171 @@
//
// detail/impl/eventfd_select_interrupter.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2008 Roelof Naude (roelof.naude at gmail dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP
#define ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_EVENTFD)
#include <sys/stat.h>
#include <sys/types.h>
#include <fcntl.h>
#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 && !defined(__UCLIBC__)
# include <asm/unistd.h>
#else // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 && !defined(__UCLIBC__)
# include <sys/eventfd.h>
#endif // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 && !defined(__UCLIBC__)
#include "asio/detail/cstdint.hpp"
#include "asio/detail/eventfd_select_interrupter.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
eventfd_select_interrupter::eventfd_select_interrupter()
{
open_descriptors();
}
void eventfd_select_interrupter::open_descriptors()
{
#if __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 && !defined(__UCLIBC__)
write_descriptor_ = read_descriptor_ = syscall(__NR_eventfd, 0);
if (read_descriptor_ != -1)
{
::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);
::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC);
}
#else // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 && !defined(__UCLIBC__)
# if defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK)
write_descriptor_ = read_descriptor_ =
::eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
# else // defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK)
errno = EINVAL;
write_descriptor_ = read_descriptor_ = -1;
# endif // defined(EFD_CLOEXEC) && defined(EFD_NONBLOCK)
if (read_descriptor_ == -1 && errno == EINVAL)
{
write_descriptor_ = read_descriptor_ = ::eventfd(0, 0);
if (read_descriptor_ != -1)
{
::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);
::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC);
}
}
#endif // __GLIBC__ == 2 && __GLIBC_MINOR__ < 8 && !defined(__UCLIBC__)
if (read_descriptor_ == -1)
{
int pipe_fds[2];
if (pipe(pipe_fds) == 0)
{
read_descriptor_ = pipe_fds[0];
::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);
::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC);
write_descriptor_ = pipe_fds[1];
::fcntl(write_descriptor_, F_SETFL, O_NONBLOCK);
::fcntl(write_descriptor_, F_SETFD, FD_CLOEXEC);
}
else
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "eventfd_select_interrupter");
}
}
}
eventfd_select_interrupter::~eventfd_select_interrupter()
{
close_descriptors();
}
void eventfd_select_interrupter::close_descriptors()
{
if (write_descriptor_ != -1 && write_descriptor_ != read_descriptor_)
::close(write_descriptor_);
if (read_descriptor_ != -1)
::close(read_descriptor_);
}
void eventfd_select_interrupter::recreate()
{
close_descriptors();
write_descriptor_ = -1;
read_descriptor_ = -1;
open_descriptors();
}
void eventfd_select_interrupter::interrupt()
{
uint64_t counter(1UL);
int result = ::write(write_descriptor_, &counter, sizeof(uint64_t));
(void)result;
}
bool eventfd_select_interrupter::reset()
{
if (write_descriptor_ == read_descriptor_)
{
for (;;)
{
// Only perform one read. The kernel maintains an atomic counter.
uint64_t counter(0);
errno = 0;
int bytes_read = ::read(read_descriptor_, &counter, sizeof(uint64_t));
if (bytes_read < 0 && errno == EINTR)
continue;
return true;
}
}
else
{
for (;;)
{
// Clear all data from the pipe.
char data[1024];
int bytes_read = ::read(read_descriptor_, data, sizeof(data));
if (bytes_read == sizeof(data))
continue;
if (bytes_read > 0)
return true;
if (bytes_read == 0)
return false;
if (errno == EINTR)
continue;
if (errno == EWOULDBLOCK)
return true;
if (errno == EAGAIN)
return true;
return false;
}
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_EVENTFD)
#endif // ASIO_DETAIL_IMPL_EVENTFD_SELECT_INTERRUPTER_IPP
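// Sketch of the eventfd semantics the interrupter above depends on: the
// descriptor wraps an 8-byte kernel counter. Each write() adds to it, and a
// single read() atomically returns the accumulated value and resets it to
// zero -- which is why reset() needs exactly one read in the eventfd case but
// a drain loop for the pipe fallback.

#include <stdint.h>
#include <sys/eventfd.h>
#include <unistd.h>

void eventfd_counter_demo()
{
  int fd = ::eventfd(0, EFD_NONBLOCK);
  uint64_t one = 1;
  ::write(fd, &one, sizeof(one)); // interrupt
  ::write(fd, &one, sizeof(one)); // interrupt again; counter is now 2
  uint64_t value = 0;
  ::read(fd, &value, sizeof(value)); // value == 2, counter reset to 0
  ::close(fd);
}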

View File

@@ -0,0 +1,387 @@
//
// detail/impl/handler_tracking.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP
#define ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_CUSTOM_HANDLER_TRACKING)
// The handler tracking implementation is provided by the user-specified header.
#elif defined(ASIO_ENABLE_HANDLER_TRACKING)
#include <cstdarg>
#include <cstdio>
#include "asio/detail/chrono.hpp"
#include "asio/detail/chrono_time_traits.hpp"
#include "asio/detail/handler_tracking.hpp"
#include "asio/wait_traits.hpp"
#if defined(ASIO_WINDOWS_RUNTIME)
# include "asio/detail/socket_types.hpp"
#elif !defined(ASIO_WINDOWS)
# include <unistd.h>
#endif // !defined(ASIO_WINDOWS)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
struct handler_tracking_timestamp
{
uint64_t seconds;
uint64_t microseconds;
handler_tracking_timestamp()
{
typedef chrono_time_traits<chrono::system_clock,
asio::wait_traits<chrono::system_clock>> traits_helper;
traits_helper::posix_time_duration now(
chrono::system_clock::now().time_since_epoch());
seconds = static_cast<uint64_t>(now.total_seconds());
microseconds = static_cast<uint64_t>(now.total_microseconds() % 1000000);
}
};
struct handler_tracking::tracking_state
{
static_mutex mutex_;
uint64_t next_id_;
tss_ptr<completion>* current_completion_;
tss_ptr<location>* current_location_;
};
handler_tracking::tracking_state* handler_tracking::get_state()
{
static tracking_state state = { ASIO_STATIC_MUTEX_INIT, 1, 0, 0 };
return &state;
}
void handler_tracking::init()
{
static tracking_state* state = get_state();
state->mutex_.init();
static_mutex::scoped_lock lock(state->mutex_);
if (state->current_completion_ == 0)
state->current_completion_ = new tss_ptr<completion>;
if (state->current_location_ == 0)
state->current_location_ = new tss_ptr<location>;
}
handler_tracking::location::location(
const char* file, int line, const char* func)
: file_(file),
line_(line),
func_(func),
next_(*get_state()->current_location_)
{
if (file_)
*get_state()->current_location_ = this;
}
handler_tracking::location::~location()
{
if (file_)
*get_state()->current_location_ = next_;
}
void handler_tracking::creation(execution_context&,
handler_tracking::tracked_handler& h,
const char* object_type, void* object,
uintmax_t /*native_handle*/, const char* op_name)
{
static tracking_state* state = get_state();
static_mutex::scoped_lock lock(state->mutex_);
h.id_ = state->next_id_++;
lock.unlock();
handler_tracking_timestamp timestamp;
uint64_t current_id = 0;
if (completion* current_completion = *state->current_completion_)
current_id = current_completion->id_;
for (location* current_location = *state->current_location_;
current_location; current_location = current_location->next_)
{
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|%I64u^%I64u|%s%s%.80s%s(%.80s:%d)\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|%llu^%llu|%s%s%.80s%s(%.80s:%d)\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
current_id, h.id_,
current_location == *state->current_location_ ? "in " : "called from ",
current_location->func_ ? "'" : "",
current_location->func_ ? current_location->func_ : "",
current_location->func_ ? "' " : "",
current_location->file_, current_location->line_);
}
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|%I64u*%I64u|%.20s@%p.%.50s\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|%llu*%llu|%.20s@%p.%.50s\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
current_id, h.id_, object_type, object, op_name);
}
handler_tracking::completion::completion(
const handler_tracking::tracked_handler& h)
: id_(h.id_),
invoked_(false),
next_(*get_state()->current_completion_)
{
*get_state()->current_completion_ = this;
}
handler_tracking::completion::~completion()
{
if (id_)
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|%c%I64u|\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|%c%llu|\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
invoked_ ? '!' : '~', id_);
}
*get_state()->current_completion_ = next_;
}
void handler_tracking::completion::invocation_begin()
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|>%I64u|\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|>%llu|\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds, id_);
invoked_ = true;
}
void handler_tracking::completion::invocation_begin(
const asio::error_code& ec)
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|>%llu|ec=%.20s:%d\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
id_, ec.category().name(), ec.value());
invoked_ = true;
}
void handler_tracking::completion::invocation_begin(
const asio::error_code& ec, std::size_t bytes_transferred)
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,bytes_transferred=%I64u\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|>%llu|ec=%.20s:%d,bytes_transferred=%llu\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
id_, ec.category().name(), ec.value(),
static_cast<uint64_t>(bytes_transferred));
invoked_ = true;
}
void handler_tracking::completion::invocation_begin(
const asio::error_code& ec, int signal_number)
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,signal_number=%d\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|>%llu|ec=%.20s:%d,signal_number=%d\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
id_, ec.category().name(), ec.value(), signal_number);
invoked_ = true;
}
void handler_tracking::completion::invocation_begin(
const asio::error_code& ec, const char* arg)
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|>%I64u|ec=%.20s:%d,%.50s\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|>%llu|ec=%.20s:%d,%.50s\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
id_, ec.category().name(), ec.value(), arg);
invoked_ = true;
}
void handler_tracking::completion::invocation_end()
{
if (id_)
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|<%I64u|\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|<%llu|\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds, id_);
id_ = 0;
}
}
void handler_tracking::operation(execution_context&,
const char* object_type, void* object,
uintmax_t /*native_handle*/, const char* op_name)
{
static tracking_state* state = get_state();
handler_tracking_timestamp timestamp;
unsigned long long current_id = 0;
if (completion* current_completion = *state->current_completion_)
current_id = current_completion->id_;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|%I64u|%.20s@%p.%.50s\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|%llu|%.20s@%p.%.50s\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
current_id, object_type, object, op_name);
}
void handler_tracking::reactor_registration(execution_context& /*context*/,
uintmax_t /*native_handle*/, uintmax_t /*registration*/)
{
}
void handler_tracking::reactor_deregistration(execution_context& /*context*/,
uintmax_t /*native_handle*/, uintmax_t /*registration*/)
{
}
void handler_tracking::reactor_events(execution_context& /*context*/,
uintmax_t /*native_handle*/, unsigned /*events*/)
{
}
void handler_tracking::reactor_operation(
const tracked_handler& h, const char* op_name,
const asio::error_code& ec)
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|.%I64u|%s,ec=%.20s:%d\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|.%llu|%s,ec=%.20s:%d\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
h.id_, op_name, ec.category().name(), ec.value());
}
void handler_tracking::reactor_operation(
const tracked_handler& h, const char* op_name,
const asio::error_code& ec, std::size_t bytes_transferred)
{
handler_tracking_timestamp timestamp;
write_line(
#if defined(ASIO_WINDOWS)
"@asio|%I64u.%06I64u|.%I64u|%s,ec=%.20s:%d,bytes_transferred=%I64u\n",
#else // defined(ASIO_WINDOWS)
"@asio|%llu.%06llu|.%llu|%s,ec=%.20s:%d,bytes_transferred=%llu\n",
#endif // defined(ASIO_WINDOWS)
timestamp.seconds, timestamp.microseconds,
h.id_, op_name, ec.category().name(), ec.value(),
static_cast<uint64_t>(bytes_transferred));
}
void handler_tracking::write_line(const char* format, ...)
{
using namespace std; // For vsnprintf (or equivalent).
va_list args;
va_start(args, format);
char line[256] = "";
#if defined(ASIO_HAS_SNPRINTF)
int length = vsnprintf(line, sizeof(line), format, args);
#elif defined(ASIO_HAS_SECURE_RTL)
int length = vsprintf_s(line, sizeof(line), format, args);
#else // defined(ASIO_HAS_SECURE_RTL)
int length = vsprintf(line, format, args);
#endif // defined(ASIO_HAS_SECURE_RTL)
va_end(args);
#if defined(ASIO_WINDOWS_RUNTIME)
wchar_t wline[256] = L"";
mbstowcs_s(0, wline, sizeof(wline) / sizeof(wchar_t), line, length);
::OutputDebugStringW(wline);
#elif defined(ASIO_WINDOWS)
HANDLE stderr_handle = ::GetStdHandle(STD_ERROR_HANDLE);
DWORD bytes_written = 0;
::WriteFile(stderr_handle, line, length, &bytes_written, 0);
#else // defined(ASIO_WINDOWS)
::write(STDERR_FILENO, line, length);
#endif // defined(ASIO_WINDOWS)
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_ENABLE_HANDLER_TRACKING)
#endif // ASIO_DETAIL_IMPL_HANDLER_TRACKING_IPP
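// Piecing the format strings above together, a trace for one tracked
// asynchronous operation would look roughly like the following (timestamps,
// ids, and addresses fabricated for illustration):
//
//   @asio|1610000000.000001|0*1|socket@0x7f9c.async_receive
//   @asio|1610000000.000002|.1|non_blocking_recv,ec=system:0,bytes_transferred=5
//   @asio|1610000000.000003|>1|ec=system:0,bytes_transferred=5
//   @asio|1610000000.000004|<1|
//   @asio|1610000000.000005|!1|
//
// '*' marks handler creation, '.' a reactor-level operation, '>'/'<' bracket
// the handler invocation, and the final '!' (or '~' if the handler was
// destroyed without being invoked) comes from the completion destructor.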

View File

@@ -0,0 +1,205 @@
//
// detail/impl/io_uring_descriptor_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_IO_URING_DESCRIPTOR_SERVICE_IPP
#define ASIO_DETAIL_IMPL_IO_URING_DESCRIPTOR_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IO_URING)
#include "asio/error.hpp"
#include "asio/detail/io_uring_descriptor_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
io_uring_descriptor_service::io_uring_descriptor_service(
execution_context& context)
: execution_context_service_base<io_uring_descriptor_service>(context),
io_uring_service_(asio::use_service<io_uring_service>(context))
{
io_uring_service_.init_task();
}
void io_uring_descriptor_service::shutdown()
{
}
void io_uring_descriptor_service::construct(
io_uring_descriptor_service::implementation_type& impl)
{
impl.descriptor_ = -1;
impl.state_ = 0;
impl.io_object_data_ = 0;
}
void io_uring_descriptor_service::move_construct(
io_uring_descriptor_service::implementation_type& impl,
io_uring_descriptor_service::implementation_type& other_impl)
noexcept
{
impl.descriptor_ = other_impl.descriptor_;
other_impl.descriptor_ = -1;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
impl.io_object_data_ = other_impl.io_object_data_;
other_impl.io_object_data_ = 0;
}
void io_uring_descriptor_service::move_assign(
io_uring_descriptor_service::implementation_type& impl,
io_uring_descriptor_service& /*other_service*/,
io_uring_descriptor_service::implementation_type& other_impl)
{
destroy(impl);
impl.descriptor_ = other_impl.descriptor_;
other_impl.descriptor_ = -1;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
impl.io_object_data_ = other_impl.io_object_data_;
other_impl.io_object_data_ = 0;
}
void io_uring_descriptor_service::destroy(
io_uring_descriptor_service::implementation_type& impl)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((io_uring_service_.context(),
"descriptor", &impl, impl.descriptor_, "close"));
io_uring_service_.deregister_io_object(impl.io_object_data_);
asio::error_code ignored_ec;
descriptor_ops::close(impl.descriptor_, impl.state_, ignored_ec);
io_uring_service_.cleanup_io_object(impl.io_object_data_);
}
}
asio::error_code io_uring_descriptor_service::assign(
io_uring_descriptor_service::implementation_type& impl,
const native_handle_type& native_descriptor, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
ASIO_ERROR_LOCATION(ec);
return ec;
}
io_uring_service_.register_io_object(impl.io_object_data_);
impl.descriptor_ = native_descriptor;
impl.state_ = descriptor_ops::possible_dup;
ec = success_ec_;
return ec;
}
asio::error_code io_uring_descriptor_service::close(
io_uring_descriptor_service::implementation_type& impl,
asio::error_code& ec)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((io_uring_service_.context(),
"descriptor", &impl, impl.descriptor_, "close"));
io_uring_service_.deregister_io_object(impl.io_object_data_);
descriptor_ops::close(impl.descriptor_, impl.state_, ec);
io_uring_service_.cleanup_io_object(impl.io_object_data_);
}
else
{
ec = success_ec_;
}
// The descriptor is closed by the OS even if close() returns an error.
//
// Actually, POSIX says the state of the descriptor is unspecified. On
// Linux the descriptor is apparently closed anyway; e.g. see
// http://lkml.org/lkml/2005/9/10/129
construct(impl);
ASIO_ERROR_LOCATION(ec);
return ec;
}
io_uring_descriptor_service::native_handle_type
io_uring_descriptor_service::release(
io_uring_descriptor_service::implementation_type& impl)
{
native_handle_type descriptor = impl.descriptor_;
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((io_uring_service_.context(),
"descriptor", &impl, impl.descriptor_, "release"));
io_uring_service_.deregister_io_object(impl.io_object_data_);
io_uring_service_.cleanup_io_object(impl.io_object_data_);
construct(impl);
}
return descriptor;
}
asio::error_code io_uring_descriptor_service::cancel(
io_uring_descriptor_service::implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
ASIO_ERROR_LOCATION(ec);
return ec;
}
ASIO_HANDLER_OPERATION((io_uring_service_.context(),
"descriptor", &impl, impl.descriptor_, "cancel"));
io_uring_service_.cancel_ops(impl.io_object_data_);
ec = success_ec_;
return ec;
}
void io_uring_descriptor_service::start_op(
io_uring_descriptor_service::implementation_type& impl,
int op_type, io_uring_operation* op, bool is_continuation, bool noop)
{
if (!noop)
{
io_uring_service_.start_op(op_type,
impl.io_object_data_, op, is_continuation);
}
else
{
io_uring_service_.post_immediate_completion(op, is_continuation);
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IO_URING)
#endif // ASIO_DETAIL_IMPL_IO_URING_DESCRIPTOR_SERVICE_IPP

View File

@@ -0,0 +1,140 @@
//
// detail/impl/io_uring_file_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_IO_URING_FILE_SERVICE_IPP
#define ASIO_DETAIL_IMPL_IO_URING_FILE_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_FILE) \
&& defined(ASIO_HAS_IO_URING)
#include <cstring>
#include <sys/stat.h>
#include "asio/detail/io_uring_file_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
io_uring_file_service::io_uring_file_service(
execution_context& context)
: execution_context_service_base<io_uring_file_service>(context),
descriptor_service_(context)
{
}
void io_uring_file_service::shutdown()
{
descriptor_service_.shutdown();
}
asio::error_code io_uring_file_service::open(
io_uring_file_service::implementation_type& impl,
const char* path, file_base::flags open_flags,
asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
ASIO_ERROR_LOCATION(ec);
return ec;
}
descriptor_ops::state_type state = 0;
int fd = descriptor_ops::open(path, static_cast<int>(open_flags), 0777, ec);
if (fd < 0)
{
ASIO_ERROR_LOCATION(ec);
return ec;
}
// We're done. Take ownership of the file descriptor.
if (descriptor_service_.assign(impl, fd, ec))
{
asio::error_code ignored_ec;
descriptor_ops::close(fd, state, ignored_ec);
}
(void)::posix_fadvise(native_handle(impl), 0, 0,
impl.is_stream_ ? POSIX_FADV_SEQUENTIAL : POSIX_FADV_RANDOM);
ASIO_ERROR_LOCATION(ec);
return ec;
}
uint64_t io_uring_file_service::size(
const io_uring_file_service::implementation_type& impl,
asio::error_code& ec) const
{
struct stat s;
int result = ::fstat(native_handle(impl), &s);
descriptor_ops::get_last_error(ec, result != 0);
ASIO_ERROR_LOCATION(ec);
return !ec ? s.st_size : 0;
}
asio::error_code io_uring_file_service::resize(
io_uring_file_service::implementation_type& impl,
uint64_t n, asio::error_code& ec)
{
int result = ::ftruncate(native_handle(impl), n);
descriptor_ops::get_last_error(ec, result != 0);
ASIO_ERROR_LOCATION(ec);
return ec;
}
asio::error_code io_uring_file_service::sync_all(
io_uring_file_service::implementation_type& impl,
asio::error_code& ec)
{
int result = ::fsync(native_handle(impl));
descriptor_ops::get_last_error(ec, result != 0);
return ec;
}
asio::error_code io_uring_file_service::sync_data(
io_uring_file_service::implementation_type& impl,
asio::error_code& ec)
{
#if defined(_POSIX_SYNCHRONIZED_IO)
int result = ::fdatasync(native_handle(impl));
#else // defined(_POSIX_SYNCHRONIZED_IO)
int result = ::fsync(native_handle(impl));
#endif // defined(_POSIX_SYNCHRONIZED_IO)
descriptor_ops::get_last_error(ec, result != 0);
ASIO_ERROR_LOCATION(ec);
return ec;
}
uint64_t io_uring_file_service::seek(
io_uring_file_service::implementation_type& impl, int64_t offset,
file_base::seek_basis whence, asio::error_code& ec)
{
int64_t result = ::lseek(native_handle(impl), offset, whence);
descriptor_ops::get_last_error(ec, result < 0);
ASIO_ERROR_LOCATION(ec);
return !ec ? static_cast<uint64_t>(result) : 0;
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_FILE)
// && defined(ASIO_HAS_IO_URING)
#endif // ASIO_DETAIL_IMPL_IO_URING_FILE_SERVICE_IPP
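
For illustration, a sketch of how the operations above are reached through asio's public file API. It assumes a build with ASIO_HAS_FILE and ASIO_HAS_IO_URING and a writable /tmp; treat it as a sketch rather than canonical usage.

#include <asio.hpp>
#include <cstdint>

int main()
{
  asio::io_context ctx;
  asio::stream_file f(ctx, "/tmp/example.dat",
      asio::file_base::read_write | asio::file_base::create);

  f.resize(4096);                        // ::ftruncate via resize() above
  std::uint64_t n = f.size();            // ::fstat via size() above
  f.seek(0, asio::file_base::seek_end);  // ::lseek via seek() above
  (void)n;
}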

View File

@@ -0,0 +1,118 @@
//
// detail/impl/io_uring_service.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_IO_URING_SERVICE_HPP
#define ASIO_DETAIL_IMPL_IO_URING_SERVICE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#if defined(ASIO_HAS_IO_URING)
#include "asio/detail/scheduler.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
inline void io_uring_service::post_immediate_completion(
operation* op, bool is_continuation)
{
scheduler_.post_immediate_completion(op, is_continuation);
}
template <typename TimeTraits, typename Allocator>
void io_uring_service::add_timer_queue(
timer_queue<TimeTraits, Allocator>& queue)
{
do_add_timer_queue(queue);
}
template <typename TimeTraits, typename Allocator>
void io_uring_service::remove_timer_queue(
timer_queue<TimeTraits, Allocator>& queue)
{
do_remove_timer_queue(queue);
}
template <typename TimeTraits, typename Allocator>
void io_uring_service::schedule_timer(
timer_queue<TimeTraits, Allocator>& queue,
const typename TimeTraits::time_type& time,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& timer,
wait_op* op)
{
mutex::scoped_lock lock(mutex_);
if (shutdown_)
{
scheduler_.post_immediate_completion(op, false);
return;
}
bool earliest = queue.enqueue_timer(time, timer, op);
scheduler_.work_started();
if (earliest)
{
update_timeout();
post_submit_sqes_op(lock);
}
}
template <typename TimeTraits, typename Allocator>
std::size_t io_uring_service::cancel_timer(
timer_queue<TimeTraits, Allocator>& queue,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& timer,
std::size_t max_cancelled)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
scheduler_.post_deferred_completions(ops);
return n;
}
template <typename TimeTraits, typename Allocator>
void io_uring_service::cancel_timer_by_key(
timer_queue<TimeTraits, Allocator>& queue,
typename timer_queue<TimeTraits, Allocator>::per_timer_data* timer,
void* cancellation_key)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer_by_key(timer, ops, cancellation_key);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
template <typename TimeTraits, typename Allocator>
void io_uring_service::move_timer(timer_queue<TimeTraits, Allocator>& queue,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& target,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& source)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer(target, ops);
queue.move_timer(target, source);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IO_URING)
#endif // ASIO_DETAIL_IMPL_IO_URING_SERVICE_HPP
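
The timer hooks above are what asio's waitable timers should resolve to on this backend. A small sketch from the public-API side, as an illustration:

#include <asio.hpp>
#include <chrono>
#include <cstddef>

int main()
{
  asio::io_context ctx;

  // Enqueues via schedule_timer(); an earliest-deadline timer also
  // triggers update_timeout() on the io_uring service.
  asio::steady_timer t(ctx, std::chrono::seconds(5));
  t.async_wait([](const asio::error_code& ec)
      {
        // ec == asio::error::operation_aborted when cancelled.
      });

  // cancel() should map onto cancel_timer(), returning the number of
  // waits that were aborted.
  std::size_t n = t.cancel();
  (void)n;

  ctx.run();
}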

View File

@@ -0,0 +1,918 @@
//
// detail/impl/io_uring_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_IO_URING_SERVICE_IPP
#define ASIO_DETAIL_IMPL_IO_URING_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IO_URING)
#include <cstddef>
#include <sys/eventfd.h>
#include "asio/detail/io_uring_service.hpp"
#include "asio/detail/reactor_op.hpp"
#include "asio/detail/scheduler.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
io_uring_service::io_uring_service(asio::execution_context& ctx)
: execution_context_service_base<io_uring_service>(ctx),
scheduler_(use_service<scheduler>(ctx)),
mutex_(config(ctx).get("reactor", "registration_locking", true),
config(ctx).get("reactor", "registration_locking_spin_count", 0)),
outstanding_work_(0),
submit_sqes_op_(this),
pending_sqes_(0),
pending_submit_sqes_op_(false),
shutdown_(false),
io_locking_(config(ctx).get("reactor", "io_locking", true)),
io_locking_spin_count_(
config(ctx).get("reactor", "io_locking_spin_count", 0)),
timeout_(),
registration_mutex_(mutex_.enabled()),
registered_io_objects_(execution_context::allocator<void>(ctx),
config(ctx).get("reactor", "preallocated_io_objects", 0U),
io_locking_, io_locking_spin_count_),
reactor_(use_service<reactor>(ctx)),
reactor_data_(),
event_fd_(-1)
{
reactor_.init_task();
init_ring();
register_with_reactor();
}
io_uring_service::~io_uring_service()
{
if (ring_.ring_fd != -1)
::io_uring_queue_exit(&ring_);
if (event_fd_ != -1)
::close(event_fd_);
}
void io_uring_service::shutdown()
{
mutex::scoped_lock lock(mutex_);
shutdown_ = true;
lock.unlock();
op_queue<operation> ops;
// Cancel all outstanding operations.
while (io_object* io_obj = registered_io_objects_.first())
{
for (int i = 0; i < max_ops; ++i)
{
if (!io_obj->queues_[i].op_queue_.empty())
{
ops.push(io_obj->queues_[i].op_queue_);
if (::io_uring_sqe* sqe = get_sqe())
::io_uring_prep_cancel(sqe, &io_obj->queues_[i], 0);
}
}
io_obj->shutdown_ = true;
registered_io_objects_.free(io_obj);
}
// Cancel the timeout operation.
if (::io_uring_sqe* sqe = get_sqe())
::io_uring_prep_cancel(sqe, &timeout_, IOSQE_IO_DRAIN);
submit_sqes();
// Wait for all completions to come back.
for (; outstanding_work_ > 0; --outstanding_work_)
{
::io_uring_cqe* cqe = 0;
if (::io_uring_wait_cqe(&ring_, &cqe) != 0)
break;
}
timer_queues_.get_all_timers(ops);
scheduler_.abandon_operations(ops);
}
void io_uring_service::notify_fork(
asio::execution_context::fork_event fork_ev)
{
switch (fork_ev)
{
case asio::execution_context::fork_prepare:
{
// Cancel all outstanding operations. They will be restarted
// after the fork completes.
mutex::scoped_lock registration_lock(registration_mutex_);
for (io_object* io_obj = registered_io_objects_.first();
io_obj != 0; io_obj = io_obj->next_)
{
mutex::scoped_lock io_object_lock(io_obj->mutex_);
for (int i = 0; i < max_ops; ++i)
{
if (!io_obj->queues_[i].op_queue_.empty()
&& !io_obj->queues_[i].cancel_requested_)
{
mutex::scoped_lock lock(mutex_);
if (::io_uring_sqe* sqe = get_sqe())
::io_uring_prep_cancel(sqe, &io_obj->queues_[i], 0);
}
}
}
// Cancel the timeout operation.
{
mutex::scoped_lock lock(mutex_);
if (::io_uring_sqe* sqe = get_sqe())
::io_uring_prep_cancel(sqe, &timeout_, IOSQE_IO_DRAIN);
submit_sqes();
}
// Wait for all completions to come back, and post all completed I/O
// queues to the scheduler. Note that some operations may have already
// completed, or may have been explicitly cancelled. All others will be
// automatically restarted.
op_queue<operation> ops;
for (; outstanding_work_ > 0; --outstanding_work_)
{
::io_uring_cqe* cqe = 0;
if (::io_uring_wait_cqe(&ring_, &cqe) != 0)
break;
if (void* ptr = ::io_uring_cqe_get_data(cqe))
{
if (ptr != this && ptr != &timer_queues_ && ptr != &timeout_)
{
io_queue* io_q = static_cast<io_queue*>(ptr);
io_q->set_result(cqe->res);
ops.push(io_q);
}
}
}
scheduler_.post_deferred_completions(ops);
// Restart the eventfd operation.
register_with_reactor();
}
break;
case asio::execution_context::fork_parent:
// Restart the timeout and eventfd operations.
update_timeout();
register_with_reactor();
break;
case asio::execution_context::fork_child:
{
// The child process gets a new io_uring instance.
::io_uring_queue_exit(&ring_);
init_ring();
register_with_reactor();
}
break;
default:
break;
}
}
void io_uring_service::init_task()
{
scheduler_.init_task();
}
void io_uring_service::register_io_object(
io_uring_service::per_io_object_data& io_obj)
{
io_obj = allocate_io_object();
mutex::scoped_lock io_object_lock(io_obj->mutex_);
io_obj->service_ = this;
io_obj->shutdown_ = false;
for (int i = 0; i < max_ops; ++i)
{
io_obj->queues_[i].io_object_ = io_obj;
io_obj->queues_[i].cancel_requested_ = false;
}
}
void io_uring_service::register_internal_io_object(
io_uring_service::per_io_object_data& io_obj,
int op_type, io_uring_operation* op)
{
io_obj = allocate_io_object();
mutex::scoped_lock io_object_lock(io_obj->mutex_);
io_obj->service_ = this;
io_obj->shutdown_ = false;
for (int i = 0; i < max_ops; ++i)
{
io_obj->queues_[i].io_object_ = io_obj;
io_obj->queues_[i].cancel_requested_ = false;
}
io_obj->queues_[op_type].op_queue_.push(op);
io_object_lock.unlock();
mutex::scoped_lock lock(mutex_);
if (::io_uring_sqe* sqe = get_sqe())
{
op->prepare(sqe);
::io_uring_sqe_set_data(sqe, &io_obj->queues_[op_type]);
post_submit_sqes_op(lock);
}
else
{
asio::error_code ec(ENOBUFS,
asio::error::get_system_category());
asio::detail::throw_error(ec, "io_uring_get_sqe");
}
}
void io_uring_service::register_buffers(const ::iovec* v, unsigned n)
{
int result = ::io_uring_register_buffers(&ring_, v, n);
if (result < 0)
{
asio::error_code ec(-result,
asio::error::get_system_category());
asio::detail::throw_error(ec, "io_uring_register_buffers");
}
}
void io_uring_service::unregister_buffers()
{
(void)::io_uring_unregister_buffers(&ring_);
}
void io_uring_service::start_op(int op_type,
io_uring_service::per_io_object_data& io_obj,
io_uring_operation* op, bool is_continuation)
{
if (!io_obj)
{
op->ec_ = asio::error::bad_descriptor;
post_immediate_completion(op, is_continuation);
return;
}
mutex::scoped_lock io_object_lock(io_obj->mutex_);
if (io_obj->shutdown_)
{
io_object_lock.unlock();
post_immediate_completion(op, is_continuation);
return;
}
if (io_obj->queues_[op_type].op_queue_.empty())
{
if (op->perform(false))
{
io_object_lock.unlock();
scheduler_.post_immediate_completion(op, is_continuation);
}
else
{
io_obj->queues_[op_type].op_queue_.push(op);
io_object_lock.unlock();
mutex::scoped_lock lock(mutex_);
if (::io_uring_sqe* sqe = get_sqe())
{
op->prepare(sqe);
::io_uring_sqe_set_data(sqe, &io_obj->queues_[op_type]);
scheduler_.work_started();
post_submit_sqes_op(lock);
}
else
{
lock.unlock();
io_obj->queues_[op_type].set_result(-ENOBUFS);
post_immediate_completion(&io_obj->queues_[op_type], is_continuation);
}
}
}
else
{
io_obj->queues_[op_type].op_queue_.push(op);
scheduler_.work_started();
}
}
void io_uring_service::cancel_ops(io_uring_service::per_io_object_data& io_obj)
{
if (!io_obj)
return;
mutex::scoped_lock io_object_lock(io_obj->mutex_);
op_queue<operation> ops;
do_cancel_ops(io_obj, ops);
io_object_lock.unlock();
scheduler_.post_deferred_completions(ops);
}
void io_uring_service::cancel_ops_by_key(
io_uring_service::per_io_object_data& io_obj,
int op_type, void* cancellation_key)
{
if (!io_obj)
return;
mutex::scoped_lock io_object_lock(io_obj->mutex_);
bool first = true;
op_queue<operation> ops;
op_queue<io_uring_operation> other_ops;
while (io_uring_operation* op = io_obj->queues_[op_type].op_queue_.front())
{
io_obj->queues_[op_type].op_queue_.pop();
if (op->cancellation_key_ == cancellation_key)
{
if (first)
{
other_ops.push(op);
if (!io_obj->queues_[op_type].cancel_requested_)
{
io_obj->queues_[op_type].cancel_requested_ = true;
mutex::scoped_lock lock(mutex_);
if (::io_uring_sqe* sqe = get_sqe())
{
::io_uring_prep_cancel(sqe, &io_obj->queues_[op_type], 0);
submit_sqes();
}
}
}
else
{
op->ec_ = asio::error::operation_aborted;
ops.push(op);
}
}
else
other_ops.push(op);
first = false;
}
io_obj->queues_[op_type].op_queue_.push(other_ops);
io_object_lock.unlock();
scheduler_.post_deferred_completions(ops);
}
void io_uring_service::deregister_io_object(
io_uring_service::per_io_object_data& io_obj)
{
if (!io_obj)
return;
mutex::scoped_lock io_object_lock(io_obj->mutex_);
if (!io_obj->shutdown_)
{
op_queue<operation> ops;
bool pending_cancelled_ops = do_cancel_ops(io_obj, ops);
io_obj->shutdown_ = true;
io_object_lock.unlock();
scheduler_.post_deferred_completions(ops);
if (pending_cancelled_ops)
{
// There are still pending operations. Prevent cleanup_io_object from
// freeing the I/O object and let the last operation to complete free it.
io_obj = 0;
}
else
{
// Leave io_obj set so that it will be freed by the subsequent call to
// cleanup_io_object.
}
}
else
{
// We are shutting down, so prevent cleanup_io_object from freeing
// the I/O object and let the destructor free it instead.
io_obj = 0;
}
}
void io_uring_service::cleanup_io_object(
io_uring_service::per_io_object_data& io_obj)
{
if (io_obj)
{
free_io_object(io_obj);
io_obj = 0;
}
}
void io_uring_service::run(long usec, op_queue<operation>& ops)
{
__kernel_timespec ts;
int local_ops = 0;
if (usec > 0)
{
ts.tv_sec = usec / 1000000;
ts.tv_nsec = (usec % 1000000) * 1000;
mutex::scoped_lock lock(mutex_);
if (::io_uring_sqe* sqe = get_sqe())
{
++local_ops;
::io_uring_prep_timeout(sqe, &ts, 0, 0);
::io_uring_sqe_set_data(sqe, &ts);
submit_sqes();
}
}
::io_uring_cqe* cqe = 0;
int result = (usec == 0)
? ::io_uring_peek_cqe(&ring_, &cqe)
: ::io_uring_wait_cqe(&ring_, &cqe);
if (local_ops > 0)
{
if (result != 0 || ::io_uring_cqe_get_data(cqe) != &ts)
{
mutex::scoped_lock lock(mutex_);
if (::io_uring_sqe* sqe = get_sqe())
{
++local_ops;
::io_uring_prep_timeout_remove(sqe, reinterpret_cast<__u64>(&ts), 0);
::io_uring_sqe_set_data(sqe, &ts);
submit_sqes();
}
}
}
bool check_timers = false;
int count = 0;
while (result == 0 || local_ops > 0)
{
if (result == 0)
{
if (void* ptr = ::io_uring_cqe_get_data(cqe))
{
if (ptr == this)
{
// The io_uring service was interrupted.
}
else if (ptr == &timer_queues_)
{
check_timers = true;
}
else if (ptr == &timeout_)
{
check_timers = true;
timeout_.tv_sec = 0;
timeout_.tv_nsec = 0;
}
else if (ptr == &ts)
{
--local_ops;
}
else
{
io_queue* io_q = static_cast<io_queue*>(ptr);
io_q->set_result(cqe->res);
ops.push(io_q);
}
}
::io_uring_cqe_seen(&ring_, cqe);
++count;
}
result = (count < complete_batch_size || local_ops > 0)
? ::io_uring_peek_cqe(&ring_, &cqe) : -EAGAIN;
}
decrement(outstanding_work_, count);
if (check_timers)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.get_ready_timers(ops);
if (timeout_.tv_sec == 0 && timeout_.tv_nsec == 0)
{
timeout_ = get_timeout();
if (::io_uring_sqe* sqe = get_sqe())
{
::io_uring_prep_timeout(sqe, &timeout_, 0, 0);
::io_uring_sqe_set_data(sqe, &timeout_);
push_submit_sqes_op(ops);
}
}
}
}
void io_uring_service::interrupt()
{
mutex::scoped_lock lock(mutex_);
if (::io_uring_sqe* sqe = get_sqe())
{
::io_uring_prep_nop(sqe);
::io_uring_sqe_set_data(sqe, this);
}
submit_sqes();
}
void io_uring_service::init_ring()
{
int result = ::io_uring_queue_init(ring_size, &ring_, 0);
if (result < 0)
{
ring_.ring_fd = -1;
asio::error_code ec(-result,
asio::error::get_system_category());
asio::detail::throw_error(ec, "io_uring_queue_init");
}
#if !defined(ASIO_HAS_IO_URING_AS_DEFAULT)
event_fd_ = ::eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
if (event_fd_ < 0)
{
asio::error_code ec(errno,
asio::error::get_system_category());
::io_uring_queue_exit(&ring_);
asio::detail::throw_error(ec, "eventfd");
}
result = ::io_uring_register_eventfd(&ring_, event_fd_);
if (result < 0)
{
::close(event_fd_);
::io_uring_queue_exit(&ring_);
asio::error_code ec(-result,
asio::error::get_system_category());
asio::detail::throw_error(ec, "io_uring_queue_init");
}
#endif // !defined(ASIO_HAS_IO_URING_AS_DEFAULT)
}
#if !defined(ASIO_HAS_IO_URING_AS_DEFAULT)
class io_uring_service::event_fd_read_op :
public reactor_op
{
public:
event_fd_read_op(io_uring_service* s)
: reactor_op(asio::error_code(),
&event_fd_read_op::do_perform, event_fd_read_op::do_complete),
service_(s)
{
}
static status do_perform(reactor_op* base)
{
event_fd_read_op* o(static_cast<event_fd_read_op*>(base));
for (;;)
{
// Only perform one read. The kernel maintains an atomic counter.
uint64_t counter(0);
errno = 0;
int bytes_read = ::read(o->service_->event_fd_,
&counter, sizeof(uint64_t));
if (bytes_read < 0 && errno == EINTR)
continue;
break;
}
op_queue<operation> ops;
o->service_->run(0, ops);
o->service_->scheduler_.post_deferred_completions(ops);
return not_done;
}
static void do_complete(void* /*owner*/, operation* base,
const asio::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
event_fd_read_op* o(static_cast<event_fd_read_op*>(base));
delete o;
}
private:
io_uring_service* service_;
};
#endif // !defined(ASIO_HAS_IO_URING_AS_DEFAULT)
void io_uring_service::register_with_reactor()
{
#if !defined(ASIO_HAS_IO_URING_AS_DEFAULT)
reactor_.register_internal_descriptor(reactor::read_op,
event_fd_, reactor_data_, new event_fd_read_op(this));
#endif // !defined(ASIO_HAS_IO_URING_AS_DEFAULT)
}
io_uring_service::io_object* io_uring_service::allocate_io_object()
{
mutex::scoped_lock registration_lock(registration_mutex_);
return registered_io_objects_.alloc(io_locking_, io_locking_spin_count_);
}
void io_uring_service::free_io_object(io_uring_service::io_object* io_obj)
{
mutex::scoped_lock registration_lock(registration_mutex_);
registered_io_objects_.free(io_obj);
}
bool io_uring_service::do_cancel_ops(
per_io_object_data& io_obj, op_queue<operation>& ops)
{
bool cancel_op = false;
for (int i = 0; i < max_ops; ++i)
{
if (io_uring_operation* first_op = io_obj->queues_[i].op_queue_.front())
{
cancel_op = true;
io_obj->queues_[i].op_queue_.pop();
while (io_uring_operation* op = io_obj->queues_[i].op_queue_.front())
{
op->ec_ = asio::error::operation_aborted;
io_obj->queues_[i].op_queue_.pop();
ops.push(op);
}
io_obj->queues_[i].op_queue_.push(first_op);
}
}
if (cancel_op)
{
mutex::scoped_lock lock(mutex_);
for (int i = 0; i < max_ops; ++i)
{
if (!io_obj->queues_[i].op_queue_.empty()
&& !io_obj->queues_[i].cancel_requested_)
{
io_obj->queues_[i].cancel_requested_ = true;
if (::io_uring_sqe* sqe = get_sqe())
::io_uring_prep_cancel(sqe, &io_obj->queues_[i], 0);
}
}
submit_sqes();
}
return cancel_op;
}
void io_uring_service::do_add_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.insert(&queue);
}
void io_uring_service::do_remove_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.erase(&queue);
}
void io_uring_service::update_timeout()
{
if (::io_uring_sqe* sqe = get_sqe())
{
::io_uring_prep_timeout_remove(sqe, reinterpret_cast<__u64>(&timeout_), 0);
::io_uring_sqe_set_data(sqe, &timer_queues_);
}
}
__kernel_timespec io_uring_service::get_timeout() const
{
__kernel_timespec ts;
long usec = timer_queues_.wait_duration_usec(5 * 60 * 1000 * 1000);
ts.tv_sec = usec / 1000000;
ts.tv_nsec = usec ? (usec % 1000000) * 1000 : 1;
return ts;
}
::io_uring_sqe* io_uring_service::get_sqe()
{
::io_uring_sqe* sqe = ::io_uring_get_sqe(&ring_);
if (!sqe)
{
submit_sqes();
sqe = ::io_uring_get_sqe(&ring_);
}
if (sqe)
{
::io_uring_sqe_set_data(sqe, 0);
++pending_sqes_;
}
return sqe;
}
void io_uring_service::submit_sqes()
{
if (pending_sqes_ != 0)
{
int result = ::io_uring_submit(&ring_);
if (result > 0)
{
pending_sqes_ -= result;
increment(outstanding_work_, result);
}
}
}
void io_uring_service::post_submit_sqes_op(mutex::scoped_lock& lock)
{
if (pending_sqes_ >= submit_batch_size)
{
submit_sqes();
}
else if (pending_sqes_ != 0 && !pending_submit_sqes_op_)
{
pending_submit_sqes_op_ = true;
lock.unlock();
scheduler_.post_immediate_completion(&submit_sqes_op_, false);
}
}
void io_uring_service::push_submit_sqes_op(op_queue<operation>& ops)
{
if (pending_sqes_ != 0 && !pending_submit_sqes_op_)
{
pending_submit_sqes_op_ = true;
ops.push(&submit_sqes_op_);
scheduler_.compensating_work_started();
}
}
io_uring_service::submit_sqes_op::submit_sqes_op(io_uring_service* s)
: operation(&io_uring_service::submit_sqes_op::do_complete),
service_(s)
{
}
void io_uring_service::submit_sqes_op::do_complete(void* owner, operation* base,
const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/)
{
if (owner)
{
submit_sqes_op* o = static_cast<submit_sqes_op*>(base);
mutex::scoped_lock lock(o->service_->mutex_);
o->service_->submit_sqes();
if (o->service_->pending_sqes_ != 0)
o->service_->scheduler_.post_immediate_completion(o, true);
else
o->service_->pending_submit_sqes_op_ = false;
}
}
io_uring_service::io_queue::io_queue()
: operation(&io_uring_service::io_queue::do_complete)
{
}
struct io_uring_service::perform_io_cleanup_on_block_exit
{
explicit perform_io_cleanup_on_block_exit(io_uring_service* s)
: service_(s), io_object_to_free_(0), first_op_(0)
{
}
~perform_io_cleanup_on_block_exit()
{
if (io_object_to_free_)
{
mutex::scoped_lock lock(service_->mutex_);
service_->free_io_object(io_object_to_free_);
}
if (first_op_)
{
// Post the remaining completed operations for invocation.
if (!ops_.empty())
service_->scheduler_.post_deferred_completions(ops_);
// A user-initiated operation has completed, but there's no need to
// explicitly call work_finished() here. Instead, we'll take advantage of
// the fact that the scheduler will call work_finished() once we return.
}
else
{
// No user-initiated operations have completed, so we need to compensate
// for the work_finished() call that the scheduler will make once this
// operation returns.
service_->scheduler_.compensating_work_started();
}
}
io_uring_service* service_;
io_object* io_object_to_free_;
op_queue<operation> ops_;
operation* first_op_;
};
operation* io_uring_service::io_queue::perform_io(int result)
{
perform_io_cleanup_on_block_exit io_cleanup(io_object_->service_);
mutex::scoped_lock io_object_lock(io_object_->mutex_);
if (result != -ECANCELED || cancel_requested_)
{
if (io_uring_operation* op = op_queue_.front())
{
if (result < 0)
{
op->ec_.assign(-result, asio::error::get_system_category());
op->bytes_transferred_ = 0;
}
else
{
op->ec_.assign(0, op->ec_.category());
op->bytes_transferred_ = static_cast<std::size_t>(result);
}
}
while (io_uring_operation* op = op_queue_.front())
{
if (op->perform(io_cleanup.ops_.empty()))
{
op_queue_.pop();
io_cleanup.ops_.push(op);
}
else
break;
}
}
cancel_requested_ = false;
if (!op_queue_.empty())
{
io_uring_service* service = io_object_->service_;
mutex::scoped_lock lock(service->mutex_);
if (::io_uring_sqe* sqe = service->get_sqe())
{
op_queue_.front()->prepare(sqe);
::io_uring_sqe_set_data(sqe, this);
service->post_submit_sqes_op(lock);
}
else
{
lock.unlock();
while (io_uring_operation* op = op_queue_.front())
{
op->ec_ = asio::error::no_buffer_space;
op_queue_.pop();
io_cleanup.ops_.push(op);
}
}
}
// The last operation to complete on a shut down object must free it.
if (io_object_->shutdown_)
{
io_cleanup.io_object_to_free_ = io_object_;
for (int i = 0; i < max_ops; ++i)
if (!io_object_->queues_[i].op_queue_.empty())
io_cleanup.io_object_to_free_ = 0;
}
// The first operation will be returned for completion now. The others will
// be posted for later by the io_cleanup object's destructor.
io_cleanup.first_op_ = io_cleanup.ops_.front();
io_cleanup.ops_.pop();
return io_cleanup.first_op_;
}
void io_uring_service::io_queue::do_complete(void* owner, operation* base,
const asio::error_code& ec, std::size_t bytes_transferred)
{
if (owner)
{
io_queue* io_q = static_cast<io_queue*>(base);
int result = static_cast<int>(bytes_transferred);
if (operation* op = io_q->perform_io(result))
{
op->complete(owner, ec, 0);
}
}
}
io_uring_service::io_object::io_object(bool locking, int spin_count)
: mutex_(locking, spin_count)
{
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IO_URING)
#endif // ASIO_DETAIL_IMPL_IO_URING_SERVICE_IPP
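
As a standalone illustration of the eventfd wiring performed by init_ring() and register_with_reactor(), here is a minimal liburing sketch; it assumes liburing and an io_uring-capable kernel, and abbreviates error handling.

#include <liburing.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main()
{
  ::io_uring ring;
  if (::io_uring_queue_init(128, &ring, 0) < 0)
    return 1;

  // The eventfd becomes readable whenever a completion is posted, so a
  // select/poll/epoll-based reactor can watch a single descriptor.
  int efd = ::eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
  if (efd < 0 || ::io_uring_register_eventfd(&ring, efd) < 0)
  {
    ::io_uring_queue_exit(&ring);
    return 1;
  }

  ::io_uring_unregister_eventfd(&ring);
  ::close(efd);
  ::io_uring_queue_exit(&ring);
}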

View File

@@ -0,0 +1,249 @@
//
// detail/io_uring_socket_service_base.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_IO_URING_SOCKET_SERVICE_BASE_IPP
#define ASIO_DETAIL_IMPL_IO_URING_SOCKET_SERVICE_BASE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IO_URING)
#include "asio/detail/io_uring_socket_service_base.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
io_uring_socket_service_base::io_uring_socket_service_base(
execution_context& context)
: io_uring_service_(asio::use_service<io_uring_service>(context))
{
io_uring_service_.init_task();
}
void io_uring_socket_service_base::base_shutdown()
{
}
void io_uring_socket_service_base::construct(
io_uring_socket_service_base::base_implementation_type& impl)
{
impl.socket_ = invalid_socket;
impl.state_ = 0;
impl.io_object_data_ = 0;
}
void io_uring_socket_service_base::base_move_construct(
io_uring_socket_service_base::base_implementation_type& impl,
io_uring_socket_service_base::base_implementation_type& other_impl)
noexcept
{
impl.socket_ = other_impl.socket_;
other_impl.socket_ = invalid_socket;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
impl.io_object_data_ = other_impl.io_object_data_;
other_impl.io_object_data_ = 0;
}
void io_uring_socket_service_base::base_move_assign(
io_uring_socket_service_base::base_implementation_type& impl,
io_uring_socket_service_base& /*other_service*/,
io_uring_socket_service_base::base_implementation_type& other_impl)
{
destroy(impl);
impl.socket_ = other_impl.socket_;
other_impl.socket_ = invalid_socket;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
impl.io_object_data_ = other_impl.io_object_data_;
other_impl.io_object_data_ = 0;
}
void io_uring_socket_service_base::destroy(
io_uring_socket_service_base::base_implementation_type& impl)
{
if (impl.socket_ != invalid_socket)
{
ASIO_HANDLER_OPERATION((io_uring_service_.context(),
"socket", &impl, impl.socket_, "close"));
io_uring_service_.deregister_io_object(impl.io_object_data_);
asio::error_code ignored_ec;
socket_ops::close(impl.socket_, impl.state_, true, ignored_ec);
io_uring_service_.cleanup_io_object(impl.io_object_data_);
}
}
asio::error_code io_uring_socket_service_base::close(
io_uring_socket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((io_uring_service_.context(),
"socket", &impl, impl.socket_, "close"));
io_uring_service_.deregister_io_object(impl.io_object_data_);
socket_ops::close(impl.socket_, impl.state_, false, ec);
io_uring_service_.cleanup_io_object(impl.io_object_data_);
}
else
{
ec = success_ec_;
}
// The descriptor is closed by the OS even if close() returns an error.
//
// (Actually, POSIX says the state of the descriptor is unspecified. On
// Linux the descriptor is apparently closed anyway; e.g. see
// http://lkml.org/lkml/2005/9/10/129 for details.)
construct(impl);
return ec;
}
socket_type io_uring_socket_service_base::release(
io_uring_socket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return invalid_socket;
}
ASIO_HANDLER_OPERATION((io_uring_service_.context(),
"socket", &impl, impl.socket_, "release"));
io_uring_service_.deregister_io_object(impl.io_object_data_);
io_uring_service_.cleanup_io_object(impl.io_object_data_);
socket_type sock = impl.socket_;
construct(impl);
ec = success_ec_;
return sock;
}
asio::error_code io_uring_socket_service_base::cancel(
io_uring_socket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return ec;
}
ASIO_HANDLER_OPERATION((io_uring_service_.context(),
"socket", &impl, impl.socket_, "cancel"));
io_uring_service_.cancel_ops(impl.io_object_data_);
ec = success_ec_;
return ec;
}
asio::error_code io_uring_socket_service_base::do_open(
io_uring_socket_service_base::base_implementation_type& impl,
int af, int type, int protocol, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
return ec;
}
socket_holder sock(socket_ops::socket(af, type, protocol, ec));
if (sock.get() == invalid_socket)
return ec;
io_uring_service_.register_io_object(impl.io_object_data_);
impl.socket_ = sock.release();
switch (type)
{
case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break;
case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break;
default: impl.state_ = 0; break;
}
ec = success_ec_;
return ec;
}
asio::error_code io_uring_socket_service_base::do_assign(
io_uring_socket_service_base::base_implementation_type& impl, int type,
const io_uring_socket_service_base::native_handle_type& native_socket,
asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
return ec;
}
io_uring_service_.register_io_object(impl.io_object_data_);
impl.socket_ = native_socket;
switch (type)
{
case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break;
case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break;
default: impl.state_ = 0; break;
}
impl.state_ |= socket_ops::possible_dup;
ec = success_ec_;
return ec;
}
void io_uring_socket_service_base::start_op(
io_uring_socket_service_base::base_implementation_type& impl,
int op_type, io_uring_operation* op, bool is_continuation, bool noop)
{
if (!noop)
{
io_uring_service_.start_op(op_type,
impl.io_object_data_, op, is_continuation);
}
else
{
io_uring_service_.post_immediate_completion(op, is_continuation);
}
}
void io_uring_socket_service_base::start_accept_op(
io_uring_socket_service_base::base_implementation_type& impl,
io_uring_operation* op, bool is_continuation, bool peer_is_open)
{
if (!peer_is_open)
start_op(impl, io_uring_service::read_op, op, is_continuation, false);
else
{
op->ec_ = asio::error::already_open;
io_uring_service_.post_immediate_completion(op, is_continuation);
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IO_URING)
#endif // ASIO_DETAIL_IMPL_IO_URING_SOCKET_SERVICE_BASE_IPP

View File

@@ -0,0 +1,117 @@
//
// detail/impl/kqueue_reactor.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2005 Stefan Arentz (stefan at soze dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP
#define ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_KQUEUE)
#include "asio/detail/scheduler.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
inline void kqueue_reactor::post_immediate_completion(
operation* op, bool is_continuation) const
{
scheduler_.post_immediate_completion(op, is_continuation);
}
template <typename TimeTraits, typename Allocator>
void kqueue_reactor::add_timer_queue(timer_queue<TimeTraits, Allocator>& queue)
{
do_add_timer_queue(queue);
}
// Remove a timer queue from the reactor.
template <typename TimeTraits, typename Allocator>
void kqueue_reactor::remove_timer_queue(
timer_queue<TimeTraits, Allocator>& queue)
{
do_remove_timer_queue(queue);
}
template <typename TimeTraits, typename Allocator>
void kqueue_reactor::schedule_timer(timer_queue<TimeTraits, Allocator>& queue,
const typename TimeTraits::time_type& time,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& timer,
wait_op* op)
{
mutex::scoped_lock lock(mutex_);
if (shutdown_)
{
scheduler_.post_immediate_completion(op, false);
return;
}
bool earliest = queue.enqueue_timer(time, timer, op);
scheduler_.work_started();
if (earliest)
interrupt();
}
template <typename TimeTraits, typename Allocator>
std::size_t kqueue_reactor::cancel_timer(
timer_queue<TimeTraits, Allocator>& queue,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& timer,
std::size_t max_cancelled)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
scheduler_.post_deferred_completions(ops);
return n;
}
template <typename TimeTraits, typename Allocator>
void kqueue_reactor::cancel_timer_by_key(
timer_queue<TimeTraits, Allocator>& queue,
typename timer_queue<TimeTraits, Allocator>::per_timer_data* timer,
void* cancellation_key)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer_by_key(timer, ops, cancellation_key);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
template <typename TimeTraits, typename Allocator>
void kqueue_reactor::move_timer(timer_queue<TimeTraits, Allocator>& queue,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& target,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& source)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer(target, ops);
queue.move_timer(target, source);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_KQUEUE)
#endif // ASIO_DETAIL_IMPL_KQUEUE_REACTOR_HPP
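
cancel_timer_by_key() above supports per-operation cancellation. A hedged sketch using asio's cancellation slots, which should exercise that path on this reactor (assumes standalone asio, C++17 or later):

#include <asio.hpp>
#include <chrono>

int main()
{
  asio::io_context ctx;
  asio::steady_timer t(ctx, std::chrono::minutes(1));

  // Binding a cancellation slot lets this one wait be cancelled
  // without disturbing other waits on the same timer.
  asio::cancellation_signal sig;
  t.async_wait(asio::bind_cancellation_slot(sig.slot(),
      [](const asio::error_code&) {}));

  sig.emit(asio::cancellation_type::total);
  ctx.run();
}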

View File

@@ -0,0 +1,614 @@
//
// detail/impl/kqueue_reactor.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2005 Stefan Arentz (stefan at soze dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP
#define ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_KQUEUE)
#include "asio/config.hpp"
#include "asio/detail/kqueue_reactor.hpp"
#include "asio/detail/scheduler.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#if defined(__NetBSD__)
# include <sys/param.h>
#endif
#include "asio/detail/push_options.hpp"
#if defined(__NetBSD__) && __NetBSD_Version__ < 999001500
# define ASIO_KQUEUE_EV_SET(ev, ident, filt, flags, fflags, data, udata) \
EV_SET(ev, ident, filt, flags, fflags, data, \
reinterpret_cast<intptr_t>(static_cast<void*>(udata)))
#else
# define ASIO_KQUEUE_EV_SET(ev, ident, filt, flags, fflags, data, udata) \
EV_SET(ev, ident, filt, flags, fflags, data, udata)
#endif
namespace asio {
namespace detail {
kqueue_reactor::kqueue_reactor(asio::execution_context& ctx)
: execution_context_service_base<kqueue_reactor>(ctx),
scheduler_(use_service<scheduler>(ctx)),
mutex_(config(ctx).get("reactor", "registration_locking", true),
config(ctx).get("reactor", "registration_locking_spin_count", 0)),
kqueue_fd_(do_kqueue_create()),
interrupter_(),
shutdown_(false),
io_locking_(config(ctx).get("reactor", "io_locking", true)),
io_locking_spin_count_(
config(ctx).get("reactor", "io_locking_spin_count", 0)),
registered_descriptors_mutex_(mutex_.enabled()),
registered_descriptors_(execution_context::allocator<void>(ctx),
config(ctx).get("reactor", "preallocated_io_objects", 0U),
io_locking_, io_locking_spin_count_)
{
struct kevent events[1];
ASIO_KQUEUE_EV_SET(&events[0], interrupter_.read_descriptor(),
EVFILT_READ, EV_ADD, 0, 0, &interrupter_);
if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1)
{
asio::error_code error(errno,
asio::error::get_system_category());
asio::detail::throw_error(error);
}
}
kqueue_reactor::~kqueue_reactor()
{
close(kqueue_fd_);
}
void kqueue_reactor::shutdown()
{
mutex::scoped_lock lock(mutex_);
shutdown_ = true;
lock.unlock();
op_queue<operation> ops;
while (descriptor_state* state = registered_descriptors_.first())
{
for (int i = 0; i < max_ops; ++i)
ops.push(state->op_queue_[i]);
state->shutdown_ = true;
registered_descriptors_.free(state);
}
timer_queues_.get_all_timers(ops);
scheduler_.abandon_operations(ops);
}
void kqueue_reactor::notify_fork(
asio::execution_context::fork_event fork_ev)
{
if (fork_ev == asio::execution_context::fork_child)
{
// The kqueue descriptor is automatically closed in the child.
kqueue_fd_ = -1;
kqueue_fd_ = do_kqueue_create();
interrupter_.recreate();
struct kevent events[2];
ASIO_KQUEUE_EV_SET(&events[0], interrupter_.read_descriptor(),
EVFILT_READ, EV_ADD, 0, 0, &interrupter_);
if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1)
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "kqueue interrupter registration");
}
// Re-register all descriptors with kqueue.
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
for (descriptor_state* state = registered_descriptors_.first();
state != 0; state = state->next_)
{
if (state->num_kevents_ > 0)
{
ASIO_KQUEUE_EV_SET(&events[0], state->descriptor_,
EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, state);
ASIO_KQUEUE_EV_SET(&events[1], state->descriptor_,
EVFILT_WRITE, EV_ADD | EV_CLEAR, 0, 0, state);
if (::kevent(kqueue_fd_, events, state->num_kevents_, 0, 0, 0) == -1)
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "kqueue re-registration");
}
}
}
}
}
void kqueue_reactor::init_task()
{
scheduler_.init_task();
}
int kqueue_reactor::register_descriptor(socket_type descriptor,
kqueue_reactor::per_descriptor_data& descriptor_data)
{
descriptor_data = allocate_descriptor_state();
ASIO_HANDLER_REACTOR_REGISTRATION((
context(), static_cast<uintmax_t>(descriptor),
reinterpret_cast<uintmax_t>(descriptor_data)));
mutex::scoped_lock lock(descriptor_data->mutex_);
descriptor_data->descriptor_ = descriptor;
descriptor_data->num_kevents_ = 0;
descriptor_data->shutdown_ = false;
return 0;
}
int kqueue_reactor::register_internal_descriptor(
int op_type, socket_type descriptor,
kqueue_reactor::per_descriptor_data& descriptor_data, reactor_op* op)
{
descriptor_data = allocate_descriptor_state();
ASIO_HANDLER_REACTOR_REGISTRATION((
context(), static_cast<uintmax_t>(descriptor),
reinterpret_cast<uintmax_t>(descriptor_data)));
mutex::scoped_lock lock(descriptor_data->mutex_);
descriptor_data->descriptor_ = descriptor;
descriptor_data->num_kevents_ = 1;
descriptor_data->shutdown_ = false;
descriptor_data->op_queue_[op_type].push(op);
struct kevent events[1];
ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ,
EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
if (::kevent(kqueue_fd_, events, 1, 0, 0, 0) == -1)
return errno;
return 0;
}
void kqueue_reactor::move_descriptor(socket_type,
kqueue_reactor::per_descriptor_data& target_descriptor_data,
kqueue_reactor::per_descriptor_data& source_descriptor_data)
{
target_descriptor_data = source_descriptor_data;
source_descriptor_data = 0;
}
void kqueue_reactor::call_post_immediate_completion(
operation* op, bool is_continuation, const void* self)
{
static_cast<const kqueue_reactor*>(self)->post_immediate_completion(
op, is_continuation);
}
void kqueue_reactor::start_op(int op_type, socket_type descriptor,
kqueue_reactor::per_descriptor_data& descriptor_data, reactor_op* op,
bool is_continuation, bool allow_speculative,
void (*on_immediate)(operation*, bool, const void*),
const void* immediate_arg)
{
if (!descriptor_data)
{
op->ec_ = asio::error::bad_descriptor;
on_immediate(op, is_continuation, immediate_arg);
return;
}
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (descriptor_data->shutdown_)
{
on_immediate(op, is_continuation, immediate_arg);
return;
}
if (descriptor_data->op_queue_[op_type].empty())
{
static const int num_kevents[max_ops] = { 1, 2, 1 };
if (allow_speculative
&& (op_type != read_op
|| descriptor_data->op_queue_[except_op].empty()))
{
if (op->perform())
{
descriptor_lock.unlock();
on_immediate(op, is_continuation, immediate_arg);
return;
}
if (descriptor_data->num_kevents_ < num_kevents[op_type])
{
struct kevent events[2];
ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ,
EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE,
EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
if (::kevent(kqueue_fd_, events, num_kevents[op_type], 0, 0, 0) != -1)
{
descriptor_data->num_kevents_ = num_kevents[op_type];
}
else
{
op->ec_ = asio::error_code(errno,
asio::error::get_system_category());
on_immediate(op, is_continuation, immediate_arg);
return;
}
}
}
else
{
if (descriptor_data->num_kevents_ < num_kevents[op_type])
descriptor_data->num_kevents_ = num_kevents[op_type];
struct kevent events[2];
ASIO_KQUEUE_EV_SET(&events[0], descriptor, EVFILT_READ,
EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
ASIO_KQUEUE_EV_SET(&events[1], descriptor, EVFILT_WRITE,
EV_ADD | EV_CLEAR, 0, 0, descriptor_data);
::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0);
}
}
descriptor_data->op_queue_[op_type].push(op);
scheduler_.work_started();
}
void kqueue_reactor::cancel_ops(socket_type,
kqueue_reactor::per_descriptor_data& descriptor_data)
{
if (!descriptor_data)
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
{
while (reactor_op* op = descriptor_data->op_queue_[i].front())
{
op->ec_ = asio::error::operation_aborted;
descriptor_data->op_queue_[i].pop();
ops.push(op);
}
}
descriptor_lock.unlock();
scheduler_.post_deferred_completions(ops);
}
void kqueue_reactor::cancel_ops_by_key(socket_type,
kqueue_reactor::per_descriptor_data& descriptor_data,
int op_type, void* cancellation_key)
{
if (!descriptor_data)
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
op_queue<operation> ops;
op_queue<reactor_op> other_ops;
while (reactor_op* op = descriptor_data->op_queue_[op_type].front())
{
descriptor_data->op_queue_[op_type].pop();
if (op->cancellation_key_ == cancellation_key)
{
op->ec_ = asio::error::operation_aborted;
ops.push(op);
}
else
other_ops.push(op);
}
descriptor_data->op_queue_[op_type].push(other_ops);
descriptor_lock.unlock();
scheduler_.post_deferred_completions(ops);
}
void kqueue_reactor::deregister_descriptor(socket_type descriptor,
kqueue_reactor::per_descriptor_data& descriptor_data, bool closing)
{
if (!descriptor_data)
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (!descriptor_data->shutdown_)
{
if (closing)
{
// The descriptor will be automatically removed from the kqueue when it
// is closed.
}
else
{
struct kevent events[2];
ASIO_KQUEUE_EV_SET(&events[0], descriptor,
EVFILT_READ, EV_DELETE, 0, 0, 0);
ASIO_KQUEUE_EV_SET(&events[1], descriptor,
EVFILT_WRITE, EV_DELETE, 0, 0, 0);
::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0);
}
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
{
while (reactor_op* op = descriptor_data->op_queue_[i].front())
{
op->ec_ = asio::error::operation_aborted;
descriptor_data->op_queue_[i].pop();
ops.push(op);
}
}
descriptor_data->descriptor_ = -1;
descriptor_data->shutdown_ = true;
descriptor_lock.unlock();
ASIO_HANDLER_REACTOR_DEREGISTRATION((
context(), static_cast<uintmax_t>(descriptor),
reinterpret_cast<uintmax_t>(descriptor_data)));
scheduler_.post_deferred_completions(ops);
// Leave descriptor_data set so that it will be freed by the subsequent
// call to cleanup_descriptor_data.
}
else
{
// We are shutting down, so prevent cleanup_descriptor_data from freeing
// the descriptor_data object and let the destructor free it instead.
descriptor_data = 0;
}
}
void kqueue_reactor::deregister_internal_descriptor(socket_type descriptor,
kqueue_reactor::per_descriptor_data& descriptor_data)
{
if (!descriptor_data)
return;
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (!descriptor_data->shutdown_)
{
struct kevent events[2];
ASIO_KQUEUE_EV_SET(&events[0], descriptor,
EVFILT_READ, EV_DELETE, 0, 0, 0);
ASIO_KQUEUE_EV_SET(&events[1], descriptor,
EVFILT_WRITE, EV_DELETE, 0, 0, 0);
::kevent(kqueue_fd_, events, descriptor_data->num_kevents_, 0, 0, 0);
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
ops.push(descriptor_data->op_queue_[i]);
descriptor_data->descriptor_ = -1;
descriptor_data->shutdown_ = true;
descriptor_lock.unlock();
ASIO_HANDLER_REACTOR_DEREGISTRATION((
context(), static_cast<uintmax_t>(descriptor),
reinterpret_cast<uintmax_t>(descriptor_data)));
// Leave descriptor_data set so that it will be freed by the subsequent
// call to cleanup_descriptor_data.
}
else
{
// We are shutting down, so prevent cleanup_descriptor_data from freeing
// the descriptor_data object and let the destructor free it instead.
descriptor_data = 0;
}
}
void kqueue_reactor::cleanup_descriptor_data(
per_descriptor_data& descriptor_data)
{
if (descriptor_data)
{
free_descriptor_state(descriptor_data);
descriptor_data = 0;
}
}
void kqueue_reactor::run(long usec, op_queue<operation>& ops)
{
mutex::scoped_lock lock(mutex_);
// Determine how long to block while waiting for events.
timespec timeout_buf = { 0, 0 };
timespec* timeout = usec ? get_timeout(usec, timeout_buf) : &timeout_buf;
lock.unlock();
// Block on the kqueue descriptor.
struct kevent events[128];
int num_events = kevent(kqueue_fd_, 0, 0, events, 128, timeout);
#if defined(ASIO_ENABLE_HANDLER_TRACKING)
// Trace the waiting events.
for (int i = 0; i < num_events; ++i)
{
void* ptr = reinterpret_cast<void*>(events[i].udata);
if (ptr != &interrupter_)
{
unsigned event_mask = 0;
switch (events[i].filter)
{
case EVFILT_READ:
event_mask |= ASIO_HANDLER_REACTOR_READ_EVENT;
break;
case EVFILT_WRITE:
event_mask |= ASIO_HANDLER_REACTOR_WRITE_EVENT;
break;
}
if ((events[i].flags & (EV_ERROR | EV_OOBAND)) != 0)
event_mask |= ASIO_HANDLER_REACTOR_ERROR_EVENT;
ASIO_HANDLER_REACTOR_EVENTS((context(),
reinterpret_cast<uintmax_t>(ptr), event_mask));
}
}
#endif // defined(ASIO_ENABLE_HANDLER_TRACKING)
// Dispatch the waiting events.
for (int i = 0; i < num_events; ++i)
{
void* ptr = reinterpret_cast<void*>(events[i].udata);
if (ptr == &interrupter_)
{
interrupter_.reset();
}
else
{
descriptor_state* descriptor_data = static_cast<descriptor_state*>(ptr);
mutex::scoped_lock descriptor_lock(descriptor_data->mutex_);
if (events[i].filter == EVFILT_WRITE
&& descriptor_data->num_kevents_ == 2
&& descriptor_data->op_queue_[write_op].empty())
{
// Some descriptor types, like serial ports, don't seem to support
// EV_CLEAR with EVFILT_WRITE. Since we have no pending write
// operations we'll remove the EVFILT_WRITE registration here so that
// we don't end up in a tight spin.
struct kevent delete_events[1];
ASIO_KQUEUE_EV_SET(&delete_events[0],
descriptor_data->descriptor_, EVFILT_WRITE, EV_DELETE, 0, 0, 0);
::kevent(kqueue_fd_, delete_events, 1, 0, 0, 0);
descriptor_data->num_kevents_ = 1;
}
// Exception operations must be processed first to ensure that any
// out-of-band data is read before normal data.
#if defined(__NetBSD__)
static const unsigned int filter[max_ops] =
#else
static const int filter[max_ops] =
#endif
{ EVFILT_READ, EVFILT_WRITE, EVFILT_READ };
for (int j = max_ops - 1; j >= 0; --j)
{
if (events[i].filter == filter[j])
{
if (j != except_op || events[i].flags & EV_OOBAND)
{
while (reactor_op* op = descriptor_data->op_queue_[j].front())
{
if (events[i].flags & EV_ERROR)
{
op->ec_ = asio::error_code(
static_cast<int>(events[i].data),
asio::error::get_system_category());
descriptor_data->op_queue_[j].pop();
ops.push(op);
}
else if (op->perform())
{
descriptor_data->op_queue_[j].pop();
ops.push(op);
}
else
break;
}
}
}
}
}
}
lock.lock();
timer_queues_.get_ready_timers(ops);
}
void kqueue_reactor::interrupt()
{
interrupter_.interrupt();
}
int kqueue_reactor::do_kqueue_create()
{
int fd = ::kqueue();
if (fd == -1)
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "kqueue");
}
return fd;
}
kqueue_reactor::descriptor_state* kqueue_reactor::allocate_descriptor_state()
{
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
return registered_descriptors_.alloc(io_locking_, io_locking_spin_count_);
}
void kqueue_reactor::free_descriptor_state(kqueue_reactor::descriptor_state* s)
{
mutex::scoped_lock descriptors_lock(registered_descriptors_mutex_);
registered_descriptors_.free(s);
}
void kqueue_reactor::do_add_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.insert(&queue);
}
void kqueue_reactor::do_remove_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.erase(&queue);
}
timespec* kqueue_reactor::get_timeout(long usec, timespec& ts)
{
// By default we will wait no longer than 5 minutes. This ensures that
// any changes to the system clock are detected within that period.
const long max_usec = 5 * 60 * 1000 * 1000;
usec = timer_queues_.wait_duration_usec(
(usec < 0 || max_usec < usec) ? max_usec : usec);
ts.tv_sec = usec / 1000000;
ts.tv_nsec = (usec % 1000000) * 1000;
return &ts;
}
} // namespace detail
} // namespace asio
#undef ASIO_KQUEUE_EV_SET
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_KQUEUE)
#endif // ASIO_DETAIL_IMPL_KQUEUE_REACTOR_IPP

View File

@@ -0,0 +1,74 @@
//
// detail/impl/null_event.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_NULL_EVENT_IPP
#define ASIO_DETAIL_IMPL_NULL_EVENT_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_WINDOWS_RUNTIME)
# include <thread>
#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)
# include "asio/detail/socket_types.hpp"
#else
# include <unistd.h>
# if defined(__hpux)
# include <sys/time.h>
# endif
# if !defined(__hpux) || defined(__SELECT)
# include <sys/select.h>
# endif
#endif
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
void null_event::do_wait()
{
#if defined(ASIO_WINDOWS_RUNTIME)
std::this_thread::sleep_until((std::chrono::steady_clock::time_point::max)());
#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)
::Sleep(INFINITE);
#else
::pause();
#endif
}
void null_event::do_wait_for_usec(long usec)
{
#if defined(ASIO_WINDOWS_RUNTIME)
std::this_thread::sleep_for(std::chrono::microseconds(usec));
#elif defined(ASIO_WINDOWS) || defined(__CYGWIN__)
::Sleep(usec / 1000);
#elif defined(__hpux) && defined(__SELECT)
timespec ts;
ts.tv_sec = usec / 1000000;
ts.tv_nsec = (usec % 1000000) * 1000;
::pselect(0, 0, 0, 0, &ts, 0);
#else
timeval tv;
tv.tv_sec = usec / 1000000;
tv.tv_usec = usec % 1000000;
::select(0, 0, 0, 0, &tv);
#endif
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_NULL_EVENT_IPP

View File

@@ -0,0 +1,129 @@
//
// detail/impl/pipe_select_interrupter.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP
#define ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if !defined(ASIO_WINDOWS_RUNTIME)
#if !defined(ASIO_WINDOWS)
#if !defined(__CYGWIN__)
#if !defined(__SYMBIAN32__)
#if !defined(ASIO_HAS_EVENTFD)
#include <fcntl.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include "asio/detail/pipe_select_interrupter.hpp"
#include "asio/detail/socket_types.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
pipe_select_interrupter::pipe_select_interrupter()
{
open_descriptors();
}
void pipe_select_interrupter::open_descriptors()
{
int pipe_fds[2];
if (pipe(pipe_fds) == 0)
{
read_descriptor_ = pipe_fds[0];
::fcntl(read_descriptor_, F_SETFL, O_NONBLOCK);
write_descriptor_ = pipe_fds[1];
::fcntl(write_descriptor_, F_SETFL, O_NONBLOCK);
#if defined(FD_CLOEXEC)
::fcntl(read_descriptor_, F_SETFD, FD_CLOEXEC);
::fcntl(write_descriptor_, F_SETFD, FD_CLOEXEC);
#endif // defined(FD_CLOEXEC)
}
else
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "pipe_select_interrupter");
}
}
pipe_select_interrupter::~pipe_select_interrupter()
{
close_descriptors();
}
void pipe_select_interrupter::close_descriptors()
{
if (read_descriptor_ != -1)
::close(read_descriptor_);
if (write_descriptor_ != -1)
::close(write_descriptor_);
}
void pipe_select_interrupter::recreate()
{
close_descriptors();
write_descriptor_ = -1;
read_descriptor_ = -1;
open_descriptors();
}
void pipe_select_interrupter::interrupt()
{
char byte = 0;
signed_size_type result = ::write(write_descriptor_, &byte, 1);
(void)result;
}
bool pipe_select_interrupter::reset()
{
for (;;)
{
char data[1024];
signed_size_type bytes_read = ::read(read_descriptor_, data, sizeof(data));
if (bytes_read == sizeof(data))
continue;
if (bytes_read > 0)
return true;
if (bytes_read == 0)
return false;
if (errno == EINTR)
continue;
if (errno == EWOULDBLOCK || errno == EAGAIN)
return true;
return false;
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // !defined(ASIO_HAS_EVENTFD)
#endif // !defined(__SYMBIAN32__)
#endif // !defined(__CYGWIN__)
#endif // !defined(ASIO_WINDOWS)
#endif // !defined(ASIO_WINDOWS_RUNTIME)
#endif // ASIO_DETAIL_IMPL_PIPE_SELECT_INTERRUPTER_IPP

View File

@@ -0,0 +1,63 @@
//
// detail/impl/posix_event.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_POSIX_EVENT_IPP
#define ASIO_DETAIL_IMPL_POSIX_EVENT_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_PTHREADS)
#include "asio/detail/posix_event.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
posix_event::posix_event()
: state_(0)
{
#if (defined(__MACH__) && defined(__APPLE__)) \
|| (defined(__ANDROID__) && (__ANDROID_API__ < 21))
int error = ::pthread_cond_init(&cond_, 0);
#else // (defined(__MACH__) && defined(__APPLE__))
// || (defined(__ANDROID__) && (__ANDROID_API__ < 21))
::pthread_condattr_t attr;
int error = ::pthread_condattr_init(&attr);
if (error == 0)
{
error = ::pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
if (error == 0)
error = ::pthread_cond_init(&cond_, &attr);
::pthread_condattr_destroy(&attr);
}
#endif // (defined(__MACH__) && defined(__APPLE__))
// || (defined(__ANDROID__) && (__ANDROID_API__ < 21))
asio::error_code ec(error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "event");
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_PTHREADS)
#endif // ASIO_DETAIL_IMPL_POSIX_EVENT_IPP

View File

@@ -0,0 +1,46 @@
//
// detail/impl/posix_mutex.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP
#define ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_PTHREADS)
#include "asio/detail/posix_mutex.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
posix_mutex::posix_mutex()
{
int error = ::pthread_mutex_init(&mutex_, 0);
asio::error_code ec(error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "mutex");
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_PTHREADS)
#endif // ASIO_DETAIL_IMPL_POSIX_MUTEX_IPP

View File

@@ -0,0 +1,168 @@
//
// detail/impl/posix_serial_port_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_POSIX_SERIAL_PORT_SERVICE_IPP
#define ASIO_DETAIL_IMPL_POSIX_SERIAL_PORT_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_SERIAL_PORT)
#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
#include <cstring>
#include "asio/detail/posix_serial_port_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
posix_serial_port_service::posix_serial_port_service(
execution_context& context)
: execution_context_service_base<posix_serial_port_service>(context),
descriptor_service_(context)
{
}
void posix_serial_port_service::shutdown()
{
descriptor_service_.shutdown();
}
asio::error_code posix_serial_port_service::open(
posix_serial_port_service::implementation_type& impl,
const std::string& device, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
ASIO_ERROR_LOCATION(ec);
return ec;
}
descriptor_ops::state_type state = 0;
int fd = descriptor_ops::open(device.c_str(),
O_RDWR | O_NONBLOCK | O_NOCTTY, ec);
if (fd < 0)
{
ASIO_ERROR_LOCATION(ec);
return ec;
}
int s = descriptor_ops::fcntl(fd, F_GETFL, ec);
if (s >= 0)
s = descriptor_ops::fcntl(fd, F_SETFL, s | O_NONBLOCK, ec);
if (s < 0)
{
asio::error_code ignored_ec;
descriptor_ops::close(fd, state, ignored_ec);
ASIO_ERROR_LOCATION(ec);
return ec;
}
// Set up default serial port options.
termios ios;
s = ::tcgetattr(fd, &ios);
descriptor_ops::get_last_error(ec, s < 0);
if (s >= 0)
{
#if defined(_BSD_SOURCE) || defined(_DEFAULT_SOURCE)
::cfmakeraw(&ios);
#else
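// cfmakeraw() is unavailable, so clear the same flags it would clear:
// disable input/output processing, echo, canonical mode, signals and
// parity, and select 8-bit characters.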
ios.c_iflag &= ~(IGNBRK | BRKINT | PARMRK
| ISTRIP | INLCR | IGNCR | ICRNL | IXON);
ios.c_oflag &= ~OPOST;
ios.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN);
ios.c_cflag &= ~(CSIZE | PARENB);
ios.c_cflag |= CS8;
#endif
ios.c_iflag |= IGNPAR;
ios.c_cflag |= CREAD | CLOCAL;
s = ::tcsetattr(fd, TCSANOW, &ios);
descriptor_ops::get_last_error(ec, s < 0);
}
if (s < 0)
{
asio::error_code ignored_ec;
descriptor_ops::close(fd, state, ignored_ec);
ASIO_ERROR_LOCATION(ec);
return ec;
}
// We're done. Take ownership of the serial port descriptor.
if (descriptor_service_.assign(impl, fd, ec))
{
asio::error_code ignored_ec;
descriptor_ops::close(fd, state, ignored_ec);
}
ASIO_ERROR_LOCATION(ec);
return ec;
}
asio::error_code posix_serial_port_service::do_set_option(
posix_serial_port_service::implementation_type& impl,
posix_serial_port_service::store_function_type store,
const void* option, asio::error_code& ec)
{
termios ios;
int s = ::tcgetattr(descriptor_service_.native_handle(impl), &ios);
descriptor_ops::get_last_error(ec, s < 0);
if (s < 0)
{
ASIO_ERROR_LOCATION(ec);
return ec;
}
if (store(option, ios, ec))
{
ASIO_ERROR_LOCATION(ec);
return ec;
}
s = ::tcsetattr(descriptor_service_.native_handle(impl), TCSANOW, &ios);
descriptor_ops::get_last_error(ec, s < 0);
ASIO_ERROR_LOCATION(ec);
return ec;
}
asio::error_code posix_serial_port_service::do_get_option(
const posix_serial_port_service::implementation_type& impl,
posix_serial_port_service::load_function_type load,
void* option, asio::error_code& ec) const
{
termios ios;
int s = ::tcgetattr(descriptor_service_.native_handle(impl), &ios);
descriptor_ops::get_last_error(ec, s < 0);
if (s < 0)
{
ASIO_ERROR_LOCATION(ec);
return ec;
}
load(option, ios, ec);
ASIO_ERROR_LOCATION(ec);
return ec;
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
#endif // defined(ASIO_HAS_SERIAL_PORT)
#endif // ASIO_DETAIL_IMPL_POSIX_SERIAL_PORT_SERVICE_IPP

View File

@@ -0,0 +1,84 @@
//
// detail/impl/posix_thread.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_POSIX_THREAD_IPP
#define ASIO_DETAIL_IMPL_POSIX_THREAD_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_PTHREADS)
#include "asio/detail/posix_thread.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
posix_thread::~posix_thread()
{
if (arg_)
std::terminate();
}
void posix_thread::join()
{
if (arg_)
{
::pthread_join(arg_->thread_, 0);
arg_->destroy();
arg_ = 0;
}
}
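// Report the number of processors currently online, or 0 if this cannot be
// determined on the current platform.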
std::size_t posix_thread::hardware_concurrency()
{
#if defined(_SC_NPROCESSORS_ONLN)
long result = sysconf(_SC_NPROCESSORS_ONLN);
if (result > 0)
return result;
#endif // defined(_SC_NPROCESSORS_ONLN)
return 0;
}
posix_thread::func_base* posix_thread::start_thread(func_base* arg)
{
int error = ::pthread_create(&arg->thread_, 0,
asio_detail_posix_thread_function, arg);
if (error != 0)
{
arg->destroy();
asio::error_code ec(error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "thread");
}
return arg;
}
void* asio_detail_posix_thread_function(void* arg)
{
static_cast<posix_thread::func_base*>(arg)->run();
return 0;
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_PTHREADS)
#endif // ASIO_DETAIL_IMPL_POSIX_THREAD_IPP

View File

@@ -0,0 +1,46 @@
//
// detail/impl/posix_tss_ptr.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP
#define ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_PTHREADS)
#include "asio/detail/posix_tss_ptr.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
void posix_tss_ptr_create(pthread_key_t& key)
{
int error = ::pthread_key_create(&key, 0);
asio::error_code ec(error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "tss");
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_PTHREADS)
#endif // ASIO_DETAIL_IMPL_POSIX_TSS_PTR_IPP

View File

@@ -0,0 +1,232 @@
//
// detail/impl/reactive_descriptor_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP
#define ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__) \
&& !defined(ASIO_HAS_IO_URING_AS_DEFAULT)
#include "asio/error.hpp"
#include "asio/detail/reactive_descriptor_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
reactive_descriptor_service::reactive_descriptor_service(
execution_context& context)
: execution_context_service_base<reactive_descriptor_service>(context),
reactor_(asio::use_service<reactor>(context))
{
reactor_.init_task();
}
void reactive_descriptor_service::shutdown()
{
}
void reactive_descriptor_service::construct(
reactive_descriptor_service::implementation_type& impl)
{
impl.descriptor_ = -1;
impl.state_ = 0;
impl.reactor_data_ = reactor::per_descriptor_data();
}
void reactive_descriptor_service::move_construct(
reactive_descriptor_service::implementation_type& impl,
reactive_descriptor_service::implementation_type& other_impl)
noexcept
{
impl.descriptor_ = other_impl.descriptor_;
other_impl.descriptor_ = -1;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
reactor_.move_descriptor(impl.descriptor_,
impl.reactor_data_, other_impl.reactor_data_);
}
void reactive_descriptor_service::move_assign(
reactive_descriptor_service::implementation_type& impl,
reactive_descriptor_service& other_service,
reactive_descriptor_service::implementation_type& other_impl)
{
destroy(impl);
impl.descriptor_ = other_impl.descriptor_;
other_impl.descriptor_ = -1;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
other_service.reactor_.move_descriptor(impl.descriptor_,
impl.reactor_data_, other_impl.reactor_data_);
}
void reactive_descriptor_service::destroy(
reactive_descriptor_service::implementation_type& impl)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((reactor_.context(),
"descriptor", &impl, impl.descriptor_, "close"));
reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_,
(impl.state_ & descriptor_ops::possible_dup) == 0);
asio::error_code ignored_ec;
descriptor_ops::close(impl.descriptor_, impl.state_, ignored_ec);
reactor_.cleanup_descriptor_data(impl.reactor_data_);
}
}
asio::error_code reactive_descriptor_service::assign(
reactive_descriptor_service::implementation_type& impl,
const native_handle_type& native_descriptor, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
ASIO_ERROR_LOCATION(ec);
return ec;
}
if (int err = reactor_.register_descriptor(
native_descriptor, impl.reactor_data_))
{
ec = asio::error_code(err,
asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
impl.descriptor_ = native_descriptor;
impl.state_ = descriptor_ops::possible_dup;
ec = asio::error_code();
return ec;
}
asio::error_code reactive_descriptor_service::close(
reactive_descriptor_service::implementation_type& impl,
asio::error_code& ec)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((reactor_.context(),
"descriptor", &impl, impl.descriptor_, "close"));
reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_,
(impl.state_ & descriptor_ops::possible_dup) == 0);
descriptor_ops::close(impl.descriptor_, impl.state_, ec);
reactor_.cleanup_descriptor_data(impl.reactor_data_);
}
else
{
ec = asio::error_code();
}
// The descriptor is closed by the OS even if close() returns an error.
//
// (Actually, POSIX says the state of the descriptor is unspecified. On
// Linux the descriptor is apparently closed anyway; e.g. see
// http://lkml.org/lkml/2005/9/10/129
// We'll just have to assume that other OSes follow the same behaviour.)
construct(impl);
ASIO_ERROR_LOCATION(ec);
return ec;
}
reactive_descriptor_service::native_handle_type
reactive_descriptor_service::release(
reactive_descriptor_service::implementation_type& impl)
{
native_handle_type descriptor = impl.descriptor_;
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((reactor_.context(),
"descriptor", &impl, impl.descriptor_, "release"));
reactor_.deregister_descriptor(impl.descriptor_, impl.reactor_data_, false);
reactor_.cleanup_descriptor_data(impl.reactor_data_);
construct(impl);
}
return descriptor;
}
asio::error_code reactive_descriptor_service::cancel(
reactive_descriptor_service::implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
ASIO_ERROR_LOCATION(ec);
return ec;
}
ASIO_HANDLER_OPERATION((reactor_.context(),
"descriptor", &impl, impl.descriptor_, "cancel"));
reactor_.cancel_ops(impl.descriptor_, impl.reactor_data_);
ec = asio::error_code();
return ec;
}
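// Start an asynchronous operation. Unless the operation is a no-op, the
// descriptor is first switched to non-blocking mode where required, and the
// operation is then handed to the reactor (which may attempt it
// speculatively). A no-op, or a failure to make the descriptor
// non-blocking, completes immediately via on_immediate.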
void reactive_descriptor_service::do_start_op(implementation_type& impl,
int op_type, reactor_op* op, bool is_continuation,
bool allow_speculative, bool noop, bool needs_non_blocking,
void (*on_immediate)(operation* op, bool, const void*),
const void* immediate_arg)
{
if (!noop)
{
if ((impl.state_ & descriptor_ops::non_blocking)
|| !needs_non_blocking
|| descriptor_ops::set_internal_non_blocking(
impl.descriptor_, impl.state_, true, op->ec_))
{
reactor_.start_op(op_type, impl.descriptor_, impl.reactor_data_, op,
is_continuation, allow_speculative, on_immediate, immediate_arg);
return;
}
}
on_immediate(op, is_continuation, immediate_arg);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
// && !defined(ASIO_HAS_IO_URING_AS_DEFAULT)
#endif // ASIO_DETAIL_IMPL_REACTIVE_DESCRIPTOR_SERVICE_IPP

View File

@@ -0,0 +1,312 @@
//
// detail/impl/reactive_socket_service_base.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP
#define ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if !defined(ASIO_HAS_IOCP) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(ASIO_HAS_IO_URING_AS_DEFAULT)
#include "asio/detail/reactive_socket_service_base.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
reactive_socket_service_base::reactive_socket_service_base(
execution_context& context)
: reactor_(use_service<reactor>(context))
{
reactor_.init_task();
}
void reactive_socket_service_base::base_shutdown()
{
}
void reactive_socket_service_base::construct(
reactive_socket_service_base::base_implementation_type& impl)
{
impl.socket_ = invalid_socket;
impl.state_ = 0;
impl.reactor_data_ = reactor::per_descriptor_data();
}
void reactive_socket_service_base::base_move_construct(
reactive_socket_service_base::base_implementation_type& impl,
reactive_socket_service_base::base_implementation_type& other_impl)
noexcept
{
impl.socket_ = other_impl.socket_;
other_impl.socket_ = invalid_socket;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
reactor_.move_descriptor(impl.socket_,
impl.reactor_data_, other_impl.reactor_data_);
}
void reactive_socket_service_base::base_move_assign(
reactive_socket_service_base::base_implementation_type& impl,
reactive_socket_service_base& other_service,
reactive_socket_service_base::base_implementation_type& other_impl)
{
destroy(impl);
impl.socket_ = other_impl.socket_;
other_impl.socket_ = invalid_socket;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
other_service.reactor_.move_descriptor(impl.socket_,
impl.reactor_data_, other_impl.reactor_data_);
}
void reactive_socket_service_base::destroy(
reactive_socket_service_base::base_implementation_type& impl)
{
if (impl.socket_ != invalid_socket)
{
ASIO_HANDLER_OPERATION((reactor_.context(),
"socket", &impl, impl.socket_, "close"));
reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_,
(impl.state_ & socket_ops::possible_dup) == 0);
asio::error_code ignored_ec;
socket_ops::close(impl.socket_, impl.state_, true, ignored_ec);
reactor_.cleanup_descriptor_data(impl.reactor_data_);
}
}
asio::error_code reactive_socket_service_base::close(
reactive_socket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((reactor_.context(),
"socket", &impl, impl.socket_, "close"));
reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_,
(impl.state_ & socket_ops::possible_dup) == 0);
socket_ops::close(impl.socket_, impl.state_, false, ec);
reactor_.cleanup_descriptor_data(impl.reactor_data_);
}
else
{
ec = asio::error_code();
}
// The descriptor is closed by the OS even if close() returns an error.
//
// (Actually, POSIX says the state of the descriptor is unspecified. On
// Linux the descriptor is apparently closed anyway; e.g. see
// http://lkml.org/lkml/2005/9/10/129
// We'll just have to assume that other OSes follow the same behaviour. The
// known exception is when Windows's closesocket() function fails with
// WSAEWOULDBLOCK, but this case is handled inside socket_ops::close().
construct(impl);
return ec;
}
socket_type reactive_socket_service_base::release(
reactive_socket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return invalid_socket;
}
ASIO_HANDLER_OPERATION((reactor_.context(),
"socket", &impl, impl.socket_, "release"));
reactor_.deregister_descriptor(impl.socket_, impl.reactor_data_, false);
reactor_.cleanup_descriptor_data(impl.reactor_data_);
socket_type sock = impl.socket_;
construct(impl);
ec = asio::error_code();
return sock;
}
asio::error_code reactive_socket_service_base::cancel(
reactive_socket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return ec;
}
ASIO_HANDLER_OPERATION((reactor_.context(),
"socket", &impl, impl.socket_, "cancel"));
reactor_.cancel_ops(impl.socket_, impl.reactor_data_);
ec = asio::error_code();
return ec;
}
asio::error_code reactive_socket_service_base::do_open(
reactive_socket_service_base::base_implementation_type& impl,
int af, int type, int protocol, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
return ec;
}
socket_holder sock(socket_ops::socket(af, type, protocol, ec));
if (sock.get() == invalid_socket)
return ec;
if (int err = reactor_.register_descriptor(sock.get(), impl.reactor_data_))
{
ec = asio::error_code(err,
asio::error::get_system_category());
return ec;
}
impl.socket_ = sock.release();
switch (type)
{
case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break;
case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break;
default: impl.state_ = 0; break;
}
ec = asio::error_code();
return ec;
}
asio::error_code reactive_socket_service_base::do_assign(
reactive_socket_service_base::base_implementation_type& impl, int type,
const reactive_socket_service_base::native_handle_type& native_socket,
asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
return ec;
}
if (int err = reactor_.register_descriptor(
native_socket, impl.reactor_data_))
{
ec = asio::error_code(err,
asio::error::get_system_category());
return ec;
}
impl.socket_ = native_socket;
switch (type)
{
case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break;
case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break;
default: impl.state_ = 0; break;
}
impl.state_ |= socket_ops::possible_dup;
ec = asio::error_code();
return ec;
}
void reactive_socket_service_base::do_start_op(
reactive_socket_service_base::base_implementation_type& impl,
int op_type, reactor_op* op, bool is_continuation,
bool allow_speculative, bool noop, bool needs_non_blocking,
void (*on_immediate)(operation* op, bool, const void*),
const void* immediate_arg)
{
if (!noop)
{
if ((impl.state_ & socket_ops::non_blocking)
|| !needs_non_blocking
|| socket_ops::set_internal_non_blocking(
impl.socket_, impl.state_, true, op->ec_))
{
reactor_.start_op(op_type, impl.socket_, impl.reactor_data_, op,
is_continuation, allow_speculative, on_immediate, immediate_arg);
return;
}
}
on_immediate(op, is_continuation, immediate_arg);
}
void reactive_socket_service_base::do_start_accept_op(
reactive_socket_service_base::base_implementation_type& impl,
reactor_op* op, bool is_continuation, bool peer_is_open,
void (*on_immediate)(operation* op, bool, const void*),
const void* immediate_arg)
{
if (!peer_is_open)
{
do_start_op(impl, reactor::read_op, op, is_continuation,
true, false, true, on_immediate, immediate_arg);
}
else
{
op->ec_ = asio::error::already_open;
on_immediate(op, is_continuation, immediate_arg);
}
}
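// Non-blocking connect: the connect() call is attempted immediately. If it
// cannot complete straight away (in progress / would block), the operation
// is registered with the reactor to await writability. Any other outcome,
// including immediate success, completes via on_immediate.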
void reactive_socket_service_base::do_start_connect_op(
reactive_socket_service_base::base_implementation_type& impl,
reactor_op* op, bool is_continuation, const void* addr, size_t addrlen,
void (*on_immediate)(operation* op, bool, const void*),
const void* immediate_arg)
{
if ((impl.state_ & socket_ops::non_blocking)
|| socket_ops::set_internal_non_blocking(
impl.socket_, impl.state_, true, op->ec_))
{
if (socket_ops::connect(impl.socket_, addr, addrlen, op->ec_) != 0)
{
if (op->ec_ == asio::error::in_progress
|| op->ec_ == asio::error::would_block)
{
op->ec_ = asio::error_code();
reactor_.start_op(reactor::connect_op, impl.socket_, impl.reactor_data_,
op, is_continuation, false, on_immediate, immediate_arg);
return;
}
}
}
on_immediate(op, is_continuation, immediate_arg);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // !defined(ASIO_HAS_IOCP)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(ASIO_HAS_IO_URING_AS_DEFAULT)
#endif // ASIO_DETAIL_IMPL_REACTIVE_SOCKET_SERVICE_BASE_IPP

View File

@@ -0,0 +1,79 @@
//
// detail/impl/resolver_service_base.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP
#define ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/config.hpp"
#include "asio/detail/memory.hpp"
#include "asio/detail/resolver_service_base.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
resolver_service_base::resolver_service_base(execution_context& context)
: thread_pool_(asio::use_service<resolver_thread_pool>(context))
{
}
resolver_service_base::~resolver_service_base()
{
}
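// The implementation is a shared_ptr<void> used purely as a cancellation
// token: pending resolve operations hold a weak reference to it, so
// resetting the pointer here or in cancel() marks those operations as
// cancelled.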
void resolver_service_base::construct(
resolver_service_base::implementation_type& impl)
{
impl.reset(static_cast<void*>(0), socket_ops::noop_deleter());
}
void resolver_service_base::destroy(
resolver_service_base::implementation_type& impl)
{
ASIO_HANDLER_OPERATION((thread_pool_.context(),
"resolver", &impl, 0, "cancel"));
impl.reset();
}
void resolver_service_base::move_construct(implementation_type& impl,
implementation_type& other_impl)
{
impl = static_cast<implementation_type&&>(other_impl);
}
void resolver_service_base::move_assign(implementation_type& impl,
resolver_service_base&, implementation_type& other_impl)
{
destroy(impl);
impl = static_cast<implementation_type&&>(other_impl);
}
void resolver_service_base::cancel(
resolver_service_base::implementation_type& impl)
{
ASIO_HANDLER_OPERATION((thread_pool_.context(),
"resolver", &impl, 0, "cancel"));
impl.reset(static_cast<void*>(0), socket_ops::noop_deleter());
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_RESOLVER_SERVICE_BASE_IPP

View File

@@ -0,0 +1,124 @@
//
// detail/impl/resolver_thread_pool.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_RESOLVER_THREAD_POOL_IPP
#define ASIO_DETAIL_IMPL_RESOLVER_THREAD_POOL_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/config.hpp"
#include "asio/detail/memory.hpp"
#include "asio/detail/resolver_thread_pool.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
class resolver_thread_pool::work_scheduler_runner
{
public:
work_scheduler_runner(scheduler_impl& work_scheduler)
: work_scheduler_(work_scheduler)
{
}
void operator()()
{
asio::error_code ec;
work_scheduler_.run(ec);
}
private:
scheduler_impl& work_scheduler_;
};
resolver_thread_pool::resolver_thread_pool(execution_context& context)
: execution_context_service_base<resolver_thread_pool>(context),
scheduler_(asio::use_service<scheduler_impl>(context)),
work_scheduler_(scheduler_impl::internal(), context),
work_threads_(execution_context::allocator<void>(context)),
num_work_threads_(config(context).get("resolver", "threads", 0U)),
scheduler_locking_(config(context).get("scheduler", "locking", true)),
shutdown_(false)
{
work_scheduler_.work_started();
if (num_work_threads_ > 0)
start_work_threads();
else
num_work_threads_ = 1;
}
resolver_thread_pool::~resolver_thread_pool()
{
shutdown();
}
void resolver_thread_pool::shutdown()
{
if (!shutdown_)
{
work_scheduler_.work_finished();
work_scheduler_.stop();
work_threads_.join();
work_scheduler_.shutdown();
shutdown_ = true;
}
}
void resolver_thread_pool::notify_fork(execution_context::fork_event fork_ev)
{
if (!work_threads_.empty())
{
if (fork_ev == execution_context::fork_prepare)
{
work_scheduler_.stop();
work_threads_.join();
}
}
else if (fork_ev != execution_context::fork_prepare)
{
work_scheduler_.restart();
}
}
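// Resolve operations execute on the pool's work threads, which are started
// lazily on the first resolve. If scheduler locking has been disabled via
// configuration, asynchronous resolution is not supported and the operation
// fails immediately with operation_not_supported.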
void resolver_thread_pool::start_resolve_op(resolve_op* op)
{
if (scheduler_locking_)
{
start_work_threads();
scheduler_.work_started();
work_scheduler_.post_immediate_completion(op, false);
}
else
{
op->ec_ = asio::error::operation_not_supported;
scheduler_.post_immediate_completion(op, false);
}
}
void resolver_thread_pool::start_work_threads()
{
asio::detail::mutex::scoped_lock lock(mutex_);
if (work_threads_.empty())
for (unsigned int i = 0; i < num_work_threads_; ++i)
work_threads_.create_thread(work_scheduler_runner(work_scheduler_));
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_RESOLVER_THREAD_POOL_IPP

View File

@@ -0,0 +1,696 @@
//
// detail/impl/scheduler.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_SCHEDULER_IPP
#define ASIO_DETAIL_IMPL_SCHEDULER_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/config.hpp"
#include "asio/detail/event.hpp"
#include "asio/detail/limits.hpp"
#include "asio/detail/scheduler.hpp"
#include "asio/detail/scheduler_thread_info.hpp"
#include "asio/detail/signal_blocker.hpp"
#if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
# include "asio/detail/io_uring_service.hpp"
#else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
# include "asio/detail/reactor.hpp"
#endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
class scheduler::thread_function
{
public:
explicit thread_function(scheduler* s)
: this_(s)
{
}
void operator()()
{
asio::error_code ec;
this_->run(ec);
}
private:
scheduler* this_;
};
struct scheduler::task_cleanup
{
~task_cleanup()
{
if (this_thread_->private_outstanding_work > 0)
{
asio::detail::increment(
scheduler_->outstanding_work_,
this_thread_->private_outstanding_work);
}
this_thread_->private_outstanding_work = 0;
// Enqueue the completed operations and reinsert the task at the end of
// the operation queue.
lock_->lock();
scheduler_->task_interrupted_ = true;
scheduler_->op_queue_.push(this_thread_->private_op_queue);
scheduler_->op_queue_.push(&scheduler_->task_operation_);
}
scheduler* scheduler_;
mutex::scoped_lock* lock_;
thread_info* this_thread_;
};
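// On return from a handler, work_cleanup reconciles the thread-private
// outstanding work count with the scheduler's global count and, on
// multi-threaded builds, flushes any thread-private operations back to the
// shared queue.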
struct scheduler::work_cleanup
{
~work_cleanup()
{
if (this_thread_->private_outstanding_work > 1)
{
asio::detail::increment(
scheduler_->outstanding_work_,
this_thread_->private_outstanding_work - 1);
}
else if (this_thread_->private_outstanding_work < 1)
{
scheduler_->work_finished();
}
this_thread_->private_outstanding_work = 0;
#if defined(ASIO_HAS_THREADS)
if (!this_thread_->private_op_queue.empty())
{
lock_->lock();
scheduler_->op_queue_.push(this_thread_->private_op_queue);
}
#endif // defined(ASIO_HAS_THREADS)
}
scheduler* scheduler_;
mutex::scoped_lock* lock_;
thread_info* this_thread_;
};
scheduler::scheduler(asio::execution_context& ctx,
bool own_thread, get_task_func_type get_task)
: asio::detail::execution_context_service_base<scheduler>(ctx),
one_thread_(config(ctx).get("scheduler", "concurrency_hint", 0) == 1),
mutex_(config(ctx).get("scheduler", "locking", true),
config(ctx).get("scheduler", "locking_spin_count", 0)),
task_(0),
get_task_(get_task),
task_interrupted_(true),
stopped_(false),
shutdown_(false),
outstanding_work_(0),
task_usec_(config(ctx).get("scheduler", "task_usec", -1L)),
wait_usec_(config(ctx).get("scheduler", "wait_usec", -1L)),
thread_()
{
ASIO_HANDLER_TRACKING_INIT;
if (own_thread)
{
++outstanding_work_;
signal_blocker sb;
thread_ = thread(thread_function(this));
}
}
scheduler::scheduler(scheduler::internal, asio::execution_context& ctx)
: asio::detail::execution_context_service_base<scheduler>(ctx),
one_thread_(false),
mutex_(true, 0),
task_(0),
get_task_(&scheduler::get_default_task),
task_interrupted_(true),
stopped_(false),
shutdown_(false),
outstanding_work_(0),
task_usec_(-1L),
wait_usec_(-1L)
{
ASIO_HANDLER_TRACKING_INIT;
}
scheduler::~scheduler()
{
if (thread_.joinable())
{
mutex::scoped_lock lock(mutex_);
shutdown_ = true;
stop_all_threads(lock);
lock.unlock();
thread_.join();
}
}
void scheduler::shutdown()
{
mutex::scoped_lock lock(mutex_);
shutdown_ = true;
if (thread_.joinable())
stop_all_threads(lock);
lock.unlock();
// Join thread to ensure task operation is returned to queue.
thread_.join();
// Destroy handler objects.
while (!op_queue_.empty())
{
operation* o = op_queue_.front();
op_queue_.pop();
if (o != &task_operation_)
o->destroy();
}
// Reset to initial state.
task_ = 0;
}
void scheduler::init_task()
{
mutex::scoped_lock lock(mutex_);
if (!shutdown_ && !task_)
{
task_ = get_task_(this->context());
op_queue_.push(&task_operation_);
wake_one_thread_and_unlock(lock);
}
}
std::size_t scheduler::run(asio::error_code& ec)
{
ec = asio::error_code();
if (outstanding_work_ == 0)
{
stop();
return 0;
}
thread_info this_thread;
this_thread.private_outstanding_work = 0;
thread_call_stack::context ctx(this, this_thread);
mutex::scoped_lock lock(mutex_);
std::size_t n = 0;
for (; do_run_one(lock, this_thread, ec); lock.lock())
if (n != (std::numeric_limits<std::size_t>::max)())
++n;
return n;
}
std::size_t scheduler::run_one(asio::error_code& ec)
{
ec = asio::error_code();
if (outstanding_work_ == 0)
{
stop();
return 0;
}
thread_info this_thread;
this_thread.private_outstanding_work = 0;
thread_call_stack::context ctx(this, this_thread);
mutex::scoped_lock lock(mutex_);
return do_run_one(lock, this_thread, ec);
}
std::size_t scheduler::wait_one(long usec, asio::error_code& ec)
{
ec = asio::error_code();
if (outstanding_work_ == 0)
{
stop();
return 0;
}
thread_info this_thread;
this_thread.private_outstanding_work = 0;
thread_call_stack::context ctx(this, this_thread);
mutex::scoped_lock lock(mutex_);
return do_wait_one(lock, this_thread, usec, ec);
}
std::size_t scheduler::poll(asio::error_code& ec)
{
ec = asio::error_code();
if (outstanding_work_ == 0)
{
stop();
return 0;
}
thread_info this_thread;
this_thread.private_outstanding_work = 0;
thread_call_stack::context ctx(this, this_thread);
mutex::scoped_lock lock(mutex_);
#if defined(ASIO_HAS_THREADS)
// We want to support nested calls to poll() and poll_one(), so any handlers
// that are already on a thread-private queue need to be put on to the main
// queue now.
if (one_thread_)
if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))
op_queue_.push(outer_info->private_op_queue);
#endif // defined(ASIO_HAS_THREADS)
std::size_t n = 0;
for (; do_poll_one(lock, this_thread, ec); lock.lock())
if (n != (std::numeric_limits<std::size_t>::max)())
++n;
return n;
}
std::size_t scheduler::poll_one(asio::error_code& ec)
{
ec = asio::error_code();
if (outstanding_work_ == 0)
{
stop();
return 0;
}
thread_info this_thread;
this_thread.private_outstanding_work = 0;
thread_call_stack::context ctx(this, this_thread);
mutex::scoped_lock lock(mutex_);
#if defined(ASIO_HAS_THREADS)
// We want to support nested calls to poll() and poll_one(), so any handlers
// that are already on a thread-private queue need to be put on to the main
// queue now.
if (one_thread_)
if (thread_info* outer_info = static_cast<thread_info*>(ctx.next_by_key()))
op_queue_.push(outer_info->private_op_queue);
#endif // defined(ASIO_HAS_THREADS)
return do_poll_one(lock, this_thread, ec);
}
void scheduler::stop()
{
mutex::scoped_lock lock(mutex_);
stop_all_threads(lock);
}
bool scheduler::stopped() const
{
mutex::scoped_lock lock(mutex_);
return stopped_;
}
void scheduler::restart()
{
mutex::scoped_lock lock(mutex_);
stopped_ = false;
}
void scheduler::compensating_work_started()
{
thread_info_base* this_thread = thread_call_stack::contains(this);
ASIO_ASSUME(this_thread != 0); // Only called from inside scheduler.
++static_cast<thread_info*>(this_thread)->private_outstanding_work;
}
bool scheduler::can_dispatch()
{
return thread_call_stack::contains(this) != 0;
}
void scheduler::capture_current_exception()
{
if (thread_info_base* this_thread = thread_call_stack::contains(this))
this_thread->capture_current_exception();
}
void scheduler::post_immediate_completion(
scheduler::operation* op, bool is_continuation)
{
#if defined(ASIO_HAS_THREADS)
if (one_thread_ || is_continuation)
{
if (thread_info_base* this_thread = thread_call_stack::contains(this))
{
++static_cast<thread_info*>(this_thread)->private_outstanding_work;
static_cast<thread_info*>(this_thread)->private_op_queue.push(op);
return;
}
}
#else // defined(ASIO_HAS_THREADS)
(void)is_continuation;
#endif // defined(ASIO_HAS_THREADS)
work_started();
mutex::scoped_lock lock(mutex_);
op_queue_.push(op);
wake_one_thread_and_unlock(lock);
}
void scheduler::post_immediate_completions(std::size_t n,
op_queue<scheduler::operation>& ops, bool is_continuation)
{
#if defined(ASIO_HAS_THREADS)
if (one_thread_ || is_continuation)
{
if (thread_info_base* this_thread = thread_call_stack::contains(this))
{
static_cast<thread_info*>(this_thread)->private_outstanding_work
+= static_cast<long>(n);
static_cast<thread_info*>(this_thread)->private_op_queue.push(ops);
return;
}
}
#else // defined(ASIO_HAS_THREADS)
(void)is_continuation;
#endif // defined(ASIO_HAS_THREADS)
increment(outstanding_work_, static_cast<long>(n));
mutex::scoped_lock lock(mutex_);
op_queue_.push(ops);
wake_one_thread_and_unlock(lock);
}
void scheduler::post_deferred_completion(scheduler::operation* op)
{
#if defined(ASIO_HAS_THREADS)
if (one_thread_)
{
if (thread_info_base* this_thread = thread_call_stack::contains(this))
{
static_cast<thread_info*>(this_thread)->private_op_queue.push(op);
return;
}
}
#endif // defined(ASIO_HAS_THREADS)
mutex::scoped_lock lock(mutex_);
op_queue_.push(op);
wake_one_thread_and_unlock(lock);
}
void scheduler::post_deferred_completions(
op_queue<scheduler::operation>& ops)
{
if (!ops.empty())
{
#if defined(ASIO_HAS_THREADS)
if (one_thread_)
{
if (thread_info_base* this_thread = thread_call_stack::contains(this))
{
static_cast<thread_info*>(this_thread)->private_op_queue.push(ops);
return;
}
}
#endif // defined(ASIO_HAS_THREADS)
mutex::scoped_lock lock(mutex_);
op_queue_.push(ops);
wake_one_thread_and_unlock(lock);
}
}
void scheduler::do_dispatch(
scheduler::operation* op)
{
work_started();
mutex::scoped_lock lock(mutex_);
op_queue_.push(op);
wake_one_thread_and_unlock(lock);
}
void scheduler::abandon_operations(
op_queue<scheduler::operation>& ops)
{
op_queue<scheduler::operation> ops2;
ops2.push(ops);
}
std::size_t scheduler::do_run_one(mutex::scoped_lock& lock,
scheduler::thread_info& this_thread,
const asio::error_code& ec)
{
while (!stopped_)
{
if (!op_queue_.empty())
{
// Prepare to execute first handler from queue.
operation* o = op_queue_.front();
op_queue_.pop();
bool more_handlers = (!op_queue_.empty());
if (o == &task_operation_)
{
task_interrupted_ = more_handlers || task_usec_ == 0;
if (more_handlers && !one_thread_ && wait_usec_ != 0)
wakeup_event_.unlock_and_signal_one(lock);
else
lock.unlock();
task_cleanup on_exit = { this, &lock, &this_thread };
(void)on_exit;
// Run the task. May throw an exception. Only block if the operation
// queue is empty and we're not polling, otherwise we want to return
// as soon as possible.
task_->run(more_handlers ? 0 : task_usec_,
this_thread.private_op_queue);
}
else
{
std::size_t task_result = o->task_result_;
if (more_handlers && !one_thread_)
wake_one_thread_and_unlock(lock);
else
lock.unlock();
// Ensure the count of outstanding work is decremented on block exit.
work_cleanup on_exit = { this, &lock, &this_thread };
(void)on_exit;
// Complete the operation. May throw an exception. Deletes the object.
o->complete(this, ec, task_result);
this_thread.rethrow_pending_exception();
return 1;
}
}
else
{
if (wait_usec_ == 0)
{
lock.unlock();
lock.lock();
}
else
{
wakeup_event_.clear(lock);
if (wait_usec_ > 0)
wakeup_event_.wait_for_usec(lock, wait_usec_);
else
wakeup_event_.wait(lock);
}
}
}
return 0;
}
std::size_t scheduler::do_wait_one(mutex::scoped_lock& lock,
scheduler::thread_info& this_thread, long usec,
const asio::error_code& ec)
{
if (stopped_)
return 0;
operation* o = op_queue_.front();
if (o == 0)
{
wakeup_event_.clear(lock);
usec = (wait_usec_ >= 0 && wait_usec_ < usec) ? wait_usec_ : usec;
wakeup_event_.wait_for_usec(lock, usec);
usec = 0; // Wait at most once.
o = op_queue_.front();
}
if (o == &task_operation_)
{
op_queue_.pop();
bool more_handlers = (!op_queue_.empty());
usec = (task_usec_ >= 0 && task_usec_ < usec) ? task_usec_ : usec;
task_interrupted_ = more_handlers || usec == 0;
if (more_handlers && !one_thread_ && wait_usec_ != 0)
wakeup_event_.unlock_and_signal_one(lock);
else
lock.unlock();
{
task_cleanup on_exit = { this, &lock, &this_thread };
(void)on_exit;
// Run the task. May throw an exception. Only block if the operation
// queue is empty and we're not polling, otherwise we want to return
// as soon as possible.
task_->run(more_handlers ? 0 : usec, this_thread.private_op_queue);
}
o = op_queue_.front();
if (o == &task_operation_)
{
if (!one_thread_)
wakeup_event_.maybe_unlock_and_signal_one(lock);
return 0;
}
}
if (o == 0)
return 0;
op_queue_.pop();
bool more_handlers = (!op_queue_.empty());
std::size_t task_result = o->task_result_;
if (more_handlers && !one_thread_)
wake_one_thread_and_unlock(lock);
else
lock.unlock();
// Ensure the count of outstanding work is decremented on block exit.
work_cleanup on_exit = { this, &lock, &this_thread };
(void)on_exit;
// Complete the operation. May throw an exception. Deletes the object.
o->complete(this, ec, task_result);
this_thread.rethrow_pending_exception();
return 1;
}
std::size_t scheduler::do_poll_one(mutex::scoped_lock& lock,
scheduler::thread_info& this_thread,
const asio::error_code& ec)
{
if (stopped_)
return 0;
operation* o = op_queue_.front();
if (o == &task_operation_)
{
op_queue_.pop();
lock.unlock();
{
task_cleanup c = { this, &lock, &this_thread };
(void)c;
// Run the task. May throw an exception. Only block if the operation
// queue is empty and we're not polling, otherwise we want to return
// as soon as possible.
task_->run(0, this_thread.private_op_queue);
}
o = op_queue_.front();
if (o == &task_operation_)
{
wakeup_event_.maybe_unlock_and_signal_one(lock);
return 0;
}
}
if (o == 0)
return 0;
op_queue_.pop();
bool more_handlers = (!op_queue_.empty());
std::size_t task_result = o->task_result_;
if (more_handlers && !one_thread_)
wake_one_thread_and_unlock(lock);
else
lock.unlock();
// Ensure the count of outstanding work is decremented on block exit.
work_cleanup on_exit = { this, &lock, &this_thread };
(void)on_exit;
// Complete the operation. May throw an exception. Deletes the object.
o->complete(this, ec, task_result);
this_thread.rethrow_pending_exception();
return 1;
}
void scheduler::stop_all_threads(
mutex::scoped_lock& lock)
{
stopped_ = true;
wakeup_event_.signal_all(lock);
if (!task_interrupted_ && task_)
{
task_interrupted_ = true;
task_->interrupt();
}
}
void scheduler::wake_one_thread_and_unlock(
mutex::scoped_lock& lock)
{
if (wait_usec_ == 0 || !wakeup_event_.maybe_unlock_and_signal_one(lock))
{
if (!task_interrupted_ && task_)
{
task_interrupted_ = true;
task_->interrupt();
}
lock.unlock();
}
}
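// The default task is the platform's I/O demultiplexer: the io_uring
// service when it is configured as the default backend, otherwise the
// reactor (epoll, kqueue, /dev/poll or select, as selected at compile
// time).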
scheduler_task* scheduler::get_default_task(asio::execution_context& ctx)
{
#if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
return &use_service<io_uring_service>(ctx);
#else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
return &use_service<reactor>(ctx);
#endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_SCHEDULER_IPP

View File

@@ -0,0 +1,130 @@
//
// detail/impl/select_reactor.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP
#define ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IOCP) \
|| (!defined(ASIO_HAS_DEV_POLL) \
&& !defined(ASIO_HAS_EPOLL) \
&& !defined(ASIO_HAS_KQUEUE) \
&& !defined(ASIO_WINDOWS_RUNTIME))
#if defined(ASIO_HAS_IOCP)
# include "asio/detail/win_iocp_io_context.hpp"
#else // defined(ASIO_HAS_IOCP)
# include "asio/detail/scheduler.hpp"
#endif // defined(ASIO_HAS_IOCP)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
inline void select_reactor::post_immediate_completion(
operation* op, bool is_continuation) const
{
scheduler_.post_immediate_completion(op, is_continuation);
}
template <typename TimeTraits, typename Allocator>
void select_reactor::add_timer_queue(
timer_queue<TimeTraits, Allocator>& queue)
{
do_add_timer_queue(queue);
}
// Remove a timer queue from the reactor.
template <typename TimeTraits, typename Allocator>
void select_reactor::remove_timer_queue(
timer_queue<TimeTraits, Allocator>& queue)
{
do_remove_timer_queue(queue);
}
template <typename TimeTraits, typename Allocator>
void select_reactor::schedule_timer(
timer_queue<TimeTraits, Allocator>& queue,
const typename TimeTraits::time_type& time,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& timer,
wait_op* op)
{
asio::detail::mutex::scoped_lock lock(mutex_);
if (shutdown_)
{
scheduler_.post_immediate_completion(op, false);
return;
}
bool earliest = queue.enqueue_timer(time, timer, op);
scheduler_.work_started();
if (earliest)
interrupter_.interrupt();
}
template <typename TimeTraits, typename Allocator>
std::size_t select_reactor::cancel_timer(
timer_queue<TimeTraits, Allocator>& queue,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& timer,
std::size_t max_cancelled)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
scheduler_.post_deferred_completions(ops);
return n;
}
template <typename TimeTraits, typename Allocator>
void select_reactor::cancel_timer_by_key(
timer_queue<TimeTraits, Allocator>& queue,
typename timer_queue<TimeTraits, Allocator>::per_timer_data* timer,
void* cancellation_key)
{
mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer_by_key(timer, ops, cancellation_key);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
template <typename TimeTraits, typename Allocator>
void select_reactor::move_timer(timer_queue<TimeTraits, Allocator>& queue,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& target,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& source)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
queue.cancel_timer(target, ops);
queue.move_timer(target, source);
lock.unlock();
scheduler_.post_deferred_completions(ops);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IOCP)
// || (!defined(ASIO_HAS_DEV_POLL)
// && !defined(ASIO_HAS_EPOLL)
// && !defined(ASIO_HAS_KQUEUE)
// && !defined(ASIO_WINDOWS_RUNTIME))
#endif // ASIO_DETAIL_IMPL_SELECT_REACTOR_HPP

View File

@@ -0,0 +1,389 @@
//
// detail/impl/select_reactor.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP
#define ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IOCP) \
|| (!defined(ASIO_HAS_DEV_POLL) \
&& !defined(ASIO_HAS_EPOLL) \
&& !defined(ASIO_HAS_KQUEUE) \
&& !defined(ASIO_WINDOWS_RUNTIME))
#include "asio/detail/fd_set_adapter.hpp"
#include "asio/detail/select_reactor.hpp"
#include "asio/detail/signal_blocker.hpp"
#include "asio/detail/socket_ops.hpp"
#if defined(ASIO_HAS_IOCP)
# include "asio/detail/win_iocp_io_context.hpp"
#else // defined(ASIO_HAS_IOCP)
# include "asio/detail/scheduler.hpp"
#endif // defined(ASIO_HAS_IOCP)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
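// On Windows IOCP builds the scheduler's task is the I/O completion port,
// so the select reactor cannot run inline; instead it runs in a background
// thread created by the constructor below.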
#if defined(ASIO_HAS_IOCP)
class select_reactor::thread_function
{
public:
explicit thread_function(select_reactor* r)
: this_(r)
{
}
void operator()()
{
this_->run_thread();
}
private:
select_reactor* this_;
};
#endif // defined(ASIO_HAS_IOCP)
select_reactor::select_reactor(asio::execution_context& ctx)
: execution_context_service_base<select_reactor>(ctx),
scheduler_(use_service<scheduler_type>(ctx)),
mutex_(),
interrupter_(),
#if defined(ASIO_HAS_IOCP)
stop_thread_(false),
thread_(),
restart_reactor_(this),
#endif // defined(ASIO_HAS_IOCP)
shutdown_(false)
{
#if defined(ASIO_HAS_IOCP)
asio::detail::signal_blocker sb;
thread_ = thread(thread_function(this));
#endif // defined(ASIO_HAS_IOCP)
}
select_reactor::~select_reactor()
{
shutdown();
}
void select_reactor::shutdown()
{
asio::detail::mutex::scoped_lock lock(mutex_);
shutdown_ = true;
#if defined(ASIO_HAS_IOCP)
stop_thread_ = true;
if (thread_.joinable())
interrupter_.interrupt();
#endif // defined(ASIO_HAS_IOCP)
lock.unlock();
#if defined(ASIO_HAS_IOCP)
thread_.join();
#endif // defined(ASIO_HAS_IOCP)
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
op_queue_[i].get_all_operations(ops);
timer_queues_.get_all_timers(ops);
scheduler_.abandon_operations(ops);
}
void select_reactor::notify_fork(
asio::execution_context::fork_event fork_ev)
{
#if defined(ASIO_HAS_IOCP)
(void)fork_ev;
#else // defined(ASIO_HAS_IOCP)
if (fork_ev == asio::execution_context::fork_child)
interrupter_.recreate();
#endif // defined(ASIO_HAS_IOCP)
}
void select_reactor::init_task()
{
scheduler_.init_task();
}
int select_reactor::register_descriptor(socket_type,
select_reactor::per_descriptor_data&)
{
return 0;
}
int select_reactor::register_internal_descriptor(
int op_type, socket_type descriptor,
select_reactor::per_descriptor_data&, reactor_op* op)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue_[op_type].enqueue_operation(descriptor, op);
interrupter_.interrupt();
return 0;
}
void select_reactor::move_descriptor(socket_type,
select_reactor::per_descriptor_data&,
select_reactor::per_descriptor_data&)
{
}
void select_reactor::call_post_immediate_completion(
operation* op, bool is_continuation, const void* self)
{
static_cast<const select_reactor*>(self)->post_immediate_completion(
op, is_continuation);
}
void select_reactor::start_op(int op_type, socket_type descriptor,
select_reactor::per_descriptor_data&, reactor_op* op, bool is_continuation,
bool, void (*on_immediate)(operation*, bool, const void*),
const void* immediate_arg)
{
asio::detail::mutex::scoped_lock lock(mutex_);
if (shutdown_)
{
on_immediate(op, is_continuation, immediate_arg);
return;
}
bool first = op_queue_[op_type].enqueue_operation(descriptor, op);
scheduler_.work_started();
if (first)
interrupter_.interrupt();
}
void select_reactor::cancel_ops(socket_type descriptor,
select_reactor::per_descriptor_data&)
{
asio::detail::mutex::scoped_lock lock(mutex_);
cancel_ops_unlocked(descriptor, asio::error::operation_aborted);
}
void select_reactor::cancel_ops_by_key(socket_type descriptor,
select_reactor::per_descriptor_data&,
int op_type, void* cancellation_key)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
bool need_interrupt = op_queue_[op_type].cancel_operations_by_key(
descriptor, ops, cancellation_key, asio::error::operation_aborted);
scheduler_.post_deferred_completions(ops);
if (need_interrupt)
interrupter_.interrupt();
}
void select_reactor::deregister_descriptor(socket_type descriptor,
select_reactor::per_descriptor_data&, bool)
{
asio::detail::mutex::scoped_lock lock(mutex_);
cancel_ops_unlocked(descriptor, asio::error::operation_aborted);
}
void select_reactor::deregister_internal_descriptor(
socket_type descriptor, select_reactor::per_descriptor_data&)
{
asio::detail::mutex::scoped_lock lock(mutex_);
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
op_queue_[i].cancel_operations(descriptor, ops);
}
void select_reactor::cleanup_descriptor_data(
select_reactor::per_descriptor_data&)
{
}
void select_reactor::run(long usec, op_queue<operation>& ops)
{
asio::detail::mutex::scoped_lock lock(mutex_);
#if defined(ASIO_HAS_IOCP)
// Check if the thread is supposed to stop.
if (stop_thread_)
return;
#endif // defined(ASIO_HAS_IOCP)
// Set up the descriptor sets.
for (int i = 0; i < max_select_ops; ++i)
fd_sets_[i].reset();
fd_sets_[read_op].set(interrupter_.read_descriptor());
socket_type max_fd = 0;
bool have_work_to_do = !timer_queues_.all_empty();
for (int i = 0; i < max_select_ops; ++i)
{
have_work_to_do = have_work_to_do || !op_queue_[i].empty();
fd_sets_[i].set(op_queue_[i], ops);
if (fd_sets_[i].max_descriptor() > max_fd)
max_fd = fd_sets_[i].max_descriptor();
}
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
// Connection operations on Windows use both except and write fd_sets.
have_work_to_do = have_work_to_do || !op_queue_[connect_op].empty();
fd_sets_[write_op].set(op_queue_[connect_op], ops);
if (fd_sets_[write_op].max_descriptor() > max_fd)
max_fd = fd_sets_[write_op].max_descriptor();
fd_sets_[except_op].set(op_queue_[connect_op], ops);
if (fd_sets_[except_op].max_descriptor() > max_fd)
max_fd = fd_sets_[except_op].max_descriptor();
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
// We can return immediately if there's no work to do and the reactor is
// not supposed to block.
if (!usec && !have_work_to_do)
return;
// Determine how long to block while waiting for events.
timeval tv_buf = { 0, 0 };
timeval* tv = usec ? get_timeout(usec, tv_buf) : &tv_buf;
lock.unlock();
// Block on the select call until descriptors become ready.
asio::error_code ec;
int retval = socket_ops::select(static_cast<int>(max_fd + 1),
fd_sets_[read_op], fd_sets_[write_op], fd_sets_[except_op], tv, ec);
// Reset the interrupter.
if (retval > 0 && fd_sets_[read_op].is_set(interrupter_.read_descriptor()))
{
if (!interrupter_.reset())
{
lock.lock();
#if defined(ASIO_HAS_IOCP)
stop_thread_ = true;
scheduler_.post_immediate_completion(&restart_reactor_, false);
#else // defined(ASIO_HAS_IOCP)
interrupter_.recreate();
#endif // defined(ASIO_HAS_IOCP)
}
--retval;
}
lock.lock();
// Dispatch all ready operations.
if (retval > 0)
{
#if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
// Connection operations on Windows use both except and write fd_sets.
fd_sets_[except_op].perform(op_queue_[connect_op], ops);
fd_sets_[write_op].perform(op_queue_[connect_op], ops);
#endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
// Exception operations must be processed first to ensure that any
// out-of-band data is read before normal data.
for (int i = max_select_ops - 1; i >= 0; --i)
fd_sets_[i].perform(op_queue_[i], ops);
}
timer_queues_.get_ready_timers(ops);
}
void select_reactor::interrupt()
{
interrupter_.interrupt();
}
#if defined(ASIO_HAS_IOCP)
void select_reactor::run_thread()
{
asio::detail::mutex::scoped_lock lock(mutex_);
while (!stop_thread_)
{
lock.unlock();
op_queue<operation> ops;
run(-1, ops);
scheduler_.post_deferred_completions(ops);
lock.lock();
}
}
void select_reactor::restart_reactor::do_complete(void* owner, operation* base,
const asio::error_code& /*ec*/, std::size_t /*bytes_transferred*/)
{
if (owner)
{
select_reactor* reactor = static_cast<restart_reactor*>(base)->reactor_;
reactor->thread_.join();
asio::detail::mutex::scoped_lock lock(reactor->mutex_);
reactor->interrupter_.recreate();
reactor->stop_thread_ = false;
lock.unlock();
asio::detail::signal_blocker sb;
reactor->thread_ = thread(thread_function(reactor));
}
}
#endif // defined(ASIO_HAS_IOCP)
void select_reactor::do_add_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.insert(&queue);
}
void select_reactor::do_remove_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(mutex_);
timer_queues_.erase(&queue);
}
timeval* select_reactor::get_timeout(long usec, timeval& tv)
{
// By default we will wait no longer than 5 minutes. This ensures that any
// changes to the system clock are detected within at most that interval.
const long max_usec = 5 * 60 * 1000 * 1000;
usec = timer_queues_.wait_duration_usec(
(usec < 0 || max_usec < usec) ? max_usec : usec);
tv.tv_sec = usec / 1000000;
tv.tv_usec = usec % 1000000;
return &tv;
}
void select_reactor::cancel_ops_unlocked(socket_type descriptor,
const asio::error_code& ec)
{
bool need_interrupt = false;
op_queue<operation> ops;
for (int i = 0; i < max_ops; ++i)
need_interrupt = op_queue_[i].cancel_operations(
descriptor, ops, ec) || need_interrupt;
scheduler_.post_deferred_completions(ops);
if (need_interrupt)
interrupter_.interrupt();
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IOCP)
// || (!defined(ASIO_HAS_DEV_POLL)
// && !defined(ASIO_HAS_EPOLL)
// && !defined(ASIO_HAS_KQUEUE)
// && !defined(ASIO_WINDOWS_RUNTIME))
#endif // ASIO_DETAIL_IMPL_SELECT_REACTOR_IPP

View File

@@ -0,0 +1,117 @@
//
// detail/impl/service_registry.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP
#define ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename Service>
Service& service_registry::use_service()
{
execution_context::service::key key;
init_key<Service>(key, 0);
factory_type factory = &service_registry::create<Service, execution_context>;
return *static_cast<Service*>(do_use_service(key, factory, &owner_));
}
template <typename Service>
Service& service_registry::use_service(io_context& owner)
{
execution_context::service::key key;
init_key<Service>(key, 0);
factory_type factory = &service_registry::create<Service, io_context>;
return *static_cast<Service*>(do_use_service(key, factory, &owner));
}
template <typename Service, typename... Args>
Service& service_registry::make_service(Args&&... args)
{
auto_service_ptr new_service =
{ create<Service, execution_context>(owner_,
&owner_, static_cast<Args&&>(args)...) };
add_service(static_cast<Service*>(new_service.ptr_));
Service& result = *static_cast<Service*>(new_service.ptr_);
new_service.ptr_ = 0;
return result;
}
template <typename Service>
void service_registry::add_service(Service* new_service)
{
execution_context::service::key key;
init_key<Service>(key, 0);
return do_add_service(key, new_service);
}
template <typename Service>
bool service_registry::has_service() const
{
execution_context::service::key key;
init_key<Service>(key, 0);
return do_has_service(key);
}
template <typename Service>
inline void service_registry::init_key(
execution_context::service::key& key, ...)
{
init_key_from_id(key, Service::id);
}
#if !defined(ASIO_NO_TYPEID)
template <typename Service>
void service_registry::init_key(execution_context::service::key& key,
enable_if_t<is_base_of<typename Service::key_type, Service>::value>*)
{
key.type_info_ = &typeid(typeid_wrapper<Service>);
key.id_ = 0;
}
template <typename Service>
void service_registry::init_key_from_id(execution_context::service::key& key,
const service_id<Service>& /*id*/)
{
key.type_info_ = &typeid(typeid_wrapper<Service>);
key.id_ = 0;
}
#endif // !defined(ASIO_NO_TYPEID)
template <typename Service, typename Owner, typename... Args>
execution_context::service* service_registry::create(
execution_context& context, void* owner, Args&&... args)
{
Service* svc = allocate_object<Service>(
execution_context::allocator<void>(context),
*static_cast<Owner*>(owner), static_cast<Args&&>(args)...);
svc->destroy_ = &service_registry::destroy_allocated<Service>;
return svc;
}
template <typename Service>
void service_registry::destroy_allocated(execution_context::service* service)
{
deallocate_object(execution_context::allocator<void>(service->owner_),
static_cast<Service*>(service));
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_SERVICE_REGISTRY_HPP
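
A minimal usage sketch for the registry machinery above, assuming RTTI is enabled (ASIO_NO_TYPEID not defined). The class my_service is hypothetical; a user-defined service derives from execution_context::service, exposes a key_type, and is then created lazily and returned as a singleton by asio::use_service.

#include "asio/execution_context.hpp"
#include "asio/io_context.hpp"

class my_service : public asio::execution_context::service
{
public:
  typedef my_service key_type; // consumed by init_key via typeid

  explicit my_service(asio::execution_context& ctx)
    : asio::execution_context::service(ctx)
  {
  }

private:
  void shutdown() override {} // invoked by shutdown_services()
};

int main()
{
  asio::io_context io;
  my_service& s1 = asio::use_service<my_service>(io); // created on first use
  my_service& s2 = asio::use_service<my_service>(io); // same instance again
  (void)s1; (void)s2;
  return asio::has_service<my_service>(io) ? 0 : 1;
}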

View File

@@ -0,0 +1,205 @@
//
// detail/impl/service_registry.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP
#define ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <vector>
#include "asio/detail/service_registry.hpp"
#include "asio/detail/throw_exception.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
service_registry::service_registry(execution_context& owner)
: owner_(owner),
first_service_(0)
{
}
service_registry::~service_registry()
{
}
void service_registry::shutdown_services()
{
execution_context::service* service = first_service_;
while (service)
{
service->shutdown();
service = service->next_;
}
}
void service_registry::destroy_services()
{
while (first_service_)
{
execution_context::service* next_service = first_service_->next_;
first_service_->destroy_(first_service_);
first_service_ = next_service;
}
}
void service_registry::notify_fork(execution_context::fork_event fork_ev)
{
// Make a copy of all of the services while holding the lock. We don't want
// to hold the lock while calling into each service, as it may try to call
// back into this class.
std::vector<execution_context::service*> services;
{
asio::detail::mutex::scoped_lock lock(mutex_);
execution_context::service* service = first_service_;
while (service)
{
services.push_back(service);
service = service->next_;
}
}
// If processing the fork_prepare event, we want to go in reverse order of
// service registration, which happens to be the existing order of the
// services in the vector. For the other events we want to go in the other
// direction.
std::size_t num_services = services.size();
if (fork_ev == execution_context::fork_prepare)
for (std::size_t i = 0; i < num_services; ++i)
services[i]->notify_fork(fork_ev);
else
for (std::size_t i = num_services; i > 0; --i)
services[i - 1]->notify_fork(fork_ev);
}
void service_registry::init_key_from_id(execution_context::service::key& key,
const execution_context::id& id)
{
key.type_info_ = 0;
key.id_ = &id;
}
bool service_registry::keys_match(
const execution_context::service::key& key1,
const execution_context::service::key& key2)
{
if (key1.id_ && key2.id_)
if (key1.id_ == key2.id_)
return true;
if (key1.type_info_ && key2.type_info_)
if (*key1.type_info_ == *key2.type_info_)
return true;
return false;
}
void service_registry::destroy_added(execution_context::service* service)
{
delete service;
}
service_registry::auto_service_ptr::~auto_service_ptr()
{
if (ptr_)
ptr_->destroy_(ptr_);
}
execution_context::service* service_registry::do_use_service(
const execution_context::service::key& key,
factory_type factory, void* owner)
{
asio::detail::mutex::scoped_lock lock(mutex_);
// First see if there is an existing service object with the given key.
execution_context::service* service = first_service_;
while (service)
{
if (keys_match(service->key_, key))
return service;
service = service->next_;
}
// Create a new service object. The service registry's mutex is not locked
// at this time to allow for nested calls into this function from the new
// service's constructor.
lock.unlock();
auto_service_ptr new_service = { factory(owner_, owner) };
new_service.ptr_->key_ = key;
lock.lock();
// Check that nobody else created another service object of the same type
// while the lock was released.
service = first_service_;
while (service)
{
if (keys_match(service->key_, key))
return service;
service = service->next_;
}
// Service was successfully initialised, pass ownership to registry.
new_service.ptr_->next_ = first_service_;
first_service_ = new_service.ptr_;
new_service.ptr_ = 0;
return first_service_;
}
void service_registry::do_add_service(
const execution_context::service::key& key,
execution_context::service* new_service)
{
if (&owner_ != &new_service->context())
asio::detail::throw_exception(invalid_service_owner());
asio::detail::mutex::scoped_lock lock(mutex_);
// Check if there is an existing service object with the given key.
execution_context::service* service = first_service_;
while (service)
{
if (keys_match(service->key_, key))
asio::detail::throw_exception(service_already_exists());
service = service->next_;
}
// Take ownership of the service object.
if (!new_service->destroy_)
new_service->destroy_ = &service_registry::destroy_added;
new_service->key_ = key;
new_service->next_ = first_service_;
first_service_ = new_service;
}
bool service_registry::do_has_service(
const execution_context::service::key& key) const
{
asio::detail::mutex::scoped_lock lock(mutex_);
execution_context::service* service = first_service_;
while (service)
{
if (keys_match(service->key_, key))
return true;
service = service->next_;
}
return false;
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_SERVICE_REGISTRY_IPP
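
The interesting part of do_use_service above is that the factory runs with the registry mutex released, and the list is re-checked for a concurrent winner before publishing. A minimal sketch of that pattern with standard-library types (find_or_create and slot are hypothetical names):

#include <memory>
#include <mutex>

template <typename T>
T& find_or_create(std::mutex& m, std::unique_ptr<T>& slot)
{
  std::unique_lock<std::mutex> lock(m);
  if (slot)
    return *slot;                  // fast path: already created
  lock.unlock();                   // construct unlocked, allowing recursion
  std::unique_ptr<T> fresh(new T); // may itself call find_or_create
  lock.lock();
  if (!slot)
    slot = std::move(fresh);       // we won the race: publish
  return *slot;                    // a losing thread's object is discarded
}

int main()
{
  std::mutex m;
  std::unique_ptr<int> slot;
  find_or_create(m, slot);
  return 0;
}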

View File

@@ -0,0 +1,825 @@
//
// detail/impl/signal_set_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP
#define ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include <cstring>
#include <stdexcept>
#include "asio/detail/signal_blocker.hpp"
#include "asio/detail/signal_set_service.hpp"
#include "asio/detail/static_mutex.hpp"
#include "asio/detail/throw_exception.hpp"
#if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
# include "asio/detail/io_uring_service.hpp"
#else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
# include "asio/detail/reactor.hpp"
#endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
struct signal_state
{
// Mutex used for protecting global state.
static_mutex mutex_;
// The read end of the pipe used for signal notifications.
int read_descriptor_;
// The write end of the pipe used for signal notifications.
int write_descriptor_;
// Whether the signal state has been prepared for a fork.
bool fork_prepared_;
// The head of a linked list of all signal_set_service instances.
class signal_set_service* service_list_;
// A count of the number of objects that are registered for each signal.
std::size_t registration_count_[max_signal_number];
// The flags used for each registered signal.
signal_set_base::flags_t flags_[max_signal_number];
};
signal_state* get_signal_state()
{
static signal_state state = {
ASIO_STATIC_MUTEX_INIT, -1, -1, false, 0,
{ 0 }, { signal_set_base::flags_t() } };
return &state;
}
void asio_signal_handler(int signal_number)
{
#if defined(ASIO_WINDOWS) \
|| defined(ASIO_WINDOWS_RUNTIME) \
|| defined(__CYGWIN__)
signal_set_service::deliver_signal(signal_number);
#else // defined(ASIO_WINDOWS)
// || defined(ASIO_WINDOWS_RUNTIME)
// || defined(__CYGWIN__)
int saved_errno = errno;
signal_state* state = get_signal_state();
signed_size_type result = ::write(state->write_descriptor_,
&signal_number, sizeof(signal_number));
(void)result;
errno = saved_errno;
#endif // defined(ASIO_WINDOWS)
// || defined(ASIO_WINDOWS_RUNTIME)
// || defined(__CYGWIN__)
#if defined(ASIO_HAS_SIGNAL) && !defined(ASIO_HAS_SIGACTION)
::signal(signal_number, asio_signal_handler);
#endif // defined(ASIO_HAS_SIGNAL) && !defined(ASIO_HAS_SIGACTION)
}
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
class signal_set_service::pipe_read_op :
# if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
public io_uring_operation
# else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
public reactor_op
# endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
{
public:
# if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
pipe_read_op()
: io_uring_operation(asio::error_code(), &pipe_read_op::do_prepare,
&pipe_read_op::do_perform, pipe_read_op::do_complete)
{
}
static void do_prepare(io_uring_operation*, ::io_uring_sqe* sqe)
{
signal_state* state = get_signal_state();
int fd = state->read_descriptor_;
::io_uring_prep_poll_add(sqe, fd, POLLIN);
}
static bool do_perform(io_uring_operation*, bool)
{
signal_state* state = get_signal_state();
int fd = state->read_descriptor_;
int signal_number = 0;
while (::read(fd, &signal_number, sizeof(int)) == sizeof(int))
if (signal_number >= 0 && signal_number < max_signal_number)
signal_set_service::deliver_signal(signal_number);
return false;
}
# else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
pipe_read_op()
: reactor_op(asio::error_code(),
&pipe_read_op::do_perform, pipe_read_op::do_complete)
{
}
static status do_perform(reactor_op*)
{
signal_state* state = get_signal_state();
int fd = state->read_descriptor_;
int signal_number = 0;
while (::read(fd, &signal_number, sizeof(int)) == sizeof(int))
if (signal_number >= 0 && signal_number < max_signal_number)
signal_set_service::deliver_signal(signal_number);
return not_done;
}
# endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
static void do_complete(void* /*owner*/, operation* base,
const asio::error_code& /*ec*/,
std::size_t /*bytes_transferred*/)
{
pipe_read_op* o(static_cast<pipe_read_op*>(base));
delete o;
}
};
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
signal_set_service::signal_set_service(execution_context& context)
: execution_context_service_base<signal_set_service>(context),
scheduler_(asio::use_service<scheduler_impl>(context)),
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
# if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
io_uring_service_(asio::use_service<io_uring_service>(context)),
# else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
reactor_(asio::use_service<reactor>(context)),
# endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
next_(0),
prev_(0)
{
get_signal_state()->mutex_.init();
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
# if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
io_uring_service_.init_task();
# else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
reactor_.init_task();
# endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
for (int i = 0; i < max_signal_number; ++i)
registrations_[i] = 0;
add_service(this);
}
signal_set_service::~signal_set_service()
{
remove_service(this);
}
void signal_set_service::shutdown()
{
remove_service(this);
op_queue<operation> ops;
for (int i = 0; i < max_signal_number; ++i)
{
registration* reg = registrations_[i];
while (reg)
{
ops.push(*reg->queue_);
reg = reg->next_in_table_;
}
}
scheduler_.abandon_operations(ops);
}
void signal_set_service::notify_fork(execution_context::fork_event fork_ev)
{
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
switch (fork_ev)
{
case execution_context::fork_prepare:
{
int read_descriptor = state->read_descriptor_;
state->fork_prepared_ = true;
lock.unlock();
# if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
(void)read_descriptor;
io_uring_service_.deregister_io_object(io_object_data_);
io_uring_service_.cleanup_io_object(io_object_data_);
# else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
reactor_.deregister_internal_descriptor(read_descriptor, reactor_data_);
reactor_.cleanup_descriptor_data(reactor_data_);
# endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
}
break;
case execution_context::fork_parent:
if (state->fork_prepared_)
{
int read_descriptor = state->read_descriptor_;
state->fork_prepared_ = false;
lock.unlock();
# if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
(void)read_descriptor;
io_uring_service_.register_internal_io_object(io_object_data_,
io_uring_service::read_op, new pipe_read_op);
# else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
reactor_.register_internal_descriptor(reactor::read_op,
read_descriptor, reactor_data_, new pipe_read_op);
# endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
}
break;
case execution_context::fork_child:
if (state->fork_prepared_)
{
asio::detail::signal_blocker blocker;
close_descriptors();
open_descriptors();
int read_descriptor = state->read_descriptor_;
state->fork_prepared_ = false;
lock.unlock();
# if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
(void)read_descriptor;
io_uring_service_.register_internal_io_object(io_object_data_,
io_uring_service::read_op, new pipe_read_op);
# else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
reactor_.register_internal_descriptor(reactor::read_op,
read_descriptor, reactor_data_, new pipe_read_op);
# endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
}
break;
default:
break;
}
#else // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
(void)fork_ev;
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
}
void signal_set_service::construct(
signal_set_service::implementation_type& impl)
{
impl.signals_ = 0;
}
void signal_set_service::destroy(
signal_set_service::implementation_type& impl)
{
asio::error_code ignored_ec;
clear(impl, ignored_ec);
cancel(impl, ignored_ec);
}
asio::error_code signal_set_service::add(
signal_set_service::implementation_type& impl, int signal_number,
signal_set_base::flags_t f, asio::error_code& ec)
{
// Check that the signal number is valid.
if (signal_number < 0 || signal_number >= max_signal_number)
{
ec = asio::error::invalid_argument;
return ec;
}
// Check that the specified flags are supported.
#if !defined(ASIO_HAS_SIGACTION)
if (f != signal_set_base::flags::dont_care)
{
ec = asio::error::operation_not_supported;
return ec;
}
#endif // !defined(ASIO_HAS_SIGACTION)
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
// Find the appropriate place to insert the registration.
registration** insertion_point = &impl.signals_;
registration* next = impl.signals_;
while (next && next->signal_number_ < signal_number)
{
insertion_point = &next->next_in_set_;
next = next->next_in_set_;
}
// Only do something if the signal is not already registered.
if (next == 0 || next->signal_number_ != signal_number)
{
registration* new_registration = new registration;
#if defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)
// Register for the signal if we're the first.
if (state->registration_count_[signal_number] == 0)
{
# if defined(ASIO_HAS_SIGACTION)
using namespace std; // For memset.
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = asio_signal_handler;
sigfillset(&sa.sa_mask);
if (f != signal_set_base::flags::dont_care)
sa.sa_flags = static_cast<int>(f);
if (::sigaction(signal_number, &sa, 0) == -1)
# else // defined(ASIO_HAS_SIGACTION)
if (::signal(signal_number, asio_signal_handler) == SIG_ERR)
# endif // defined(ASIO_HAS_SIGACTION)
{
# if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
ec = asio::error::invalid_argument;
# else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
ec = asio::error_code(errno,
asio::error::get_system_category());
# endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
delete new_registration;
return ec;
}
# if defined(ASIO_HAS_SIGACTION)
state->flags_[signal_number] = f;
# endif // defined(ASIO_HAS_SIGACTION)
}
# if defined(ASIO_HAS_SIGACTION)
// Otherwise check to see if the flags have changed.
else if (f != signal_set_base::flags::dont_care)
{
if (f != state->flags_[signal_number])
{
using namespace std; // For memset.
if (state->flags_[signal_number] != signal_set_base::flags::dont_care)
{
ec = asio::error::invalid_argument;
delete new_registration;
return ec;
}
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = asio_signal_handler;
sigfillset(&sa.sa_mask);
sa.sa_flags = static_cast<int>(f);
if (::sigaction(signal_number, &sa, 0) == -1)
{
ec = asio::error_code(errno,
asio::error::get_system_category());
delete new_registration;
return ec;
}
state->flags_[signal_number] = f;
}
}
# endif // defined(ASIO_HAS_SIGACTION)
#endif // defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)
// Record the new registration in the set.
new_registration->signal_number_ = signal_number;
new_registration->queue_ = &impl.queue_;
new_registration->next_in_set_ = next;
*insertion_point = new_registration;
// Insert registration into the registration table.
new_registration->next_in_table_ = registrations_[signal_number];
if (registrations_[signal_number])
registrations_[signal_number]->prev_in_table_ = new_registration;
registrations_[signal_number] = new_registration;
++state->registration_count_[signal_number];
}
ec = asio::error_code();
return ec;
}
asio::error_code signal_set_service::remove(
signal_set_service::implementation_type& impl,
int signal_number, asio::error_code& ec)
{
// Check that the signal number is valid.
if (signal_number < 0 || signal_number >= max_signal_number)
{
ec = asio::error::invalid_argument;
return ec;
}
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
// Find the signal number in the list of registrations.
registration** deletion_point = &impl.signals_;
registration* reg = impl.signals_;
while (reg && reg->signal_number_ < signal_number)
{
deletion_point = &reg->next_in_set_;
reg = reg->next_in_set_;
}
if (reg != 0 && reg->signal_number_ == signal_number)
{
#if defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)
// Set signal handler back to the default if we're the last.
if (state->registration_count_[signal_number] == 1)
{
# if defined(ASIO_HAS_SIGACTION)
using namespace std; // For memset.
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = SIG_DFL;
if (::sigaction(signal_number, &sa, 0) == -1)
# else // defined(ASIO_HAS_SIGACTION)
if (::signal(signal_number, SIG_DFL) == SIG_ERR)
# endif // defined(ASIO_HAS_SIGACTION)
{
# if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
ec = asio::error::invalid_argument;
# else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
ec = asio::error_code(errno,
asio::error::get_system_category());
# endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
return ec;
}
# if defined(ASIO_HAS_SIGACTION)
state->flags_[signal_number] = signal_set_base::flags_t();
# endif // defined(ASIO_HAS_SIGACTION)
}
#endif // defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)
// Remove the registration from the set.
*deletion_point = reg->next_in_set_;
// Remove the registration from the registration table.
if (registrations_[signal_number] == reg)
registrations_[signal_number] = reg->next_in_table_;
if (reg->prev_in_table_)
reg->prev_in_table_->next_in_table_ = reg->next_in_table_;
if (reg->next_in_table_)
reg->next_in_table_->prev_in_table_ = reg->prev_in_table_;
--state->registration_count_[signal_number];
delete reg;
}
ec = asio::error_code();
return ec;
}
asio::error_code signal_set_service::clear(
signal_set_service::implementation_type& impl,
asio::error_code& ec)
{
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
while (registration* reg = impl.signals_)
{
#if defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)
// Set signal handler back to the default if we're the last.
if (state->registration_count_[reg->signal_number_] == 1)
{
# if defined(ASIO_HAS_SIGACTION)
using namespace std; // For memset.
struct sigaction sa;
memset(&sa, 0, sizeof(sa));
sa.sa_handler = SIG_DFL;
if (::sigaction(reg->signal_number_, &sa, 0) == -1)
# else // defined(ASIO_HAS_SIGACTION)
if (::signal(reg->signal_number_, SIG_DFL) == SIG_ERR)
# endif // defined(ASIO_HAS_SIGACTION)
{
# if defined(ASIO_WINDOWS) || defined(__CYGWIN__)
ec = asio::error::invalid_argument;
# else // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
ec = asio::error_code(errno,
asio::error::get_system_category());
# endif // defined(ASIO_WINDOWS) || defined(__CYGWIN__)
return ec;
}
# if defined(ASIO_HAS_SIGACTION)
state->flags_[reg->signal_number_] = signal_set_base::flags_t();
# endif // defined(ASIO_HAS_SIGACTION)
}
#endif // defined(ASIO_HAS_SIGNAL) || defined(ASIO_HAS_SIGACTION)
// Remove the registration from the registration table.
if (registrations_[reg->signal_number_] == reg)
registrations_[reg->signal_number_] = reg->next_in_table_;
if (reg->prev_in_table_)
reg->prev_in_table_->next_in_table_ = reg->next_in_table_;
if (reg->next_in_table_)
reg->next_in_table_->prev_in_table_ = reg->prev_in_table_;
--state->registration_count_[reg->signal_number_];
impl.signals_ = reg->next_in_set_;
delete reg;
}
ec = asio::error_code();
return ec;
}
asio::error_code signal_set_service::cancel(
signal_set_service::implementation_type& impl,
asio::error_code& ec)
{
ASIO_HANDLER_OPERATION((scheduler_.context(),
"signal_set", &impl, 0, "cancel"));
op_queue<operation> ops;
{
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
while (signal_op* op = impl.queue_.front())
{
op->ec_ = asio::error::operation_aborted;
impl.queue_.pop();
ops.push(op);
}
}
scheduler_.post_deferred_completions(ops);
ec = asio::error_code();
return ec;
}
void signal_set_service::cancel_ops_by_key(
signal_set_service::implementation_type& impl, void* cancellation_key)
{
op_queue<operation> ops;
{
op_queue<signal_op> other_ops;
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
while (signal_op* op = impl.queue_.front())
{
impl.queue_.pop();
if (op->cancellation_key_ == cancellation_key)
{
op->ec_ = asio::error::operation_aborted;
ops.push(op);
}
else
other_ops.push(op);
}
impl.queue_.push(other_ops);
}
scheduler_.post_deferred_completions(ops);
}
void signal_set_service::deliver_signal(int signal_number)
{
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
signal_set_service* service = state->service_list_;
while (service)
{
op_queue<operation> ops;
registration* reg = service->registrations_[signal_number];
while (reg)
{
if (reg->queue_->empty())
{
++reg->undelivered_;
}
else
{
while (signal_op* op = reg->queue_->front())
{
op->signal_number_ = signal_number;
reg->queue_->pop();
ops.push(op);
}
}
reg = reg->next_in_table_;
}
service->scheduler_.post_deferred_completions(ops);
service = service->next_;
}
}
void signal_set_service::add_service(signal_set_service* service)
{
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
// If this is the first service to be created, open a new pipe.
if (state->service_list_ == 0)
open_descriptors();
#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
// If the scheduler is thread-unsafe then it must be the only scheduler
// used to create signal_set objects.
if (state->service_list_ != 0)
{
if (!config(service->context()).get("scheduler", "locking", true)
|| !config(state->service_list_->context()).get(
"scheduler", "locking", true))
{
std::logic_error ex(
"Thread-unsafe execution context objects require "
"exclusive access to signal handling.");
asio::detail::throw_exception(ex);
}
}
// Insert service into linked list of all services.
service->next_ = state->service_list_;
service->prev_ = 0;
if (state->service_list_)
state->service_list_->prev_ = service;
state->service_list_ = service;
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
// Register for pipe readiness notifications.
int read_descriptor = state->read_descriptor_;
lock.unlock();
# if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
(void)read_descriptor;
service->io_uring_service_.register_internal_io_object(
service->io_object_data_, io_uring_service::read_op, new pipe_read_op);
# else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
service->reactor_.register_internal_descriptor(reactor::read_op,
read_descriptor, service->reactor_data_, new pipe_read_op);
# endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
}
void signal_set_service::remove_service(signal_set_service* service)
{
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
if (service->next_ || service->prev_ || state->service_list_ == service)
{
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
// Disable the pipe readiness notifications.
int read_descriptor = state->read_descriptor_;
lock.unlock();
# if defined(ASIO_HAS_IO_URING_AS_DEFAULT)
(void)read_descriptor;
service->io_uring_service_.deregister_io_object(service->io_object_data_);
service->io_uring_service_.cleanup_io_object(service->io_object_data_);
lock.lock();
# else // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
service->reactor_.deregister_internal_descriptor(
read_descriptor, service->reactor_data_);
service->reactor_.cleanup_descriptor_data(service->reactor_data_);
lock.lock();
# endif // defined(ASIO_HAS_IO_URING_AS_DEFAULT)
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
// Remove service from linked list of all services.
if (state->service_list_ == service)
state->service_list_ = service->next_;
if (service->prev_)
service->prev_->next_ = service->next_;
if (service->next_)
service->next_->prev_ = service->prev_;
service->next_ = 0;
service->prev_ = 0;
#if !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
// If this is the last service to be removed, close the pipe.
if (state->service_list_ == 0)
close_descriptors();
#endif // !defined(ASIO_WINDOWS) && !defined(__CYGWIN__)
}
}
void signal_set_service::open_descriptors()
{
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
signal_state* state = get_signal_state();
int pipe_fds[2];
if (::pipe(pipe_fds) == 0)
{
state->read_descriptor_ = pipe_fds[0];
::fcntl(state->read_descriptor_, F_SETFL, O_NONBLOCK);
state->write_descriptor_ = pipe_fds[1];
::fcntl(state->write_descriptor_, F_SETFL, O_NONBLOCK);
#if defined(FD_CLOEXEC)
::fcntl(state->read_descriptor_, F_SETFD, FD_CLOEXEC);
::fcntl(state->write_descriptor_, F_SETFD, FD_CLOEXEC);
#endif // defined(FD_CLOEXEC)
}
else
{
asio::error_code ec(errno,
asio::error::get_system_category());
asio::detail::throw_error(ec, "signal_set_service pipe");
}
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
}
void signal_set_service::close_descriptors()
{
#if !defined(ASIO_WINDOWS) \
&& !defined(ASIO_WINDOWS_RUNTIME) \
&& !defined(__CYGWIN__)
signal_state* state = get_signal_state();
if (state->read_descriptor_ != -1)
::close(state->read_descriptor_);
state->read_descriptor_ = -1;
if (state->write_descriptor_ != -1)
::close(state->write_descriptor_);
state->write_descriptor_ = -1;
#endif // !defined(ASIO_WINDOWS)
// && !defined(ASIO_WINDOWS_RUNTIME)
// && !defined(__CYGWIN__)
}
void signal_set_service::start_wait_op(
signal_set_service::implementation_type& impl, signal_op* op)
{
scheduler_.work_started();
signal_state* state = get_signal_state();
static_mutex::scoped_lock lock(state->mutex_);
registration* reg = impl.signals_;
while (reg)
{
if (reg->undelivered_ > 0)
{
--reg->undelivered_;
op->signal_number_ = reg->signal_number_;
scheduler_.post_deferred_completion(op);
return;
}
reg = reg->next_in_set_;
}
impl.queue_.push(op);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_SIGNAL_SET_SERVICE_IPP
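
The public-facing counterpart of this service is asio::signal_set. A minimal sketch: the int handed to the handler is the signal number written to the pipe by asio_signal_handler and dequeued by start_wait_op above.

#include <csignal>
#include <iostream>
#include "asio/io_context.hpp"
#include "asio/signal_set.hpp"

int main()
{
  asio::io_context io;
  asio::signal_set signals(io, SIGINT, SIGTERM);
  signals.async_wait(
      [](const asio::error_code& ec, int signal_number)
      {
        if (!ec)
          std::cout << "received signal " << signal_number << "\n";
      });
  io.run(); // blocks until one of the registered signals arrives
}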

File diff suppressed because it is too large

View File

@@ -0,0 +1,185 @@
//
// detail/impl/socket_select_interrupter.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP
#define ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if !defined(ASIO_WINDOWS_RUNTIME)
#if defined(ASIO_WINDOWS) \
|| defined(__CYGWIN__) \
|| defined(__SYMBIAN32__)
#include <cstdlib>
#include "asio/detail/socket_holder.hpp"
#include "asio/detail/socket_ops.hpp"
#include "asio/detail/socket_select_interrupter.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
socket_select_interrupter::socket_select_interrupter()
{
open_descriptors();
}
void socket_select_interrupter::open_descriptors()
{
asio::error_code ec;
socket_holder acceptor(socket_ops::socket(
AF_INET, SOCK_STREAM, IPPROTO_TCP, ec));
if (acceptor.get() == invalid_socket)
asio::detail::throw_error(ec, "socket_select_interrupter");
int opt = 1;
socket_ops::state_type acceptor_state = 0;
socket_ops::setsockopt(acceptor.get(), acceptor_state,
SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt), ec);
using namespace std; // For memset.
sockaddr_in4_type addr;
std::size_t addr_len = sizeof(addr);
memset(&addr, 0, sizeof(addr));
addr.sin_family = AF_INET;
addr.sin_addr.s_addr = socket_ops::host_to_network_long(INADDR_LOOPBACK);
addr.sin_port = 0;
if (socket_ops::bind(acceptor.get(), &addr,
addr_len, ec) == socket_error_retval)
asio::detail::throw_error(ec, "socket_select_interrupter");
if (socket_ops::getsockname(acceptor.get(), &addr,
&addr_len, ec) == socket_error_retval)
asio::detail::throw_error(ec, "socket_select_interrupter");
// Some broken firewalls on Windows will intermittently cause getsockname to
// return 0.0.0.0 when the socket is actually bound to 127.0.0.1. We
// explicitly specify the target address here to work around this problem.
if (addr.sin_addr.s_addr == socket_ops::host_to_network_long(INADDR_ANY))
addr.sin_addr.s_addr = socket_ops::host_to_network_long(INADDR_LOOPBACK);
if (socket_ops::listen(acceptor.get(),
SOMAXCONN, ec) == socket_error_retval)
asio::detail::throw_error(ec, "socket_select_interrupter");
socket_holder client(socket_ops::socket(
AF_INET, SOCK_STREAM, IPPROTO_TCP, ec));
if (client.get() == invalid_socket)
asio::detail::throw_error(ec, "socket_select_interrupter");
if (socket_ops::connect(client.get(), &addr,
addr_len, ec) == socket_error_retval)
asio::detail::throw_error(ec, "socket_select_interrupter");
socket_holder server(socket_ops::accept(acceptor.get(), 0, 0, ec));
if (server.get() == invalid_socket)
asio::detail::throw_error(ec, "socket_select_interrupter");
ioctl_arg_type non_blocking = 1;
socket_ops::state_type client_state = 0;
if (socket_ops::ioctl(client.get(), client_state,
FIONBIO, &non_blocking, ec))
asio::detail::throw_error(ec, "socket_select_interrupter");
opt = 1;
socket_ops::setsockopt(client.get(), client_state,
IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt), ec);
non_blocking = 1;
socket_ops::state_type server_state = 0;
if (socket_ops::ioctl(server.get(), server_state,
FIONBIO, &non_blocking, ec))
asio::detail::throw_error(ec, "socket_select_interrupter");
opt = 1;
socket_ops::setsockopt(server.get(), server_state,
IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt), ec);
read_descriptor_ = server.release();
write_descriptor_ = client.release();
}
socket_select_interrupter::~socket_select_interrupter()
{
close_descriptors();
}
void socket_select_interrupter::close_descriptors()
{
asio::error_code ec;
socket_ops::state_type state = socket_ops::internal_non_blocking;
if (read_descriptor_ != invalid_socket)
socket_ops::close(read_descriptor_, state, true, ec);
if (write_descriptor_ != invalid_socket)
socket_ops::close(write_descriptor_, state, true, ec);
}
void socket_select_interrupter::recreate()
{
close_descriptors();
write_descriptor_ = invalid_socket;
read_descriptor_ = invalid_socket;
open_descriptors();
}
void socket_select_interrupter::interrupt()
{
char byte = 0;
socket_ops::buf b;
socket_ops::init_buf(b, &byte, 1);
asio::error_code ec;
socket_ops::send(write_descriptor_, &b, 1, 0, ec);
}
bool socket_select_interrupter::reset()
{
char data[1024];
socket_ops::buf b;
socket_ops::init_buf(b, data, sizeof(data));
asio::error_code ec;
for (;;)
{
int bytes_read = socket_ops::recv(read_descriptor_, &b, 1, 0, ec);
if (bytes_read == sizeof(data))
continue;
if (bytes_read > 0)
return true;
if (bytes_read == 0)
return false;
if (ec == asio::error::would_block
|| ec == asio::error::try_again)
return true;
return false;
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_WINDOWS)
// || defined(__CYGWIN__)
// || defined(__SYMBIAN32__)
#endif // !defined(ASIO_WINDOWS_RUNTIME)
#endif // ASIO_DETAIL_IMPL_SOCKET_SELECT_INTERRUPTER_IPP
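
A POSIX-only sketch of the same wake-up idea using socketpair(); the implementation above instead builds the pair from a loopback TCP connection because Windows lacks socketpair(). The flow mirrors interrupt() (send one byte) and reset() (drain until the read would block).

#include <fcntl.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <unistd.h>

int main()
{
  int fds[2];
  if (::socketpair(AF_UNIX, SOCK_STREAM, 0, fds) != 0)
    return 1;
  ::fcntl(fds[0], F_SETFL, O_NONBLOCK); // draining stops at EWOULDBLOCK

  char byte = 0;
  ::send(fds[1], &byte, 1, 0); // interrupt(): make fds[0] readable

  fd_set read_set;
  FD_ZERO(&read_set);
  FD_SET(fds[0], &read_set);
  ::select(fds[0] + 1, &read_set, 0, 0, 0); // wakes immediately

  char buf[1024];
  while (::recv(fds[0], buf, sizeof(buf), 0) > 0)
    ; // reset(): drain every pending byte
  ::close(fds[0]);
  ::close(fds[1]);
  return 0;
}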

View File

@@ -0,0 +1,346 @@
//
// detail/impl/strand_executor_service.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_HPP
#define ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/fenced_block.hpp"
#include "asio/detail/recycling_allocator.hpp"
#include "asio/executor_work_guard.hpp"
#include "asio/defer.hpp"
#include "asio/dispatch.hpp"
#include "asio/post.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename F, typename Allocator>
class strand_executor_service::allocator_binder
{
public:
typedef Allocator allocator_type;
allocator_binder(F&& f, const Allocator& a)
: f_(static_cast<F&&>(f)),
allocator_(a)
{
}
allocator_binder(const allocator_binder& other)
: f_(other.f_),
allocator_(other.allocator_)
{
}
allocator_binder(allocator_binder&& other)
: f_(static_cast<F&&>(other.f_)),
allocator_(static_cast<allocator_type&&>(other.allocator_))
{
}
allocator_type get_allocator() const noexcept
{
return allocator_;
}
void operator()()
{
f_();
}
private:
F f_;
allocator_type allocator_;
};
template <typename Executor>
class strand_executor_service::invoker<Executor,
enable_if_t<
execution::is_executor<Executor>::value
>>
{
public:
invoker(const implementation_type& impl, Executor& ex)
: impl_(impl),
executor_(asio::prefer(ex, execution::outstanding_work.tracked))
{
}
invoker(const invoker& other)
: impl_(other.impl_),
executor_(other.executor_)
{
}
invoker(invoker&& other)
: impl_(static_cast<implementation_type&&>(other.impl_)),
executor_(static_cast<executor_type&&>(other.executor_))
{
}
struct on_invoker_exit
{
invoker* this_;
~on_invoker_exit()
{
if (push_waiting_to_ready(this_->impl_))
{
recycling_allocator<void> allocator;
executor_type ex = this_->executor_;
asio::prefer(
asio::require(
static_cast<executor_type&&>(ex),
execution::blocking.never),
execution::allocator(allocator)
).execute(static_cast<invoker&&>(*this_));
}
}
};
void operator()()
{
// Ensure the next handler, if any, is scheduled on block exit.
on_invoker_exit on_exit = { this };
(void)on_exit;
run_ready_handlers(impl_);
}
private:
typedef decay_t<
prefer_result_t<
Executor,
execution::outstanding_work_t::tracked_t
>
> executor_type;
implementation_type impl_;
executor_type executor_;
};
#if !defined(ASIO_NO_TS_EXECUTORS)
template <typename Executor>
class strand_executor_service::invoker<Executor,
enable_if_t<
!execution::is_executor<Executor>::value
>>
{
public:
invoker(const implementation_type& impl, Executor& ex)
: impl_(impl),
work_(ex)
{
}
invoker(const invoker& other)
: impl_(other.impl_),
work_(other.work_)
{
}
invoker(invoker&& other)
: impl_(static_cast<implementation_type&&>(other.impl_)),
work_(static_cast<executor_work_guard<Executor>&&>(other.work_))
{
}
struct on_invoker_exit
{
invoker* this_;
~on_invoker_exit()
{
if (push_waiting_to_ready(this_->impl_))
{
Executor ex(this_->work_.get_executor());
recycling_allocator<void> allocator;
ex.post(static_cast<invoker&&>(*this_), allocator);
}
}
};
void operator()()
{
// Ensure the next handler, if any, is scheduled on block exit.
on_invoker_exit on_exit = { this };
(void)on_exit;
run_ready_handlers(impl_);
}
private:
implementation_type impl_;
executor_work_guard<Executor> work_;
};
#endif // !defined(ASIO_NO_TS_EXECUTORS)
template <typename Executor, typename Function>
inline void strand_executor_service::execute(const implementation_type& impl,
Executor& ex, Function&& function,
enable_if_t<
can_query<Executor, execution::allocator_t<void>>::value
>*)
{
return strand_executor_service::do_execute(impl, ex,
static_cast<Function&&>(function),
asio::query(ex, execution::allocator));
}
template <typename Executor, typename Function>
inline void strand_executor_service::execute(const implementation_type& impl,
Executor& ex, Function&& function,
enable_if_t<
!can_query<Executor, execution::allocator_t<void>>::value
>*)
{
return strand_executor_service::do_execute(impl, ex,
static_cast<Function&&>(function),
std::allocator<void>());
}
template <typename Executor, typename Function, typename Allocator>
void strand_executor_service::do_execute(const implementation_type& impl,
Executor& ex, Function&& function, const Allocator& a)
{
typedef decay_t<Function> function_type;
// If the executor is not never-blocking, and we are already in the strand,
// then the function can run immediately.
if (asio::query(ex, execution::blocking) != execution::blocking.never
&& running_in_this_thread(impl))
{
// Make a local, non-const copy of the function.
function_type tmp(static_cast<Function&&>(function));
fenced_block b(fenced_block::full);
static_cast<function_type&&>(tmp)();
return;
}
// Allocate and construct an operation to wrap the function.
typedef executor_op<function_type, Allocator> op;
typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };
p.p = new (p.v) op(static_cast<Function&&>(function), a);
ASIO_HANDLER_CREATION((impl->service_->context(), *p.p,
"strand_executor", impl.get(), 0, "execute"));
// Add the function to the strand and schedule the strand if required.
bool first = enqueue(impl, p.p);
p.v = p.p = 0;
if (first)
{
ex.execute(invoker<Executor>(impl, ex));
}
}
template <typename Executor, typename Function, typename Allocator>
void strand_executor_service::dispatch(const implementation_type& impl,
Executor& ex, Function&& function, const Allocator& a)
{
typedef decay_t<Function> function_type;
// If we are already in the strand then the function can run immediately.
if (running_in_this_thread(impl))
{
// Make a local, non-const copy of the function.
function_type tmp(static_cast<Function&&>(function));
fenced_block b(fenced_block::full);
static_cast<function_type&&>(tmp)();
return;
}
// Allocate and construct an operation to wrap the function.
typedef executor_op<function_type, Allocator> op;
typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };
p.p = new (p.v) op(static_cast<Function&&>(function), a);
ASIO_HANDLER_CREATION((impl->service_->context(), *p.p,
"strand_executor", impl.get(), 0, "dispatch"));
// Add the function to the strand and schedule the strand if required.
bool first = enqueue(impl, p.p);
p.v = p.p = 0;
if (first)
{
asio::dispatch(ex,
allocator_binder<invoker<Executor>, Allocator>(
invoker<Executor>(impl, ex), a));
}
}
// Request invocation of the given function and return immediately.
template <typename Executor, typename Function, typename Allocator>
void strand_executor_service::post(const implementation_type& impl,
Executor& ex, Function&& function, const Allocator& a)
{
typedef decay_t<Function> function_type;
// Allocate and construct an operation to wrap the function.
typedef executor_op<function_type, Allocator> op;
typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };
p.p = new (p.v) op(static_cast<Function&&>(function), a);
ASIO_HANDLER_CREATION((impl->service_->context(), *p.p,
"strand_executor", impl.get(), 0, "post"));
// Add the function to the strand and schedule the strand if required.
bool first = enqueue(impl, p.p);
p.v = p.p = 0;
if (first)
{
asio::post(ex,
allocator_binder<invoker<Executor>, Allocator>(
invoker<Executor>(impl, ex), a));
}
}
// Request invocation of the given function and return immediately.
template <typename Executor, typename Function, typename Allocator>
void strand_executor_service::defer(const implementation_type& impl,
Executor& ex, Function&& function, const Allocator& a)
{
typedef decay_t<Function> function_type;
// Allocate and construct an operation to wrap the function.
typedef executor_op<function_type, Allocator> op;
typename op::ptr p = { detail::addressof(a), op::ptr::allocate(a), 0 };
p.p = new (p.v) op(static_cast<Function&&>(function), a);
ASIO_HANDLER_CREATION((impl->service_->context(), *p.p,
"strand_executor", impl.get(), 0, "defer"));
// Add the function to the strand and schedule the strand if required.
bool first = enqueue(impl, p.p);
p.v = p.p = 0;
if (first)
{
asio::defer(ex,
allocator_binder<invoker<Executor>, Allocator>(
invoker<Executor>(impl, ex), a));
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_HPP
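
From the user's side, all of the above is reached through asio::strand. A minimal sketch: posted handlers funnel through enqueue() and the invoker, so they never run concurrently even with two threads servicing the io_context.

#include <thread>
#include "asio/io_context.hpp"
#include "asio/post.hpp"
#include "asio/strand.hpp"

int main()
{
  asio::io_context io;
  auto strand = asio::make_strand(io);
  int counter = 0; // no mutex needed: all increments run in the strand
  for (int i = 0; i < 100; ++i)
    asio::post(strand, [&counter]{ ++counter; });
  std::thread t([&io]{ io.run(); });
  io.run();
  t.join();
  return counter == 100 ? 0 : 1;
}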

View File

@@ -0,0 +1,159 @@
//
// detail/impl/strand_executor_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_IPP
#define ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/strand_executor_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
strand_executor_service::strand_executor_service(execution_context& ctx)
: execution_context_service_base<strand_executor_service>(ctx),
mutex_(),
salt_(0),
impl_list_(0)
{
}
void strand_executor_service::shutdown()
{
op_queue<scheduler_operation> ops;
asio::detail::mutex::scoped_lock lock(mutex_);
strand_impl* impl = impl_list_;
while (impl)
{
impl->mutex_->lock();
impl->shutdown_ = true;
ops.push(impl->waiting_queue_);
ops.push(impl->ready_queue_);
impl->mutex_->unlock();
impl = impl->next_;
}
}
strand_executor_service::implementation_type
strand_executor_service::create_implementation()
{
execution_context::allocator<void> alloc(context());
implementation_type new_impl = allocate_shared<strand_impl>(alloc);
new_impl->locked_ = false;
new_impl->shutdown_ = false;
asio::detail::mutex::scoped_lock lock(mutex_);
// Select a mutex from the pool of shared mutexes.
std::size_t salt = salt_++;
std::size_t mutex_index = reinterpret_cast<std::size_t>(new_impl.get());
mutex_index += (reinterpret_cast<std::size_t>(new_impl.get()) >> 3);
mutex_index ^= salt + 0x9e3779b9 + (mutex_index << 6) + (mutex_index >> 2);
mutex_index = mutex_index % num_mutexes;
if (!mutexes_[mutex_index])
mutexes_[mutex_index] = allocate_shared<mutex>(alloc);
new_impl->mutex_ = mutexes_[mutex_index].get();
// Insert implementation into linked list of all implementations.
new_impl->next_ = impl_list_;
new_impl->prev_ = 0;
if (impl_list_)
impl_list_->prev_ = new_impl.get();
impl_list_ = new_impl.get();
new_impl->service_ = this;
return new_impl;
}
strand_executor_service::strand_impl::~strand_impl()
{
asio::detail::mutex::scoped_lock lock(service_->mutex_);
// Remove implementation from linked list of all implementations.
if (service_->impl_list_ == this)
service_->impl_list_ = next_;
if (prev_)
prev_->next_ = next_;
if (next_)
next_->prev_ = prev_;
}
bool strand_executor_service::enqueue(const implementation_type& impl,
scheduler_operation* op)
{
impl->mutex_->lock();
if (impl->shutdown_)
{
impl->mutex_->unlock();
op->destroy();
return false;
}
else if (impl->locked_)
{
// Some other function already holds the strand lock. Enqueue for later.
impl->waiting_queue_.push(op);
impl->mutex_->unlock();
return false;
}
else
{
// The function is acquiring the strand lock and so is responsible for
// scheduling the strand.
impl->locked_ = true;
impl->mutex_->unlock();
impl->ready_queue_.push(op);
return true;
}
}
bool strand_executor_service::running_in_this_thread(
const implementation_type& impl)
{
return !!call_stack<strand_impl>::contains(impl.get());
}
bool strand_executor_service::push_waiting_to_ready(implementation_type& impl)
{
impl->mutex_->lock();
impl->ready_queue_.push(impl->waiting_queue_);
bool more_handlers = impl->locked_ = !impl->ready_queue_.empty();
impl->mutex_->unlock();
return more_handlers;
}
void strand_executor_service::run_ready_handlers(implementation_type& impl)
{
// Indicate that this strand is executing on the current thread.
call_stack<strand_impl>::context ctx(impl.get());
// Run all ready handlers. No lock is required since the ready queue is
// accessed only within the strand.
asio::error_code ec;
while (scheduler_operation* o = impl->ready_queue_.front())
{
impl->ready_queue_.pop();
o->complete(impl.get(), ec, 0);
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_STRAND_EXECUTOR_SERVICE_IPP
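
A standalone sketch of the salted pointer hash create_implementation uses to spread strand implementations over a fixed pool of shared mutexes. The pool size 193 below is an arbitrary illustrative value, not Asio's num_mutexes constant.

#include <cstddef>
#include <cstdio>

std::size_t pick_mutex_index(const void* impl, std::size_t salt,
    std::size_t pool_size)
{
  std::size_t i = reinterpret_cast<std::size_t>(impl);
  i += reinterpret_cast<std::size_t>(impl) >> 3;
  i ^= salt + 0x9e3779b9 + (i << 6) + (i >> 2); // golden-ratio style mixing
  return i % pool_size;
}

int main()
{
  int probe;
  std::printf("%zu\n", pick_mutex_index(&probe, 0, 193));
}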

View File

@@ -0,0 +1,86 @@
//
// detail/impl/strand_service.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP
#define ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/completion_handler.hpp"
#include "asio/detail/fenced_block.hpp"
#include "asio/detail/handler_alloc_helpers.hpp"
#include "asio/detail/memory.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
inline strand_service::strand_impl::strand_impl()
: operation(&strand_service::do_complete),
locked_(false)
{
}
template <typename Handler>
void strand_service::dispatch(strand_service::implementation_type& impl,
Handler& handler)
{
// If we are already in the strand then the handler can run immediately.
if (running_in_this_thread(impl))
{
fenced_block b(fenced_block::full);
static_cast<Handler&&>(handler)();
return;
}
// Allocate and construct an operation to wrap the handler.
typedef completion_handler<Handler, io_context::executor_type> op;
typename op::ptr p = { asio::detail::addressof(handler),
op::ptr::allocate(handler), 0 };
p.p = new (p.v) op(handler, io_context_.get_executor());
ASIO_HANDLER_CREATION((this->context(),
*p.p, "strand", impl, 0, "dispatch"));
operation* o = p.p;
p.v = p.p = 0;
do_dispatch(impl, o);
}
// Request the io_context to invoke the given handler and return immediately.
template <typename Handler>
void strand_service::post(strand_service::implementation_type& impl,
Handler& handler)
{
bool is_continuation =
asio_handler_cont_helpers::is_continuation(handler);
// Allocate and construct an operation to wrap the handler.
typedef completion_handler<Handler, io_context::executor_type> op;
typename op::ptr p = { asio::detail::addressof(handler),
op::ptr::allocate(handler), 0 };
p.p = new (p.v) op(handler, io_context_.get_executor());
ASIO_HANDLER_CREATION((this->context(),
*p.p, "strand", impl, 0, "post"));
do_post(impl, p.p, is_continuation);
p.v = p.p = 0;
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_STRAND_SERVICE_HPP

View File

@@ -0,0 +1,205 @@
//
// detail/impl/strand_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP
#define ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/call_stack.hpp"
#include "asio/detail/strand_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
struct strand_service::on_do_complete_exit
{
io_context_impl* owner_;
strand_impl* impl_;
~on_do_complete_exit()
{
impl_->mutex_.lock();
impl_->ready_queue_.push(impl_->waiting_queue_);
bool more_handlers = impl_->locked_ = !impl_->ready_queue_.empty();
impl_->mutex_.unlock();
if (more_handlers)
owner_->post_immediate_completion(impl_, true);
}
};
strand_service::strand_service(asio::io_context& io_context)
: asio::detail::service_base<strand_service>(io_context),
io_context_(io_context),
io_context_impl_(asio::use_service<io_context_impl>(io_context)),
mutex_(),
salt_(0)
{
}
void strand_service::shutdown()
{
op_queue<operation> ops;
asio::detail::mutex::scoped_lock lock(mutex_);
for (std::size_t i = 0; i < num_implementations; ++i)
{
if (strand_impl* impl = implementations_[i].get())
{
ops.push(impl->waiting_queue_);
ops.push(impl->ready_queue_);
}
}
}
void strand_service::construct(strand_service::implementation_type& impl)
{
asio::detail::mutex::scoped_lock lock(mutex_);
std::size_t salt = salt_++;
#if defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION)
std::size_t index = salt;
#else // defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION)
std::size_t index = reinterpret_cast<std::size_t>(&impl);
index += (reinterpret_cast<std::size_t>(&impl) >> 3);
index ^= salt + 0x9e3779b9 + (index << 6) + (index >> 2);
#endif // defined(ASIO_ENABLE_SEQUENTIAL_STRAND_ALLOCATION)
index = index % num_implementations;
if (!implementations_[index])
{
execution_context::allocator<void> alloc(context());
implementations_[index] = allocate_shared<strand_impl>(alloc);
}
impl = implementations_[index].get();
}
bool strand_service::running_in_this_thread(
const implementation_type& impl) const
{
return call_stack<strand_impl>::contains(impl) != 0;
}
struct strand_service::on_dispatch_exit
{
io_context_impl* io_context_impl_;
strand_impl* impl_;
~on_dispatch_exit()
{
impl_->mutex_.lock();
impl_->ready_queue_.push(impl_->waiting_queue_);
bool more_handlers = impl_->locked_ = !impl_->ready_queue_.empty();
impl_->mutex_.unlock();
if (more_handlers)
io_context_impl_->post_immediate_completion(impl_, false);
}
};
void strand_service::do_dispatch(implementation_type& impl, operation* op)
{
// If we are running inside the io_context, and no other handler already
// holds the strand lock, then the handler can run immediately.
bool can_dispatch = io_context_impl_.can_dispatch();
impl->mutex_.lock();
if (can_dispatch && !impl->locked_)
{
// Immediate invocation is allowed.
impl->locked_ = true;
impl->mutex_.unlock();
// Indicate that this strand is executing on the current thread.
call_stack<strand_impl>::context ctx(impl);
// Ensure the next handler, if any, is scheduled on block exit.
on_dispatch_exit on_exit = { &io_context_impl_, impl };
(void)on_exit;
op->complete(&io_context_impl_, asio::error_code(), 0);
return;
}
if (impl->locked_)
{
// Some other handler already holds the strand lock. Enqueue for later.
impl->waiting_queue_.push(op);
impl->mutex_.unlock();
}
else
{
// The handler is acquiring the strand lock and so is responsible for
// scheduling the strand.
impl->locked_ = true;
impl->mutex_.unlock();
impl->ready_queue_.push(op);
io_context_impl_.post_immediate_completion(impl, false);
}
}
void strand_service::do_post(implementation_type& impl,
operation* op, bool is_continuation)
{
impl->mutex_.lock();
if (impl->locked_)
{
// Some other handler already holds the strand lock. Enqueue for later.
impl->waiting_queue_.push(op);
impl->mutex_.unlock();
}
else
{
// The handler is acquiring the strand lock and so is responsible for
// scheduling the strand.
impl->locked_ = true;
impl->mutex_.unlock();
impl->ready_queue_.push(op);
io_context_impl_.post_immediate_completion(impl, is_continuation);
}
}
void strand_service::do_complete(void* owner, operation* base,
const asio::error_code& ec, std::size_t /*bytes_transferred*/)
{
if (owner)
{
strand_impl* impl = static_cast<strand_impl*>(base);
// Indicate that this strand is executing on the current thread.
call_stack<strand_impl>::context ctx(impl);
// Ensure the next handler, if any, is scheduled on block exit.
on_do_complete_exit on_exit;
on_exit.owner_ = static_cast<io_context_impl*>(owner);
on_exit.impl_ = impl;
// Run all ready handlers. No lock is required since the ready queue is
// accessed only within the strand.
while (operation* o = impl->ready_queue_.front())
{
impl->ready_queue_.pop();
o->complete(owner, ec, 0);
}
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_STRAND_SERVICE_IPP

View File

@@ -0,0 +1,35 @@
//
// detail/impl/thread_context.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_THREAD_CONTEXT_IPP
#define ASIO_DETAIL_IMPL_THREAD_CONTEXT_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
thread_info_base* thread_context::top_of_thread_call_stack()
{
return thread_call_stack::top();
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_THREAD_CONTEXT_IPP

View File

@@ -0,0 +1,49 @@
//
// detail/impl/throw_error.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_THROW_ERROR_IPP
#define ASIO_DETAIL_IMPL_THROW_ERROR_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/system_error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
void do_throw_error(
const asio::error_code& err
ASIO_SOURCE_LOCATION_PARAM)
{
asio::system_error e(err);
asio::detail::throw_exception(e ASIO_SOURCE_LOCATION_ARG);
}
void do_throw_error(
const asio::error_code& err,
const char* location
ASIO_SOURCE_LOCATION_PARAM)
{
asio::system_error e(err, location);
asio::detail::throw_exception(e ASIO_SOURCE_LOCATION_ARG);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_THROW_ERROR_IPP
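
A usage sketch for the helpers above: asio::detail::throw_error (declared in asio/detail/throw_error.hpp) forwards a non-zero error_code to do_throw_error, which surfaces as an asio::system_error carrying the optional location string. The error value and the "example" location below are placeholders chosen for illustration:

#include <iostream>
#include "asio/detail/throw_error.hpp"
#include "asio/error.hpp"
#include "asio/system_error.hpp"

int main()
{
  try
  {
    asio::error_code ec = asio::error::eof;
    asio::detail::throw_error(ec, "example"); // throws because ec is set
  }
  catch (const asio::system_error& e)
  {
    std::cout << e.what() << "\n"; // message includes the location string
  }
}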

View File

@@ -0,0 +1,101 @@
//
// detail/impl/timer_queue_set.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP
#define ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#include "asio/detail/timer_queue_set.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
timer_queue_set::timer_queue_set()
: first_(0)
{
}
void timer_queue_set::insert(timer_queue_base* q)
{
q->next_ = first_;
first_ = q;
}
void timer_queue_set::erase(timer_queue_base* q)
{
if (first_)
{
if (q == first_)
{
first_ = q->next_;
q->next_ = 0;
return;
}
for (timer_queue_base* p = first_; p->next_; p = p->next_)
{
if (p->next_ == q)
{
p->next_ = q->next_;
q->next_ = 0;
return;
}
}
}
}
bool timer_queue_set::all_empty() const
{
for (timer_queue_base* p = first_; p; p = p->next_)
if (!p->empty())
return false;
return true;
}
long timer_queue_set::wait_duration_msec(long max_duration) const
{
long min_duration = max_duration;
for (timer_queue_base* p = first_; p; p = p->next_)
min_duration = p->wait_duration_msec(min_duration);
return min_duration;
}
long timer_queue_set::wait_duration_usec(long max_duration) const
{
long min_duration = max_duration;
for (timer_queue_base* p = first_; p; p = p->next_)
min_duration = p->wait_duration_usec(min_duration);
return min_duration;
}
void timer_queue_set::get_ready_timers(op_queue<operation>& ops)
{
for (timer_queue_base* p = first_; p; p = p->next_)
p->get_ready_timers(ops);
}
void timer_queue_set::get_all_timers(op_queue<operation>& ops)
{
for (timer_queue_base* p = first_; p; p = p->next_)
p->get_all_timers(ops);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // ASIO_DETAIL_IMPL_TIMER_QUEUE_SET_IPP
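
The set above is an intrusive singly-linked list: insert pushes at the head, and erase walks the next_ pointers. The same walk, reduced to a stand-alone check (node and erase are hypothetical stand-ins for timer_queue_base and timer_queue_set::erase):

#include <cassert>

struct node { node* next_ = nullptr; };

void erase(node*& first, node* q)
{
  if (!first)
    return;
  if (q == first) // unlink the head without walking
  {
    first = q->next_;
    q->next_ = nullptr;
    return;
  }
  for (node* p = first; p->next_; p = p->next_)
  {
    if (p->next_ == q) // unlink from the middle or tail
    {
      p->next_ = q->next_;
      q->next_ = nullptr;
      return;
    }
  }
}

int main()
{
  node a, b, c;
  c.next_ = &b; b.next_ = &a; // insertion order a, b, c gives c -> b -> a
  node* first = &c;
  erase(first, &b);
  assert(first == &c && c.next_ == &a && a.next_ == nullptr);
}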

View File

@@ -0,0 +1,76 @@
//
// detail/impl/win_event.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_EVENT_IPP
#define ASIO_DETAIL_IMPL_WIN_EVENT_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_WINDOWS)
#include "asio/detail/throw_error.hpp"
#include "asio/detail/win_event.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
win_event::win_event()
: state_(0)
{
#if defined(ASIO_WINDOWS_APP)
events_[0] = ::CreateEventExW(0, 0,
CREATE_EVENT_MANUAL_RESET, EVENT_ALL_ACCESS);
#else // defined(ASIO_WINDOWS_APP)
events_[0] = ::CreateEventW(0, true, false, 0);
#endif // defined(ASIO_WINDOWS_APP)
if (!events_[0])
{
DWORD last_error = ::GetLastError();
asio::error_code ec(last_error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "event");
}
#if defined(ASIO_WINDOWS_APP)
events_[1] = ::CreateEventExW(0, 0, 0, EVENT_ALL_ACCESS);
#else // defined(ASIO_WINDOWS_APP)
events_[1] = ::CreateEventW(0, false, false, 0);
#endif // defined(ASIO_WINDOWS_APP)
if (!events_[1])
{
DWORD last_error = ::GetLastError();
::CloseHandle(events_[0]);
asio::error_code ec(last_error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "event");
}
}
win_event::~win_event()
{
::CloseHandle(events_[0]);
::CloseHandle(events_[1]);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_WINDOWS)
#endif // ASIO_DETAIL_IMPL_WIN_EVENT_IPP
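
The constructor above deliberately pairs a manual-reset event (events_[0]) with an auto-reset event (events_[1]). A Windows-only sketch of the semantic difference that pairing exploits; the demo itself is hypothetical and not part of asio:

#include <windows.h>
#include <iostream>

int main()
{
  HANDLE manual_reset = ::CreateEventW(0, TRUE, FALSE, 0); // like events_[0]
  HANDLE auto_reset = ::CreateEventW(0, FALSE, FALSE, 0);  // like events_[1]
  ::SetEvent(manual_reset);
  ::SetEvent(auto_reset);
  // A manual-reset event stays signalled until ResetEvent, so repeated
  // waits succeed and every waiter can be released at once.
  std::cout << (::WaitForSingleObject(manual_reset, 0) == WAIT_OBJECT_0); // 1
  std::cout << (::WaitForSingleObject(manual_reset, 0) == WAIT_OBJECT_0); // 1
  // An auto-reset event is consumed by the first successful wait, so it
  // releases exactly one waiter.
  std::cout << (::WaitForSingleObject(auto_reset, 0) == WAIT_OBJECT_0); // 1
  std::cout << (::WaitForSingleObject(auto_reset, 0) == WAIT_OBJECT_0); // 0
  std::cout << "\n";
  ::CloseHandle(manual_reset);
  ::CloseHandle(auto_reset);
}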

View File

@@ -0,0 +1,288 @@
//
// detail/impl/win_iocp_file_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_FILE_SERVICE_IPP
#define ASIO_DETAIL_IMPL_WIN_IOCP_FILE_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_FILE) \
&& defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE)
#include <cstring>
#include <sys/stat.h>
#include "asio/detail/win_iocp_file_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
win_iocp_file_service::win_iocp_file_service(
execution_context& context)
: execution_context_service_base<win_iocp_file_service>(context),
handle_service_(context),
nt_flush_buffers_file_ex_(0)
{
if (FARPROC nt_flush_buffers_file_ex_ptr = ::GetProcAddress(
::GetModuleHandleA("NTDLL"), "NtFlushBuffersFileEx"))
{
nt_flush_buffers_file_ex_ = reinterpret_cast<nt_flush_buffers_file_ex_fn>(
reinterpret_cast<void*>(nt_flush_buffers_file_ex_ptr));
}
}
void win_iocp_file_service::shutdown()
{
handle_service_.shutdown();
}
asio::error_code win_iocp_file_service::open(
win_iocp_file_service::implementation_type& impl,
const char* path, file_base::flags open_flags,
asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
ASIO_ERROR_LOCATION(ec);
return ec;
}
DWORD access = 0;
if ((open_flags & file_base::read_only) != 0)
access = GENERIC_READ;
else if ((open_flags & file_base::write_only) != 0)
access = GENERIC_WRITE;
else if ((open_flags & file_base::read_write) != 0)
access = GENERIC_READ | GENERIC_WRITE;
DWORD share = FILE_SHARE_READ | FILE_SHARE_WRITE;
DWORD disposition = 0;
if ((open_flags & file_base::create) != 0)
{
if ((open_flags & file_base::exclusive) != 0)
disposition = CREATE_NEW;
else
disposition = OPEN_ALWAYS;
}
else
{
if ((open_flags & file_base::truncate) != 0)
disposition = TRUNCATE_EXISTING;
else
disposition = OPEN_EXISTING;
}
DWORD flags = FILE_ATTRIBUTE_NORMAL | FILE_FLAG_OVERLAPPED;
if (impl.is_stream_)
flags |= FILE_FLAG_SEQUENTIAL_SCAN;
else
flags |= FILE_FLAG_RANDOM_ACCESS;
if ((open_flags & file_base::sync_all_on_write) != 0)
flags |= FILE_FLAG_WRITE_THROUGH;
impl.offset_ = 0;
HANDLE handle = ::CreateFileA(path, access, share, 0, disposition, flags, 0);
if (handle != INVALID_HANDLE_VALUE)
{
if (disposition == OPEN_ALWAYS)
{
if ((open_flags & file_base::truncate) != 0)
{
if (!::SetEndOfFile(handle))
{
DWORD last_error = ::GetLastError();
::CloseHandle(handle);
ec.assign(last_error, asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
}
}
if (disposition == OPEN_ALWAYS || disposition == OPEN_EXISTING)
{
if ((open_flags & file_base::append) != 0)
{
LARGE_INTEGER distance, new_offset;
distance.QuadPart = 0;
if (::SetFilePointerEx(handle, distance, &new_offset, FILE_END))
{
impl.offset_ = static_cast<uint64_t>(new_offset.QuadPart);
}
else
{
DWORD last_error = ::GetLastError();
::CloseHandle(handle);
ec.assign(last_error, asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
}
}
handle_service_.assign(impl, handle, ec);
if (ec)
::CloseHandle(handle);
ASIO_ERROR_LOCATION(ec);
return ec;
}
else
{
DWORD last_error = ::GetLastError();
ec.assign(last_error, asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
}
uint64_t win_iocp_file_service::size(
const win_iocp_file_service::implementation_type& impl,
asio::error_code& ec) const
{
LARGE_INTEGER result;
if (::GetFileSizeEx(native_handle(impl), &result))
{
asio::error::clear(ec);
return static_cast<uint64_t>(result.QuadPart);
}
else
{
DWORD last_error = ::GetLastError();
ec.assign(last_error, asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return 0;
}
}
asio::error_code win_iocp_file_service::resize(
win_iocp_file_service::implementation_type& impl,
uint64_t n, asio::error_code& ec)
{
LARGE_INTEGER distance;
distance.QuadPart = n;
if (::SetFilePointerEx(native_handle(impl), distance, 0, FILE_BEGIN))
{
BOOL result = ::SetEndOfFile(native_handle(impl));
DWORD last_error = ::GetLastError();
distance.QuadPart = static_cast<LONGLONG>(impl.offset_);
if (!::SetFilePointerEx(native_handle(impl), distance, 0, FILE_BEGIN))
{
result = FALSE;
last_error = ::GetLastError();
}
if (result)
asio::error::clear(ec);
else
ec.assign(last_error, asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
else
{
DWORD last_error = ::GetLastError();
ec.assign(last_error, asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
}
asio::error_code win_iocp_file_service::sync_all(
win_iocp_file_service::implementation_type& impl,
asio::error_code& ec)
{
BOOL result = ::FlushFileBuffers(native_handle(impl));
if (result)
{
asio::error::clear(ec);
return ec;
}
else
{
DWORD last_error = ::GetLastError();
ec.assign(last_error, asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
}
asio::error_code win_iocp_file_service::sync_data(
win_iocp_file_service::implementation_type& impl,
asio::error_code& ec)
{
if (nt_flush_buffers_file_ex_)
{
io_status_block status = {};
if (!nt_flush_buffers_file_ex_(native_handle(impl),
flush_flags_file_data_sync_only, 0, 0, &status))
{
asio::error::clear(ec);
return ec;
}
}
return sync_all(impl, ec);
}
uint64_t win_iocp_file_service::seek(
win_iocp_file_service::implementation_type& impl, int64_t offset,
file_base::seek_basis whence, asio::error_code& ec)
{
DWORD method;
switch (whence)
{
case file_base::seek_set:
method = FILE_BEGIN;
break;
case file_base::seek_cur:
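// Emulate seek-from-current by adding the delta to the service's cached
// offset and then seeking absolutely from the start of the file.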
method = FILE_BEGIN;
offset = static_cast<int64_t>(impl.offset_) + offset;
break;
case file_base::seek_end:
method = FILE_END;
break;
default:
ec = asio::error::invalid_argument;
ASIO_ERROR_LOCATION(ec);
return 0;
}
LARGE_INTEGER distance, new_offset;
distance.QuadPart = offset;
if (::SetFilePointerEx(native_handle(impl), distance, &new_offset, method))
{
impl.offset_ = new_offset.QuadPart;
asio::error::clear(ec);
return impl.offset_;
}
else
{
DWORD last_error = ::GetLastError();
ec.assign(last_error, asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return 0;
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_FILE)
// && defined(ASIO_HAS_WINDOWS_RANDOM_ACCESS_HANDLE)
#endif // ASIO_DETAIL_IMPL_WIN_IOCP_FILE_SERVICE_IPP
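
A usage sketch exercising the service above through asio's public file API (requires ASIO_HAS_FILE; "example.txt" is a placeholder path). Opening with create | read_write maps to the OPEN_ALWAYS disposition in open() above, and size() bottoms out in GetFileSizeEx:

#include <iostream>
#include "asio.hpp"

int main()
{
  asio::io_context ctx;
  asio::error_code ec;
  asio::random_access_file file(ctx);
  file.open("example.txt",
      asio::file_base::create | asio::file_base::read_write, ec);
  if (ec)
  {
    std::cerr << "open: " << ec.message() << "\n";
    return 1;
  }
  std::cout << "size: " << file.size(ec) << " bytes\n";
}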

View File

@@ -0,0 +1,619 @@
//
// detail/impl/win_iocp_handle_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP
#define ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IOCP)
#include "asio/detail/win_iocp_handle_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
class win_iocp_handle_service::overlapped_wrapper
: public OVERLAPPED
{
public:
explicit overlapped_wrapper(asio::error_code& ec)
{
Internal = 0;
InternalHigh = 0;
Offset = 0;
OffsetHigh = 0;
// Create a non-signalled manual-reset event, for GetOverlappedResult.
hEvent = ::CreateEventW(0, TRUE, FALSE, 0);
if (hEvent)
{
// As documented for GetQueuedCompletionStatus, setting the low-order
// bit of this event prevents our synchronous writes from being treated
// as completion port events.
DWORD_PTR tmp = reinterpret_cast<DWORD_PTR>(hEvent);
hEvent = reinterpret_cast<HANDLE>(tmp | 1);
}
else
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
}
~overlapped_wrapper()
{
if (hEvent)
{
::CloseHandle(hEvent);
}
}
};
win_iocp_handle_service::win_iocp_handle_service(execution_context& context)
: execution_context_service_base<win_iocp_handle_service>(context),
iocp_service_(asio::use_service<win_iocp_io_context>(context)),
nt_set_info_(0),
mutex_(),
impl_list_(0)
{
}
void win_iocp_handle_service::shutdown()
{
// Close all implementations, causing all operations to complete.
asio::detail::mutex::scoped_lock lock(mutex_);
implementation_type* impl = impl_list_;
while (impl)
{
close_for_destruction(*impl);
impl = impl->next_;
}
}
void win_iocp_handle_service::construct(
win_iocp_handle_service::implementation_type& impl)
{
impl.handle_ = INVALID_HANDLE_VALUE;
impl.safe_cancellation_thread_id_ = 0;
// Insert implementation into linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
impl.next_ = impl_list_;
impl.prev_ = 0;
if (impl_list_)
impl_list_->prev_ = &impl;
impl_list_ = &impl;
}
void win_iocp_handle_service::move_construct(
win_iocp_handle_service::implementation_type& impl,
win_iocp_handle_service::implementation_type& other_impl)
{
impl.handle_ = other_impl.handle_;
other_impl.handle_ = INVALID_HANDLE_VALUE;
impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
other_impl.safe_cancellation_thread_id_ = 0;
// Insert implementation into linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
impl.next_ = impl_list_;
impl.prev_ = 0;
if (impl_list_)
impl_list_->prev_ = &impl;
impl_list_ = &impl;
}
void win_iocp_handle_service::move_assign(
win_iocp_handle_service::implementation_type& impl,
win_iocp_handle_service& other_service,
win_iocp_handle_service::implementation_type& other_impl)
{
close_for_destruction(impl);
if (this != &other_service)
{
// Remove implementation from linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
if (impl_list_ == &impl)
impl_list_ = impl.next_;
if (impl.prev_)
impl.prev_->next_ = impl.next_;
if (impl.next_)
impl.next_->prev_ = impl.prev_;
impl.next_ = 0;
impl.prev_ = 0;
}
impl.handle_ = other_impl.handle_;
other_impl.handle_ = INVALID_HANDLE_VALUE;
impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
other_impl.safe_cancellation_thread_id_ = 0;
if (this != &other_service)
{
// Insert implementation into linked list of all implementations.
asio::detail::mutex::scoped_lock lock(other_service.mutex_);
impl.next_ = other_service.impl_list_;
impl.prev_ = 0;
if (other_service.impl_list_)
other_service.impl_list_->prev_ = &impl;
other_service.impl_list_ = &impl;
}
}
void win_iocp_handle_service::destroy(
win_iocp_handle_service::implementation_type& impl)
{
close_for_destruction(impl);
// Remove implementation from linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
if (impl_list_ == &impl)
impl_list_ = impl.next_;
if (impl.prev_)
impl.prev_->next_ = impl.next_;
if (impl.next_)
impl.next_->prev_ = impl.prev_;
impl.next_ = 0;
impl.prev_ = 0;
}
asio::error_code win_iocp_handle_service::assign(
win_iocp_handle_service::implementation_type& impl,
const native_handle_type& handle, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
ASIO_ERROR_LOCATION(ec);
return ec;
}
if (iocp_service_.register_handle(handle, ec))
{
ASIO_ERROR_LOCATION(ec);
return ec;
}
impl.handle_ = handle;
ec = asio::error_code();
return ec;
}
asio::error_code win_iocp_handle_service::close(
win_iocp_handle_service::implementation_type& impl,
asio::error_code& ec)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((iocp_service_.context(), "handle",
&impl, reinterpret_cast<uintmax_t>(impl.handle_), "close"));
if (!::CloseHandle(impl.handle_))
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
else
{
ec = asio::error_code();
}
impl.handle_ = INVALID_HANDLE_VALUE;
impl.safe_cancellation_thread_id_ = 0;
}
else
{
ec = asio::error_code();
}
ASIO_ERROR_LOCATION(ec);
return ec;
}
win_iocp_handle_service::native_handle_type win_iocp_handle_service::release(
win_iocp_handle_service::implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
return INVALID_HANDLE_VALUE;
cancel(impl, ec);
if (ec)
{
ASIO_ERROR_LOCATION(ec);
return INVALID_HANDLE_VALUE;
}
nt_set_info_fn fn = get_nt_set_info();
if (fn == 0)
{
ec = asio::error::operation_not_supported;
ASIO_ERROR_LOCATION(ec);
return INVALID_HANDLE_VALUE;
}
ULONG_PTR iosb[2] = { 0, 0 };
void* info[2] = { 0, 0 };
if (fn(impl.handle_, iosb, &info, sizeof(info),
61 /* FileReplaceCompletionInformation */))
{
ec = asio::error::operation_not_supported;
ASIO_ERROR_LOCATION(ec);
return INVALID_HANDLE_VALUE;
}
native_handle_type tmp = impl.handle_;
impl.handle_ = INVALID_HANDLE_VALUE;
return tmp;
}
asio::error_code win_iocp_handle_service::cancel(
win_iocp_handle_service::implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
ASIO_ERROR_LOCATION(ec);
return ec;
}
ASIO_HANDLER_OPERATION((iocp_service_.context(), "handle",
&impl, reinterpret_cast<uintmax_t>(impl.handle_), "cancel"));
if (FARPROC cancel_io_ex_ptr = ::GetProcAddress(
::GetModuleHandleA("KERNEL32"), "CancelIoEx"))
{
// This version of Windows supports cancellation from any thread.
typedef BOOL (WINAPI* cancel_io_ex_t)(HANDLE, LPOVERLAPPED);
cancel_io_ex_t cancel_io_ex = reinterpret_cast<cancel_io_ex_t>(
reinterpret_cast<void*>(cancel_io_ex_ptr));
if (!cancel_io_ex(impl.handle_, 0))
{
DWORD last_error = ::GetLastError();
if (last_error == ERROR_NOT_FOUND)
{
// ERROR_NOT_FOUND means that there were no operations to be
// cancelled. We swallow this error to match the behaviour on other
// platforms.
ec = asio::error_code();
}
else
{
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
}
else
{
ec = asio::error_code();
}
}
else if (impl.safe_cancellation_thread_id_ == 0)
{
// No operations have been started, so there's nothing to cancel.
ec = asio::error_code();
}
else if (impl.safe_cancellation_thread_id_ == ::GetCurrentThreadId())
{
// Asynchronous operations have been started from the current thread only,
// so it is safe to try to cancel them using CancelIo.
if (!::CancelIo(impl.handle_))
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
else
{
ec = asio::error_code();
}
}
else
{
// Asynchronous operations have been started from more than one thread,
// so cancellation is not safe.
ec = asio::error::operation_not_supported;
}
ASIO_ERROR_LOCATION(ec);
return ec;
}
size_t win_iocp_handle_service::do_write(
win_iocp_handle_service::implementation_type& impl, uint64_t offset,
const asio::const_buffer& buffer, asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
ASIO_ERROR_LOCATION(ec);
return 0;
}
// A request to write 0 bytes on a handle is a no-op.
if (buffer.size() == 0)
{
ec = asio::error_code();
return 0;
}
overlapped_wrapper overlapped(ec);
if (ec)
{
ASIO_ERROR_LOCATION(ec);
return 0;
}
// Write the data.
overlapped.Offset = offset & 0xFFFFFFFF;
overlapped.OffsetHigh = (offset >> 32) & 0xFFFFFFFF;
BOOL ok = ::WriteFile(impl.handle_, buffer.data(),
static_cast<DWORD>(buffer.size()), 0, &overlapped);
if (!ok)
{
DWORD last_error = ::GetLastError();
if (last_error != ERROR_IO_PENDING)
{
ec = asio::error_code(last_error,
asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return 0;
}
}
// Wait for the operation to complete.
DWORD bytes_transferred = 0;
ok = ::GetOverlappedResult(impl.handle_,
&overlapped, &bytes_transferred, TRUE);
if (!ok)
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return 0;
}
ec = asio::error_code();
return bytes_transferred;
}
void win_iocp_handle_service::start_write_op(
win_iocp_handle_service::implementation_type& impl, uint64_t offset,
const asio::const_buffer& buffer, operation* op)
{
update_cancellation_thread_id(impl);
iocp_service_.work_started();
if (!is_open(impl))
{
iocp_service_.on_completion(op, asio::error::bad_descriptor);
}
else if (buffer.size() == 0)
{
// A request to write 0 bytes on a handle is a no-op.
iocp_service_.on_completion(op);
}
else
{
DWORD bytes_transferred = 0;
op->Offset = offset & 0xFFFFFFFF;
op->OffsetHigh = (offset >> 32) & 0xFFFFFFFF;
BOOL ok = ::WriteFile(impl.handle_, buffer.data(),
static_cast<DWORD>(buffer.size()),
&bytes_transferred, op);
DWORD last_error = ::GetLastError();
if (!ok && last_error != ERROR_IO_PENDING
&& last_error != ERROR_MORE_DATA)
{
iocp_service_.on_completion(op, last_error, bytes_transferred);
}
else
{
iocp_service_.on_pending(op);
}
}
}
size_t win_iocp_handle_service::do_read(
win_iocp_handle_service::implementation_type& impl, uint64_t offset,
const asio::mutable_buffer& buffer, asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
ASIO_ERROR_LOCATION(ec);
return 0;
}
// A request to read 0 bytes on a stream handle is a no-op.
if (buffer.size() == 0)
{
ec = asio::error_code();
return 0;
}
overlapped_wrapper overlapped(ec);
if (ec)
{
ASIO_ERROR_LOCATION(ec);
return 0;
}
// Read some data.
overlapped.Offset = offset & 0xFFFFFFFF;
overlapped.OffsetHigh = (offset >> 32) & 0xFFFFFFFF;
BOOL ok = ::ReadFile(impl.handle_, buffer.data(),
static_cast<DWORD>(buffer.size()), 0, &overlapped);
if (!ok)
{
DWORD last_error = ::GetLastError();
if (last_error != ERROR_IO_PENDING && last_error != ERROR_MORE_DATA)
{
if (last_error == ERROR_HANDLE_EOF)
{
ec = asio::error::eof;
}
else
{
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
ASIO_ERROR_LOCATION(ec);
return 0;
}
}
// Wait for the operation to complete.
DWORD bytes_transferred = 0;
ok = ::GetOverlappedResult(impl.handle_,
&overlapped, &bytes_transferred, TRUE);
if (!ok)
{
DWORD last_error = ::GetLastError();
if (last_error == ERROR_HANDLE_EOF)
{
ec = asio::error::eof;
}
else
{
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
ASIO_ERROR_LOCATION(ec);
return (last_error == ERROR_MORE_DATA) ? bytes_transferred : 0;
}
ec = asio::error_code();
return bytes_transferred;
}
void win_iocp_handle_service::start_read_op(
win_iocp_handle_service::implementation_type& impl, uint64_t offset,
const asio::mutable_buffer& buffer, operation* op)
{
update_cancellation_thread_id(impl);
iocp_service_.work_started();
if (!is_open(impl))
{
iocp_service_.on_completion(op, asio::error::bad_descriptor);
}
else if (buffer.size() == 0)
{
// A request to read 0 bytes on a handle is a no-op.
iocp_service_.on_completion(op);
}
else
{
DWORD bytes_transferred = 0;
op->Offset = offset & 0xFFFFFFFF;
op->OffsetHigh = (offset >> 32) & 0xFFFFFFFF;
BOOL ok = ::ReadFile(impl.handle_, buffer.data(),
static_cast<DWORD>(buffer.size()),
&bytes_transferred, op);
DWORD last_error = ::GetLastError();
if (!ok && last_error != ERROR_IO_PENDING
&& last_error != ERROR_MORE_DATA)
{
iocp_service_.on_completion(op, last_error, bytes_transferred);
}
else
{
iocp_service_.on_pending(op);
}
}
}
void win_iocp_handle_service::update_cancellation_thread_id(
win_iocp_handle_service::implementation_type& impl)
{
if (impl.safe_cancellation_thread_id_ == 0)
impl.safe_cancellation_thread_id_ = ::GetCurrentThreadId();
else if (impl.safe_cancellation_thread_id_ != ::GetCurrentThreadId())
impl.safe_cancellation_thread_id_ = ~DWORD(0);
}
void win_iocp_handle_service::close_for_destruction(implementation_type& impl)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((iocp_service_.context(), "handle",
&impl, reinterpret_cast<uintmax_t>(impl.handle_), "close"));
::CloseHandle(impl.handle_);
impl.handle_ = INVALID_HANDLE_VALUE;
impl.safe_cancellation_thread_id_ = 0;
}
}
win_iocp_handle_service::nt_set_info_fn
win_iocp_handle_service::get_nt_set_info()
{
void* ptr = interlocked_compare_exchange_pointer(&nt_set_info_, 0, 0);
if (!ptr)
{
if (HMODULE h = ::GetModuleHandleA("NTDLL.DLL"))
ptr = reinterpret_cast<void*>(GetProcAddress(h, "NtSetInformationFile"));
// On failure, set nt_set_info_ to a special value to indicate that the
// NtSetInformationFile function is unavailable. That way we won't bother
// trying to look it up again.
interlocked_exchange_pointer(&nt_set_info_, ptr ? ptr : this);
}
return reinterpret_cast<nt_set_info_fn>(ptr == this ? 0 : ptr);
}
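// (Editorial note: the lookup above is a once-only cache. A failed
// GetProcAddress stores `this` as a sentinel meaning "unavailable", so
// concurrent first callers race benignly and nobody retries the lookup.)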
void* win_iocp_handle_service::interlocked_compare_exchange_pointer(
void** dest, void* exch, void* cmp)
{
#if defined(_M_IX86)
return reinterpret_cast<void*>(InterlockedCompareExchange(
reinterpret_cast<PLONG>(dest), reinterpret_cast<LONG>(exch),
reinterpret_cast<LONG>(cmp)));
#else
return InterlockedCompareExchangePointer(dest, exch, cmp);
#endif
}
void* win_iocp_handle_service::interlocked_exchange_pointer(
void** dest, void* val)
{
#if defined(_M_IX86)
return reinterpret_cast<void*>(InterlockedExchange(
reinterpret_cast<PLONG>(dest), reinterpret_cast<LONG>(val)));
#else
return InterlockedExchangePointer(dest, val);
#endif
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IOCP)
#endif // ASIO_DETAIL_IMPL_WIN_IOCP_HANDLE_SERVICE_IPP
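
A Windows-only sketch of the trick overlapped_wrapper relies on above: tagging the low-order bit of the OVERLAPPED event handle so a synchronous read on an IOCP-associated handle completes through GetOverlappedResult without also queuing a completion packet. The demo and the "input.txt" path are hypothetical:

#include <windows.h>
#include <iostream>

int main()
{
  HANDLE file = ::CreateFileA("input.txt", GENERIC_READ, FILE_SHARE_READ,
      0, OPEN_EXISTING, FILE_FLAG_OVERLAPPED, 0);
  if (file == INVALID_HANDLE_VALUE)
    return 1;
  OVERLAPPED overlapped = {};
  overlapped.hEvent = ::CreateEventW(0, TRUE, FALSE, 0);
  if (!overlapped.hEvent)
    return 1;
  // Tag the low-order bit: completions carrying this OVERLAPPED are then
  // not posted to any associated completion port.
  overlapped.hEvent = reinterpret_cast<HANDLE>(
      reinterpret_cast<DWORD_PTR>(overlapped.hEvent) | 1);
  char buf[64];
  DWORD bytes_transferred = 0;
  if (!::ReadFile(file, buf, sizeof(buf), 0, &overlapped)
      && ::GetLastError() != ERROR_IO_PENDING)
    return 1;
  // Block until the operation finishes, exactly as do_read does.
  if (::GetOverlappedResult(file, &overlapped, &bytes_transferred, TRUE))
    std::cout << "read " << bytes_transferred << " bytes\n";
  ::CloseHandle(reinterpret_cast<HANDLE>( // clear the tag before closing
      reinterpret_cast<DWORD_PTR>(overlapped.hEvent) & ~DWORD_PTR(1)));
  ::CloseHandle(file);
}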

View File

@@ -0,0 +1,123 @@
//
// detail/impl/win_iocp_io_context.hpp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_HPP
#define ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_HPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IOCP)
#include "asio/detail/completion_handler.hpp"
#include "asio/detail/fenced_block.hpp"
#include "asio/detail/handler_alloc_helpers.hpp"
#include "asio/detail/memory.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
template <typename TimeTraits, typename Allocator>
void win_iocp_io_context::add_timer_queue(
timer_queue<TimeTraits, Allocator>& queue)
{
do_add_timer_queue(queue);
}
template <typename TimeTraits, typename Allocator>
void win_iocp_io_context::remove_timer_queue(
timer_queue<TimeTraits, Allocator>& queue)
{
do_remove_timer_queue(queue);
}
template <typename TimeTraits, typename Allocator>
void win_iocp_io_context::schedule_timer(
timer_queue<TimeTraits, Allocator>& queue,
const typename TimeTraits::time_type& time,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& timer,
wait_op* op)
{
// If the service has been shut down we silently discard the timer.
if (::InterlockedExchangeAdd(&shutdown_, 0) != 0)
{
post_immediate_completion(op, false);
return;
}
mutex::scoped_lock lock(dispatch_mutex_);
bool earliest = queue.enqueue_timer(time, timer, op);
work_started();
if (earliest)
update_timeout();
}
template <typename TimeTraits, typename Allocator>
std::size_t win_iocp_io_context::cancel_timer(
timer_queue<TimeTraits, Allocator>& queue,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& timer,
std::size_t max_cancelled)
{
// If the service has been shut down we silently ignore the cancellation.
if (::InterlockedExchangeAdd(&shutdown_, 0) != 0)
return 0;
mutex::scoped_lock lock(dispatch_mutex_);
op_queue<win_iocp_operation> ops;
std::size_t n = queue.cancel_timer(timer, ops, max_cancelled);
lock.unlock();
post_deferred_completions(ops);
return n;
}
template <typename TimeTraits, typename Allocator>
void win_iocp_io_context::cancel_timer_by_key(
timer_queue<TimeTraits, Allocator>& queue,
typename timer_queue<TimeTraits, Allocator>::per_timer_data* timer,
void* cancellation_key)
{
// If the service has been shut down we silently ignore the cancellation.
if (::InterlockedExchangeAdd(&shutdown_, 0) != 0)
return;
mutex::scoped_lock lock(dispatch_mutex_);
op_queue<win_iocp_operation> ops;
queue.cancel_timer_by_key(timer, ops, cancellation_key);
lock.unlock();
post_deferred_completions(ops);
}
template <typename TimeTraits, typename Allocator>
void win_iocp_io_context::move_timer(timer_queue<TimeTraits, Allocator>& queue,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& to,
typename timer_queue<TimeTraits, Allocator>::per_timer_data& from)
{
asio::detail::mutex::scoped_lock lock(dispatch_mutex_);
op_queue<operation> ops;
queue.cancel_timer(to, ops);
queue.move_timer(to, from);
lock.unlock();
post_deferred_completions(ops);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IOCP)
#endif // ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_HPP
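
Usage sketch: the schedule_timer/cancel_timer plumbing above is what backs asio's public timers when the IOCP backend is in use. A minimal wait (the 50ms duration is arbitrary):

#include <chrono>
#include <iostream>
#include "asio.hpp"

int main()
{
  asio::io_context ctx;
  asio::steady_timer timer(ctx, std::chrono::milliseconds(50));
  timer.async_wait([](const asio::error_code& ec)
      {
        // ec is operation_aborted if the timer was cancelled first.
        std::cout << (ec ? ec.message() : "timer fired") << "\n";
      });
  ctx.run();
}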

View File

@@ -0,0 +1,636 @@
//
// detail/impl/win_iocp_io_context.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_IPP
#define ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IOCP)
#include "asio/config.hpp"
#include "asio/error.hpp"
#include "asio/detail/cstdint.hpp"
#include "asio/detail/handler_alloc_helpers.hpp"
#include "asio/detail/limits.hpp"
#include "asio/detail/thread.hpp"
#include "asio/detail/throw_error.hpp"
#include "asio/detail/win_iocp_io_context.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
struct win_iocp_io_context::thread_function
{
explicit thread_function(win_iocp_io_context* s)
: this_(s)
{
}
void operator()()
{
asio::error_code ec;
this_->run(ec);
}
win_iocp_io_context* this_;
};
struct win_iocp_io_context::work_finished_on_block_exit
{
~work_finished_on_block_exit() noexcept(false)
{
io_context_->work_finished();
}
win_iocp_io_context* io_context_;
};
struct win_iocp_io_context::timer_thread_function
{
void operator()()
{
while (::InterlockedExchangeAdd(&io_context_->shutdown_, 0) == 0)
{
if (::WaitForSingleObject(io_context_->waitable_timer_.handle,
INFINITE) == WAIT_OBJECT_0)
{
::InterlockedExchange(&io_context_->dispatch_required_, 1);
::PostQueuedCompletionStatus(io_context_->iocp_.handle,
0, wake_for_dispatch, 0);
}
}
}
win_iocp_io_context* io_context_;
};
win_iocp_io_context::win_iocp_io_context(
asio::execution_context& ctx, bool own_thread)
: execution_context_service_base<win_iocp_io_context>(ctx),
iocp_(),
outstanding_work_(0),
stopped_(0),
stop_event_posted_(0),
shutdown_(0),
gqcs_timeout_(get_gqcs_timeout()),
dispatch_required_(0),
concurrency_hint_(config(ctx).get("scheduler", "concurrency_hint", -1))
{
ASIO_HANDLER_TRACKING_INIT;
iocp_.handle = ::CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0,
static_cast<DWORD>(concurrency_hint_ >= 0
? concurrency_hint_ : DWORD(~0)));
if (!iocp_.handle)
{
DWORD last_error = ::GetLastError();
asio::error_code ec(last_error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "iocp");
}
if (own_thread)
{
::InterlockedIncrement(&outstanding_work_);
thread_ = thread(thread_function(this));
}
}
win_iocp_io_context::win_iocp_io_context(
win_iocp_io_context::internal, asio::execution_context& ctx)
: execution_context_service_base<win_iocp_io_context>(ctx),
iocp_(),
outstanding_work_(0),
stopped_(0),
stop_event_posted_(0),
shutdown_(0),
gqcs_timeout_(get_gqcs_timeout()),
dispatch_required_(0),
concurrency_hint_(-1)
{
ASIO_HANDLER_TRACKING_INIT;
iocp_.handle = ::CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0,
static_cast<DWORD>(concurrency_hint_ >= 0
? concurrency_hint_ : DWORD(~0)));
if (!iocp_.handle)
{
DWORD last_error = ::GetLastError();
asio::error_code ec(last_error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "iocp");
}
}
win_iocp_io_context::~win_iocp_io_context()
{
if (thread_.joinable())
{
stop();
thread_.join();
}
}
void win_iocp_io_context::shutdown()
{
::InterlockedExchange(&shutdown_, 1);
if (timer_thread_.joinable())
{
LARGE_INTEGER timeout;
timeout.QuadPart = 1;
::SetWaitableTimer(waitable_timer_.handle, &timeout, 1, 0, 0, FALSE);
}
if (thread_.joinable())
{
stop();
thread_.join();
::InterlockedDecrement(&outstanding_work_);
}
while (::InterlockedExchangeAdd(&outstanding_work_, 0) > 0)
{
op_queue<win_iocp_operation> ops;
timer_queues_.get_all_timers(ops);
ops.push(completed_ops_);
if (!ops.empty())
{
while (win_iocp_operation* op = ops.front())
{
ops.pop();
::InterlockedDecrement(&outstanding_work_);
op->destroy();
}
}
else
{
DWORD bytes_transferred = 0;
dword_ptr_t completion_key = 0;
LPOVERLAPPED overlapped = 0;
::GetQueuedCompletionStatus(iocp_.handle, &bytes_transferred,
&completion_key, &overlapped, gqcs_timeout_);
if (overlapped)
{
::InterlockedDecrement(&outstanding_work_);
static_cast<win_iocp_operation*>(overlapped)->destroy();
}
}
}
timer_thread_.join();
}
asio::error_code win_iocp_io_context::register_handle(
HANDLE handle, asio::error_code& ec)
{
if (::CreateIoCompletionPort(handle, iocp_.handle, 0, 0) == 0)
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
else
{
ec = asio::error_code();
}
return ec;
}
size_t win_iocp_io_context::run(asio::error_code& ec)
{
if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
{
stop();
ec = asio::error_code();
return 0;
}
win_iocp_thread_info this_thread;
thread_call_stack::context ctx(this, this_thread);
size_t n = 0;
while (do_one(INFINITE, this_thread, ec))
if (n != (std::numeric_limits<size_t>::max)())
++n;
return n;
}
size_t win_iocp_io_context::run_one(asio::error_code& ec)
{
if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
{
stop();
ec = asio::error_code();
return 0;
}
win_iocp_thread_info this_thread;
thread_call_stack::context ctx(this, this_thread);
return do_one(INFINITE, this_thread, ec);
}
size_t win_iocp_io_context::wait_one(long usec, asio::error_code& ec)
{
if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
{
stop();
ec = asio::error_code();
return 0;
}
win_iocp_thread_info this_thread;
thread_call_stack::context ctx(this, this_thread);
return do_one(usec < 0 ? INFINITE : ((usec - 1) / 1000 + 1), this_thread, ec);
}
size_t win_iocp_io_context::poll(asio::error_code& ec)
{
if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
{
stop();
ec = asio::error_code();
return 0;
}
win_iocp_thread_info this_thread;
thread_call_stack::context ctx(this, this_thread);
size_t n = 0;
while (do_one(0, this_thread, ec))
if (n != (std::numeric_limits<size_t>::max)())
++n;
return n;
}
size_t win_iocp_io_context::poll_one(asio::error_code& ec)
{
if (::InterlockedExchangeAdd(&outstanding_work_, 0) == 0)
{
stop();
ec = asio::error_code();
return 0;
}
win_iocp_thread_info this_thread;
thread_call_stack::context ctx(this, this_thread);
return do_one(0, this_thread, ec);
}
void win_iocp_io_context::stop()
{
if (::InterlockedExchange(&stopped_, 1) == 0)
{
if (::InterlockedExchange(&stop_event_posted_, 1) == 0)
{
if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, 0))
{
DWORD last_error = ::GetLastError();
asio::error_code ec(last_error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "pqcs");
}
}
}
}
bool win_iocp_io_context::can_dispatch()
{
return thread_call_stack::contains(this) != 0;
}
void win_iocp_io_context::capture_current_exception()
{
if (thread_info_base* this_thread = thread_call_stack::contains(this))
this_thread->capture_current_exception();
}
void win_iocp_io_context::post_deferred_completion(win_iocp_operation* op)
{
// Flag the operation as ready.
op->ready_ = 1;
// Enqueue the operation on the I/O completion port.
if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, op))
{
// Out of resources. Put on completed queue instead.
mutex::scoped_lock lock(dispatch_mutex_);
completed_ops_.push(op);
::InterlockedExchange(&dispatch_required_, 1);
}
}
void win_iocp_io_context::post_deferred_completions(
op_queue<win_iocp_operation>& ops)
{
while (win_iocp_operation* op = ops.front())
{
ops.pop();
// Flag the operation as ready.
op->ready_ = 1;
// Enqueue the operation on the I/O completion port.
if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, op))
{
// Out of resources. Put on completed queue instead.
mutex::scoped_lock lock(dispatch_mutex_);
completed_ops_.push(op);
completed_ops_.push(ops);
::InterlockedExchange(&dispatch_required_, 1);
}
}
}
void win_iocp_io_context::abandon_operations(
op_queue<win_iocp_operation>& ops)
{
while (win_iocp_operation* op = ops.front())
{
ops.pop();
::InterlockedDecrement(&outstanding_work_);
op->destroy();
}
}
void win_iocp_io_context::on_pending(win_iocp_operation* op)
{
if (::InterlockedCompareExchange(&op->ready_, 1, 0) == 1)
{
// Enqueue the operation on the I/O completion port.
if (!::PostQueuedCompletionStatus(iocp_.handle,
0, overlapped_contains_result, op))
{
// Out of resources. Put on completed queue instead.
mutex::scoped_lock lock(dispatch_mutex_);
completed_ops_.push(op);
::InterlockedExchange(&dispatch_required_, 1);
}
}
}
void win_iocp_io_context::on_completion(win_iocp_operation* op,
DWORD last_error, DWORD bytes_transferred)
{
// Flag that the operation is ready for invocation.
op->ready_ = 1;
// Store results in the OVERLAPPED structure.
op->Internal = reinterpret_cast<ulong_ptr_t>(
&asio::error::get_system_category());
op->Offset = last_error;
op->OffsetHigh = bytes_transferred;
// Enqueue the operation on the I/O completion port.
if (!::PostQueuedCompletionStatus(iocp_.handle,
0, overlapped_contains_result, op))
{
// Out of resources. Put on completed queue instead.
mutex::scoped_lock lock(dispatch_mutex_);
completed_ops_.push(op);
::InterlockedExchange(&dispatch_required_, 1);
}
}
void win_iocp_io_context::on_completion(win_iocp_operation* op,
const asio::error_code& ec, DWORD bytes_transferred)
{
// Flag that the operation is ready for invocation.
op->ready_ = 1;
// Store results in the OVERLAPPED structure.
op->Internal = reinterpret_cast<ulong_ptr_t>(&ec.category());
op->Offset = ec.value();
op->OffsetHigh = bytes_transferred;
// Enqueue the operation on the I/O completion port.
if (!::PostQueuedCompletionStatus(iocp_.handle,
0, overlapped_contains_result, op))
{
// Out of resources. Put on completed queue instead.
mutex::scoped_lock lock(dispatch_mutex_);
completed_ops_.push(op);
::InterlockedExchange(&dispatch_required_, 1);
}
}
size_t win_iocp_io_context::do_one(DWORD msec,
win_iocp_thread_info& this_thread, asio::error_code& ec)
{
for (;;)
{
// Try to acquire responsibility for dispatching timers and completed ops.
if (::InterlockedCompareExchange(&dispatch_required_, 0, 1) == 1)
{
mutex::scoped_lock lock(dispatch_mutex_);
// Dispatch pending timers and operations.
op_queue<win_iocp_operation> ops;
ops.push(completed_ops_);
timer_queues_.get_ready_timers(ops);
post_deferred_completions(ops);
update_timeout();
}
// Get the next operation from the queue.
DWORD bytes_transferred = 0;
dword_ptr_t completion_key = 0;
LPOVERLAPPED overlapped = 0;
::SetLastError(0);
BOOL ok = ::GetQueuedCompletionStatus(iocp_.handle,
&bytes_transferred, &completion_key, &overlapped,
msec < gqcs_timeout_ ? msec : gqcs_timeout_);
DWORD last_error = ::GetLastError();
if (overlapped)
{
win_iocp_operation* op = static_cast<win_iocp_operation*>(overlapped);
asio::error_code result_ec(last_error,
asio::error::get_system_category());
// We may have been passed the last_error and bytes_transferred in the
// OVERLAPPED structure itself.
if (completion_key == overlapped_contains_result)
{
result_ec = asio::error_code(static_cast<int>(op->Offset),
*reinterpret_cast<asio::error_category*>(op->Internal));
bytes_transferred = op->OffsetHigh;
}
// Otherwise ensure any result has been saved into the OVERLAPPED
// structure.
else
{
op->Internal = reinterpret_cast<ulong_ptr_t>(&result_ec.category());
op->Offset = result_ec.value();
op->OffsetHigh = bytes_transferred;
}
// Dispatch the operation only if ready. The operation may not be ready
// if the initiating function (e.g. a call to WSARecv) has not yet
// returned. This is because the initiating function still wants access
// to the operation's OVERLAPPED structure.
if (::InterlockedCompareExchange(&op->ready_, 1, 0) == 1)
{
// Ensure the count of outstanding work is decremented on block exit.
work_finished_on_block_exit on_exit = { this };
(void)on_exit;
op->complete(this, result_ec, bytes_transferred);
this_thread.rethrow_pending_exception();
ec = asio::error_code();
return 1;
}
}
else if (!ok)
{
if (last_error != WAIT_TIMEOUT)
{
ec = asio::error_code(last_error,
asio::error::get_system_category());
return 0;
}
// If we're waiting indefinitely we need to keep going until we get a
// real handler.
if (msec == INFINITE)
continue;
ec = asio::error_code();
return 0;
}
else if (completion_key == wake_for_dispatch)
{
// We have been woken up to try to acquire responsibility for dispatching
// timers and completed operations.
}
else
{
// Indicate that there is no longer an in-flight stop event.
::InterlockedExchange(&stop_event_posted_, 0);
// The stopped_ flag is always checked to ensure that any leftover
// stop events from a previous run invocation are ignored.
if (::InterlockedExchangeAdd(&stopped_, 0) != 0)
{
// Wake up next thread that is blocked on GetQueuedCompletionStatus.
if (::InterlockedExchange(&stop_event_posted_, 1) == 0)
{
if (!::PostQueuedCompletionStatus(iocp_.handle, 0, 0, 0))
{
last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
return 0;
}
}
ec = asio::error_code();
return 0;
}
}
}
}
DWORD win_iocp_io_context::get_gqcs_timeout()
{
#if !defined(_WIN32_WINNT) || (_WIN32_WINNT < 0x0600)
OSVERSIONINFOEX osvi;
ZeroMemory(&osvi, sizeof(osvi));
osvi.dwOSVersionInfoSize = sizeof(osvi);
osvi.dwMajorVersion = 6ul;
const uint64_t condition_mask = ::VerSetConditionMask(
0, VER_MAJORVERSION, VER_GREATER_EQUAL);
if (!!::VerifyVersionInfo(&osvi, VER_MAJORVERSION, condition_mask))
return INFINITE;
return default_gqcs_timeout;
#else // !defined(_WIN32_WINNT) || (_WIN32_WINNT < 0x0600)
return INFINITE;
#endif // !defined(_WIN32_WINNT) || (_WIN32_WINNT < 0x0600)
}
void win_iocp_io_context::do_add_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(dispatch_mutex_);
timer_queues_.insert(&queue);
if (!waitable_timer_.handle)
{
waitable_timer_.handle = ::CreateWaitableTimer(0, FALSE, 0);
if (waitable_timer_.handle == 0)
{
DWORD last_error = ::GetLastError();
asio::error_code ec(last_error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "timer");
}
LARGE_INTEGER timeout;
timeout.QuadPart = -max_timeout_usec;
timeout.QuadPart *= 10;
::SetWaitableTimer(waitable_timer_.handle,
&timeout, max_timeout_msec, 0, 0, FALSE);
}
if (!timer_thread_.joinable())
{
timer_thread_function thread_function = { this };
timer_thread_ = thread(thread_function, 65536);
}
}
void win_iocp_io_context::do_remove_timer_queue(timer_queue_base& queue)
{
mutex::scoped_lock lock(dispatch_mutex_);
timer_queues_.erase(&queue);
}
void win_iocp_io_context::update_timeout()
{
if (timer_thread_.joinable())
{
// There's no point updating the waitable timer if the new timeout period
// exceeds the maximum timeout. In that case, we might as well wait for the
// existing period of the timer to expire.
long timeout_usec = timer_queues_.wait_duration_usec(max_timeout_usec);
if (timeout_usec < max_timeout_usec)
{
LARGE_INTEGER timeout;
timeout.QuadPart = -timeout_usec;
timeout.QuadPart *= 10;
::SetWaitableTimer(waitable_timer_.handle,
&timeout, max_timeout_msec, 0, 0, FALSE);
}
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IOCP)
#endif // ASIO_DETAIL_IMPL_WIN_IOCP_IO_CONTEXT_IPP
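
A Windows-only toy reduction of the run/do_one loop above: fake completion packets are consumed with GetQueuedCompletionStatus until a null packet arrives, the same wake-up stop() uses. Hypothetical demo, not asio code:

#include <windows.h>
#include <iostream>

int main()
{
  HANDLE iocp = ::CreateIoCompletionPort(INVALID_HANDLE_VALUE, 0, 0, 1);
  if (!iocp)
    return 1;
  // Queue two fake completions, then a null packet as a stop marker.
  ::PostQueuedCompletionStatus(iocp, 11, 1, 0);
  ::PostQueuedCompletionStatus(iocp, 22, 2, 0);
  ::PostQueuedCompletionStatus(iocp, 0, 0, 0);
  for (;;)
  {
    DWORD bytes_transferred = 0;
    ULONG_PTR completion_key = 0;
    LPOVERLAPPED overlapped = 0;
    BOOL ok = ::GetQueuedCompletionStatus(iocp, &bytes_transferred,
        &completion_key, &overlapped, INFINITE);
    if (!ok && !overlapped)
      break; // the wait itself failed; no packet was dequeued
    if (!overlapped && completion_key == 0)
      break; // the stop marker
    std::cout << "packet: key=" << completion_key
        << " bytes=" << bytes_transferred << "\n";
  }
  ::CloseHandle(iocp);
}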

View File

@@ -0,0 +1,200 @@
//
// detail/impl/win_iocp_serial_port_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2008 Rep Invariant Systems, Inc. (info@repinvariant.com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP
#define ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT)
#include <cstring>
#include "asio/detail/win_iocp_serial_port_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
win_iocp_serial_port_service::win_iocp_serial_port_service(
execution_context& context)
: execution_context_service_base<win_iocp_serial_port_service>(context),
handle_service_(context)
{
}
void win_iocp_serial_port_service::shutdown()
{
}
asio::error_code win_iocp_serial_port_service::open(
win_iocp_serial_port_service::implementation_type& impl,
const std::string& device, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
ASIO_ERROR_LOCATION(ec);
return ec;
}
// For convenience, add a leading \\.\ sequence if not already present.
std::string name = (device[0] == '\\') ? device : "\\\\.\\" + device;
// Open a handle to the serial port.
::HANDLE handle = ::CreateFileA(name.c_str(),
GENERIC_READ | GENERIC_WRITE, 0, 0,
OPEN_EXISTING, FILE_FLAG_OVERLAPPED, 0);
if (handle == INVALID_HANDLE_VALUE)
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
// Determine the initial serial port parameters.
using namespace std; // For memset.
::DCB dcb;
memset(&dcb, 0, sizeof(DCB));
dcb.DCBlength = sizeof(DCB);
if (!::GetCommState(handle, &dcb))
{
DWORD last_error = ::GetLastError();
::CloseHandle(handle);
ec = asio::error_code(last_error,
asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
// Set some default serial port parameters. This implementation does not
// support changing all of these, so they might as well be in a known state.
dcb.fBinary = TRUE; // Win32 only supports binary mode.
dcb.fNull = FALSE; // Do not ignore NULL characters.
dcb.fAbortOnError = FALSE; // Ignore serial framing errors.
dcb.BaudRate = CBR_9600; // 9600 baud by default
dcb.ByteSize = 8; // 8 bit bytes
dcb.fOutxCtsFlow = FALSE; // No flow control
dcb.fOutxDsrFlow = FALSE;
dcb.fDtrControl = DTR_CONTROL_DISABLE;
dcb.fDsrSensitivity = FALSE;
dcb.fOutX = FALSE;
dcb.fInX = FALSE;
dcb.fRtsControl = RTS_CONTROL_DISABLE;
dcb.fParity = FALSE; // No parity
dcb.Parity = NOPARITY;
dcb.StopBits = ONESTOPBIT; // One stop bit
if (!::SetCommState(handle, &dcb))
{
DWORD last_error = ::GetLastError();
::CloseHandle(handle);
ec = asio::error_code(last_error,
asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
// Set up timeouts so that the serial port will behave similarly to a
// network socket. Reads wait for at least one byte, then return with
// whatever they have. Writes return once everything is out the door.
::COMMTIMEOUTS timeouts;
timeouts.ReadIntervalTimeout = 1;
timeouts.ReadTotalTimeoutMultiplier = 0;
timeouts.ReadTotalTimeoutConstant = 0;
timeouts.WriteTotalTimeoutMultiplier = 0;
timeouts.WriteTotalTimeoutConstant = 0;
if (!::SetCommTimeouts(handle, &timeouts))
{
DWORD last_error = ::GetLastError();
::CloseHandle(handle);
ec = asio::error_code(last_error,
asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
// We're done. Take ownership of the serial port handle.
if (handle_service_.assign(impl, handle, ec))
::CloseHandle(handle);
return ec;
}
asio::error_code win_iocp_serial_port_service::do_set_option(
win_iocp_serial_port_service::implementation_type& impl,
win_iocp_serial_port_service::store_function_type store,
const void* option, asio::error_code& ec)
{
using namespace std; // For memcpy.
::DCB dcb;
memset(&dcb, 0, sizeof(DCB));
dcb.DCBlength = sizeof(DCB);
if (!::GetCommState(handle_service_.native_handle(impl), &dcb))
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
if (store(option, dcb, ec))
return ec;
if (!::SetCommState(handle_service_.native_handle(impl), &dcb))
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
ec = asio::error_code();
return ec;
}
asio::error_code win_iocp_serial_port_service::do_get_option(
const win_iocp_serial_port_service::implementation_type& impl,
win_iocp_serial_port_service::load_function_type load,
void* option, asio::error_code& ec) const
{
using namespace std; // For memset.
::DCB dcb;
memset(&dcb, 0, sizeof(DCB));
dcb.DCBlength = sizeof(DCB);
if (!::GetCommState(handle_service_.native_handle(impl), &dcb))
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
return ec;
}
return load(option, dcb, ec);
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IOCP) && defined(ASIO_HAS_SERIAL_PORT)
#endif // ASIO_DETAIL_IMPL_WIN_IOCP_SERIAL_PORT_SERVICE_IPP
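
A usage sketch for the service above via asio's public serial port API (requires ASIO_HAS_SERIAL_PORT; "COM1" is a placeholder device name):

#include <iostream>
#include "asio.hpp"

int main()
{
  asio::io_context ctx;
  asio::error_code ec;
  asio::serial_port port(ctx);
  port.open("COM1", ec); // open() above adds the \\.\ prefix if needed
  if (ec)
  {
    std::cerr << "open: " << ec.message() << "\n";
    return 1;
  }
  // Override the CBR_9600 default that open() installs in the DCB.
  port.set_option(asio::serial_port_base::baud_rate(115200), ec);
  std::cout << (ec ? ec.message() : "baud rate set") << "\n";
}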

View File

@@ -0,0 +1,821 @@
//
// detail/impl/win_iocp_socket_service_base.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP
#define ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_IOCP)
#include "asio/detail/win_iocp_socket_service_base.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
win_iocp_socket_service_base::win_iocp_socket_service_base(
execution_context& context)
: context_(context),
iocp_service_(use_service<win_iocp_io_context>(context)),
reactor_(0),
connect_ex_(0),
nt_set_info_(0),
mutex_(),
impl_list_(0)
{
}
void win_iocp_socket_service_base::base_shutdown()
{
// Close all implementations, causing all operations to complete.
asio::detail::mutex::scoped_lock lock(mutex_);
base_implementation_type* impl = impl_list_;
while (impl)
{
close_for_destruction(*impl);
impl = impl->next_;
}
}
void win_iocp_socket_service_base::construct(
win_iocp_socket_service_base::base_implementation_type& impl)
{
impl.socket_ = invalid_socket;
impl.state_ = 0;
impl.cancel_token_.reset();
#if defined(ASIO_ENABLE_CANCELIO)
impl.safe_cancellation_thread_id_ = 0;
#endif // defined(ASIO_ENABLE_CANCELIO)
// Insert implementation into linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
impl.next_ = impl_list_;
impl.prev_ = 0;
if (impl_list_)
impl_list_->prev_ = &impl;
impl_list_ = &impl;
}
void win_iocp_socket_service_base::base_move_construct(
win_iocp_socket_service_base::base_implementation_type& impl,
win_iocp_socket_service_base::base_implementation_type& other_impl)
noexcept
{
impl.socket_ = other_impl.socket_;
other_impl.socket_ = invalid_socket;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
impl.cancel_token_ = other_impl.cancel_token_;
other_impl.cancel_token_.reset();
#if defined(ASIO_ENABLE_CANCELIO)
impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
other_impl.safe_cancellation_thread_id_ = 0;
#endif // defined(ASIO_ENABLE_CANCELIO)
// Insert implementation into linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
impl.next_ = impl_list_;
impl.prev_ = 0;
if (impl_list_)
impl_list_->prev_ = &impl;
impl_list_ = &impl;
}
void win_iocp_socket_service_base::base_move_assign(
win_iocp_socket_service_base::base_implementation_type& impl,
win_iocp_socket_service_base& other_service,
win_iocp_socket_service_base::base_implementation_type& other_impl)
{
close_for_destruction(impl);
if (this != &other_service)
{
// Remove implementation from linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
if (impl_list_ == &impl)
impl_list_ = impl.next_;
if (impl.prev_)
impl.prev_->next_ = impl.next_;
if (impl.next_)
impl.next_->prev_ = impl.prev_;
impl.next_ = 0;
impl.prev_ = 0;
}
impl.socket_ = other_impl.socket_;
other_impl.socket_ = invalid_socket;
impl.state_ = other_impl.state_;
other_impl.state_ = 0;
impl.cancel_token_ = other_impl.cancel_token_;
other_impl.cancel_token_.reset();
#if defined(ASIO_ENABLE_CANCELIO)
impl.safe_cancellation_thread_id_ = other_impl.safe_cancellation_thread_id_;
other_impl.safe_cancellation_thread_id_ = 0;
#endif // defined(ASIO_ENABLE_CANCELIO)
if (this != &other_service)
{
// Insert implementation into linked list of all implementations.
asio::detail::mutex::scoped_lock lock(other_service.mutex_);
impl.next_ = other_service.impl_list_;
impl.prev_ = 0;
if (other_service.impl_list_)
other_service.impl_list_->prev_ = &impl;
other_service.impl_list_ = &impl;
}
}
void win_iocp_socket_service_base::destroy(
win_iocp_socket_service_base::base_implementation_type& impl)
{
close_for_destruction(impl);
// Remove implementation from linked list of all implementations.
asio::detail::mutex::scoped_lock lock(mutex_);
if (impl_list_ == &impl)
impl_list_ = impl.next_;
if (impl.prev_)
impl.prev_->next_ = impl.next_;
if (impl.next_)
impl.next_->prev_ = impl.prev_;
impl.next_ = 0;
impl.prev_ = 0;
}
asio::error_code win_iocp_socket_service_base::close(
win_iocp_socket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((iocp_service_.context(),
"socket", &impl, impl.socket_, "close"));
// Check if the reactor was created, in which case we need to close the
// socket on the reactor as well to cancel any operations that might be
// running there.
select_reactor* r = static_cast<select_reactor*>(
interlocked_compare_exchange_pointer(
reinterpret_cast<void**>(&reactor_), 0, 0));
if (r)
r->deregister_descriptor(impl.socket_, impl.reactor_data_, true);
socket_ops::close(impl.socket_, impl.state_, false, ec);
if (r)
r->cleanup_descriptor_data(impl.reactor_data_);
}
else
{
ec = asio::error_code();
}
impl.socket_ = invalid_socket;
impl.state_ = 0;
impl.cancel_token_.reset();
#if defined(ASIO_ENABLE_CANCELIO)
impl.safe_cancellation_thread_id_ = 0;
#endif // defined(ASIO_ENABLE_CANCELIO)
return ec;
}
socket_type win_iocp_socket_service_base::release(
win_iocp_socket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
return invalid_socket;
cancel(impl, ec);
if (ec)
return invalid_socket;
nt_set_info_fn fn = get_nt_set_info();
if (fn == 0)
{
ec = asio::error::operation_not_supported;
return invalid_socket;
}
HANDLE sock_as_handle = reinterpret_cast<HANDLE>(impl.socket_);
ULONG_PTR iosb[2] = { 0, 0 };
void* info[2] = { 0, 0 };
if (fn(sock_as_handle, iosb, &info, sizeof(info),
61 /* FileReplaceCompletionInformation */))
{
ec = asio::error::operation_not_supported;
return invalid_socket;
}
socket_type tmp = impl.socket_;
impl.socket_ = invalid_socket;
return tmp;
}
asio::error_code win_iocp_socket_service_base::cancel(
win_iocp_socket_service_base::base_implementation_type& impl,
asio::error_code& ec)
{
if (!is_open(impl))
{
ec = asio::error::bad_descriptor;
return ec;
}
ASIO_HANDLER_OPERATION((iocp_service_.context(),
"socket", &impl, impl.socket_, "cancel"));
if (FARPROC cancel_io_ex_ptr = ::GetProcAddress(
::GetModuleHandleA("KERNEL32"), "CancelIoEx"))
{
// This version of Windows supports cancellation from any thread.
typedef BOOL (WINAPI* cancel_io_ex_t)(HANDLE, LPOVERLAPPED);
cancel_io_ex_t cancel_io_ex = reinterpret_cast<cancel_io_ex_t>(
reinterpret_cast<void*>(cancel_io_ex_ptr));
socket_type sock = impl.socket_;
HANDLE sock_as_handle = reinterpret_cast<HANDLE>(sock);
if (!cancel_io_ex(sock_as_handle, 0))
{
DWORD last_error = ::GetLastError();
if (last_error == ERROR_NOT_FOUND)
{
// ERROR_NOT_FOUND means that there were no operations to be
// cancelled. We swallow this error to match the behaviour on other
// platforms.
ec = asio::error_code();
}
else
{
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
}
else
{
ec = asio::error_code();
}
}
#if defined(ASIO_ENABLE_CANCELIO)
else if (impl.safe_cancellation_thread_id_ == 0)
{
// No operations have been started, so there's nothing to cancel.
ec = asio::error_code();
}
else if (impl.safe_cancellation_thread_id_ == ::GetCurrentThreadId())
{
// Asynchronous operations have been started from the current thread only,
// so it is safe to try to cancel them using CancelIo.
socket_type sock = impl.socket_;
HANDLE sock_as_handle = reinterpret_cast<HANDLE>(sock);
if (!::CancelIo(sock_as_handle))
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
else
{
ec = asio::error_code();
}
}
else
{
// Asynchronous operations have been started from more than one thread,
// so cancellation is not safe.
ec = asio::error::operation_not_supported;
}
#else // defined(ASIO_ENABLE_CANCELIO)
else
{
// Cancellation is not supported as CancelIo may not be used.
ec = asio::error::operation_not_supported;
}
#endif // defined(ASIO_ENABLE_CANCELIO)
// Cancel any operations started via the reactor.
if (!ec)
{
select_reactor* r = static_cast<select_reactor*>(
interlocked_compare_exchange_pointer(
reinterpret_cast<void**>(&reactor_), 0, 0));
if (r)
r->cancel_ops(impl.socket_, impl.reactor_data_);
}
return ec;
}
asio::error_code win_iocp_socket_service_base::do_open(
win_iocp_socket_service_base::base_implementation_type& impl,
int family, int type, int protocol, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
return ec;
}
socket_holder sock(socket_ops::socket(family, type, protocol, ec));
if (sock.get() == invalid_socket)
return ec;
HANDLE sock_as_handle = reinterpret_cast<HANDLE>(sock.get());
if (iocp_service_.register_handle(sock_as_handle, ec))
return ec;
impl.socket_ = sock.release();
switch (type)
{
case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break;
case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break;
default: impl.state_ = 0; break;
}
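  // The cancel token is held purely for its reference count: outstanding
  // operations keep a weak reference to it so that, when they complete, they
  // can detect whether the socket was closed in the interim and translate
  // the resulting error accordingly.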
impl.cancel_token_.reset(static_cast<void*>(0), socket_ops::noop_deleter());
ec = asio::error_code();
return ec;
}
asio::error_code win_iocp_socket_service_base::do_assign(
win_iocp_socket_service_base::base_implementation_type& impl,
int type, socket_type native_socket, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
return ec;
}
HANDLE sock_as_handle = reinterpret_cast<HANDLE>(native_socket);
if (iocp_service_.register_handle(sock_as_handle, ec))
return ec;
impl.socket_ = native_socket;
switch (type)
{
case SOCK_STREAM: impl.state_ = socket_ops::stream_oriented; break;
case SOCK_DGRAM: impl.state_ = socket_ops::datagram_oriented; break;
default: impl.state_ = 0; break;
}
impl.cancel_token_.reset(static_cast<void*>(0), socket_ops::noop_deleter());
ec = asio::error_code();
return ec;
}
void win_iocp_socket_service_base::start_send_op(
win_iocp_socket_service_base::base_implementation_type& impl,
WSABUF* buffers, std::size_t buffer_count,
socket_base::message_flags flags, bool noop, operation* op)
{
update_cancellation_thread_id(impl);
iocp_service_.work_started();
if (noop)
iocp_service_.on_completion(op);
else if (!is_open(impl))
iocp_service_.on_completion(op, asio::error::bad_descriptor);
else
{
DWORD bytes_transferred = 0;
int result = ::WSASend(impl.socket_, buffers,
static_cast<DWORD>(buffer_count), &bytes_transferred, flags, op, 0);
DWORD last_error = ::WSAGetLastError();
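    // Map ERROR_PORT_UNREACHABLE (reported when an ICMP port-unreachable
    // message arrives for a datagram socket) to the WSAECONNREFUSED error
    // produced on other platforms.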
if (last_error == ERROR_PORT_UNREACHABLE)
last_error = WSAECONNREFUSED;
if (result != 0 && last_error != WSA_IO_PENDING)
iocp_service_.on_completion(op, last_error, bytes_transferred);
else
iocp_service_.on_pending(op);
}
}
void win_iocp_socket_service_base::start_send_to_op(
win_iocp_socket_service_base::base_implementation_type& impl,
WSABUF* buffers, std::size_t buffer_count, const void* addr,
int addrlen, socket_base::message_flags flags, operation* op)
{
update_cancellation_thread_id(impl);
iocp_service_.work_started();
if (!is_open(impl))
iocp_service_.on_completion(op, asio::error::bad_descriptor);
else
{
DWORD bytes_transferred = 0;
int result = ::WSASendTo(impl.socket_, buffers,
static_cast<DWORD>(buffer_count), &bytes_transferred, flags,
static_cast<const socket_addr_type*>(addr), addrlen, op, 0);
DWORD last_error = ::WSAGetLastError();
if (last_error == ERROR_PORT_UNREACHABLE)
last_error = WSAECONNREFUSED;
if (result != 0 && last_error != WSA_IO_PENDING)
iocp_service_.on_completion(op, last_error, bytes_transferred);
else
iocp_service_.on_pending(op);
}
}
void win_iocp_socket_service_base::start_receive_op(
win_iocp_socket_service_base::base_implementation_type& impl,
WSABUF* buffers, std::size_t buffer_count,
socket_base::message_flags flags, bool noop, operation* op)
{
update_cancellation_thread_id(impl);
iocp_service_.work_started();
if (noop)
iocp_service_.on_completion(op);
else if (!is_open(impl))
iocp_service_.on_completion(op, asio::error::bad_descriptor);
else
{
DWORD bytes_transferred = 0;
DWORD recv_flags = flags;
int result = ::WSARecv(impl.socket_, buffers,
static_cast<DWORD>(buffer_count),
&bytes_transferred, &recv_flags, op, 0);
DWORD last_error = ::WSAGetLastError();
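    // ERROR_NETNAME_DELETED indicates that the remote end forcibly closed
    // the connection, and ERROR_PORT_UNREACHABLE that an ICMP
    // port-unreachable message arrived; map them to the WSAECONNRESET and
    // WSAECONNREFUSED errors used on other platforms.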
if (last_error == ERROR_NETNAME_DELETED)
last_error = WSAECONNRESET;
else if (last_error == ERROR_PORT_UNREACHABLE)
last_error = WSAECONNREFUSED;
if (result != 0 && last_error != WSA_IO_PENDING)
iocp_service_.on_completion(op, last_error, bytes_transferred);
else
iocp_service_.on_pending(op);
}
}
int win_iocp_socket_service_base::start_null_buffers_receive_op(
win_iocp_socket_service_base::base_implementation_type& impl,
socket_base::message_flags flags, reactor_op* op, operation* iocp_op)
{
if ((impl.state_ & socket_ops::stream_oriented) != 0)
{
// For stream sockets on Windows, we may issue a 0-byte overlapped
// WSARecv to wait until there is data available on the socket.
::WSABUF buf = { 0, 0 };
start_receive_op(impl, &buf, 1, flags, false, iocp_op);
return -1;
}
else
{
int op_type = (flags & socket_base::message_out_of_band)
? select_reactor::except_op : select_reactor::read_op;
start_reactor_op(impl, op_type, op);
return op_type;
}
}
void win_iocp_socket_service_base::start_receive_from_op(
win_iocp_socket_service_base::base_implementation_type& impl,
WSABUF* buffers, std::size_t buffer_count, void* addr,
socket_base::message_flags flags, int* addrlen, operation* op)
{
update_cancellation_thread_id(impl);
iocp_service_.work_started();
if (!is_open(impl))
iocp_service_.on_completion(op, asio::error::bad_descriptor);
else
{
DWORD bytes_transferred = 0;
DWORD recv_flags = flags;
int result = ::WSARecvFrom(impl.socket_, buffers,
static_cast<DWORD>(buffer_count), &bytes_transferred, &recv_flags,
static_cast<socket_addr_type*>(addr), addrlen, op, 0);
DWORD last_error = ::WSAGetLastError();
if (last_error == ERROR_PORT_UNREACHABLE)
last_error = WSAECONNREFUSED;
if (result != 0 && last_error != WSA_IO_PENDING)
iocp_service_.on_completion(op, last_error, bytes_transferred);
else
iocp_service_.on_pending(op);
}
}
void win_iocp_socket_service_base::start_accept_op(
win_iocp_socket_service_base::base_implementation_type& impl,
bool peer_is_open, socket_holder& new_socket, int family, int type,
int protocol, void* output_buffer, DWORD address_length, operation* op)
{
update_cancellation_thread_id(impl);
iocp_service_.work_started();
if (!is_open(impl))
iocp_service_.on_completion(op, asio::error::bad_descriptor);
else if (peer_is_open)
iocp_service_.on_completion(op, asio::error::already_open);
else
{
asio::error_code ec;
new_socket.reset(socket_ops::socket(family, type, protocol, ec));
if (new_socket.get() == invalid_socket)
iocp_service_.on_completion(op, ec);
else
{
DWORD bytes_read = 0;
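      // AcceptEx writes the local and remote addresses into output_buffer,
      // which is why address_length is supplied twice (one slot for each
      // address). A receive length of 0 means the operation completes as
      // soon as a connection is accepted, without waiting for initial data.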
BOOL result = ::AcceptEx(impl.socket_, new_socket.get(), output_buffer,
0, address_length, address_length, &bytes_read, op);
DWORD last_error = ::WSAGetLastError();
if (!result && last_error != WSA_IO_PENDING)
iocp_service_.on_completion(op, last_error);
else
iocp_service_.on_pending(op);
}
}
}
void win_iocp_socket_service_base::restart_accept_op(
socket_type s, socket_holder& new_socket, int family, int type,
int protocol, void* output_buffer, DWORD address_length,
long* cancel_requested, operation* op)
{
new_socket.reset();
iocp_service_.work_started();
// Check if we were cancelled after the first AcceptEx completed.
  if (cancel_requested)
    if (::InterlockedExchangeAdd(cancel_requested, 0) == 1)
    {
      iocp_service_.on_completion(op, asio::error::operation_aborted);
      return;
    }
asio::error_code ec;
new_socket.reset(socket_ops::socket(family, type, protocol, ec));
if (new_socket.get() == invalid_socket)
iocp_service_.on_completion(op, ec);
else
{
DWORD bytes_read = 0;
BOOL result = ::AcceptEx(s, new_socket.get(), output_buffer,
0, address_length, address_length, &bytes_read, op);
DWORD last_error = ::WSAGetLastError();
if (!result && last_error != WSA_IO_PENDING)
iocp_service_.on_completion(op, last_error);
else
{
#if defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0600)
if (cancel_requested)
{
if (::InterlockedExchangeAdd(cancel_requested, 0) == 1)
{
HANDLE sock_as_handle = reinterpret_cast<HANDLE>(s);
::CancelIoEx(sock_as_handle, op);
}
}
#endif // defined(_WIN32_WINNT) && (_WIN32_WINNT >= 0x0600)
iocp_service_.on_pending(op);
}
}
}
void win_iocp_socket_service_base::start_reactor_op(
win_iocp_socket_service_base::base_implementation_type& impl,
int op_type, reactor_op* op)
{
select_reactor& r = get_reactor();
update_cancellation_thread_id(impl);
if (is_open(impl))
{
r.start_op(op_type, impl.socket_, impl.reactor_data_, op, false, false);
return;
}
else
op->ec_ = asio::error::bad_descriptor;
iocp_service_.post_immediate_completion(op, false);
}
int win_iocp_socket_service_base::start_connect_op(
win_iocp_socket_service_base::base_implementation_type& impl,
int family, int type, const void* addr, std::size_t addrlen,
win_iocp_socket_connect_op_base* op, operation* iocp_op)
{
// If ConnectEx is available, use that.
if (family == ASIO_OS_DEF(AF_INET)
|| family == ASIO_OS_DEF(AF_INET6))
{
if (connect_ex_fn connect_ex = get_connect_ex(impl, type))
{
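      // ConnectEx requires the socket to be bound before it is called, so
      // bind it to the wildcard address here. A bind failure reporting
      // "invalid argument" is tolerated below, as it indicates that the
      // socket was already bound.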
union address_union
{
socket_addr_type base;
sockaddr_in4_type v4;
sockaddr_in6_type v6;
} a;
using namespace std; // For memset.
memset(&a, 0, sizeof(a));
a.base.sa_family = family;
socket_ops::bind(impl.socket_, &a.base,
family == ASIO_OS_DEF(AF_INET)
? sizeof(a.v4) : sizeof(a.v6), op->ec_);
if (op->ec_ && op->ec_ != asio::error::invalid_argument)
{
iocp_service_.post_immediate_completion(op, false);
return -1;
}
op->connect_ex_ = true;
update_cancellation_thread_id(impl);
iocp_service_.work_started();
BOOL result = connect_ex(impl.socket_,
static_cast<const socket_addr_type*>(addr),
static_cast<int>(addrlen), 0, 0, 0, iocp_op);
DWORD last_error = ::WSAGetLastError();
if (!result && last_error != WSA_IO_PENDING)
iocp_service_.on_completion(iocp_op, last_error);
else
iocp_service_.on_pending(iocp_op);
return -1;
}
}
// Otherwise, fall back to a reactor-based implementation.
select_reactor& r = get_reactor();
update_cancellation_thread_id(impl);
if ((impl.state_ & socket_ops::non_blocking) != 0
|| socket_ops::set_internal_non_blocking(
impl.socket_, impl.state_, true, op->ec_))
{
if (socket_ops::connect(impl.socket_, addr, addrlen, op->ec_) != 0)
{
if (op->ec_ == asio::error::in_progress
|| op->ec_ == asio::error::would_block)
{
op->ec_ = asio::error_code();
r.start_op(select_reactor::connect_op, impl.socket_,
impl.reactor_data_, op, false, false);
return select_reactor::connect_op;
}
}
}
r.post_immediate_completion(op, false);
return -1;
}
void win_iocp_socket_service_base::close_for_destruction(
win_iocp_socket_service_base::base_implementation_type& impl)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((iocp_service_.context(),
"socket", &impl, impl.socket_, "close"));
// Check if the reactor was created, in which case we need to close the
// socket on the reactor as well to cancel any operations that might be
// running there.
select_reactor* r = static_cast<select_reactor*>(
interlocked_compare_exchange_pointer(
reinterpret_cast<void**>(&reactor_), 0, 0));
if (r)
r->deregister_descriptor(impl.socket_, impl.reactor_data_, true);
asio::error_code ignored_ec;
socket_ops::close(impl.socket_, impl.state_, true, ignored_ec);
if (r)
r->cleanup_descriptor_data(impl.reactor_data_);
}
impl.socket_ = invalid_socket;
impl.state_ = 0;
impl.cancel_token_.reset();
#if defined(ASIO_ENABLE_CANCELIO)
impl.safe_cancellation_thread_id_ = 0;
#endif // defined(ASIO_ENABLE_CANCELIO)
}
void win_iocp_socket_service_base::update_cancellation_thread_id(
win_iocp_socket_service_base::base_implementation_type& impl)
{
#if defined(ASIO_ENABLE_CANCELIO)
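  // Track which thread has started asynchronous operations: 0 means none
  // yet, a specific thread id means exactly one thread, and ~DWORD(0) is a
  // sentinel meaning more than one thread (in which case CancelIo, which
  // only cancels operations started by the calling thread, cannot be used
  // safely).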
if (impl.safe_cancellation_thread_id_ == 0)
impl.safe_cancellation_thread_id_ = ::GetCurrentThreadId();
else if (impl.safe_cancellation_thread_id_ != ::GetCurrentThreadId())
impl.safe_cancellation_thread_id_ = ~DWORD(0);
#else // defined(ASIO_ENABLE_CANCELIO)
(void)impl;
#endif // defined(ASIO_ENABLE_CANCELIO)
}
select_reactor& win_iocp_socket_service_base::get_reactor()
{
select_reactor* r = static_cast<select_reactor*>(
interlocked_compare_exchange_pointer(
reinterpret_cast<void**>(&reactor_), 0, 0));
if (!r)
{
r = &(use_service<select_reactor>(context_));
interlocked_exchange_pointer(reinterpret_cast<void**>(&reactor_), r);
}
return *r;
}
win_iocp_socket_service_base::connect_ex_fn
win_iocp_socket_service_base::get_connect_ex(
win_iocp_socket_service_base::base_implementation_type& impl, int type)
{
#if defined(ASIO_DISABLE_CONNECTEX)
(void)impl;
(void)type;
return 0;
#else // defined(ASIO_DISABLE_CONNECTEX)
if (type != ASIO_OS_DEF(SOCK_STREAM)
&& type != ASIO_OS_DEF(SOCK_SEQPACKET))
return 0;
void* ptr = interlocked_compare_exchange_pointer(&connect_ex_, 0, 0);
if (!ptr)
{
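    // This GUID is WSAID_CONNECTEX; passing it to WSAIoctl with
    // SIO_GET_EXTENSION_FUNCTION_POINTER retrieves the address of the
    // ConnectEx extension function at runtime.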
GUID guid = { 0x25a207b9, 0xddf3, 0x4660,
{ 0x8e, 0xe9, 0x76, 0xe5, 0x8c, 0x74, 0x06, 0x3e } };
DWORD bytes = 0;
if (::WSAIoctl(impl.socket_, SIO_GET_EXTENSION_FUNCTION_POINTER,
&guid, sizeof(guid), &ptr, sizeof(ptr), &bytes, 0, 0) != 0)
{
// Set connect_ex_ to a special value to indicate that ConnectEx is
// unavailable. That way we won't bother trying to look it up again.
ptr = this;
}
interlocked_exchange_pointer(&connect_ex_, ptr);
}
return reinterpret_cast<connect_ex_fn>(ptr == this ? 0 : ptr);
#endif // defined(ASIO_DISABLE_CONNECTEX)
}
win_iocp_socket_service_base::nt_set_info_fn
win_iocp_socket_service_base::get_nt_set_info()
{
void* ptr = interlocked_compare_exchange_pointer(&nt_set_info_, 0, 0);
if (!ptr)
{
if (HMODULE h = ::GetModuleHandleA("NTDLL.DLL"))
ptr = reinterpret_cast<void*>(GetProcAddress(h, "NtSetInformationFile"));
// On failure, set nt_set_info_ to a special value to indicate that the
// NtSetInformationFile function is unavailable. That way we won't bother
// trying to look it up again.
interlocked_exchange_pointer(&nt_set_info_, ptr ? ptr : this);
}
return reinterpret_cast<nt_set_info_fn>(ptr == this ? 0 : ptr);
}
void* win_iocp_socket_service_base::interlocked_compare_exchange_pointer(
void** dest, void* exch, void* cmp)
{
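  // On 32-bit x86 the pointer-based interlocked functions may be defined as
  // macros in terms of their LONG-based counterparts, so the casts through
  // LONG are written out explicitly. LONG and pointers have the same width
  // on this platform, making the casts safe.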
#if defined(_M_IX86)
return reinterpret_cast<void*>(InterlockedCompareExchange(
reinterpret_cast<PLONG>(dest), reinterpret_cast<LONG>(exch),
reinterpret_cast<LONG>(cmp)));
#else
return InterlockedCompareExchangePointer(dest, exch, cmp);
#endif
}
void* win_iocp_socket_service_base::interlocked_exchange_pointer(
void** dest, void* val)
{
#if defined(_M_IX86)
return reinterpret_cast<void*>(InterlockedExchange(
reinterpret_cast<PLONG>(dest), reinterpret_cast<LONG>(val)));
#else
return InterlockedExchangePointer(dest, val);
#endif
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_IOCP)
#endif // ASIO_DETAIL_IMPL_WIN_IOCP_SOCKET_SERVICE_BASE_IPP

View File

@@ -0,0 +1,84 @@
//
// detail/impl/win_mutex.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_MUTEX_IPP
#define ASIO_DETAIL_IMPL_WIN_MUTEX_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_WINDOWS)
#include "asio/detail/throw_error.hpp"
#include "asio/detail/win_mutex.hpp"
#include "asio/error.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
win_mutex::win_mutex()
{
int error = do_init();
asio::error_code ec(error,
asio::error::get_system_category());
asio::detail::throw_error(ec, "mutex");
}
int win_mutex::do_init()
{
#if defined(__MINGW32__)
// Not sure if MinGW supports structured exception handling, so for now
// we'll just call the Windows API and hope.
# if defined(UNDER_CE)
::InitializeCriticalSection(&crit_section_);
# elif defined(ASIO_WINDOWS_APP)
if (!::InitializeCriticalSectionEx(&crit_section_, 0, 0))
return ::GetLastError();
# else
if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000))
return ::GetLastError();
# endif
return 0;
#else
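  // On older versions of Windows, InitializeCriticalSection may raise a
  // STATUS_NO_MEMORY structured exception under low-memory conditions; the
  // __try/__except block converts that exception into an error code.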
__try
{
# if defined(UNDER_CE)
::InitializeCriticalSection(&crit_section_);
# elif defined(ASIO_WINDOWS_APP)
if (!::InitializeCriticalSectionEx(&crit_section_, 0, 0))
return ::GetLastError();
# else
if (!::InitializeCriticalSectionAndSpinCount(&crit_section_, 0x80000000))
return ::GetLastError();
# endif
}
__except(GetExceptionCode() == STATUS_NO_MEMORY
? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH)
{
return ERROR_OUTOFMEMORY;
}
return 0;
#endif
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_WINDOWS)
#endif // ASIO_DETAIL_IMPL_WIN_MUTEX_IPP

View File

@@ -0,0 +1,452 @@
//
// detail/impl/win_object_handle_service.ipp
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
//
// Copyright (c) 2003-2025 Christopher M. Kohlhoff (chris at kohlhoff dot com)
// Copyright (c) 2011 Boris Schaeling (boris@highscore.de)
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
#ifndef ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP
#define ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP
#if defined(_MSC_VER) && (_MSC_VER >= 1200)
# pragma once
#endif // defined(_MSC_VER) && (_MSC_VER >= 1200)
#include "asio/detail/config.hpp"
#if defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE)
#include "asio/detail/win_object_handle_service.hpp"
#include "asio/detail/push_options.hpp"
namespace asio {
namespace detail {
win_object_handle_service::win_object_handle_service(execution_context& context)
: execution_context_service_base<win_object_handle_service>(context),
scheduler_(asio::use_service<scheduler_impl>(context)),
mutex_(),
impl_list_(0),
shutdown_(false)
{
}
void win_object_handle_service::shutdown()
{
mutex::scoped_lock lock(mutex_);
// Setting this flag to true prevents new objects from being registered, and
// new asynchronous wait operations from being started. We only need to worry
// about cleaning up the operations that are currently in progress.
shutdown_ = true;
op_queue<operation> ops;
for (implementation_type* impl = impl_list_; impl; impl = impl->next_)
ops.push(impl->op_queue_);
lock.unlock();
scheduler_.abandon_operations(ops);
}
void win_object_handle_service::construct(
win_object_handle_service::implementation_type& impl)
{
impl.handle_ = INVALID_HANDLE_VALUE;
impl.wait_handle_ = INVALID_HANDLE_VALUE;
impl.owner_ = this;
// Insert implementation into linked list of all implementations.
mutex::scoped_lock lock(mutex_);
if (!shutdown_)
{
impl.next_ = impl_list_;
impl.prev_ = 0;
if (impl_list_)
impl_list_->prev_ = &impl;
impl_list_ = &impl;
}
}
void win_object_handle_service::move_construct(
win_object_handle_service::implementation_type& impl,
win_object_handle_service::implementation_type& other_impl)
{
mutex::scoped_lock lock(mutex_);
// Insert implementation into linked list of all implementations.
if (!shutdown_)
{
impl.next_ = impl_list_;
impl.prev_ = 0;
if (impl_list_)
impl_list_->prev_ = &impl;
impl_list_ = &impl;
}
impl.handle_ = other_impl.handle_;
other_impl.handle_ = INVALID_HANDLE_VALUE;
impl.wait_handle_ = other_impl.wait_handle_;
other_impl.wait_handle_ = INVALID_HANDLE_VALUE;
impl.op_queue_.push(other_impl.op_queue_);
impl.owner_ = this;
// We must not hold the lock while calling UnregisterWaitEx. This is because
// the registered callback function might be invoked while we are waiting for
// UnregisterWaitEx to complete.
lock.unlock();
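  // Passing INVALID_HANDLE_VALUE as the completion event causes
  // UnregisterWaitEx to block until any in-flight callback has finished
  // executing.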
if (impl.wait_handle_ != INVALID_HANDLE_VALUE)
::UnregisterWaitEx(impl.wait_handle_, INVALID_HANDLE_VALUE);
if (!impl.op_queue_.empty())
register_wait_callback(impl, lock);
}
void win_object_handle_service::move_assign(
win_object_handle_service::implementation_type& impl,
win_object_handle_service& other_service,
win_object_handle_service::implementation_type& other_impl)
{
asio::error_code ignored_ec;
close(impl, ignored_ec);
mutex::scoped_lock lock(mutex_);
if (this != &other_service)
{
// Remove implementation from linked list of all implementations.
if (impl_list_ == &impl)
impl_list_ = impl.next_;
if (impl.prev_)
impl.prev_->next_ = impl.next_;
if (impl.next_)
impl.next_->prev_ = impl.prev_;
impl.next_ = 0;
impl.prev_ = 0;
}
impl.handle_ = other_impl.handle_;
other_impl.handle_ = INVALID_HANDLE_VALUE;
impl.wait_handle_ = other_impl.wait_handle_;
other_impl.wait_handle_ = INVALID_HANDLE_VALUE;
impl.op_queue_.push(other_impl.op_queue_);
impl.owner_ = this;
if (this != &other_service)
{
// Insert implementation into linked list of all implementations.
impl.next_ = other_service.impl_list_;
impl.prev_ = 0;
if (other_service.impl_list_)
other_service.impl_list_->prev_ = &impl;
other_service.impl_list_ = &impl;
}
// We must not hold the lock while calling UnregisterWaitEx. This is because
// the registered callback function might be invoked while we are waiting for
// UnregisterWaitEx to complete.
lock.unlock();
if (impl.wait_handle_ != INVALID_HANDLE_VALUE)
::UnregisterWaitEx(impl.wait_handle_, INVALID_HANDLE_VALUE);
if (!impl.op_queue_.empty())
register_wait_callback(impl, lock);
}
void win_object_handle_service::destroy(
win_object_handle_service::implementation_type& impl)
{
mutex::scoped_lock lock(mutex_);
// Remove implementation from linked list of all implementations.
if (impl_list_ == &impl)
impl_list_ = impl.next_;
if (impl.prev_)
impl.prev_->next_ = impl.next_;
if (impl.next_)
impl.next_->prev_ = impl.prev_;
impl.next_ = 0;
impl.prev_ = 0;
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((scheduler_.context(), "object_handle",
&impl, reinterpret_cast<uintmax_t>(impl.wait_handle_), "close"));
HANDLE wait_handle = impl.wait_handle_;
impl.wait_handle_ = INVALID_HANDLE_VALUE;
op_queue<operation> ops;
while (wait_op* op = impl.op_queue_.front())
{
op->ec_ = asio::error::operation_aborted;
impl.op_queue_.pop();
ops.push(op);
}
// We must not hold the lock while calling UnregisterWaitEx. This is
// because the registered callback function might be invoked while we are
// waiting for UnregisterWaitEx to complete.
lock.unlock();
if (wait_handle != INVALID_HANDLE_VALUE)
::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE);
::CloseHandle(impl.handle_);
impl.handle_ = INVALID_HANDLE_VALUE;
scheduler_.post_deferred_completions(ops);
}
}
asio::error_code win_object_handle_service::assign(
win_object_handle_service::implementation_type& impl,
const native_handle_type& handle, asio::error_code& ec)
{
if (is_open(impl))
{
ec = asio::error::already_open;
ASIO_ERROR_LOCATION(ec);
return ec;
}
impl.handle_ = handle;
ec = asio::error_code();
return ec;
}
asio::error_code win_object_handle_service::close(
win_object_handle_service::implementation_type& impl,
asio::error_code& ec)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((scheduler_.context(), "object_handle",
&impl, reinterpret_cast<uintmax_t>(impl.wait_handle_), "close"));
mutex::scoped_lock lock(mutex_);
HANDLE wait_handle = impl.wait_handle_;
impl.wait_handle_ = INVALID_HANDLE_VALUE;
op_queue<operation> completed_ops;
while (wait_op* op = impl.op_queue_.front())
{
impl.op_queue_.pop();
op->ec_ = asio::error::operation_aborted;
completed_ops.push(op);
}
// We must not hold the lock while calling UnregisterWaitEx. This is
// because the registered callback function might be invoked while we are
// waiting for UnregisterWaitEx to complete.
lock.unlock();
if (wait_handle != INVALID_HANDLE_VALUE)
::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE);
if (::CloseHandle(impl.handle_))
{
impl.handle_ = INVALID_HANDLE_VALUE;
ec = asio::error_code();
}
else
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
}
scheduler_.post_deferred_completions(completed_ops);
}
else
{
ec = asio::error_code();
}
ASIO_ERROR_LOCATION(ec);
return ec;
}
asio::error_code win_object_handle_service::cancel(
win_object_handle_service::implementation_type& impl,
asio::error_code& ec)
{
if (is_open(impl))
{
ASIO_HANDLER_OPERATION((scheduler_.context(), "object_handle",
&impl, reinterpret_cast<uintmax_t>(impl.wait_handle_), "cancel"));
mutex::scoped_lock lock(mutex_);
HANDLE wait_handle = impl.wait_handle_;
impl.wait_handle_ = INVALID_HANDLE_VALUE;
op_queue<operation> completed_ops;
while (wait_op* op = impl.op_queue_.front())
{
op->ec_ = asio::error::operation_aborted;
impl.op_queue_.pop();
completed_ops.push(op);
}
// We must not hold the lock while calling UnregisterWaitEx. This is
// because the registered callback function might be invoked while we are
// waiting for UnregisterWaitEx to complete.
lock.unlock();
if (wait_handle != INVALID_HANDLE_VALUE)
::UnregisterWaitEx(wait_handle, INVALID_HANDLE_VALUE);
ec = asio::error_code();
scheduler_.post_deferred_completions(completed_ops);
}
else
{
ec = asio::error::bad_descriptor;
}
ASIO_ERROR_LOCATION(ec);
return ec;
}
void win_object_handle_service::wait(
win_object_handle_service::implementation_type& impl,
asio::error_code& ec)
{
switch (::WaitForSingleObject(impl.handle_, INFINITE))
{
case WAIT_FAILED:
{
DWORD last_error = ::GetLastError();
ec = asio::error_code(last_error,
asio::error::get_system_category());
ASIO_ERROR_LOCATION(ec);
break;
}
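  // WAIT_OBJECT_0 and WAIT_ABANDONED both mean the wait completed, so they
  // are treated as success.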
case WAIT_OBJECT_0:
case WAIT_ABANDONED:
default:
ec = asio::error_code();
break;
}
}
void win_object_handle_service::start_wait_op(
win_object_handle_service::implementation_type& impl, wait_op* op)
{
scheduler_.work_started();
if (is_open(impl))
{
mutex::scoped_lock lock(mutex_);
if (!shutdown_)
{
impl.op_queue_.push(op);
// Only the first operation to be queued gets to register a wait callback.
// Subsequent operations have to wait for the first to finish.
if (impl.op_queue_.front() == op)
register_wait_callback(impl, lock);
}
else
{
lock.unlock();
scheduler_.post_deferred_completion(op);
}
}
else
{
op->ec_ = asio::error::bad_descriptor;
scheduler_.post_deferred_completion(op);
}
}
void win_object_handle_service::register_wait_callback(
win_object_handle_service::implementation_type& impl,
mutex::scoped_lock& lock)
{
lock.lock();
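  // WT_EXECUTEONLYONCE means the thread-pool wait fires at most once; the
  // wait callback re-registers the wait if further operations remain queued.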
if (!RegisterWaitForSingleObject(&impl.wait_handle_,
impl.handle_, &win_object_handle_service::wait_callback,
&impl, INFINITE, WT_EXECUTEONLYONCE))
{
DWORD last_error = ::GetLastError();
asio::error_code ec(last_error,
asio::error::get_system_category());
op_queue<operation> completed_ops;
while (wait_op* op = impl.op_queue_.front())
{
op->ec_ = ec;
impl.op_queue_.pop();
completed_ops.push(op);
}
lock.unlock();
scheduler_.post_deferred_completions(completed_ops);
}
}
void win_object_handle_service::wait_callback(PVOID param, BOOLEAN)
{
implementation_type* impl = static_cast<implementation_type*>(param);
mutex::scoped_lock lock(impl->owner_->mutex_);
if (impl->wait_handle_ != INVALID_HANDLE_VALUE)
{
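    // Use a non-blocking unregister (NULL completion event) here: this code
    // runs inside the wait callback itself, so blocking until all callbacks
    // complete would deadlock.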
::UnregisterWaitEx(impl->wait_handle_, NULL);
impl->wait_handle_ = INVALID_HANDLE_VALUE;
}
if (wait_op* op = impl->op_queue_.front())
{
op_queue<operation> completed_ops;
op->ec_ = asio::error_code();
impl->op_queue_.pop();
completed_ops.push(op);
if (!impl->op_queue_.empty())
{
if (!RegisterWaitForSingleObject(&impl->wait_handle_,
impl->handle_, &win_object_handle_service::wait_callback,
param, INFINITE, WT_EXECUTEONLYONCE))
{
DWORD last_error = ::GetLastError();
asio::error_code ec(last_error,
asio::error::get_system_category());
while ((op = impl->op_queue_.front()) != 0)
{
op->ec_ = ec;
impl->op_queue_.pop();
completed_ops.push(op);
}
}
}
scheduler_impl& sched = impl->owner_->scheduler_;
lock.unlock();
sched.post_deferred_completions(completed_ops);
}
}
} // namespace detail
} // namespace asio
#include "asio/detail/pop_options.hpp"
#endif // defined(ASIO_HAS_WINDOWS_OBJECT_HANDLE)
#endif // ASIO_DETAIL_IMPL_WIN_OBJECT_HANDLE_SERVICE_IPP
