synchronization

See Public API for a list of names and headers that are part of the public HPX API.

namespace hpx
namespace experimental
template<typename ReadWriteT, typename ReadT, typename Allocator>
class async_rw_mutex
#include <async_rw_mutex.hpp>

Read-write mutex where access is granted to a value through senders.

The wrapped value is accessed through read and readwrite, both of which return senders which call set_value on a connected receiver when the wrapped value is safe to read or write. The senders send the value through a wrapper type which is implicitly convertible to a reference of the wrapped value. Read-only senders send wrappers that are convertible to const references.

A read-write sender gives exclusive access to the wrapped value, while a read-only sender gives shared (with other read-only senders) access to the value.

A void mutex acts as a mutex around some user-managed resource, i.e. the void mutex does not manage any value and the types sent by the senders are not convertible to references of a wrapped value. The sent types are copyable and release access to the protected resource when they are released.

The order in which senders call set_value is determined by the order in which the senders are retrieved from the mutex. Connecting and starting the senders is thread-safe.

Retrieving senders from the mutex is not thread-safe.

The mutex is movable and non-copyable.
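
The following minimal sketch illustrates the intended usage; the header paths and the sender algorithms then, start_detached, and sync_wait (from hpx::execution::experimental and hpx::this_thread::experimental) are assumptions of this sketch, not prescribed by this reference:

    #include <hpx/execution.hpp>                          // assumed to provide then/start_detached/sync_wait
    #include <hpx/synchronization/async_rw_mutex.hpp>     // header path assumed

    #include <iostream>
    #include <utility>

    void async_rw_mutex_example()
    {
        namespace ex = hpx::execution::experimental;

        hpx::experimental::async_rw_mutex<int> m{0};

        // Exclusive access: the sent wrapper is implicitly convertible to int&.
        auto write = ex::then(m.readwrite(), [](auto rw) {
            int& value = rw;
            value = 42;
        });

        // Shared access: set_value is called only once the readwrite access
        // above has been released; the wrapper is convertible to int const&.
        auto read = ex::then(m.read(), [](auto r) {
            int const& value = r;
            std::cout << value << '\n';
        });

        // The senders were retrieved in write-then-read order, so read-only
        // access is granted only after the read-write access completes.
        ex::start_detached(std::move(write));
        hpx::this_thread::experimental::sync_wait(std::move(read));
    }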

Public Types

template<>
using read_type = std::decay_t<ReadT> const
template<>
using readwrite_type = std::decay_t<ReadWriteT>
template<>
using value_type = readwrite_type
template<>
using read_access_type = detail::async_rw_mutex_access_wrapper<readwrite_type, read_type, detail::async_rw_mutex_access_type::read>
template<>
using readwrite_access_type = detail::async_rw_mutex_access_wrapper<readwrite_type, read_type, detail::async_rw_mutex_access_type::readwrite>
template<>
using allocator_type = Allocator

Public Functions

async_rw_mutex()
template<typename U, typename = std::enable_if_t<!std::is_same<std::decay_t<U>, async_rw_mutex>::value>>
async_rw_mutex(U &&u, allocator_type const &alloc = {})
async_rw_mutex(async_rw_mutex&&)
async_rw_mutex &operator=(async_rw_mutex&&)
async_rw_mutex(async_rw_mutex const&)
async_rw_mutex &operator=(async_rw_mutex const&)
sender<detail::async_rw_mutex_access_type::read> read()
sender<detail::async_rw_mutex_access_type::readwrite> readwrite()

Private Types

template<>
using shared_state_type = detail::async_rw_mutex_shared_state<value_type>
template<>
using shared_state_ptr_type = std::shared_ptr<shared_state_type>

Private Members

value_type value
allocator_type alloc
detail::async_rw_mutex_access_type prev_access = detail::async_rw_mutex_access_type::readwrite
shared_state_ptr_type prev_state
shared_state_ptr_type state
template<detail::async_rw_mutex_access_type AccessType>
struct sender

Public Types

template<>
template<>
using access_type = detail::async_rw_mutex_access_wrapper<readwrite_type, read_type, AccessType>

Public Members

template<>
shared_state_ptr_type prev_state
template<>
shared_state_ptr_type state

Friends

template<typename Env>
auto tag_invoke(hpx::execution::experimental::get_completion_signatures_t, sender const&, Env)
template<typename R>
auto tag_invoke(hpx::execution::experimental::connect_t, sender &&s, R &&r)
template<typename Env>
struct generate_completion_signatures

Public Types

template<>
template<>
template<template<typename...> typename Tuple, template<typename...> typename Variant>
using value_types = Variant<Tuple<access_type>>
template<>
template<>
template<template<typename...> typename Variant>
using error_types = Variant<std::exception_ptr>

Public Static Attributes

template<>
template<>
constexpr bool sends_stopped = false
template<typename R>
struct operation_state

Public Functions

template<>
template<typename R_>
operation_state(R_ &&r, shared_state_ptr_type prev_state, shared_state_ptr_type state)
template<>
template<>
operation_state(operation_state&&)
template<>
template<>
operation_state &operator=(operation_state&&)
template<>
template<>
operation_state(operation_state const&)
template<>
template<>
operation_state &operator=(operation_state const&)

Public Members

template<>
template<>
std::decay_t<R> r
template<>
template<>
shared_state_ptr_type prev_state
template<>
template<>
shared_state_ptr_type state

Friends

void tag_invoke(hpx::execution::experimental::start_t, operation_state &os)
template<typename Allocator>
class async_rw_mutex<void, void, Allocator>

Public Types

template<>
using read_type = void
template<>
using readwrite_type = void
template<>
using read_access_type = detail::async_rw_mutex_access_wrapper<readwrite_type, read_type, detail::async_rw_mutex_access_type::read>
template<>
using readwrite_access_type = detail::async_rw_mutex_access_wrapper<readwrite_type, read_type, detail::async_rw_mutex_access_type::readwrite>
template<>
using allocator_type = Allocator

Public Functions

async_rw_mutex(allocator_type const &alloc = {})
async_rw_mutex(async_rw_mutex&&)
async_rw_mutex &operator=(async_rw_mutex&&)
async_rw_mutex(async_rw_mutex const&)
async_rw_mutex &operator=(async_rw_mutex const&)
sender<detail::async_rw_mutex_access_type::read> read()
sender<detail::async_rw_mutex_access_type::readwrite> readwrite()

Private Types

template<>
using shared_state_type = detail::async_rw_mutex_shared_state<void>
template<>
using shared_state_ptr_type = std::shared_ptr<shared_state_type>

Private Members

allocator_type alloc
detail::async_rw_mutex_access_type prev_access = detail::async_rw_mutex_access_type::readwrite
shared_state_ptr_type prev_state
shared_state_ptr_type state
template<detail::async_rw_mutex_access_type AccessType>
struct sender

Public Types

template<>
template<>
using access_type = detail::async_rw_mutex_access_wrapper<readwrite_type, read_type, AccessType>

Public Members

template<>
shared_state_ptr_type prev_state
template<>
shared_state_ptr_type state

Friends

template<typename Env>
auto tag_invoke(hpx::execution::experimental::get_completion_signatures_t, sender const&, Env)
template<typename R>
auto tag_invoke(hpx::execution::experimental::connect_t, sender &&s, R &&r)
template<typename Env>
struct generate_completion_signatures

Public Types

template<>
template<>
template<template<typename...> typename Tuple, template<typename...> typename Variant>
using value_types = Variant<Tuple<access_type>>
template<>
template<>
template<template<typename...> typename Variant>
using error_types = Variant<std::exception_ptr>

Public Static Attributes

template<>
template<>
constexpr bool sends_stopped = false
template<typename R>
struct operation_state

Public Functions

template<>
template<typename R_>
operation_state(R_ &&r, shared_state_ptr_type prev_state, shared_state_ptr_type state)
template<>
template<>
operation_state(operation_state&&)
template<>
template<>
operation_state &operator=(operation_state&&)
template<>
template<>
operation_state(operation_state const&)
template<>
template<>
operation_state &operator=(operation_state const&)

Public Members

template<>
template<>
std::decay_t<R> r
template<>
template<>
shared_state_ptr_type prev_state
template<>
template<>
shared_state_ptr_type state

Friends

void tag_invoke(hpx::execution::experimental::start_t, operation_state &os)
namespace hpx
template<typename OnCompletion = detail::empty_oncompletion>
class barrier
#include <barrier.hpp>

A barrier is a thread coordination mechanism whose lifetime consists of a sequence of barrier phases, where each phase allows at most an expected number of threads to block until the expected number of threads arrive at the barrier. [ Note: A barrier is useful for managing repeated tasks that are handled by multiple threads. - end note ] Each barrier phase consists of the following steps:

  • The expected count is decremented by each call to arrive or arrive_and_drop.

  • When the expected count reaches zero, the phase completion step is run. For the specialization with the default value of the CompletionFunction template parameter, the completion step is run as part of the call to arrive or arrive_and_drop that caused the expected count to reach zero. For other specializations, the completion step is run on one of the threads that arrived at the barrier during the phase.

  • When the completion step finishes, the expected count is reset to what was specified by the expected argument to the constructor, possibly adjusted by calls to arrive_and_drop, and the next phase starts.

Each phase defines a phase synchronization point. Threads that arrive at the barrier during the phase can block on the phase synchronization point by calling wait, and will remain blocked until the phase completion step is run. The phase completion step that is executed at the end of each phase has the following effects:

  • Invokes the completion function, equivalent to completion().

  • Unblocks all threads that are blocked on the phase synchronization point.

The end of the completion step strongly happens before the returns from all calls that were unblocked by the completion step. For specializations that do not have the default value of the CompletionFunction template parameter, the behavior is undefined if any of the barrier object’s member functions other than wait are called while the completion step is in progress.

Concurrent invocations of the member functions of barrier, other than its destructor, do not introduce data races. The member functions arrive and arrive_and_drop execute atomically.

CompletionFunction shall meet the Cpp17MoveConstructible (Table 28) and Cpp17Destructible (Table 32) requirements. std::is_nothrow_invocable_v<CompletionFunction&> shall be true.

The default value of the CompletionFunction template parameter is an unspecified type, such that, in addition to satisfying the requirements of CompletionFunction, it meets the Cpp17DefaultConstructible requirements (Table 27) and completion() has no effects.

barrier::arrival_token is an unspecified type, such that it meets the Cpp17MoveConstructible (Table 28), Cpp17MoveAssignable (Table 30), and Cpp17Destructible (Table 32) requirements.
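
A minimal sketch of the phase mechanics described above; the header paths and the use of hpx::async to spawn the participants are assumptions of this sketch:

    #include <hpx/barrier.hpp>     // assumed header for hpx::barrier
    #include <hpx/future.hpp>      // assumed header for hpx::async

    #include <cstdio>

    void barrier_example()
    {
        // Completion step: runs once per phase, after both participants have
        // arrived and before either of them is unblocked.
        auto on_completion = []() noexcept { std::puts("phase complete"); };

        hpx::barrier<decltype(on_completion)> b(2, on_completion);

        auto work = [&b] {
            // ... phase 1 work ...
            b.arrive_and_wait();    // equivalent to wait(arrive())
            // ... phase 2 work ...
            b.arrive_and_wait();
        };

        auto f1 = hpx::async(work);
        auto f2 = hpx::async(work);
        f1.get();
        f2.get();
    }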

Public Types

template<>
using arrival_token = bool

Public Functions

constexpr barrier(std::ptrdiff_t expected, OnCompletion completion = OnCompletion())

Preconditions: expected >= 0 is true and expected <= max() is true.

Effects: Sets both the initial expected count for each barrier phase and the current expected count for the first phase to expected. Initializes the completion function with std::move(completion). Starts the first phase. [ Note: If expected is 0 this object can only be destroyed. - end note ]

Exceptions

arrival_token arrive(std::ptrdiff_t update = 1)

Preconditions: update > 0 is true, and update is less than or equal to the expected count for the current barrier phase.

Effects: Constructs an object of type arrival_token that is associated with the phase synchronization point for the current phase. Then, decrements the expected count by update.

Synchronization: The call to arrive strongly happens before the start of the phase completion step for the current phase.

Returns: The constructed arrival_token object.

Exceptions

void wait(arrival_token &&old_phase) const

Preconditions: arrival is associated with the phase synchronization point for the current phase or the immediately preceding phase of the same barrier object.

Effects: Blocks at the synchronization point associated with HPX_MOVE(arrival) until the phase completion step of the synchronization point’s phase is run. [ Note: If arrival is associated with the synchronization point for a previous phase, the call returns immediately. - end note ]

Exceptions

void arrive_and_wait()

Effects: Equivalent to: wait(arrive()).

void arrive_and_drop()

Preconditions: The expected count for the current barrier phase is greater than zero.

Effects: Decrements the initial expected count for all subsequent phases by one. Then decrements the expected count for the current phase by one.

Synchronization: The call to arrive_and_drop strongly happens before the start of the phase completion step for the current phase.

Exceptions

Public Static Functions

static constexpr std::ptrdiff_t max()

Private Types

template<>
using mutex_type = lcos::local::spinlock

Private Members

mutex_type mtx_
hpx::lcos::local::detail::condition_variable cond_
std::ptrdiff_t expected_
std::ptrdiff_t arrived_
OnCompletion completion_
bool phase_
namespace hpx
namespace lcos
namespace local

Typedefs

template<typename T>
using channel_mpmc = bounded_channel<T, hpx::lcos::local::spinlock>
template<typename T, typename Mutex = util::spinlock>
class bounded_channel

Public Functions

bounded_channel(std::size_t size)
bounded_channel(bounded_channel &&rhs)
bounded_channel &operator=(bounded_channel &&rhs)
~bounded_channel()
bool get(T *val = nullptr) const
bool set(T &&t)
std::size_t close()
std::size_t capacity() const

Protected Functions

std::size_t close(std::unique_lock<mutex_type> &l)

Private Types

template<>
using mutex_type = Mutex

Private Functions

bool is_full(std::size_t tail) const
bool is_empty(std::size_t head) const

Private Members

hpx::util::cache_aligned_data<mutex_type> mtx_
hpx::util::cache_aligned_data<std::size_t> head_
hpx::util::cache_aligned_data<std::size_t> tail_
std::size_t size_
std::unique_ptr<T[]> buffer_
bool closed_
namespace hpx
namespace lcos
namespace local

Typedefs

template<typename T>
using channel_mpsc = base_channel_mpsc<T, hpx::lcos::local::spinlock>
template<typename T, typename Mutex = util::spinlock>
class base_channel_mpsc

Public Functions

base_channel_mpsc(std::size_t size)
base_channel_mpsc(base_channel_mpsc &&rhs)
base_channel_mpsc &operator=(base_channel_mpsc &&rhs)
~base_channel_mpsc()
bool get(T *val = nullptr) const
bool set(T &&t)
std::size_t close()
std::size_t capacity() const

Private Types

template<>
using mutex_type = Mutex

Private Functions

bool is_full(std::size_t tail) const
bool is_empty(std::size_t head) const

Private Members

hpx::util::cache_aligned_data<std::atomic<std::size_t>> head_
hpx::util::cache_aligned_data<tail_data> tail_
std::size_t size_
std::unique_ptr<T[]> buffer_
std::atomic<bool> closed_
struct tail_data

Public Members

template<>
mutex_type mtx_
template<>
std::atomic<std::size_t> tail_
namespace hpx
namespace lcos
namespace local
template<typename T>
class channel_spsc

Public Functions

channel_spsc(std::size_t size)
channel_spsc(channel_spsc &&rhs)
channel_spsc &operator=(channel_spsc &&rhs)
~channel_spsc()
bool get(T *val = nullptr) const
bool set(T &&t)
std::size_t close()
std::size_t capacity() const

Private Functions

bool is_full(std::size_t tail) const
bool is_empty(std::size_t head) const

Private Members

hpx::util::cache_aligned_data<std::atomic<std::size_t>> head_
hpx::util::cache_aligned_data<std::atomic<std::size_t>> tail_
std::size_t size_
std::unique_ptr<T[]> buffer_
std::atomic<bool> closed_
namespace hpx
namespace lcos
namespace local

Enums

enum cv_status

Values:

no_timeout
timeout
error
class condition_variable

Public Functions

condition_variable()
~condition_variable()
void notify_one(error_code &ec = throws)
void notify_all(error_code &ec = throws)
template<typename Mutex>
void wait(std::unique_lock<Mutex> &lock, error_code &ec = throws)
template<typename Mutex, typename Predicate>
void wait(std::unique_lock<Mutex> &lock, Predicate pred, error_code& = throws)
template<typename Mutex>
cv_status wait_until(std::unique_lock<Mutex> &lock, hpx::chrono::steady_time_point const &abs_time, error_code &ec = throws)
template<typename Mutex, typename Predicate>
bool wait_until(std::unique_lock<Mutex> &lock, hpx::chrono::steady_time_point const &abs_time, Predicate pred, error_code &ec = throws)
template<typename Mutex>
cv_status wait_for(std::unique_lock<Mutex> &lock, hpx::chrono::steady_duration const &rel_time, error_code &ec = throws)
template<typename Mutex, typename Predicate>
bool wait_for(std::unique_lock<Mutex> &lock, hpx::chrono::steady_duration const &rel_time, Predicate pred, error_code &ec = throws)

Private Types

using mutex_type = detail::condition_variable_data::mutex_type
using data_type = hpx::intrusive_ptr<detail::condition_variable_data>

Private Members

hpx::util::cache_aligned_data_derived<data_type> data_
class condition_variable_any

Public Functions

condition_variable_any()
~condition_variable_any()
void notify_one(error_code &ec = throws)
void notify_all(error_code &ec = throws)
template<typename Lock>
void wait(Lock &lock, error_code &ec = throws)
template<typename Lock, typename Predicate>
void wait(Lock &lock, Predicate pred, error_code& = throws)
template<typename Lock>
cv_status wait_until(Lock &lock, hpx::chrono::steady_time_point const &abs_time, error_code &ec = throws)
template<typename Lock, typename Predicate>
bool wait_until(Lock &lock, hpx::chrono::steady_time_point const &abs_time, Predicate pred, error_code &ec = throws)
template<typename Lock>
cv_status wait_for(Lock &lock, hpx::chrono::steady_duration const &rel_time, error_code &ec = throws)
template<typename Lock, typename Predicate>
bool wait_for(Lock &lock, hpx::chrono::steady_duration const &rel_time, Predicate pred, error_code &ec = throws)
template<typename Lock, typename Predicate>
bool wait(Lock &lock, stop_token stoken, Predicate pred, error_code &ec = throws)
template<typename Lock, typename Predicate>
bool wait_until(Lock &lock, stop_token stoken, hpx::chrono::steady_time_point const &abs_time, Predicate pred, error_code &ec = throws)
template<typename Lock, typename Predicate>
bool wait_for(Lock &lock, stop_token stoken, hpx::chrono::steady_duration const &rel_time, Predicate pred, error_code &ec = throws)

Private Types

using mutex_type = detail::condition_variable_data::mutex_type
using data_type = hpx::intrusive_ptr<detail::condition_variable_data>

Private Members

hpx::util::cache_aligned_data_derived<data_type> data_
namespace hpx
namespace lcos
namespace local

Typedefs

typedef counting_semaphore_var counting_semaphore
template<typename Mutex = hpx::lcos::local::spinlock, int N = 0>
class counting_semaphore_var : private hpx::lcos::local::cpp20_counting_semaphore<PTRDIFF_MAX, hpx::lcos::local::spinlock>

Public Functions

counting_semaphore_var(std::ptrdiff_t value = N)
counting_semaphore_var(counting_semaphore_var const&)
counting_semaphore_var &operator=(counting_semaphore_var const&)
void wait(std::ptrdiff_t count = 1)
bool try_wait(std::ptrdiff_t count = 1)
void signal(std::ptrdiff_t count = 1)

Signal the semaphore.

std::ptrdiff_t signal_all()

Private Types

template<>
using mutex_type = Mutex
template<typename Mutex = hpx::lcos::local::spinlock>
class cpp20_binary_semaphore : public hpx::lcos::local::cpp20_counting_semaphore<1, hpx::lcos::local::spinlock>

Public Functions

HPX_NON_COPYABLE(cpp20_binary_semaphore)
cpp20_binary_semaphore(std::ptrdiff_t value = 1)
~cpp20_binary_semaphore()
template<std::ptrdiff_t LeastMaxValue = PTRDIFF_MAX, typename Mutex = hpx::lcos::local::spinlock>
class cpp20_counting_semaphore

Public Functions

HPX_NON_COPYABLE(cpp20_counting_semaphore)
cpp20_counting_semaphore(std::ptrdiff_t value)
~cpp20_counting_semaphore()
void release(std::ptrdiff_t update = 1)
bool try_acquire()
void acquire()
bool try_acquire_until(hpx::chrono::steady_time_point const &abs_time)
bool try_acquire_for(hpx::chrono::steady_duration const &rel_time)

Public Static Functions

static constexpr std::ptrdiff_t max()

Protected Types

template<>
using mutex_type = Mutex

Protected Attributes

mutex_type mtx_
detail::counting_semaphore sem_
namespace hpx
namespace lcos
namespace local
class event
#include <event.hpp>

Event semaphores can be used for synchronizing multiple threads that need to wait for an event to occur. When the event occurs, all threads waiting for the event are woken up.
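
A minimal usage sketch (the header paths and the use of hpx::async are assumptions of this sketch):

    #include <hpx/future.hpp>                   // assumed header for hpx::async
    #include <hpx/synchronization/event.hpp>    // header path assumed

    void event_example()
    {
        hpx::lcos::local::event e;

        // Both waiters block until the event occurs (or return immediately
        // if it has already occurred).
        auto f1 = hpx::async([&e] { e.wait(); /* ... react to the event ... */ });
        auto f2 = hpx::async([&e] { e.wait(); /* ... react to the event ... */ });

        e.set();       // wake up all threads waiting on the event
        f1.get();
        f2.get();

        e.reset();     // the event can now be waited on again
    }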

Public Functions

event()

Construct a new event semaphore.

bool occurred()

Check if the event has occurred.

void wait()

Wait for the event to occur.

void set()

Release all threads waiting on this semaphore.

void reset()

Reset the event.

Private Types

typedef lcos::local::spinlock mutex_type

Private Functions

void wait_locked(std::unique_lock<mutex_type> &l)
void set_locked(std::unique_lock<mutex_type> l)

Private Members

mutex_type mtx_

This mutex protects the queue.

local::detail::condition_variable cond_
std::atomic<bool> event_
namespace hpx
class latch
#include <latch.hpp>

A latch is a thread coordination mechanism that allows one or more threads to block until an operation is completed. An individual latch is a single-use object; once the operation has been completed, the latch cannot be reused.

Subclassed by hpx::lcos::local::latch
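
A minimal sketch of the single-use pattern (the header paths and the use of hpx::async are assumptions of this sketch):

    #include <hpx/future.hpp>      // assumed header for hpx::async
    #include <hpx/latch.hpp>       // assumed header for hpx::latch

    #include <cstddef>
    #include <vector>

    void latch_example()
    {
        constexpr std::ptrdiff_t num_workers = 4;

        // One count per worker; once the counter reaches zero the latch
        // cannot be reused.
        hpx::latch l(num_workers);

        std::vector<hpx::future<void>> workers;
        for (std::ptrdiff_t i = 0; i != num_workers; ++i)
        {
            workers.push_back(hpx::async([&l] {
                // ... do work ...
                l.count_down(1);    // does not block
            }));
        }

        l.wait();    // blocks until the counter has reached zero
        for (auto& f : workers)
            f.get();
    }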

Public Functions

HPX_NON_COPYABLE(latch)
latch(std::ptrdiff_t count)

Initialize the latch

Requires: count >= 0. Synchronization: None. Postconditions: counter_ == count.

~latch()

Requires: No threads are blocked at the synchronization point.

Note

May be called even if some threads have not yet returned from wait() or count_down_and_wait(), provided that counter_ is 0.

Note

The destructor might not return until all threads have exited wait() or count_down_and_wait().

Note

It is the caller’s responsibility to ensure that no other thread enters wait() after one thread has called the destructor. This may require additional coordination.

void count_down(std::ptrdiff_t update)

Decrements counter_ by update. Does not block.

Requires: counter_ >= update and update >= 0.

Synchronization: Synchronizes with all calls that block on this latch and with all try_wait calls on this latch that return true.

Exceptions
  • Nothing.

bool try_wait() const

Returns: With very low probability false. Otherwise counter_ == 0.

void wait() const

If counter_ is 0, returns immediately. Otherwise, blocks the calling thread at the synchronization point until counter_ reaches 0.

Exceptions
  • Nothing.

void arrive_and_wait(std::ptrdiff_t update = 1)

Effects: Equivalent to: count_down(update); wait();

Public Static Functions

static constexpr std::ptrdiff_t max()

Returns: The maximum value of counter that the implementation supports.

Protected Types

using mutex_type = hpx::lcos::local::spinlock

Protected Attributes

util::cache_line_data<mutex_type> mtx_
util::cache_line_data<hpx::lcos::local::detail::condition_variable> cond_
std::atomic<std::ptrdiff_t> counter_
bool notified_
namespace lcos
namespace local
class latch : public hpx::latch
#include <latch.hpp>

A latch maintains an internal counter_ that is initialized when the latch is created. Threads may block at a synchronization point waiting for counter_ to be decremented to 0. When counter_ reaches 0, all such blocked threads are released.

Calls to count_down_and_wait(), count_down(), wait(), is_ready(), count_up(), and reset() behave as atomic operations.

Note

An hpx::latch is not an LCO in the sense that it has no global id and cannot be triggered using the action (parcel) mechanism. Use hpx::distributed::latch instead if this is required. It is just a low-level synchronization primitive that allows synchronizing a given number of threads.
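
A minimal sketch of the count_up/count_down pattern this latch adds on top of hpx::latch (the header paths and the use of hpx::async are assumptions of this sketch):

    #include <hpx/future.hpp>      // assumed header for hpx::async
    #include <hpx/latch.hpp>       // assumed header for hpx::lcos::local::latch

    void local_latch_example()
    {
        // Hold one count until all work has been registered so the latch
        // cannot reach zero prematurely.
        hpx::lcos::local::latch l(1);

        l.count_up(1);    // register a piece of work before launching it
        auto f = hpx::async([&l] {
            // ... do work ...
            l.count_down(1);
        });

        // Release the initially held count and wait for the registered work.
        l.count_down_and_wait();
        f.get();
    }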

Public Functions

HPX_NON_COPYABLE(latch)
latch(std::ptrdiff_t count)

Initialize the latch

Requires: count >= 0. Synchronization: None. Postconditions: counter_ == count.

~latch()

Requires: No threads are blocked at the synchronization point.

Note

May be called even if some threads have not yet returned from wait() or count_down_and_wait(), provided that counter_ is 0.

Note

The destructor might not return until all threads have exited wait() or count_down_and_wait().

Note

It is the caller’s responsibility to ensure that no other thread enters wait() after one thread has called the destructor. This may require additional coordination.

void count_down_and_wait()

Decrements counter_ by 1 . Blocks at the synchronization point until counter_ reaches 0.

Requires: counter_ > 0.

Synchronization: Synchronizes with all calls that block on this latch and with all is_ready calls on this latch that return true.

Exceptions
  • Nothing.

bool is_ready() const

Returns: counter_ == 0. Does not block.

Exceptions
  • Nothing.

void abort_all()
void count_up(std::ptrdiff_t n)

Increments counter_ by n. Does not block.

Requires: n >= 0.

Exceptions
  • Nothing.

void reset(std::ptrdiff_t n)

Reset counter_ to n. Does not block.

Requires: n >= 0.

Exceptions
  • Nothing.

bool reset_if_needed_and_count_up(std::ptrdiff_t n, std::ptrdiff_t count)

Effects: Equivalent to: if (is_ready()) reset(count); count_up(n);

Returns: true if the latch was reset.

namespace hpx
namespace lcos
namespace local

Functions

template<typename Mutex>
void swap(upgrade_lock<Mutex> &lhs, upgrade_lock<Mutex> &rhs)
template<typename Mutex>
class upgrade_lock

Public Types

template<>
using mutex_type = Mutex

Public Functions

upgrade_lock(upgrade_lock const&)
upgrade_lock &operator=(upgrade_lock const&)
upgrade_lock()
upgrade_lock(Mutex &m_)
upgrade_lock(Mutex &m_, std::adopt_lock_t)
upgrade_lock(Mutex &m_, std::defer_lock_t)
upgrade_lock(Mutex &m_, std::try_to_lock_t)
upgrade_lock(upgrade_lock<Mutex> &&other)
upgrade_lock(std::unique_lock<Mutex> &&other)
upgrade_lock &operator=(upgrade_lock<Mutex> &&other)
void swap(upgrade_lock &other)
Mutex *mutex() const
Mutex *release()
~upgrade_lock()
void lock()
bool try_lock()
void unlock()
operator bool() const
bool owns_lock() const

Protected Attributes

Mutex *m
bool is_locked

Friends

friend hpx::lcos::local::upgrade_to_unique_lock
template<typename Mutex>
class upgrade_to_unique_lock

Public Types

template<>
using mutex_type = Mutex

Public Functions

upgrade_to_unique_lock(upgrade_to_unique_lock const&)
upgrade_to_unique_lock &operator=(upgrade_to_unique_lock const&)
upgrade_to_unique_lock(upgrade_lock<Mutex> &m_)
~upgrade_to_unique_lock()
upgrade_to_unique_lock(upgrade_to_unique_lock<Mutex> &&other)
upgrade_to_unique_lock &operator=(upgrade_to_unique_lock<Mutex> &&other)
void swap(upgrade_to_unique_lock &other)
operator bool() const
bool owns_lock() const
Mutex *mutex() const

Private Members

upgrade_lock<Mutex> *source
std::unique_lock<Mutex> exclusive
namespace hpx
namespace lcos
namespace local
class mutex

Subclassed by hpx::lcos::local::timed_mutex

Public Functions

HPX_NON_COPYABLE(mutex)
mutex(char const *const description = "")
~mutex()
void lock(char const *description, error_code &ec = throws)
void lock(error_code &ec = throws)
bool try_lock(char const *description, error_code &ec = throws)
bool try_lock(error_code &ec = throws)
void unlock(error_code &ec = throws)

Protected Types

typedef lcos::local::spinlock mutex_type

Protected Attributes

mutex_type mtx_
threads::thread_id_type owner_id_
lcos::local::detail::condition_variable cond_
class timed_mutex : private hpx::lcos::local::mutex

Public Functions

HPX_NON_COPYABLE(timed_mutex)
timed_mutex(char const *const description = "")
~timed_mutex()
bool try_lock_until(hpx::chrono::steady_time_point const &abs_time, char const *description, error_code &ec = throws)
bool try_lock_until(hpx::chrono::steady_time_point const &abs_time, error_code &ec = throws)
bool try_lock_for(hpx::chrono::steady_duration const &rel_time, char const *description, error_code &ec = throws)
bool try_lock_for(hpx::chrono::steady_duration const &rel_time, error_code &ec = throws)
void lock(char const *description, error_code &ec = throws)
void lock(error_code &ec = throws)
bool try_lock(char const *description, error_code &ec = throws)
bool try_lock(error_code &ec = throws)
void unlock(error_code &ec = throws)
namespace threads

Typedefs

using thread_id_ref_type = thread_id_ref
using thread_self = coroutines::detail::coroutine_self

Functions

thread_id get_self_id()

The function get_self_id returns the HPX thread id of the current thread (or zero if the current thread is not an HPX thread).

thread_self *get_self_ptr()

The function get_self_ptr returns a pointer to the (OS-thread-specific) self reference to the current HPX thread.
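
A small sketch using these functions to detect whether the calling code runs on an HPX thread (the header path is an assumption of this sketch):

    #include <hpx/thread.hpp>    // assumed to declare hpx::threads::get_self_id/get_self_ptr

    bool running_on_hpx_thread()
    {
        // get_self_ptr() returns nullptr when called from a plain OS thread.
        return hpx::threads::get_self_ptr() != nullptr;
    }

    void tag_current_thread()
    {
        // get_self_id() returns a zero id when not called from an HPX thread.
        auto const id = hpx::threads::get_self_id();
        // ... associate id with some bookkeeping ...
    }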

namespace hpx
namespace lcos
namespace local
struct no_mutex

Public Functions

void lock()
bool try_lock()
void unlock()

Defines

HPX_ONCE_INIT
namespace hpx
namespace lcos
namespace local

Functions

template<typename F, typename ...Args>
void call_once(once_flag &flag, F &&f, Args&&... args)
struct once_flag

Public Functions

HPX_NON_COPYABLE(once_flag)
once_flag()

Private Members

std::atomic<long> status_
lcos::local::event event_

Friends

template<typename F, typename ...Args>
void call_once(once_flag &flag, F &&f, Args&&... args)
namespace hpx
namespace lcos
namespace local

Typedefs

using recursive_mutex = detail::recursive_mutex_impl<>
namespace hpx
namespace lcos
namespace local

Typedefs

typedef detail::shared_mutex shared_mutex
namespace hpx
namespace lcos
namespace local

Typedefs

typedef sliding_semaphore_var sliding_semaphore
template<typename Mutex = hpx::lcos::local::spinlock>
class sliding_semaphore_var
#include <sliding_semaphore.hpp>

A semaphore is a protected variable (an entity storing a value) or abstract data type (an entity grouping several variables that may or may not be numerical) which constitutes the classic method for restricting access to shared resources, such as shared memory, in a multiprogramming environment. Semaphores exist in many variants, though usually the term refers to a counting semaphore, since a binary semaphore is better known as a mutex. A counting semaphore is a counter for a set of available resources, rather than a locked/unlocked flag of a single resource. It was invented by Edsger Dijkstra. Semaphores are the classic solution to preventing race conditions in the dining philosophers problem, although they do not prevent resource deadlocks.

Sliding semaphores can be used for synchronizing multiple threads as well: one thread waiting for several other threads to touch (signal) the semaphore, or several threads waiting for one other thread to touch this semaphore. The difference from a counting semaphore is that a sliding semaphore does not limit the number of threads that are allowed to proceed, but ensures that the difference between the (arbitrary) values passed to signal() and wait() does not exceed a given threshold.
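
A minimal throttling sketch: one thread raises the lower limit via signal() while another, calling wait(), is never allowed to run more than max_difference steps ahead. The header paths and the use of hpx::async are assumptions of this sketch.

    #include <hpx/future.hpp>                                // assumed header for hpx::async
    #include <hpx/synchronization/sliding_semaphore.hpp>     // header path assumed

    #include <cstdint>

    void sliding_semaphore_example()
    {
        std::int64_t const max_difference = 8;
        hpx::lcos::local::sliding_semaphore sem(max_difference);

        // Worker: raises the lower limit as it makes progress.
        auto worker = hpx::async([&sem] {
            for (std::int64_t i = 0; i != 100; ++i)
            {
                // ... perform step i ...
                sem.signal(i);    // new lower limit: i
            }
        });

        // Controller: suspended whenever it is more than max_difference
        // steps ahead of the worker.
        for (std::int64_t i = 0; i != 100; ++i)
        {
            sem.wait(i);    // suspends while i - lower_limit > max_difference
            // ... prepare step i ...
        }

        worker.get();
    }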

Public Functions

sliding_semaphore_var(std::int64_t max_difference, std::int64_t lower_limit = 0)

Construct a new sliding semaphore.

Parameters
  • max_difference: [in] The max difference between the upper limit (as set by wait()) and the lower limit (as set by signal()) which is allowed without suspending any thread calling wait().

  • lower_limit: [in] The initial lower limit.

void set_max_difference(std::int64_t max_difference, std::int64_t lower_limit = 0)

Set/Change the difference that will cause the semaphore to trigger.

Parameters
  • max_difference: [in] The max difference between the upper limit (as set by wait()) and the lower limit (as set by signal()) which is allowed without suspending any thread calling wait().

  • lower_limit: [in] The initial lower limit.

void wait(std::int64_t upper_limit)

Wait for the semaphore to be signaled.

Parameters
  • upper_limit: [in] The new upper limit. The calling thread will be suspended if the difference between this value and the largest lower_limit which was set by signal() is larger than the max_difference.

bool try_wait(std::int64_t upper_limit = 1)

Try to wait for the semaphore to be signaled.

Returns: true if the calling thread would not block if it called wait().

Parameters
  • upper_limit: [in] The new upper limit. The calling thread will be suspended if the difference between this value and the largest lower_limit which was set by signal() is larger than the max_difference.

void signal(std::int64_t lower_limit)

Signal the semaphore.

Parameters
  • lower_limit: [in] The new lower limit. This will update the current lower limit of this semaphore. It will also re-schedule all suspended threads for which their associated upper limit is not larger than the lower limit plus the max_difference.

std::int64_t signal_all()

Private Types

typedef Mutex mutex_type

Private Members

mutex_type mtx_
detail::sliding_semaphore sem_
namespace hpx
namespace lcos
namespace local
struct spinlock

Public Functions

HPX_NON_COPYABLE(spinlock)
spinlock(char const *const desc = "hpx::lcos::local::spinlock")
~spinlock()
void lock()
bool try_lock()
void unlock()

Private Functions

bool acquire_lock()
void relinquish_lock()
bool is_locked() const

Private Members

std::atomic<bool> v_
namespace hpx
namespace lcos
namespace local
struct spinlock_no_backoff
#include <spinlock_no_backoff.hpp>

boost::mutex-compatible spinlock class
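
Since the type provides lock(), try_lock(), and unlock(), it can be used with the standard lock helpers; a minimal sketch (header path assumed):

    #include <hpx/synchronization/spinlock_no_backoff.hpp>    // header path assumed

    #include <mutex>

    hpx::lcos::local::spinlock_no_backoff counter_lock;
    int counter = 0;

    void bump()
    {
        // std::lock_guard works because the spinlock models BasicLockable.
        std::lock_guard<hpx::lcos::local::spinlock_no_backoff> l(counter_lock);
        ++counter;
    }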

Public Functions

HPX_NON_COPYABLE(spinlock_no_backoff)
spinlock_no_backoff()
~spinlock_no_backoff()
void lock()
bool try_lock()
void unlock()

Private Functions

bool acquire_lock()
void relinquish_lock()
bool is_locked() const

Private Members

std::atomic<bool> v_
namespace hpx
namespace lcos
namespace local
template<typename Tag, std::size_t N = HPX_HAVE_SPINLOCK_POOL_NUM>
class spinlock_pool

Public Static Functions

static lcos::local::spinlock &spinlock_for(void const *pv)

Private Static Attributes

util::cache_aligned_data<lcos::local::spinlock> pool_
class scoped_lock

Public Functions

template<>
HPX_NON_COPYABLE(scoped_lock)
template<>
scoped_lock(void const *pv)
template<>
~scoped_lock()
template<>
void lock()
template<>
void unlock()

Private Members

template<>
hpx::lcos::local::spinlock &sp_
namespace hpx

Functions

template<typename Callback>
stop_callback(stop_token, Callback)
void swap(stop_token &lhs, stop_token &rhs)
void swap(stop_source &lhs, stop_source &rhs)

Variables

constexpr nostopstate_t nostopstate = {}
struct nostopstate_t

Public Functions

nostopstate_t()
template<typename Callback>
class stop_callback

Public Types

template<>
using callback_type = Callback

Public Functions

template<typename CB, typename Enable = std::enable_if_t<std::is_constructible_v<Callback, CB>>>
stop_callback(stop_token const &st, CB &&cb)
template<typename CB, typename Enable = std::enable_if_t<std::is_constructible_v<Callback, CB>>>
stop_callback(stop_token &&st, CB &&cb)
~stop_callback()
stop_callback(stop_callback const&)
stop_callback(stop_callback&&)
stop_callback &operator=(stop_callback const&)
stop_callback &operator=(stop_callback&&)

Private Functions

void execute()

Private Members

HPX_NO_UNIQUE_ADDRESS Callback callback_
hpx::intrusive_ptr<detail::stop_state> state_
class stop_source

Public Functions

stop_source()
stop_source(nostopstate_t)
stop_source(stop_source const &rhs)
stop_source(stop_source&&)
stop_source &operator=(stop_source const &rhs)
stop_source &operator=(stop_source&&)
~stop_source()
void swap(stop_source &s)
stop_token get_token() const
bool stop_possible() const
bool stop_requested() const
bool request_stop()

Private Members

hpx::intrusive_ptr<detail::stop_state> state_

Friends

bool operator==(stop_source const &lhs, stop_source const &rhs)
bool operator!=(stop_source const &lhs, stop_source const &rhs)
class stop_token

Public Types

template<typename Callback>
using callback_type = stop_callback<Callback>

Public Functions

stop_token()
stop_token(stop_token const &rhs)
stop_token(stop_token&&)
stop_token &operator=(stop_token const &rhs)
stop_token &operator=(stop_token&&)
~stop_token()
void swap(stop_token &s)
bool stop_requested() const
bool stop_possible() const

Private Functions

stop_token(hpx::intrusive_ptr<detail::stop_state> const &state)

Private Members

hpx::intrusive_ptr<detail::stop_state> state_

Friends

friend hpx::stop_callback
friend hpx::stop_source
bool operator==(stop_token const &lhs, stop_token const &rhs)
bool operator!=(stop_token const &lhs, stop_token const &rhs)
namespace p2300_stop_token

Functions

template<typename Callback>
in_place_stop_callback(in_place_stop_token, Callback)
template<typename Callback>
class in_place_stop_callback

Public Types

template<>
using callback_type = Callback

Public Functions

template<typename CB, typename Enable = std::enable_if_t<std::is_constructible_v<Callback, CB>>>
in_place_stop_callback(in_place_stop_token const &st, CB &&cb)
template<typename CB, typename Enable = std::enable_if_t<std::is_constructible_v<Callback, CB>>>
in_place_stop_callback(in_place_stop_token &&st, CB &&cb)
~in_place_stop_callback()
in_place_stop_callback(in_place_stop_callback const&)
in_place_stop_callback(in_place_stop_callback&&)
in_place_stop_callback &operator=(in_place_stop_callback const&)
in_place_stop_callback &operator=(in_place_stop_callback&&)

Private Functions

void execute()

Private Members

HPX_NO_UNIQUE_ADDRESS Callback callback_
in_place_stop_source *source_
class in_place_stop_source

Public Functions

in_place_stop_source()
~in_place_stop_source()
in_place_stop_source(in_place_stop_source const&)
in_place_stop_source(in_place_stop_source&&)
in_place_stop_source &operator=(in_place_stop_source const&)
in_place_stop_source &operator=(in_place_stop_source&&)
in_place_stop_token get_token() const
bool request_stop()
bool stop_requested() const
bool stop_possible() const

Private Functions

bool register_callback(hpx::detail::stop_callback_base *cb)
void remove_callback(hpx::detail::stop_callback_base *cb)

Private Members

hpx::detail::stop_state state_

Friends

friend hpx::p2300_stop_token::in_place_stop_token
friend hpx::p2300_stop_token::in_place_stop_callback
class in_place_stop_token

Public Types

template<typename Callback>
using callback_type = in_place_stop_callback<Callback>

Public Functions

constexpr in_place_stop_token()
in_place_stop_token(in_place_stop_token const &rhs)
in_place_stop_token(in_place_stop_token &&rhs)
in_place_stop_token &operator=(in_place_stop_token const &rhs)
in_place_stop_token &operator=(in_place_stop_token &&rhs)
bool stop_requested() const
bool stop_possible() const
void swap(in_place_stop_token &rhs)

Private Functions

in_place_stop_token(in_place_stop_source const *source)

Private Members

in_place_stop_source const *source_

Friends

friend hpx::p2300_stop_token::in_place_stop_source
friend hpx::p2300_stop_token::in_place_stop_callback
friend constexpr bool operator==(in_place_stop_token const &lhs, in_place_stop_token const &rhs)
void swap(in_place_stop_token &x, in_place_stop_token &y)
struct never_stop_token

Public Types

template<typename>
using callback_type = callback_impl

Public Static Functions

static constexpr bool stop_requested()
static constexpr bool stop_possible()

Friends

friend constexpr bool operator==(never_stop_token const&, never_stop_token const&)
struct callback_impl

Public Functions

template<typename Callback>
constexpr callback_impl(never_stop_token, Callback&&)