execution

The contents of this module can be included with the header hpx/modules/execution.hpp. These headers may be used by user-code but are not guaranteed stable (neither header location nor contents). You are using these at your own risk. If you wish to use non-public functionality from a module we strongly suggest only including the module header hpx/modules/execution.hpp, not the particular header in which the functionality you would like to use is defined. See Public API for a list of names that are part of the public HPX API.

namespace hpx
namespace execution
namespace experimental

Variables

hpx::execution::experimental::bulk_t bulk
struct bulk_t : public hpx::functional::tag_fallback<bulk_t>

Friends

template<typename S, typename Shape, typename F>
friend constexpr auto tag_fallback_dispatch(bulk_t, S &&s, Shape const &shape, F &&f)
template<typename S, typename Shape, typename F>
friend constexpr auto tag_fallback_dispatch(bulk_t, S &&s, Shape &&shape, F &&f)
template<typename Shape, typename F>
friend constexpr auto tag_fallback_dispatch(bulk_t, Shape &&shape, F &&f)
namespace hpx
namespace execution
namespace experimental

Variables

hpx::execution::experimental::detach_t detach
struct detach_t : public hpx::functional::tag_fallback<detach_t>

Friends

template<typename S, typename Allocator = hpx::util::internal_allocator<>>
friend constexpr void tag_fallback_dispatch(detach_t, S &&s, Allocator const &a = Allocator{})
template<typename Allocator = hpx::util::internal_allocator<>>
friend constexpr auto tag_fallback_dispatch(detach_t, Allocator const &a = Allocator{})
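
A hedged usage sketch (sched below is an assumed scheduler object; the sketch assumes detach eagerly starts the given sender and discards it, so the work runs without anything waiting on the result):

    namespace ex = hpx::execution::experimental;
    ex::detach(ex::transform(ex::just_on(sched, 42),
        [](int x) { /* fire-and-forget work using x */ }));
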
namespace hpx
namespace execution
namespace experimental

Variables

hpx::execution::experimental::just_t just
struct just_t : public hpx::functional::tag_fallback<just_t>

Friends

template<typename ...Ts>
friend constexpr auto tag_fallback_dispatch(just_t, Ts&&... ts)
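
A minimal sketch of the intended use (assuming just produces a sender that completes immediately with the given values):

    namespace ex = hpx::execution::experimental;
    // a sender that completes with the values 1, 2.5 and std::string("three")
    auto s = ex::just(1, 2.5, std::string("three"));
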
namespace hpx
namespace execution
namespace experimental

Variables

hpx::execution::experimental::just_on_t just_on
struct just_on_t : public hpx::functional::tag_fallback<just_on_t>

Friends

template<typename Scheduler, typename ...Ts>
friend constexpr auto tag_fallback_dispatch(just_on_t, Scheduler &&scheduler, Ts&&... ts)
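
A minimal sketch (sched is an assumed scheduler object; just_on is assumed to behave like just, but the resulting sender completes on the given scheduler):

    namespace ex = hpx::execution::experimental;
    auto s = ex::just_on(sched, 42);   // completes with 42 on sched's execution context
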
namespace hpx
namespace execution
namespace experimental

Variables

hpx::execution::experimental::keep_future_t keep_future
struct keep_future_t : public hpx::functional::tag_fallback<keep_future_t>

Friends

template<typename Future>
friend constexpr auto tag_fallback_dispatch(keep_future_t, Future &&future)
friend constexpr auto tag_fallback_dispatch(keep_future_t)
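
A hedged sketch (assuming keep_future adapts a future into a sender that passes the future itself, rather than its unwrapped value, to the continuation):

    namespace ex = hpx::execution::experimental;
    hpx::future<int> f = hpx::async([] { return 42; });
    auto s = ex::keep_future(std::move(f))
           | ex::transform([](auto&& fut) { return fut.get() + 1; });
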
namespace hpx
namespace execution
namespace experimental

Variables

hpx::execution::experimental::make_future_t make_future
struct make_future_t : public hpx::functional::tag_fallback<make_future_t>

Friends

template<typename S, typename Allocator = hpx::util::internal_allocator<>>
friend constexpr auto tag_fallback_dispatch(make_future_t, S &&s, Allocator const &a = Allocator{})
template<typename Allocator = hpx::util::internal_allocator<>>
friend constexpr auto tag_fallback_dispatch(make_future_t, Allocator const &a = Allocator{})
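
A minimal sketch (assuming make_future converts a sender into an hpx::future holding the sent value):

    namespace ex = hpx::execution::experimental;
    hpx::future<int> f = ex::make_future(ex::just(42));
    int x = f.get();   // 42
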
namespace hpx
namespace execution
namespace experimental

Variables

hpx::execution::experimental::transform_t transform
struct transform_t : public hpx::functional::tag_fallback<transform_t>

Friends

template<typename S, typename F>
friend constexpr auto tag_fallback_dispatch(transform_t, S &&s, F &&f)
template<typename F>
friend constexpr auto tag_fallback_dispatch(transform_t, F &&f)
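
A minimal sketch (assuming transform applies f to the value sent by the predecessor, with the unary overload usable in a pipeline):

    namespace ex = hpx::execution::experimental;
    auto s  = ex::transform(ex::just(2), [](int x) { return x + 1; });    // sends 3
    auto s2 = ex::just(2) | ex::transform([](int x) { return x + 1; });   // pipeable form
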
namespace hpx
namespace execution
struct auto_chunk_size
#include <auto_chunk_size.hpp>

Loop iterations are divided into pieces and then assigned to threads. The number of loop iterations combined is determined based on measurements of how long the execution of 1% of the overall number of iterations takes. This executor parameters type makes sure that as many loop iterations are combined as necessary to run for the amount of time specified.

Public Functions

constexpr auto_chunk_size(std::uint64_t num_iters_for_timing = 0)

Construct an auto_chunk_size executor parameters object

Note

Default constructed auto_chunk_size executor parameter types will use 80 microseconds as the minimal time for which any of the scheduled chunks should run.

auto_chunk_size(hpx::chrono::steady_duration const &rel_time, std::uint64_t num_iters_for_timing = 0)

Construct an auto_chunk_size executor parameters object

Parameters
  • rel_time: [in] The time duration to use as the minimum to decide how many loop iterations should be combined.

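A hedged usage sketch (hpx::for_each from the parallel algorithms module and a std::vector<int> v are assumed; executor parameters objects are typically attached to a policy via .with()):

    // let the runtime pick chunk sizes based on timing measurements
    hpx::for_each(hpx::execution::par.with(hpx::execution::auto_chunk_size{}),
                  v.begin(), v.end(), [](int& x) { ++x; });
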
namespace parallel
namespace execution

Typedefs

template<typename T> using is_sequenced_execution_policy = hpx::is_sequenced_execution_policy<T> (deprecated; use hpx::is_sequenced_execution_policy<T> instead)
namespace hpx
namespace execution
struct dynamic_chunk_size
#include <dynamic_chunk_size.hpp>

Loop iterations are divided into pieces of size chunk_size and then dynamically scheduled among the threads; when a thread finishes one chunk, it is dynamically assigned another. If chunk_size is not specified, the default chunk size is 1.

Note

This executor parameters type is equivalent to OpenMP’s DYNAMIC scheduling directive.

Public Functions

constexpr dynamic_chunk_size(std::size_t chunk_size = 1)

Construct a dynamic_chunk_size executor parameters object

Parameters
  • chunk_size: [in] The optional chunk size to use as the number of loop iterations to schedule together. The default chunk size is 1.

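A hedged usage sketch (same assumptions as the auto_chunk_size example above: hpx::for_each and a std::vector<int> v):

    // hand out chunks of 100 iterations to idle threads on demand
    hpx::for_each(hpx::execution::par.with(hpx::execution::dynamic_chunk_size(100)),
                  v.begin(), v.end(), [](int& x) { ++x; });
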
namespace hpx
namespace parallel
namespace execution

Functions

HPX_HAS_MEMBER_XXX_TRAIT_DEF(has_pending_closures)
HPX_HAS_MEMBER_XXX_TRAIT_DEF(get_pu_mask)
HPX_HAS_MEMBER_XXX_TRAIT_DEF(set_scheduler_mode)

Variables

hpx::parallel::execution::has_pending_closures_t has_pending_closures
hpx::parallel::execution::get_pu_mask_t get_pu_mask
hpx::parallel::execution::set_scheduler_mode_t set_scheduler_mode
struct get_pu_mask_t : public hpx::functional::tag_fallback<get_pu_mask_t>
#include <execution_information.hpp>

Retrieve the bitmask describing the processing units the given thread is allowed to run on

All threads::executors invoke sched.get_pu_mask().

Note

If the executor does not support this operation, this call will always invoke hpx::threads::get_pu_mask()

Parameters
  • exec: [in] The executor object to use for querying the processing unit mask.

  • topo: [in] The topology object to use to extract the requested information.

  • thread_num: [in] The sequence number of the thread to retrieve information for.

Private Functions

template<typename Executor>
decltype(auto) friend tag_fallback_dispatch(get_pu_mask_t, Executor&&, threads::topology &topo, std::size_t thread_num)
template<typename Executor>
decltype(auto) friend tag_dispatch(get_pu_mask_t, Executor &&exec, threads::topology &topo, std::size_t thread_num)
struct has_pending_closures_t : public hpx::functional::tag_fallback<has_pending_closures_t>
#include <execution_information.hpp>

Retrieve whether this executor has operations pending or not.

Note

If the executor does not expose this information, this call will always return false

Parameters
  • exec: [in] The executor object from which to extract the requested information.

Private Functions

template<typename Executor>
decltype(auto) friend tag_fallback_dispatch(has_pending_closures_t, Executor&&)
template<typename Executor>
decltype(auto) friend tag_dispatch(has_pending_closures_t, Executor &&exec)
struct set_scheduler_mode_t : public hpx::functional::tag_fallback<set_scheduler_mode_t>
#include <execution_information.hpp>

Set various modes of operation on the scheduler underneath the given executor.

Note

This calls exec.set_scheduler_mode(mode) if it exists; otherwise it does nothing.

Parameters
  • exec: [in] The executor object to use.

  • mode: [in] The new mode for the scheduler to pick up

Friends

template<typename Executor, typename Mode>
void tag_fallback_dispatch(set_scheduler_mode_t, Executor&&, Mode const&)
template<typename Executor, typename Mode>
void tag_dispatch(set_scheduler_mode_t, Executor &&exec, Mode const &mode)
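
A hedged sketch of these queries (exec is an assumed executor; hpx::threads::create_topology and the scheduler_mode enumeration are assumed to be available from other HPX modules):

    namespace pe = hpx::parallel::execution;
    hpx::threads::topology& topo = hpx::threads::create_topology();
    auto mask    = pe::get_pu_mask(exec, topo, 0);   // PU mask of worker thread 0
    bool pending = pe::has_pending_closures(exec);   // false unless exec reports otherwise
    pe::set_scheduler_mode(exec,
        hpx::threads::policies::scheduler_mode::enable_stealing);
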
namespace hpx
namespace execution
namespace experimental

Variables

hpx::execution::experimental::with_priority_t with_priority
hpx::execution::experimental::get_priority_t get_priority
hpx::execution::experimental::with_stacksize_t with_stacksize
hpx::execution::experimental::get_stacksize_t get_stacksize
hpx::execution::experimental::with_hint_t with_hint
hpx::execution::experimental::get_hint_t get_hint
hpx::execution::experimental::with_annotation_t with_annotation
hpx::execution::experimental::get_annotation_t get_annotation
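
A hedged sketch of the property CPOs listed above (exec is an assumed executor or scheduler supporting these properties; hpx::threads::thread_priority is assumed from the threading module):

    namespace ex = hpx::execution::experimental;
    auto exec_hi  = ex::with_priority(exec, hpx::threads::thread_priority::high);
    auto priority = ex::get_priority(exec_hi);
    auto exec_ann = ex::with_annotation(exec, "my_task");
    auto name     = ex::get_annotation(exec_ann);
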
namespace parallel
namespace execution

Functions

template<typename ...Params>
constexpr executor_parameters_join<Params...>::type join_executor_parameters(Params&&... params)
template<typename Param>
constexpr Param &&join_executor_parameters(Param &&param)
template<typename ...Params>
struct executor_parameters_join

Public Types

template<>
using type = detail::executor_parameters<std::decay_t<Params>...>
template<typename Param>
struct executor_parameters_join<Param>

Public Types

template<>
using type = Param
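
A minimal sketch of the pass-through overload (a single argument is forwarded unchanged; passing several executor parameters objects with non-overlapping functionality would aggregate them into one detail::executor_parameters<...> object):

    namespace pe = hpx::parallel::execution;
    hpx::execution::static_chunk_size scs(128);
    auto&& params = pe::join_executor_parameters(scs);   // returns scs unchanged
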
namespace hpx
namespace parallel
namespace execution

Variables

hpx::parallel::execution::get_chunk_size_t get_chunk_size
hpx::parallel::execution::maximal_number_of_chunks_t maximal_number_of_chunks
hpx::parallel::execution::reset_thread_distribution_t reset_thread_distribution
hpx::parallel::execution::processing_units_count_t processing_units_count
hpx::parallel::execution::mark_begin_execution_t mark_begin_execution
hpx::parallel::execution::mark_end_of_scheduling_t mark_end_of_scheduling
hpx::parallel::execution::mark_end_execution_t mark_end_execution
struct get_chunk_size_t : public hpx::functional::tag_fallback<get_chunk_size_t>
#include <execution_parameters_fwd.hpp>

Return the number of invocations of the given function f which should be combined into a single task

Note

The parameter f is expected to be a nullary function returning a std::size_t representing the number of iterations the function has already executed (i.e. which don't have to be scheduled anymore).

Parameters
  • params: [in] The executor parameters object to use for determining the chunk size for the given number of tasks num_tasks.

  • exec: [in] The executor object which will be used for scheduling of the loop iterations.

  • f: [in] The function which will be optionally scheduled using the given executor.

  • cores: [in] The number of cores the number of chunks should be determined for.

  • num_tasks: [in] The number of tasks the chunk size should be determined for

Private Functions

template<typename Parameters, typename Executor, typename F>
decltype(auto) friend tag_fallback_dispatch(get_chunk_size_t, Parameters &&params, Executor &&exec, F &&f, std::size_t cores, std::size_t num_tasks)
struct mark_begin_execution_t : public hpx::functional::tag_fallback<mark_begin_execution_t>
#include <execution_parameters_fwd.hpp>

Mark the beginning of a parallel algorithm execution

Note

This calls params.mark_begin_execution(exec) if it exists; otherwise it does nothing.

Parameters
  • params: [in] The executor parameters object to use as a fallback if the executor does not expose this functionality.

Private Functions

template<typename Parameters, typename Executor>
decltype(auto) friend tag_fallback_dispatch(mark_begin_execution_t, Parameters &&params, Executor &&exec)
struct mark_end_execution_t : public hpx::functional::tag_fallback<mark_end_execution_t>
#include <execution_parameters_fwd.hpp>

Mark the end of a parallel algorithm execution

Note

This calls params.mark_end_execution(exec) if it exists; otherwise it does nothing.

Parameters
  • params: [in] The executor parameters object to use as a fallback if the executor does not expose this functionality.

Private Functions

template<typename Parameters, typename Executor>
decltype(auto) friend tag_fallback_dispatch(mark_end_execution_t, Parameters &&params, Executor &&exec)
struct mark_end_of_scheduling_t : public hpx::functional::tag_fallback<mark_end_of_scheduling_t>
#include <execution_parameters_fwd.hpp>

Mark the end of scheduling tasks during parallel algorithm execution

Note

This calls params.mark_end_of_scheduling(exec) if it exists; otherwise it does nothing.

Parameters
  • params: [in] The executor parameters object to use as a fallback if the executor does not expose this functionality.

Private Functions

template<typename Parameters, typename Executor>
decltype(auto) friend tag_fallback_dispatch(mark_end_of_scheduling_t, Parameters &&params, Executor &&exec)
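
A hedged sketch of how a parallel algorithm implementation might use the mark_* hooks documented above (params and exec are assumed executor-parameters and executor objects; each call is a no-op unless the parameters object implements the corresponding member):

    namespace pe = hpx::parallel::execution;
    pe::mark_begin_execution(params, exec);
    // ... create and schedule the chunks ...
    pe::mark_end_of_scheduling(params, exec);
    // ... wait for all chunks to finish ...
    pe::mark_end_execution(params, exec);
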
struct maximal_number_of_chunks_t : public hpx::functional::tag_fallback<maximal_number_of_chunks_t>
#include <execution_parameters_fwd.hpp>

Return the largest reasonable number of chunks to create for a single algorithm invocation.

Parameters
  • params: [in] The executor parameters object to use for determining the number of chunks for the given number of cores.

  • exec: [in] The executor object which will be used for scheduling of the loop iterations.

  • cores: [in] The number of cores the number of chunks should be determined for.

  • num_tasks: [in] The number of tasks the chunk size should be determined for

Private Functions

template<typename Parameters, typename Executor>
decltype(auto) friend tag_fallback_dispatch(maximal_number_of_chunks_t, Parameters &&params, Executor &&exec, std::size_t cores, std::size_t num_tasks)
struct processing_units_count_t : public hpx::functional::tag_fallback<processing_units_count_t>
#include <execution_parameters_fwd.hpp>

Retrieve the number of (kernel-)threads used by the associated executor.

Note

This calls params.processing_units_count() if it exists; otherwise the request is forwarded to the executor.

Parameters
  • params: [in] The executor parameters object to use as a fallback if the executor does not expose this functionality.

Private Functions

template<typename Parameters, typename Executor>
decltype(auto) friend tag_fallback_dispatch(processing_units_count_t, Parameters &&params, Executor &&exec)
struct reset_thread_distribution_t : public hpx::functional::tag_fallback<reset_thread_distribution_t>
#include <execution_parameters_fwd.hpp>

Reset the internal round robin thread distribution scheme for the given executor.

Note

This calls params.reset_thread_distribution(exec) if it exists; otherwise it does nothing.

Parameters
  • params: [in] The executor parameters object to use for resetting the thread distribution scheme.

  • exec: [in] The executor object to use.

Private Functions

template<typename Parameters, typename Executor>
decltype(auto) friend tag_fallback_dispatch(reset_thread_distribution_t, Parameters &&params, Executor &&exec)
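
A hedged sketch of the remaining queries (params, exec and num_tasks are assumed objects and values, as in the previous sketch):

    namespace pe = hpx::parallel::execution;
    std::size_t pus        = pe::processing_units_count(params, exec);
    std::size_t max_chunks = pe::maximal_number_of_chunks(params, exec, pus, num_tasks);
    pe::reset_thread_distribution(params, exec);
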
namespace hpx
namespace execution
struct guided_chunk_size
#include <guided_chunk_size.hpp>

Iterations are dynamically assigned to threads in blocks as threads request them until no blocks remain to be assigned. Similar to dynamic_chunk_size except that the block size decreases each time a number of loop iterations is given to a thread. The size of the initial block is proportional to number_of_iterations / number_of_cores. Subsequent blocks are proportional to number_of_iterations_remaining / number_of_cores. The optional chunk size parameter defines the minimum block size. The default chunk size is 1.

Note

This executor parameters type is equivalent to OpenMP’s GUIDED scheduling directive.

Public Functions

constexpr guided_chunk_size(std::size_t min_chunk_size = 1)

Construct a guided_chunk_size executor parameters object

Parameters
  • min_chunk_size: [in] The optional minimal chunk size to use as the minimal number of loop iterations to schedule together. The default minimal chunk size is 1.

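A hedged usage sketch (hpx::for_each and a std::vector<int> v are assumed, as in the earlier chunk-size examples):

    // start with large blocks, shrink towards a minimum of 10 iterations per block
    hpx::for_each(hpx::execution::par.with(hpx::execution::guided_chunk_size(10)),
                  v.begin(), v.end(), [](int& x) { ++x; });
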
namespace hpx
namespace execution
struct persistent_auto_chunk_size
#include <persistent_auto_chunk_size.hpp>

Loop iterations are divided into pieces and then assigned to threads. The number of loop iterations combined is determined based on measurements of how long the execution of 1% of the overall number of iterations takes. This executor parameters type makes sure that as many loop iterations are combined as necessary to run for the amount of time specified.

Public Functions

constexpr persistent_auto_chunk_size(std::uint64_t num_iters_for_timing = 0)

Construct a persistent_auto_chunk_size executor parameters object

Note

Default constructed persistent_auto_chunk_size executor parameter types will use 0 microseconds as the execution time for each chunk and 80 microseconds as the minimal time for which any of the scheduled chunks should run.

persistent_auto_chunk_size(hpx::chrono::steady_duration const &time_cs, std::uint64_t num_iters_for_timing = 0)

Construct a persistent_auto_chunk_size executor parameters object

Parameters
  • time_cs: The execution time for each chunk.

persistent_auto_chunk_size(hpx::chrono::steady_duration const &time_cs, hpx::chrono::steady_duration const &rel_time, std::uint64_t num_iters_for_timing = 0)

Construct a persistent_auto_chunk_size executor parameters object

Parameters
  • rel_time: [in] The time duration to use as the minimum to decide how many loop iterations should be combined.

  • time_cs: The execution time for each chunk.

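A hedged usage sketch (std::chrono literals, hpx::for_each and a std::vector<int> v are assumed; the duration is assumed to map to the time_cs parameter of the second constructor):

    using namespace std::chrono_literals;
    // aim for roughly 200 microseconds of work per chunk
    hpx::for_each(
        hpx::execution::par.with(hpx::execution::persistent_auto_chunk_size(200us)),
        v.begin(), v.end(), [](int& x) { ++x; });
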
namespace hpx
namespace parallel
namespace execution
template<typename R, typename ...Ts>
class polymorphic_executor<R(Ts...)> : private hpx::parallel::execution::detail::polymorphic_executor_base

Public Types

template<typename T>
using future_type = hpx::future<R>

Public Functions

constexpr polymorphic_executor()
polymorphic_executor(polymorphic_executor const &other)
polymorphic_executor(polymorphic_executor &&other)
polymorphic_executor &operator=(polymorphic_executor const &other)
polymorphic_executor &operator=(polymorphic_executor &&other)
template<typename Exec, typename PE = typename std::decay<Exec>::type, typename Enable = typename std::enable_if<!std::is_same<PE, polymorphic_executor>::value>::type>
polymorphic_executor(Exec &&exec)
template<typename Exec, typename PE = typename std::decay<Exec>::type, typename Enable = typename std::enable_if<!std::is_same<PE, polymorphic_executor>::value>::type>
polymorphic_executor &operator=(Exec &&exec)
void reset()
template<typename F>
void post(F &&f, Ts... ts) const
template<typename F>
R sync_execute(F &&f, Ts... ts) const
template<typename F>
hpx::future<R> async_execute(F &&f, Ts... ts) const
template<typename F, typename Future>
hpx::future<R> then_execute(F &&f, Future &&predecessor, Ts&&... ts) const
template<typename F, typename Shape>
std::vector<R> bulk_sync_execute(F &&f, Shape const &s, Ts&&... ts) const
template<typename F, typename Shape>
std::vector<hpx::future<R>> bulk_async_execute(F &&f, Shape const &s, Ts&&... ts) const
template<typename F, typename Shape>
hpx::future<std::vector<R>> bulk_then_execute(F &&f, Shape const &s, hpx::shared_future<void> const &predecessor, Ts&&... ts) const

Private Types

template<>
using base_type = detail::polymorphic_executor_base
template<>
using vtable = detail::polymorphic_executor_vtable<R(Ts...)>

Private Functions

void assign(std::nullptr_t)
template<typename Exec>
void assign(Exec &&exec)

Private Static Functions

static constexpr vtable const *get_empty_vtable()
template<typename T>
static constexpr vtable const *get_vtable()
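
A hedged usage sketch (hpx::execution::parallel_executor is assumed to be available as the wrapped executor type):

    namespace pe = hpx::parallel::execution;
    // type-erase a concrete executor behind the void(int) interface
    pe::polymorphic_executor<void(int)> exec(hpx::execution::parallel_executor{});
    exec.post([](int i) { /* fire-and-forget */ }, 1);
    hpx::future<void> f = exec.async_execute([](int i) { /* ... */ }, 2);
    f.get();
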
namespace hpx
namespace parallel
namespace execution

Variables

HPX_INLINE_CONSTEXPR_VARIABLE create_rebound_policy_t hpx::parallel::execution::create_rebound_policy = {}
struct create_rebound_policy_t

Public Functions

template<typename ExPolicy, typename Executor, typename Parameters>
constexpr decltype(auto) operator()(ExPolicy&&, Executor &&exec, Parameters &&parameters) const
template<typename ExPolicy, typename Executor, typename Parameters>
struct rebind_executor
#include <rebind_executor.hpp>

Rebind the type of executor used by an execution policy. The execution category of Executor shall not be weaker than that of ExecutionPolicy.

Public Types

template<>
using type = typename policy_type::template rebind::type

The type of the rebound execution policy.

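A hedged sketch of rebinding a policy (hpx::execution::par and hpx::execution::parallel_executor are assumed from the execution policies and executors modules):

    auto policy = hpx::parallel::execution::create_rebound_policy(
        hpx::execution::par, hpx::execution::parallel_executor{},
        hpx::execution::static_chunk_size(64));
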
namespace hpx
namespace execution
struct static_chunk_size
#include <static_chunk_size.hpp>

Loop iterations are divided into pieces of size chunk_size and then assigned to threads. If chunk_size is not specified, the iterations are evenly (if possible) divided contiguously among the threads.

Note

This executor parameters type is equivalent to OpenMP’s STATIC scheduling directive.

Public Functions

constexpr static_chunk_size()

Construct a static_chunk_size executor parameters object

Note

By default the chunk size (the number of loop iterations scheduled together) is determined from the number of available cores and the overall number of loop iterations to schedule.

constexpr static_chunk_size(std::size_t chunk_size)

Construct a static_chunk_size executor parameters object

Parameters
  • chunk_size: [in] The optional chunk size to use as the number of loop iterations to run on a single thread.

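A hedged usage sketch (hpx::for_each and a std::vector<int> v are assumed, as in the earlier chunk-size examples):

    // run fixed blocks of 64 iterations per scheduled task
    hpx::for_each(hpx::execution::par.with(hpx::execution::static_chunk_size(64)),
                  v.begin(), v.end(), [](int& x) { ++x; });
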
namespace hpx
namespace parallel
namespace execution

Typedefs

template<typename Executor, typename T, typename ...Ts>
using executor_future_t = typename executor_future<Executor, T, Ts...>::type
template<typename Executor>
struct executor_context

Public Types

template<>
using type = std::decay_t<decltype(std::declval<Executor const&>().context())>
template<typename Executor>
struct executor_execution_category

Public Types

template<>
using type = hpx::util::detected_or_t<hpx::execution::unsequenced_execution_tag, execution_category, Executor>

Private Types

template<typename T>
using execution_category = typename T::execution_category
template<typename Executor>
struct executor_index

Public Types

template<>
using type = hpx::util::detected_or_t<typename executor_shape<Executor>::type, index_type, Executor>

Private Types

template<typename T>
using index_type = typename T::index_type
template<typename Executor>
struct executor_parameters_type

Public Types

template<>
using type = hpx::util::detected_or_t<hpx::execution::static_chunk_size, parameters_type, Executor>

Private Types

template<typename T>
using parameters_type = typename T::parameters_type
template<typename Executor>
struct executor_shape

Public Types

template<>
using type = hpx::util::detected_or_t<std::size_t, shape_type, Executor>

Private Types

template<typename T>
using shape_type = typename T::shape_type
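
A minimal sketch of the documented fallbacks (my_executor is a hypothetical type without the nested typedefs; <type_traits> is assumed for std::is_same_v):

    struct my_executor {};   // defines none of shape_type, index_type, ...
    static_assert(std::is_same_v<
        hpx::parallel::execution::executor_shape<my_executor>::type, std::size_t>);
    static_assert(std::is_same_v<
        hpx::parallel::execution::executor_execution_category<my_executor>::type,
        hpx::execution::unsequenced_execution_tag>);
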
namespace traits

Typedefs

template<typename Executor>
using executor_context_t = typename executor_context<Executor>::type
template<typename Executor>
using executor_execution_category_t = typename executor_execution_category<Executor>::type
template<typename Executor>
using executor_shape_t = typename executor_shape<Executor>::type
template<typename Executor>
using executor_index_t = typename executor_index<Executor>::type
template<typename Executor, typename T, typename ...Ts>
using executor_future_t = typename executor_future<Executor, T, Ts...>::type
template<typename Executor>
using executor_parameters_type_t = typename executor_parameters_type<Executor>::type

Variables

template<typename T> HPX_INLINE_CONSTEXPR_VARIABLE bool hpx::traits::has_post_member_v = has_post_member<T>::value
template<typename T> HPX_INLINE_CONSTEXPR_VARIABLE bool hpx::traits::has_sync_execute_member_v = has_sync_execute_member<T>::value
template<typename T> HPX_INLINE_CONSTEXPR_VARIABLE bool hpx::traits::has_async_execute_member_v = has_async_execute_member<T>::value
template<typename T> HPX_INLINE_CONSTEXPR_VARIABLE bool hpx::traits::has_then_execute_member_v = has_then_execute_member<T>::value
template<typename T> HPX_INLINE_CONSTEXPR_VARIABLE bool hpx::traits::has_bulk_sync_execute_member_v = has_bulk_sync_execute_member<T>::value
template<typename T> HPX_INLINE_CONSTEXPR_VARIABLE bool hpx::traits::has_bulk_async_execute_member_v = has_bulk_async_execute_member<T>::value
template<typename T> HPX_INLINE_CONSTEXPR_VARIABLE bool hpx::traits::has_bulk_then_execute_member_v = has_bulk_then_execute_member<T>::value
namespace hpx

Variables

template<typename T> HPX_INLINE_CONSTEXPR_VARIABLE bool hpx::is_execution_policy_v = is_execution_policy<T>::value
template<typename T> HPX_INLINE_CONSTEXPR_VARIABLE bool hpx::is_parallel_execution_policy_v = is_parallel_execution_policy<T>::value
template<typename T> HPX_INLINE_CONSTEXPR_VARIABLE bool hpx::is_sequenced_execution_policy_v = is_sequenced_execution_policy<T>::value
template<typename T> HPX_INLINE_CONSTEXPR_VARIABLE bool hpx::is_async_execution_policy_v = is_async_execution_policy<T>::value
template<typename T>
struct is_async_execution_policy : public hpx::detail::is_async_execution_policy<std::decay<T>::type>
#include <is_execution_policy.hpp>

Extension: Detect whether given execution policy makes algorithms asynchronous

  1. The type is_async_execution_policy can be used to detect asynchronous execution policies for the purpose of excluding function signatures from otherwise ambiguous overload resolution participation.

  2. If T is the type of a standard or implementation-defined execution policy, is_async_execution_policy<T> shall be publicly derived from integral_constant<bool, true>, otherwise from integral_constant<bool, false>.

  3. The behavior of a program that adds specializations for is_async_execution_policy is undefined.

template<typename T>
struct is_execution_policy : public hpx::detail::is_execution_policy<std::decay<T>::type>
#include <is_execution_policy.hpp>

  1. The type is_execution_policy can be used to detect execution policies for the purpose of excluding function signatures from otherwise ambiguous overload resolution participation.

  2. If T is the type of a standard or implementation-defined execution policy, is_execution_policy<T> shall be publicly derived from integral_constant<bool, true>, otherwise from integral_constant<bool, false>.

  3. The behavior of a program that adds specializations for is_execution_policy is undefined.

template<typename T>
struct is_parallel_execution_policy : public hpx::detail::is_parallel_execution_policy<std::decay<T>::type>
#include <is_execution_policy.hpp>

Extension: Detect whether given execution policy enables parallelization

  1. The type is_parallel_execution_policy can be used to detect parallel execution policies for the purpose of excluding function signatures from otherwise ambiguous overload resolution participation.

  2. If T is the type of a standard or implementation-defined execution policy, is_parallel_execution_policy<T> shall be publicly derived from integral_constant<bool, true>, otherwise from integral_constant<bool, false>.

  3. The behavior of a program that adds specializations for is_parallel_execution_policy is undefined.

template<typename T>
struct is_sequenced_execution_policy : public hpx::detail::is_sequenced_execution_policy<std::decay<T>::type>
#include <is_execution_policy.hpp>

Extension: Detect whether given execution policy does not enable parallelization

  1. The type is_sequenced_execution_policy can be used to detect non-parallel execution policies for the purpose of excluding function signatures from otherwise ambiguous overload resolution participation.

  2. If T is the type of a standard or implementation-defined execution policy, is_sequenced_execution_policy<T> shall be publicly derived from integral_constant<bool, true>, otherwise from integral_constant<bool, false>.

  3. The behavior of a program that adds specializations for is_sequenced_execution_policy is undefined.

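A minimal sketch of how these traits classify the standard HPX policy objects (hpx::execution::seq and hpx::execution::par are assumed from the execution policies module):

    static_assert(hpx::is_execution_policy_v<decltype(hpx::execution::par)>);
    static_assert(hpx::is_parallel_execution_policy_v<decltype(hpx::execution::par)>);
    static_assert(hpx::is_sequenced_execution_policy_v<decltype(hpx::execution::seq)>);
    static_assert(!hpx::is_async_execution_policy_v<decltype(hpx::execution::par)>);
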
namespace hpx
namespace parallel
namespace traits

Functions

std::size_t count_bits(bool value)