hpx/compute_local/host/block_executor.hpp#

Defined in header hpx/compute_local/host/block_executor.hpp.

See Public API for a list of names and headers that are part of the public HPX API.

template<typename Executor>
struct hpx::parallel::execution::executor_execution_category<compute::host::block_executor<Executor>>#

Public Types

using type = hpx::execution::parallel_execution_tag#
template<typename Executor>
struct is_one_way_executor<compute::host::block_executor<Executor>> : public true_type#
template<typename Executor>
struct is_two_way_executor<compute::host::block_executor<Executor>> : public true_type#
template<typename Executor>
struct is_bulk_one_way_executor<compute::host::block_executor<Executor>> : public true_type#
template<typename Executor>
struct is_bulk_two_way_executor<compute::host::block_executor<Executor>> : public true_type#
namespace hpx
namespace compute#
namespace host#
template<typename Executor = hpx::parallel::execution::restricted_thread_pool_executor>
struct block_executor#
#include <block_executor.hpp>

The block executor can be used to build NUMA-aware programs. It distributes work evenly across the passed targets.

Template Parameters

Executor – The underlying executor to use
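For illustration only, the following sketch constructs a block_executor over the node's NUMA domains and runs a parallel algorithm on it. The numa_domains() helper and the hpx/include/compute.hpp header are assumptions (adjust them to your HPX version); the executor itself is used exactly as documented on this page.

// Sketch: distribute a parallel loop across NUMA domains via block_executor.
#include <hpx/algorithm.hpp>
#include <hpx/compute_local/host/block_executor.hpp>
#include <hpx/execution.hpp>
#include <hpx/hpx_main.hpp>
#include <hpx/include/compute.hpp>    // assumed location of numa_domains()

#include <vector>

int main()
{
    // One target per NUMA domain of this node (assumed helper).
    std::vector<hpx::compute::host::target> targets =
        hpx::compute::host::numa_domains();

    // Default underlying executor: restricted_thread_pool_executor.
    hpx::compute::host::block_executor<> exec(targets);

    std::vector<double> v(1000000, 1.0);

    // Work is distributed evenly across the passed targets.
    hpx::for_each(hpx::execution::par.on(exec), v.begin(), v.end(),
        [](double& x) { x *= 2.0; });

    return 0;
}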

Public Types

using executor_parameters_type = hpx::execution::experimental::default_parameters#

Public Functions

inline explicit block_executor(std::vector<host::target> const &targets, threads::thread_priority priority = threads::thread_priority::high, threads::thread_stacksize stacksize = threads::thread_stacksize::default_, threads::thread_schedule_hint schedulehint = {})#
inline explicit block_executor(std::vector<host::target> &&targets)#
inline block_executor(block_executor const &other)#
inline block_executor(block_executor &&other) noexcept#
inline block_executor &operator=(block_executor const &other)#
inline block_executor &operator=(block_executor &&other) noexcept#
inline std::vector<host::target> const &targets() const noexcept#
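As a sketch of the first constructor overload (argument values are illustrative), an executor can be created with explicit scheduling parameters and its targets inspected afterwards:

// Sketch: construct with explicit scheduling parameters and query targets().
#include <hpx/compute_local/host/block_executor.hpp>

#include <vector>

void make_executor(std::vector<hpx::compute::host::target> const& targets)
{
    hpx::compute::host::block_executor<> exec(
        targets,                                   // targets to distribute work across
        hpx::threads::thread_priority::high,       // default for this overload
        hpx::threads::thread_stacksize::default_,
        hpx::threads::thread_schedule_hint{});

    // Accessor returning the targets passed at construction.
    std::vector<hpx::compute::host::target> const& used = exec.targets();
    (void) used;
}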

Private Functions

inline auto get_next_executor() const#
template<typename F, typename ...Ts>
inline decltype(auto) friend tag_invoke(hpx::parallel::execution::post_t, block_executor const &exec, F &&f, Ts&&... ts)#
template<typename F, typename ...Ts>
inline decltype(auto) friend tag_invoke(hpx::parallel::execution::async_execute_t, block_executor const &exec, F &&f, Ts&&... ts)#
template<typename F, typename ...Ts>
inline decltype(auto) friend tag_invoke(hpx::parallel::execution::sync_execute_t, block_executor const &exec, F &&f, Ts&&... ts)#
template<typename F, typename Shape, typename ...Ts>
inline decltype(auto) bulk_async_execute_impl(F &&f, Shape const &shape, Ts&&... ts) const#
template<typename F, typename Shape, typename ...Ts>
inline decltype(auto) friend tag_invoke(hpx::parallel::execution::bulk_async_execute_t, block_executor const &exec, F &&f, Shape const &shape, Ts&&... ts)#
template<typename F, typename Shape, typename ...Ts>
inline decltype(auto) bulk_sync_execute_impl(F &&f, Shape const &shape, Ts&&... ts) const#
template<typename F, typename Shape, typename ...Ts>
inline decltype(auto) friend tag_invoke(hpx::parallel::execution::bulk_sync_execute_t, block_executor const &exec, F &&f, Shape const &shape, Ts&&... ts)#
inline void init_executors()#
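These tag_invoke overloads, together with the bulk implementation helpers, are the hooks behind the generic customization point objects in hpx::parallel::execution, so user code normally drives a block_executor through those CPOs rather than calling anything listed here directly. A minimal sketch, assuming the CPOs are reachable through hpx/execution.hpp:

// Sketch: drive block_executor through the generic executor CPOs it customizes.
#include <hpx/compute_local/host/block_executor.hpp>
#include <hpx/execution.hpp>

#include <vector>

void cpo_examples(std::vector<hpx::compute::host::target> const& targets)
{
    hpx::compute::host::block_executor<> exec(targets);

    // One-way execution (fire-and-forget) on one of the underlying executors.
    hpx::parallel::execution::post(exec, [] { /* ... */ });

    // Two-way execution: returns a future for the callable's result.
    auto f = hpx::parallel::execution::async_execute(exec, [] { return 42; });

    // Bulk two-way execution: the shape is distributed across the targets.
    std::vector<int> shape = {0, 1, 2, 3};
    auto futures = hpx::parallel::execution::bulk_async_execute(
        exec, [](int i) { /* work for element i */ }, shape);

    f.get();
    (void) futures;
}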

Private Members

std::vector<host::target> targets_#
mutable std::atomic<std::size_t> current_#
std::vector<Executor> executors_#
threads::thread_priority priority_ = threads::thread_priority::high#
threads::thread_stacksize stacksize_ = threads::thread_stacksize::default_#
threads::thread_schedule_hint schedulehint_ = {}#
namespace parallel
namespace execution
template<typename Executor>
struct executor_execution_category<compute::host::block_executor<Executor>>#

Public Types

using type = hpx::execution::parallel_execution_tag#
template<typename Executor>
struct is_one_way_executor<compute::host::block_executor<Executor>> : public true_type#
template<typename Executor>
struct is_two_way_executor<compute::host::block_executor<Executor>> : public true_type#
template<typename Executor>
struct is_bulk_one_way_executor<compute::host::block_executor<Executor>> : public true_type#
template<typename Executor>
struct is_bulk_two_way_executor<compute::host::block_executor<Executor>> : public true_type#