hpx/compute_local/host/block_executor.hpp
hpx/compute_local/host/block_executor.hpp#
See Public API for a list of names and headers that are part of the public HPX API.
-
template<typename Executor>
struct hpx::parallel::execution::executor_execution_category<compute::host::block_executor<Executor>># Public Types
-
typedef hpx::execution::parallel_execution_tag type#
-
typedef hpx::execution::parallel_execution_tag type#
-
template<typename Executor>
struct is_one_way_executor<compute::host::block_executor<Executor>> : public true_type#
-
template<typename Executor>
struct is_two_way_executor<compute::host::block_executor<Executor>> : public true_type#
-
template<typename Executor>
struct is_bulk_one_way_executor<compute::host::block_executor<Executor>> : public true_type#
-
template<typename Executor>
struct is_bulk_two_way_executor<compute::host::block_executor<Executor>> : public true_type#
-
namespace hpx
-
namespace compute#
-
namespace host#
-
template<typename Executor = hpx::parallel::execution::restricted_thread_pool_executor>
struct block_executor# - #include <block_executor.hpp>
The block executor can be used to build NUMA aware programs. It will distribute work evenly across the passed targets.
- Template Parameters
Executor – The underlying executor to use
Public Types
-
using executor_parameters_type = hpx::execution::static_chunk_size#
Public Functions
-
inline block_executor(std::vector<host::target> const &targets, threads::thread_priority priority = threads::thread_priority::high, threads::thread_stacksize stacksize = threads::thread_stacksize::default_, threads::thread_schedule_hint schedulehint = {})#
-
inline block_executor(block_executor const &other)#
-
inline block_executor(block_executor &&other)#
-
inline block_executor &operator=(block_executor const &other)#
-
inline block_executor &operator=(block_executor &&other)#
-
template<typename F, typename ...Ts>
inline hpx::future<typename hpx::util::detail::invoke_deferred_result<F, Ts...>::type> async_execute(F &&f, Ts&&... ts)#
-
template<typename F, typename ...Ts>
inline hpx::util::detail::invoke_deferred_result<F, Ts...>::type sync_execute(F &&f, Ts&&... ts)#
-
template<typename F, typename Shape, typename ...Ts>
inline std::vector<hpx::future<typename parallel::execution::detail::bulk_function_result<F, Shape, Ts...>::type>> bulk_async_execute(F &&f, Shape const &shape, Ts&&... ts)#
Private Functions
-
inline void init_executors()#
Private Members
-
threads::thread_priority priority_ = threads::thread_priority::high#
-
threads::thread_stacksize stacksize_ = threads::thread_stacksize::default_#
-
threads::thread_schedule_hint schedulehint_ = {}#
-
template<typename Executor = hpx::parallel::execution::restricted_thread_pool_executor>
-
namespace host#
-
namespace parallel
-
namespace execution
- template<typename Executor> struct executor_execution_category<compute::host::block_executor<Executor>>
Public Types
-
typedef hpx::execution::parallel_execution_tag type#
-
typedef hpx::execution::parallel_execution_tag type#
- template<typename Executor> struct is_one_way_executor<compute::host::block_executor<Executor>> : public true_type
- template<typename Executor> struct is_two_way_executor<compute::host::block_executor<Executor>> : public true_type
- template<typename Executor> struct is_bulk_one_way_executor<compute::host::block_executor<Executor>> : public true_type
- template<typename Executor> struct is_bulk_two_way_executor<compute::host::block_executor<Executor>> : public true_type
-
namespace execution
-
namespace compute#