diff --git a/include/oneapi/dpl/pstl/hetero/dpcpp/parallel_backend_sycl.h b/include/oneapi/dpl/pstl/hetero/dpcpp/parallel_backend_sycl.h
index 2c98933b540..4ffb4fc8e2e 100644
--- a/include/oneapi/dpl/pstl/hetero/dpcpp/parallel_backend_sycl.h
+++ b/include/oneapi/dpl/pstl/hetero/dpcpp/parallel_backend_sycl.h
@@ -148,8 +148,8 @@ struct iter_mode
     // for zip_iterator
     template
     auto
-    operator()(const oneapi::dpl::zip_iterator& it)
-        -> decltype(oneapi::dpl::__internal::map_zip(*this, it.base()))
+    operator()(const oneapi::dpl::zip_iterator& it) -> decltype(oneapi::dpl::__internal::map_zip(*this,
+                                                                                                 it.base()))
     {
         return oneapi::dpl::__internal::map_zip(*this, it.base());
     }
@@ -511,8 +511,8 @@ struct __parallel_transform_scan_static_single_group_submitter<_Inclusive, _Elem
                 }
             }
 
-            __scan_work_group<_ValueType, _Inclusive>(__group, __lacc_ptr, __lacc_ptr + __n,
-                                                      __lacc_ptr, __bin_op, __init);
+            __scan_work_group<_ValueType, _Inclusive>(__group, __lacc_ptr, __lacc_ptr + __n, __lacc_ptr,
+                                                      __bin_op, __init);
 
             if constexpr (__can_use_subgroup_load_store)
             {
@@ -617,7 +617,7 @@ struct __parallel_copy_if_static_single_group_submitter<_Size, _ElemsPerItem, _W
 
             __scan_work_group<_ValueType, /* _Inclusive */ false>(
                 __group, __lacc_ptr, __lacc_ptr + __elems_per_wg, __lacc_ptr + __elems_per_wg, __bin_op,
-                __init);
+                __init);
 
             for (::std::uint16_t __idx = __item_id; __idx < __n; __idx += _WGSize)
             {
@@ -840,8 +840,8 @@ __parallel_transform_scan(oneapi::dpl::__internal::__device_backend_tag __backen
                     _WriteOp{}, __init, _Inclusive{});
         }
     }
 
-    //else use legacy scan implementation
+    //else use legacy scan implementation
     using _Assigner = unseq_backend::__scan_assigner;
     using _NoAssign = unseq_backend::__scan_no_assign;
     using _UnaryFunctor = unseq_backend::walk_n<_ExecutionPolicy, _UnaryOperation>;
@@ -852,18 +852,18 @@ __parallel_transform_scan(oneapi::dpl::__internal::__device_backend_tag __backen
     _NoOpFunctor __get_data_op;
 
     return __parallel_transform_scan_base(
-        __backend_tag, std::forward<_ExecutionPolicy>(__exec), std::forward<_Range1>(__in_rng),
-        std::forward<_Range2>(__out_rng), __binary_op, __init,
-        // local scan
-        unseq_backend::__scan<_Inclusive, _ExecutionPolicy, _BinaryOperation, _UnaryFunctor, _Assigner, _Assigner,
-                              _NoOpFunctor, _InitType>{__binary_op, _UnaryFunctor{__unary_op}, __assign_op,
-                                                       __assign_op, __get_data_op},
-        // scan between groups
-        unseq_backend::__scan>{
-            __binary_op, _NoOpFunctor{}, __no_assign_op, __assign_op, __get_data_op},
-        // global scan
-        unseq_backend::__global_scan_functor<_Inclusive, _BinaryOperation, _InitType>{__binary_op, __init});
+        __backend_tag, ::std::forward<_ExecutionPolicy>(__exec), ::std::forward<_Range1>(__in_rng),
+        ::std::forward<_Range2>(__out_rng), __binary_op, __init,
+        // local scan
+        unseq_backend::__scan<_Inclusive, _ExecutionPolicy, _BinaryOperation, _UnaryFunctor, _Assigner, _Assigner,
+                              _NoOpFunctor, _InitType>{__binary_op, _UnaryFunctor{__unary_op}, __assign_op,
+                                                       __assign_op, __get_data_op},
+        // scan between groups
+        unseq_backend::__scan>{
+            __binary_op, _NoOpFunctor{}, __no_assign_op, __assign_op, __get_data_op},
+        // global scan
+        unseq_backend::__global_scan_functor<_Inclusive, _BinaryOperation, _InitType>{__binary_op, __init});
 }
 
 template
@@ -886,21 +886,19 @@ struct __invoke_single_group_copy_if
         if (__is_full_group)
             return __par_backend_hetero::__parallel_copy_if_static_single_group_submitter<
                 _SizeType, __num_elems_per_item, __wg_size, true,
-                oneapi::dpl::__par_backend_hetero::__internal::__kernel_name_provider<
+                oneapi::dpl::__par_backend_hetero::__internal::__kernel_name_provider<
                     __scan_copy_single_wg_kernel<::std::integral_constant<::std::uint16_t, __wg_size>,
-                                                 ::std::integral_constant<::std::uint16_t, __num_elems_per_item>,
-                                                 /* _IsFullGroup= */ std::true_type, _CustomName>>
-                >()(
+                                                 ::std::integral_constant<::std::uint16_t, __num_elems_per_item>,
+                                                 /* _IsFullGroup= */ std::true_type, _CustomName>>>()(
                 __exec, ::std::forward<_InRng>(__in_rng), ::std::forward<_OutRng>(__out_rng), __n, _InitType{},
                 _ReduceOp{}, ::std::forward<_Pred>(__pred));
         else
             return __par_backend_hetero::__parallel_copy_if_static_single_group_submitter<
                 _SizeType, __num_elems_per_item, __wg_size, false,
-                oneapi::dpl::__par_backend_hetero::__internal::__kernel_name_provider<
+                oneapi::dpl::__par_backend_hetero::__internal::__kernel_name_provider<
                     __scan_copy_single_wg_kernel<::std::integral_constant<::std::uint16_t, __wg_size>,
-                                                 ::std::integral_constant<::std::uint16_t, __num_elems_per_item>,
-                                                 /* _IsFullGroup= */ std::false_type, _CustomName>>
-                >()(
+                                                 ::std::integral_constant<::std::uint16_t, __num_elems_per_item>,
+                                                 /* _IsFullGroup= */ std::false_type, _CustomName>>>()(
                 __exec, ::std::forward<_InRng>(__in_rng), ::std::forward<_OutRng>(__out_rng), __n, _InitType{},
                 _ReduceOp{}, ::std::forward<_Pred>(__pred));
     }
@@ -1637,8 +1635,8 @@ struct __is_radix_sort_usable_for_type
     static constexpr bool value =
 #if _USE_RADIX_SORT
         (::std::is_arithmetic_v<_T> || ::std::is_same_v) &&
-        (__internal::__is_comp_ascending<::std::decay_t<_Compare>>::value ||
-         __internal::__is_comp_descending<::std::decay_t<_Compare>>::value);
+        (__internal::__is_comp_ascending<::std::decay_t<_Compare>>::value ||
+         __internal::__is_comp_descending<::std::decay_t<_Compare>>::value);
 #else
         false;
 #endif
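For reviewers who want to exercise the code paths touched above (the single-work-group and legacy scan in __parallel_transform_scan, and the single-group copy_if dispatch), the sketch below is a minimal way to reach them through oneDPL's public API. It is not part of the patch: the buffer size, the predicate, and the use of shared USM are arbitrary choices for illustration; only the public oneDPL/SYCL calls (make_device_policy, inclusive_scan, copy_if, malloc_shared) are assumed, and which internal path is actually selected depends on the device and problem size.

// Illustrative only, not part of this patch: drives the scan and copy_if
// backends touched above through the public oneDPL API.
#include <oneapi/dpl/execution>
#include <oneapi/dpl/algorithm>
#include <oneapi/dpl/numeric>
#include <sycl/sycl.hpp>
#include <iostream>

int main()
{
    sycl::queue q;                                                // default device
    auto policy = oneapi::dpl::execution::make_device_policy(q);

    const int n = 1024; // small enough that the single-work-group scan may be chosen
    int* data = sycl::malloc_shared<int>(n, q);
    int* scanned = sycl::malloc_shared<int>(n, q);
    int* evens = sycl::malloc_shared<int>(n, q);
    for (int i = 0; i < n; ++i)
        data[i] = i % 7;

    // Goes through __parallel_transform_scan: for small inputs the
    // single-work-group submitter may be used, otherwise the legacy
    // multi-pass scan reformatted above.
    oneapi::dpl::inclusive_scan(policy, data, data + n, scanned);

    // copy_if with a device policy; small inputs may be dispatched via
    // __invoke_single_group_copy_if (hunk at line 886).
    int* evens_end = oneapi::dpl::copy_if(policy, data, data + n, evens,
                                          [](int x) { return x % 2 == 0; });

    std::cout << "last prefix sum: " << scanned[n - 1]
              << ", evens copied: " << (evens_end - evens) << '\n';

    sycl::free(data, q);
    sycl::free(scanned, q);
    sycl::free(evens, q);
}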