// Simd fixed_size ABI specific implementations -*- C++ -*-

// Copyright (C) 2020-2021 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.
/*
 * The fixed_size ABI gives the following guarantees:
 *  - simd objects are passed via the stack
 *  - memory layout of `simd<_Tp, _Np>` is equivalent to `array<_Tp, _Np>`
 *  - alignment of `simd<_Tp, _Np>` is `_Np * sizeof(_Tp)` if _Np is a
 *    power-of-2 value, otherwise `std::__bit_ceil(_Np * sizeof(_Tp))` (Note:
 *    if the alignment were to exceed the system/compiler maximum, it is
 *    bounded to that maximum)
 *  - simd_mask objects are passed like bitset<_Np>
 *  - memory layout of `simd_mask<_Tp, _Np>` is equivalent to `bitset<_Np>`
 *  - alignment of `simd_mask<_Tp, _Np>` is equal to the alignment of
 *    `bitset<_Np>`
 */
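
// Illustration only (not part of the original header): on a target whose
// maximum alignment is not exceeded, the guarantees above imply, e.g.:
//
//   namespace stdx = std::experimental;
//   static_assert(sizeof(stdx::fixed_size_simd<float, 4>)
//                   == sizeof(std::array<float, 4>));
//   static_assert(alignof(stdx::fixed_size_simd<float, 4>)
//                   == 4 * sizeof(float));                 // _Np is a power of 2
//   static_assert(alignof(stdx::fixed_size_simd<float, 3>)
//                   == std::__bit_ceil(3 * sizeof(float))); // rounded up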

#ifndef _GLIBCXX_EXPERIMENTAL_SIMD_FIXED_SIZE_H_
#define _GLIBCXX_EXPERIMENTAL_SIMD_FIXED_SIZE_H_

#if __cplusplus >= 201703L

#include <array>

_GLIBCXX_SIMD_BEGIN_NAMESPACE

// __simd_tuple_element {{{
template <size_t _I, typename _Tp>
  struct __simd_tuple_element;

template <typename _Tp, typename _A0, typename... _As>
  struct __simd_tuple_element<0, _SimdTuple<_Tp, _A0, _As...>>
  { using type = simd<_Tp, _A0>; };

template <size_t _I, typename _Tp, typename _A0, typename... _As>
  struct __simd_tuple_element<_I, _SimdTuple<_Tp, _A0, _As...>>
  {
    using type =
      typename __simd_tuple_element<_I - 1, _SimdTuple<_Tp, _As...>>::type;
  };

template <size_t _I, typename _Tp>
  using __simd_tuple_element_t = typename __simd_tuple_element<_I, _Tp>::type;
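
// Illustration only: __simd_tuple_element_t<1, _SimdTuple<float, _A0, _A1>>
// is simd<float, _A1>; each recursion step peels one ABI tag off the pack.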

// }}}
// __simd_tuple_concat {{{

template <typename _Tp, typename... _A0s, typename... _A1s>
  _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple<_Tp, _A0s..., _A1s...>
  __simd_tuple_concat(const _SimdTuple<_Tp, _A0s...>& __left,
                      const _SimdTuple<_Tp, _A1s...>& __right)
  {
    if constexpr (sizeof...(_A0s) == 0)
      return __right;
    else if constexpr (sizeof...(_A1s) == 0)
      return __left;
    else
      return {__left.first, __simd_tuple_concat(__left.second, __right)};
  }

template <typename _Tp, typename _A10, typename... _A1s>
  _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple<_Tp, simd_abi::scalar, _A10,
                                               _A1s...>
  __simd_tuple_concat(const _Tp& __left,
                      const _SimdTuple<_Tp, _A10, _A1s...>& __right)
  { return {__left, __right}; }
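
// Illustration only: concatenating _SimdTuple<float, _A0> with
// _SimdTuple<float, _A1, _A2> yields _SimdTuple<float, _A0, _A1, _A2>; the
// second overload prepends a single value as a simd_abi::scalar element.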

// }}}
// __simd_tuple_pop_front {{{
// Returns the tail of __x, i.e. the _SimdTuple that remains after dropping
// the first _Np elements.
// Precondition: _Np must match the number of elements in __first (recursively)
template <size_t _Np, typename _Tp>
  _GLIBCXX_SIMD_INTRINSIC constexpr decltype(auto)
  __simd_tuple_pop_front(_Tp&& __x)
  {
    if constexpr (_Np == 0)
      return static_cast<_Tp&&>(__x);
    else
      {
        using _Up = __remove_cvref_t<_Tp>;
        static_assert(_Np >= _Up::_S_first_size);
        return __simd_tuple_pop_front<_Np - _Up::_S_first_size>(__x.second);
      }
  }
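
// Illustration only: given _SimdTuple<float, _A0, _A1> __t with
// simd_size_v<float, _A0> == 4, __simd_tuple_pop_front<4>(__t) returns
// __t.second, i.e. the remaining _SimdTuple<float, _A1>.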

// }}}
// __get_simd_at<_Np> {{{1
struct __as_simd {};

struct __as_simd_tuple {};

template <typename _Tp, typename _A0, typename... _Abis>
  _GLIBCXX_SIMD_INTRINSIC constexpr simd<_Tp, _A0>
  __simd_tuple_get_impl(__as_simd, const _SimdTuple<_Tp, _A0, _Abis...>& __t,
                        _SizeConstant<0>)
  { return {__private_init, __t.first}; }

template <typename _Tp, typename _A0, typename... _Abis>
  _GLIBCXX_SIMD_INTRINSIC constexpr const auto&
  __simd_tuple_get_impl(__as_simd_tuple,
                        const _SimdTuple<_Tp, _A0, _Abis...>& __t,
                        _SizeConstant<0>)
  { return __t.first; }

template <typename _Tp, typename _A0, typename... _Abis>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto&
  __simd_tuple_get_impl(__as_simd_tuple, _SimdTuple<_Tp, _A0, _Abis...>& __t,
                        _SizeConstant<0>)
  { return __t.first; }

template <typename _R, size_t _Np, typename _Tp, typename... _Abis>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __simd_tuple_get_impl(_R, const _SimdTuple<_Tp, _Abis...>& __t,
                        _SizeConstant<_Np>)
  { return __simd_tuple_get_impl(_R(), __t.second, _SizeConstant<_Np - 1>()); }

template <size_t _Np, typename _Tp, typename... _Abis>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto&
  __simd_tuple_get_impl(__as_simd_tuple, _SimdTuple<_Tp, _Abis...>& __t,
                        _SizeConstant<_Np>)
  {
    return __simd_tuple_get_impl(__as_simd_tuple(), __t.second,
                                 _SizeConstant<_Np - 1>());
  }

template <size_t _Np, typename _Tp, typename... _Abis>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __get_simd_at(const _SimdTuple<_Tp, _Abis...>& __t)
  { return __simd_tuple_get_impl(__as_simd(), __t, _SizeConstant<_Np>()); }

// }}}
// __get_tuple_at<_Np> {{{
template <size_t _Np, typename _Tp, typename... _Abis>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto
  __get_tuple_at(const _SimdTuple<_Tp, _Abis...>& __t)
  {
    return __simd_tuple_get_impl(__as_simd_tuple(), __t, _SizeConstant<_Np>());
  }

template <size_t _Np, typename _Tp, typename... _Abis>
  _GLIBCXX_SIMD_INTRINSIC constexpr auto&
  __get_tuple_at(_SimdTuple<_Tp, _Abis...>& __t)
  {
    return __simd_tuple_get_impl(__as_simd_tuple(), __t, _SizeConstant<_Np>());
  }

// __tuple_element_meta {{{1
template <typename _Tp, typename _Abi, size_t _Offset>
  struct __tuple_element_meta : public _Abi::_SimdImpl
  {
    static_assert(is_same_v<typename _Abi::_SimdImpl::abi_type,
                            _Abi>); // this fails e.g. when _SimdImpl is an
                                    // alias for _SimdImplBuiltin<_DifferentAbi>
    using value_type = _Tp;
    using abi_type = _Abi;
    using _Traits = _SimdTraits<_Tp, _Abi>;
    using _MaskImpl = typename _Abi::_MaskImpl;
    using _MaskMember = typename _Traits::_MaskMember;
    using simd_type = simd<_Tp, _Abi>;
    static constexpr size_t _S_offset = _Offset;
    static constexpr size_t _S_size() { return simd_size<_Tp, _Abi>::value; }
    static constexpr _MaskImpl _S_mask_impl = {};

    template <size_t _Np, bool _Sanitized>
      _GLIBCXX_SIMD_INTRINSIC static auto
      _S_submask(_BitMask<_Np, _Sanitized> __bits)
      { return __bits.template _M_extract<_Offset, _S_size()>(); }

    template <size_t _Np, bool _Sanitized>
      _GLIBCXX_SIMD_INTRINSIC static _MaskMember
      _S_make_mask(_BitMask<_Np, _Sanitized> __bits)
      {
        return _MaskImpl::template _S_convert<_Tp>(
          __bits.template _M_extract<_Offset, _S_size()>()._M_sanitized());
      }

    _GLIBCXX_SIMD_INTRINSIC static _ULLong
    _S_mask_to_shifted_ullong(_MaskMember __k)
    { return _MaskImpl::_S_to_bits(__k).to_ullong() << _Offset; }
  };

template <size_t _Offset, typename _Tp, typename _Abi, typename... _As>
  __tuple_element_meta<_Tp, _Abi, _Offset>
  __make_meta(const _SimdTuple<_Tp, _Abi, _As...>&)
  { return {}; }

// }}}1
// _WithOffset wrapper class {{{
template <size_t _Offset, typename _Base>
  struct _WithOffset : public _Base
  {
    static inline constexpr size_t _S_offset = _Offset;

    _GLIBCXX_SIMD_INTRINSIC char* _M_as_charptr()
    {
      return reinterpret_cast<char*>(this)
               + _S_offset * sizeof(typename _Base::value_type);
    }

    _GLIBCXX_SIMD_INTRINSIC const char* _M_as_charptr() const
    {
      return reinterpret_cast<const char*>(this)
               + _S_offset * sizeof(typename _Base::value_type);
    }
  };

// make _WithOffset<_WithOffset> ill-formed to use:
template <size_t _O0, size_t _O1, typename _Base>
  struct _WithOffset<_O0, _WithOffset<_O1, _Base>> {};

template <size_t _Offset, typename _Tp>
  decltype(auto)
  __add_offset(_Tp& __base)
  { return static_cast<_WithOffset<_Offset, __remove_cvref_t<_Tp>>&>(__base); }

template <size_t _Offset, typename _Tp>
  decltype(auto)
  __add_offset(const _Tp& __base)
  {
    return static_cast<const _WithOffset<_Offset, __remove_cvref_t<_Tp>>&>(
      __base);
  }

template <size_t _Offset, size_t _ExistingOffset, typename _Tp>
  decltype(auto)
  __add_offset(_WithOffset<_ExistingOffset, _Tp>& __base)
  {
    return static_cast<_WithOffset<_Offset + _ExistingOffset, _Tp>&>(
      static_cast<_Tp&>(__base));
  }

template <size_t _Offset, size_t _ExistingOffset, typename _Tp>
  decltype(auto)
  __add_offset(const _WithOffset<_ExistingOffset, _Tp>& __base)
  {
    return static_cast<const _WithOffset<_Offset + _ExistingOffset, _Tp>&>(
      static_cast<const _Tp&>(__base));
  }

template <typename _Tp>
  constexpr inline size_t __offset = 0;

template <size_t _Offset, typename _Tp>
  constexpr inline size_t __offset<_WithOffset<_Offset, _Tp>>
    = _WithOffset<_Offset, _Tp>::_S_offset;

template <typename _Tp>
  constexpr inline size_t __offset<const _Tp> = __offset<_Tp>;

template <typename _Tp>
  constexpr inline size_t __offset<_Tp&> = __offset<_Tp>;

template <typename _Tp>
  constexpr inline size_t __offset<_Tp&&> = __offset<_Tp>;
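
// Illustration only: __add_offset<4>(__x) views __x as shifted by 4 elements,
// and offsets compose via the _WithOffset overloads, so
// __add_offset<2>(__add_offset<4>(__x)) yields a reference whose
// __offset<...> value is 6.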

// }}}
// _SimdTuple specializations {{{1
// empty {{{2
template <typename _Tp>
  struct _SimdTuple<_Tp>
  {
    using value_type = _Tp;
    static constexpr size_t _S_tuple_size = 0;
    static constexpr size_t _S_size() { return 0; }
  };

// _SimdTupleData {{{2
template <typename _FirstType, typename _SecondType>
  struct _SimdTupleData
  {
    _FirstType first;
    _SecondType second;

    _GLIBCXX_SIMD_INTRINSIC
    constexpr bool _M_is_constprop() const
    {
      if constexpr (is_class_v<_FirstType>)
        return first._M_is_constprop() && second._M_is_constprop();
      else
        return __builtin_constant_p(first) && second._M_is_constprop();
    }
  };

template <typename _FirstType, typename _Tp>
  struct _SimdTupleData<_FirstType, _SimdTuple<_Tp>>
  {
    _FirstType first;
    static constexpr _SimdTuple<_Tp> second = {};

    _GLIBCXX_SIMD_INTRINSIC
    constexpr bool _M_is_constprop() const
    {
      if constexpr (is_class_v<_FirstType>)
        return first._M_is_constprop();
      else
        return __builtin_constant_p(first);
    }
  };

// 1 or more {{{2
template <typename _Tp, typename _Abi0, typename... _Abis>
  struct _SimdTuple<_Tp, _Abi0, _Abis...>
  : _SimdTupleData<typename _SimdTraits<_Tp, _Abi0>::_SimdMember,
                   _SimdTuple<_Tp, _Abis...>>
  {
    static_assert(!__is_fixed_size_abi_v<_Abi0>);
    using value_type = _Tp;
    using _FirstType = typename _SimdTraits<_Tp, _Abi0>::_SimdMember;
    using _FirstAbi = _Abi0;
    using _SecondType = _SimdTuple<_Tp, _Abis...>;
    static constexpr size_t _S_tuple_size = sizeof...(_Abis) + 1;

    static constexpr size_t _S_size()
    { return simd_size_v<_Tp, _Abi0> + _SecondType::_S_size(); }

    static constexpr size_t _S_first_size = simd_size_v<_Tp, _Abi0>;
    static constexpr bool _S_is_homogeneous = (is_same_v<_Abi0, _Abis> && ...);

    using _Base = _SimdTupleData<typename _SimdTraits<_Tp, _Abi0>::_SimdMember,
                                 _SimdTuple<_Tp, _Abis...>>;
    using _Base::first;
    using _Base::second;

    _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple() = default;
    _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple(const _SimdTuple&) = default;
    _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple& operator=(const _SimdTuple&)
      = default;

    template <typename _Up>
      _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple(_Up&& __x)
      : _Base{static_cast<_Up&&>(__x)} {}

    template <typename _Up, typename _Up2>
      _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple(_Up&& __x, _Up2&& __y)
      : _Base{static_cast<_Up&&>(__x), static_cast<_Up2&&>(__y)} {}

    template <typename _Up>
      _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple(_Up&& __x, _SimdTuple<_Tp>)
      : _Base{static_cast<_Up&&>(__x)} {}

    _GLIBCXX_SIMD_INTRINSIC char* _M_as_charptr()
    { return reinterpret_cast<char*>(this); }

    _GLIBCXX_SIMD_INTRINSIC const char* _M_as_charptr() const
    { return reinterpret_cast<const char*>(this); }

    template <size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC constexpr auto& _M_at()
      {
        if constexpr (_Np == 0)
          return first;
        else
          return second.template _M_at<_Np - 1>();
      }

    template <size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC constexpr const auto& _M_at() const
      {
        if constexpr (_Np == 0)
          return first;
        else
          return second.template _M_at<_Np - 1>();
      }

    template <size_t _Np>
      _GLIBCXX_SIMD_INTRINSIC constexpr auto _M_simd_at() const
      {
        if constexpr (_Np == 0)
          return simd<_Tp, _Abi0>(__private_init, first);
        else
          return second.template _M_simd_at<_Np - 1>();
      }

    template <size_t _Offset = 0, typename _Fp>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SimdTuple
      _S_generate(_Fp&& __gen, _SizeConstant<_Offset> = {})
      {
        auto&& __first = __gen(__tuple_element_meta<_Tp, _Abi0, _Offset>());
        if constexpr (_S_tuple_size == 1)
          return {__first};
        else
          return {__first,
                  _SecondType::_S_generate(
                    static_cast<_Fp&&>(__gen),
                    _SizeConstant<_Offset + simd_size_v<_Tp, _Abi0>>())};
      }

    template <size_t _Offset = 0, typename _Fp, typename... _More>
      _GLIBCXX_SIMD_INTRINSIC _SimdTuple
      _M_apply_wrapped(_Fp&& __fun, const _More&... __more) const
      {
        auto&& __first
          = __fun(__make_meta<_Offset>(*this), first, __more.first...);
        if constexpr (_S_tuple_size == 1)
          return {__first};
        else
          return {
            __first,
            second.template _M_apply_wrapped<_Offset + simd_size_v<_Tp, _Abi0>>(
              static_cast<_Fp&&>(__fun), __more.second...)};
      }

    template <typename _Tup>
      _GLIBCXX_SIMD_INTRINSIC constexpr decltype(auto)
      _M_extract_argument(_Tup&& __tup) const
      {
        using _TupT = typename __remove_cvref_t<_Tup>::value_type;
        if constexpr (is_same_v<_SimdTuple, __remove_cvref_t<_Tup>>)
          return __tup.first;
        else if (__builtin_is_constant_evaluated())
          return __fixed_size_storage_t<_TupT, _S_first_size>::_S_generate(
            [&](auto __meta) constexpr {
              return __meta._S_generator(
                [&](auto __i) constexpr { return __tup[__i]; },
                static_cast<_TupT*>(nullptr));
            });
        else
          return [&]() {
            __fixed_size_storage_t<_TupT, _S_first_size> __r;
            __builtin_memcpy(__r._M_as_charptr(), __tup._M_as_charptr(),
                             sizeof(__r));
            return __r;
          }();
      }

    template <typename _Tup>
      _GLIBCXX_SIMD_INTRINSIC constexpr auto&
      _M_skip_argument(_Tup&& __tup) const
      {
        static_assert(_S_tuple_size > 1);
        using _Up = __remove_cvref_t<_Tup>;
        constexpr size_t __off = __offset<_Up>;
        if constexpr (_S_first_size == _Up::_S_first_size && __off == 0)
          return __tup.second;
        else if constexpr (_S_first_size > _Up::_S_first_size
                           && _S_first_size % _Up::_S_first_size == 0
                           && __off == 0)
          return __simd_tuple_pop_front<_S_first_size>(__tup);
        else if constexpr (_S_first_size + __off < _Up::_S_first_size)
          return __add_offset<_S_first_size>(__tup);
        else if constexpr (_S_first_size + __off == _Up::_S_first_size)
          return __tup.second;
        else
          __assert_unreachable<_Tup>();
      }

    template <size_t _Offset, typename... _More>
      _GLIBCXX_SIMD_INTRINSIC constexpr void
      _M_assign_front(const _SimdTuple<_Tp, _Abi0, _More...>& __x) &
      {
        static_assert(_Offset == 0);
        first = __x.first;
        if constexpr (sizeof...(_More) > 0)
          {
            static_assert(sizeof...(_Abis) >= sizeof...(_More));
            second.template _M_assign_front<0>(__x.second);
          }
      }

    template <size_t _Offset>
      _GLIBCXX_SIMD_INTRINSIC constexpr void
      _M_assign_front(const _FirstType& __x) &
      {
        static_assert(_Offset == 0);
        first = __x;
      }

    template <size_t _Offset, typename... _As>
      _GLIBCXX_SIMD_INTRINSIC constexpr void
      _M_assign_front(const _SimdTuple<_Tp, _As...>& __x) &
      {
        __builtin_memcpy(_M_as_charptr() + _Offset * sizeof(value_type),
                         __x._M_as_charptr(),
                         sizeof(_Tp) * _SimdTuple<_Tp, _As...>::_S_size());
      }

    /*
     * Iterate over the `first` members along this _SimdTuple chain and call
     * __fun for each of them. If additional arguments are passed via __more,
     * chunk them into _SimdTuple or __vector_type_t objects holding the same
     * number of values.
     */
    template <typename _Fp, typename... _More>
      _GLIBCXX_SIMD_INTRINSIC constexpr _SimdTuple
      _M_apply_per_chunk(_Fp&& __fun, _More&&... __more) const
      {
        if constexpr ((...
                       || conjunction_v<
                            is_lvalue_reference<_More>,
                            negation<is_const<remove_reference_t<_More>>>>) )
          {
            // need to write back at least one of __more after calling __fun
            auto&& __first = [&](auto... __args) constexpr {
              auto __r = __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), first,
                               __args...);
              [[maybe_unused]] auto&& __ignore_me = {(
                [](auto&& __dst, const auto& __src) {
                  if constexpr (is_assignable_v<decltype(__dst),
                                                decltype(__dst)>)
                    {
                      __dst.template _M_assign_front<__offset<decltype(__dst)>>(
                        __src);
                    }
                }(static_cast<_More&&>(__more), __args),
                0)...};
              return __r;
            }(_M_extract_argument(__more)...);
            if constexpr (_S_tuple_size == 1)
              return {__first};
            else
              return {__first,
                      second._M_apply_per_chunk(static_cast<_Fp&&>(__fun),
                                                _M_skip_argument(__more)...)};
          }
        else
          {
            auto&& __first = __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), first,
                                   _M_extract_argument(__more)...);
            if constexpr (_S_tuple_size == 1)
              return {__first};
            else
              return {__first,
                      second._M_apply_per_chunk(static_cast<_Fp&&>(__fun),
                                                _M_skip_argument(__more)...)};
          }
      }

    template <typename _R = _Tp, typename _Fp, typename... _More>
      _GLIBCXX_SIMD_INTRINSIC auto _M_apply_r(_Fp&& __fun,
                                              const _More&... __more) const
      {
        auto&& __first = __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), first,
                               __more.first...);
        if constexpr (_S_tuple_size == 1)
          return __first;
        else
          return __simd_tuple_concat<_R>(
            __first, second.template _M_apply_r<_R>(static_cast<_Fp&&>(__fun),
                                                    __more.second...));
      }

    template <typename _Fp, typename... _More>
      _GLIBCXX_SIMD_INTRINSIC constexpr friend _SanitizedBitMask<_S_size()>
      _M_test(const _Fp& __fun, const _SimdTuple& __x, const _More&... __more)
      {
        const _SanitizedBitMask<_S_first_size> __first
          = _Abi0::_MaskImpl::_S_to_bits(
              __fun(__tuple_element_meta<_Tp, _Abi0, 0>(), __x.first,
                    __more.first...));
        if constexpr (_S_tuple_size == 1)
          return __first;
        else
          return _M_test(__fun, __x.second, __more.second...)
                   ._M_prepend(__first);
      }

    template <typename _Up, _Up _I>
      _GLIBCXX_SIMD_INTRINSIC constexpr _Tp
      operator[](integral_constant<_Up, _I>) const noexcept
      {
        if constexpr (_I < simd_size_v<_Tp, _Abi0>)
          return _M_subscript_read(_I);
        else
          return second[integral_constant<_Up, _I - simd_size_v<_Tp, _Abi0>>()];
      }

    _Tp operator[](size_t __i) const noexcept
    {
      if constexpr (_S_tuple_size == 1)
        return _M_subscript_read(__i);
      else
        {
#ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
          return reinterpret_cast<const __may_alias<_Tp>*>(this)[__i];
#else
          if constexpr (__is_scalar_abi<_Abi0>())
            {
              const _Tp* ptr = &first;
              return ptr[__i];
            }
          else
            return __i < simd_size_v<_Tp, _Abi0>
                     ? _M_subscript_read(__i)
                     : second[__i - simd_size_v<_Tp, _Abi0>];
#endif
        }
    }

    void _M_set(size_t __i, _Tp __val) noexcept
    {
      if constexpr (_S_tuple_size == 1)
        return _M_subscript_write(__i, __val);
      else
        {
#ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
          reinterpret_cast<__may_alias<_Tp>*>(this)[__i] = __val;
#else
          if (__i < simd_size_v<_Tp, _Abi0>)
            _M_subscript_write(__i, __val);
          else
            second._M_set(__i - simd_size_v<_Tp, _Abi0>, __val);
#endif
        }
    }

  private:
    // _M_subscript_read/_write {{{
    _Tp _M_subscript_read([[maybe_unused]] size_t __i) const noexcept
    {
      if constexpr (__is_vectorizable_v<_FirstType>)
        return first;
      else
        return first[__i];
    }

    void _M_subscript_write([[maybe_unused]] size_t __i, _Tp __y) noexcept
    {
      if constexpr (__is_vectorizable_v<_FirstType>)
        first = __y;
      else
        first._M_set(__i, __y);
    }

    // }}}
  };
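
// Illustration only: a _SimdTuple chains chunks recursively -- for a
// _SimdTuple<float, _A0, _A1> __t, __t.first is the _A0 chunk and
// __t.second.first the _A1 chunk; with simd_size_v<float, _A0> == 4, __t[5]
// reads element 1 of the second chunk.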

// __make_simd_tuple {{{1
template <typename _Tp, typename _A0>
  _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp, _A0>
  __make_simd_tuple(simd<_Tp, _A0> __x0)
  { return {__data(__x0)}; }

template <typename _Tp, typename _A0, typename... _As>
  _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp, _A0, _As...>
  __make_simd_tuple(const simd<_Tp, _A0>& __x0, const simd<_Tp, _As>&... __xs)
  { return {__data(__x0), __make_simd_tuple(__xs...)}; }

template <typename _Tp, typename _A0>
  _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp, _A0>
  __make_simd_tuple(const typename _SimdTraits<_Tp, _A0>::_SimdMember& __arg0)
  { return {__arg0}; }

template <typename _Tp, typename _A0, typename _A1, typename... _Abis>
  _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp, _A0, _A1, _Abis...>
  __make_simd_tuple(
    const typename _SimdTraits<_Tp, _A0>::_SimdMember& __arg0,
    const typename _SimdTraits<_Tp, _A1>::_SimdMember& __arg1,
    const typename _SimdTraits<_Tp, _Abis>::_SimdMember&... __args)
  { return {__arg0, __make_simd_tuple<_Tp, _A1, _Abis...>(__arg1, __args...)}; }

// __to_simd_tuple {{{1
template <typename _Tp, size_t _Np, typename _V, size_t _NV, typename... _VX>
  _GLIBCXX_SIMD_INTRINSIC constexpr __fixed_size_storage_t<_Tp, _Np>
  __to_simd_tuple(const array<_V, _NV>& __from, const _VX... __fromX);

template <typename _Tp, size_t _Np,
          size_t _Offset = 0, // skip this many elements in __from0
          typename _R = __fixed_size_storage_t<_Tp, _Np>, typename _V0,
          typename _V0VT = _VectorTraits<_V0>, typename... _VX>
  _GLIBCXX_SIMD_INTRINSIC _R constexpr
  __to_simd_tuple(const _V0 __from0, const _VX... __fromX)
  {
    static_assert(is_same_v<typename _V0VT::value_type, _Tp>);
    static_assert(_Offset < _V0VT::_S_full_size);
    using _R0 = __vector_type_t<_Tp, _R::_S_first_size>;
    if constexpr (_R::_S_tuple_size == 1)
      {
        if constexpr (_Np == 1)
          return _R{__from0[_Offset]};
        else if constexpr (_Offset == 0 && _V0VT::_S_full_size >= _Np)
          return _R{__intrin_bitcast<_R0>(__from0)};
        else if constexpr (_Offset * 2 == _V0VT::_S_full_size
                           && _V0VT::_S_full_size / 2 >= _Np)
          return _R{__intrin_bitcast<_R0>(__extract_part<1, 2>(__from0))};
        else if constexpr (_Offset * 4 == _V0VT::_S_full_size
                           && _V0VT::_S_full_size / 4 >= _Np)
          return _R{__intrin_bitcast<_R0>(__extract_part<1, 4>(__from0))};
        else
          __assert_unreachable<_Tp>();
      }
    else
      {
        if constexpr (1 == _R::_S_first_size)
          { // extract one scalar and recurse
            if constexpr (_Offset + 1 < _V0VT::_S_full_size)
              return _R{__from0[_Offset],
                        __to_simd_tuple<_Tp, _Np - 1, _Offset + 1>(__from0,
                                                                   __fromX...)};
            else
              return _R{__from0[_Offset],
                        __to_simd_tuple<_Tp, _Np - 1, 0>(__fromX...)};
          }

        // place __from0 into _R::first and recurse for __fromX -> _R::second
        else if constexpr (_V0VT::_S_full_size == _R::_S_first_size
                           && _Offset == 0)
          return _R{__from0,
                    __to_simd_tuple<_Tp, _Np - _R::_S_first_size>(__fromX...)};

        // place lower part of __from0 into _R::first and recurse with _Offset
        else if constexpr (_V0VT::_S_full_size > _R::_S_first_size
                           && _Offset == 0)
          return _R{__intrin_bitcast<_R0>(__from0),
                    __to_simd_tuple<_Tp, _Np - _R::_S_first_size,
                                    _R::_S_first_size>(__from0, __fromX...)};

        // place lower part of second quarter of __from0 into _R::first and
        // recurse with _Offset
        else if constexpr (_Offset * 4 == _V0VT::_S_full_size
                           && _V0VT::_S_full_size >= 4 * _R::_S_first_size)
          return _R{__intrin_bitcast<_R0>(__extract_part<2, 4>(__from0)),
                    __to_simd_tuple<_Tp, _Np - _R::_S_first_size,
                                    _Offset + _R::_S_first_size>(__from0,
                                                                 __fromX...)};

        // place lower half of high half of __from0 into _R::first and recurse
        // with _Offset
        else if constexpr (_Offset * 2 == _V0VT::_S_full_size
                           && _V0VT::_S_full_size >= 4 * _R::_S_first_size)
          return _R{__intrin_bitcast<_R0>(__extract_part<2, 4>(__from0)),
                    __to_simd_tuple<_Tp, _Np - _R::_S_first_size,
                                    _Offset + _R::_S_first_size>(__from0,
                                                                 __fromX...)};

        // place high half of __from0 into _R::first and recurse with __fromX
        else if constexpr (_Offset * 2 == _V0VT::_S_full_size
                           && _V0VT::_S_full_size / 2 >= _R::_S_first_size)
          return _R{__intrin_bitcast<_R0>(__extract_part<1, 2>(__from0)),
                    __to_simd_tuple<_Tp, _Np - _R::_S_first_size, 0>(
                      __fromX...)};

        // ill-formed if some unforeseen pattern is needed
        else
          __assert_unreachable<_Tp>();
      }
  }

template <typename _Tp, size_t _Np, typename _V, size_t _NV, typename... _VX>
  _GLIBCXX_SIMD_INTRINSIC constexpr __fixed_size_storage_t<_Tp, _Np>
  __to_simd_tuple(const array<_V, _NV>& __from, const _VX... __fromX)
  {
    if constexpr (is_same_v<_Tp, _V>)
      {
        static_assert(
          sizeof...(_VX) == 0,
          "An array of scalars must be the last argument to __to_simd_tuple");
        return __call_with_subscripts(
          __from, make_index_sequence<_NV>(),
          [&](const auto... __args) constexpr {
            return __simd_tuple_concat(
              _SimdTuple<_Tp, simd_abi::scalar>{__args}..., _SimdTuple<_Tp>());
          });
      }
    else
      return __call_with_subscripts(
        __from, make_index_sequence<_NV>(),
        [&](const auto... __args) constexpr {
          return __to_simd_tuple<_Tp, _Np>(__args..., __fromX...);
        });
  }

template <size_t, typename _Tp>
  using __to_tuple_helper = _Tp;

template <typename _Tp, typename _A0, size_t _NOut, size_t _Np,
          size_t... _Indexes>
  _GLIBCXX_SIMD_INTRINSIC __fixed_size_storage_t<_Tp, _NOut>
  __to_simd_tuple_impl(index_sequence<_Indexes...>,
                       const array<__vector_type_t<_Tp, simd_size_v<_Tp, _A0>>,
                                   _Np>& __args)
  {
    return __make_simd_tuple<_Tp, __to_tuple_helper<_Indexes, _A0>...>(
      __args[_Indexes]...);
  }

template <typename _Tp, typename _A0, size_t _NOut, size_t _Np,
          typename _R = __fixed_size_storage_t<_Tp, _NOut>>
  _GLIBCXX_SIMD_INTRINSIC _R
  __to_simd_tuple_sized(
    const array<__vector_type_t<_Tp, simd_size_v<_Tp, _A0>>, _Np>& __args)
  {
    static_assert(_Np * simd_size_v<_Tp, _A0> >= _NOut);
    return __to_simd_tuple_impl<_Tp, _A0, _NOut>(
      make_index_sequence<_R::_S_tuple_size>(), __args);
  }

// __optimize_simd_tuple {{{1
template <typename _Tp>
  _GLIBCXX_SIMD_INTRINSIC _SimdTuple<_Tp>
  __optimize_simd_tuple(const _SimdTuple<_Tp>)
  { return {}; }

template <typename _Tp, typename _Ap>
  _GLIBCXX_SIMD_INTRINSIC const _SimdTuple<_Tp, _Ap>&
  __optimize_simd_tuple(const _SimdTuple<_Tp, _Ap>& __x)
  { return __x; }

template <typename _Tp, typename _A0, typename _A1, typename... _Abis,
          typename _R = __fixed_size_storage_t<
            _Tp, _SimdTuple<_Tp, _A0, _A1, _Abis...>::_S_size()>>
  _GLIBCXX_SIMD_INTRINSIC _R
  __optimize_simd_tuple(const _SimdTuple<_Tp, _A0, _A1, _Abis...>& __x)
  {
    using _Tup = _SimdTuple<_Tp, _A0, _A1, _Abis...>;
    if constexpr (is_same_v<_R, _Tup>)
      return __x;
    else if constexpr (is_same_v<typename _R::_FirstType,
                                 typename _Tup::_FirstType>)
      return {__x.first, __optimize_simd_tuple(__x.second)};
    else if constexpr (__is_scalar_abi<_A0>()
                       || _A0::template _S_is_partial<_Tp>)
      return {__generate_from_n_evaluations<_R::_S_first_size,
                                            typename _R::_FirstType>(
                [&](auto __i) { return __x[__i]; }),
              __optimize_simd_tuple(
                __simd_tuple_pop_front<_R::_S_first_size>(__x))};
    else if constexpr (is_same_v<_A0, _A1>
                       && _R::_S_first_size
                            == simd_size_v<_Tp, _A0> + simd_size_v<_Tp, _A1>)
      return {__concat(__x.template _M_at<0>(), __x.template _M_at<1>()),
              __optimize_simd_tuple(__x.second.second)};
    else if constexpr (sizeof...(_Abis) >= 2
                       && _R::_S_first_size == (4 * simd_size_v<_Tp, _A0>)
                       && simd_size_v<_Tp, _A0> == __simd_tuple_element_t<
                            (sizeof...(_Abis) >= 2 ? 3 : 0), _Tup>::size())
      return {
        __concat(__concat(__x.template _M_at<0>(), __x.template _M_at<1>()),
                 __concat(__x.template _M_at<2>(), __x.template _M_at<3>())),
        __optimize_simd_tuple(__x.second.second.second.second)};
    else
      {
        static_assert(sizeof(_R) == sizeof(__x));
        _R __r;
        __builtin_memcpy(__r._M_as_charptr(), __x._M_as_charptr(),
                         sizeof(_Tp) * _R::_S_size());
        return __r;
      }
  }
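
// Illustration only: on a target with a native 4-float ABI,
// __optimize_simd_tuple can collapse a
// _SimdTuple<float, scalar, scalar, scalar, scalar> into the single-chunk
// __fixed_size_storage_t<float, 4>, re-expressing the same values in the
// preferred chunking.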

// __for_each(const _SimdTuple &, Fun) {{{1
template <size_t _Offset = 0, typename _Tp, typename _A0, typename _Fp>
  _GLIBCXX_SIMD_INTRINSIC constexpr void
  __for_each(const _SimdTuple<_Tp, _A0>& __t, _Fp&& __fun)
  { static_cast<_Fp&&>(__fun)(__make_meta<_Offset>(__t), __t.first); }

template <size_t _Offset = 0, typename _Tp, typename _A0, typename _A1,
          typename... _As, typename _Fp>
  _GLIBCXX_SIMD_INTRINSIC constexpr void
  __for_each(const _SimdTuple<_Tp, _A0, _A1, _As...>& __t, _Fp&& __fun)
  {
    __fun(__make_meta<_Offset>(__t), __t.first);
    __for_each<_Offset + simd_size<_Tp, _A0>::value>(__t.second,
                                                     static_cast<_Fp&&>(__fun));
  }

// __for_each(_SimdTuple &, Fun) {{{1
template <size_t _Offset = 0, typename _Tp, typename _A0, typename _Fp>
  _GLIBCXX_SIMD_INTRINSIC constexpr void
  __for_each(_SimdTuple<_Tp, _A0>& __t, _Fp&& __fun)
  { static_cast<_Fp&&>(__fun)(__make_meta<_Offset>(__t), __t.first); }

template <size_t _Offset = 0, typename _Tp, typename _A0, typename _A1,
          typename... _As, typename _Fp>
  _GLIBCXX_SIMD_INTRINSIC constexpr void
  __for_each(_SimdTuple<_Tp, _A0, _A1, _As...>& __t, _Fp&& __fun)
  {
    __fun(__make_meta<_Offset>(__t), __t.first);
    __for_each<_Offset + simd_size<_Tp, _A0>::value>(__t.second,
                                                     static_cast<_Fp&&>(__fun));
  }

// __for_each(_SimdTuple &, const _SimdTuple &, Fun) {{{1
template <size_t _Offset = 0, typename _Tp, typename _A0, typename _Fp>
  _GLIBCXX_SIMD_INTRINSIC constexpr void
  __for_each(_SimdTuple<_Tp, _A0>& __a, const _SimdTuple<_Tp, _A0>& __b,
             _Fp&& __fun)
  {
    static_cast<_Fp&&>(__fun)(__make_meta<_Offset>(__a), __a.first, __b.first);
  }

template <size_t _Offset = 0, typename _Tp, typename _A0, typename _A1,
          typename... _As, typename _Fp>
  _GLIBCXX_SIMD_INTRINSIC constexpr void
  __for_each(_SimdTuple<_Tp, _A0, _A1, _As...>& __a,
             const _SimdTuple<_Tp, _A0, _A1, _As...>& __b, _Fp&& __fun)
  {
    __fun(__make_meta<_Offset>(__a), __a.first, __b.first);
    __for_each<_Offset + simd_size<_Tp, _A0>::value>(__a.second, __b.second,
                                                     static_cast<_Fp&&>(__fun));
  }

// __for_each(const _SimdTuple &, const _SimdTuple &, Fun) {{{1
template <size_t _Offset = 0, typename _Tp, typename _A0, typename _Fp>
  _GLIBCXX_SIMD_INTRINSIC constexpr void
  __for_each(const _SimdTuple<_Tp, _A0>& __a, const _SimdTuple<_Tp, _A0>& __b,
             _Fp&& __fun)
  {
    static_cast<_Fp&&>(__fun)(__make_meta<_Offset>(__a), __a.first, __b.first);
  }

template <size_t _Offset = 0, typename _Tp, typename _A0, typename _A1,
          typename... _As, typename _Fp>
  _GLIBCXX_SIMD_INTRINSIC constexpr void
  __for_each(const _SimdTuple<_Tp, _A0, _A1, _As...>& __a,
             const _SimdTuple<_Tp, _A0, _A1, _As...>& __b, _Fp&& __fun)
  {
    __fun(__make_meta<_Offset>(__a), __a.first, __b.first);
    __for_each<_Offset + simd_size<_Tp, _A0>::value>(__a.second, __b.second,
                                                     static_cast<_Fp&&>(__fun));
  }
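
// Illustration only: __for_each visits each chunk together with its
// __tuple_element_meta, so a store over all chunks can be written as
//   __for_each(__v, [&](auto __meta, auto __chunk) {
//     __meta._S_store(__chunk, &__mem[__meta._S_offset], _TypeTag<_Tp>());
//   });
// which is essentially how _SimdImplFixedSize::_S_store below is implemented.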

// }}}1
// __extract_part(_SimdTuple) {{{
template <int _Index, int _Total, int _Combine, typename _Tp, typename _A0,
          typename... _As>
  _GLIBCXX_SIMD_INTRINSIC auto // __vector_type_t or _SimdTuple
  __extract_part(const _SimdTuple<_Tp, _A0, _As...>& __x)
  {
    // worst cases:
    // (a) 4, 4, 4 => 3, 3, 3, 3 (_Total = 4)
    // (b) 2, 2, 2 => 3, 3 (_Total = 2)
    // (c) 4, 2 => 2, 2, 2 (_Total = 3)
    using _Tuple = _SimdTuple<_Tp, _A0, _As...>;
    static_assert(_Index + _Combine <= _Total && _Index >= 0 && _Total >= 1);
    constexpr size_t _Np = _Tuple::_S_size();
    static_assert(_Np >= _Total && _Np % _Total == 0);
    constexpr size_t __values_per_part = _Np / _Total;
    [[maybe_unused]] constexpr size_t __values_to_skip
      = _Index * __values_per_part;
    constexpr size_t __return_size = __values_per_part * _Combine;
    using _RetAbi = simd_abi::deduce_t<_Tp, __return_size>;

    // handle (optimize) the simple cases
    if constexpr (_Index == 0 && _Tuple::_S_first_size == __return_size)
      return __x.first._M_data;
    else if constexpr (_Index == 0 && _Total == _Combine)
      return __x;
    else if constexpr (_Index == 0 && _Tuple::_S_first_size >= __return_size)
      return __intrin_bitcast<__vector_type_t<_Tp, __return_size>>(
        __as_vector(__x.first));

    // recurse to skip unused data members at the beginning of _SimdTuple
    else if constexpr (__values_to_skip >= _Tuple::_S_first_size)
      { // recurse
        if constexpr (_Tuple::_S_first_size % __values_per_part == 0)
          {
            constexpr int __parts_in_first
              = _Tuple::_S_first_size / __values_per_part;
            return __extract_part<_Index - __parts_in_first,
                                  _Total - __parts_in_first, _Combine>(
              __x.second);
          }
        else
          return __extract_part<__values_to_skip - _Tuple::_S_first_size,
                                _Np - _Tuple::_S_first_size, __return_size>(
            __x.second);
      }

    // extract from multiple _SimdTuple data members
    else if constexpr (__return_size > _Tuple::_S_first_size - __values_to_skip)
      {
#ifdef _GLIBCXX_SIMD_USE_ALIASING_LOADS
        const __may_alias<_Tp>* const element_ptr
          = reinterpret_cast<const __may_alias<_Tp>*>(&__x) + __values_to_skip;
        return __as_vector(simd<_Tp, _RetAbi>(element_ptr, element_aligned));
#else
        [[maybe_unused]] constexpr size_t __offset = __values_to_skip;
        return __as_vector(simd<_Tp, _RetAbi>([&](auto __i) constexpr {
          constexpr _SizeConstant<__i + __offset> __k;
          return __x[__k];
        }));
#endif
      }

    // all of the return values are in __x.first
    else if constexpr (_Tuple::_S_first_size % __values_per_part == 0)
      return __extract_part<_Index, _Tuple::_S_first_size / __values_per_part,
                            _Combine>(__x.first);
    else
      return __extract_part<__values_to_skip, _Tuple::_S_first_size,
                            _Combine * __values_per_part>(__x.first);
  }

// }}}
// __fixed_size_storage_t<_Tp, _Np> {{{
template <typename _Tp, int _Np, typename _Tuple,
          typename _Next = simd<_Tp, _AllNativeAbis::_BestAbi<_Tp, _Np>>,
          int _Remain = _Np - int(_Next::size())>
  struct __fixed_size_storage_builder;

template <typename _Tp, int _Np>
  struct __fixed_size_storage
  : public __fixed_size_storage_builder<_Tp, _Np, _SimdTuple<_Tp>> {};

template <typename _Tp, int _Np, typename... _As, typename _Next>
  struct __fixed_size_storage_builder<_Tp, _Np, _SimdTuple<_Tp, _As...>, _Next,
                                      0>
  { using type = _SimdTuple<_Tp, _As..., typename _Next::abi_type>; };

template <typename _Tp, int _Np, typename... _As, typename _Next, int _Remain>
  struct __fixed_size_storage_builder<_Tp, _Np, _SimdTuple<_Tp, _As...>, _Next,
                                      _Remain>
  {
    using type = typename __fixed_size_storage_builder<
      _Tp, _Remain, _SimdTuple<_Tp, _As..., typename _Next::abi_type>>::type;
  };
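
// Illustration only (the chosen ABIs are target-dependent): with AVX enabled,
// __fixed_size_storage_t<float, 12> could evaluate to a tuple of an 8-float
// chunk followed by a 4-float chunk, because the builder greedily picks
// _AllNativeAbis::_BestAbi for the remaining element count on each step.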

// }}}
// _AbisInSimdTuple {{{
template <typename _Tp>
  struct _SeqOp;

template <size_t _I0, size_t... _Is>
  struct _SeqOp<index_sequence<_I0, _Is...>>
  {
    using _FirstPlusOne = index_sequence<_I0 + 1, _Is...>;
    using _NotFirstPlusOne = index_sequence<_I0, (_Is + 1)...>;
    template <size_t _First, size_t _Add>
      using _Prepend = index_sequence<_First, _I0 + _Add, (_Is + _Add)...>;
  };

template <typename _Tp>
  struct _AbisInSimdTuple;

template <typename _Tp>
  struct _AbisInSimdTuple<_SimdTuple<_Tp>>
  {
    using _Counts = index_sequence<0>;
    using _Begins = index_sequence<0>;
  };

template <typename _Tp, typename _Ap>
  struct _AbisInSimdTuple<_SimdTuple<_Tp, _Ap>>
  {
    using _Counts = index_sequence<1>;
    using _Begins = index_sequence<0>;
  };

template <typename _Tp, typename _A0, typename... _As>
  struct _AbisInSimdTuple<_SimdTuple<_Tp, _A0, _A0, _As...>>
  {
    using _Counts = typename _SeqOp<typename _AbisInSimdTuple<
      _SimdTuple<_Tp, _A0, _As...>>::_Counts>::_FirstPlusOne;
    using _Begins = typename _SeqOp<typename _AbisInSimdTuple<
      _SimdTuple<_Tp, _A0, _As...>>::_Begins>::_NotFirstPlusOne;
  };

template <typename _Tp, typename _A0, typename _A1, typename... _As>
  struct _AbisInSimdTuple<_SimdTuple<_Tp, _A0, _A1, _As...>>
  {
    using _Counts = typename _SeqOp<typename _AbisInSimdTuple<
      _SimdTuple<_Tp, _A1, _As...>>::_Counts>::template _Prepend<1, 0>;
    using _Begins = typename _SeqOp<typename _AbisInSimdTuple<
      _SimdTuple<_Tp, _A1, _As...>>::_Begins>::template _Prepend<0, 1>;
  };
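
// Illustration only: for _SimdTuple<float, _A0, _A0, _A1>, _Counts is
// index_sequence<2, 1> and _Begins is index_sequence<0, 2> -- two chunks use
// _A0 starting at tuple index 0, one uses _A1 starting at tuple index 2.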

// }}}
// __autocvt_to_simd {{{
template <typename _Tp, bool = is_arithmetic_v<__remove_cvref_t<_Tp>>>
  struct __autocvt_to_simd
  {
    _Tp _M_data;
    using _TT = __remove_cvref_t<_Tp>;

    operator _TT()
    { return _M_data; }

    operator _TT&()
    {
      static_assert(is_lvalue_reference<_Tp>::value, "");
      static_assert(!is_const<_Tp>::value, "");
      return _M_data;
    }

    operator _TT*()
    {
      static_assert(is_lvalue_reference<_Tp>::value, "");
      static_assert(!is_const<_Tp>::value, "");
      return &_M_data;
    }

    constexpr inline __autocvt_to_simd(_Tp dd) : _M_data(dd) {}

    template <typename _Abi>
      operator simd<typename _TT::value_type, _Abi>()
      { return {__private_init, _M_data}; }

    template <typename _Abi>
      operator simd<typename _TT::value_type, _Abi>&()
      {
        return *reinterpret_cast<simd<typename _TT::value_type, _Abi>*>(
          &_M_data);
      }

    template <typename _Abi>
      operator simd<typename _TT::value_type, _Abi>*()
      {
        return reinterpret_cast<simd<typename _TT::value_type, _Abi>*>(
          &_M_data);
      }
  };

template <typename _Tp>
  __autocvt_to_simd(_Tp &&) -> __autocvt_to_simd<_Tp>;

template <typename _Tp>
  struct __autocvt_to_simd<_Tp, true>
  {
    using _TT = __remove_cvref_t<_Tp>;
    _Tp _M_data;
    fixed_size_simd<_TT, 1> _M_fd;

    constexpr inline __autocvt_to_simd(_Tp dd) : _M_data(dd), _M_fd(_M_data) {}

    ~__autocvt_to_simd()
    { _M_data = __data(_M_fd).first; }

    operator fixed_size_simd<_TT, 1>()
    { return _M_fd; }

    operator fixed_size_simd<_TT, 1> &()
    {
      static_assert(is_lvalue_reference<_Tp>::value, "");
      static_assert(!is_const<_Tp>::value, "");
      return _M_fd;
    }

    operator fixed_size_simd<_TT, 1> *()
    {
      static_assert(is_lvalue_reference<_Tp>::value, "");
      static_assert(!is_const<_Tp>::value, "");
      return &_M_fd;
    }
  };
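
// Illustration only: __autocvt_to_simd lets the math wrappers below pass a
// chunk (or, in the arithmetic specialization, a single scalar lvalue) to
// functions expecting simd arguments; in the scalar case the wrapped value is
// copied into a fixed_size_simd<_TT, 1> and written back on destruction, so
// output parameters such as the exponent of frexp round-trip correctly.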

// }}}

struct _CommonImplFixedSize;
template <int _Np> struct _SimdImplFixedSize;
template <int _Np> struct _MaskImplFixedSize;
// simd_abi::_Fixed {{{
template <int _Np>
  struct simd_abi::_Fixed
  {
    template <typename _Tp> static constexpr size_t _S_size = _Np;
    template <typename _Tp> static constexpr size_t _S_full_size = _Np;
    // validity traits {{{
    struct _IsValidAbiTag : public __bool_constant<(_Np > 0)> {};

    template <typename _Tp>
      struct _IsValidSizeFor
      : __bool_constant<(_Np <= simd_abi::max_fixed_size<_Tp>)> {};

    template <typename _Tp>
      struct _IsValid : conjunction<_IsValidAbiTag, __is_vectorizable<_Tp>,
                                    _IsValidSizeFor<_Tp>> {};

    template <typename _Tp>
      static constexpr bool _S_is_valid_v = _IsValid<_Tp>::value;

    // }}}
    // _S_masked {{{
    _GLIBCXX_SIMD_INTRINSIC static constexpr _SanitizedBitMask<_Np>
    _S_masked(_BitMask<_Np> __x)
    { return __x._M_sanitized(); }

    _GLIBCXX_SIMD_INTRINSIC static constexpr _SanitizedBitMask<_Np>
    _S_masked(_SanitizedBitMask<_Np> __x)
    { return __x; }

    // }}}
    // _*Impl {{{
    using _CommonImpl = _CommonImplFixedSize;
    using _SimdImpl = _SimdImplFixedSize<_Np>;
    using _MaskImpl = _MaskImplFixedSize<_Np>;

    // }}}
    // __traits {{{
    template <typename _Tp, bool = _S_is_valid_v<_Tp>>
      struct __traits : _InvalidTraits {};

    template <typename _Tp>
      struct __traits<_Tp, true>
      {
        using _IsValid = true_type;
        using _SimdImpl = _SimdImplFixedSize<_Np>;
        using _MaskImpl = _MaskImplFixedSize<_Np>;

        // simd and simd_mask member types {{{
        using _SimdMember = __fixed_size_storage_t<_Tp, _Np>;
        using _MaskMember = _SanitizedBitMask<_Np>;

        static constexpr size_t _S_simd_align
          = std::__bit_ceil(_Np * sizeof(_Tp));

        static constexpr size_t _S_mask_align = alignof(_MaskMember);

        // }}}
        // _SimdBase / base class for simd, providing extra conversions {{{
        struct _SimdBase
        {
          // The following copy constructor ensures that function arguments
          // are passed via the stack. This is important for ABI compatibility
          // across TU boundaries.
          _SimdBase(const _SimdBase&) {}
          _SimdBase() = default;

          explicit operator const _SimdMember &() const
          { return static_cast<const simd<_Tp, _Fixed>*>(this)->_M_data; }

          explicit operator array<_Tp, _Np>() const
          {
            array<_Tp, _Np> __r;
            // _SimdMember can be larger because of higher alignment
            static_assert(sizeof(__r) <= sizeof(_SimdMember), "");
            __builtin_memcpy(__r.data(),
                             &static_cast<const _SimdMember&>(*this),
                             sizeof(__r));
            return __r;
          }
        };

        // }}}
        // _MaskBase {{{
        // empty. The bitset interface suffices
        struct _MaskBase {};

        // }}}
        // _SimdCastType {{{
        struct _SimdCastType
        {
          _SimdCastType(const array<_Tp, _Np>&);
          _SimdCastType(const _SimdMember& dd) : _M_data(dd) {}
          explicit operator const _SimdMember &() const { return _M_data; }

        private:
          const _SimdMember& _M_data;
        };

        // }}}
        // _MaskCastType {{{
        class _MaskCastType
        {
          _MaskCastType() = delete;
        };
        // }}}
      };
    // }}}
  };

// }}}
// _CommonImplFixedSize {{{
struct _CommonImplFixedSize
{
  // _S_store {{{
  template <typename _Tp, typename... _As>
    _GLIBCXX_SIMD_INTRINSIC static void
    _S_store(const _SimdTuple<_Tp, _As...>& __x, void* __addr)
    {
      constexpr size_t _Np = _SimdTuple<_Tp, _As...>::_S_size();
      __builtin_memcpy(__addr, &__x, _Np * sizeof(_Tp));
    }

  // }}}
};

// }}}
// _SimdImplFixedSize {{{1
// fixed_size must not inherit from _SimdMathFallback, so that the math
// specializations provided by the ABIs used within _SimdTuple are picked up
template <int _Np>
  struct _SimdImplFixedSize
  {
    // member types {{{2
    using _MaskMember = _SanitizedBitMask<_Np>;

    template <typename _Tp>
      using _SimdMember = __fixed_size_storage_t<_Tp, _Np>;

    template <typename _Tp>
      static constexpr size_t _S_tuple_size = _SimdMember<_Tp>::_S_tuple_size;

    template <typename _Tp>
      using _Simd = simd<_Tp, simd_abi::fixed_size<_Np>>;

    template <typename _Tp>
      using _TypeTag = _Tp*;

    // broadcast {{{2
    template <typename _Tp>
      static constexpr inline _SimdMember<_Tp> _S_broadcast(_Tp __x) noexcept
      {
        return _SimdMember<_Tp>::_S_generate([&](auto __meta) constexpr {
          return __meta._S_broadcast(__x);
        });
      }

    // _S_generator {{{2
    template <typename _Fp, typename _Tp>
      static constexpr inline _SimdMember<_Tp> _S_generator(_Fp&& __gen,
                                                            _TypeTag<_Tp>)
      {
        return _SimdMember<_Tp>::_S_generate([&__gen](auto __meta) constexpr {
          return __meta._S_generator(
            [&](auto __i) constexpr {
              return __i < _Np ? __gen(_SizeConstant<__meta._S_offset + __i>())
                               : 0;
            },
            _TypeTag<_Tp>());
        });
      }
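
    // Illustration only: _S_generator forwards a global element index to the
    // user callback, so e.g.
    //   fixed_size_simd<int, 4>([](auto __i) { return int(__i); })
    // fills {0, 1, 2, 3}; each chunk's callback receives
    // _SizeConstant<__meta._S_offset + __i>, and indices past _Np yield 0.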

    // _S_load {{{2
    template <typename _Tp, typename _Up>
      static inline _SimdMember<_Tp> _S_load(const _Up* __mem,
                                             _TypeTag<_Tp>) noexcept
      {
        return _SimdMember<_Tp>::_S_generate([&](auto __meta) {
          return __meta._S_load(&__mem[__meta._S_offset], _TypeTag<_Tp>());
        });
      }

    // _S_masked_load {{{2
    template <typename _Tp, typename... _As, typename _Up>
      static inline _SimdTuple<_Tp, _As...>
      _S_masked_load(const _SimdTuple<_Tp, _As...>& __old,
                     const _MaskMember __bits, const _Up* __mem) noexcept
      {
        auto __merge = __old;
        __for_each(__merge, [&](auto __meta, auto& __native) {
          if (__meta._S_submask(__bits).any())
#pragma GCC diagnostic push
            // __mem + __meta._S_offset could be UB ([expr.add]/4.3), but the
            // responsibility for avoiding UB is punted to the caller of the
            // masked load via the mask. Consequently, the compiler may assume
            // this branch is unreachable if the pointer arithmetic is UB.
#pragma GCC diagnostic ignored "-Warray-bounds"
            __native
              = __meta._S_masked_load(__native, __meta._S_make_mask(__bits),
                                      __mem + __meta._S_offset);
#pragma GCC diagnostic pop
        });
        return __merge;
      }

    // _S_store {{{2
    template <typename _Tp, typename _Up>
      static inline void _S_store(const _SimdMember<_Tp>& __v, _Up* __mem,
                                  _TypeTag<_Tp>) noexcept
      {
        __for_each(__v, [&](auto __meta, auto __native) {
          __meta._S_store(__native, &__mem[__meta._S_offset], _TypeTag<_Tp>());
        });
      }

    // _S_masked_store {{{2
    template <typename _Tp, typename... _As, typename _Up>
      static inline void _S_masked_store(const _SimdTuple<_Tp, _As...>& __v,
                                         _Up* __mem,
                                         const _MaskMember __bits) noexcept
      {
        __for_each(__v, [&](auto __meta, auto __native) {
          if (__meta._S_submask(__bits).any())
#pragma GCC diagnostic push
            // __mem + __meta._S_offset could be UB ([expr.add]/4.3), but the
            // responsibility for avoiding UB is punted to the caller of the
            // masked store via the mask. Consequently, the compiler may assume
            // this branch is unreachable if the pointer arithmetic is UB.
#pragma GCC diagnostic ignored "-Warray-bounds"
            __meta._S_masked_store(__native, __mem + __meta._S_offset,
                                   __meta._S_make_mask(__bits));
#pragma GCC diagnostic pop
        });
      }

    // negation {{{2
    template <typename _Tp, typename... _As>
      static inline _MaskMember
      _S_negate(const _SimdTuple<_Tp, _As...>& __x) noexcept
      {
        _MaskMember __bits = 0;
        __for_each(
          __x, [&__bits](auto __meta, auto __native) constexpr {
            __bits
              |= __meta._S_mask_to_shifted_ullong(__meta._S_negate(__native));
          });
        return __bits;
      }

    // reductions {{{2
    template <typename _Tp, typename _BinaryOperation>
      static constexpr inline _Tp _S_reduce(const _Simd<_Tp>& __x,
                                            const _BinaryOperation& __binary_op)
      {
        using _Tup = _SimdMember<_Tp>;
        const _Tup& __tup = __data(__x);
        if constexpr (_Tup::_S_tuple_size == 1)
          return _Tup::_FirstAbi::_SimdImpl::_S_reduce(
            __tup.template _M_simd_at<0>(), __binary_op);
        else if constexpr (_Tup::_S_tuple_size == 2 && _Tup::_S_size() > 2
                           && _Tup::_SecondType::_S_size() == 1)
          {
            return __binary_op(simd<_Tp, simd_abi::scalar>(
                                 reduce(__tup.template _M_simd_at<0>(),
                                        __binary_op)),
                               __tup.template _M_simd_at<1>())[0];
          }
        else if constexpr (_Tup::_S_tuple_size == 2 && _Tup::_S_size() > 4
                           && _Tup::_SecondType::_S_size() == 2)
          {
            return __binary_op(
              simd<_Tp, simd_abi::scalar>(
                reduce(__tup.template _M_simd_at<0>(), __binary_op)),
              simd<_Tp, simd_abi::scalar>(
                reduce(__tup.template _M_simd_at<1>(), __binary_op)))[0];
          }
        else
          {
            const auto& __x2 = __call_with_n_evaluations<
              __div_roundup(_Tup::_S_tuple_size, 2)>(
              [](auto __first_simd, auto... __remaining) {
                if constexpr (sizeof...(__remaining) == 0)
                  return __first_simd;
                else
                  {
                    using _Tup2
                      = _SimdTuple<_Tp,
                                   typename decltype(__first_simd)::abi_type,
                                   typename decltype(__remaining)::abi_type...>;
                    return fixed_size_simd<_Tp, _Tup2::_S_size()>(
                      __private_init,
                      __make_simd_tuple(__first_simd, __remaining...));
                  }
              },
              [&](auto __i) {
                auto __left = __tup.template _M_simd_at<2 * __i>();
                if constexpr (2 * __i + 1 == _Tup::_S_tuple_size)
                  return __left;
                else
                  {
                    auto __right = __tup.template _M_simd_at<2 * __i + 1>();
                    using _LT = decltype(__left);
                    using _RT = decltype(__right);
                    if constexpr (_LT::size() == _RT::size())
                      return __binary_op(__left, __right);
                    else
                      {
                        _GLIBCXX_SIMD_USE_CONSTEXPR_API
                        typename _LT::mask_type __k(
                          __private_init,
                          [](auto __j) constexpr { return __j < _RT::size(); });
                        _LT __ext_right = __left;
                        where(__k, __ext_right)
                          = __proposed::resizing_simd_cast<_LT>(__right);
                        where(__k, __left) = __binary_op(__left, __ext_right);
                        return __left;
                      }
                  }
              });
            return reduce(__x2, __binary_op);
          }
      }

    // _S_min, _S_max {{{2
    template <typename _Tp, typename... _As>
      static inline constexpr _SimdTuple<_Tp, _As...>
      _S_min(const _SimdTuple<_Tp, _As...>& __a,
             const _SimdTuple<_Tp, _As...>& __b)
      {
        return __a._M_apply_per_chunk(
          [](auto __impl, auto __aa, auto __bb) constexpr {
            return __impl._S_min(__aa, __bb);
          },
          __b);
      }

    template <typename _Tp, typename... _As>
      static inline constexpr _SimdTuple<_Tp, _As...>
      _S_max(const _SimdTuple<_Tp, _As...>& __a,
             const _SimdTuple<_Tp, _As...>& __b)
      {
        return __a._M_apply_per_chunk(
          [](auto __impl, auto __aa, auto __bb) constexpr {
            return __impl._S_max(__aa, __bb);
          },
          __b);
      }

    // _S_complement {{{2
    template <typename _Tp, typename... _As>
      static inline constexpr _SimdTuple<_Tp, _As...>
      _S_complement(const _SimdTuple<_Tp, _As...>& __x) noexcept
      {
        return __x._M_apply_per_chunk([](auto __impl, auto __xx) constexpr {
          return __impl._S_complement(__xx);
        });
      }

    // _S_unary_minus {{{2
    template <typename _Tp, typename... _As>
      static inline constexpr _SimdTuple<_Tp, _As...>
      _S_unary_minus(const _SimdTuple<_Tp, _As...>& __x) noexcept
      {
        return __x._M_apply_per_chunk([](auto __impl, auto __xx) constexpr {
          return __impl._S_unary_minus(__xx);
        });
      }

    // arithmetic operators {{{2

#define _GLIBCXX_SIMD_FIXED_OP(name_, op_) \
    template <typename _Tp, typename... _As> \
      static inline constexpr _SimdTuple<_Tp, _As...> name_( \
        const _SimdTuple<_Tp, _As...> __x, const _SimdTuple<_Tp, _As...> __y) \
      { \
        return __x._M_apply_per_chunk( \
          [](auto __impl, auto __xx, auto __yy) constexpr { \
            return __impl.name_(__xx, __yy); \
          }, \
          __y); \
      }

    _GLIBCXX_SIMD_FIXED_OP(_S_plus, +)
    _GLIBCXX_SIMD_FIXED_OP(_S_minus, -)
    _GLIBCXX_SIMD_FIXED_OP(_S_multiplies, *)
    _GLIBCXX_SIMD_FIXED_OP(_S_divides, /)
    _GLIBCXX_SIMD_FIXED_OP(_S_modulus, %)
    _GLIBCXX_SIMD_FIXED_OP(_S_bit_and, &)
    _GLIBCXX_SIMD_FIXED_OP(_S_bit_or, |)
    _GLIBCXX_SIMD_FIXED_OP(_S_bit_xor, ^)
    _GLIBCXX_SIMD_FIXED_OP(_S_bit_shift_left, <<)
    _GLIBCXX_SIMD_FIXED_OP(_S_bit_shift_right, >>)
#undef _GLIBCXX_SIMD_FIXED_OP

    template <typename _Tp, typename... _As>
      static inline constexpr _SimdTuple<_Tp, _As...>
      _S_bit_shift_left(const _SimdTuple<_Tp, _As...>& __x, int __y)
      {
        return __x._M_apply_per_chunk([__y](auto __impl, auto __xx) constexpr {
          return __impl._S_bit_shift_left(__xx, __y);
        });
      }

    template <typename _Tp, typename... _As>
      static inline constexpr _SimdTuple<_Tp, _As...>
      _S_bit_shift_right(const _SimdTuple<_Tp, _As...>& __x, int __y)
      {
        return __x._M_apply_per_chunk([__y](auto __impl, auto __xx) constexpr {
          return __impl._S_bit_shift_right(__xx, __y);
        });
      }

    // math {{{2
#define _GLIBCXX_SIMD_APPLY_ON_TUPLE(_RetTp, __name) \
    template <typename _Tp, typename... _As, typename... _More> \
      static inline __fixed_size_storage_t<_RetTp, _Np> \
      _S_##__name(const _SimdTuple<_Tp, _As...>& __x, \
                  const _More&... __more) \
      { \
        if constexpr (sizeof...(_More) == 0) \
          { \
            if constexpr (is_same_v<_Tp, _RetTp>) \
              return __x._M_apply_per_chunk( \
                [](auto __impl, auto __xx) constexpr { \
                  using _V = typename decltype(__impl)::simd_type; \
                  return __data(__name(_V(__private_init, __xx))); \
                }); \
            else \
              return __optimize_simd_tuple( \
                __x.template _M_apply_r<_RetTp>([](auto __impl, auto __xx) { \
                  return __impl._S_##__name(__xx); \
                })); \
          } \
        else if constexpr ( \
          is_same_v<_Tp, _RetTp> \
          && (... && is_same_v<_SimdTuple<_Tp, _As...>, _More>) ) \
          return __x._M_apply_per_chunk( \
            [](auto __impl, auto __xx, auto... __pack) constexpr { \
              using _V = typename decltype(__impl)::simd_type; \
              return __data(__name(_V(__private_init, __xx), \
                                   _V(__private_init, __pack)...)); \
            }, \
            __more...); \
        else if constexpr (is_same_v<_Tp, _RetTp>) \
          return __x._M_apply_per_chunk( \
            [](auto __impl, auto __xx, auto... __pack) constexpr { \
              using _V = typename decltype(__impl)::simd_type; \
              return __data(__name(_V(__private_init, __xx), \
                                   __autocvt_to_simd(__pack)...)); \
            }, \
            __more...); \
        else \
          __assert_unreachable<_Tp>(); \
      }

1615  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, acos)
1616  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, asin)
1617  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, atan)
1618  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, atan2)
1619  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, cos)
1620  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, sin)
1621  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, tan)
1622  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, acosh)
1623  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, asinh)
1624  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, atanh)
1625  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, cosh)
1626  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, sinh)
1627  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, tanh)
1628  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, exp)
1629  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, exp2)
1630  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, expm1)
1631  _GLIBCXX_SIMD_APPLY_ON_TUPLE(int, ilogb)
1632  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, log)
1633  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, log10)
1634  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, log1p)
1635  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, log2)
1636  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, logb)
1637  // modf implemented in simd_math.h
1638  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp,
1639  scalbn) // double scalbn(double x, int exp);
1640  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, scalbln)
1641  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, cbrt)
1642  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, abs)
1643  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fabs)
1644  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, pow)
1645  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, sqrt)
1646  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, erf)
1647  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, erfc)
1648  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, lgamma)
1649  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, tgamma)
1650  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, trunc)
1651  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, ceil)
1652  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, floor)
1653  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, nearbyint)
1654 
1655  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, rint)
1656  _GLIBCXX_SIMD_APPLY_ON_TUPLE(long, lrint)
1657  _GLIBCXX_SIMD_APPLY_ON_TUPLE(long long, llrint)
1658 
1659  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, round)
1660  _GLIBCXX_SIMD_APPLY_ON_TUPLE(long, lround)
1661  _GLIBCXX_SIMD_APPLY_ON_TUPLE(long long, llround)
1662 
1663  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, ldexp)
1664  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fmod)
1665  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, remainder)
1666  // copysign in simd_math.h
1667  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, nextafter)
1668  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fdim)
1669  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fmax)
1670  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fmin)
1671  _GLIBCXX_SIMD_APPLY_ON_TUPLE(_Tp, fma)
1672  _GLIBCXX_SIMD_APPLY_ON_TUPLE(int, fpclassify)
1673 #undef _GLIBCXX_SIMD_APPLY_ON_TUPLE
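
  // Editorial sketch of the dispatch in the macro above (illustrative):
  //  - no extra arguments and _RetTp == _Tp: apply __name chunk-wise via the
  //    free function on each chunk's native simd (e.g. _S_sin calls sin on
  //    every chunk);
  //  - no extra arguments but a different return type (ilogb, lrint, ...):
  //    rebuild the result tuple with element type _RetTp via _M_apply_r;
  //  - extra arguments of the same tuple type (atan2, pow, fmin, ...): unpack
  //    the matching chunks of every argument in lockstep;
  //  - extra arguments of other storage types (e.g. the int-exponent storage
  //    used by ldexp): wrap them with __autocvt_to_simd before the call.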

  template <typename _Tp, typename... _Abis>
    static _SimdTuple<_Tp, _Abis...>
    _S_remquo(const _SimdTuple<_Tp, _Abis...>& __x,
              const _SimdTuple<_Tp, _Abis...>& __y,
              __fixed_size_storage_t<int,
                                     _SimdTuple<_Tp, _Abis...>::_S_size()>* __z)
    {
      return __x._M_apply_per_chunk(
               [](auto __impl, const auto __xx, const auto __yy, auto& __zz) {
                 return __impl._S_remquo(__xx, __yy, &__zz);
               },
               __y, *__z);
    }

  template <typename _Tp, typename... _As>
    static inline _SimdTuple<_Tp, _As...>
    _S_frexp(const _SimdTuple<_Tp, _As...>& __x,
             __fixed_size_storage_t<int, _Np>& __exp) noexcept
    {
      return __x._M_apply_per_chunk(
               [](auto __impl, const auto& __a, auto& __b) {
                 return __data(frexp(
                   typename decltype(__impl)::simd_type(__private_init, __a),
                   __autocvt_to_simd(__b)));
               },
               __exp);
    }

#define _GLIBCXX_SIMD_TEST_ON_TUPLE_(name_)                                  \
  template <typename _Tp, typename... _As>                                   \
    static inline _MaskMember                                                \
    _S_##name_(const _SimdTuple<_Tp, _As...>& __x) noexcept                  \
    {                                                                        \
      return _M_test([](auto __impl, auto __xx)                              \
                     { return __impl._S_##name_(__xx); },                    \
                     __x);                                                   \
    }

  _GLIBCXX_SIMD_TEST_ON_TUPLE_(isinf)
  _GLIBCXX_SIMD_TEST_ON_TUPLE_(isfinite)
  _GLIBCXX_SIMD_TEST_ON_TUPLE_(isnan)
  _GLIBCXX_SIMD_TEST_ON_TUPLE_(isnormal)
  _GLIBCXX_SIMD_TEST_ON_TUPLE_(signbit)
#undef _GLIBCXX_SIMD_TEST_ON_TUPLE_
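
  // Illustrative: each classifier above returns one bitmask whose bit __i is
  // the scalar result for element __i; e.g. _S_isnan(__x) evaluates
  // __impl._S_isnan per chunk and _M_test splices the chunk masks together.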

  // _S_increment & _S_decrement {{{2
  template <typename... _Ts>
    _GLIBCXX_SIMD_INTRINSIC static constexpr void
    _S_increment(_SimdTuple<_Ts...>& __x)
    {
      __for_each(__x, [](auto __meta, auto& __native) constexpr {
        __meta._S_increment(__native);
      });
    }

  template <typename... _Ts>
    _GLIBCXX_SIMD_INTRINSIC static constexpr void
    _S_decrement(_SimdTuple<_Ts...>& __x)
    {
      __for_each(__x, [](auto __meta, auto& __native) constexpr {
        __meta._S_decrement(__native);
      });
    }

  // compares {{{2
#define _GLIBCXX_SIMD_CMP_OPERATIONS(__cmp)                                  \
  template <typename _Tp, typename... _As>                                   \
    _GLIBCXX_SIMD_INTRINSIC constexpr static _MaskMember                     \
    __cmp(const _SimdTuple<_Tp, _As...>& __x,                                \
          const _SimdTuple<_Tp, _As...>& __y)                                \
    {                                                                        \
      return _M_test([](auto __impl, auto __xx, auto __yy) constexpr {       \
                       return __impl.__cmp(__xx, __yy);                      \
                     },                                                      \
                     __x, __y);                                              \
    }

  _GLIBCXX_SIMD_CMP_OPERATIONS(_S_equal_to)
  _GLIBCXX_SIMD_CMP_OPERATIONS(_S_not_equal_to)
  _GLIBCXX_SIMD_CMP_OPERATIONS(_S_less)
  _GLIBCXX_SIMD_CMP_OPERATIONS(_S_less_equal)
  _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isless)
  _GLIBCXX_SIMD_CMP_OPERATIONS(_S_islessequal)
  _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isgreater)
  _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isgreaterequal)
  _GLIBCXX_SIMD_CMP_OPERATIONS(_S_islessgreater)
  _GLIBCXX_SIMD_CMP_OPERATIONS(_S_isunordered)
#undef _GLIBCXX_SIMD_CMP_OPERATIONS
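
  // Illustrative: _S_less(__x, __y), for example, evaluates each chunk's
  // native comparison and concatenates the per-chunk masks into one bitmask,
  // so a fixed_size compare costs one native compare per stored chunk.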

  // smart_reference access {{{2
  template <typename _Tp, typename... _As, typename _Up>
    _GLIBCXX_SIMD_INTRINSIC static void
    _S_set(_SimdTuple<_Tp, _As...>& __v, int __i, _Up&& __x) noexcept
    { __v._M_set(__i, static_cast<_Up&&>(__x)); }

  // _S_masked_assign {{{2
  template <typename _Tp, typename... _As>
    _GLIBCXX_SIMD_INTRINSIC static void
    _S_masked_assign(const _MaskMember __bits, _SimdTuple<_Tp, _As...>& __lhs,
                     const __type_identity_t<_SimdTuple<_Tp, _As...>>& __rhs)
    {
      __for_each(__lhs, __rhs,
                 [&](auto __meta, auto& __native_lhs,
                     auto __native_rhs) constexpr {
                   __meta._S_masked_assign(__meta._S_make_mask(__bits),
                                           __native_lhs, __native_rhs);
                 });
    }

  // Optimization for the case where the RHS is a scalar. No need to broadcast
  // the scalar to a simd first.
  template <typename _Tp, typename... _As>
    _GLIBCXX_SIMD_INTRINSIC static void
    _S_masked_assign(const _MaskMember __bits, _SimdTuple<_Tp, _As...>& __lhs,
                     const __type_identity_t<_Tp> __rhs)
    {
      __for_each(__lhs, [&](auto __meta, auto& __native_lhs) constexpr {
        __meta._S_masked_assign(__meta._S_make_mask(__bits), __native_lhs,
                                __rhs);
      });
    }
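
  // Worked example (editorial): with _Np == 4, __bits == 0b0101 and
  // __rhs == _Tp(1), only elements 0 and 2 of __lhs are overwritten with 1;
  // __meta._S_make_mask extracts each chunk's slice of the bitmask first.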

  // _S_masked_cassign {{{2
  template <typename _Op, typename _Tp, typename... _As>
    static inline void
    _S_masked_cassign(const _MaskMember __bits,
                      _SimdTuple<_Tp, _As...>& __lhs,
                      const _SimdTuple<_Tp, _As...>& __rhs, _Op __op)
    {
      __for_each(__lhs, __rhs,
                 [&](auto __meta, auto& __native_lhs,
                     auto __native_rhs) constexpr {
                   __meta.template _S_masked_cassign(
                     __meta._S_make_mask(__bits), __native_lhs, __native_rhs,
                     __op);
                 });
    }

  // Optimization for the case where the RHS is a scalar. No need to broadcast
  // the scalar to a simd first.
  template <typename _Op, typename _Tp, typename... _As>
    static inline void
    _S_masked_cassign(const _MaskMember __bits,
                      _SimdTuple<_Tp, _As...>& __lhs,
                      const _Tp& __rhs, _Op __op)
    {
      __for_each(__lhs, [&](auto __meta, auto& __native_lhs) constexpr {
        __meta.template _S_masked_cassign(__meta._S_make_mask(__bits),
                                          __native_lhs, __rhs, __op);
      });
    }

  // _S_masked_unary {{{2
  template <template <typename> class _Op, typename _Tp, typename... _As>
    static inline _SimdTuple<_Tp, _As...>
    _S_masked_unary(const _MaskMember __bits,
                    const _SimdTuple<_Tp, _As...> __v) // TODO: const-ref __v?
    {
      return __v._M_apply_wrapped(
               [&__bits](auto __meta, auto __native) constexpr {
                 return __meta.template _S_masked_unary<_Op>(
                          __meta._S_make_mask(__bits), __native);
               });
    }

  // }}}2
  };

// _MaskImplFixedSize {{{1
template <int _Np>
  struct _MaskImplFixedSize
  {
    static_assert(
      sizeof(_ULLong) * __CHAR_BIT__ >= _Np,
      "The fixed_size implementation relies on one _ULLong being able to store "
      "all boolean elements."); // required in load & store

    // member types {{{
    using _Abi = simd_abi::fixed_size<_Np>;

    using _MaskMember = _SanitizedBitMask<_Np>;

    template <typename _Tp>
      using _FirstAbi = typename __fixed_size_storage_t<_Tp, _Np>::_FirstAbi;

    template <typename _Tp>
      using _TypeTag = _Tp*;

    // }}}
    // _S_broadcast {{{
    template <typename>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
      _S_broadcast(bool __x)
      { return __x ? ~_MaskMember() : _MaskMember(); }
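
      // Illustrative: _MaskMember() is the all-false bitmask, and operator~
      // on a _SanitizedBitMask keeps only the low _Np bits, so broadcasting
      // true yields exactly _Np set bits (e.g. 0b1111 for _Np == 4).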

    // }}}
    // _S_load {{{
    template <typename>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
      _S_load(const bool* __mem)
      {
        using _Ip = __int_for_sizeof_t<bool>;
        // the following load uses element_aligned and relies on __mem already
        // carrying alignment information from when this load function was
        // called.
        const simd<_Ip, _Abi> __bools(
          reinterpret_cast<const __may_alias<_Ip>*>(__mem), element_aligned);
        return __data(__bools != 0);
      }

    // }}}
    // _S_to_bits {{{
    template <bool _Sanitized>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _SanitizedBitMask<_Np>
      _S_to_bits(_BitMask<_Np, _Sanitized> __x)
      {
        if constexpr (_Sanitized)
          return __x;
        else
          return __x._M_sanitized();
      }

    // }}}
    // _S_convert {{{
    template <typename _Tp, typename _Up, typename _UAbi>
      _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
      _S_convert(simd_mask<_Up, _UAbi> __x)
      {
        return _UAbi::_MaskImpl::_S_to_bits(__data(__x))
                 .template _M_extract<0, _Np>();
      }

    // }}}
    // _S_from_bitmask {{{2
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static _MaskMember
      _S_from_bitmask(_MaskMember __bits, _TypeTag<_Tp>) noexcept
      { return __bits; }

    // _S_load {{{2
    static inline _MaskMember
    _S_load(const bool* __mem) noexcept
    {
      // TODO: _UChar is not necessarily the best type to use here. For
      // smaller _Np, _UShort, _UInt, _ULLong, float, and double can be more
      // efficient.
      _ULLong __r = 0;
      using _Vs = __fixed_size_storage_t<_UChar, _Np>;
      __for_each(_Vs{}, [&](auto __meta, auto) {
        __r |= __meta._S_mask_to_shifted_ullong(
                 __meta._S_mask_impl._S_load(
                   &__mem[__meta._S_offset],
                   _SizeConstant<__meta._S_size()>()));
      });
      return __r;
    }
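
    // Illustrative walk-through: each chunk loads __meta._S_size() bools as a
    // native mask, _S_mask_to_shifted_ullong converts it to bits shifted to
    // the chunk's offset, and the bits are OR-ed into __r; returning __r
    // converts the accumulated _ULLong into the _SanitizedBitMask result.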

    // _S_masked_load {{{2
    static inline _MaskMember
    _S_masked_load(_MaskMember __merge, _MaskMember __mask,
                   const bool* __mem) noexcept
    {
      _BitOps::_S_bit_iteration(__mask.to_ullong(), [&](auto __i) {
        __merge.set(__i, __mem[__i]);
      });
      return __merge;
    }

    // _S_store {{{2
    static inline void
    _S_store(const _MaskMember __bitmask, bool* __mem) noexcept
    {
      if constexpr (_Np == 1)
        __mem[0] = __bitmask[0];
      else
        _FirstAbi<_UChar>::_CommonImpl::_S_store_bool_array(__bitmask, __mem);
    }

    // _S_masked_store {{{2
    static inline void
    _S_masked_store(const _MaskMember __v, bool* __mem,
                    const _MaskMember __k) noexcept
    {
      _BitOps::_S_bit_iteration(__k, [&](auto __i) { __mem[__i] = __v[__i]; });
    }

    // logical and bitwise operators {{{2
    _GLIBCXX_SIMD_INTRINSIC static _MaskMember
    _S_logical_and(const _MaskMember& __x, const _MaskMember& __y) noexcept
    { return __x & __y; }

    _GLIBCXX_SIMD_INTRINSIC static _MaskMember
    _S_logical_or(const _MaskMember& __x, const _MaskMember& __y) noexcept
    { return __x | __y; }

    _GLIBCXX_SIMD_INTRINSIC static constexpr _MaskMember
    _S_bit_not(const _MaskMember& __x) noexcept
    { return ~__x; }

    _GLIBCXX_SIMD_INTRINSIC static _MaskMember
    _S_bit_and(const _MaskMember& __x, const _MaskMember& __y) noexcept
    { return __x & __y; }

    _GLIBCXX_SIMD_INTRINSIC static _MaskMember
    _S_bit_or(const _MaskMember& __x, const _MaskMember& __y) noexcept
    { return __x | __y; }

    _GLIBCXX_SIMD_INTRINSIC static _MaskMember
    _S_bit_xor(const _MaskMember& __x, const _MaskMember& __y) noexcept
    { return __x ^ __y; }

    // smart_reference access {{{2
    _GLIBCXX_SIMD_INTRINSIC static void
    _S_set(_MaskMember& __k, int __i, bool __x) noexcept
    { __k.set(__i, __x); }

    // _S_masked_assign {{{2
    _GLIBCXX_SIMD_INTRINSIC static void
    _S_masked_assign(const _MaskMember __k, _MaskMember& __lhs,
                     const _MaskMember __rhs)
    { __lhs = (__lhs & ~__k) | (__rhs & __k); }

    // Optimization for the case where the RHS is a scalar.
    _GLIBCXX_SIMD_INTRINSIC static void
    _S_masked_assign(const _MaskMember __k, _MaskMember& __lhs,
                     const bool __rhs)
    {
      if (__rhs)
        __lhs |= __k;
      else
        __lhs &= ~__k;
    }
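
    // Worked bit example (editorial): __lhs = 0b1010, __k = 0b0110,
    // __rhs = 0b0101 gives (0b1010 & ~0b0110) | (0b0101 & 0b0110) == 0b1100;
    // with a scalar __rhs == true the same mask simply sets bits 1 and 2,
    // yielding 0b1110.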

    // }}}2
    // _S_all_of {{{
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static bool
      _S_all_of(simd_mask<_Tp, _Abi> __k)
      { return __data(__k).all(); }

    // }}}
    // _S_any_of {{{
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static bool
      _S_any_of(simd_mask<_Tp, _Abi> __k)
      { return __data(__k).any(); }

    // }}}
    // _S_none_of {{{
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static bool
      _S_none_of(simd_mask<_Tp, _Abi> __k)
      { return __data(__k).none(); }

    // }}}
    // _S_some_of {{{
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static bool
      _S_some_of([[maybe_unused]] simd_mask<_Tp, _Abi> __k)
      {
        if constexpr (_Np == 1)
          return false;
        else
          return __data(__k).any() && !__data(__k).all();
      }

    // }}}
    // _S_popcount {{{
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static int
      _S_popcount(simd_mask<_Tp, _Abi> __k)
      { return __data(__k).count(); }

    // }}}
    // _S_find_first_set {{{
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static int
      _S_find_first_set(simd_mask<_Tp, _Abi> __k)
      { return std::__countr_zero(__data(__k).to_ullong()); }

    // }}}
    // _S_find_last_set {{{
    template <typename _Tp>
      _GLIBCXX_SIMD_INTRINSIC static int
      _S_find_last_set(simd_mask<_Tp, _Abi> __k)
      { return std::__bit_width(__data(__k).to_ullong()) - 1; }
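
      // Illustrative: for mask bits 0b0110, _S_find_first_set returns
      // __countr_zero(0b0110) == 1 and _S_find_last_set returns
      // __bit_width(0b0110) - 1 == 2; both presuppose at least one set bit.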

    // }}}
  };
// }}}1

_GLIBCXX_SIMD_END_NAMESPACE
#endif // __cplusplus >= 201703L
#endif // _GLIBCXX_EXPERIMENTAL_SIMD_FIXED_SIZE_H_

// vim: foldmethod=marker sw=2 noet ts=8 sts=2 tw=80