// libstdc++
00001 // The template and inlines for the -*- C++ -*- internal _Array helper class. 00002 00003 // Copyright (C) 1997-2013 Free Software Foundation, Inc. 00004 // 00005 // This file is part of the GNU ISO C++ Library. This library is free 00006 // software; you can redistribute it and/or modify it under the 00007 // terms of the GNU General Public License as published by the 00008 // Free Software Foundation; either version 3, or (at your option) 00009 // any later version. 00010 00011 // This library is distributed in the hope that it will be useful, 00012 // but WITHOUT ANY WARRANTY; without even the implied warranty of 00013 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 00014 // GNU General Public License for more details. 00015 00016 // Under Section 7 of GPL version 3, you are granted additional 00017 // permissions described in the GCC Runtime Library Exception, version 00018 // 3.1, as published by the Free Software Foundation. 00019 00020 // You should have received a copy of the GNU General Public License and 00021 // a copy of the GCC Runtime Library Exception along with this program; 00022 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see 00023 // <http://www.gnu.org/licenses/>. 00024 00025 /** @file bits/valarray_array.h 00026 * This is an internal header file, included by other library headers. 00027 * Do not attempt to use it directly. 
@headername{valarray}
 */

// Written by Gabriel Dos Reis <Gabriel.Dos-Reis@DPTMaths.ENS-Cachan.Fr>

#ifndef _VALARRAY_ARRAY_H
#define _VALARRAY_ARRAY_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <bits/cpp_type_traits.h>
#include <cstdlib>
#include <new>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  //
  // Helper functions on raw pointers
  //

  // Allocate __n bytes of raw memory the old-fashioned way.
  inline void*
  __valarray_get_memory(size_t __n)
  { return operator new(__n); }

  // Allocate raw storage for __n objects of type _Tp.  The storage is
  // uninitialized; callers construct elements with the
  // __valarray_*_construct helpers below.
  template<typename _Tp>
    inline _Tp*__restrict__
    __valarray_get_storage(size_t __n)
    {
      return static_cast<_Tp*__restrict__>
        (std::__valarray_get_memory(__n * sizeof(_Tp)));
    }

  // Return memory to the system.
  inline void
  __valarray_release_memory(void* __p)
  { operator delete(__p); }

  // Turn raw memory into an array of _Tp filled with _Tp().
  // This is required in 'valarray<T> v(n);'.
  template<typename _Tp, bool>
    struct _Array_default_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(_Tp* __b, _Tp* __e)
      {
        while (__b != __e)
          new(__b++) _Tp();
      }
    };

  template<typename _Tp>
    struct _Array_default_ctor<_Tp, true>
    {
      // For fundamental types, it suffices to say 'memset()'.
      inline static void
      _S_do_it(_Tp* __b, _Tp* __e)
      { __builtin_memset(__b, 0, (__e - __b) * sizeof(_Tp)); }
    };

  // Value-initialize the range [__b, __e), dispatching to the memset
  // specialization for scalar types.
  template<typename _Tp>
    inline void
    __valarray_default_construct(_Tp* __b, _Tp* __e)
    {
      _Array_default_ctor<_Tp, __is_scalar<_Tp>::__value>::_S_do_it(__b, __e);
    }

  // Turn raw memory into an array of _Tp filled with __t.
  // This is required in 'valarray<T> v(n, t)'.  Also
  // used in valarray<>::resize().
  template<typename _Tp, bool>
    struct _Array_init_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(_Tp* __b, _Tp* __e, const _Tp __t)
      {
        while (__b != __e)
          new(__b++) _Tp(__t);
      }
    };

  template<typename _Tp>
    struct _Array_init_ctor<_Tp, true>
    {
      // Trivial types need no placement-new; plain assignment will do.
      inline static void
      _S_do_it(_Tp* __b, _Tp* __e, const _Tp __t)
      {
        while (__b != __e)
          *__b++ = __t;
      }
    };

  // Fill-construct the range [__b, __e) from the value __t.
  template<typename _Tp>
    inline void
    __valarray_fill_construct(_Tp* __b, _Tp* __e, const _Tp __t)
    {
      _Array_init_ctor<_Tp, __is_trivial(_Tp)>::_S_do_it(__b, __e, __t);
    }

  //
  // Copy-construct raw array [__o, *) from plain array [__b, __e).
  // We can't just say 'memcpy()' for non-trivial types.
  //
  template<typename _Tp, bool>
    struct _Array_copy_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(const _Tp* __b, const _Tp* __e, _Tp* __restrict__ __o)
      {
        while (__b != __e)
          new(__o++) _Tp(*__b++);
      }
    };

  template<typename _Tp>
    struct _Array_copy_ctor<_Tp, true>
    {
      // Trivial types: a single memcpy replaces element-wise construction.
      inline static void
      _S_do_it(const _Tp* __b, const _Tp* __e, _Tp* __restrict__ __o)
      { __builtin_memcpy(__o, __b, (__e - __b) * sizeof(_Tp)); }
    };

  // Copy-construct raw array [__o, *) from plain array [__b, __e).
  template<typename _Tp>
    inline void
    __valarray_copy_construct(const _Tp* __b, const _Tp* __e,
                              _Tp* __restrict__ __o)
    {
      _Array_copy_ctor<_Tp, __is_trivial(_Tp)>::_S_do_it(__b, __e, __o);
    }

  // Copy-construct raw array [__o, *) from strided array __a[<__n : __s>].
  template<typename _Tp>
    inline void
    __valarray_copy_construct (const _Tp* __restrict__ __a, size_t __n,
                               size_t __s, _Tp* __restrict__ __o)
    {
      if (__is_trivial(_Tp))
        while (__n--)
          {
            *__o++ = *__a;
            __a += __s;
          }
      else
        while (__n--)
          {
            new(__o++) _Tp(*__a);
            __a += __s;
          }
    }

  // Copy-construct raw array [__o, *) from indexed array __a[__i[<__n>]].
  template<typename _Tp>
    inline void
    __valarray_copy_construct (const _Tp* __restrict__ __a,
                               const size_t* __restrict__ __i,
                               _Tp* __restrict__ __o, size_t __n)
    {
      if (__is_trivial(_Tp))
        while (__n--)
          *__o++ = __a[*__i++];
      else
        while (__n--)
          new (__o++) _Tp(__a[*__i++]);
    }

  // Do the necessary cleanup when we're done with arrays.
  template<typename _Tp>
    inline void
    __valarray_destroy_elements(_Tp* __b, _Tp* __e)
    {
      // Trivial types need no destructor calls at all.
      if (!__is_trivial(_Tp))
        while (__b != __e)
          {
            __b->~_Tp();
            ++__b;
          }
    }

  // Fill a plain array __a[<__n>] with __t.
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, size_t __n, const _Tp& __t)
    {
      while (__n--)
        *__a++ = __t;
    }

  // Fill strided array __a[<__n-1 : __s>] with __t.
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, size_t __n,
                    size_t __s, const _Tp& __t)
    {
      for (size_t __i = 0; __i < __n; ++__i, __a += __s)
        *__a = __t;
    }

  // Fill indirect array __a[__i[<__n>]] with __t.
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, const size_t* __restrict__ __i,
                    size_t __n, const _Tp& __t)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__i)
        __a[*__i] = __t;
    }

  // Copy plain array __a[<__n>] in __b[<__n>].
  // For non-fundamental types, it is wrong to say 'memcpy()'.
  template<typename _Tp, bool>
    struct _Array_copier
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
      {
        while(__n--)
          *__b++ = *__a++;
      }
    };

  template<typename _Tp>
    struct _Array_copier<_Tp, true>
    {
      // Trivial types: one memcpy replaces the element-wise loop.
      inline static void
      _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
      { __builtin_memcpy(__b, __a, __n * sizeof (_Tp)); }
    };

  // Copy a plain array __a[<__n>] into a plain array __b[<>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
                    _Tp* __restrict__ __b)
    {
      _Array_copier<_Tp, __is_trivial(_Tp)>::_S_do_it(__a, __n, __b);
    }

  // Copy strided array __a[<__n : __s>] in plain __b[<__n>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n, size_t __s,
                    _Tp* __restrict__ __b)
    {
      for (size_t __i = 0; __i < __n; ++__i, ++__b, __a += __s)
        *__b = *__a;
    }

  // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, _Tp* __restrict__ __b,
                    size_t __n, size_t __s)
    {
      for (size_t __i = 0; __i < __n; ++__i, ++__a, __b += __s)
        *__b = *__a;
    }

  // Copy strided array __src[<__n : __s1>] into another
  // strided array __dst[< : __s2>].  Their sizes must match.
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __src, size_t __n, size_t __s1,
                    _Tp* __restrict__ __dst, size_t __s2)
    {
      for (size_t __i = 0; __i < __n; ++__i)
        __dst[__i * __s2] = __src[__i * __s1];
    }

  // Copy an indexed array __a[__i[<__n>]] in plain array __b[<__n>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a,
                    const size_t* __restrict__ __i,
                    _Tp* __restrict__ __b, size_t __n)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__b, ++__i)
        *__b = __a[*__i];
    }

  // Copy a plain array __a[<__n>] in an indexed array __b[__i[<__n>]].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
                    _Tp* __restrict__ __b, const size_t* __restrict__ __i)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__a, ++__i)
        __b[*__i] = *__a;
    }

  // Copy the __n first elements of an indexed array __src[<__i>] into
  // another indexed array __dst[<__j>].
00327 template<typename _Tp> 00328 inline void 00329 __valarray_copy(const _Tp* __restrict__ __src, size_t __n, 00330 const size_t* __restrict__ __i, 00331 _Tp* __restrict__ __dst, const size_t* __restrict__ __j) 00332 { 00333 for (size_t __k = 0; __k < __n; ++__k) 00334 __dst[*__j++] = __src[*__i++]; 00335 } 00336 00337 // 00338 // Compute the sum of elements in range [__f, __l) 00339 // This is a naive algorithm. It suffers from cancelling. 00340 // In the future try to specialize 00341 // for _Tp = float, double, long double using a more accurate 00342 // algorithm. 00343 // 00344 template<typename _Tp> 00345 inline _Tp 00346 __valarray_sum(const _Tp* __f, const _Tp* __l) 00347 { 00348 _Tp __r = _Tp(); 00349 while (__f != __l) 00350 __r += *__f++; 00351 return __r; 00352 } 00353 00354 // Compute the product of all elements in range [__f, __l) 00355 template<typename _Tp> 00356 inline _Tp 00357 __valarray_product(const _Tp* __f, const _Tp* __l) 00358 { 00359 _Tp __r = _Tp(1); 00360 while (__f != __l) 00361 __r = __r * *__f++; 00362 return __r; 00363 } 00364 00365 // Compute the min/max of an array-expression 00366 template<typename _Ta> 00367 inline typename _Ta::value_type 00368 __valarray_min(const _Ta& __a) 00369 { 00370 size_t __s = __a.size(); 00371 typedef typename _Ta::value_type _Value_type; 00372 _Value_type __r = __s == 0 ? _Value_type() : __a[0]; 00373 for (size_t __i = 1; __i < __s; ++__i) 00374 { 00375 _Value_type __t = __a[__i]; 00376 if (__t < __r) 00377 __r = __t; 00378 } 00379 return __r; 00380 } 00381 00382 template<typename _Ta> 00383 inline typename _Ta::value_type 00384 __valarray_max(const _Ta& __a) 00385 { 00386 size_t __s = __a.size(); 00387 typedef typename _Ta::value_type _Value_type; 00388 _Value_type __r = __s == 0 ? 
_Value_type() : __a[0]; 00389 for (size_t __i = 1; __i < __s; ++__i) 00390 { 00391 _Value_type __t = __a[__i]; 00392 if (__t > __r) 00393 __r = __t; 00394 } 00395 return __r; 00396 } 00397 00398 // 00399 // Helper class _Array, first layer of valarray abstraction. 00400 // All operations on valarray should be forwarded to this class 00401 // whenever possible. -- gdr 00402 // 00403 00404 template<typename _Tp> 00405 struct _Array 00406 { 00407 explicit _Array(size_t); 00408 explicit _Array(_Tp* const __restrict__); 00409 explicit _Array(const valarray<_Tp>&); 00410 _Array(const _Tp* __restrict__, size_t); 00411 00412 _Tp* begin() const; 00413 00414 _Tp* const __restrict__ _M_data; 00415 }; 00416 00417 00418 // Copy-construct plain array __b[<__n>] from indexed array __a[__i[<__n>]] 00419 template<typename _Tp> 00420 inline void 00421 __valarray_copy_construct(_Array<_Tp> __a, _Array<size_t> __i, 00422 _Array<_Tp> __b, size_t __n) 00423 { std::__valarray_copy_construct(__a._M_data, __i._M_data, 00424 __b._M_data, __n); } 00425 00426 // Copy-construct plain array __b[<__n>] from strided array __a[<__n : __s>] 00427 template<typename _Tp> 00428 inline void 00429 __valarray_copy_construct(_Array<_Tp> __a, size_t __n, size_t __s, 00430 _Array<_Tp> __b) 00431 { std::__valarray_copy_construct(__a._M_data, __n, __s, __b._M_data); } 00432 00433 template<typename _Tp> 00434 inline void 00435 __valarray_fill (_Array<_Tp> __a, size_t __n, const _Tp& __t) 00436 { std::__valarray_fill(__a._M_data, __n, __t); } 00437 00438 template<typename _Tp> 00439 inline void 00440 __valarray_fill(_Array<_Tp> __a, size_t __n, size_t __s, const _Tp& __t) 00441 { std::__valarray_fill(__a._M_data, __n, __s, __t); } 00442 00443 template<typename _Tp> 00444 inline void 00445 __valarray_fill(_Array<_Tp> __a, _Array<size_t> __i, 00446 size_t __n, const _Tp& __t) 00447 { std::__valarray_fill(__a._M_data, __i._M_data, __n, __t); } 00448 00449 // Copy a plain array __a[<__n>] into a play array __b[<>] 
00450 template<typename _Tp> 00451 inline void 00452 __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b) 00453 { std::__valarray_copy(__a._M_data, __n, __b._M_data); } 00454 00455 // Copy strided array __a[<__n : __s>] in plain __b[<__n>] 00456 template<typename _Tp> 00457 inline void 00458 __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s, _Array<_Tp> __b) 00459 { std::__valarray_copy(__a._M_data, __n, __s, __b._M_data); } 00460 00461 // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>] 00462 template<typename _Tp> 00463 inline void 00464 __valarray_copy(_Array<_Tp> __a, _Array<_Tp> __b, size_t __n, size_t __s) 00465 { __valarray_copy(__a._M_data, __b._M_data, __n, __s); } 00466 00467 // Copy strided array __src[<__n : __s1>] into another 00468 // strided array __dst[< : __s2>]. Their sizes must match. 00469 template<typename _Tp> 00470 inline void 00471 __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s1, 00472 _Array<_Tp> __b, size_t __s2) 00473 { std::__valarray_copy(__a._M_data, __n, __s1, __b._M_data, __s2); } 00474 00475 // Copy an indexed array __a[__i[<__n>]] in plain array __b[<__n>] 00476 template<typename _Tp> 00477 inline void 00478 __valarray_copy(_Array<_Tp> __a, _Array<size_t> __i, 00479 _Array<_Tp> __b, size_t __n) 00480 { std::__valarray_copy(__a._M_data, __i._M_data, __b._M_data, __n); } 00481 00482 // Copy a plain array __a[<__n>] in an indexed array __b[__i[<__n>]] 00483 template<typename _Tp> 00484 inline void 00485 __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b, 00486 _Array<size_t> __i) 00487 { std::__valarray_copy(__a._M_data, __n, __b._M_data, __i._M_data); } 00488 00489 // Copy the __n first elements of an indexed array __src[<__i>] into 00490 // another indexed array __dst[<__j>]. 
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __src, size_t __n, _Array<size_t> __i,
                    _Array<_Tp> __dst, _Array<size_t> __j)
    {
      std::__valarray_copy(__src._M_data, __n, __i._M_data,
                           __dst._M_data, __j._M_data);
    }

  // Allocate storage for __n elements and value-initialize them.
  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(size_t __n)
    : _M_data(__valarray_get_storage<_Tp>(__n))
    { std::__valarray_default_construct(_M_data, _M_data + __n); }

  // Wrap an existing pointer; no ownership is taken.
  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(_Tp* const __restrict__ __p)
    : _M_data (__p) {}

  // View the storage of an existing valarray; no ownership is taken.
  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(const valarray<_Tp>& __v)
    : _M_data (__v._M_data) {}

  // Allocate storage for __s elements and copy-construct them from
  // the plain array __b.
  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(const _Tp* __restrict__ __b, size_t __s)
    : _M_data(__valarray_get_storage<_Tp>(__s))
    { std::__valarray_copy_construct(__b, __s, _M_data); }

  template<typename _Tp>
    inline _Tp*
    _Array<_Tp>::begin () const
    { return _M_data; }

  // Define the augmented-assignment helpers _Array_augmented_<_Name>
  // for one operator _Op.  Each expansion provides overloads for the
  // plain, strided, indexed and masked array shapes, plus expression
  // templates (_Expr).  NOTE: no '//' comments may appear inside the
  // macro body -- the trailing backslash would splice the next line
  // into the comment.
#define _DEFINE_ARRAY_FUNCTION(_Op, _Name) \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, const _Tp& __t) \
    { \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; ++__p) \
        *__p _Op##= __t; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b) \
    { \
      _Tp* __p = __a._M_data; \
      for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; ++__p, ++__q) \
        *__p _Op##= *__q; \
    } \
 \
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, ++__p) \
        *__p _Op##= __e[__i]; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, size_t __s, \
                             _Array<_Tp> __b) \
    { \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __s * __n; \
           __p += __s, ++__q) \
        *__p _Op##= *__q; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<_Tp> __b, \
                             size_t __n, size_t __s) \
    { \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; \
           ++__p, __q += __s) \
        *__p _Op##= *__q; \
    } \
 \
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __s, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, __p += __s) \
        *__p _Op##= __e[__i]; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i, \
                             _Array<_Tp> __b, size_t __n) \
    { \
      _Tp* __q(__b._M_data); \
      for (size_t* __j = __i._M_data; __j < __i._M_data + __n; \
           ++__j, ++__q) \
        __a._M_data[*__j] _Op##= *__q; \
    } \
 \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, \
                             _Array<_Tp> __b, _Array<size_t> __i) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t* __j = __i._M_data; __j<__i._M_data + __n; \
           ++__j, ++__p) \
        *__p _Op##= __b._M_data[*__j]; \
    } \
 \
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      size_t* __j(__i._M_data); \
      for (size_t __k = 0; __k<__n; ++__k, ++__j) \
        __a._M_data[*__j] _Op##= __e[__k]; \
    } \
 \
  template<typename _Tp> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m, \
                             _Array<_Tp> __b, size_t __n) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __p(__a._M_data); \
      for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; \
           ++__q, ++__ok, ++__p) \
        { \
          while (! *__ok) \
            { \
              ++__ok; \
              ++__p; \
            } \
          *__p _Op##= *__q; \
        } \
    } \
 \
  template<typename _Tp> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, \
                             _Array<_Tp> __b, _Array<bool> __m) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; \
           ++__p, ++__ok, ++__q) \
        { \
          while (! *__ok) \
            { \
              ++__ok; \
              ++__q; \
            } \
          *__p _Op##= *__q; \
        } \
    } \
 \
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, ++__ok, ++__p) \
        { \
          while (! *__ok) \
            { \
              ++__ok; \
              ++__p; \
            } \
          *__p _Op##= __e[__i]; \
        } \
    }

  // One helper family per augmented-assignment operator.
_DEFINE_ARRAY_FUNCTION(+, __plus)
_DEFINE_ARRAY_FUNCTION(-, __minus)
_DEFINE_ARRAY_FUNCTION(*, __multiplies)
_DEFINE_ARRAY_FUNCTION(/, __divides)
_DEFINE_ARRAY_FUNCTION(%, __modulus)
_DEFINE_ARRAY_FUNCTION(^, __bitwise_xor)
_DEFINE_ARRAY_FUNCTION(|, __bitwise_or)
_DEFINE_ARRAY_FUNCTION(&, __bitwise_and)
_DEFINE_ARRAY_FUNCTION(<<, __shift_left)
_DEFINE_ARRAY_FUNCTION(>>, __shift_right)

#undef _DEFINE_ARRAY_FUNCTION

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

# include <bits/valarray_array.tcc>

#endif /* _VALARRAY_ARRAY_H */