libstdc++
// The template and inlines for the -*- C++ -*- internal _Array helper class.

// Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005,
// 2006, 2007, 2008, 2009, 2010, 2011
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/valarray_array.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{valarray}
 */

// Written by Gabriel Dos Reis <Gabriel.Dos-Reis@DPTMaths.ENS-Cachan.Fr>

#ifndef _VALARRAY_ARRAY_H
#define _VALARRAY_ARRAY_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <bits/cpp_type_traits.h>
#include <cstdlib>
#include <new>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  //
  // Helper functions on raw pointers
  //

  // We get memory the old-fashioned way.
  inline void*
  __valarray_get_memory(size_t __n)
  { return operator new(__n); }

  template<typename _Tp>
    inline _Tp*__restrict__
    __valarray_get_storage(size_t __n)
    {
      return static_cast<_Tp*__restrict__>
        (std::__valarray_get_memory(__n * sizeof(_Tp)));
    }

  // Return memory to the system
  inline void
  __valarray_release_memory(void* __p)
  { operator delete(__p); }
  // Turn raw memory into an array of _Tp filled with _Tp()
  // This is required in 'valarray<T> v(n);'
  template<typename _Tp, bool>
    struct _Array_default_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(_Tp* __b, _Tp* __e)
      {
        while (__b != __e)
          new(__b++) _Tp();
      }
    };

  template<typename _Tp>
    struct _Array_default_ctor<_Tp, true>
    {
      // For fundamental types, it suffices to say 'memset()'
      inline static void
      _S_do_it(_Tp* __b, _Tp* __e)
      { __builtin_memset(__b, 0, (__e - __b) * sizeof(_Tp)); }
    };

  template<typename _Tp>
    inline void
    __valarray_default_construct(_Tp* __b, _Tp* __e)
    {
      _Array_default_ctor<_Tp, __is_scalar<_Tp>::__value>::_S_do_it(__b, __e);
    }

  // Turn raw memory into an array of _Tp filled with __t
  // This is required in 'valarray<T> v(n, t)'.  Also
  // used in valarray<>::resize().
  template<typename _Tp, bool>
    struct _Array_init_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(_Tp* __b, _Tp* __e, const _Tp __t)
      {
        while (__b != __e)
          new(__b++) _Tp(__t);
      }
    };

  template<typename _Tp>
    struct _Array_init_ctor<_Tp, true>
    {
      inline static void
      _S_do_it(_Tp* __b, _Tp* __e, const _Tp __t)
      {
        while (__b != __e)
          *__b++ = __t;
      }
    };

  template<typename _Tp>
    inline void
    __valarray_fill_construct(_Tp* __b, _Tp* __e, const _Tp __t)
    {
      _Array_init_ctor<_Tp, __is_trivial(_Tp)>::_S_do_it(__b, __e, __t);
    }

  //
  // copy-construct raw array [__o, *) from plain array [__b, __e)
  // We can't just say 'memcpy()'
  //
  template<typename _Tp, bool>
    struct _Array_copy_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(const _Tp* __b, const _Tp* __e, _Tp* __restrict__ __o)
      {
        while (__b != __e)
          new(__o++) _Tp(*__b++);
      }
    };

  template<typename _Tp>
    struct _Array_copy_ctor<_Tp, true>
    {
      inline static void
      _S_do_it(const _Tp* __b, const _Tp* __e, _Tp* __restrict__ __o)
      { __builtin_memcpy(__o, __b, (__e - __b) * sizeof(_Tp)); }
    };

  template<typename _Tp>
    inline void
    __valarray_copy_construct(const _Tp* __b, const _Tp* __e,
                              _Tp* __restrict__ __o)
    {
      _Array_copy_ctor<_Tp, __is_trivial(_Tp)>::_S_do_it(__b, __e, __o);
    }

  // copy-construct raw array [__o, *) from strided array __a[<__n : __s>]
  template<typename _Tp>
    inline void
    __valarray_copy_construct(const _Tp* __restrict__ __a, size_t __n,
                              size_t __s, _Tp* __restrict__ __o)
    {
      if (__is_trivial(_Tp))
        while (__n--)
          {
            *__o++ = *__a;
            __a += __s;
          }
      else
        while (__n--)
          {
            new(__o++) _Tp(*__a);
            __a += __s;
          }
    }

  // copy-construct raw array [__o, *) from indexed array __a[__i[<__n>]]
  template<typename _Tp>
    inline void
    __valarray_copy_construct(const _Tp* __restrict__ __a,
                              const size_t* __restrict__ __i,
                              _Tp* __restrict__ __o, size_t __n)
    {
      if (__is_trivial(_Tp))
        while (__n--)
          *__o++ = __a[*__i++];
      else
        while (__n--)
          new (__o++) _Tp(__a[*__i++]);
    }

  // Do the necessary cleanup when we're done with arrays.
  template<typename _Tp>
    inline void
    __valarray_destroy_elements(_Tp* __b, _Tp* __e)
    {
      if (!__is_trivial(_Tp))
        while (__b != __e)
          {
            __b->~_Tp();
            ++__b;
          }
    }
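  // For exposition only: a minimal sketch of how the raw-memory helpers and
  // the construct/destroy helpers above are intended to pair up, assuming
  // _Tp is default-constructible (this is roughly what the _Array<_Tp>
  // constructors below do):
  //
  //   _Tp* __p = std::__valarray_get_storage<_Tp>(__n);  // uninitialized bytes
  //   std::__valarray_default_construct(__p, __p + __n); // placement-new or memset
  //   // ... use __p[0] .. __p[__n - 1] ...
  //   std::__valarray_destroy_elements(__p, __p + __n);  // no-op for trivial _Tp
  //   std::__valarray_release_memory(__p);               // operator delete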
  // Fill a plain array __a[<__n>] with __t
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, size_t __n, const _Tp& __t)
    {
      while (__n--)
        *__a++ = __t;
    }

  // Fill strided array __a[<__n : __s>] with __t
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, size_t __n,
                    size_t __s, const _Tp& __t)
    {
      for (size_t __i = 0; __i < __n; ++__i, __a += __s)
        *__a = __t;
    }

  // Fill indirect array __a[__i[<__n>]] with __t
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, const size_t* __restrict__ __i,
                    size_t __n, const _Tp& __t)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__i)
        __a[*__i] = __t;
    }

  // Copy plain array __a[<__n>] in __b[<__n>]
  // For non-fundamental types, it is wrong to say 'memcpy()'
  template<typename _Tp, bool>
    struct _Array_copier
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
      {
        while (__n--)
          *__b++ = *__a++;
      }
    };

  template<typename _Tp>
    struct _Array_copier<_Tp, true>
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
      { __builtin_memcpy(__b, __a, __n * sizeof(_Tp)); }
    };

  // Copy a plain array __a[<__n>] into a plain array __b[<>]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
                    _Tp* __restrict__ __b)
    {
      _Array_copier<_Tp, __is_trivial(_Tp)>::_S_do_it(__a, __n, __b);
    }

  // Copy strided array __a[<__n : __s>] in plain __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n, size_t __s,
                    _Tp* __restrict__ __b)
    {
      for (size_t __i = 0; __i < __n; ++__i, ++__b, __a += __s)
        *__b = *__a;
    }

  // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, _Tp* __restrict__ __b,
                    size_t __n, size_t __s)
    {
      for (size_t __i = 0; __i < __n; ++__i, ++__a, __b += __s)
        *__b = *__a;
    }
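  // For exposition only: the __restrict__ qualifiers on the copy helpers in
  // this file are the GNU spelling of C99 'restrict'; they promise the
  // compiler that the source and destination ranges do not alias, so a call
  // such as
  //
  //   std::__valarray_copy(__p, __n, __p + 1);   // overlapping ranges
  //
  // would be undefined.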
  // Copy strided array __src[<__n : __s1>] into another
  // strided array __dst[< : __s2>].  Their sizes must match.
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __src, size_t __n, size_t __s1,
                    _Tp* __restrict__ __dst, size_t __s2)
    {
      for (size_t __i = 0; __i < __n; ++__i)
        __dst[__i * __s2] = __src[__i * __s1];
    }

  // Copy an indexed array __a[__i[<__n>]] in plain array __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a,
                    const size_t* __restrict__ __i,
                    _Tp* __restrict__ __b, size_t __n)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__b, ++__i)
        *__b = __a[*__i];
    }

  // Copy a plain array __a[<__n>] in an indexed array __b[__i[<__n>]]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
                    _Tp* __restrict__ __b, const size_t* __restrict__ __i)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__a, ++__i)
        __b[*__i] = *__a;
    }

  // Copy the first __n elements of an indexed array __src[<__i>] into
  // another indexed array __dst[<__j>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __src, size_t __n,
                    const size_t* __restrict__ __i,
                    _Tp* __restrict__ __dst, const size_t* __restrict__ __j)
    {
      for (size_t __k = 0; __k < __n; ++__k)
        __dst[*__j++] = __src[*__i++];
    }

  //
  // Compute the sum of elements in range [__f, __l)
  // This is a naive algorithm.  It suffers from cancellation errors.
  // In the future, try to specialize for _Tp = float, double, long double
  // using a more accurate algorithm.
  //
  template<typename _Tp>
    inline _Tp
    __valarray_sum(const _Tp* __f, const _Tp* __l)
    {
      _Tp __r = _Tp();
      while (__f != __l)
        __r += *__f++;
      return __r;
    }

  // Compute the product of all elements in range [__f, __l)
  template<typename _Tp>
    inline _Tp
    __valarray_product(const _Tp* __f, const _Tp* __l)
    {
      _Tp __r = _Tp(1);
      while (__f != __l)
        __r = __r * *__f++;
      return __r;
    }

  // Compute the min/max of an array-expression
  template<typename _Ta>
    inline typename _Ta::value_type
    __valarray_min(const _Ta& __a)
    {
      size_t __s = __a.size();
      typedef typename _Ta::value_type _Value_type;
      _Value_type __r = __s == 0 ? _Value_type() : __a[0];
      for (size_t __i = 1; __i < __s; ++__i)
        {
          _Value_type __t = __a[__i];
          if (__t < __r)
            __r = __t;
        }
      return __r;
    }

  template<typename _Ta>
    inline typename _Ta::value_type
    __valarray_max(const _Ta& __a)
    {
      size_t __s = __a.size();
      typedef typename _Ta::value_type _Value_type;
      _Value_type __r = __s == 0 ? _Value_type() : __a[0];
      for (size_t __i = 1; __i < __s; ++__i)
        {
          _Value_type __t = __a[__i];
          if (__t > __r)
            __r = __t;
        }
      return __r;
    }
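  // For exposition only: with floating-point element types the naive
  // left-to-right accumulation in __valarray_sum above can lose precision.
  // For instance, given
  //
  //   const float __v[] = { 1e8f, 1.0f, -1e8f };
  //
  // __valarray_sum(__v, __v + 3) evaluates (1e8f + 1.0f) - 1e8f, which is
  // 0.0f rather than 1.0f, because 1.0f is absorbed when added to 1e8f.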
  //
  // Helper class _Array, first layer of valarray abstraction.
  // All operations on valarray should be forwarded to this class
  // whenever possible. -- gdr
  //

  template<typename _Tp>
    struct _Array
    {
      explicit _Array(size_t);
      explicit _Array(_Tp* const __restrict__);
      explicit _Array(const valarray<_Tp>&);
      _Array(const _Tp* __restrict__, size_t);

      _Tp* begin() const;

      _Tp* const __restrict__ _M_data;
    };


  // Copy-construct plain array __b[<__n>] from indexed array __a[__i[<__n>]]
  template<typename _Tp>
    inline void
    __valarray_copy_construct(_Array<_Tp> __a, _Array<size_t> __i,
                              _Array<_Tp> __b, size_t __n)
    { std::__valarray_copy_construct(__a._M_data, __i._M_data,
                                     __b._M_data, __n); }

  // Copy-construct plain array __b[<__n>] from strided array __a[<__n : __s>]
  template<typename _Tp>
    inline void
    __valarray_copy_construct(_Array<_Tp> __a, size_t __n, size_t __s,
                              _Array<_Tp> __b)
    { std::__valarray_copy_construct(__a._M_data, __n, __s, __b._M_data); }

  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, size_t __n, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __n, __t); }

  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, size_t __n, size_t __s, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __n, __s, __t); }

  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, _Array<size_t> __i,
                    size_t __n, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __i._M_data, __n, __t); }

  // Copy a plain array __a[<__n>] into a plain array __b[<>]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b)
    { std::__valarray_copy(__a._M_data, __n, __b._M_data); }

  // Copy strided array __a[<__n : __s>] in plain __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s, _Array<_Tp> __b)
    { std::__valarray_copy(__a._M_data, __n, __s, __b._M_data); }

  // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, _Array<_Tp> __b, size_t __n, size_t __s)
    { __valarray_copy(__a._M_data, __b._M_data, __n, __s); }

  // Copy strided array __src[<__n : __s1>] into another
  // strided array __dst[< : __s2>].  Their sizes must match.
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s1,
                    _Array<_Tp> __b, size_t __s2)
    { std::__valarray_copy(__a._M_data, __n, __s1, __b._M_data, __s2); }

  // Copy an indexed array __a[__i[<__n>]] in plain array __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, _Array<size_t> __i,
                    _Array<_Tp> __b, size_t __n)
    { std::__valarray_copy(__a._M_data, __i._M_data, __b._M_data, __n); }

  // Copy a plain array __a[<__n>] in an indexed array __b[__i[<__n>]]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b,
                    _Array<size_t> __i)
    { std::__valarray_copy(__a._M_data, __n, __b._M_data, __i._M_data); }
  // Copy the first __n elements of an indexed array __src[<__i>] into
  // another indexed array __dst[<__j>].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __src, size_t __n, _Array<size_t> __i,
                    _Array<_Tp> __dst, _Array<size_t> __j)
    {
      std::__valarray_copy(__src._M_data, __n, __i._M_data,
                           __dst._M_data, __j._M_data);
    }

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(size_t __n)
    : _M_data(__valarray_get_storage<_Tp>(__n))
    { std::__valarray_default_construct(_M_data, _M_data + __n); }

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(_Tp* const __restrict__ __p)
    : _M_data(__p) {}

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(const valarray<_Tp>& __v)
    : _M_data(__v._M_data) {}

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(const _Tp* __restrict__ __b, size_t __s)
    : _M_data(__valarray_get_storage<_Tp>(__s))
    { std::__valarray_copy_construct(__b, __s, _M_data); }

  template<typename _Tp>
    inline _Tp*
    _Array<_Tp>::begin() const
    { return _M_data; }
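  // For exposition only: the _DEFINE_ARRAY_FUNCTION macro below defines, for
  // each operator _Op with tag _Name, a family of _Array_augmented_##_Name
  // overloads covering the plain, strided, indexed, masked and
  // expression-template cases.  For example, _DEFINE_ARRAY_FUNCTION(+, __plus)
  // produces, among others,
  //
  //   template<typename _Tp>
  //     inline void
  //     _Array_augmented___plus(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b)
  //     {
  //       _Tp* __p = __a._M_data;
  //       for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; ++__p, ++__q)
  //         *__p += *__q;
  //     }
  //
  // which is the kind of overload valarray<_Tp>'s compound assignments
  // forward to.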
#define _DEFINE_ARRAY_FUNCTION(_Op, _Name) \
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, const _Tp& __t) \
    { \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; ++__p) \
        *__p _Op##= __t; \
    } \
\
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b) \
    { \
      _Tp* __p = __a._M_data; \
      for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; ++__p, ++__q) \
        *__p _Op##= *__q; \
    } \
\
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, ++__p) \
        *__p _Op##= __e[__i]; \
    } \
\
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, size_t __s, \
                             _Array<_Tp> __b) \
    { \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __s * __n; \
           __p += __s, ++__q) \
        *__p _Op##= *__q; \
    } \
\
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<_Tp> __b, \
                             size_t __n, size_t __s) \
    { \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; \
           ++__p, __q += __s) \
        *__p _Op##= *__q; \
    } \
\
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __s, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, __p += __s) \
        *__p _Op##= __e[__i]; \
    } \
\
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i, \
                             _Array<_Tp> __b, size_t __n) \
    { \
      _Tp* __q(__b._M_data); \
      for (size_t* __j = __i._M_data; __j < __i._M_data + __n; \
           ++__j, ++__q) \
        __a._M_data[*__j] _Op##= *__q; \
    } \
\
  template<typename _Tp> \
    inline void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, \
                             _Array<_Tp> __b, _Array<size_t> __i) \
    { \
      _Tp* __p(__a._M_data); \
      for (size_t* __j = __i._M_data; __j < __i._M_data + __n; \
           ++__j, ++__p) \
        *__p _Op##= __b._M_data[*__j]; \
    } \
\
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      size_t* __j(__i._M_data); \
      for (size_t __k = 0; __k < __n; ++__k, ++__j) \
        __a._M_data[*__j] _Op##= __e[__k]; \
    } \
\
  template<typename _Tp> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m, \
                             _Array<_Tp> __b, size_t __n) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __p(__a._M_data); \
      for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; \
           ++__q, ++__ok, ++__p) \
        { \
          while (! *__ok) \
            { \
              ++__ok; \
              ++__p; \
            } \
          *__p _Op##= *__q; \
        } \
    } \
\
  template<typename _Tp> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, \
                             _Array<_Tp> __b, _Array<bool> __m) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __q(__b._M_data); \
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; \
           ++__p, ++__ok, ++__q) \
        { \
          while (! *__ok) \
            { \
              ++__ok; \
              ++__q; \
            } \
          *__p _Op##= *__q; \
        } \
    } \
\
  template<typename _Tp, class _Dom> \
    void \
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m, \
                             const _Expr<_Dom, _Tp>& __e, size_t __n) \
    { \
      bool* __ok(__m._M_data); \
      _Tp* __p(__a._M_data); \
      for (size_t __i = 0; __i < __n; ++__i, ++__ok, ++__p) \
        { \
          while (! *__ok) \
            { \
              ++__ok; \
              ++__p; \
            } \
          *__p _Op##= __e[__i]; \
        } \
    }

_DEFINE_ARRAY_FUNCTION(+, __plus)
_DEFINE_ARRAY_FUNCTION(-, __minus)
_DEFINE_ARRAY_FUNCTION(*, __multiplies)
_DEFINE_ARRAY_FUNCTION(/, __divides)
_DEFINE_ARRAY_FUNCTION(%, __modulus)
_DEFINE_ARRAY_FUNCTION(^, __bitwise_xor)
_DEFINE_ARRAY_FUNCTION(|, __bitwise_or)
_DEFINE_ARRAY_FUNCTION(&, __bitwise_and)
_DEFINE_ARRAY_FUNCTION(<<, __shift_left)
_DEFINE_ARRAY_FUNCTION(>>, __shift_right)

#undef _DEFINE_ARRAY_FUNCTION

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

# include <bits/valarray_array.tcc>

#endif /* _VALARRAY_ARRAY_H */