// The template and inlines for the -*- C++ -*- internal _Array helper class.

// Copyright (C) 1997-2013 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/valarray_array.h
 *  This is an internal header file, included by other library headers.
 *  Do not attempt to use it directly. @headername{valarray}
 */

// Written by Gabriel Dos Reis <Gabriel.Dos-Reis@DPTMaths.ENS-Cachan.Fr>

#ifndef _VALARRAY_ARRAY_H
#define _VALARRAY_ARRAY_H 1

#pragma GCC system_header

#include <bits/c++config.h>
#include <bits/cpp_type_traits.h>
#include <cstdlib>
#include <new>

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  //
  // Helper functions on raw pointers
  //

  // We get memory the old-fashioned way
  inline void*
  __valarray_get_memory(size_t __n)
  { return operator new(__n); }

  template<typename _Tp>
    inline _Tp* __restrict__
    __valarray_get_storage(size_t __n)
    {
      return static_cast<_Tp* __restrict__>
	(std::__valarray_get_memory(__n * sizeof(_Tp)));
    }
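
  // Illustrative sketch (not part of the original interface): the storage
  // obtained here is raw memory, so a typical lifecycle pairs it with the
  // construct/destroy/release helpers defined later in this file:
  //
  //   double* __p = std::__valarray_get_storage<double>(__n);
  //   std::__valarray_default_construct(__p, __p + __n); // placement-new
  //   // ... use __p[0] .. __p[__n - 1] ...
  //   std::__valarray_destroy_elements(__p, __p + __n);
  //   std::__valarray_release_memory(__p);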

  // Return memory to the system
  inline void
  __valarray_release_memory(void* __p)
  { operator delete(__p); }

  // Turn raw memory into an array of _Tp filled with _Tp()
  // This is required in 'valarray v(n);'
  template<typename _Tp, bool>
    struct _Array_default_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(_Tp* __b, _Tp* __e)
      {
	while (__b != __e)
	  new(__b++) _Tp();
      }
    };

  template<typename _Tp>
    struct _Array_default_ctor<_Tp, true>
    {
      // For fundamental types, it suffices to say 'memset()'
      inline static void
      _S_do_it(_Tp* __b, _Tp* __e)
      { __builtin_memset(__b, 0, (__e - __b) * sizeof(_Tp)); }
    };

  template<typename _Tp>
    inline void
    __valarray_default_construct(_Tp* __b, _Tp* __e)
    {
      _Array_default_ctor<_Tp, __is_scalar<_Tp>::__value>::_S_do_it(__b, __e);
    }

  // Turn raw memory into an array of _Tp filled with __t
  // This is required in 'valarray v(n, t)'.  Also
  // used in valarray<>::resize().
  template<typename _Tp, bool>
    struct _Array_init_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(_Tp* __b, _Tp* __e, const _Tp __t)
      {
	while (__b != __e)
	  new(__b++) _Tp(__t);
      }
    };

  template<typename _Tp>
    struct _Array_init_ctor<_Tp, true>
    {
      inline static void
      _S_do_it(_Tp* __b, _Tp* __e, const _Tp __t)
      {
	while (__b != __e)
	  *__b++ = __t;
      }
    };

  template<typename _Tp>
    inline void
    __valarray_fill_construct(_Tp* __b, _Tp* __e, const _Tp __t)
    {
      _Array_init_ctor<_Tp, __is_trivial(_Tp)>::_S_do_it(__b, __e, __t);
    }

  //
  // copy-construct raw array [__o, *) from plain array [__b, __e)
  // We can't just say 'memcpy()'
  //
  template<typename _Tp, bool>
    struct _Array_copy_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(const _Tp* __b, const _Tp* __e, _Tp* __restrict__ __o)
      {
	while (__b != __e)
	  new(__o++) _Tp(*__b++);
      }
    };

  template<typename _Tp>
    struct _Array_copy_ctor<_Tp, true>
    {
      inline static void
      _S_do_it(const _Tp* __b, const _Tp* __e, _Tp* __restrict__ __o)
      { __builtin_memcpy(__o, __b, (__e - __b) * sizeof(_Tp)); }
    };

  template<typename _Tp>
    inline void
    __valarray_copy_construct(const _Tp* __b, const _Tp* __e,
			      _Tp* __restrict__ __o)
    {
      _Array_copy_ctor<_Tp, __is_trivial(_Tp)>::_S_do_it(__b, __e, __o);
    }
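
  // Notation used in the comments below: __a[<__n : __s>] denotes the __n
  // elements __a[0], __a[__s], __a[2 * __s], ... taken with stride __s, and
  // __a[__i[<__n>]] denotes the elements selected by the first __n indices
  // stored in __i.  For example, with __n == 3 and __s == 2 the strided
  // copy-construct below performs, elementwise,
  //
  //   __o[0] = __a[0]; __o[1] = __a[2]; __o[2] = __a[4];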

  // copy-construct raw array [__o, *) from strided array __a[<__n : __s>]
  template<typename _Tp>
    inline void
    __valarray_copy_construct (const _Tp* __restrict__ __a, size_t __n,
			       size_t __s, _Tp* __restrict__ __o)
    {
      if (__is_trivial(_Tp))
	while (__n--)
	  {
	    *__o++ = *__a;
	    __a += __s;
	  }
      else
	while (__n--)
	  {
	    new(__o++) _Tp(*__a);
	    __a += __s;
	  }
    }

  // copy-construct raw array [__o, *) from indexed array __a[__i[<__n>]]
  template<typename _Tp>
    inline void
    __valarray_copy_construct (const _Tp* __restrict__ __a,
			       const size_t* __restrict__ __i,
			       _Tp* __restrict__ __o, size_t __n)
    {
      if (__is_trivial(_Tp))
	while (__n--)
	  *__o++ = __a[*__i++];
      else
	while (__n--)
	  new (__o++) _Tp(__a[*__i++]);
    }

  // Do the necessary cleanup when we're done with arrays.
  template<typename _Tp>
    inline void
    __valarray_destroy_elements(_Tp* __b, _Tp* __e)
    {
      if (!__is_trivial(_Tp))
	while (__b != __e)
	  {
	    __b->~_Tp();
	    ++__b;
	  }
    }

  // Fill a plain array __a[<__n>] with __t
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, size_t __n, const _Tp& __t)
    {
      while (__n--)
	*__a++ = __t;
    }

  // Fill a strided array __a[<__n : __s>] with __t
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, size_t __n,
		    size_t __s, const _Tp& __t)
    {
      for (size_t __i = 0; __i < __n; ++__i, __a += __s)
	*__a = __t;
    }

  // Fill an indexed array __a[__i[<__n>]] with __t
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, const size_t* __restrict__ __i,
		    size_t __n, const _Tp& __t)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__i)
	__a[*__i] = __t;
    }

  // Copy plain array __a[<__n>] in __b[<__n>]
  // For non-fundamental types, it is wrong to say 'memcpy()'
  template<typename _Tp, bool>
    struct _Array_copier
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
      {
	while (__n--)
	  *__b++ = *__a++;
      }
    };

  template<typename _Tp>
    struct _Array_copier<_Tp, true>
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b)
      { __builtin_memcpy(__b, __a, __n * sizeof (_Tp)); }
    };

  // Copy a plain array __a[<__n>] into a plain array __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
		    _Tp* __restrict__ __b)
    {
      _Array_copier<_Tp, __is_trivial(_Tp)>::_S_do_it(__a, __n, __b);
    }

  // Copy a strided array __a[<__n : __s>] into a plain array __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n, size_t __s,
		    _Tp* __restrict__ __b)
    {
      for (size_t __i = 0; __i < __n; ++__i, ++__b, __a += __s)
	*__b = *__a;
    }

  // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, _Tp* __restrict__ __b,
		    size_t __n, size_t __s)
    {
      for (size_t __i = 0; __i < __n; ++__i, ++__a, __b += __s)
	*__b = *__a;
    }

  // Copy a strided array __src[<__n : __s1>] into another
  // strided array __dst[< : __s2>].  Their sizes must match.
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __src, size_t __n, size_t __s1,
		    _Tp* __restrict__ __dst, size_t __s2)
    {
      for (size_t __i = 0; __i < __n; ++__i)
	__dst[__i * __s2] = __src[__i * __s1];
    }

  // Copy an indexed array __a[__i[<__n>]] into a plain array __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a,
		    const size_t* __restrict__ __i,
		    _Tp* __restrict__ __b, size_t __n)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__b, ++__i)
	*__b = __a[*__i];
    }

  // Copy a plain array __a[<__n>] into an indexed array __b[__i[<__n>]]
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
		    _Tp* __restrict__ __b, const size_t* __restrict__ __i)
    {
      for (size_t __j = 0; __j < __n; ++__j, ++__a, ++__i)
	__b[*__i] = *__a;
    }

  // Copy the first __n elements of an indexed array __src[<__i>] into
  // another indexed array __dst[<__j>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __src, size_t __n,
		    const size_t* __restrict__ __i,
		    _Tp* __restrict__ __dst, const size_t* __restrict__ __j)
    {
      for (size_t __k = 0; __k < __n; ++__k)
	__dst[*__j++] = __src[*__i++];
    }

  //
  // Compute the sum of elements in range [__f, __l)
  // This is a naive algorithm.  It suffers from cancellation.
  // In the future try to specialize
  // for _Tp = float, double, long double using a more accurate
  // algorithm.
  //
  template<typename _Tp>
    inline _Tp
    __valarray_sum(const _Tp* __f, const _Tp* __l)
    {
      _Tp __r = _Tp();
      while (__f != __l)
	__r += *__f++;
      return __r;
    }

  // Compute the product of all elements in range [__f, __l)
  template<typename _Tp>
    inline _Tp
    __valarray_product(const _Tp* __f, const _Tp* __l)
    {
      _Tp __r = _Tp(1);
      while (__f != __l)
	__r = __r * *__f++;
      return __r;
    }
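
  // One candidate for the "more accurate algorithm" mentioned above is
  // Kahan compensated summation, which carries a running correction term
  // so small addends are not lost to rounding.  An illustrative sketch
  // (hypothetical helper, not used by this header):
  //
  //   template<typename _Tp>
  //     _Tp
  //     __kahan_sum(const _Tp* __f, const _Tp* __l)
  //     {
  //       _Tp __s = _Tp(), __c = _Tp();
  //       while (__f != __l)
  //         {
  //           _Tp __y = *__f++ - __c;  // compensated addend
  //           _Tp __t = __s + __y;     // low-order bits of __y may be lost
  //           __c = (__t - __s) - __y; // recover the lost bits
  //           __s = __t;
  //         }
  //       return __s;
  //     }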

  // Compute the min/max of an array-expression
  template<typename _Ta>
    inline typename _Ta::value_type
    __valarray_min(const _Ta& __a)
    {
      size_t __s = __a.size();
      typedef typename _Ta::value_type _Value_type;
      _Value_type __r = __s == 0 ? _Value_type() : __a[0];
      for (size_t __i = 1; __i < __s; ++__i)
	{
	  _Value_type __t = __a[__i];
	  if (__t < __r)
	    __r = __t;
	}
      return __r;
    }

  template<typename _Ta>
    inline typename _Ta::value_type
    __valarray_max(const _Ta& __a)
    {
      size_t __s = __a.size();
      typedef typename _Ta::value_type _Value_type;
      _Value_type __r = __s == 0 ? _Value_type() : __a[0];
      for (size_t __i = 1; __i < __s; ++__i)
	{
	  _Value_type __t = __a[__i];
	  if (__t > __r)
	    __r = __t;
	}
      return __r;
    }

  //
  // Helper class _Array, first layer of valarray abstraction.
  // All operations on valarray should be forwarded to this class
  // whenever possible. -- gdr
  //

  template<typename _Tp>
    struct _Array
    {
      explicit _Array(size_t);
      explicit _Array(_Tp* const __restrict__);
      explicit _Array(const valarray<_Tp>&);
      _Array(const _Tp* __restrict__, size_t);

      _Tp* begin() const;

      _Tp* const __restrict__ _M_data;
    };
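
  // Note: _Array is a thin wrapper around a bare pointer.  It has no
  // destructor, so it never releases _M_data itself; ownership stays with
  // the enclosing valarray, which invokes __valarray_destroy_elements and
  // __valarray_release_memory as needed.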

  // Copy-construct plain array __b[<__n>] from indexed array __a[__i[<__n>]]
  template<typename _Tp>
    inline void
    __valarray_copy_construct(_Array<_Tp> __a, _Array<size_t> __i,
			      _Array<_Tp> __b, size_t __n)
    { std::__valarray_copy_construct(__a._M_data, __i._M_data,
				     __b._M_data, __n); }

  // Copy-construct plain array __b[<__n>] from strided array __a[<__n : __s>]
  template<typename _Tp>
    inline void
    __valarray_copy_construct(_Array<_Tp> __a, size_t __n, size_t __s,
			      _Array<_Tp> __b)
    { std::__valarray_copy_construct(__a._M_data, __n, __s, __b._M_data); }

  template<typename _Tp>
    inline void
    __valarray_fill (_Array<_Tp> __a, size_t __n, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __n, __t); }

  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, size_t __n, size_t __s, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __n, __s, __t); }

  template<typename _Tp>
    inline void
    __valarray_fill(_Array<_Tp> __a, _Array<size_t> __i,
		    size_t __n, const _Tp& __t)
    { std::__valarray_fill(__a._M_data, __i._M_data, __n, __t); }

  // Copy a plain array __a[<__n>] into a plain array __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b)
    { std::__valarray_copy(__a._M_data, __n, __b._M_data); }

  // Copy a strided array __a[<__n : __s>] into a plain array __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s, _Array<_Tp> __b)
    { std::__valarray_copy(__a._M_data, __n, __s, __b._M_data); }

  // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, _Array<_Tp> __b, size_t __n, size_t __s)
    { std::__valarray_copy(__a._M_data, __b._M_data, __n, __s); }

  // Copy a strided array __src[<__n : __s1>] into another
  // strided array __dst[< : __s2>].  Their sizes must match.
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s1,
                    _Array<_Tp> __b, size_t __s2)
    { std::__valarray_copy(__a._M_data, __n, __s1, __b._M_data, __s2); }

  // Copy an indexed array __a[__i[<__n>]] into a plain array __b[<__n>]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, _Array<size_t> __i,
		    _Array<_Tp> __b, size_t __n)
    { std::__valarray_copy(__a._M_data, __i._M_data, __b._M_data, __n); }

  // Copy a plain array __a[<__n>] into an indexed array __b[__i[<__n>]]
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b,
		    _Array<size_t> __i)
    { std::__valarray_copy(__a._M_data, __n, __b._M_data, __i._M_data); }

  // Copy the first __n elements of an indexed array __src[<__i>] into
  // another indexed array __dst[<__j>].
  template<typename _Tp>
    inline void
    __valarray_copy(_Array<_Tp> __src, size_t __n, _Array<size_t> __i,
                    _Array<_Tp> __dst, _Array<size_t> __j)
    {
      std::__valarray_copy(__src._M_data, __n, __i._M_data,
			   __dst._M_data, __j._M_data);
    }

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(size_t __n)
    : _M_data(__valarray_get_storage<_Tp>(__n))
    { std::__valarray_default_construct(_M_data, _M_data + __n); }

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(_Tp* const __restrict__ __p)
    : _M_data (__p) {}

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(const valarray<_Tp>& __v)
    : _M_data (__v._M_data) {}

  template<typename _Tp>
    inline
    _Array<_Tp>::_Array(const _Tp* __restrict__ __b, size_t __s)
    : _M_data(__valarray_get_storage<_Tp>(__s))
    { std::__valarray_copy_construct(__b, __b + __s, _M_data); }

  template<typename _Tp>
    inline _Tp*
    _Array<_Tp>::begin () const
    { return _M_data; }
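
  // Illustrative usage (hypothetical, mirroring what valarray does
  // internally): _Array<double> __a(__n) allocates and value-initializes
  // __n doubles, while _Array<double> __b(__ptr) merely aliases storage
  // that already exists.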

#define _DEFINE_ARRAY_FUNCTION(_Op, _Name)				\
  template<typename _Tp>		        			\
    inline void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, const _Tp& __t) \
    {									\
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; ++__p)	\
        *__p _Op##= __t;						\
    }									\
									\
  template<typename _Tp>						\
    inline void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b) \
    {									\
      _Tp* __p = __a._M_data;						\
      for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; ++__p, ++__q) \
        *__p _Op##= *__q;						\
    }									\
									\
  template<typename _Tp, class _Dom>					\
    void								\
    _Array_augmented_##_Name(_Array<_Tp> __a,	        		\
                             const _Expr<_Dom, _Tp>& __e, size_t __n)	\
    {									\
      _Tp* __p(__a._M_data);						\
      for (size_t __i = 0; __i < __n; ++__i, ++__p)                     \
        *__p _Op##= __e[__i];                                          	\
    }									\
									\
  template<typename _Tp>						\
    inline void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, size_t __s,	\
	                     _Array<_Tp> __b)				\
    {									\
      _Tp* __q(__b._M_data);						\
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __s * __n;       \
	   __p += __s, ++__q)                                           \
        *__p _Op##= *__q;						\
    }									\
									\
  template<typename _Tp>						\
    inline void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<_Tp> __b,		\
		             size_t __n, size_t __s)			\
    {									\
      _Tp* __q(__b._M_data);						\
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n;             \
	   ++__p, __q += __s)                                           \
        *__p _Op##= *__q;						\
    }									\
									\
  template<typename _Tp, class _Dom>					\
    void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __s,		\
                             const _Expr<_Dom, _Tp>& __e, size_t __n)	\
    {									\
      _Tp* __p(__a._M_data);						\
      for (size_t __i = 0; __i < __n; ++__i, __p += __s)                \
        *__p _Op##= __e[__i];                                          	\
    }									\
									\
  template<typename _Tp>						\
    inline void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i,	\
                             _Array<_Tp> __b, size_t __n)		\
    {									\
      _Tp* __q(__b._M_data);						\
      for (size_t* __j = __i._M_data; __j < __i._M_data + __n;          \
           ++__j, ++__q)                                                \
        __a._M_data[*__j] _Op##= *__q;					\
    }									\
									\
  template<typename _Tp>						\
    inline void					        		\
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n,		\
                             _Array<_Tp> __b, _Array<size_t> __i)	\
    {									\
      _Tp* __p(__a._M_data);						\
      for (size_t* __j = __i._M_data; __j < __i._M_data + __n;          \
	   ++__j, ++__p)                                                \
        *__p _Op##= __b._M_data[*__j];					\
    }									\
									\
  template<typename _Tp, class _Dom>					\
    void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i,	\
                             const _Expr<_Dom, _Tp>& __e, size_t __n)	\
    {									\
      size_t* __j(__i._M_data);	        				\
      for (size_t __k = 0; __k < __n; ++__k, ++__j)			\
        __a._M_data[*__j] _Op##= __e[__k];				\
    }									\
									\
  template<typename _Tp>						\
    void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m,         \
                             _Array<_Tp> __b, size_t __n)		\
    {									\
      bool* __ok(__m._M_data);						\
      _Tp* __p(__a._M_data);						\
      for (_Tp* __q = __b._M_data; __q < __b._M_data + __n;             \
	   ++__q, ++__ok, ++__p)                                        \
        {                                                               \
          while (! *__ok)                                               \
            {						        	\
              ++__ok;							\
              ++__p;							\
            }								\
          *__p _Op##= *__q;						\
        }								\
    }									\
									\
  template<typename _Tp>						\
    void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n,		\
                             _Array<_Tp> __b, _Array<bool> __m)   	\
    {									\
      bool* __ok(__m._M_data);						\
      _Tp* __q(__b._M_data);						\
      for (_Tp* __p = __a._M_data; __p < __a._M_data + __n;             \
	   ++__p, ++__ok, ++__q)                                        \
        {                                                               \
          while (! *__ok)                                               \
            {					        		\
              ++__ok;							\
              ++__q;							\
            }								\
          *__p _Op##= *__q;						\
        }								\
    }									\
									\
  template<typename _Tp, class _Dom>					\
    void								\
    _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m,  	\
                             const _Expr<_Dom, _Tp>& __e, size_t __n)	\
    {									\
      bool* __ok(__m._M_data);						\
      _Tp* __p(__a._M_data);						\
      for (size_t __i = 0; __i < __n; ++__i, ++__ok, ++__p)             \
        {	                                           		\
          while (! *__ok)                                               \
            {		         					\
	      ++__ok;							\
              ++__p;							\
            }								\
          *__p _Op##= __e[__i];						\
        }								\
    }
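
// For example, _DEFINE_ARRAY_FUNCTION(+, __plus) below expands the first
// overload above into (roughly):
//
//   template<typename _Tp>
//     inline void
//     _Array_augmented___plus(_Array<_Tp> __a, size_t __n, const _Tp& __t)
//     {
//       for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; ++__p)
//         *__p += __t;
//     }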

   _DEFINE_ARRAY_FUNCTION(+, __plus)
   _DEFINE_ARRAY_FUNCTION(-, __minus)
   _DEFINE_ARRAY_FUNCTION(*, __multiplies)
   _DEFINE_ARRAY_FUNCTION(/, __divides)
   _DEFINE_ARRAY_FUNCTION(%, __modulus)
   _DEFINE_ARRAY_FUNCTION(^, __bitwise_xor)
   _DEFINE_ARRAY_FUNCTION(|, __bitwise_or)
   _DEFINE_ARRAY_FUNCTION(&, __bitwise_and)
   _DEFINE_ARRAY_FUNCTION(<<, __shift_left)
   _DEFINE_ARRAY_FUNCTION(>>, __shift_right)

#undef _DEFINE_ARRAY_FUNCTION

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

# include <bits/valarray_array.tcc>

#endif /* _VALARRAY_ARRAY_H */