// Recovered from an SVN web viewer scrape (KolibriOS repository,
// rev 4680); viewer navigation chrome removed.
// The template and inlines for the -*- C++ -*- internal _Array helper class.

// Copyright (C) 1997-2000 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 2, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING.  If not, write to the Free
// Software Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307,
// USA.

// As a special exception, you may use this file as part of a free software
// library without restriction.  Specifically, if other files instantiate
// templates or use macros or inline functions from this file, or you compile
// this file and link it with other files to produce an executable, this
// file does not by itself cause the resulting executable to be covered by
// the GNU General Public License.  This exception does not however
// invalidate any other reasons why the executable file might be covered by
// the GNU General Public License.

// Written by Gabriel Dos Reis <Gabriel.Dos-Reis@DPTMaths.ENS-Cachan.Fr>
31
 
32
#ifndef _CPP_BITS_ARRAY_H
#define _CPP_BITS_ARRAY_H 1

#pragma GCC system_header
36
 
37
#include <bits/c++config.h>
#include <bits/cpp_type_traits.h>
#include <cstdlib>
#include <cstring>
#include <new>
42
 
43
namespace std
{

  //
  // Helper functions on raw pointers
  //
49
 
50
  // We get memory the old-fashioned way: a plain call to operator new.
  // __n is a byte count, not an element count; no constructors are run.
  inline void*
  __valarray_get_memory(size_t __n)
  { return operator new(__n); }
54
 
55
  template
56
  inline _Tp*__restrict__
57
  __valarray_get_storage(size_t __n)
58
  {
59
    return static_cast<_Tp*__restrict__>
60
      (__valarray_get_memory(__n * sizeof(_Tp)));
61
  }
62
 
63
  // Return memory obtained through __valarray_get_memory to the system.
  // Destructors are NOT run here; see __valarray_destroy_elements.
  inline void
  __valarray_release_memory(void* __p)
  { operator delete(__p); }
67
 
68
  // Turn raw memory into an array of _Tp filled with _Tp().
  // This is required in 'valarray<_Tp> v(n);'.  The bool template
  // parameter selects a memset-based fast path for fundamental types.
  template<typename _Tp, bool>
    struct _Array_default_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
      { while (__b != __e) new(__b++) _Tp(); }
    };

  template<typename _Tp>
    struct _Array_default_ctor<_Tp, true>
    {
      // For fundamental types, it suffices to say 'memset()':
      // value-initialization of such types is all-bits-zero.
      inline static void
      _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
      { memset(__b, 0, (__e - __b) * sizeof(_Tp)); }
    };
88
 
89
  template
90
  inline void
91
  __valarray_default_construct(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
92
  {
93
    _Array_default_ctor<_Tp, __is_fundamental<_Tp>::_M_type>::
94
      _S_do_it(__b, __e);
95
  }
96
 
97
  // Turn raw memory into an array of _Tp filled with the value __t.
  // This is required in 'valarray<_Tp> v(n, t)' and is also used in
  // valarray<>::resize().  The bool parameter selects a plain
  // assignment loop for fundamental types.
  template<typename _Tp, bool>
    struct _Array_init_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e, const _Tp __t)
      { while (__b != __e) new(__b++) _Tp(__t); }
    };

  template<typename _Tp>
    struct _Array_init_ctor<_Tp, true>
    {
      // Fundamental types need no placement new; assignment suffices.
      inline static void
      _S_do_it(_Tp* __restrict__ __b, _Tp* __restrict__ __e, const _Tp __t)
      { while (__b != __e) *__b++ = __t; }
    };
117
 
118
  template
119
  inline void
120
  __valarray_fill_construct(_Tp* __restrict__ __b, _Tp* __restrict__ __e,
121
                            const _Tp __t)
122
  {
123
    _Array_init_ctor<_Tp, __is_fundamental<_Tp>::_M_type>::
124
      _S_do_it(__b, __e, __t);
125
  }
126
 
127
  //
  // Copy-construct raw array [__o, *) from plain array [__b, __e).
  // We can't just say 'memcpy()' for arbitrary types, so the primary
  // template placement-news each element; the 'true' specialization
  // is the memcpy fast path for fundamental types.
  //
  template<typename _Tp, bool>
    struct _Array_copy_ctor
    {
      // Please note that this isn't exception safe.  But
      // valarrays aren't required to be exception safe.
      inline static void
      _S_do_it(const _Tp* __restrict__ __b, const _Tp* __restrict__ __e,
               _Tp* __restrict__ __o)
      { while (__b != __e) new(__o++) _Tp(*__b++); }
    };

  template<typename _Tp>
    struct _Array_copy_ctor<_Tp, true>
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __b, const _Tp* __restrict__ __e,
               _Tp* __restrict__ __o)
      { memcpy(__o, __b, (__e - __b) * sizeof(_Tp)); }
    };
150
 
151
  template
152
  inline void
153
  __valarray_copy_construct(const _Tp* __restrict__ __b,
154
                            const _Tp* __restrict__ __e,
155
                            _Tp* __restrict__ __o)
156
  {
157
    _Array_copy_ctor<_Tp, __is_fundamental<_Tp>::_M_type>::
158
      _S_do_it(__b, __e, __o);
159
  }
160
 
161
  // copy-construct raw array [__o, *) from strided array __a[<__n : __s>]
162
  template
163
  inline void
164
  __valarray_copy_construct (const _Tp* __restrict__ __a, size_t __n,
165
                             size_t __s, _Tp* __restrict__ __o)
166
  {
167
    if (__is_fundamental<_Tp>::_M_type)
168
      while (__n--) { *__o++ = *__a; __a += __s; }
169
    else
170
      while (__n--) { new(__o++) _Tp(*__a);  __a += __s; }
171
  }
172
 
173
  // copy-construct raw array [__o, *) from indexed array __a[__i[<__n>]]
174
  template
175
  inline void
176
  __valarray_copy_construct (const _Tp* __restrict__ __a,
177
                             const size_t* __restrict__ __i,
178
                             _Tp* __restrict__ __o, size_t __n)
179
  {
180
    if (__is_fundamental<_Tp>::_M_type)
181
      while (__n--) *__o++ = __a[*__i++];
182
    else
183
      while (__n--) new (__o++) _Tp(__a[*__i++]);
184
  }
185
 
186
  // Do the necessary cleanup when we're done with arrays.
187
  template
188
  inline void
189
  __valarray_destroy_elements(_Tp* __restrict__ __b, _Tp* __restrict__ __e)
190
  {
191
    if (!__is_fundamental<_Tp>::_M_type)
192
      while (__b != __e) { __b->~_Tp(); ++__b; }
193
  }
194
 
195
  // Fill plain array __a[<__n>] with __t.
  template<typename _Tp>
    void
    __valarray_fill(_Tp* __restrict__ __a, size_t __n, const _Tp& __t)
    { while (__n--) *__a++ = __t; }
200
 
201
  // Fill strided array __a[<__n - 1 : __s>] with __t: __n elements at
  // stride __s starting at __a.
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, size_t __n,
                    size_t __s, const _Tp& __t)
    { for (size_t __i = 0; __i < __n; ++__i, __a += __s) *__a = __t; }
207
 
208
  // Fill indirect array __a[__i[<__n>]] with __t.
  // (Original comment read "with __i" — the value written is __t.)
  template<typename _Tp>
    inline void
    __valarray_fill(_Tp* __restrict__ __a, const size_t* __restrict__ __i,
                    size_t __n, const _Tp& __t)
    { for (size_t __j = 0; __j < __n; ++__j, ++__i) __a[*__i] = __t; }
214
 
215
  // Copy plain array __a[<__n>] into __b[<__n>].
  // For non-fundamental types it is wrong to say 'memcpy()', so the
  // primary template assigns element by element; the 'true'
  // specialization is the memcpy fast path.
  template<typename _Tp, bool>
    struct _Array_copier
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __a, size_t __n,
               _Tp* __restrict__ __b)
      { while (__n--) *__b++ = *__a++; }
    };

  template<typename _Tp>
    struct _Array_copier<_Tp, true>
    {
      inline static void
      _S_do_it(const _Tp* __restrict__ __a, size_t __n,
               _Tp* __restrict__ __b)
      { memcpy(__b, __a, __n * sizeof(_Tp)); }
    };
232
 
233
  template
234
  inline void
235
  __valarray_copy (const _Tp* __restrict__ __a, size_t __n,
236
                   _Tp* __restrict__ __b)
237
  {
238
    _Array_copier<_Tp, __is_fundamental<_Tp>::_M_type>::
239
      _S_do_it(__a, __n, __b);
240
  }
241
 
242
  // Copy strided array __a[<__n : __s>] into plain __b[<__n>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n, size_t __s,
                    _Tp* __restrict__ __b)
    { for (size_t __i = 0; __i < __n; ++__i, ++__b, __a += __s) *__b = *__a; }
248
 
249
  // Copy plain __a[<__n>] into strided __b[<__n : __s>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, _Tp* __restrict__ __b,
                    size_t __n, size_t __s)
    { for (size_t __i = 0; __i < __n; ++__i, ++__a, __b += __s) *__b = *__a; }
255
 
256
  // Copy indexed __a[__i[<__n>]] into plain __b[<__n>].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a,
                    const size_t* __restrict__ __i,
                    _Tp* __restrict__ __b, size_t __n)
    { for (size_t __j = 0; __j < __n; ++__j, ++__b, ++__i) *__b = __a[*__i]; }
263
 
264
  // Copy plain __a[<__n>] into indexed __b[__i[<__n>]].
  template<typename _Tp>
    inline void
    __valarray_copy(const _Tp* __restrict__ __a, size_t __n,
                    _Tp* __restrict__ __b, const size_t* __restrict__ __i)
    { for (size_t __j = 0; __j < __n; ++__j, ++__a, ++__i) __b[*__i] = *__a; }
270
 
271
 
272
  //
  // Compute the sum of elements in range [__f, __l).
  // This is a naive algorithm.  It suffers from cancelling.
  // In the future try to specialize for _Tp = float, double,
  // long double using a more accurate algorithm.
  // Returns a value-initialized _Tp() for an empty range.
  //
  template<typename _Tp>
    inline _Tp
    __valarray_sum(const _Tp* __restrict__ __f, const _Tp* __restrict__ __l)
    {
      _Tp __r = _Tp();
      while (__f != __l) __r += *__f++;
      return __r;
    }
287
 
288
  // Compute the product of all elements in range [__f, __l).
  // Returns _Tp(1) for an empty range (multiplicative identity).
  template<typename _Tp>
    inline _Tp
    __valarray_product(const _Tp* __restrict__ __f,
                       const _Tp* __restrict__ __l)
    {
      _Tp __r = _Tp(1);
      while (__f != __l) __r = __r * *__f++;
      return __r;
    }
298
 
299
  // Compute the min of an array-expression.  _Ta must provide size(),
  // operator[] and a nested value_type.  Returns a value-initialized
  // value_type when the expression is empty.
  template<typename _Ta>
    inline typename _Ta::value_type
    __valarray_min(const _Ta& __a)
    {
      size_t __s = __a.size();
      typedef typename _Ta::value_type _Value_type;
      _Value_type __r = __s == 0 ? _Value_type() : __a[0];
      for (size_t __i = 1; __i < __s; ++__i)
        {
          _Value_type __t = __a[__i];
          if (__t < __r)
            __r = __t;
        }
      return __r;
    }
315
 
316
  // Compute the max of an array-expression; same contract as
  // __valarray_min above.
  template<typename _Ta>
    inline typename _Ta::value_type
    __valarray_max(const _Ta& __a)
    {
      size_t __s = __a.size();
      typedef typename _Ta::value_type _Value_type;
      _Value_type __r = __s == 0 ? _Value_type() : __a[0];
      for (size_t __i = 1; __i < __s; ++__i)
        {
          _Value_type __t = __a[__i];
          if (__t > __r)
            __r = __t;
        }
      return __r;
    }
331
 
332
  //
333
  // Helper class _Array, first layer of valarray abstraction.
334
  // All operations on valarray should be forwarded to this class
335
  // whenever possible. -- gdr
336
  //
337
 
338
  template
339
  struct _Array
340
  {
341
    explicit _Array (size_t);
342
    explicit _Array (_Tp* const __restrict__);
343
    explicit _Array (const valarray<_Tp>&);
344
    _Array (const _Tp* __restrict__, size_t);
345
 
346
    _Tp* begin () const;
347
 
348
    _Tp* const __restrict__ _M_data;
349
  };
350
 
351
  template
352
  inline void
353
  __valarray_fill (_Array<_Tp> __a, size_t __n, const _Tp& __t)
354
  { __valarray_fill (__a._M_data, __n, __t); }
355
 
356
  template
357
  inline void
358
  __valarray_fill (_Array<_Tp> __a, size_t __n, size_t __s, const _Tp& __t)
359
  { __valarray_fill (__a._M_data, __n, __s, __t); }
360
 
361
  template
362
  inline void
363
  __valarray_fill (_Array<_Tp> __a, _Array __i,
364
                   size_t __n, const _Tp& __t)
365
  { __valarray_fill (__a._M_data, __i._M_data, __n, __t); }
366
 
367
  template
368
  inline void
369
  __valarray_copy (_Array<_Tp> __a, size_t __n, _Array<_Tp> __b)
370
  { __valarray_copy (__a._M_data, __n, __b._M_data); }
371
 
372
  template
373
  inline void
374
  __valarray_copy (_Array<_Tp> __a, size_t __n, size_t __s, _Array<_Tp> __b)
375
  { __valarray_copy(__a._M_data, __n, __s, __b._M_data); }
376
 
377
  template
378
  inline void
379
  __valarray_copy (_Array<_Tp> __a, _Array<_Tp> __b, size_t __n, size_t __s)
380
  { __valarray_copy (__a._M_data, __b._M_data, __n, __s); }
381
 
382
  template
383
  inline void
384
  __valarray_copy (_Array<_Tp> __a, _Array __i,
385
                   _Array<_Tp> __b, size_t __n)
386
  { __valarray_copy (__a._M_data, __i._M_data, __b._M_data, __n); }
387
 
388
  template
389
  inline void
390
  __valarray_copy (_Array<_Tp> __a, size_t __n, _Array<_Tp> __b,
391
                   _Array __i)
392
  { __valarray_copy (__a._M_data, __n, __b._M_data, __i._M_data); }
393
 
394
  template
395
  inline
396
  _Array<_Tp>::_Array (size_t __n)
397
    : _M_data(__valarray_get_storage<_Tp>(__n))
398
  { __valarray_default_construct(_M_data, _M_data + __n); }
399
 
400
  template
401
  inline
402
  _Array<_Tp>::_Array (_Tp* const __restrict__ __p) : _M_data (__p) {}
403
 
404
  template
405
  inline _Array<_Tp>::_Array (const valarray<_Tp>& __v)
406
      : _M_data (__v._M_data) {}
407
 
408
  template
409
  inline
410
  _Array<_Tp>::_Array (const _Tp* __restrict__ __b, size_t __s)
411
    : _M_data(__valarray_get_storage<_Tp>(__s))
412
  { __valarray_copy_construct(__b, __s, _M_data); }
413
 
414
  template
415
  inline _Tp*
416
  _Array<_Tp>::begin () const
417
  { return _M_data; }
418
 
419
// _DEFINE_ARRAY_FUNCTION(op, name) defines the _Array_augmented_<name>
// family of helpers that apply 'op=' element-wise over plain, strided,
// indexed (_Array<size_t>) and masked (_Array<bool>) views, and over
// _Expr expression templates.  Expanded once per operator below.
// (Local 'ok' renamed to reserved '__ok' — user code may #define ok.)
#define _DEFINE_ARRAY_FUNCTION(_Op, _Name)				\
template<typename _Tp>							\
inline void								\
_Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, const _Tp& __t)	\
{									\
  for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; ++__p)		\
    *__p _Op##= __t;							\
}									\
									\
template<typename _Tp>							\
inline void								\
_Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b)	\
{									\
  _Tp* __p = __a._M_data;						\
  for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; ++__p, ++__q)	\
    *__p _Op##= *__q;							\
}									\
									\
template<typename _Tp, class _Dom>					\
void									\
_Array_augmented_##_Name(_Array<_Tp> __a,				\
                         const _Expr<_Dom, _Tp>& __e, size_t __n)	\
{									\
  _Tp* __p(__a._M_data);						\
  for (size_t __i = 0; __i < __n; ++__i, ++__p)				\
    *__p _Op##= __e[__i];						\
}									\
									\
template<typename _Tp>							\
inline void								\
_Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, size_t __s,	\
                         _Array<_Tp> __b)				\
{									\
  _Tp* __q(__b._M_data);						\
  for (_Tp* __p = __a._M_data; __p < __a._M_data + __s * __n;		\
       __p += __s, ++__q)						\
    *__p _Op##= *__q;							\
}									\
									\
template<typename _Tp>							\
inline void								\
_Array_augmented_##_Name(_Array<_Tp> __a, _Array<_Tp> __b,		\
                         size_t __n, size_t __s)			\
{									\
  _Tp* __q(__b._M_data);						\
  for (_Tp* __p = __a._M_data; __p < __a._M_data + __n;			\
       ++__p, __q += __s)						\
    *__p _Op##= *__q;							\
}									\
									\
template<typename _Tp, class _Dom>					\
void									\
_Array_augmented_##_Name(_Array<_Tp> __a, size_t __s,			\
                         const _Expr<_Dom, _Tp>& __e, size_t __n)	\
{									\
  _Tp* __p(__a._M_data);						\
  for (size_t __i = 0; __i < __n; ++__i, __p += __s)			\
    *__p _Op##= __e[__i];						\
}									\
									\
template<typename _Tp>							\
inline void								\
_Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i,		\
                         _Array<_Tp> __b, size_t __n)			\
{									\
  _Tp* __q(__b._M_data);						\
  for (size_t* __j = __i._M_data; __j < __i._M_data + __n;		\
       ++__j, ++__q)							\
    __a._M_data[*__j] _Op##= *__q;					\
}									\
									\
template<typename _Tp>							\
inline void								\
_Array_augmented_##_Name(_Array<_Tp> __a, size_t __n,			\
                         _Array<_Tp> __b, _Array<size_t> __i)		\
{									\
  _Tp* __p(__a._M_data);						\
  for (size_t* __j = __i._M_data; __j < __i._M_data + __n;		\
       ++__j, ++__p)							\
    *__p _Op##= __b._M_data[*__j];					\
}									\
									\
template<typename _Tp, class _Dom>					\
void									\
_Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i,		\
                         const _Expr<_Dom, _Tp>& __e, size_t __n)	\
{									\
  size_t* __j(__i._M_data);						\
  for (size_t __k = 0; __k < __n; ++__k, ++__j)				\
    __a._M_data[*__j] _Op##= __e[__k];					\
}									\
									\
template<typename _Tp>							\
void									\
_Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m,		\
                         _Array<_Tp> __b, size_t __n)			\
{									\
  bool* __ok(__m._M_data);						\
  _Tp* __p(__a._M_data);						\
  for (_Tp* __q = __b._M_data; __q < __b._M_data + __n;			\
       ++__q, ++__ok, ++__p)						\
    {									\
      while (!*__ok)							\
        {								\
          ++__ok;							\
          ++__p;							\
        }								\
      *__p _Op##= *__q;							\
    }									\
}									\
									\
template<typename _Tp>							\
void									\
_Array_augmented_##_Name(_Array<_Tp> __a, size_t __n,			\
                         _Array<_Tp> __b, _Array<bool> __m)		\
{									\
  bool* __ok(__m._M_data);						\
  _Tp* __q(__b._M_data);						\
  for (_Tp* __p = __a._M_data; __p < __a._M_data + __n;			\
       ++__p, ++__ok, ++__q)						\
    {									\
      while (!*__ok)							\
        {								\
          ++__ok;							\
          ++__q;							\
        }								\
      *__p _Op##= *__q;							\
    }									\
}									\
									\
template<typename _Tp, class _Dom>					\
void									\
_Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m,		\
                         const _Expr<_Dom, _Tp>& __e, size_t __n)	\
{									\
  bool* __ok(__m._M_data);						\
  _Tp* __p(__a._M_data);						\
  for (size_t __i = 0; __i < __n; ++__i, ++__ok, ++__p)			\
    {									\
      while (!*__ok)							\
        {								\
          ++__ok;							\
          ++__p;							\
        }								\
      *__p _Op##= __e[__i];						\
    }									\
}
552
 
553
// Instantiate the augmented-assignment helper family for each
// compound-assignment operator supported by valarray.
_DEFINE_ARRAY_FUNCTION(+, plus)
_DEFINE_ARRAY_FUNCTION(-, minus)
_DEFINE_ARRAY_FUNCTION(*, multiplies)
_DEFINE_ARRAY_FUNCTION(/, divides)
_DEFINE_ARRAY_FUNCTION(%, modulus)
_DEFINE_ARRAY_FUNCTION(^, xor)
_DEFINE_ARRAY_FUNCTION(|, or)
_DEFINE_ARRAY_FUNCTION(&, and)
_DEFINE_ARRAY_FUNCTION(<<, shift_left)
_DEFINE_ARRAY_FUNCTION(>>, shift_right)

// Fixed: was '#undef _DEFINE_VALARRAY_FUNCTION', which undefined a
// macro that was never defined and leaked _DEFINE_ARRAY_FUNCTION into
// every translation unit that includes this header.
#undef _DEFINE_ARRAY_FUNCTION
565
 
566
} // namespace std
567
 
568
// When the compiler cannot honour 'export' templates, neutralize the
// keyword and pull in the out-of-line template definitions so this
// header is self-contained.
#ifdef _GLIBCPP_NO_TEMPLATE_EXPORT
# define export
# include <bits/valarray_array.tcc>
#endif
572
 
573
#endif /* _CPP_BITS_ARRAY_H */

// Local Variables:
// mode:c++
// End: