// Allocator details.

// Copyright (C) 2004-2013 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

//
// ISO C++ 14882:
//

#include <bits/c++config.h>
#include <ext/concurrence.h>
#include <ext/mt_allocator.h>
#include <cstring>

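// The anonymous namespace below holds process-wide bookkeeping for the
// threaded pool: an array of _Thread_record entries from which thread
// ids are handed out, plus the mutex (get_freelist_mutex) that guards it.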
namespace
{
#ifdef __GTHREADS
  struct __freelist
  {
    typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
    _Thread_record*     _M_thread_freelist;
    _Thread_record*     _M_thread_freelist_array;
    size_t              _M_max_threads;
    __gthread_key_t     _M_key;

    ~__freelist()
    {
      if (_M_thread_freelist_array)
        {
          __gthread_key_delete(_M_key);
          ::operator delete(static_cast<void*>(_M_thread_freelist_array));
          _M_thread_freelist = 0;
        }
    }
  };

  __freelist&
  get_freelist()
  {
    static __freelist freelist;
    return freelist;
  }

  __gnu_cxx::__mutex&
  get_freelist_mutex()
  {
    static __gnu_cxx::__mutex freelist_mutex;
    return freelist_mutex;
  }

  static void
  _M_destroy_thread_key(void* __id)
  {
    // Return this thread id record to the front of thread_freelist.
    __freelist& freelist = get_freelist();
    {
      __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
      size_t _M_id = reinterpret_cast<size_t>(__id);

      typedef __gnu_cxx::__pool<true>::_Thread_record _Thread_record;
      _Thread_record* __tr = &freelist._M_thread_freelist_array[_M_id - 1];
      __tr->_M_next = freelist._M_thread_freelist;
      freelist._M_thread_freelist = __tr;
    }
  }
#endif
} // anonymous namespace

namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION
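
  // Definitions for the single-threaded specialization, __pool<false>.
  // Every block lives on the one global free list (index 0), so no
  // locking or ownership tracking is needed.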
 
  void
  __pool<false>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            while (__bin._M_address)
              {
                _Block_address* __tmp = __bin._M_address->_M_next;
                ::operator delete(__bin._M_address->_M_initial);
                __bin._M_address = __tmp;
              }
            ::operator delete(__bin._M_first);
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }

  void
  __pool<false>::_M_reclaim_block(char* __p, size_t __bytes) throw ()
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];

    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);

    // Single threaded application - return to global pool.
    __block->_M_next = __bin._M_first[0];
    __bin._M_first[0] = __block;
  }
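
  // _M_reserve_block carves a fresh chunk of _M_chunk_size bytes into
  // equal blocks of __bin_size bytes; the leading sizeof(_Block_address)
  // bytes record the chunk so _M_destroy can release it later.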
 
  char*
  __pool<false>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    _Bin_record& __bin = _M_bin[__which];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = (__options._M_min_bin << __which)
                               + __options._M_align;
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Get a new block dynamically, set it up for use.
    void* __v = ::operator new(__options._M_chunk_size);
    _Block_address* __address = static_cast<_Block_address*>(__v);
    __address->_M_initial = __v;
    __address->_M_next = __bin._M_address;
    __bin._M_address = __address;

    char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    __bin._M_first[__thread_id] = __block;
    while (--__block_count > 0)
      {
        __c += __bin_size;
        __block->_M_next = reinterpret_cast<_Block_record*>(__c);
        __block = __block->_M_next;
      }
    __block->_M_next = 0;

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }
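
  // _M_initialize builds _M_binmap, a lookup table from request size to
  // bin index: with _M_min_bin == 8, for example, sizes 0-8 map to bin 0,
  // 9-16 to bin 1, 17-32 to bin 2, and so on up to _M_max_bytes.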
 
  void
  __pool<false>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(), which
    // in turn calls this method, so if it's false, it's false forever
    // and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);
    for (size_t __n = 0; __n < _M_bin_size; ++__n)
      {
        _Bin_record& __bin = _M_bin[__n];
        __v = ::operator new(sizeof(_Block_record*));
        __bin._M_first = static_cast<_Block_record**>(__v);
        __bin._M_first[0] = 0;
        __bin._M_address = 0;
      }
    _M_init = true;
  }

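  // Definitions for the threaded specialization, __pool<true>.  Each bin
  // keeps one free list per thread plus a mutex-protected global list at
  // index 0 that threads spill surplus blocks into and refill from.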
 
#ifdef __GTHREADS
  void
  __pool<true>::_M_destroy() throw()
  {
    if (_M_init && !_M_options._M_force_new)
      {
        if (__gthread_active_p())
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
                ::operator delete(__bin._M_free);
                ::operator delete(__bin._M_used);
                ::operator delete(__bin._M_mutex);
              }
          }
        else
          {
            for (size_t __n = 0; __n < _M_bin_size; ++__n)
              {
                _Bin_record& __bin = _M_bin[__n];
                while (__bin._M_address)
                  {
                    _Block_address* __tmp = __bin._M_address->_M_next;
                    ::operator delete(__bin._M_address->_M_initial);
                    __bin._M_address = __tmp;
                  }
                ::operator delete(__bin._M_first);
              }
          }
        ::operator delete(_M_bin);
        ::operator delete(_M_binmap);
      }
  }
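
  // The reclaim path below keeps per-thread free lists from growing
  // without bound: once a thread's free blocks exceed a threshold derived
  // from _M_freelist_headroom and the number of blocks in use, the surplus
  // is moved back to the global list under the bin mutex.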
 
  void
  __pool<true>::_M_reclaim_block(char* __p, size_t __bytes) throw ()
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Bin_record& __bin = _M_bin[__which];

    // Know __p not null, assume valid block.
    char* __c = __p - _M_get_align();
    _Block_record* __block = reinterpret_cast<_Block_record*>(__c);
    if (__gthread_active_p())
      {
        // Calculate the number of records to remove from our freelist:
        // in order to avoid too much contention we wait until the
        // number of records is "high enough".
        const size_t __thread_id = _M_get_thread_id();
        const _Tune& __options = _M_get_options();
        const size_t __limit = (100 * (_M_bin_size - __which)
                                * __options._M_freelist_headroom);

        size_t __remove = __bin._M_free[__thread_id];
        __remove *= __options._M_freelist_headroom;

        // NB: We assume that reads of _Atomic_words are atomic.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        const size_t __net_used = __bin._M_used[__thread_id] - __reclaimed;

        // NB: For performance's sake we don't resync every time, in order
        // to spare atomic ops.  Note that if __reclaimed increased by,
        // say, 1024, since the last sync, it means that the other
        // threads executed the atomic in the else below at least the
        // same number of times (at least, because _M_reserve_block may
        // have decreased the counter), therefore one more cannot hurt.
        if (__reclaimed > 1024)
          {
            __bin._M_used[__thread_id] -= __reclaimed;
            __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);
          }

        if (__remove >= __net_used)
          __remove -= __net_used;
        else
          __remove = 0;
        if (__remove > __limit && __remove > __bin._M_free[__thread_id])
          {
            _Block_record* __first = __bin._M_first[__thread_id];
            _Block_record* __tmp = __first;
            __remove /= __options._M_freelist_headroom;
            const size_t __removed = __remove;
            while (--__remove > 0)
              __tmp = __tmp->_M_next;
            __bin._M_first[__thread_id] = __tmp->_M_next;
            __bin._M_free[__thread_id] -= __removed;

            __gthread_mutex_lock(__bin._M_mutex);
            __tmp->_M_next = __bin._M_first[0];
            __bin._M_first[0] = __first;
            __bin._M_free[0] += __removed;
            __gthread_mutex_unlock(__bin._M_mutex);
          }

        // Return this block to our list and update counters and
        // owner id as needed.
        if (__block->_M_thread_id == __thread_id)
          --__bin._M_used[__thread_id];
        else
          __atomic_add(&__reclaimed_base[__block->_M_thread_id], 1);

        __block->_M_next = __bin._M_first[__thread_id];
        __bin._M_first[__thread_id] = __block;

        ++__bin._M_free[__thread_id];
      }
    else
      {
        // Not using threads, so single threaded application - return
        // to global pool.
        __block->_M_next = __bin._M_first[0];
        __bin._M_first[0] = __block;
      }
  }

  char*
  __pool<true>::_M_reserve_block(size_t __bytes, const size_t __thread_id)
  {
    // Round up to power of 2 and figure out which bin to use.
    const size_t __which = _M_binmap[__bytes];
    const _Tune& __options = _M_get_options();
    const size_t __bin_size = ((__options._M_min_bin << __which)
                               + __options._M_align);
    size_t __block_count = __options._M_chunk_size - sizeof(_Block_address);
    __block_count /= __bin_size;

    // Are we using threads?
    // - Yes, check if there are free blocks on the global
    //   list. If so, grab up to __block_count blocks in one
    //   lock and change ownership. If the global list is
    //   empty, we allocate a new chunk and add those blocks
    //   directly to our own freelist (with us as owner).
    // - No, all operations are made directly to global pool 0;
    //   no need to lock or change ownership, but check for free
    //   blocks on the global list (and if not, add new ones) and
    //   get the first one.
    _Bin_record& __bin = _M_bin[__which];
    _Block_record* __block = 0;
    if (__gthread_active_p())
      {
        // Resync the _M_used counters.
        const size_t __max_threads = __options._M_max_threads + 1;
        _Atomic_word* const __reclaimed_base =
          reinterpret_cast<_Atomic_word*>(__bin._M_used + __max_threads);
        const _Atomic_word __reclaimed = __reclaimed_base[__thread_id];
        __bin._M_used[__thread_id] -= __reclaimed;
        __atomic_add(&__reclaimed_base[__thread_id], -__reclaimed);

        __gthread_mutex_lock(__bin._M_mutex);
        if (__bin._M_first[0] == 0)
          {
            void* __v = ::operator new(__options._M_chunk_size);
            _Block_address* __address = static_cast<_Block_address*>(__v);
            __address->_M_initial = __v;
            __address->_M_next = __bin._M_address;
            __bin._M_address = __address;
            __gthread_mutex_unlock(__bin._M_mutex);

            // No need to hold the lock when we are adding a whole
            // chunk to our own list.
            char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
            __block = reinterpret_cast<_Block_record*>(__c);
            __bin._M_free[__thread_id] = __block_count;
            __bin._M_first[__thread_id] = __block;
            while (--__block_count > 0)
              {
                __c += __bin_size;
                __block->_M_next = reinterpret_cast<_Block_record*>(__c);
                __block = __block->_M_next;
              }
            __block->_M_next = 0;
          }
        else
          {
            // Is the number of required blocks greater than or equal
            // to the number that can be provided by the global free
            // list?
            __bin._M_first[__thread_id] = __bin._M_first[0];
            if (__block_count >= __bin._M_free[0])
              {
                __bin._M_free[__thread_id] = __bin._M_free[0];
                __bin._M_free[0] = 0;
                __bin._M_first[0] = 0;
              }
            else
              {
                __bin._M_free[__thread_id] = __block_count;
                __bin._M_free[0] -= __block_count;
                __block = __bin._M_first[0];
                while (--__block_count > 0)
                  __block = __block->_M_next;
                __bin._M_first[0] = __block->_M_next;
                __block->_M_next = 0;
              }
            __gthread_mutex_unlock(__bin._M_mutex);
          }
      }
    else
      {
        void* __v = ::operator new(__options._M_chunk_size);
        _Block_address* __address = static_cast<_Block_address*>(__v);
        __address->_M_initial = __v;
        __address->_M_next = __bin._M_address;
        __bin._M_address = __address;

        char* __c = static_cast<char*>(__v) + sizeof(_Block_address);
        __block = reinterpret_cast<_Block_record*>(__c);
        __bin._M_first[0] = __block;
        while (--__block_count > 0)
          {
            __c += __bin_size;
            __block->_M_next = reinterpret_cast<_Block_record*>(__c);
            __block = __block->_M_next;
          }
        __block->_M_next = 0;
      }

    __block = __bin._M_first[__thread_id];
    __bin._M_first[__thread_id] = __block->_M_next;

    if (__gthread_active_p())
      {
        __block->_M_thread_id = __thread_id;
        --__bin._M_free[__thread_id];
        ++__bin._M_used[__thread_id];
      }

    // NB: For alignment reasons, we can't use the first _M_align
    // bytes, even when sizeof(_Block_record) < _M_align.
    return reinterpret_cast<char*>(__block) + __options._M_align;
  }

  void
  __pool<true>::_M_initialize()
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0.
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = 0;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = 0;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);

            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = 0;
            __bin._M_address = 0;
          }
      }
    _M_init = true;
  }
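
  // Thread ids are handed out lazily: the first allocation a thread makes
  // pulls a record off freelist._M_thread_freelist and stashes the id in
  // the gthread key, so _M_destroy_thread_key can return it at thread exit.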
 
  size_t
  __pool<true>::_M_get_thread_id()
  {
    // If we have thread support and it's active we check the thread
    // key value and return its id, or if it's not set we take the
    // first record from _M_thread_freelist, set the key and return
    // its id.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        void* v = __gthread_getspecific(freelist._M_key);
        size_t _M_id = (size_t)v;
        if (_M_id == 0)
          {
            {
              __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());
              if (freelist._M_thread_freelist)
                {
                  _M_id = freelist._M_thread_freelist->_M_id;
                  freelist._M_thread_freelist
                    = freelist._M_thread_freelist->_M_next;
                }
            }

            __gthread_setspecific(freelist._M_key, (void*)_M_id);
          }
        return _M_id >= _M_options._M_max_threads ? 0 : _M_id;
      }

    // Otherwise (no thread support or inactive) all requests are
    // served from the global pool 0.
    return 0;
  }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_destroy_thread_key(void*) throw () { }

  // XXX GLIBCXX_ABI Deprecated
  void
  __pool<true>::_M_initialize(__destroy_handler)
  {
    // _M_force_new must not change after the first allocate(),
    // which in turn calls this method, so if it's false, it's false
    // forever and we don't need to return here ever again.
    if (_M_options._M_force_new)
      {
        _M_init = true;
        return;
      }

    // Create the bins.
    // Calculate the number of bins required based on _M_max_bytes.
    // _M_bin_size is statically-initialized to one.
    size_t __bin_size = _M_options._M_min_bin;
    while (_M_options._M_max_bytes > __bin_size)
      {
        __bin_size <<= 1;
        ++_M_bin_size;
      }

    // Setup the bin map for quick lookup of the relevant bin.
    const size_t __j = (_M_options._M_max_bytes + 1) * sizeof(_Binmap_type);
    _M_binmap = static_cast<_Binmap_type*>(::operator new(__j));
    _Binmap_type* __bp = _M_binmap;
    _Binmap_type __bin_max = _M_options._M_min_bin;
    _Binmap_type __bint = 0;
    for (_Binmap_type __ct = 0; __ct <= _M_options._M_max_bytes; ++__ct)
      {
        if (__ct > __bin_max)
          {
            __bin_max <<= 1;
            ++__bint;
          }
        *__bp++ = __bint;
      }

    // Initialize _M_bin and its members.
    void* __v = ::operator new(sizeof(_Bin_record) * _M_bin_size);
    _M_bin = static_cast<_Bin_record*>(__v);

    // If __gthread_active_p() create and initialize the list of
    // free thread ids. Single threaded applications use thread id 0
    // directly and have no need for this.
    if (__gthread_active_p())
      {
        __freelist& freelist = get_freelist();
        {
          __gnu_cxx::__scoped_lock sentry(get_freelist_mutex());

          if (!freelist._M_thread_freelist_array
              || freelist._M_max_threads < _M_options._M_max_threads)
            {
              const size_t __k = sizeof(_Thread_record)
                                 * _M_options._M_max_threads;
              __v = ::operator new(__k);
              _M_thread_freelist = static_cast<_Thread_record*>(__v);

              // NOTE! The first assignable thread id is 1 since the
              // global pool uses id 0.
              size_t __i;
              for (__i = 1; __i < _M_options._M_max_threads; ++__i)
                {
                  _Thread_record& __tr = _M_thread_freelist[__i - 1];
                  __tr._M_next = &_M_thread_freelist[__i];
                  __tr._M_id = __i;
                }

              // Set last record.
              _M_thread_freelist[__i - 1]._M_next = 0;
              _M_thread_freelist[__i - 1]._M_id = __i;

              if (!freelist._M_thread_freelist_array)
                {
                  // Initialize per thread key to hold pointer to
                  // _M_thread_freelist.
                  __gthread_key_create(&freelist._M_key,
                                       ::_M_destroy_thread_key);
                  freelist._M_thread_freelist = _M_thread_freelist;
                }
              else
                {
                  _Thread_record* _M_old_freelist
                    = freelist._M_thread_freelist;
                  _Thread_record* _M_old_array
                    = freelist._M_thread_freelist_array;
                  freelist._M_thread_freelist
                    = &_M_thread_freelist[_M_old_freelist - _M_old_array];
                  while (_M_old_freelist)
                    {
                      size_t next_id;
                      if (_M_old_freelist->_M_next)
                        next_id = _M_old_freelist->_M_next - _M_old_array;
                      else
                        next_id = freelist._M_max_threads;
                      _M_thread_freelist[_M_old_freelist->_M_id - 1]._M_next
                        = &_M_thread_freelist[next_id];
                      _M_old_freelist = _M_old_freelist->_M_next;
                    }
                  ::operator delete(static_cast<void*>(_M_old_array));
                }
              freelist._M_thread_freelist_array = _M_thread_freelist;
              freelist._M_max_threads = _M_options._M_max_threads;
            }
        }

        const size_t __max_threads = _M_options._M_max_threads + 1;
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*) * __max_threads);
            std::memset(__v, 0, sizeof(_Block_record*) * __max_threads);
            __bin._M_first = static_cast<_Block_record**>(__v);

            __bin._M_address = 0;

            __v = ::operator new(sizeof(size_t) * __max_threads);
            std::memset(__v, 0, sizeof(size_t) * __max_threads);
            __bin._M_free = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(size_t) * __max_threads +
                                 sizeof(_Atomic_word) * __max_threads);
            std::memset(__v, 0, (sizeof(size_t) * __max_threads
                                 + sizeof(_Atomic_word) * __max_threads));
            __bin._M_used = static_cast<size_t*>(__v);

            __v = ::operator new(sizeof(__gthread_mutex_t));
            __bin._M_mutex = static_cast<__gthread_mutex_t*>(__v);

#ifdef __GTHREAD_MUTEX_INIT
            {
              // Do not copy a POSIX/gthr mutex once in use.
              __gthread_mutex_t __tmp = __GTHREAD_MUTEX_INIT;
              *__bin._M_mutex = __tmp;
            }
#else
            { __GTHREAD_MUTEX_INIT_FUNCTION(__bin._M_mutex); }
#endif
          }
      }
    else
      {
        for (size_t __n = 0; __n < _M_bin_size; ++__n)
          {
            _Bin_record& __bin = _M_bin[__n];
            __v = ::operator new(sizeof(_Block_record*));
            __bin._M_first = static_cast<_Block_record**>(__v);
            __bin._M_first[0] = 0;
            __bin._M_address = 0;
          }
      }
    _M_init = true;
  }
#endif

  // Instantiations.
  template class __mt_alloc<char>;
  template class __mt_alloc<wchar_t>;

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace
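
// Usage sketch (illustrative only, not part of this translation unit):
// __mt_alloc satisfies the standard allocator requirements, so it can be
// plugged into any container, e.g.
//
//   #include <vector>
//   #include <ext/mt_allocator.h>
//   std::vector<int, __gnu_cxx::__mt_alloc<int> > __v;
//
// Requests up to _M_max_bytes are rounded to a power-of-two bin and served
// from the free lists maintained above; larger requests fall back to
// ::operator new.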