00001
00002
00003
00004
00005
00006
00007
00008
00009
00010
00011
00012
00013
00014
00015
00016
00017
00018
00019
00020
00021
00022
00023
00024
00025
00026
00027
00028
00029
00030
00031
00032
00033
00034
00035
00036
00037
00038
00039
00040
00041
00042
00043
00044
00045
00046
00047
00048
00049
00050
00051
00052
00053
00054
00055
00056
00057
00058
00059
00060
00061
00062
00063
00064
00065 #include "config.h"
00066 #include <new>
00067 #include <stdio.h>
00068 #include <stddef.h>
00069 #if defined HAVE_STDINT_H
00070 #include <stdint.h>
00071 #elif defined HAVE_INTTYPES_H
00072 #include <inttypes.h>
00073 #else
00074 #include <sys/types.h>
00075 #endif
00076 #include <stdlib.h>
00077 #include <string.h>
00078 #include <pthread.h>
00079 #include <unistd.h>
00080 #include <errno.h>
00081 #include <stdarg.h>
00082 #include "base/commandlineflags.h"
00083 #include "google/malloc_hook.h"
00084 #include "google/malloc_extension.h"
00085 #include "internal_logging.h"
00086 #include "internal_spinlock.h"
00087 #include "pagemap.h"
00088 #include "system-alloc.h"
00089 #include "maybe_threads.h"
00090
00091 #if defined HAVE_INTTYPES_H
00092 #define __STDC_FORMAT_MACROS
00093 #include <inttypes.h>
00094 #define LLU PRIu64
00095 #else
00096 #define LLU "llu" // hope for the best
00097 #endif
00098
00099
00100
00101
00102
00103
00104
00105
// Tunable layout constants for the page heap and the size-class scheme.

// Internal page size used by tcmalloc (4 KiB).
static const size_t kPageShift = 12;
static const size_t kPageSize = 1 << kPageShift;

// Requests above this size bypass the size classes and are satisfied
// directly with whole pages.
static const size_t kMaxSize = 8u * kPageSize;

// Minimum alignment of all returned objects (8 bytes).
static const size_t kAlignShift = 3;
static const size_t kAlignment = 1 << kAlignShift;

// Upper bound on the number of size classes (checked in InitSizeClasses()).
static const size_t kNumClasses = 170;

// Once the heap grows past this many bytes, pagemap metadata is
// preallocated in bulk (see TCMalloc_PageHeap::GrowHeap).
static const size_t kPageMapBigAllocationThreshold = 128 << 20;

// Minimum number of pages to fetch from the system per call
// (1 MiB worth of pages).
static const int kMinSystemAlloc = 1 << (20 - kPageShift);

// For each size class, the number of objects transferred between the
// per-thread and central free lists in one batch.  Filled in by
// InitSizeClasses() using NumMoveSize().
static int num_objects_to_move[kNumClasses];

// Bound on per-thread free list length, used when sizing transfer
// batches (see NumMoveSize()).
static const int kMaxFreeListLength = 256;

// Lower and upper bounds on the byte size of one thread cache.
static const size_t kMinThreadCacheSize = kMaxSize * 2;
static const size_t kMaxThreadCacheSize = 2 << 20;

// Default bound for the combined size of all thread caches.
static const size_t kDefaultOverallThreadCacheSize = 16 << 20;

// Spans of this many pages or more live on the single ordered "large"
// list instead of the per-length free lists.
static const size_t kMaxPages = kMinSystemAlloc;
00149
00150
// Candidate sampling periods.  Keeping the period prime avoids locking
// onto periodic allocation patterns.
static unsigned int primes_list[] = {
    32771, 65537, 131101, 262147, 524309, 1048583,
    2097169, 4194319, 8388617, 16777259, 33554467 };
00158
00159
00160
00161
00162
00163
// Sampling parameter: roughly twice the mean byte gap between sampled
// allocations.  Should be one of the primes above.
DEFINE_int64(tcmalloc_sample_parameter, 262147,
             "Twice the approximate gap between sampling actions."
             " Must be a prime number. Otherwise will be rounded up to a "
             " larger prime number");

// Current sampling period; guarded by sample_period_lock below.
static size_t sample_period = 262147;

// Protects sample_period.
static SpinLock sample_period_lock = SPINLOCK_INITIALIZER;
00171
00172
00173
00174
00175
00176
00177
// Size -> size-class mapping tables, indexed by floor(log2(size)):
//   SizeClass(s) == size_base[lg] + ((s - 1) >> size_shift[lg])
// Both tables are filled in by InitSizeClasses().
static const int kSizeBits = 8 * sizeof(size_t);
static unsigned char size_base[kSizeBits];
static unsigned char size_shift[kSizeBits];

// Size class -> exact byte size of objects in that class.
static size_t class_to_size[kNumClasses];

// Size class -> number of pages the central cache grabs at a time for
// that class.
static size_t class_to_pages[kNumClasses];

// One slot of a central list's transfer cache: a pre-linked batch of
// exactly num_objects_to_move[size_class] objects, stored as the
// [head, tail] of a singly-linked list.
struct TCEntry {
  void *head;
  void *tail;
};

// Number of transfer-cache slots per central free list.
static const int kNumTransferEntries = kNumClasses;
00203
00204
// LgFloor(n) returns floor(log2(n)).  Behavior for n == 0 is undefined.
#if (defined __i386__ || defined __x86_64__) && defined __GNUC__
// x86 fast path: BSR yields the index of the highest set bit directly.
static inline int LgFloor(size_t n) {
  // NOTE(review): BSR leaves the destination undefined when the source
  // is 0, so `result` is garbage for n == 0 — callers must pass n > 0.
  size_t result;
  __asm__("bsr %1, %0"
          : "=r" (result)
          : "ro" (n)
          : "cc"
  );
  return result;
}
#else
// Portable fallback: binary search for the highest set bit.
// NOTE(review): the probes start at shift 16 (i == 4), so only the low
// 32 bits are examined; with a 64-bit size_t and n >= 2^32 the final
// ASSERT would fire.  Confirm non-x86 64-bit targets are handled.
static inline int LgFloor(size_t n) {
  int log = 0;
  for (int i = 4; i >= 0; --i) {
    int shift = (1 << i);
    size_t x = n >> shift;
    if (x != 0) {
      n = x;
      log += shift;
    }
  }
  ASSERT(n == 1);
  return log;
}
#endif
00234
00235
00236
00237
00238
// Returns the successor of singly-linked-list node t.  Each node stores
// its "next" pointer in its first word.
static inline void *SLL_Next(void *t) {
  void **cell = reinterpret_cast<void**>(t);
  return cell[0];
}
00242
// Stores n as the successor of singly-linked-list node t.
static inline void SLL_SetNext(void *t, void *n) {
  void **cell = reinterpret_cast<void**>(t);
  cell[0] = n;
}
00246
// Pushes element onto the front of *list.
static inline void SLL_Push(void **list, void *element) {
  // Link the new element to the old head, then make it the new head.
  *(reinterpret_cast<void**>(element)) = *list;
  *list = element;
}
00251
// Pops and returns the head of *list.  Precondition: *list != NULL.
static inline void *SLL_Pop(void **list) {
  void *head = *list;
  *list = *(reinterpret_cast<void**>(head));
  return head;
}
00257
00258
00259
00260
00261
00262
// Removes the first N elements of *head, returning them as the
// NULL-terminated sublist [*start .. *end].  For N == 0 both outputs
// are NULL.  Precondition: the list holds at least N elements.
static inline void SLL_PopRange(void **head, int N, void **start, void **end) {
  if (N == 0) {
    *start = NULL;
    *end = NULL;
    return;
  }

  // Walk to the N-th node.
  void *last = *head;
  for (int walked = 1; walked < N; walked++) {
    last = *(reinterpret_cast<void**>(last));
  }

  *start = *head;
  *end = last;
  *head = *(reinterpret_cast<void**>(last));
  // Detach the extracted sublist from the remainder.
  *(reinterpret_cast<void**>(last)) = NULL;
}
00281
// Prepends the sublist [start .. end] to *head.  A NULL start is a
// no-op.
static inline void SLL_PushRange(void **head, void *start, void *end) {
  if (start == NULL) return;
  // Splice: last element of the range points at the old head.
  *(reinterpret_cast<void**>(end)) = *head;
  *head = start;
}
00287
// Returns the number of elements on the list starting at head (O(n)).
static inline size_t SLL_Size(void *head) {
  // Accumulate in size_t, matching the return type; the original used
  // an int, which would narrow/sign-convert on very long lists.
  size_t count = 0;
  while (head != NULL) {
    count++;
    head = *(reinterpret_cast<void**>(head));
  }
  return count;
}
00296
00297
00298
// Maps an allocation size to its size class via the size_base /
// size_shift tables, keyed by floor(log2(size)).
static inline int SizeClass(size_t size) {
  if (size == 0) size = 1;   // zero-byte requests share class with 1 byte
  const int lg = LgFloor(size);
  const int align = size_shift[lg];
  return static_cast<int>(size_base[lg]) + ((size-1) >> align);
}
00305
00306
// Returns the exact byte size of objects in size class cl
// (the inverse of SizeClass()).
static inline size_t ByteSizeForClass(size_t cl) {
  return class_to_size[cl];
}
00310
00311
00312 static int NumMoveSize(size_t size) {
00313 if (size == 0) return 0;
00314
00315 int num = static_cast<int>(64.0 * 1024.0 / size);
00316 if (num < 2) num = 2;
00317
00318
00319 if (num > static_cast<int>(0.8 * kMaxFreeListLength))
00320 num = static_cast<int>(0.8 * kMaxFreeListLength);
00321
00322
00323
00324
00325
00326
00327
00328
00329
00330 if (num > 32) num = 32;
00331
00332 return num;
00333 }
00334
00335
// Builds all size-class tables: size_base/size_shift (size -> class),
// class_to_size, class_to_pages and num_objects_to_move.  Aborts via
// __throw_bad_alloc if the tables come out inconsistent.
static void InitSizeClasses() {
  // Sizes below kAlignment all map into class 1.
  for (int lg = 0; lg < kAlignShift; lg++) {
    size_base[lg] = 1;
    size_shift[lg] = kAlignShift;
  }

  int next_class = 1;
  int alignshift = kAlignShift;
  int last_lg = -1;
  for (size_t size = kAlignment; size <= kMaxSize; size += (1 << alignshift)) {
    int lg = LgFloor(size);
    if (lg > last_lg) {
      // Entered a new power-of-two range.  For sizes >= 128 (lg >= 7)
      // coarsen the class spacing, up to 256-byte steps, to keep the
      // total class count down.
      if ((lg >= 7) && (alignshift < 8)) {
        alignshift++;
      }
      // Fix the tables so that for sizes s in this range
      //   SizeClass(s) == size_base[lg] + ((s-1) >> alignshift)
      // starts at next_class.
      size_base[lg] = next_class - ((size-1) >> alignshift);
      size_shift[lg] = alignshift;
    }

    class_to_size[next_class] = size;
    last_lg = lg;

    next_class++;
  }
  if (next_class >= kNumClasses) {
    MESSAGE("used up too many size classes: %d\n", next_class);
    std::__throw_bad_alloc();
  }

  // For each class, pick how many pages the central cache should grab
  // at a time: enough that the unusable tail wastes at most 1/8 of the
  // allocation.
  for (size_t cl = 1; cl < next_class; cl++) {
    size_t psize = kPageSize;
    const size_t s = class_to_size[cl];
    while ((psize % s) > (psize >> 3)) {
      psize += kPageSize;
    }
    class_to_pages[cl] = psize >> kPageShift;
  }

  // Self-check: every size in [0, kMaxSize] must map to a valid class
  // whose object size is the smallest one that still fits.
  for (size_t size = 0; size <= kMaxSize; size++) {
    const int sc = SizeClass(size);
    if (sc == 0) {
      MESSAGE("Bad size class %d for %" PRIuS "\n", sc, size);
      std::__throw_bad_alloc();
    }
    if (sc > 1 && size <= class_to_size[sc-1]) {
      MESSAGE("Allocating unnecessarily large class %d for %" PRIuS
              "\n", sc, size);
      std::__throw_bad_alloc();
    }
    if (sc >= kNumClasses) {
      MESSAGE("Bad size class %d for %" PRIuS "\n", sc, size);
      std::__throw_bad_alloc();
    }
    const size_t s = class_to_size[sc];
    if (size > s) {
      MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %d)\n", s, size, sc);
      std::__throw_bad_alloc();
    }
    if (s == 0) {
      MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %d)\n", s, size, sc);
      std::__throw_bad_alloc();
    }
  }

  // Precompute transfer batch sizes for every class.
  for (size_t cl = 1; cl < kNumClasses; ++cl) {
    num_objects_to_move[cl] = NumMoveSize(ByteSizeForClass(cl));
  }
}
00418
00419
00420
00421
00422
00423
00424
// Total bytes handed out for internal metadata (statistics only).
static uint64_t metadata_system_bytes = 0;

// Allocates memory for internal metadata straight from the system
// allocator.  Returns NULL on failure.  No corresponding free exists.
static void* MetaDataAlloc(size_t bytes) {
  void* result = TCMalloc_SystemAlloc(bytes);
  if (result != NULL) {
    metadata_system_bytes += bytes;
  }
  return result;
}
00433
// Simple bump-pointer allocator with a free list, used for internal
// metadata objects of type T.  Not thread-safe; callers must provide
// their own locking.  Does not run constructors or destructors.
template <class T>
class PageHeapAllocator {
 private:
  // Chunk size fetched from the system at a time.
  static const int kAllocIncrement = 128 << 10;

  // sizeof(T) rounded up to kAlignment.
  static const size_t kAlignedSize
    = (((sizeof(T) + kAlignment - 1) / kAlignment) * kAlignment);

  // Unused tail of the most recently fetched chunk.
  char* free_area_;
  size_t free_avail_;

  // Singly-linked list of objects returned via Delete().
  void* free_list_;

  // Live object count (New calls minus Delete calls).
  int inuse_;

 public:
  void Init() {
    ASSERT(kAlignedSize <= kAllocIncrement);
    inuse_ = 0;
    free_area_ = NULL;
    free_avail_ = 0;
    free_list_ = NULL;
    // Allocate and immediately free one object: forces the first chunk
    // to be mapped and primes the free list.
    Delete(New());
  }

  // Returns an uninitialized T-sized slot; throws bad_alloc on OOM.
  T* New() {
    void* result;
    if (free_list_ != NULL) {
      // Reuse a previously deleted slot.
      result = free_list_;
      free_list_ = *(reinterpret_cast<void**>(result));
    } else {
      if (free_avail_ < kAlignedSize) {
        // Current chunk exhausted: grab a fresh one (any leftover tail
        // of the old chunk is abandoned).
        free_area_ = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
        if (free_area_ == NULL) std::__throw_bad_alloc();
        free_avail_ = kAllocIncrement;
      }
      result = free_area_;
      free_area_ += kAlignedSize;
      free_avail_ -= kAlignedSize;
    }
    inuse_++;
    return reinterpret_cast<T*>(result);
  }

  // Returns p to the free list; its first word is overwritten with the
  // list link.
  void Delete(T* p) {
    *(reinterpret_cast<void**>(p)) = free_list_;
    free_list_ = p;
    inuse_--;
  }

  int inuse() const { return inuse_; }
};
00494
00495
00496
00497
00498
00499
// Index of a page: its address is (PageID << kPageShift).
typedef uintptr_t PageID;

// A length measured in pages.
typedef uintptr_t Length;

// Converts a byte count to a page count, rounding up.
static inline Length pages(size_t bytes) {
  return ((bytes + kPageSize - 1) >> kPageShift);
}
00509
00510
00511
// Returns the number of bytes actually consumed by a request for
// `bytes` bytes.
static size_t AllocationSize(size_t bytes) {
  if (bytes > kMaxSize) {
    // Large object: rounded up to a whole number of pages.
    return pages(bytes) << kPageShift;
  } else {
    // Small object: rounded up to its size class's object size.
    return ByteSizeForClass(SizeClass(bytes));
  }
}
00521
00522
// Descriptor of a contiguous run of pages.
struct Span {
  PageID start;           // first page covered by this span
  Length length;          // number of pages
  Span* next;             // doubly-linked list links (NULL when off-list)
  Span* prev;
  void* objects;          // free objects carved from this span
  unsigned int free : 1;       // span is on a page-heap free list
  unsigned int sample : 1;     // span holds a sampled object
  unsigned int sizeclass : 8;  // size class of carved objects (0 = none)
  unsigned int refcount : 11;  // number of carved objects handed out

#undef SPAN_HISTORY
#ifdef SPAN_HISTORY
  // Circular buffer of Event() records — debugging aid only.
  int nexthistory;
  char history[64];
  int value[64];
#endif
};
00542
#ifdef SPAN_HISTORY
// Appends a one-character event (plus optional value) to the span's
// circular debug history.
void Event(Span* span, char op, int v = 0) {
  span->history[span->nexthistory] = op;
  span->value[span->nexthistory] = v;
  span->nexthistory++;
  if (span->nexthistory == sizeof(span->history)) span->nexthistory = 0;
}
#else
// History disabled: Event() compiles away entirely.
#define Event(s,o,v) ((void) 0)
#endif
00553
00554
// Metadata allocator for Span descriptors themselves.
static PageHeapAllocator<Span> span_allocator;

// Creates a zero-initialized Span covering pages [p, p+len).
static Span* NewSpan(PageID p, Length len) {
  Span* result = span_allocator.New();
  memset(result, 0, sizeof(*result));
  result->start = p;
  result->length = len;
#ifdef SPAN_HISTORY
  result->nexthistory = 0;
#endif
  return result;
}
00566
// Frees a Span descriptor (not the pages it describes).
static void DeleteSpan(Span* span) {
#ifndef NDEBUG
  // Poison the descriptor to catch stale uses in debug builds.
  memset(span, 0x3f, sizeof(*span));
#endif
  span_allocator.Delete(span);
}
00574
00575
00576
00577
00578
// Initializes `list` as an empty circular doubly-linked list: the
// header's links point back at the header itself.
static void DLL_Init(Span* list) {
  list->next = list;
  list->prev = list;
}
00583
00584 static void DLL_Remove(Span* span) {
00585 span->prev->next = span->next;
00586 span->next->prev = span->prev;
00587 span->prev = NULL;
00588 span->next = NULL;
00589 }
00590
// A circular list is empty iff its header points at itself.
static inline bool DLL_IsEmpty(const Span* list) {
  return list->next == list;
}
00594
00595 static int DLL_Length(const Span* list) {
00596 int result = 0;
00597 for (Span* s = list->next; s != list; s = s->next) {
00598 result++;
00599 }
00600 return result;
00601 }
00602
#if 0
// Debug helper (currently compiled out): dumps every span on a list.
static void DLL_Print(const char* label, const Span* list) {
  MESSAGE("%-10s %p:", label, list);
  for (const Span* s = list->next; s != list; s = s->next) {
    MESSAGE(" <%p,%u,%u>", s, s->start, s->length);
  }
  MESSAGE("\n");
}
#endif
00612
00613 static void DLL_Prepend(Span* list, Span* span) {
00614 ASSERT(span->next == NULL);
00615 ASSERT(span->prev == NULL);
00616 span->next = list->next;
00617 span->prev = list;
00618 list->next->prev = span;
00619 list->next = span;
00620 }
00621
00622 static void DLL_InsertOrdered(Span* list, Span* span) {
00623 ASSERT(span->next == NULL);
00624 ASSERT(span->prev == NULL);
00625
00626 Span* x = list;
00627 while ((x->next != list) && (x->next->start < span->start)) {
00628 x = x->next;
00629 }
00630 span->next = x->next;
00631 span->prev = x;
00632 x->next->prev = span;
00633 x->next = span;
00634 }
00635
00636
00637
00638
00639
00640
// Maximum number of frames kept per recorded stack trace.
static const int kMaxStackDepth = 31;

// A recorded allocation stack plus the size of the allocation.
struct StackTrace {
  uintptr_t size;   // byte size of the recorded allocation
  int depth;        // number of valid entries in stack[]
  void* stack[kMaxStackDepth];
};
static PageHeapAllocator<StackTrace> stacktrace_allocator;

// List header for spans holding sampled objects.
static Span sampled_objects;

// Singly-linked list of traces recorded each time the heap grows.  The
// link pointer is stashed in stack[kMaxStackDepth-1] (see RecordGrowth).
static StackTrace* growth_stacks = NULL;
00655
00656
00657
00658
00659
00660
00661
00662
// Picks a pagemap implementation based on the number of significant
// address bits: a three-level radix tree in the general (64-bit) case…
template <int BITS> class MapSelector {
 public:
  typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
};

// …and a cheaper two-level map when addresses fit in 32 bits.
template <> class MapSelector<32> {
 public:
  typedef TCMalloc_PageMap2<32-kPageShift> Type;
};
00673
00674
00675
00676
00677
00678
00679
00680
00681
// -------------------------------------------------------------------
// Page-level allocator.  Keeps one free list per span length up to
// kMaxPages, plus a single address-ordered list for larger spans, and
// eagerly coalesces adjacent free spans.  All callers in this file
// guard access with pageheap_lock.
// -------------------------------------------------------------------
class TCMalloc_PageHeap {
 public:
  TCMalloc_PageHeap();

  // Allocates a span of exactly n pages, or returns NULL on failure.
  // The result is off every list and has no size class registered.
  Span* New(Length n);

  // Returns a span obtained from New() to the heap, coalescing it with
  // free neighbors.
  void Delete(Span* span);

  // Marks span as carrying size-class sc objects and points every
  // interior pagemap entry at it.
  void RegisterSizeClass(Span* span, size_t sc);

  // Splits an in-use span: the first n pages stay in `span`; a new
  // span covering the remainder is returned (not on any free list).
  Span* Split(Span* span, Length n);

  // Returns the descriptor of the span containing page p, or NULL.
  inline Span* GetDescriptor(PageID p) const {
    return reinterpret_cast<Span*>(pagemap_.get(p));
  }

  // Writes free-list statistics to `out`.
  void Dump(TCMalloc_Printer* out);

  // Total bytes obtained from the system for page-level storage.
  inline uint64_t SystemBytes() const { return system_bytes_; }

  // Bytes currently sitting on the heap's free lists.
  uint64_t FreeBytes() const {
    return (static_cast<uint64_t>(free_pages_) << kPageShift);
  }

  bool Check();
  bool CheckList(Span* list, Length min_pages, Length max_pages);

 private:
  // Page number -> Span* mapping, sized to the address space.
  typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap;
  PageMap pagemap_;

  // Address-ordered list of free spans of kMaxPages pages or more.
  Span large_;

  // free_[n] holds free spans of exactly n pages (0 < n < kMaxPages).
  Span free_[kMaxPages];

  // Total pages on free_[] and large_.
  uintptr_t free_pages_;

  // Total bytes obtained from the system.
  uint64_t system_bytes_;

  // Fetches at least n more pages from the system and makes them
  // available via the free lists.  Returns false on failure.
  bool GrowHeap(Length n);

  // Removes span from its free list, keeps its first n pages, and puts
  // any leftover pages back on the appropriate free list.
  void Carve(Span* span, Length n);

  // Records span in the pagemap under its first and last page.
  void RecordSpan(Span* span) {
    pagemap_.set(span->start, span);
    if (span->length > 1) {
      pagemap_.set(span->start + span->length - 1, span);
    }
  }
};
00764
// All lists start empty; pages are fetched lazily via GrowHeap().
TCMalloc_PageHeap::TCMalloc_PageHeap() : pagemap_(MetaDataAlloc),
                                         free_pages_(0),
                                         system_bytes_(0) {
  DLL_Init(&large_);
  for (int i = 0; i < kMaxPages; i++) {
    DLL_Init(&free_[i]);
  }
}
00773
// Allocates a span of exactly n pages: first-fit over the per-length
// lists, then best-fit over the large list, growing the heap once if
// nothing fits.  Returns NULL when the system is out of memory.
Span* TCMalloc_PageHeap::New(Length n) {
  ASSERT(Check());
  if (n == 0) return NULL;

  // First fit: the smallest per-length list with length >= n.
  for (Length s = n; s < kMaxPages; s++) {
    if (!DLL_IsEmpty(&free_[s])) {
      Span* result = free_[s].next;
      Carve(result, n);
      ASSERT(Check());
      free_pages_ -= n;
      return result;
    }
  }

  // Fall back to the large list; at most two passes, growing the heap
  // between them.  (A grown span is at least kMinSystemAlloc ==
  // kMaxPages pages, so after Delete() it lands on the large list.)
  for (int i = 0; i < 2; i++) {
    // Best fit over all large spans.
    Span *best = NULL;
    for (Span* span = large_.next; span != &large_; span = span->next) {
      if (span->length >= n &&
          (best == NULL || span->length < best->length)) {
        best = span;
      }
    }
    if (best != NULL) {
      Carve(best, n);
      ASSERT(Check());
      free_pages_ -= n;
      return best;
    }
    if (i == 0) {
      // Nothing fits: ask the system for more memory, then retry.
      if (!GrowHeap(n)) {
        ASSERT(Check());
        return NULL;
      }
    }
  }
  return NULL;
}
00818
00819 Span* TCMalloc_PageHeap::Split(Span* span, Length n) {
00820 ASSERT(0 < n);
00821 ASSERT(n < span->length);
00822 ASSERT(!span->free);
00823 ASSERT(span->sizeclass == 0);
00824 Event(span, 'T', n);
00825
00826 const int extra = span->length - n;
00827 Span* leftover = NewSpan(span->start + n, extra);
00828 Event(leftover, 'U', extra);
00829 RecordSpan(leftover);
00830 pagemap_.set(span->start + n - 1, span);
00831 span->length = n;
00832
00833 return leftover;
00834 }
00835
00836 void TCMalloc_PageHeap::Carve(Span* span, Length n) {
00837 ASSERT(n > 0);
00838 DLL_Remove(span);
00839 span->free = 0;
00840 Event(span, 'A', n);
00841
00842 const int extra = span->length - n;
00843 ASSERT(extra >= 0);
00844 if (extra > 0) {
00845 Span* leftover = NewSpan(span->start + n, extra);
00846 leftover->free = 1;
00847 Event(leftover, 'S', extra);
00848 RecordSpan(leftover);
00849 if (extra < kMaxPages) {
00850 DLL_Prepend(&free_[extra], leftover);
00851 } else {
00852 DLL_InsertOrdered(&large_, leftover);
00853 }
00854 span->length = n;
00855 pagemap_.set(span->start + n - 1, span);
00856 }
00857 }
00858
// Returns span to the heap, coalescing it with free neighbors on both
// sides, then places it on the appropriate free list.
void TCMalloc_PageHeap::Delete(Span* span) {
  ASSERT(Check());
  ASSERT(!span->free);
  ASSERT(span->length > 0);
  ASSERT(GetDescriptor(span->start) == span);
  ASSERT(GetDescriptor(span->start + span->length - 1) == span);
  span->sizeclass = 0;
  span->sample = 0;

  // The pagemap records each span under its first and last page, so the
  // pages just outside [start, start+length) identify the neighbors.
  const PageID p = span->start;
  const Length n = span->length;
  Span* prev = GetDescriptor(p-1);
  if (prev != NULL && prev->free) {
    // Merge the preceding free span into this one.
    ASSERT(prev->start + prev->length == p);
    const Length len = prev->length;
    DLL_Remove(prev);
    DeleteSpan(prev);
    span->start -= len;
    span->length += len;
    pagemap_.set(span->start, span);
    Event(span, 'L', len);
  }
  Span* next = GetDescriptor(p+n);
  if (next != NULL && next->free) {
    // Merge the following free span into this one.
    ASSERT(next->start == p+n);
    const Length len = next->length;
    DLL_Remove(next);
    DeleteSpan(next);
    span->length += len;
    pagemap_.set(span->start + span->length - 1, span);
    Event(span, 'R', len);
  }

  Event(span, 'D', span->length);
  span->free = 1;
  if (span->length < kMaxPages) {
    DLL_Prepend(&free_[span->length], span);
  } else {
    DLL_InsertOrdered(&large_, span);
  }
  // Add n, not the merged length: the absorbed neighbors were already
  // counted in free_pages_.
  free_pages_ += n;

  ASSERT(Check());
}
00909
// Associates span with size class sc and points every interior page of
// the pagemap at it (RecordSpan already set the first and last pages).
void TCMalloc_PageHeap::RegisterSizeClass(Span* span, size_t sc) {
  ASSERT(!span->free);
  ASSERT(GetDescriptor(span->start) == span);
  ASSERT(GetDescriptor(span->start+span->length-1) == span);
  Event(span, 'C', sc);
  span->sizeclass = sc;
  for (Length i = 1; i < span->length-1; i++) {
    pagemap_.set(span->start+i, span);
  }
}
00921
// Prints occupancy of the per-length free lists, then the large list.
void TCMalloc_PageHeap::Dump(TCMalloc_Printer* out) {
  int nonempty_sizes = 0;
  for (int s = 0; s < kMaxPages; s++) {
    if (!DLL_IsEmpty(&free_[s])) nonempty_sizes++;
  }
  out->printf("------------------------------------------------\n");
  out->printf("PageHeap: %d sizes; %6.1f MB free\n", nonempty_sizes,
              (static_cast<double>(free_pages_) * kPageSize) / 1048576.0);
  out->printf("------------------------------------------------\n");
  uint64_t cumulative = 0;
  for (int s = 0; s < kMaxPages; s++) {
    if (!DLL_IsEmpty(&free_[s])) {
      const int list_length = DLL_Length(&free_[s]);
      uint64_t s_pages = s * list_length;
      cumulative += s_pages;
      out->printf("%6u pages * %6u spans ~ %6.1f MB; %6.1f MB cum\n",
                  s, list_length,
                  (s_pages << kPageShift) / 1048576.0,
                  (cumulative << kPageShift) / 1048576.0);
    }
  }

  // Large spans are listed individually, then summarized.
  uint64_t large_pages = 0;
  int large_spans = 0;
  for (Span* s = large_.next; s != &large_; s = s->next) {
    out->printf(" [ %6" PRIuS " pages ]\n", s->length);
    large_pages += s->length;
    large_spans++;
  }
  cumulative += large_pages;
  out->printf(">255 large * %6u spans ~ %6.1f MB; %6.1f MB cum\n",
              large_spans,
              (large_pages << kPageShift) / 1048576.0,
              (cumulative << kPageShift) / 1048576.0);
}
00957
// Records a heap-growth event on the growth_stacks list.  The list link
// is stashed in the last (otherwise unused) stack[] slot.
static void RecordGrowth(size_t growth) {
  StackTrace* t = stacktrace_allocator.New();
  t->depth = 0;   // NOTE(review): no stack frames captured here, only the size
  t->size = growth;
  t->stack[kMaxStackDepth-1] = reinterpret_cast<void*>(growth_stacks);
  growth_stacks = t;
}
00965
// Fetches at least n pages (padded up to kMinSystemAlloc) from the
// system, records them in the pagemap, and releases them onto the free
// lists via Delete().  Returns false if the system refused.
bool TCMalloc_PageHeap::GrowHeap(Length n) {
  ASSERT(kMaxPages >= kMinSystemAlloc);
  Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
  void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, kPageSize);
  if (ptr == NULL) {
    if (n < ask) {
      // The padded request failed; retry with the bare minimum.
      ask = n;
      ptr = TCMalloc_SystemAlloc(ask << kPageShift, kPageSize);
    }
    if (ptr == NULL) return false;
  }
  RecordGrowth(ask << kPageShift);

  uint64_t old_system_bytes = system_bytes_;
  system_bytes_ += (ask << kPageShift);
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  ASSERT(p > 0);

  // Once the heap crosses the threshold, bulk-preallocate pagemap
  // interior nodes so later Ensure() calls are less likely to fail.
  if (old_system_bytes < kPageMapBigAllocationThreshold
      && system_bytes_ >= kPageMapBigAllocationThreshold) {
    pagemap_.PreallocateMoreMemory();
  }

  // Reserve pagemap entries for the new run plus one page on each side,
  // so that coalescing in Delete() can safely probe the neighbors.
  if (pagemap_.Ensure(p-1, ask+2)) {
    // Free the new span via Delete(), which also coalesces it with any
    // adjacent free memory.
    Span* span = NewSpan(p, ask);
    RecordSpan(span);
    Delete(span);
    ASSERT(Check());
    return true;
  } else {
    // Pagemap entries could not be allocated: the new pages are
    // abandoned (still counted in system_bytes_), but the heap stays
    // consistent.
    return false;
  }
}
01013
// Sanity-checks every free list.  Always returns true; violations
// trip CHECK_CONDITION inside CheckList.
bool TCMalloc_PageHeap::Check() {
  ASSERT(free_[0].next == &free_[0]);   // the 0-page list must stay empty
  CheckList(&large_, kMaxPages, 1000000000);
  for (Length s = 1; s < kMaxPages; s++) {
    CheckList(&free_[s], s, s);
  }
  return true;
}
01022
// Verifies every span on `list`: it must be marked free, have a length
// in [min_pages, max_pages], and be correctly recorded in the pagemap.
bool TCMalloc_PageHeap::CheckList(Span* list, Length min_pages, Length max_pages) {
  for (Span* s = list->next; s != list; s = s->next) {
    CHECK_CONDITION(s->free);
    CHECK_CONDITION(s->length >= min_pages);
    CHECK_CONDITION(s->length <= max_pages);
    CHECK_CONDITION(GetDescriptor(s->start) == s);
    CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
  }
  return true;
}
01033
01034
01035
01036
01037
// A free list inside a per-thread cache.  Tracks its length plus a
// low-water mark used when deciding how much to return to the central
// cache.  No locking: each instance belongs to a single thread.
class TCMalloc_ThreadCache_FreeList {
 private:
  void* list_;        // singly-linked list of free objects
  uint16_t length_;   // current number of objects on list_
  uint16_t lowater_;  // smallest length_ since last clear_lowwatermark()

 public:
  void Init() {
    list_ = NULL;
    length_ = 0;
    lowater_ = 0;
  }

  // Number of objects on the list.
  int length() const {
    return length_;
  }

  // True iff the list has no objects.
  bool empty() const {
    return list_ == NULL;
  }

  // Low-water mark management.
  int lowwatermark() const { return lowater_; }
  void clear_lowwatermark() { lowater_ = length_; }

  void Push(void* ptr) {
    SLL_Push(&list_, ptr);
    length_++;
  }

  // Precondition: the list is non-empty.
  void* Pop() {
    ASSERT(list_ != NULL);
    length_--;
    if (length_ < lowater_) lowater_ = length_;
    return SLL_Pop(&list_);
  }

  // Prepends the N-object range [start..end].
  void PushRange(int N, void *start, void *end) {
    SLL_PushRange(&list_, start, end);
    length_ += N;
  }

  // Removes the first N objects; the list must hold at least N.
  void PopRange(int N, void **start, void **end) {
    SLL_PopRange(&list_, N, start, end);
    ASSERT(length_ >= N);
    length_ -= N;
    if (length_ < lowater_) lowater_ = length_;
  }
};
01089
01090
01091
01092
01093
// Per-thread cache of free objects, one free list per size class.
class TCMalloc_ThreadCache {
 private:
  typedef TCMalloc_ThreadCache_FreeList FreeList;

  size_t size_;            // total byte size of cached objects (see Size())
  pthread_t tid_;          // owning thread
  bool in_setspecific_;    // NOTE(review): presumably guards re-entrant
                           // pthread_setspecific during setup — confirm
  FreeList list_[kNumClasses];

  // Allocation-sampling state (see SampleAllocation/PickNextSample).
  uint32_t rnd_;                 // pseudo-random generator state
  size_t bytes_until_sample_;    // bytes left before the next sample

 public:
  // Links for the global doubly-linked list of caches (thread_heaps).
  TCMalloc_ThreadCache* next_;
  TCMalloc_ThreadCache* prev_;

  void Init(pthread_t tid);
  void Cleanup();

  // Number of objects cached for size class cl.
  int freelist_length(size_t cl) const { return list_[cl].length(); }

  // Total byte size of objects in this cache.
  size_t Size() const { return size_; }

  void* Allocate(size_t size);
  void Deallocate(void* ptr, size_t size_class);

  // Exchange objects with the central cache / trim this cache.
  void FetchFromCentralCache(size_t cl);
  void ReleaseToCentralCache(size_t cl, int N);
  void Scavenge();
  void Print() const;

  // Records an allocation of k bytes; returns true iff it should be
  // sampled.
  bool SampleAllocation(size_t k);

  // Chooses the byte distance to the next sampled allocation.
  void PickNextSample();

  // Module/TSD initialization and cache lookup helpers.
  static void InitModule();
  static void InitTSD();
  static TCMalloc_ThreadCache* GetCache();
  static TCMalloc_ThreadCache* GetCacheIfPresent();
  static void* CreateCacheIfNecessary();
  static void DeleteCache(void* ptr);
  static void RecomputeThreadCacheSize();
};
01144
01145
01146
01147
01148
// Central free list for one size class, shared by all threads.  Guards
// its own state with lock_ and keeps a small "transfer cache" of
// ready-made object batches to cut locking traffic with thread caches.
class TCMalloc_Central_FreeList {
 public:
  void Init(size_t cl);

  // Inserts the N-object list [start..end].  A batch of exactly
  // num_objects_to_move[cl] objects may be parked whole in a
  // transfer-cache slot; otherwise the objects return to their spans.
  void InsertRange(void *start, void *end, int N);

  // Removes up to *N objects; on return [*start..*end] holds the *N
  // objects actually obtained (possibly 0 on OOM).
  void RemoveRange(void **start, void **end, int *N);

  // Number of free objects held directly in spans.
  int length() {
    SpinLockHolder h(&lock_);
    return counter_;
  }

  // Number of free objects parked in transfer-cache slots.
  int tc_length() {
    SpinLockHolder h(&lock_);
    return used_slots_ * num_objects_to_move[size_class_];
  }

 private:
  // Removes one object from a nonempty span, or returns NULL if all
  // spans are empty.  lock_ must be held.
  void* FetchFromSpans();

  // Like FetchFromSpans, but Populate()s fresh pages on exhaustion.
  void* FetchFromSpansSafe();

  // Returns every object on the list `start` to its owning span.
  void ReleaseListToSpans(void *start);

  // Returns one object to its owning span.
  void ReleaseToSpans(void* object);

  // Fetches pages from the page heap and carves them into objects of
  // this size class.
  void Populate();

  // Ensures a transfer-cache slot is available, stealing capacity from
  // other size classes if needed.  Returns false when impossible.
  bool MakeCacheSpace();

  // Asks a (round-robin chosen) other size class to shrink its
  // transfer cache.  The caller holds the lock of locked_size_class.
  static bool EvictRandomSizeClass(int locked_size_class, bool force);

  // Gives up one transfer-cache slot of this class; when the cache is
  // full, objects are only sacrificed if force is true.  Acquires
  // lock_ itself (via LockInverter in the implementation).
  bool ShrinkCache(int locked_size_class, bool force);

  // Protects all fields below.
  SpinLock lock_;

  // Which size class this list serves.
  size_t size_class_;
  Span empty_;      // spans with no free objects left
  Span nonempty_;   // spans that still have free objects
  size_t counter_;  // number of free objects held in the spans

  // Transfer cache: slots [0, used_slots_) each hold one pre-linked
  // batch of num_objects_to_move[size_class_] objects.
  TCEntry tc_slots_[kNumTransferEntries];

  // Number of occupied transfer-cache slots.
  int32_t used_slots_;

  // Current slot capacity; grown/shrunk dynamically, always
  // <= kNumTransferEntries.
  int32_t cache_size_;
};
01246
01247
// Pads each central list out to a multiple of 64 bytes so neighboring
// size classes in central_cache[] do not share cache lines.
// NOTE(review): if sizeof(TCMalloc_Central_FreeList) is already a
// multiple of 64 this declares a zero-length array, which is a
// compiler extension — confirm all supported compilers accept it.
class TCMalloc_Central_FreeListPadded : public TCMalloc_Central_FreeList {
 private:
  char pad_[(64 - (sizeof(TCMalloc_Central_FreeList) % 64)) % 64];
};
01252
01253
01254
01255
01256
01257
01258
// Central cache: one padded free list per size class.
static TCMalloc_Central_FreeListPadded central_cache[kNumClasses];

// Page heap state.  The heap object is constructed lazily inside
// preallocated storage (pageheap_memory) so no constructor has to run
// during static initialization; phinited records whether that happened.
static SpinLock pageheap_lock = SPINLOCK_INITIALIZER;
static char pageheap_memory[sizeof(TCMalloc_PageHeap)];
static bool phinited = false;

// Access the lazily-constructed page heap in its raw storage.
#define pageheap ((TCMalloc_PageHeap*) pageheap_memory)

// Thread-specific data: heap_key maps a thread to its cache once
// tsd_inited is set by InitTSD().
static bool tsd_inited = false;
static pthread_key_t heap_key;

// Metadata allocator for thread caches.
static PageHeapAllocator<TCMalloc_ThreadCache> threadheap_allocator;

// Global linked list of all thread caches, plus its length.
static TCMalloc_ThreadCache* thread_heaps = NULL;
static int thread_heap_count = 0;

// Budget for the combined size of all thread caches.
static size_t overall_thread_cache_size = kDefaultOverallThreadCacheSize;

// Per-thread cache limit; volatile because it is read without a lock
// (presumably updated by RecomputeThreadCacheSize() — declared above).
static volatile size_t per_thread_cache_size = kMaxThreadCacheSize;
01297
// Initializes an empty central list for size class cl with an initial
// transfer-cache capacity of one slot.
void TCMalloc_Central_FreeList::Init(size_t cl) {
  lock_.Init();
  size_class_ = cl;
  DLL_Init(&empty_);
  DLL_Init(&nonempty_);
  counter_ = 0;

  cache_size_ = 1;
  used_slots_ = 0;
  ASSERT(cache_size_ <= kNumTransferEntries);
}
01309
01310 void TCMalloc_Central_FreeList::ReleaseListToSpans(void* start) {
01311 while (start) {
01312 void *next = SLL_Next(start);
01313 ReleaseToSpans(start);
01314 start = next;
01315 }
01316 }
01317
// Returns one object to the span it was carved from.  If the span
// becomes entirely free, its pages go back to the page heap.  lock_
// must be held; it is dropped temporarily around the page-heap call.
void TCMalloc_Central_FreeList::ReleaseToSpans(void* object) {
  const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
  Span* span = pageheap->GetDescriptor(p);
  ASSERT(span != NULL);
  ASSERT(span->refcount > 0);

  // First object back into a drained span: move it to nonempty_.
  if (span->objects == NULL) {
    DLL_Remove(span);
    DLL_Prepend(&nonempty_, span);
    Event(span, 'N', 0);
  }

  // Expensive full-list consistency check, disabled by default.
  if (false) {
    int got = 0;
    for (void* p = span->objects; p != NULL; p = *((void**) p)) {
      ASSERT(p != object);
      got++;
    }
    ASSERT(got + span->refcount ==
           (span->length<<kPageShift)/ByteSizeForClass(span->sizeclass));
  }

  counter_++;
  span->refcount--;
  if (span->refcount == 0) {
    // Whole span is free: discount all its objects and release the
    // pages back to the page heap.
    Event(span, '#', 0);
    counter_ -= (span->length<<kPageShift) / ByteSizeForClass(span->sizeclass);
    DLL_Remove(span);

    // Drop our lock while taking pageheap_lock — NOTE(review):
    // presumably to keep a consistent lock order; confirm.
    lock_.Unlock();
    {
      SpinLockHolder h(&pageheap_lock);
      pageheap->Delete(span);
    }
    lock_.Lock();
  } else {
    *(reinterpret_cast<void**>(object)) = span->objects;
    span->objects = object;
  }
}
01362
// Round-robins over all size classes (via a deliberately unlocked
// static counter, per its name) and asks the chosen one to shrink its
// transfer cache.  Returns true if capacity was reclaimed.
bool TCMalloc_Central_FreeList::EvictRandomSizeClass(
    int locked_size_class, bool force) {
  static int race_counter = 0;
  int t = race_counter++;
  if (t >= kNumClasses) {
    // Wrap the counter back into range.
    while (t >= kNumClasses) {
      t -= kNumClasses;
    }
    race_counter = t;
  }
  ASSERT(t >= 0);
  ASSERT(t < kNumClasses);
  // Never try to shrink the class whose lock the caller holds.
  if (t == locked_size_class) return false;
  return central_cache[t].ShrinkCache(locked_size_class, force);
}
01378
// Ensures there is a free transfer-cache slot, growing this class's
// capacity by stealing a slot from another class if necessary.
bool TCMalloc_Central_FreeList::MakeCacheSpace() {
  // A slot is already free.
  if (used_slots_ < cache_size_) return true;
  // Already at the maximum possible capacity.
  if (cache_size_ == kNumTransferEntries) return false;
  // Try a gentle eviction first, then a forced one.
  if (EvictRandomSizeClass(size_class_, false) ||
      EvictRandomSizeClass(size_class_, true)) {
    // Some other class gave up a slot; claim it for ourselves.
    cache_size_++;
    return true;
  }
  return false;
}
01393
01394
// RAII helper that swaps which of two spin locks is held: releases
// `held`, acquires `temp`, and restores the original state on
// destruction.  Used by ShrinkCache so it never holds two central-list
// locks at once in an inconsistent order.
namespace {
class LockInverter {
 private:
  TCMalloc_SpinLock *held_, *temp_;
 public:
  inline explicit LockInverter(TCMalloc_SpinLock* held, TCMalloc_SpinLock *temp)
    : held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); }
  inline ~LockInverter() { temp_->Unlock(); held_->Lock(); }
};
}
01405
01406 bool TCMalloc_Central_FreeList::ShrinkCache(int locked_size_class, bool force) {
01407
01408 if (cache_size_ == 0) return false;
01409
01410 if (force == false && used_slots_ == cache_size_) return false;
01411
01412
01413
01414
01415
01416 LockInverter li(¢ral_cache[locked_size_class].lock_, &lock_);
01417 ASSERT(used_slots_ <= cache_size_);
01418 ASSERT(0 <= cache_size_);
01419 if (cache_size_ == 0) return false;
01420 if (used_slots_ == cache_size_) {
01421 if (force == false) return false;
01422
01423
01424 cache_size_--;
01425 used_slots_--;
01426 ReleaseListToSpans(tc_slots_[used_slots_].head);
01427 return true;
01428 }
01429 cache_size_--;
01430 return true;
01431 }
01432
// Accepts the N-object list [start..end].  A standard-size batch is
// parked whole in a transfer-cache slot when one can be made available;
// otherwise the objects are walked back to their spans.
void TCMalloc_Central_FreeList::InsertRange(void *start, void *end, int N) {
  SpinLockHolder h(&lock_);
  if (N == num_objects_to_move[size_class_] &&
      MakeCacheSpace()) {
    int slot = used_slots_++;
    ASSERT(slot >=0);
    ASSERT(slot < kNumTransferEntries);
    TCEntry *entry = &tc_slots_[slot];
    entry->head = start;
    entry->tail = end;
    return;
  }
  ReleaseListToSpans(start);
}
01447
// Hands out up to *N objects.  A standard-size request is served whole
// from a transfer-cache slot when one is occupied; otherwise objects
// are pulled from spans one at a time.  On return, *N holds the number
// actually delivered in [*start..*end] (0 when out of memory).
void TCMalloc_Central_FreeList::RemoveRange(void **start, void **end, int *N) {
  int num = *N;
  ASSERT(num > 0);

  SpinLockHolder h(&lock_);
  if (num == num_objects_to_move[size_class_] && used_slots_ > 0) {
    int slot = --used_slots_;
    ASSERT(slot >= 0);
    TCEntry *entry = &tc_slots_[slot];
    *start = entry->head;
    *end = entry->tail;
    return;
  }

  // Slow path: assemble a batch object by object, populating new spans
  // if necessary.
  void *tail = FetchFromSpansSafe();
  if (!tail) {
    // Completely out of memory.
    *start = *end = NULL;
    *N = 0;
    return;
  }

  SLL_SetNext(tail, NULL);
  void *head = tail;
  int count = 1;
  while (count < num) {
    void *t = FetchFromSpans();
    if (!t) break;
    SLL_Push(&head, t);
    count++;
  }
  *start = head;
  *end = tail;
  *N = count;
}
01484
01485
01486 void* TCMalloc_Central_FreeList::FetchFromSpansSafe() {
01487 void *t = FetchFromSpans();
01488 if (!t) {
01489 Populate();
01490 t = FetchFromSpans();
01491 }
01492 return t;
01493 }
01494
// Pop one object from the first non-empty span's free list.  Returns
// NULL when no span has free objects.  Caller must hold lock_.
void* TCMalloc_Central_FreeList::FetchFromSpans() {
  if (DLL_IsEmpty(&nonempty_)) return NULL;
  Span* span = nonempty_.next;

  ASSERT(span->objects != NULL);
  span->refcount++;   // one more object from this span is handed out
  void* result = span->objects;
  span->objects = *(reinterpret_cast<void**>(result));
  if (span->objects == NULL) {
    // Span is exhausted; move it from nonempty_ to empty_.
    DLL_Remove(span);
    DLL_Prepend(&empty_, span);
    Event(span, 'E', 0);
  }
  counter_--;   // one fewer free object tracked by this list
  return result;
}
01512
01513
// Fetch memory from the page heap and carve it into objects for this
// list.  Entered with lock_ held; the lock is released while talking to
// the page heap and re-acquired before the new span is published.
void TCMalloc_Central_FreeList::Populate() {
  // Release our lock while operating on the page heap (its own lock
  // suffices for that phase).
  lock_.Unlock();
  const size_t npages = class_to_pages[size_class_];

  Span* span;
  {
    SpinLockHolder h(&pageheap_lock);
    span = pageheap->New(npages);
    if (span) pageheap->RegisterSizeClass(span, size_class_);
  }
  if (span == NULL) {
    MESSAGE("allocation failed: %d\n", errno);
    lock_.Lock();
    return;
  }

  // Thread the span's pages into a NULL-terminated singly-linked free
  // list anchored at span->objects, one node per object of this class's
  // size.
  void** tail = &span->objects;
  char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
  char* limit = ptr + (npages << kPageShift);
  const size_t size = ByteSizeForClass(size_class_);
  int num = 0;
  while (ptr + size <= limit) {
    *tail = ptr;
    tail = reinterpret_cast<void**>(ptr);
    ptr += size;
    num++;
  }
  ASSERT(ptr <= limit);
  *tail = NULL;
  span->refcount = 0;  // no objects handed out yet

  // Re-acquire our lock and publish the span.
  lock_.Lock();
  DLL_Prepend(&nonempty_, span);
  counter_ += num;
}
01553
01554
01555
01556
01557
01558 inline bool TCMalloc_ThreadCache::SampleAllocation(size_t k) {
01559 if (bytes_until_sample_ < k) {
01560 PickNextSample();
01561 return true;
01562 } else {
01563 bytes_until_sample_ -= k;
01564 return false;
01565 }
01566 }
01567
// Initialize a freshly allocated thread cache for thread 'tid'.
void TCMalloc_ThreadCache::Init(pthread_t tid) {
  size_ = 0;
  next_ = NULL;
  prev_ = NULL;
  tid_ = tid;
  in_setspecific_ = false;
  for (size_t cl = 0; cl < kNumClasses; ++cl) {
    list_[cl].Init();
  }

  // Seed the per-thread sampling PRNG from this heap's address, then
  // warm it up so early outputs are decorrelated from the seed.
  rnd_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this));
  for (int i = 0; i < 100; i++) {
    PickNextSample();
  }
}
01584
01585 void TCMalloc_ThreadCache::Cleanup() {
01586
01587 for (int cl = 0; cl < kNumClasses; ++cl) {
01588 if (list_[cl].length() > 0) {
01589 ReleaseToCentralCache(cl, list_[cl].length());
01590 }
01591 }
01592 }
01593
// Allocate an object for 'size' bytes from the local free list of the
// matching size class, refilling from the central cache when empty.
// Returns NULL only when the central cache could not supply any object.
inline void* TCMalloc_ThreadCache::Allocate(size_t size) {
  ASSERT(size <= kMaxSize);
  const size_t cl = SizeClass(size);
  FreeList* list = &list_[cl];
  if (list->empty()) {
    FetchFromCentralCache(cl);
    if (list->empty()) return NULL;
  }
  size_ -= ByteSizeForClass(cl);  // bytes leave this cache's accounting
  return list->Pop();
}
01605
// Return 'ptr' (an object of size class 'cl') to the local free list,
// shedding surplus back to the central caches when the list or the
// whole cache grows too large.
inline void TCMalloc_ThreadCache::Deallocate(void* ptr, size_t cl) {
  size_ += ByteSizeForClass(cl);
  FreeList* list = &list_[cl];
  list->Push(ptr);
  // Per-list overflow: hand one batch back to the central cache.
  if (list->length() > kMaxFreeListLength) {
    ReleaseToCentralCache(cl, num_objects_to_move[cl]);
  }
  // Whole-cache overflow: trim idle memory across all classes.
  if (size_ >= per_thread_cache_size) Scavenge();
}
01616
01617
01618 void TCMalloc_ThreadCache::FetchFromCentralCache(size_t cl) {
01619 int fetch_count = num_objects_to_move[cl];
01620 void *start, *end;
01621 central_cache[cl].RemoveRange(&start, &end, &fetch_count);
01622 list_[cl].PushRange(fetch_count, start, end);
01623 size_ += ByteSizeForClass(cl) * fetch_count;
01624 }
01625
01626
// Move N objects of class 'cl' from this thread cache back to the
// central cache, in batch-sized chunks so InsertRange() can use the
// transfer cache.
void TCMalloc_ThreadCache::ReleaseToCentralCache(size_t cl, int N) {
  ASSERT(N > 0);
  FreeList* src = &list_[cl];
  if (N > src->length()) N = src->length();
  size_ -= N*ByteSizeForClass(cl);

  // Release whole batches first: InsertRange only parks a list in the
  // transfer cache when its length equals num_objects_to_move[cl].
  int batch_size = num_objects_to_move[cl];
  while (N > batch_size) {
    void *tail, *head;
    src->PopRange(batch_size, &head, &tail);
    central_cache[cl].InsertRange(head, tail, batch_size);
    N -= batch_size;
  }
  // Final (possibly short) remainder.
  void *tail, *head;
  src->PopRange(N, &head, &tail);
  central_cache[cl].InsertRange(head, tail, N);
}
01646
01647
// Release idle memory back to the central caches.  For each size class,
// drop half of the low-water mark — the minimum list length observed
// since the previous scavenge.  Objects below the low-water mark were
// never used in that interval, so they are likely surplus.
void TCMalloc_ThreadCache::Scavenge() {
  for (int cl = 0; cl < kNumClasses; cl++) {
    FreeList* list = &list_[cl];
    const int lowmark = list->lowwatermark();
    if (lowmark > 0) {
      const int drop = (lowmark > 1) ? lowmark/2 : 1;
      ReleaseToCentralCache(cl, drop);
    }
    list->clear_lowwatermark();
  }
}
01671
// Return the calling thread's cache, creating it if necessary.  On the
// very first call this also bootstraps the whole module.
inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCache() {
  void* ptr = NULL;
  if (!tsd_inited) {
    InitModule();   // first use ever: initialize allocator state
  } else {
    ptr = perftools_pthread_getspecific(heap_key);
  }
  if (ptr == NULL) ptr = CreateCacheIfNecessary();
  return reinterpret_cast<TCMalloc_ThreadCache*>(ptr);
}
01682
01683
01684
01685
01686 inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCacheIfPresent() {
01687 if (!tsd_inited) return NULL;
01688 return reinterpret_cast<TCMalloc_ThreadCache*>
01689 (perftools_pthread_getspecific(heap_key));
01690 }
01691
// Choose how many bytes will be allocated before the next sample is
// taken: advance a 32-bit LFSR-style PRNG (shift left; XOR in kPoly
// when the top bit was set) and reduce it modulo the sample period.
void TCMalloc_ThreadCache::PickNextSample() {
  // Feedback taps for the PRNG step below.
  static const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0);
  uint32_t r = rnd_;
  // Arithmetic shift of the sign bit yields all-ones exactly when the
  // top bit of r is set, selecting whether kPoly is XORed in.
  rnd_ = (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly);

  // Re-derive sample_period only when the flag changed; the period is
  // snapped to the first entry of primes_list >= the flag value.
  const int flag_value = FLAGS_tcmalloc_sample_parameter;
  static int last_flag_value = -1;

  if (flag_value != last_flag_value) {
    SpinLockHolder h(&sample_period_lock);
    int i;
    for (i = 0; i < (sizeof(primes_list)/sizeof(primes_list[0]) - 1); i++) {
      if (primes_list[i] >= flag_value) {
        break;
      }
    }
    sample_period = primes_list[i];
    last_flag_value = flag_value;
  }
  bytes_until_sample_ = rnd_ % sample_period;
}
01717
// One-time, lock-protected initialization of the allocator: size
// classes, metadata allocators, sampled-object list, central caches,
// and the page heap (placement-new'ed into static storage).  Idempotent
// via the phinited flag; safe to race from multiple threads.
void TCMalloc_ThreadCache::InitModule() {
  SpinLockHolder h(&pageheap_lock);
  if (!phinited) {
    InitSizeClasses();
    threadheap_allocator.Init();
    span_allocator.Init();
    span_allocator.New();  // pre-warm the span allocator
    span_allocator.New();
    stacktrace_allocator.Init();
    DLL_Init(&sampled_objects);
    for (int i = 0; i < kNumClasses; ++i) {
      central_cache[i].Init(i);
    }
    // Construct the page heap in statically reserved memory so no
    // dynamic allocation is needed during bootstrap.
    new ((void*)pageheap_memory) TCMalloc_PageHeap;
    phinited = 1;
  }
}
01741
01742 void TCMalloc_ThreadCache::InitTSD() {
01743 ASSERT(!tsd_inited);
01744 perftools_pthread_key_create(&heap_key, DeleteCache);
01745 tsd_inited = true;
01746
01747
01748 pthread_t zero;
01749 memset(&zero, 0, sizeof(zero));
01750 SpinLockHolder h(&pageheap_lock);
01751 for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
01752 if (h->tid_ == zero) {
01753 h->tid_ = pthread_self();
01754 }
01755 }
01756 }
01757
01758 void* TCMalloc_ThreadCache::CreateCacheIfNecessary() {
01759
01760 TCMalloc_ThreadCache* heap = NULL;
01761 {
01762 SpinLockHolder h(&pageheap_lock);
01763
01764
01765 pthread_t me;
01766 if (!tsd_inited) {
01767 memset(&me, 0, sizeof(me));
01768 } else {
01769 me = pthread_self();
01770 }
01771
01772
01773
01774
01775 for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
01776 if (h->tid_ == me) {
01777 heap = h;
01778 break;
01779 }
01780 }
01781
01782 if (heap == NULL) {
01783
01784 heap = threadheap_allocator.New();
01785 heap->Init(me);
01786 heap->next_ = thread_heaps;
01787 heap->prev_ = NULL;
01788 if (thread_heaps != NULL) thread_heaps->prev_ = heap;
01789 thread_heaps = heap;
01790 thread_heap_count++;
01791 RecomputeThreadCacheSize();
01792 }
01793 }
01794
01795
01796
01797
01798
01799 if (!heap->in_setspecific_ && tsd_inited) {
01800 heap->in_setspecific_ = true;
01801 perftools_pthread_setspecific(heap_key, heap);
01802 heap->in_setspecific_ = false;
01803 }
01804 return heap;
01805 }
01806
// TSD destructor (registered in InitTSD): flush the dying thread's
// cache to the central lists, unlink it, and free its storage.
void TCMalloc_ThreadCache::DeleteCache(void* ptr) {
  // Return all cached objects to the central caches.
  TCMalloc_ThreadCache* heap;
  heap = reinterpret_cast<TCMalloc_ThreadCache*>(ptr);
  heap->Cleanup();

  // Unlink from the doubly-linked global list of thread heaps.
  SpinLockHolder h(&pageheap_lock);
  if (heap->next_ != NULL) heap->next_->prev_ = heap->prev_;
  if (heap->prev_ != NULL) heap->prev_->next_ = heap->next_;
  if (thread_heaps == heap) thread_heaps = heap->next_;
  thread_heap_count--;
  RecomputeThreadCacheSize();  // remaining threads share the budget

  threadheap_allocator.Delete(heap);
}
01823
01824 void TCMalloc_ThreadCache::RecomputeThreadCacheSize() {
01825
01826 int n = thread_heap_count > 0 ? thread_heap_count : 1;
01827 size_t space = overall_thread_cache_size / n;
01828
01829
01830 if (space < kMinThreadCacheSize) space = kMinThreadCacheSize;
01831 if (space > kMaxThreadCacheSize) space = kMaxThreadCacheSize;
01832
01833 per_thread_cache_size = space;
01834 }
01835
// Debugging aid: dump each class's free-list length and low-water mark.
void TCMalloc_ThreadCache::Print() const {
  for (int cl = 0; cl < kNumClasses; ++cl) {
    MESSAGE(" %5" PRIuS " : %4d len; %4d lo\n",
            ByteSizeForClass(cl),
            list_[cl].length(),
            list_[cl].lowwatermark());
  }
}
01844
01845
// Aggregate byte counts for the whole allocator; filled by ExtractStats().
struct TCMallocStats {
  uint64_t system_bytes;    // total bytes obtained from the system
  uint64_t thread_bytes;    // bytes sitting in per-thread caches
  uint64_t central_bytes;   // bytes free in central cache span lists
  uint64_t transfer_bytes;  // bytes parked in central transfer caches
  uint64_t pageheap_bytes;  // bytes free in the page heap
  uint64_t metadata_bytes;  // bytes used for allocator metadata
};
01854
01855
01856 static void ExtractStats(TCMallocStats* r, uint64_t* class_count) {
01857 r->central_bytes = 0;
01858 r->transfer_bytes = 0;
01859 for (int cl = 0; cl < kNumClasses; ++cl) {
01860 const int length = central_cache[cl].length();
01861 const int tc_length = central_cache[cl].tc_length();
01862 r->central_bytes += static_cast<uint64_t>(ByteSizeForClass(cl)) * length;
01863 r->transfer_bytes +=
01864 static_cast<uint64_t>(ByteSizeForClass(cl)) * tc_length;
01865 if (class_count) class_count[cl] = length + tc_length;
01866 }
01867
01868
01869 r->thread_bytes = 0;
01870 {
01871 SpinLockHolder h(&pageheap_lock);
01872 for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
01873 r->thread_bytes += h->Size();
01874 if (class_count) {
01875 for (int cl = 0; cl < kNumClasses; ++cl) {
01876 class_count[cl] += h->freelist_length(cl);
01877 }
01878 }
01879 }
01880 }
01881
01882 {
01883 SpinLockHolder h(&pageheap_lock);
01884 r->system_bytes = pageheap->SystemBytes();
01885 r->metadata_bytes = metadata_system_bytes;
01886 r->pageheap_bytes = pageheap->FreeBytes();
01887 }
01888 }
01889
01890
// Write human-readable allocator statistics to 'out'.  level >= 2 adds
// a per-size-class table and the page heap's own dump.
static void DumpStats(TCMalloc_Printer* out, int level) {
  TCMallocStats stats;
  uint64_t class_count[kNumClasses];
  ExtractStats(&stats, (level >= 2 ? class_count : NULL));

  if (level >= 2) {
    out->printf("------------------------------------------------\n");
    uint64_t cumulative = 0;
    for (int cl = 0; cl < kNumClasses; ++cl) {
      if (class_count[cl] > 0) {
        uint64_t class_bytes = class_count[cl] * ByteSizeForClass(cl);
        cumulative += class_bytes;
        out->printf("class %3d [ %8" PRIuS " bytes ] : "
                    "%8" LLU " objs; %5.1f MB; %5.1f cum MB\n",
                    cl, ByteSizeForClass(cl),
                    class_count[cl],
                    class_bytes / 1048576.0,
                    cumulative / 1048576.0);
      }
    }

    SpinLockHolder h(&pageheap_lock);
    pageheap->Dump(out);
  }

  // Application-held bytes = system bytes minus everything parked in
  // the various free lists.
  const uint64_t bytes_in_use = stats.system_bytes
                                - stats.pageheap_bytes
                                - stats.central_bytes
                                - stats.transfer_bytes
                                - stats.thread_bytes;

  out->printf("------------------------------------------------\n"
              "MALLOC: %12" LLU " Heap size\n"
              "MALLOC: %12" LLU " Bytes in use by application\n"
              "MALLOC: %12" LLU " Bytes free in page heap\n"
              "MALLOC: %12" LLU " Bytes free in central cache\n"
              "MALLOC: %12" LLU " Bytes free in transfer cache\n"
              "MALLOC: %12" LLU " Bytes free in thread caches\n"
              "MALLOC: %12" LLU " Spans in use\n"
              "MALLOC: %12" LLU " Thread heaps in use\n"
              "MALLOC: %12" LLU " Metadata allocated\n"
              "------------------------------------------------\n",
              stats.system_bytes,
              bytes_in_use,
              stats.pageheap_bytes,
              stats.central_bytes,
              stats.transfer_bytes,
              stats.thread_bytes,
              uint64_t(span_allocator.inuse()),
              uint64_t(threadheap_allocator.inuse()),
              stats.metadata_bytes);
}
01943
01944 static void PrintStats(int level) {
01945 const int kBufferSize = 16 << 10;
01946 char* buffer = new char[kBufferSize];
01947 TCMalloc_Printer printer(buffer, kBufferSize);
01948 DumpStats(&printer, level);
01949 write(STDERR_FILENO, buffer, strlen(buffer));
01950 delete[] buffer;
01951 }
01952
01953 static void** DumpStackTraces() {
01954
01955 int needed_slots = 0;
01956 {
01957 SpinLockHolder h(&pageheap_lock);
01958 for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
01959 StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
01960 needed_slots += 3 + stack->depth;
01961 }
01962 needed_slots += 100;
01963 needed_slots += needed_slots/8;
01964 }
01965
01966 void** result = new void*[needed_slots];
01967 if (result == NULL) {
01968 MESSAGE("tcmalloc: could not allocate %d slots for stack traces\n",
01969 needed_slots);
01970 return NULL;
01971 }
01972
01973 SpinLockHolder h(&pageheap_lock);
01974 int used_slots = 0;
01975 for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
01976 ASSERT(used_slots < needed_slots);
01977 StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
01978 if (used_slots + 3 + stack->depth >= needed_slots) {
01979
01980 break;
01981 }
01982
01983 result[used_slots+0] = reinterpret_cast<void*>(1);
01984 result[used_slots+1] = reinterpret_cast<void*>(stack->size);
01985 result[used_slots+2] = reinterpret_cast<void*>(stack->depth);
01986 for (int d = 0; d < stack->depth; d++) {
01987 result[used_slots+3+d] = stack->stack[d];
01988 }
01989 used_slots += 3 + stack->depth;
01990 }
01991 result[used_slots] = reinterpret_cast<void*>(0);
01992 return result;
01993 }
01994
01995 static void** DumpHeapGrowthStackTraces() {
01996
01997 int needed_slots = 0;
01998 {
01999 SpinLockHolder h(&pageheap_lock);
02000 for (StackTrace* t = growth_stacks;
02001 t != NULL;
02002 t = reinterpret_cast<StackTrace*>(t->stack[kMaxStackDepth-1])) {
02003 needed_slots += 3 + t->depth;
02004 }
02005 needed_slots += 100;
02006 needed_slots += needed_slots/8;
02007 }
02008
02009 void** result = new void*[needed_slots];
02010 if (result == NULL) {
02011 MESSAGE("tcmalloc: could not allocate %d slots for stack traces\n",
02012 needed_slots);
02013 return NULL;
02014 }
02015
02016 SpinLockHolder h(&pageheap_lock);
02017 int used_slots = 0;
02018 for (StackTrace* t = growth_stacks;
02019 t != NULL;
02020 t = reinterpret_cast<StackTrace*>(t->stack[kMaxStackDepth-1])) {
02021 ASSERT(used_slots < needed_slots);
02022 if (used_slots + 3 + t->depth >= needed_slots) {
02023
02024 break;
02025 }
02026
02027 result[used_slots+0] = reinterpret_cast<void*>(1);
02028 result[used_slots+1] = reinterpret_cast<void*>(t->size);
02029 result[used_slots+2] = reinterpret_cast<void*>(t->depth);
02030 for (int d = 0; d < t->depth; d++) {
02031 result[used_slots+3+d] = t->stack[d];
02032 }
02033 used_slots += 3 + t->depth;
02034 }
02035 result[used_slots] = reinterpret_cast<void*>(0);
02036 return result;
02037 }
02038
02039
02040 class TCMallocImplementation : public MallocExtension {
02041 public:
02042 virtual void GetStats(char* buffer, int buffer_length) {
02043 ASSERT(buffer_length > 0);
02044 TCMalloc_Printer printer(buffer, buffer_length);
02045
02046
02047 if (buffer_length < 10000) {
02048 DumpStats(&printer, 1);
02049 } else {
02050 DumpStats(&printer, 2);
02051 }
02052 }
02053
02054 virtual void** ReadStackTraces() {
02055 return DumpStackTraces();
02056 }
02057
02058 virtual void** ReadHeapGrowthStackTraces() {
02059 return DumpHeapGrowthStackTraces();
02060 }
02061
02062 virtual bool GetNumericProperty(const char* name, size_t* value) {
02063 ASSERT(name != NULL);
02064
02065 if (strcmp(name, "generic.current_allocated_bytes") == 0) {
02066 TCMallocStats stats;
02067 ExtractStats(&stats, NULL);
02068 *value = stats.system_bytes
02069 - stats.thread_bytes
02070 - stats.central_bytes
02071 - stats.pageheap_bytes;
02072 return true;
02073 }
02074
02075 if (strcmp(name, "generic.heap_size") == 0) {
02076 TCMallocStats stats;
02077 ExtractStats(&stats, NULL);
02078 *value = stats.system_bytes;
02079 return true;
02080 }
02081
02082 if (strcmp(name, "tcmalloc.slack_bytes") == 0) {
02083
02084
02085 SpinLockHolder l(&pageheap_lock);
02086 *value = pageheap->FreeBytes();
02087 return true;
02088 }
02089
02090 if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
02091 SpinLockHolder l(&pageheap_lock);
02092 *value = overall_thread_cache_size;
02093 return true;
02094 }
02095
02096 if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) {
02097 TCMallocStats stats;
02098 ExtractStats(&stats, NULL);
02099 *value = stats.thread_bytes;
02100 return true;
02101 }
02102
02103 return false;
02104 }
02105
02106 virtual bool SetNumericProperty(const char* name, size_t value) {
02107 ASSERT(name != NULL);
02108
02109 if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
02110
02111 if (value < kMinThreadCacheSize) value = kMinThreadCacheSize;
02112 if (value > (1<<30)) value = (1<<30);
02113
02114 SpinLockHolder l(&pageheap_lock);
02115 overall_thread_cache_size = static_cast<size_t>(value);
02116 TCMalloc_ThreadCache::RecomputeThreadCacheSize();
02117 return true;
02118 }
02119
02120 return false;
02121 }
02122 };
02123
02124
02125
02126
02127
// Satisfy a sampled allocation of 'size' bytes with whole pages from
// the page heap and register it on the sampled_objects list.  Returns
// NULL when the page heap is exhausted; returns an unregistered span if
// only the stack-trace metadata allocation fails.
static Span* DoSampledAllocation(size_t size) {
  SpinLockHolder h(&pageheap_lock);

  // Allocate the span (size 0 still gets one page).
  Span* span = pageheap->New(pages(size == 0 ? 1 : size));
  if (span == NULL) {
    return NULL;
  }

  // Allocate the stack-trace record.
  StackTrace* stack = stacktrace_allocator.New();
  if (stack == NULL) {
    // Sampling failed because of lack of memory; the allocation itself
    // still succeeds, just untracked.
    return span;
  }

  // NOTE(review): depth is left 0 here — presumably the caller or
  // profiler fills in the actual frames; confirm.
  stack->depth = 0;
  stack->size = size;
  span->sample = 1;
  span->objects = stack;
  DLL_Prepend(&sampled_objects, span);

  return span;
}
02153
// Central allocation path shared by malloc/calloc/realloc/operator new.
// Small requests (<= kMaxSize) come from the per-thread cache; larger
// requests take whole pages from the page heap; sampled requests are
// tracked via DoSampledAllocation.  Sets errno = ENOMEM and returns
// NULL on failure.
static inline void* do_malloc(size_t size) {
  void* ret = NULL;

  if (TCMallocDebug::level >= TCMallocDebug::kVerbose) {
    MESSAGE("In tcmalloc do_malloc(%" PRIuS")\n", size);
  }
  // GetCache() also forces module initialization on first use.
  TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
  if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
    Span* span = DoSampledAllocation(size);
    if (span != NULL) {
      ret = reinterpret_cast<void*>(span->start << kPageShift);
    }
  } else if (size > kMaxSize) {
    // Too big for a size class: use the page-level allocator directly.
    SpinLockHolder h(&pageheap_lock);
    Span* span = pageheap->New(pages(size));
    if (span != NULL) {
      ret = reinterpret_cast<void*>(span->start << kPageShift);
    }
  } else {
    ret = heap->Allocate(size);
  }
  if (ret == NULL) errno = ENOMEM;
  return ret;
}
02180
// Central deallocation path.  Small objects (span->sizeclass != 0) go
// to the thread cache, or straight to the central cache when this
// thread has none; large/sampled objects go back to the page heap.
static inline void do_free(void* ptr) {
  if (TCMallocDebug::level >= TCMallocDebug::kVerbose)
    MESSAGE("In tcmalloc do_free(%p)\n", ptr);
  // free(NULL) is a no-op, per the C standard.
  if (ptr == NULL) return;
  ASSERT(pageheap != NULL);  // should not be called before any malloc
  const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
  Span* span = pageheap->GetDescriptor(p);

  ASSERT(span != NULL);
  ASSERT(!span->free);
  const size_t cl = span->sizeclass;
  if (cl != 0) {
    ASSERT(!span->sample);
    TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCacheIfPresent();
    if (heap != NULL) {
      heap->Deallocate(ptr, cl);
    } else {
      // No thread cache: hand the single object to the central cache.
      SLL_SetNext(ptr, NULL);
      central_cache[cl].InsertRange(ptr, ptr, 1);
    }
  } else {
    // Whole-span allocation: must start on a page boundary.
    SpinLockHolder h(&pageheap_lock);
    ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
    ASSERT(span->start == p);
    if (span->sample) {
      // Sampled object: retire its stack-trace record first.
      DLL_Remove(span);
      stacktrace_allocator.Delete(reinterpret_cast<StackTrace*>(span->objects));
      span->objects = NULL;
    }
    pageheap->Delete(span);
  }
}
02214
02215
02216
02217
02218
02219
02220
02221
// Aligned allocation; 'align' must be a power of two.  Strategy:
//  1. small size + sub-page alignment: find a size class whose byte
//     size is already a multiple of 'align';
//  2. alignment <= one page: any span is page-aligned, so a plain
//     page-heap allocation suffices;
//  3. larger alignment: over-allocate by 'align' pages, then trim the
//     misaligned leading pages and any unneeded trailing pages.
static void* do_memalign(size_t align, size_t size) {
  ASSERT((align & (align - 1)) == 0);
  ASSERT(align > 0);
  // Reject requests whose padded size would overflow.
  if (size + align < size) return NULL;

  if (pageheap == NULL) TCMalloc_ThreadCache::InitModule();

  // Allocating 0 bytes must still return a unique pointer.
  if (size == 0) size = 1;

  if (size <= kMaxSize && align < kPageSize) {
    // Search upward from the natural size class for one whose size is a
    // multiple of the requested alignment.
    int cl = SizeClass(size);
    while (cl < kNumClasses && ((class_to_size[cl] & (align - 1)) != 0)) {
      cl++;
    }
    if (cl < kNumClasses) {
      TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
      return heap->Allocate(class_to_size[cl]);
    }
  }

  // Page-heap paths below all need the page-heap lock.
  SpinLockHolder h(&pageheap_lock);

  if (align <= kPageSize) {
    // Spans are naturally page-aligned, so any span of sufficient
    // length satisfies the alignment.
    Span* span = pageheap->New(pages(size));
    if (span == NULL) return NULL;
    return reinterpret_cast<void*>(span->start << kPageShift);
  }

  // Over-allocate so an aligned region must exist inside the span.
  const int alloc = pages(size + align);
  Span* span = pageheap->New(alloc);
  if (span == NULL) return NULL;

  // Skip leading pages until the start address is aligned.
  int skip = 0;
  while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) {
    skip++;
  }
  ASSERT(skip < alloc);
  if (skip > 0) {
    Span* rest = pageheap->Split(span, skip);
    pageheap->Delete(span);   // return the misaligned prefix
    span = rest;
  }

  // Return any pages beyond what the request actually needs.
  const int needed = pages(size);
  ASSERT(span->length >= needed);
  if (span->length > needed) {
    Span* trailer = pageheap->Split(span, needed);
    pageheap->Delete(trailer);
  }
  return reinterpret_cast<void*>(span->start << kPageShift);
}
02287
02288
02289
02290
02291
02292
02293
02294
02295
02296
02297
02298
02299
02300
02301
02302
// Static-initialization hook: the constructor runs at module load
// (reads TCMALLOC_DEBUG, forces allocator initialization, sets up TSD,
// registers the MallocExtension); the destructor optionally prints
// statistics when MALLOCSTATS is set in the environment.
class TCMallocGuard {
 public:
  TCMallocGuard() {
    char *envval;
    if ((envval = getenv("TCMALLOC_DEBUG"))) {
      TCMallocDebug::level = atoi(envval);
      MESSAGE("Set tcmalloc debugging level to %d\n", TCMallocDebug::level);
    }
    do_free(do_malloc(1));            // force early initialization
    TCMalloc_ThreadCache::InitTSD();
    do_free(do_malloc(1));            // exercise the TSD path too
    MallocExtension::Register(new TCMallocImplementation);
  }

  ~TCMallocGuard() {
    const char* env = getenv("MALLOCSTATS");
    if (env != NULL) {
      int level = atoi(env);
      if (level < 1) level = 1;
      PrintStats(level);
    }
  }
};
02326
// File-scope instance whose constructor/destructor run at module
// load/unload (see TCMallocGuard above).
static TCMallocGuard module_enter_exit_hook;
02328
02329
02330
02331
02332
02333
02334
02335
02336
02337
02338
02339 extern "C" void* malloc(size_t size) {
02340 void* result = do_malloc(size);
02341 MallocHook::InvokeNewHook(result, size);
02342 return result;
02343 }
02344
// C entry point: release memory obtained from this allocator.
extern "C" void free(void* ptr) {
  MallocHook::InvokeDeleteHook(ptr);
  do_free(ptr);
}
02349
02350 extern "C" void* calloc(size_t n, size_t elem_size) {
02351
02352 const size_t size = n * elem_size;
02353 if (elem_size != 0 && size / elem_size != n) return NULL;
02354
02355 void* result = do_malloc(size);
02356 if (result != NULL) {
02357 memset(result, 0, size);
02358 }
02359 MallocHook::InvokeNewHook(result, size);
02360 return result;
02361 }
02362
// glibc-compatible synonym for free().
extern "C" void cfree(void* ptr) {
  MallocHook::InvokeDeleteHook(ptr);
  do_free(ptr);
}
02367
// C entry point: resize an allocation.  Keeps the existing block when
// the new size still fits its current allocation reasonably well;
// otherwise allocates, copies min(old, new) bytes, and frees the old
// block.  On allocation failure the old block is left intact, per the
// C standard.
extern "C" void* realloc(void* old_ptr, size_t new_size) {
  if (old_ptr == NULL) {
    // realloc(NULL, n) behaves like malloc(n).
    void* result = do_malloc(new_size);
    MallocHook::InvokeNewHook(result, new_size);
    return result;
  }
  if (new_size == 0) {
    // realloc(p, 0) behaves like free(p).
    MallocHook::InvokeDeleteHook(old_ptr);
    do_free(old_ptr);
    return NULL;
  }

  // Determine the old allocation's usable size from its span.
  // NOTE(review): span is not NULL-checked — an invalid old_ptr crashes
  // here; confirm callers only pass pointers from this allocator.
  const PageID p = reinterpret_cast<uintptr_t>(old_ptr) >> kPageShift;
  Span* span = pageheap->GetDescriptor(p);
  size_t old_size;
  if (span->sizeclass != 0) {
    old_size = ByteSizeForClass(span->sizeclass);
  } else {
    old_size = span->length << kPageShift;
  }

  // Reallocate when growing, or when shrinking enough that a smaller
  // size class would actually be used for new_size.
  if ((new_size > old_size) || (AllocationSize(new_size) < old_size)) {
    // Need to reallocate
    void* new_ptr = do_malloc(new_size);
    if (new_ptr == NULL) {
      return NULL;
    }
    MallocHook::InvokeNewHook(new_ptr, new_size);
    memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
    MallocHook::InvokeDeleteHook(old_ptr);
    do_free(old_ptr);
    return new_ptr;
  } else {
    return old_ptr;
  }
}
02407
02408 #ifndef COMPILER_INTEL
02409 #define OP_THROWNOTHING
02410 #define OP_THROWBADALLOC
02411 #else
02412 #define OP_THROWNOTHING throw()
02413 #define OP_THROWBADALLOC throw(std::bad_alloc)
02414 #endif
02415
// Guards the swap-out/swap-back read of std::set_new_handler in cpp_alloc().
static SpinLock set_new_handler_lock = SPINLOCK_INITIALIZER;
02417
// Backing implementation for operator new / new[].  On allocation
// failure it looks up the installed new_handler, runs it, and retries;
// with no handler it throws std::bad_alloc (or returns NULL when
// 'nothrow' is set).
static inline void* cpp_alloc(size_t size, bool nothrow) {
  for (;;) {
    void* p = do_malloc(size);
#ifdef PREANSINEW
    // Pre-ANSI semantics: new never throws; return whatever malloc gave.
    MallocHook::InvokeNewHook(p, size);
    return p;
#else
    if (p == NULL) {
      // set_new_handler is the only portable way to *read* the current
      // handler, so swap it out and immediately restore it under a lock.
      std::new_handler nh;
      {
        SpinLockHolder h(&set_new_handler_lock);
        nh = std::set_new_handler(0);
        (void) std::set_new_handler(nh);
      }

      if (!nh) {
        if (nothrow) return 0;
        throw std::bad_alloc();
      }

      // Run the handler; it may release memory (we then retry the loop)
      // or throw bad_alloc.  For nothrow callers that throw means
      // "give up and return NULL".
      try {
        (*nh)();
      } catch (const std::bad_alloc&) {
        if (!nothrow) throw;
        MallocHook::InvokeNewHook(p, size);
        return p;
      }
    } else {
      MallocHook::InvokeNewHook(p, size);
      return p;
    }
#endif
  }
}
02458
// C++ operator new: throws std::bad_alloc on failure (never NULL).
void* operator new(size_t size) OP_THROWBADALLOC {
  return cpp_alloc(size, false);
}
02462
// nothrow operator new: returns NULL on failure instead of throwing.
void* operator new(size_t size, const std::nothrow_t&) OP_THROWNOTHING {
  return cpp_alloc(size, true);
}
02466
// C++ operator delete.
void operator delete(void* p) OP_THROWNOTHING {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}
02471
// nothrow operator delete (paired with nothrow new).
void operator delete(void* p, const std::nothrow_t&) OP_THROWNOTHING {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}
02476
// C++ array new: throws std::bad_alloc on failure.
void* operator new[](size_t size) OP_THROWBADALLOC {
  return cpp_alloc(size, false);
}
02480
// nothrow array new: returns NULL on failure instead of throwing.
void* operator new[](size_t size, const std::nothrow_t&) OP_THROWNOTHING {
  return cpp_alloc(size, true);
}
02484
// C++ array delete.
void operator delete[](void* p) OP_THROWNOTHING {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}
02489
// nothrow array delete (paired with nothrow array new).
void operator delete[](void* p, const std::nothrow_t&) OP_THROWNOTHING {
  MallocHook::InvokeDeleteHook(p);
  do_free(p);
}
02494
// C entry point for aligned allocation.  No argument validation here;
// see posix_memalign for the checking variant.
extern "C" void* memalign(size_t align, size_t size) {
  void* result = do_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
02500
02501 extern "C" int posix_memalign(void** result_ptr, size_t align, size_t size) {
02502 if (((align % sizeof(void*)) != 0) ||
02503 ((align & (align - 1)) != 0) ||
02504 (align == 0)) {
02505 return EINVAL;
02506 }
02507
02508 void* result = do_memalign(align, size);
02509 MallocHook::InvokeNewHook(result, size);
02510 if (result == NULL) {
02511 return ENOMEM;
02512 } else {
02513 *result_ptr = result;
02514 return 0;
02515 }
02516 }
02517
// Cached getpagesize() result; filled lazily by valloc/pvalloc.
static size_t pagesize = 0;
02519
// Allocate 'size' bytes aligned to the system page size.
extern "C" void* valloc(size_t size) {
  // Allocate a page-aligned object of length >= size bytes.
  if (pagesize == 0) pagesize = getpagesize();
  void* result = do_memalign(pagesize, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
02527
// Allocate 'size' rounded up to a whole number of pages, page-aligned.
extern "C" void* pvalloc(size_t size) {
  if (pagesize == 0) pagesize = getpagesize();
  // NOTE(review): size == 0 rounds to 0 here and becomes a 1-byte
  // request inside do_memalign; glibc's pvalloc(0) returns a full
  // page — confirm whether that difference matters.
  size = (size + pagesize - 1) & ~(pagesize - 1);
  void* result = do_memalign(pagesize, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
02536
// glibc-compatible entry point: print level-1 statistics to stderr.
extern "C" void malloc_stats(void) {
  PrintStats(1);
}
02540
// mallopt is accepted but ignored; reports success (1) for every cmd.
extern "C" int mallopt(int cmd, int value) {
  return 1;
}
02544
#if 0
// Disabled mallinfo implementation.  NOTE(review): presumably disabled
// because struct mallinfo's int fields truncate for heaps >= 2GB and
// the struct layout is libc-specific — confirm before re-enabling.
extern "C" struct mallinfo mallinfo(void) {
  TCMallocStats stats;
  ExtractStats(&stats, NULL);

  // Just some of the fields are filled in.
  struct mallinfo info;
  memset(&info, 0, sizeof(info));

  // arena: bytes from the system; fsmblks: bytes in free lists;
  // fordblks: free page-heap bytes; uordblks: bytes in use.
  info.arena = static_cast<int>(stats.system_bytes);
  info.fsmblks = static_cast<int>(stats.thread_bytes
                                  + stats.central_bytes
                                  + stats.transfer_bytes);
  info.fordblks = static_cast<int>(stats.pageheap_bytes);
  info.uordblks = static_cast<int>(stats.system_bytes
                                   - stats.thread_bytes
                                   - stats.central_bytes
                                   - stats.transfer_bytes
                                   - stats.pageheap_bytes);

  return info;
}
#endif
02570
02571
02572
02573
02574
02575
02576
02577
extern "C" {
#if defined(__GNUC__) && defined(HAVE___ATTRIBUTE__)
  // On GCC, expose glibc's internal __libc_* entry points as weak
  // aliases of our implementations, so callers of those symbols also
  // get tcmalloc.
#define ALIAS(x) __attribute__ ((weak, alias (x)))
  void* __libc_malloc(size_t size) ALIAS("malloc");
  void __libc_free(void* ptr) ALIAS("free");
  void* __libc_realloc(void* ptr, size_t size) ALIAS("realloc");
  void* __libc_calloc(size_t n, size_t size) ALIAS("calloc");
  void __libc_cfree(void* ptr) ALIAS("cfree");
  void* __libc_memalign(size_t align, size_t s) ALIAS("memalign");
  void* __libc_valloc(size_t size) ALIAS("valloc");
  void* __libc_pvalloc(size_t size) ALIAS("pvalloc");
  int __posix_memalign(void** r, size_t a, size_t s) ALIAS("posix_memalign");
#undef ALIAS
#else
  // Portable fallback: plain forwarding wrappers instead of aliases.
  void* __libc_malloc(size_t size) { return malloc(size); }
  void __libc_free(void* ptr) { free(ptr); }
  void* __libc_realloc(void* ptr, size_t size) { return realloc(ptr, size); }
  void* __libc_calloc(size_t n, size_t size) { return calloc(n, size); }
  void __libc_cfree(void* ptr) { cfree(ptr); }
  void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); }
  void* __libc_valloc(size_t size) { return valloc(size); }
  void* __libc_pvalloc(size_t size) { return pvalloc(size); }
  int __posix_memalign(void** r, size_t a, size_t s) {
    return posix_memalign(r, a, s);
  }
#endif
}
02607
02608
02609
02610
02611
02612
02613
02614
// glibc hook: routes memalign-family calls made through
// __memalign_hook (e.g. very early in process start-up) into tcmalloc.
// 'caller' is supplied by glibc and intentionally unused here.
static void *MemalignOverride(size_t align, size_t size, const void *caller) {
  void* result = do_memalign(align, size);
  MallocHook::InvokeNewHook(result, size);
  return result;
}
void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride;