NCBI C++ Toolkit
memory_man.cpp

1 /* $Id: memory_man.cpp 92089 2020-12-21 15:53:09Z gouriano $
2  * ===========================================================================
3  *
4  * PUBLIC DOMAIN NOTICE
5  * National Center for Biotechnology Information
6  *
7  * This software/database is a "United States Government Work" under the
8  * terms of the United States Copyright Act. It was written as part of
9  * the author's official duties as a United States Government employee and
10  * thus cannot be copyrighted. This software/database is freely available
11  * to the public for use. The National Library of Medicine and the U.S.
12  * Government have not placed any restriction on its use or reproduction.
13  *
14  * Although all reasonable efforts have been taken to ensure the accuracy
15  * and reliability of the software and data, the NLM and the U.S.
16  * Government do not and cannot warrant the performance or results that
17  * may be obtained by using this software or data. The NLM and the U.S.
18  * Government disclaim all warranties, express or implied, including
19  * warranties of performance, merchantability or fitness for any particular
20  * purpose.
21  *
22  * Please cite the author in any work or product based on this material.
23  *
24  * ===========================================================================
25  *
26  * Author: Pavel Ivanov
27  *
28  */
29 
30 #include "task_server_pch.hpp"
31 
32 #include "threads_man.hpp"
33 #include "memory_man.hpp"
34 #include "srv_stat.hpp"
35 
36 #ifdef NCBI_OS_LINUX
37 # include <sys/mman.h>
38 #endif
39 
40 // malloc hooks are deprecated;
41 // in case they disappear, we should have an alternative
42 #define __NC_MEMMAN_USE_MALLOC_HOOK 1
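// (with this macro set to 0, the glibc allocation functions - malloc, realloc,
// free, malloc_usable_size - are overridden directly at the bottom of this file)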
43 
44 // adds several debug checks
45 #define __NC_MEMMAN_DEBUG 0
46 #define __NC_MEMMAN_ALLPTR_COUNT 0
47 // use per-thread MM stat or global static counters;
48 // memory allocation is foundational: it starts before main() is called and before threads are created,
49 // so it seems using static counters makes more sense
50 #define __NC_MEMMAN_PERTHREAD_STAT 0
51 
52 #if __NC_MEMMAN_PERTHREAD_STAT
53 #define __NC_MEMMAN_PERTHREAD_STAT_ARG(x) x
54 #else
55 #define __NC_MEMMAN_PERTHREAD_STAT_ARG(x)
56 #endif
57 
58 
59 BEGIN_NCBI_SCOPE
60 
61 
62 static const Uint2 kMMCntBlocksInPool = 100;
63 static const Uint2 kMMDrainBatchSize = 35;
64 static const Uint1 kMMCntFreeGrades = 8;
65 static const int kMMFlushPeriod = 60;
66 
67 /// If for some reason kMMAllocPageSize is changed then kMMMaxBlockSize will change
68 /// too, and thus kNCMaxBlobChunkSize in nc_db_info.hpp in NetCache should probably
69 /// change correspondingly as well.
70 static const Uint4 kMMAllocPageSize = 65536;
71 static const size_t kMMAllocPageMask = ~size_t(kMMAllocPageSize - 1);
72 /// This is the Linux standard on x86_64. If it ever changes, or if portability
73 /// is ever desired, then this constant will need to be obtained from the OS
74 /// during initialization.
75 static const Uint2 kMMOSPageSize = 4096;
76 static const size_t kMMOSPageMask = ~size_t(kMMOSPageSize - 1);
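// Example of the mask arithmetic: kMMAllocPageMask == ~size_t(0xFFFF), so for
// any pointer p inside a 64KB allocation page, (size_t)p & kMMAllocPageMask
// rounds p down to the start of that page (e.g. 0x7f0012349020 -> 0x7f0012340000).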
77 
78 
79 struct SMMBlocksPool
80 {
81     CMiniMutex pool_lock;
82     Uint2 size_idx;
83     Uint2 get_idx;
84     Uint2 put_idx;
85     Uint2 cnt_avail;
86     void* blocks[kMMCntBlocksInPool];
87 };
88 
89 
90 struct SMMPageHeader
91 {
92 #if __NC_MEMMAN_NEWALLOC
93 #if __NC_MEMMAN_USEREALPTR
94     void* real_ptr;
95 #endif
96     size_t real_size;
97 #endif
98     size_t block_size;
99     void* free_list;
100     CMiniMutex page_lock;
101     Uint2 cnt_free;
102     Uint1 free_grade;
103     SMMPageHeader* next_page;
104     SMMPageHeader* prev_page;
105 };
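// Each 64KB allocation page starts with an SMMPageHeader; the rest of the page
// is cut into equal-size blocks, and free blocks are linked through their first
// bytes into page->free_list. s_GetPageByPtr() below recovers the header from
// any block pointer by masking off the low bits of its address.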
106 
107 
108 struct SMMFreePageList
109 {
110     CMiniMutex list_lock;
111     SMMPageHeader list_head;
112 };
113 
114 
115 struct SMMFreePageGrades
116 {
117     SMMFreePageList lists[kMMCntFreeGrades];
118 };
119 
120 
121 #if !__NC_MEMMAN_USE_STD_MALLOC
122 class CMMFlusher : public CSrvTask
123 {
124 public:
125  CMMFlusher(void);
126  virtual ~CMMFlusher(void);
127 
128 private:
129  virtual void ExecuteSlice(TSrvThreadNum thr_num);
130 };
131 #endif
132 
133 
134 struct SMMMemPoolsSet
135 {
136     Uint4 flush_counter;
137     SMMBlocksPool pools[kMMCntBlockSizes];
138     SMMStat stat;
139 };
140 
141 
142 static bool s_HadLowLevelInit = false;
143 static bool s_HadMemMgrInit = false;
144 static SMMMemPoolsSet s_GlobalPoolsSet;
145 static SMMFreePageGrades s_FreePages[kMMCntBlockSizes];
146 static SMMMemPoolsSet s_MainPoolsSet;
147 #if !__NC_MEMMAN_USE_STD_MALLOC
148 static CMMFlusher* s_Flusher = NULL;
149 #endif
150 static Uint8 s_TotalSysMem = 0;
151 static Int8 s_TotalPageCount = 0;
152 static SMMStateStat s_StartState;
153 
154 #if !__NC_MEMMAN_PERTHREAD_STAT
155 static Uint8 s_UserBlAlloced[kMMCntBlockSizes] = {0};
156 static Uint8 s_UserBlFreed[kMMCntBlockSizes] = {0};
157 static Uint8 s_SysBlAlloced[kMMCntBlockSizes] = {0};
158 static Uint8 s_SysBlFreed[kMMCntBlockSizes] = {0};
159 static Uint8 s_BigAllocedCnt = 0;
160 static Uint8 s_BigAllocedSize = 0;
161 static Uint8 s_BigFreedCnt = 0;
162 static Uint8 s_BigFreedSize = 0;
163 #endif
164 
165 #if __NC_MEMMAN_ALLPTR_COUNT
166 static Int8 s_AllPtrCount[kMMCntBlockSizes] = {0};
167 static Int8 s_AllSysPtrCount[kMMCntBlockSizes] = {0};
168 #endif
169 #if __NC_MEMMAN_DEBUG
170 static Uint8 s_fdMemManStamp = 0xFEEDFEEDFEEDFEED;
171 static const Uint8 s_MaxAllPools = 200;
172 static SMMMemPoolsSet* s_AllMemPoolsSet[s_MaxAllPools];
173 static CMiniMutex lock_AllPools;
174 
175 static void s_IncPoolIdx(Uint2& idx);
176 static SMMPageHeader* s_GetPageByPtr(void* ptr);
177 // verify that this ptr is not listed in any pool as available
178 static void s_VerifyUnavailable( void* ptr, Uint2 size_idx)
179 {
180  for (Uint8 p = 0; p < s_MaxAllPools; ++p) {
181  SMMMemPoolsSet *pset = s_AllMemPoolsSet[p];
182  if (pset) {
183  const SMMBlocksPool* test_pool = &(pset->pools[size_idx]);
184  for (Uint2 idx = test_pool->get_idx; idx != test_pool->put_idx; s_IncPoolIdx(idx)) {
185  void* available = test_pool->blocks[idx];
186  if (available == ptr) {
187  abort();
188  }
189  }
190  }
191  }
192  SMMPageHeader* page = s_GetPageByPtr(ptr);
193  for (void* next_block = page->free_list; next_block; next_block = *(void**)next_block) {
194  if (next_block == ptr) {
195  abort();
196  }
197  }
198 }
199 
200 #if 0
201 // need much more, 200K, 2M ?
202 static const Uint8 s_MaxAllPtr = 20000;
203 static void* s_AllPtr[s_MaxAllPtr*kMMCntBlockSizes];
204 #endif
205 
206 static void s_GivePtr( void* ptr, Uint2 size_idx)
207 {
208 #if 0
209  void** p = s_AllPtr + (size_idx * s_MaxAllPtr);
210  for( Uint8 i = 0; i < s_MaxAllPtr; ++i, ++p) {
211  if (*p == nullptr) {
212  *p = ptr;
213  return;
214  }
215  }
216  abort();
217 #endif
218 }
219 static void s_TakePtr( void* ptr, Uint2 size_idx)
220 {
221 #if 0
222  void** p = s_AllPtr + (size_idx * s_MaxAllPtr);
223  for( Uint8 i = 0; i < s_MaxAllPtr; ++i, ++p) {
224  if (*p == ptr) {
225  *p = nullptr;
226  return;
227  }
228  }
229  abort();
230 #endif
231 }
232 
233 #endif //__NC_MEMMAN_DEBUG
234 
235 static const Uint4 kMMPageDataSize = kMMAllocPageSize - sizeof(SMMPageHeader);
236 static const Uint2 kMMMaxBlockSize = (kMMPageDataSize / 2) & ~7;
237 static const Uint2 kMMBlockSizes[kMMCntBlockSizes] =
238     { 8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 96, 112, 128, 152, 176, 208, 248,
239       296, 352, 416, 496, 592, 704, 840, 1008, 1208, 1448, 1736, 2080, 2496,
240       (kMMPageDataSize / 11) & ~7,
241       (kMMPageDataSize / 9) & ~7,
242       (kMMPageDataSize / 8) & ~7,
243       (kMMPageDataSize / 7) & ~7,
244       (kMMPageDataSize / 6) & ~7,
245       (kMMPageDataSize / 5) & ~7,
246       (kMMPageDataSize / 4) & ~7,
247       (kMMPageDataSize / 3) & ~7,
248       kMMMaxBlockSize
249     };
250 static Uint2 kMMSizeIndexes[kMMMaxBlockSize / 8 + 1] = {0};
251 static Uint2 kMMCntForSize[kMMCntBlockSizes] = {0};
252 
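// kMMSizeIndexes maps ceil(size/8) to an index into kMMBlockSizes, making the
// size class lookup O(1). E.g. for a 100-byte request: (100 + 7) / 8 == 13, and
// kMMSizeIndexes[13] points at the 112-byte size class (the smallest one >= 104).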
253 
254 Int8 GetMPageCount(void)
255 {
256  return s_TotalPageCount;
257 }
258 
259 static inline void
260 s_InitPoolsSet(SMMMemPoolsSet* pool_set)
261 {
262  for (Uint2 i = 0; i < kMMCntBlockSizes; ++i) {
263  SMMBlocksPool& pool = pool_set->pools[i];
264  pool.size_idx = i;
265  pool.get_idx = pool.put_idx = 0;
266  pool.cnt_avail = 0;
267  }
268     pool_set->flush_counter = s_GlobalPoolsSet.flush_counter;
269     pool_set->stat.ClearStats();
270 }
271 
272 static void
273 s_LowLevelInit(void)
274 {
275     InitCurThreadStorage();
276 
277  Uint4 sz_ind = 0, sz = 0, lookup_ind = 0;
278  for (; sz <= kMMMaxBlockSize; sz += 8, ++lookup_ind) {
279  if (sz > kMMBlockSizes[sz_ind])
280  ++sz_ind;
281  kMMSizeIndexes[lookup_ind] = sz_ind;
282     }
283     s_InitPoolsSet(&s_GlobalPoolsSet);
284     s_InitPoolsSet(&s_MainPoolsSet);
285 #if __NC_MEMMAN_DEBUG
286  s_AllMemPoolsSet[s_MaxAllPools-1] = &s_GlobalPoolsSet;
287 #endif
288  for (Uint2 i = 0; i < kMMCntBlockSizes; ++i) {
289         kMMCntForSize[i] = Uint2(kMMPageDataSize / kMMBlockSizes[i]);
290 
291  SMMFreePageGrades& free_grades = s_FreePages[i];
292  for (Uint2 j = 0; j < kMMCntFreeGrades; ++j) {
293  SMMPageHeader* free_head = &free_grades.lists[j].list_head;
294  free_head->next_page = free_head;
295  free_head->prev_page = free_head;
296  }
297  }
298 
299  s_HadLowLevelInit = true;
300 }
301 
302 #if __NC_MEMMAN_USE_STD_MALLOC
303 void InitMemoryMan(void)
304 {
305  if (!s_HadLowLevelInit)
306  s_LowLevelInit();
307  s_HadMemMgrInit = true;
308 }
309 void FinalizeMemoryMan(void)
310 {
311 }
312 
313 #else //__NC_MEMMAN_USE_STD_MALLOC
314 
315 static inline void*
316 s_DoMmap(size_t size)
317 {
318 #ifdef NCBI_OS_LINUX
319  void* ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
320  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
321  if (ptr == MAP_FAILED) {
322  static bool reentry = false;
323  if (reentry) {
324  abort();
325  } else {
326  reentry = true;
327  SRV_FATAL("s_DoMmap failed when requesting "
328  << size << " bytes, errno: " << errno
329  << ", mmap_page_cnt: " << GetMPageCount());
330  }
331  }
332  return ptr;
333 #else
334  return NULL;
335 #endif
336 }
337 
338 static inline void
339 s_DoUnmap(void* ptr, size_t size)
340 {
341 #ifdef NCBI_OS_LINUX
342  if (munmap(ptr, size) != 0) {
343  SRV_LOG(SoftFatal, "Fatal error: " << "s_DoUnmap failed, errno: " << errno
344  << ", mmap_page_cnt: " << GetMPageCount());
345  }
346 #endif
347 }
348 
349 #if __NC_MEMMAN_NEWALLOC
350 
351 static inline bool
352 s_TryUnmap(void* ptr, size_t size)
353 {
354 #if __NC_MEMMAN_USEREALPTR
355 #ifdef NCBI_OS_LINUX
356  return (munmap(ptr, size) == 0);
357 #endif
358 #else
359  s_DoUnmap(ptr, size);
360  return true;
361 #endif
362 }
363 
364 static void*
365 s_SysAllocLongWay(size_t ptr, size_t size)
366 {
367  size_t aligned_ptr = ptr, new_size = size;
368  for (;;) {
369  s_DoUnmap((void*)ptr, new_size);
370  aligned_ptr = (ptr & kMMAllocPageMask);
371  new_size = max(new_size, (size_t)kMMAllocPageSize) + (ptr - aligned_ptr);
372  ptr = (size_t)s_DoMmap(new_size);
373  aligned_ptr = (ptr & kMMAllocPageMask);
374  if (aligned_ptr == ptr) {
375  break;
376  }
377  aligned_ptr += kMMAllocPageSize;
378  if ((ptr + new_size > aligned_ptr) && (ptr + new_size - aligned_ptr >= size)) {
379  if (s_TryUnmap((void*)ptr, aligned_ptr - ptr)) {
380  new_size -= (aligned_ptr - ptr);
381  ptr = aligned_ptr;
382  }
383  break;
384  }
385  }
386 
387  SMMPageHeader* page = (SMMPageHeader*)aligned_ptr;
388  new (page) SMMPageHeader();
389 #if __NC_MEMMAN_USEREALPTR
390  page->real_ptr = (void*)ptr;
391 #endif
392  page->real_size = new_size;
393     AtomicAdd(s_TotalPageCount, 1);
394     return (void*)page;
395 }
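// The loop above deals with mmap returning memory that is not 64KB-aligned:
// it re-requests a larger region (padded by up to one allocation page), and
// either the new region happens to be aligned, or the unaligned prefix is
// trimmed with munmap so that the page header lands on a 64KB boundary.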
396 
397 static void*
398 s_SysAlloc(size_t size)
399 {
400     AtomicAdd(s_TotalSysMem, size);
401  size_t ptr = (size_t)s_DoMmap(size);
402  if ((ptr & kMMAllocPageMask) == ptr) {
403  SMMPageHeader* page = (SMMPageHeader*)ptr;
404  new (page) SMMPageHeader();
405 #if __NC_MEMMAN_USEREALPTR
406  page->real_ptr = page;
407 #endif
408  page->real_size = size;
409     AtomicAdd(s_TotalPageCount, 1);
410     return (void*)page;
411  }
412  return s_SysAllocLongWay(ptr, size);
413 }
414 
415 static void
416 s_SysFree(void* ptr, size_t size)
417 {
418  SMMPageHeader* page = (SMMPageHeader*)ptr;
419     AtomicSub(s_TotalSysMem, size);
420     AtomicSub(s_TotalPageCount, 1);
421 #if __NC_MEMMAN_USEREALPTR
422  s_DoUnmap(page->real_ptr, page->real_size);
423 #else
424  s_DoUnmap(page, page->real_size);
425 #endif
426 }
427 
428 #else // __NC_MEMMAN_NEWALLOC
429 
430 static void*
431 s_SysAllocLongWay(size_t size)
432 {
433  size_t ptr = (size_t)s_DoMmap(size + kMMAllocPageSize);
434  size_t aligned_ptr = ptr & kMMAllocPageMask;
435  if (aligned_ptr == ptr) {
436  s_DoUnmap((void*)(ptr + size), kMMAllocPageSize);
437  }
438  else {
439  aligned_ptr += kMMAllocPageSize;
440  s_DoUnmap((void*)ptr, aligned_ptr - ptr);
441  s_DoUnmap((void*)(aligned_ptr + size),
442  kMMAllocPageSize - (aligned_ptr - ptr));
443  }
444  return (void*)aligned_ptr;
445 }
446 
447 static void*
448 s_SysAlloc(size_t size)
449 {
450     AtomicAdd(s_TotalSysMem, size);
451     AtomicAdd(s_TotalPageCount, 1);
452     size_t ptr = (size_t)s_DoMmap(size);
453  if ((ptr & kMMAllocPageMask) == ptr)
454  return (void*)ptr;
455 
456  s_DoUnmap((void*)ptr, size);
457  return s_SysAllocLongWay(size);
458 }
459 
460 static void
461 s_SysFree(void* ptr, size_t size)
462 {
463     AtomicSub(s_TotalSysMem, size);
464     AtomicSub(s_TotalPageCount, 1);
465     s_DoUnmap(ptr, size);
466 }
467 #endif //__NC_MEMMAN_NEWALLOC
468 
469 static inline SMMPageHeader*
470 s_GetPageByPtr(void* ptr)
471 {
472  return (SMMPageHeader*)((size_t)ptr & kMMAllocPageMask);
473 }
474 
475 static inline bool
476 s_IsInFreeList(SMMPageHeader* page)
477 {
478  return page->next_page != NULL;
479 }
480 
481 static inline bool
482 s_IsFreeListEmpty(SMMPageHeader* list_head)
483 {
484  return list_head->next_page == list_head;
485 }
486 
487 static inline void
488 s_FreeListRemove(SMMPageHeader* page)
489 {
490  page->prev_page->next_page = page->next_page;
491  page->next_page->prev_page = page->prev_page;
492  page->prev_page = page->next_page = NULL;
493 }
494 
495 static inline void
496 s_FreeListAddHead(SMMPageHeader* list_head, SMMPageHeader* page)
497 {
498 #if __NC_MEMMAN_DEBUG
499  if (list_head->block_size != 0 && list_head->block_size != page->block_size) {
500  abort();
501  }
502 #endif
503  page->prev_page = list_head;
504  page->next_page = list_head->next_page;
505  page->next_page->prev_page = page;
506  list_head->next_page = page;
507 }
508 
509 static inline void
510 s_FreeListAddTail(SMMPageHeader* list_head, SMMPageHeader* page)
511 {
512 #if __NC_MEMMAN_DEBUG
513  if (list_head->block_size != 0 && list_head->block_size != page->block_size) {
514  abort();
515  }
516 #endif
517  page->next_page = list_head;
518  page->prev_page = list_head->prev_page;
519  page->prev_page->next_page = page;
520  list_head->prev_page = page;
521 }
522 
523 static inline void
524 s_IncPoolIdx(Uint2& idx)
525 {
526  ++idx;
527  if (idx == kMMCntBlocksInPool)
528  idx = 0;
529 }
530 
531 static void
532 s_PutToFreeList(SMMPageHeader* page, Uint2 size_idx, bool to_head)
533 {
534  Uint1 grade = Uint1(Uint4(page->cnt_free) * kMMCntFreeGrades
535  / kMMCntForSize[size_idx]);
536  page->free_grade = grade;
537  SMMFreePageGrades& free_grades = s_FreePages[size_idx];
538  SMMFreePageList& free_list = free_grades.lists[grade];
539  free_list.list_lock.Lock();
540  if (to_head)
541  s_FreeListAddHead(&free_list.list_head, page);
542  else
543  s_FreeListAddTail(&free_list.list_head, page);
544  free_list.list_lock.Unlock();
545 }
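// Pages are bucketed into kMMCntFreeGrades (8) "grades" by how free they are:
// grade = cnt_free * 8 / capacity. E.g. a page holding 160 blocks with 30 of
// them free gets grade 30 * 8 / 160 == 1. s_FillFromFreePages() scans grades
// from 0 up, so the fullest pages are reused first while emptier pages get a
// chance to drain completely and be returned to the OS.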
546 
547 static bool
548 s_RemoveFromFreeList(SMMPageHeader* page, Uint2 size_idx)
549 {
550  SMMFreePageGrades& free_grades = s_FreePages[size_idx];
551  SMMFreePageList& free_list = free_grades.lists[page->free_grade];
552  free_list.list_lock.Lock();
553  bool result = s_IsInFreeList(page);
554  if (result)
555  s_FreeListRemove(page);
556  free_list.list_lock.Unlock();
557  return result;
558 }
559 
560 static SMMPageHeader*
561 s_AllocNewPage(Uint2 size_idx, SMMStat* __NC_MEMMAN_PERTHREAD_STAT_ARG(stat))
562 {
563     SMMPageHeader* page = (SMMPageHeader*)s_SysAlloc(kMMAllocPageSize);
564 #if !__NC_MEMMAN_NEWALLOC
565  new (page) SMMPageHeader();
566 #endif
567  page->block_size = kMMBlockSizes[size_idx];
568  page->cnt_free = kMMCntForSize[size_idx];
569 #if __NC_MEMMAN_PERTHREAD_STAT
570  AtomicAdd(stat->m_SysBlAlloced[size_idx], page->cnt_free);
571 #else
572  AtomicAdd(s_SysBlAlloced[size_idx], page->cnt_free);
573 #endif
574 #if __NC_MEMMAN_ALLPTR_COUNT
575  AtomicAdd(s_AllSysPtrCount[size_idx], page->cnt_free);
576 #endif
577  page->next_page = page->prev_page = NULL;
578 
579  char* block = (char*)page + sizeof(SMMPageHeader);
580  page->free_list = block;
581 #if __NC_MEMMAN_DEBUG
582  if (page->free_list && memcmp(page->free_list, &s_fdMemManStamp, sizeof(s_fdMemManStamp)) == 0) {
583  abort();
584  }
585 #endif
586  char* next = block + page->block_size;
587  for (Uint2 i = page->cnt_free - 1; i > 0; --i) {
588  *(void**)block = next;
589  block = next;
590  next += page->block_size;
591  }
592  *(void**)block = NULL;
593  _ASSERT(next + page->block_size > (char*)page + kMMAllocPageSize);
594 
595  return page;
596 }
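// The free list costs no extra memory: each free block stores the pointer to
// the next free block in its own first sizeof(void*) bytes, so a new page is
// just a header followed by a chain of blocks threaded through themselves.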
597 
598 static void*
599 s_FillFromPage(SMMBlocksPool* pool, Uint2 size_idx, SMMPageHeader* page)
600 {
601  _ASSERT(!pool || pool->cnt_avail == 0);
602  page->page_lock.Lock();
603  void* next_block = page->free_list;
604  void* result = next_block;
605  next_block = *(void**)next_block;
606  --page->cnt_free;
607 #if __NC_MEMMAN_DEBUG
608  Uint2 prev_cnt_avail = pool->cnt_avail;
609 #endif
610  Uint2 to_fill = min(kMMDrainBatchSize, page->cnt_free);
611  page->cnt_free -= to_fill;
612  for (; to_fill != 0; --to_fill) {
613  pool->blocks[pool->cnt_avail++] = next_block;
614  next_block = *(void**)next_block;
615  }
616  pool->get_idx = 0;
617  pool->put_idx = pool->cnt_avail;
618  page->free_list = next_block;
619 #if __NC_MEMMAN_DEBUG
620  if (page->free_list && memcmp(page->free_list, &s_fdMemManStamp, sizeof(s_fdMemManStamp)) == 0) {
621  abort();
622  }
623  for (Uint2 cnt = prev_cnt_avail; cnt < pool->cnt_avail; ++cnt) {
624  memcpy(pool->blocks[cnt], &s_fdMemManStamp, sizeof(s_fdMemManStamp));
625  }
626  if (page->free_list && memcmp(page->free_list, &s_fdMemManStamp, sizeof(s_fdMemManStamp)) == 0) {
627  abort();
628  }
629 #endif
630  if (page->cnt_free)
631  s_PutToFreeList(page, size_idx, true);
632  page->page_lock.Unlock();
633 
634 #if __NC_MEMMAN_DEBUG
635  memcpy(result, &s_fdMemManStamp, sizeof(s_fdMemManStamp));
636  if (page->free_list && memcmp(page->free_list, &s_fdMemManStamp, sizeof(s_fdMemManStamp)) == 0) {
637  abort();
638  }
639 #endif
640  return result;
641 }
642 
643 static void*
644 s_FillFromFreePages(SMMBlocksPool* pool, Uint2 size_idx, SMMStat* stat)
645 {
646  SMMPageHeader* free_page = NULL;
647  SMMFreePageGrades& free_grades = s_FreePages[size_idx];
648  for (Uint1 i = 0; i < kMMCntFreeGrades && !free_page; ++i) {
649  SMMFreePageList& free_list = free_grades.lists[i];
650  SMMPageHeader* free_head = &free_list.list_head;
651 
652  free_list.list_lock.Lock();
653  if (!s_IsFreeListEmpty(free_head)) {
654  free_page = free_head->next_page;
655  s_FreeListRemove(free_page);
656  }
657  free_list.list_lock.Unlock();
658  }
659  if (!free_page)
660  free_page = s_AllocNewPage(size_idx, stat);
661 
662  return s_FillFromPage(pool, size_idx, free_page);
663 }
664 
665 static void
666 s_ReleaseToFreePages(void** blocks, Uint2 cnt, Uint2 size_idx,
667                      SMMStat* __NC_MEMMAN_PERTHREAD_STAT_ARG(stat))
668 {
669  for (Uint4 i = 0; i < cnt; ++i) {
670  void* ptr = blocks[i];
671  SMMPageHeader* page = s_GetPageByPtr(ptr);
672  page->page_lock.Lock();
673  *(void**)ptr = page->free_list;
674 #if __NC_MEMMAN_DEBUG
675     if (page->free_list && memcmp(page->free_list, &s_fdMemManStamp, sizeof(s_fdMemManStamp)) == 0) {
676  abort();
677  }
678 #endif
679  page->free_list = ptr;
680 #if __NC_MEMMAN_DEBUG
681  if (page->free_list && memcmp(page->free_list, &s_fdMemManStamp, sizeof(s_fdMemManStamp)) == 0) {
682  abort();
683  }
684 #endif
685  ++page->cnt_free;
686  if (page->cnt_free == 1 || s_RemoveFromFreeList(page, size_idx)) {
687  if (page->cnt_free == kMMCntForSize[size_idx]) {
688 #if __NC_MEMMAN_PERTHREAD_STAT
689  AtomicAdd(stat->m_SysBlFreed[size_idx], page->cnt_free);
690 #else
691  AtomicAdd(s_SysBlFreed[size_idx], page->cnt_free);
692 #endif
693 #if __NC_MEMMAN_ALLPTR_COUNT
694  AtomicSub(s_AllSysPtrCount[size_idx], page->cnt_free);
695 #endif
696                 s_SysFree(page, kMMAllocPageSize);
697                 continue;
698  }
699  else {
700  s_PutToFreeList(page, size_idx, false);
701  }
702  }
703  page->page_lock.Unlock();
704  }
705 }
706 
707 static void
708 s_FlushPoolSet(SMMMemPoolsSet* pool_set)
709 {
710  for (Uint2 i = 0; i < kMMCntBlockSizes; ++i) {
711  SMMBlocksPool& pool = pool_set->pools[i];
712  if (pool.cnt_avail == 0)
713  continue;
714 
715  if (pool.get_idx < pool.put_idx) {
716  Uint2 cnt = pool.put_idx - pool.get_idx;
717  s_ReleaseToFreePages(&pool.blocks[pool.get_idx], cnt, i, &pool_set->stat);
718  }
719  else {
720             Uint2 cnt = kMMCntBlocksInPool - pool.get_idx;
721             s_ReleaseToFreePages(&pool.blocks[pool.get_idx], cnt, i, &pool_set->stat);
722  if (pool.put_idx)
723  s_ReleaseToFreePages(&pool.blocks[0], pool.put_idx, i, &pool_set->stat);
724  }
725  pool.get_idx = pool.put_idx = 0;
726  pool.cnt_avail = 0;
727  }
728     pool_set->flush_counter = s_GlobalPoolsSet.flush_counter;
729 }
730 
731 static void*
732 s_FillPool(SMMBlocksPool* pool, SMMStat* stat)
733 {
734  SMMBlocksPool& glob_pool = s_GlobalPoolsSet.pools[pool->size_idx];
735 
736  glob_pool.pool_lock.Lock();
737  if (glob_pool.cnt_avail == 0) {
738  glob_pool.pool_lock.Unlock();
739  return s_FillFromFreePages(pool, pool->size_idx, stat);
740  }
741 
742  _ASSERT(pool->cnt_avail == 0);
743  void* result = glob_pool.blocks[0];
744  --glob_pool.cnt_avail;
745  Uint4 cnt_copy = min(glob_pool.cnt_avail, kMMDrainBatchSize);
746  if (cnt_copy != 0) {
747  memcpy(pool->blocks, &glob_pool.blocks[1], cnt_copy * sizeof(void*));
748 #if __NC_MEMMAN_DEBUG
749  for (Uint2 cnt = 0; cnt < cnt_copy; ++cnt) {
750  memcpy(pool->blocks[cnt], &s_fdMemManStamp, sizeof(s_fdMemManStamp));
751  }
752 #endif
753  pool->cnt_avail = cnt_copy;
754  pool->get_idx = 0;
755  pool->put_idx = cnt_copy;
756  glob_pool.cnt_avail -= cnt_copy;
757  if (glob_pool.cnt_avail != 0) {
758  memmove(glob_pool.blocks, &glob_pool.blocks[cnt_copy + 1],
759  glob_pool.cnt_avail * sizeof(void*));
760  }
761  }
762  glob_pool.pool_lock.Unlock();
763 
764  return result;
765 }
766 
767 static void*
768 s_GetFromGlobal(Uint2 size_idx, SMMStat* stat)
769 {
770  SMMBlocksPool& pool = s_GlobalPoolsSet.pools[size_idx];
771 
772  pool.pool_lock.Lock();
773  if (pool.cnt_avail == 0) {
774  pool.pool_lock.Unlock();
775  return s_FillFromFreePages(NULL, size_idx, stat);
776  }
777  void* ptr = pool.blocks[--pool.cnt_avail];
778  pool.pool_lock.Unlock();
779 
780  return ptr;
781 }
782 
783 static void
784 s_DrainPool(SMMBlocksPool* pool, void* ptr, SMMStat* stat)
785 {
786  pool->get_idx = 0;
787  pool->put_idx = kMMCntBlocksInPool;
788 
789  SMMBlocksPool& glob_pool = s_GlobalPoolsSet.pools[pool->size_idx];
790 
791  glob_pool.pool_lock.Lock();
792  if (glob_pool.cnt_avail == kMMCntBlocksInPool) {
793  glob_pool.pool_lock.Unlock();
794 
795     pool->put_idx = kMMCntBlocksInPool - kMMDrainBatchSize;
796     pool->cnt_avail = pool->put_idx;
797  s_ReleaseToFreePages(&pool->blocks[pool->put_idx],
798  kMMDrainBatchSize, pool->size_idx, stat);
799  s_ReleaseToFreePages(&ptr, 1, pool->size_idx, stat);
800  }
801  else {
802  glob_pool.blocks[glob_pool.cnt_avail++] = ptr;
803  Uint4 to_copy = kMMCntBlocksInPool - glob_pool.cnt_avail;
804  if (to_copy > kMMDrainBatchSize)
805  to_copy = kMMDrainBatchSize;
806  pool->put_idx = kMMCntBlocksInPool - to_copy;
807  pool->cnt_avail = pool->put_idx;
808  memcpy(&glob_pool.blocks[glob_pool.cnt_avail],
809  &pool->blocks[pool->put_idx],
810  to_copy * sizeof(void*));
811  glob_pool.cnt_avail += to_copy;
812  glob_pool.pool_lock.Unlock();
813  }
814 }
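// Draining is two-level: a thread pool that becomes full first tries to hand
// a batch of blocks to the global pool; only when the global pool is full too
// do the blocks go back to their pages' free lists (and possibly to the OS).
// This keeps most alloc/free traffic within a thread, without locking.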
815 
816 static void*
817 s_GetFromPool(SMMBlocksPool* pool, SMMStat* stat)
818 {
819  void* ptr = nullptr;
820  if (pool->cnt_avail == 0) {
821  ptr = s_FillPool(pool, stat);
822  } else {
823  --pool->cnt_avail;
824  ptr = pool->blocks[pool->get_idx];
825  s_IncPoolIdx(pool->get_idx);
826  _ASSERT((pool->put_idx + kMMCntBlocksInPool - pool->get_idx)
827  % kMMCntBlocksInPool == pool->cnt_avail);
828  }
829  return ptr;
830 }
831 
832 static void
833 s_PutToPool(SMMBlocksPool* pool, void* ptr, SMMStat* stat)
834 {
835  if (pool->cnt_avail == kMMCntBlocksInPool) {
836  s_DrainPool(pool, ptr, stat);
837  }
838  else {
839  ++pool->cnt_avail;
840  pool->blocks[pool->put_idx] = ptr;
841  s_IncPoolIdx(pool->put_idx);
842  _ASSERT((pool->put_idx + kMMCntBlocksInPool - pool->get_idx - 1)
843  % kMMCntBlocksInPool + 1 == pool->cnt_avail);
844  }
845 }
846 
847 static inline size_t
848 s_CalcBigPageSize(size_t size)
849 {
850  size_t alloc_size = size + sizeof(SMMPageHeader);
851  return (alloc_size + kMMOSPageSize - 1) & kMMOSPageMask;
852 }
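// Worked example, assuming a 48-byte SMMPageHeader: a 70000-byte request
// becomes 70048 bytes with the header, which is then rounded up to the next
// 4096-byte OS page boundary: 18 * 4096 == 73728 bytes actually mmap'ed.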
853 
854 static void*
855 s_AllocBigPage(size_t size, SMMStat* __NC_MEMMAN_PERTHREAD_STAT_ARG(stat))
856 {
857     size = s_CalcBigPageSize(size);
858     SMMPageHeader* page = (SMMPageHeader*)s_SysAlloc(size);
859 #if __NC_MEMMAN_PERTHREAD_STAT
860  AtomicAdd(stat->m_BigAllocedCnt, 1);
861  AtomicAdd(stat->m_BigAllocedSize, size);
862 #else
863     AtomicAdd(s_BigAllocedCnt, 1);
864     AtomicAdd(s_BigAllocedSize, size);
865 #endif
866  page->block_size = size - sizeof(SMMPageHeader);
867 
868 #if __NC_MEMMAN_DEBUG
869  memcpy(&page[1], &s_fdMemManStamp, sizeof(s_fdMemManStamp));
870  if (page->free_list && memcmp(page->free_list, &s_fdMemManStamp, sizeof(s_fdMemManStamp)) == 0) {
871  abort();
872  }
873 #endif
874  return &page[1];
875 }
876 
877 static void
878 s_DeallocBigPage(SMMPageHeader* page, SMMStat* __NC_MEMMAN_PERTHREAD_STAT_ARG(stat))
879 {
880  size_t size = page->block_size + sizeof(SMMPageHeader);
881  s_SysFree(page, size);
882 #if __NC_MEMMAN_PERTHREAD_STAT
883  AtomicAdd(stat->m_BigFreedCnt, 1);
884  AtomicAdd(stat->m_BigFreedSize, size);
885 #else
886     AtomicAdd(s_BigFreedCnt, 1);
887     AtomicAdd(s_BigFreedSize, size);
888 #endif
889 }
890 
891 static inline Uint2
892 s_CalcSizeIndex(size_t size)
893 {
894  return kMMSizeIndexes[(size + 7) / 8];
895 }
896 
897 static inline SMMMemPoolsSet*
898 s_GetCurPoolsSet(void)
899 {
900     SSrvThread* thr = GetCurThread();
901     if (thr) {
902  SMMMemPoolsSet* pool_set = thr->mm_pool;
903         if (pool_set->flush_counter != s_GlobalPoolsSet.flush_counter)
904             s_FlushPoolSet(pool_set);
905  return pool_set;
906  }
907  else if (s_HadMemMgrInit)
908  return NULL;
909  else
910  return &s_MainPoolsSet;
911 }
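// Each thread allocates from its own SMMMemPoolsSet (no locking); a NULL
// return here (a foreign thread after the manager is initialized) makes the
// caller fall back to the locked global pool. The flush_counter comparison is
// how CMMFlusher's periodic flush propagates to the per-thread pools.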
912 
913 static void*
914 s_AllocMemory(size_t size)
915 {
916  if (!s_HadLowLevelInit)
917  s_LowLevelInit();
918 
919 #if __NC_MEMMAN_DEBUG
920  lock_AllPools.Lock();
921 #endif
922  SMMMemPoolsSet* pool_set = s_GetCurPoolsSet();
923  SMMStat* stat = (pool_set? &pool_set->stat: &s_MainPoolsSet.stat);
924  void* ptr = nullptr;
925  if (size <= kMMMaxBlockSize) {
926  Uint2 size_idx = s_CalcSizeIndex(size);
927 #if __NC_MEMMAN_PERTHREAD_STAT
928  AtomicAdd(stat->m_UserBlAlloced[size_idx], 1);
929 #else
930  AtomicAdd(s_UserBlAlloced[size_idx], 1);
931 #endif
932  if (pool_set) {
933  ptr = s_GetFromPool(&pool_set->pools[size_idx], stat);
934  } else {
935  ptr = s_GetFromGlobal(size_idx, stat);
936  }
937 #if __NC_MEMMAN_ALLPTR_COUNT
938  AtomicAdd(s_AllPtrCount[size_idx],1);
939 #endif
940 #if __NC_MEMMAN_DEBUG
941  s_GivePtr(ptr,size_idx);
942  if (s_FreePages[size_idx].lists[0].list_head.block_size != 0 &&
943  s_FreePages[size_idx].lists[0].list_head.block_size != kMMBlockSizes[size_idx]) {
944  abort();
945  }
946 // s_VerifyUnavailable(ptr, size_idx);
947 #endif
948  }
949  else {
950  ptr = s_AllocBigPage(size, stat);
951  }
952 #if __NC_MEMMAN_DEBUG
953  if (memcmp(ptr, &s_fdMemManStamp, sizeof(s_fdMemManStamp)) != 0) {
954  abort();
955  }
956  memset(ptr, 0, sizeof(s_fdMemManStamp));
957 #endif
958 #if __NC_MEMMAN_DEBUG
959  lock_AllPools.Unlock();
960 #endif
961  return ptr;
962 }
963 
964 static void
965 s_DeallocMemory(void* ptr)
966 {
967 #if !__NC_MEMMAN_USE_MALLOC_HOOK
968  if (!s_HadLowLevelInit) {
969  return;
970  }
971 #endif
972  if (!ptr) {
973  return;
974  }
975 
976 #if __NC_MEMMAN_DEBUG
977  lock_AllPools.Lock();
978 #endif
979  SMMMemPoolsSet* pool_set = s_GetCurPoolsSet();
980  SMMStat* stat = (pool_set? &pool_set->stat: &s_MainPoolsSet.stat);
981  SMMPageHeader* page = s_GetPageByPtr(ptr);
982  if (page->block_size <= kMMMaxBlockSize) {
983  Uint2 size_idx = s_CalcSizeIndex(page->block_size);
984 #if __NC_MEMMAN_PERTHREAD_STAT
985  AtomicAdd(stat->m_UserBlFreed[size_idx], 1);
986 #else
987  AtomicAdd(s_UserBlFreed[size_idx], 1);
988 #endif
989 #if __NC_MEMMAN_ALLPTR_COUNT
990  AtomicSub(s_AllPtrCount[size_idx],1);
991 #endif
992 #if __NC_MEMMAN_DEBUG
993  s_TakePtr(ptr,size_idx);
994  if (memcmp(ptr, &s_fdMemManStamp, sizeof(s_fdMemManStamp)) == 0) {
995  abort();
996  }
997  memcpy(ptr, &s_fdMemManStamp, sizeof(s_fdMemManStamp));
998  s_VerifyUnavailable(ptr, size_idx);
999 #endif
1000  if (pool_set) {
1001  s_PutToPool(&pool_set->pools[size_idx], ptr, stat);
1002  } else {
1003  s_ReleaseToFreePages(&ptr, 1, size_idx, stat);
1004  }
1005 // here, 'page' may be freed already
1006  }
1007  else {
1008 #if __NC_MEMMAN_DEBUG
1009  if (memcmp(ptr, &s_fdMemManStamp, sizeof(s_fdMemManStamp)) == 0) {
1010  abort();
1011  }
1012  memcpy(ptr, &s_fdMemManStamp, sizeof(s_fdMemManStamp));
1013  if (page->free_list && memcmp(page->free_list, &s_fdMemManStamp, sizeof(s_fdMemManStamp)) == 0) {
1014  abort();
1015  }
1016 #endif
1017  s_DeallocBigPage(page, stat);
1018  }
1019 #if __NC_MEMMAN_DEBUG
1020  lock_AllPools.Unlock();
1021 #endif
1022 }
1023 
1024 static void*
1025 s_ReallocMemory(void* ptr, size_t size)
1026 {
1027  if (!ptr)
1028  return s_AllocMemory(size);
1029 
1030  SMMPageHeader* page = s_GetPageByPtr(ptr);
1031  if (size <= kMMMaxBlockSize) {
1032  Uint2 size_idx = s_CalcSizeIndex(size);
1033  size_t real_size = kMMBlockSizes[size_idx];
1034  if (real_size == page->block_size)
1035  return ptr;
1036  }
1037  else {
1038  size_t real_size = s_CalcBigPageSize(size) - sizeof(SMMPageHeader);
1039  if (real_size == page->block_size)
1040  return ptr;
1041  }
1042 
1043  void* new_ptr = s_AllocMemory(size);
1044  memcpy(new_ptr, ptr, min(size, page->block_size));
1045  s_DeallocMemory(ptr);
1046  return new_ptr;
1047 }
1048 
1049 size_t
1050 GetMemSize(void* ptr)
1051 {
1052  SMMPageHeader* page = s_GetPageByPtr(ptr);
1053  return page->block_size;
1054 }
1055 
1056 void
1057 InitMemoryMan(void)
1058 {
1059  s_Flusher = new CMMFlusher();
1060     s_Flusher->RunAfter(kMMFlushPeriod);
1061 
1062  s_HadMemMgrInit = true;
1063 }
1064 
1065 void FinalizeMemoryMan(void)
1066 {
1067 // When NOT using malloc hooks, when the app terminates and static objects are being deleted,
1068 // there are attempts to free "wrong" memory and the app coredumps.
1069 // I did not investigate this in detail. Probably not all allocations are truly intercepted,
1070 // and some static objects are created using one allocator and then deleted using another one.
1071 // On the other hand, it is probably safe to just ignore memory freeing at this time,
1072 // so this is what we do. After this, all s_DeallocMemory calls will do nothing.
1073 #if !__NC_MEMMAN_USE_MALLOC_HOOK
1074  s_HadLowLevelInit = false;
1075 #endif
1076 }
1077 #endif //__NC_MEMMAN_USE_STD_MALLOC
1078 
1079 void
1080 AssignThreadMemMgr(SSrvThread* thr)
1081 {
1082 #if __NC_MEMMAN_USE_STD_MALLOC
1083  if (!s_HadLowLevelInit)
1084  s_LowLevelInit();
1085 #endif
1086  if (thr->thread_num == 0) {
1087  thr->mm_pool = &s_MainPoolsSet;
1088  }
1089  else {
1090  SMMMemPoolsSet* pool_set = new SMMMemPoolsSet();
1091  s_InitPoolsSet(pool_set);
1092  thr->mm_pool = pool_set;
1093  }
1094 #if __NC_MEMMAN_DEBUG
1095  if (thr->thread_num < s_MaxAllPools) {
1096  s_AllMemPoolsSet[thr->thread_num] = thr->mm_pool;
1097  } else {
1098  abort();
1099  }
1100 #endif
1101  // Per-thread stat is never deleted, thus we can do this trick
1102  thr->stat->SetMMStat(&thr->mm_pool->stat);
1103 }
1104 
1105 void
1106 ReleaseThreadMemMgr(SSrvThread* thr)
1107 {
1108 #if !__NC_MEMMAN_USE_STD_MALLOC
1109  s_FlushPoolSet(thr->mm_pool);
1110 #endif
1111 }
1112 
1113 #if !__NC_MEMMAN_USE_STD_MALLOC
1114 
1115 CMMFlusher::CMMFlusher(void)
1116 {
1117 #if __NC_TASKS_MONITOR
1118  m_TaskName = "CMMFlusher";
1119 #endif
1120 }
1121 
1122 CMMFlusher::~CMMFlusher(void)
1123 {}
1124 
1125 void
1126 CMMFlusher::ExecuteSlice(TSrvThreadNum /* thr_num */)
1127 {
1128 
1129     if (CTaskServer::IsInShutdown())
1130         return;
1131 
1132 // Move blocks from the global pool into pages.
1133 // Then the threads, when they see s_GlobalPoolsSet.flush_counter changed,
1134 // return their own pool blocks into pages.
1135 
1136  void* buffer[kMMCntBlocksInPool];
1137  SMMStat* stat = &GetCurThread()->mm_pool->stat;
1138  for (Uint2 i = 0; i < kMMCntBlockSizes; ++i) {
1139         SMMBlocksPool& pool = s_GlobalPoolsSet.pools[i];
1140 
1141  pool.pool_lock.Lock();
1142  Uint2 cnt_blocks = pool.cnt_avail;
1143  if (cnt_blocks == 0) {
1144  pool.pool_lock.Unlock();
1145  continue;
1146  }
1147  pool.cnt_avail = 0;
1148  memcpy(buffer, pool.blocks, cnt_blocks * sizeof(void*));
1149  pool.pool_lock.Unlock();
1150 
1151  s_ReleaseToFreePages(buffer, cnt_blocks, i, stat);
1152  }
1153     ++s_GlobalPoolsSet.flush_counter;
1154 
1155 // once a minute
1156     RunAfter(kMMFlushPeriod);
1157 }
1158 #endif
1159 
1160 void
1161 SMMStat::InitStartState(void)
1162 {
1163     if (s_StartState.m_TotalSys == 0) {
1164 #if __NC_MEMMAN_PERTHREAD_STAT
1165         Uint8 total_data = 0;
1166         SMMStat* main_stat = &s_MainPoolsSet.stat;
1167         for (Uint1 i = 0; i < kMMCntBlockSizes; ++i) {
1168             Int8 cnt = main_stat->m_UserBlAlloced[i] - main_stat->m_UserBlFreed[i];
1169             s_StartState.m_UserBlocks[i] = cnt;
1170             total_data += cnt * kMMBlockSizes[i];
1171             s_StartState.m_SysBlocks[i] = main_stat->m_SysBlAlloced[i]
1172                                           - main_stat->m_SysBlFreed[i];
1173         }
1174         Int8 cnt = main_stat->m_BigAllocedCnt
1175                    - main_stat->m_BigFreedCnt;
1176         Int8 size = main_stat->m_BigAllocedSize - main_stat->m_BigFreedSize;
1177         s_StartState.m_BigBlocksCnt = cnt;  s_StartState.m_BigBlocksSize = size;
1178         total_data += size;
1179         s_StartState.m_TotalData = total_data;
1180         s_StartState.m_TotalSys = s_TotalSysMem;
1181 #else
1182         Uint8 total_data = 0;
1183         for (Uint1 i = 0; i < kMMCntBlockSizes; ++i) {
1184             Int8 cnt = Int8(s_UserBlAlloced[i] - s_UserBlFreed[i]);
1185             s_StartState.m_UserBlocks[i] = cnt;
1186             total_data += cnt * kMMBlockSizes[i];
1187             s_StartState.m_SysBlocks[i] = Int8(s_SysBlAlloced[i] - s_SysBlFreed[i]);
1188         }
1189         Int8 cnt = Int8(s_BigAllocedCnt - s_BigFreedCnt);
1190         Int8 size = Int8(s_BigAllocedSize - s_BigFreedSize);
1191         s_StartState.m_BigBlocksCnt = cnt;  s_StartState.m_BigBlocksSize = size;
1192         total_data += size;
1193         s_StartState.m_TotalData = total_data;
1194         s_StartState.m_TotalSys = s_TotalSysMem;
1195 #endif
1196     }
1197 
1198     m_StartState = s_StartState;
1199 }
1200 
1201 void
1202 SMMStat::TransferEndState(SMMStat* src_stat)
1203 {
1204  m_EndState = m_StartState = src_stat->m_EndState;
1205 }
1206 
1207 void
1208 SMMStat::CopyStartState(SMMStat* src_stat)
1209 {
1210  m_StartState = src_stat->m_StartState;
1211 }
1212 
1213 void
1214 SMMStat::CopyEndState(SMMStat* src_stat)
1215 {
1216  m_EndState = src_stat->m_EndState;
1217 }
1218 
1219 void
1220 SMMStat::SaveEndState(void)
1221 {
1222 #if __NC_MEMMAN_PERTHREAD_STAT
1223     Uint8 total_data = 0;
1224     for (size_t i = 0; i < kMMCntBlockSizes; ++i) {
1225         Int8 cnt = m_StartState.m_UserBlocks[i] + Int8(m_UserBlAlloced[i] - m_UserBlFreed[i]);
1226         m_EndState.m_UserBlocks[i] = cnt;
1227         total_data += cnt * kMMBlockSizes[i];
1228         m_EndState.m_SysBlocks[i] = m_StartState.m_SysBlocks[i] + Int8(m_SysBlAlloced[i] - m_SysBlFreed[i]);
1229     }
1230     m_EndState.m_BigBlocksCnt = m_StartState.m_BigBlocksCnt + Int8(m_BigAllocedCnt - m_BigFreedCnt);
1231     Int8 size = m_StartState.m_BigBlocksSize + Int8(m_BigAllocedSize - m_BigFreedSize);
1232 #else
1233     Uint8 total_data = 0;
1234     for (size_t i = 0; i < kMMCntBlockSizes; ++i) {
1235         Int8 cnt = Int8(s_UserBlAlloced[i] - s_UserBlFreed[i]);
1236         m_EndState.m_UserBlocks[i] = cnt;
1237         total_data += cnt * kMMBlockSizes[i];
1238         m_EndState.m_SysBlocks[i] = Int8(s_SysBlAlloced[i] - s_SysBlFreed[i]);
1239     }
1240     m_EndState.m_BigBlocksCnt = Int8(s_BigAllocedCnt - s_BigFreedCnt);
1241     Int8 size = Int8(s_BigAllocedSize - s_BigFreedSize);
1242 #endif
1243 
1244     m_EndState.m_BigBlocksSize = size;
1245     total_data += size;
1246     m_EndState.m_TotalData = total_data;
1247     m_EndState.m_TotalSys = s_TotalSysMem;
1248 }
1249 
1250 void
1251 SMMStat::ClearStats(void)
1252 {
1253     memset(m_UserBlAlloced, 0, sizeof(m_UserBlAlloced));
1254     memset(m_UserBlFreed, 0, sizeof(m_UserBlFreed));
1255     memset(m_SysBlAlloced, 0, sizeof(m_SysBlAlloced));
1256     memset(m_SysBlFreed, 0, sizeof(m_SysBlFreed));
1257     m_BigAllocedCnt = m_BigFreedCnt = 0;
1258     m_BigAllocedSize = m_BigFreedSize = 0;
1259     m_TotalSysMem.Initialize();
1260     m_TotalDataMem.Initialize();
1261 }
1262 
1263 void
1264 SMMStat::AddStats(SMMStat* src_stat)
1265 {
1266 #if __NC_MEMMAN_PERTHREAD_STAT
1267     for (Uint1 i = 0; i < kMMCntBlockSizes; ++i) {
1268         AtomicAdd(m_UserBlAlloced[i], src_stat->m_UserBlAlloced[i]);
1269         AtomicAdd(m_UserBlFreed[i], src_stat->m_UserBlFreed[i]);
1270         AtomicAdd(m_SysBlAlloced[i], src_stat->m_SysBlAlloced[i]);
1271         AtomicAdd(m_SysBlFreed[i], src_stat->m_SysBlFreed[i]);
1272     }
1273     AtomicAdd(m_BigAllocedCnt, src_stat->m_BigAllocedCnt);
1274     AtomicAdd(m_BigAllocedSize, src_stat->m_BigAllocedSize);
1275     AtomicAdd(m_BigFreedCnt, src_stat->m_BigFreedCnt);
1276     AtomicAdd(m_BigFreedSize, src_stat->m_BigFreedSize);
1277 #else
1278     for (Uint1 i = 0; i < kMMCntBlockSizes; ++i) {
1279         m_UserBlAlloced[i] = s_UserBlAlloced[i];
1280         m_UserBlFreed[i] = s_UserBlFreed[i];
1281         m_SysBlAlloced[i] = s_SysBlAlloced[i];
1282         m_SysBlFreed[i] = s_SysBlFreed[i];
1283     }
1284     m_BigAllocedCnt = s_BigAllocedCnt;
1285     m_BigAllocedSize = s_BigAllocedSize;
1286     m_BigFreedCnt = s_BigFreedCnt;
1287     m_BigFreedSize = s_BigFreedSize;
1288 #endif
1289     m_TotalSysMem.AddValues(src_stat->m_TotalSysMem);
1290     m_TotalDataMem.AddValues(src_stat->m_TotalDataMem);
1291 }
1292 
1293 void
1294 SMMStat::SaveEndStateStat(SMMStat* src_stat)
1295 {
1296     m_TotalSysMem.AddValue(src_stat->m_EndState.m_TotalSys);
1297     m_TotalDataMem.AddValue(src_stat->m_EndState.m_TotalData);
1298 }
1299 
1300 void
1301 SMMStat::PrintToLogs(CRequestContext* ctx, CSrvPrintProxy& proxy)
1302 {
1303  CSrvDiagMsg diag;
1304  diag.PrintExtra(ctx);
1305  diag.PrintParam("start_sys_mem", m_StartState.m_TotalSys)
1306  .PrintParam("end_sys_mem", m_EndState.m_TotalSys)
1307  .PrintParam("avg_sys_mem", m_TotalSysMem.GetAverage())
1308  .PrintParam("max_sys_mem", m_TotalSysMem.GetMaximum())
1309  .PrintParam("start_data_mem", m_StartState.m_TotalData)
1310  .PrintParam("end_data_mem", m_EndState.m_TotalData)
1311  .PrintParam("avg_data_mem", m_TotalDataMem.GetAverage())
1312  .PrintParam("max_data_mem", m_TotalDataMem.GetMaximum())
1313  .PrintParam("mmap_page_cnt", s_TotalPageCount)
1314  .PrintParam("big_blocks_cnt", m_EndState.m_BigBlocksCnt)
1315  .PrintParam("big_blocks_size", m_EndState.m_BigBlocksSize);
1316  diag.Flush();
1317 
1318 // 13oct16: removed - nobody will see it anyway
1319 // x_PrintUnstructured(proxy);
1320 }
1321 
1322 static void
1323 s_PrintSizeDiff(CSrvPrintProxy& proxy, Uint2 size_idx,
1324                 Int8 was_blocks, Int8 is_blocks)
1325 {
1326  if (was_blocks == is_blocks) {
1327  proxy << "0";
1328  }
1329  else if (is_blocks > was_blocks) {
1330  Int8 diff = is_blocks - was_blocks;
1331  proxy << "+" << g_ToSizeStr(diff * kMMBlockSizes[size_idx]);
1332  }
1333  else {
1334  Int8 diff = was_blocks - is_blocks;
1335  proxy << "-" << g_ToSizeStr(diff * kMMBlockSizes[size_idx]);
1336  }
1337 }
1338 
1339 static void
1340 s_PrintBlocksState(CSrvPrintProxy& proxy, Int8* start_blocks, Int8* end_blocks)
1341 {
1342  Uint2 low_size = 0;
1343  Uint2 size = 0;
1344  for (Uint1 i = 0; i < kMMCntBlockSizes; ++i, low_size = size + 1) {
1345  size = kMMBlockSizes[i];
1346  if (start_blocks[i] == 0 && end_blocks[i] == 0)
1347  continue;
1348 
1349  proxy << low_size << "-" << size << ": ";
1350  if (start_blocks[i] == end_blocks[i]) {
1351  proxy << "unchanged ("
1352  << g_ToSmartStr(start_blocks[i]) << " of "
1353  << g_ToSizeStr(start_blocks[i] * kMMBlockSizes[i])
1354  << ")" << endl;
1355  }
1356  else {
1357  s_PrintSizeDiff(proxy, i, start_blocks[i], end_blocks[i]);
1358  proxy << " (" << g_ToSmartStr(start_blocks[i]) << " of "
1359  << g_ToSizeStr(start_blocks[i] * kMMBlockSizes[i])
1360  << " to " << g_ToSmartStr(end_blocks[i]) << " of "
1361  << g_ToSizeStr(end_blocks[i] * kMMBlockSizes[i])
1362  << ")" << endl;
1363  }
1364  }
1365 }
1366 
1367 void
1368 SMMStat::x_PrintUnstructured(CSrvPrintProxy& proxy)
1369 {
1370     proxy << "Data memory state by size:" << endl;
1371     s_PrintBlocksState(proxy, m_StartState.m_UserBlocks,
1372                        m_EndState.m_UserBlocks);
1373     proxy << "Big size: ";
1374     if (m_StartState.m_BigBlocksCnt != 0  ||  m_EndState.m_BigBlocksCnt != 0) {
1375         if (m_StartState.m_BigBlocksCnt == m_EndState.m_BigBlocksCnt)
1376         {
1377             proxy << "unchanged ("
1378                   << g_ToSmartStr(m_StartState.m_BigBlocksCnt) << " of "
1379                   << g_ToSizeStr(m_StartState.m_BigBlocksSize) << ")" << endl;
1380         }
1381         else {
1382             if (m_EndState.m_BigBlocksSize == m_StartState.m_BigBlocksSize) {
1383                 proxy << "0";
1384             }
1385             else if (m_EndState.m_BigBlocksSize > m_StartState.m_BigBlocksSize) {
1386                 proxy << "+" << g_ToSizeStr(m_EndState.m_BigBlocksSize
1387                                             - m_StartState.m_BigBlocksSize);
1388             }
1389             else {
1390                 proxy << "-" << g_ToSizeStr(m_StartState.m_BigBlocksSize
1391                                             - m_EndState.m_BigBlocksSize);
1392             }
1393             proxy << " (" << g_ToSmartStr(m_StartState.m_BigBlocksCnt) << " of "
1394                   << g_ToSizeStr(m_StartState.m_BigBlocksSize)
1395                   << " to " << g_ToSmartStr(m_EndState.m_BigBlocksCnt) << " of "
1396                   << g_ToSizeStr(m_EndState.m_BigBlocksSize)
1397                   << ")" << endl;
1398         }
1399     }
1400 
1401     proxy << endl << "Sys memory state by size:" << endl;
1402     s_PrintBlocksState(proxy, m_StartState.m_SysBlocks, m_EndState.m_SysBlocks);
1403 
1404     proxy << endl << "Memory flow by size:" << endl;
1405     Uint2 low_size = 0;
1406     for (Uint1 i = 0; i < kMMCntBlockSizes; ++i) {
1407         Uint2 size = kMMBlockSizes[i];
1408         if (m_UserBlAlloced[i] != 0 || m_UserBlFreed[i] != 0
1409             || m_SysBlAlloced[i] != 0 || m_SysBlFreed[i] != 0)
1410         {
1411             proxy << low_size << "-" << size << ": ";
1412             if (m_UserBlAlloced[i] != 0 || m_UserBlFreed[i] != 0) {
1413                 s_PrintSizeDiff(proxy, i, Int8(m_UserBlFreed[i]), Int8(m_UserBlAlloced[i]));
1414                 proxy << " (+" << g_ToSmartStr(m_UserBlAlloced[i])
1415                       << "-" << g_ToSmartStr(m_UserBlFreed[i]) << ") data";
1416                 if (m_SysBlAlloced[i] != 0 || m_SysBlFreed[i] != 0)
1417                     proxy << ", ";
1418             }
1419             if (m_SysBlAlloced[i] != 0 || m_SysBlFreed[i] != 0) {
1420                 s_PrintSizeDiff(proxy, i, Int8(m_SysBlFreed[i]), Int8(m_SysBlAlloced[i]));
1421                 proxy << " (+" << g_ToSmartStr(m_SysBlAlloced[i])
1422                       << "-" << g_ToSmartStr(m_SysBlFreed[i]) << ") sys";
1423             }
1424             proxy << endl;
1425         }
1426         low_size = size + 1;
1427     }
1428     if (m_BigAllocedCnt != 0 || m_BigFreedCnt != 0) {
1429         proxy << "Big size: ";
1430         if (m_BigAllocedSize == m_BigFreedSize)
1431             proxy << "0";
1432  else if (m_BigAllocedSize >= m_BigFreedSize)
1433  proxy << "+" << g_ToSizeStr(m_BigAllocedSize - m_BigFreedSize);
1434  else
1435  proxy << "-" << g_ToSizeStr(m_BigFreedSize - m_BigAllocedSize);
1436  proxy << " (+" << g_ToSmartStr(m_BigAllocedCnt) << " of "
1437  << g_ToSizeStr(m_BigAllocedSize) << ", -"
1438  << g_ToSmartStr(m_BigFreedCnt) << " of "
1439  << g_ToSizeStr(m_BigFreedSize) << ")" << endl;
1440  }
1441  proxy << endl;
1442 }
1443 
1444 void
1445 SMMStat::PrintToSocket(CSrvPrintProxy& proxy)
1446 {
1447 #if !__NC_MEMMAN_USE_STD_MALLOC
1448  proxy << endl
1449  << "Data memory state - "
1450  << g_ToSizeStr(m_StartState.m_TotalData) << " to "
1451  << g_ToSizeStr(m_EndState.m_TotalData) << " (avg "
1452  << g_ToSizeStr(m_TotalDataMem.GetAverage()) << ", max "
1453  << g_ToSizeStr(m_TotalDataMem.GetMaximum()) << ")" << endl;
1454  proxy << "System memory state - "
1455  << g_ToSizeStr(m_StartState.m_TotalSys) << " to "
1456  << g_ToSizeStr(m_EndState.m_TotalSys) << " (avg "
1457  << g_ToSizeStr(m_TotalSysMem.GetAverage()) << ", max "
1458  << g_ToSizeStr(m_TotalSysMem.GetMaximum()) << ")" << endl;
1459  proxy << endl;
1460 
1461  x_PrintUnstructured(proxy);
1462 #endif //!__NC_MEMMAN_USE_STD_MALLOC
1463 }
1464 
1464 void
1465 SMMStat::PrintState(CSrvSocketTask& task)
1466 {
1467  string is("\": "), iss("\": \""), eol(",\n\""), qt("\"");
1468  task.WriteText(eol).WriteText("memory_man").WriteText(iss);
1469 #if __NC_MEMMAN_USE_STD_MALLOC
1470 #if defined(NETCACHE_MEMORY_MAN_TCM)
1471  task.WriteText("TCM");
1472 #else
1473  task.WriteText("STD");
1474 #endif
1475 #else
1476  task.WriteText("NC");
1477 #endif
1478  task.WriteText(qt);
1479 #if __NC_MEMMAN_USE_STD_MALLOC
1480 #else
1481     task.WriteText(eol).WriteText("total_sys_memory").WriteText(iss)
1482         .WriteText(NStr::UInt8ToString_DataSize(s_TotalSysMem)).WriteText(qt);
1483     task.WriteText(eol).WriteText("total_data_memory").WriteText(iss)
1484         .WriteText(NStr::UInt8ToString_DataSize(m_EndState.m_TotalData)).WriteText(qt);
1485     task.WriteText(eol).WriteText("big_blocks_cnt").WriteText(is).WriteNumber(m_EndState.m_BigBlocksCnt);
1486     task.WriteText(eol).WriteText("big_blocks_size").WriteText(iss)
1487         .WriteText(NStr::UInt8ToString_DataSize(Uint8(m_EndState.m_BigBlocksSize))).WriteText(qt);
1488  task.WriteText(eol).WriteText("mmap_page_cnt").WriteText(is).WriteNumber(s_TotalPageCount);
1489 #endif
1490 #if __NC_MEMMAN_ALLPTR_COUNT
1491  task.WriteText(eol).WriteText("AllPtrCount").WriteText(is);
1492  task.WriteText("[");
1493  for (Int8 i=0; i < kMMCntBlockSizes; ++i) {
1494  if (i != 0) {
1495  task.WriteText(",");
1496  }
1497  task.WriteNumber( s_AllPtrCount[i]);
1498  }
1499  task.WriteText("]");
1500  task.WriteText(eol).WriteText("AllSysPtrCount").WriteText(is);
1501  task.WriteText("[");
1502  for (Int8 i=0; i < kMMCntBlockSizes; ++i) {
1503  if (i != 0) {
1504  task.WriteText(",");
1505  }
1506  task.WriteNumber( s_AllSysPtrCount[i]);
1507  }
1508  task.WriteText("]");
1509 #endif
1510 }
1511 
1511 
1512 END_NCBI_SCOPE
1513 
1514 #if !__NC_MEMMAN_USE_STD_MALLOC
1515 
1516 void*
1517 operator new (size_t size)
1518 #if defined(NCBI_COMPILER_GCC) || defined(NCBI_COMPILER_ANY_CLANG)
1519 noexcept(false)
1520 #endif
1521 {
1522  return ncbi::s_AllocMemory(size);
1523 }
1524 
1525 void
1526 operator delete (void* ptr) throw ()
1527 {
1528  ncbi::s_DeallocMemory(ptr);
1529 }
1530 
1531 void*
1532 operator new[] (size_t size)
1533 #if defined(NCBI_COMPILER_GCC) || defined(NCBI_COMPILER_ANY_CLANG)
1534 noexcept(false)
1535 #endif
1536 {
1537  return ncbi::s_AllocMemory(size);
1538 }
1539 
1540 void
1541 operator delete[] (void* ptr) throw ()
1542 {
1543  ncbi::s_DeallocMemory(ptr);
1544 }
1545 
1546 #ifdef __GLIBC__
1547 // glibc has a special method of overriding C library allocation functions.
1548 // Also, see comments in FinalizeMemoryMan
1549 #if __NC_MEMMAN_USE_MALLOC_HOOK
1550 #include <malloc.h>
1551 #ifndef __MALLOC_HOOK_VOLATILE
1552 # define __MALLOC_HOOK_VOLATILE
1553 #endif
1554 
1555 static void*
1556 s_MallocHook(size_t size, const void* caller)
1557 {
1558  return ncbi::s_AllocMemory(size);
1559 }
1560 
1561 static void*
1562 s_ReallocHook(void* mem_ptr, size_t size, const void* caller)
1563 {
1564  return ncbi::s_ReallocMemory(mem_ptr, size);
1565 }
1566 
1567 static void
1568 s_FreeHook(void* mem_ptr, const void* caller)
1569 {
1570  ncbi::s_DeallocMemory(mem_ptr);
1571 }
1572 
1573 static void
1574 s_InitMallocHook(void)
1575 {
1576  __malloc_hook = &s_MallocHook;
1577  __realloc_hook = &s_ReallocHook;
1578  __free_hook = &s_FreeHook;
1579 }
1580 
1581 void (*__MALLOC_HOOK_VOLATILE __malloc_initialize_hook) (void) = &s_InitMallocHook;
1582 
1583 #else //__NC_MEMMAN_USE_MALLOC_HOOK
1584 
1585 extern "C" {
1586 
1587 void* malloc(size_t size)
1588 {
1589  return ncbi::s_AllocMemory(size);
1590 }
1591 
1592 void* realloc(void* mem_ptr, size_t size)
1593 {
1594  return ncbi::s_ReallocMemory(mem_ptr, size);
1595 }
1596 
1597 void free(void* mem_ptr)
1598 {
1599  ncbi::s_DeallocMemory(mem_ptr);
1600 }
1601 }
1602 
1603 #endif //__NC_MEMMAN_USE_MALLOC_HOOK
1604 
1605 extern "C" {
1606 size_t malloc_usable_size(void* mem_ptr)
1607 {
1608  return ncbi::GetMemSize(mem_ptr);
1609 }
1610 }
1611 
1612 #endif //__GLIBC__
1613 
1614 #endif //__NC_MEMMAN_USE_STD_MALLOC