root/svn/ircd-hybrid/trunk/src/mempool.c
Revision: 5347
Committed: Sun Jan 11 12:42:20 2015 UTC by michael
Content type: text/x-csrc
File size: 21644 byte(s)
Log Message:
- Update copyright years

File Contents

/*
 * Copyright (c) 2007-2012, The Tor Project, Inc.
 * Copyright (c) 2012-2015 ircd-hybrid development team
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following disclaimer
 *     in the documentation and/or other materials provided with the
 *     distribution.
 *
 *   * Neither the names of the copyright owners nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*! \file mempool.c
 * \brief A pooling allocator
 * \version $Id$
 */

#include "stdinc.h"
#include "memory.h"
#include "event.h"
#include "log.h"
#include "mempool.h"

/** Returns floor(log2(u64)). If u64 is 0, (incorrectly) returns 0. */
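/* For example: tor_log2(8) == 3, tor_log2(9) == 3 and
 * tor_log2(UINT64_MAX) == 63. */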
static int
tor_log2(uint64_t u64)
{
  int r = 0;

  if (u64 >= (1LLU << 32))
  {
    u64 >>= 32;
    r = 32;
  }

  if (u64 >= (1LLU << 16))
  {
    u64 >>= 16;
    r += 16;
  }

  if (u64 >= (1LLU << 8))
  {
    u64 >>= 8;
    r += 8;
  }

  if (u64 >= (1LLU << 4))
  {
    u64 >>= 4;
    r += 4;
  }

  if (u64 >= (1LLU << 2))
  {
    u64 >>= 2;
    r += 2;
  }

  if (u64 >= (1LLU << 1))
  {
    u64 >>= 1;
    r += 1;
  }

  return r;
}

/** Return the power of 2 in range [1,UINT64_MAX] closest to <b>u64</b>. If
 * there are two powers of 2 equally close, round down. */
static uint64_t
round_to_power_of_2(uint64_t u64)
{
  int lg2;
  uint64_t low;
  uint64_t high;

  if (u64 == 0)
    return 1;

  lg2 = tor_log2(u64);
  low = 1LLU << lg2;

  if (lg2 == 63)
    return low;

  high = 1LLU << (lg2 + 1);
  if (high - u64 < u64 - low)
    return high;
  else
    return low;
}
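
/* For example, round_to_power_of_2(7) == 8, but round_to_power_of_2(6) == 4:
 * when a value sits exactly halfway between two powers of 2, the lower one
 * wins. */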

/* OVERVIEW:
 *
 * This is an implementation of memory pools for Tor cells. It may be
 * useful for you too.
 *
 * Generally, a memory pool is an allocation strategy optimized for large
 * numbers of identically-sized objects. Rather than the elaborate arena
 * and coalescing strategies you need to get good performance for a
 * general-purpose malloc(), pools use a series of large memory "chunks",
 * each of which is carved into a bunch of smaller "items" or
 * "allocations".
 *
 * To get decent performance, you need to:
 *   - Minimize the number of times you hit the underlying allocator.
 *   - Try to keep accesses as local in memory as possible.
 *   - Try to keep the common case fast.
 *
 * Our implementation uses three lists of chunks per pool. Each chunk can
 * be either "full" (no more room for items); "empty" (no items); or
 * "used" (not full, not empty). There are independent doubly-linked
 * lists for each state.
 *
 * CREDIT:
 *
 * I wrote this after looking at 3 or 4 other pooling allocators, but
 * without copying. The strategy this most resembles (which is funny,
 * since that's the one I looked at longest ago) is the pool allocator
 * underlying Python's obmalloc code. Major differences from obmalloc's
 * pools are:
 *   - We don't even try to be threadsafe.
 *   - We only handle objects of one size.
 *   - Our list of empty chunks is doubly-linked, not singly-linked.
 *     (This could change pretty easily; it's only doubly-linked for
 *     consistency.)
 *   - We keep a list of full chunks (so we can have a "nuke everything"
 *     function). Obmalloc's pools leave full chunks to float unanchored.
 *
 * LIMITATIONS:
 *   - Not even slightly threadsafe.
 *   - Likes to have lots of items per chunk.
 *   - One pointer overhead per allocated thing. (The alternative is
 *     something like glib's use of an RB-tree to keep track of what
 *     chunk any given piece of memory is in.)
 *   - Only aligns allocated things to void* level: redefine ALIGNMENT_TYPE
 *     if you need doubles.
 *   - Could probably be optimized a bit; the representation contains
 *     a bit more info than it really needs to have.
 */
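
/* Usage, in outline. The sketch below is illustrative only; it is not part
 * of the original file, and "struct example_item" is a made-up type. One
 * pool serves one object size, items come back zeroed from mp_pool_get(),
 * and mp_pool_release() finds the owning pool on its own. */
#if 0
struct example_item
{
  int id;
  char name[32];
};

static void
example_usage(void)
{
  /* Ask for chunks of roughly 64 KB; mp_pool_new() rounds this to a
   * friendly power of 2 and derives the per-chunk item count. */
  mp_pool_t *pool = mp_pool_new(sizeof(struct example_item), 64 * 1024);

  /* Grab an item; the returned memory is zero-filled. */
  struct example_item *item = mp_pool_get(pool);

  item->id = 1;

  /* Hand it back. The hidden mp_allocated_t header in front of the item
   * records its chunk, so no pool argument is needed. */
  mp_pool_release(item);
}
#endif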

/* Tuning parameters */
/** Largest type that we need to ensure returned memory items are aligned to.
 * Change this to "double" if we need to be safe for structs with doubles. */
#define ALIGNMENT_TYPE void *
/** Increment to which we need to align allocated memory. */
#define ALIGNMENT sizeof(ALIGNMENT_TYPE)
/** Largest memory chunk that we should allocate. */
#define MAX_CHUNK (8 * (1L << 20))
/** Smallest memory chunk size that we should allocate. */
#define MIN_CHUNK 4096

typedef struct mp_allocated_t mp_allocated_t;
typedef struct mp_chunk_t mp_chunk_t;

/** Holds a single allocated item, allocated as part of a chunk. */
struct mp_allocated_t
{
  /** The chunk that this item is allocated in. This adds overhead to each
   * allocated item, thus making this implementation inappropriate for
   * very small items. */
  mp_chunk_t *in_chunk;

  union
  {
    /** If this item is free, the next item on the free list. */
    mp_allocated_t *next_free;

    /** If this item is not free, the actual memory contents of this item.
     * (Not actual size.) */
    char mem[1];

    /** An extra element to the union to ensure correct alignment. */
    ALIGNMENT_TYPE dummy_;
  } u;
};

/** 'Magic' value used to detect memory corruption. */
#define MP_CHUNK_MAGIC 0x09870123

/** A chunk of memory. Chunks come from malloc; we carve them into items. */
struct mp_chunk_t
{
  uint32_t magic;   /**< Must be MP_CHUNK_MAGIC if this chunk is valid. */
  mp_chunk_t *next; /**< The next free, used, or full chunk in sequence. */
  mp_chunk_t *prev; /**< The previous free, used, or full chunk in sequence. */
  mp_pool_t *pool;  /**< The pool that this chunk is part of. */

  /** First free item in the freelist for this chunk. Note that this may be
   * NULL even if this chunk is not at capacity: if so, the free memory at
   * next_mem has not yet been carved into items.
   */
  mp_allocated_t *first_free;
  int n_allocated;  /**< Number of currently allocated items in this chunk. */
  int capacity;     /**< Number of items that can be fit into this chunk. */
  size_t mem_size;  /**< Number of usable bytes in mem. */
  char *next_mem;   /**< Pointer into part of <b>mem</b> not yet carved up. */
  char mem[];       /**< Storage for this chunk. */
};
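
/* A chunk is laid out as the header above followed directly by its storage
 * (the flexible array member mem[]):
 *
 *   [mp_chunk_t header][item 0][item 1] ... [item capacity-1]
 *
 * where each item slot is item_alloc_size bytes: an mp_allocated_t header
 * carrying the in_chunk back-pointer, then the caller-visible memory. */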

static mp_pool_t *mp_allocated_pools = NULL;

/** Number of extra bytes needed beyond mem_size to allocate a chunk. */
#define CHUNK_OVERHEAD offsetof(mp_chunk_t, mem[0])

/** Given a pointer to a mp_allocated_t, return a pointer to the memory
 * item it holds. */
#define A2M(a) (&(a)->u.mem)
/** Given a pointer to a memory item, return a pointer to its enclosing
 * mp_allocated_t. */
#define M2A(p) (((char *)p) - offsetof(mp_allocated_t, u.mem))
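
/* A2M() and M2A() are inverses: mp_pool_get() returns A2M(allocated) to the
 * caller, and mp_pool_release() applies M2A() to that pointer to recover the
 * mp_allocated_t header and, through in_chunk, the owning chunk and pool. */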

void
mp_pool_init(void)
{
  static struct event event_mp_gc =
  {
    .name = "mp_pool_garbage_collect",
    .handler = mp_pool_garbage_collect,
    .when = 187
  };

  event_add(&event_mp_gc, NULL);
}
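
/* The event registered above runs mp_pool_garbage_collect() periodically
 * (every 187 seconds, if .when is an interval in seconds), sweeping each
 * pool for chunks that have stayed empty since the previous sweep. */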

/** Helper: Allocate and return a new memory chunk for <b>pool</b>. Does not
 * link the chunk into any list. */
static mp_chunk_t *
mp_chunk_new(mp_pool_t *pool)
{
  size_t sz = pool->new_chunk_capacity * pool->item_alloc_size;
  mp_chunk_t *chunk = MyCalloc(CHUNK_OVERHEAD + sz);

#ifdef MEMPOOL_STATS
  ++pool->total_chunks_allocated;
#endif
  chunk->magic = MP_CHUNK_MAGIC;
  chunk->capacity = pool->new_chunk_capacity;
  chunk->mem_size = sz;
  chunk->next_mem = chunk->mem;
  chunk->pool = pool;
  return chunk;
}

/** Take a <b>chunk</b> that has just been allocated or removed from
 * <b>pool</b>'s empty chunk list, and add it to the head of the used chunk
 * list. */
static void
add_newly_used_chunk_to_used_list(mp_pool_t *pool, mp_chunk_t *chunk)
{
  chunk->next = pool->used_chunks;
  if (chunk->next)
    chunk->next->prev = chunk;
  pool->used_chunks = chunk;
  assert(!chunk->prev);
}

/** Return a newly allocated item from <b>pool</b>. */
void *
mp_pool_get(mp_pool_t *pool)
{
  mp_chunk_t *chunk;
  mp_allocated_t *allocated;
  void *ptr = NULL;

  if (pool->used_chunks)
  {
    /*
     * Common case: there is some chunk that is neither full nor empty. Use
     * that one. (We can't use the full ones, obviously, and we should fill
     * up the used ones before we start on any empty ones.)
     */
    chunk = pool->used_chunks;

  }
  else if (pool->empty_chunks)
  {
    /*
     * We have no used chunks, but we have an empty chunk that we haven't
     * freed yet: use that. (We pull from the front of the list, which should
     * get us the most recently emptied chunk.)
     */
    chunk = pool->empty_chunks;

    /* Remove the chunk from the empty list. */
    pool->empty_chunks = chunk->next;
    if (chunk->next)
      chunk->next->prev = NULL;

    /* Put the chunk on the 'used' list. */
    add_newly_used_chunk_to_used_list(pool, chunk);

    assert(!chunk->prev);
    --pool->n_empty_chunks;
    if (pool->n_empty_chunks < pool->min_empty_chunks)
      pool->min_empty_chunks = pool->n_empty_chunks;
  }
  else
  {
    /* We have no used or empty chunks: allocate a new chunk. */
    chunk = mp_chunk_new(pool);

    /* Add the new chunk to the used list. */
    add_newly_used_chunk_to_used_list(pool, chunk);
  }

  assert(chunk->n_allocated < chunk->capacity);

  if (chunk->first_free)
  {
    /* If there's anything on the chunk's freelist, unlink it and use it. */
    allocated = chunk->first_free;
    chunk->first_free = allocated->u.next_free;
    allocated->u.next_free = NULL; /* For debugging; not really needed. */
    assert(allocated->in_chunk == chunk);
  }
  else
  {
    /* Otherwise, the chunk had better have some free space left on it. */
    assert(chunk->next_mem + pool->item_alloc_size <=
           chunk->mem + chunk->mem_size);

    /* Good, it did. Let's carve off a bit of that free space, and use
     * that. */
    allocated = (void *)chunk->next_mem;
    chunk->next_mem += pool->item_alloc_size;
    allocated->in_chunk = chunk;
    allocated->u.next_free = NULL; /* For debugging; not really needed. */
  }

  ++chunk->n_allocated;
#ifdef MEMPOOL_STATS
  ++pool->total_items_allocated;
#endif

  if (chunk->n_allocated == chunk->capacity)
  {
    /* This chunk just became full. */
    assert(chunk == pool->used_chunks);
    assert(chunk->prev == NULL);

    /* Take it off the used list. */
    pool->used_chunks = chunk->next;
    if (chunk->next)
      chunk->next->prev = NULL;

    /* Put it on the full list. */
    chunk->next = pool->full_chunks;
    if (chunk->next)
      chunk->next->prev = chunk;
    pool->full_chunks = chunk;
  }

  /* And return the memory portion of the mp_allocated_t. */
  ptr = A2M(allocated);
  memset(ptr, 0, pool->item_size);

  return ptr;
}

/** Return an allocated memory item to its memory pool. */
void
mp_pool_release(void *item)
{
  mp_allocated_t *allocated = (void *)M2A(item);
  mp_chunk_t *chunk = allocated->in_chunk;

  assert(chunk);
  assert(chunk->magic == MP_CHUNK_MAGIC);
  assert(chunk->n_allocated > 0);

  allocated->u.next_free = chunk->first_free;
  chunk->first_free = allocated;

  if (chunk->n_allocated == chunk->capacity)
  {
    /* This chunk was full and is about to be used. */
    mp_pool_t *pool = chunk->pool;
    /* Unlink from the full list. */
    if (chunk->prev)
      chunk->prev->next = chunk->next;
    if (chunk->next)
      chunk->next->prev = chunk->prev;
    if (chunk == pool->full_chunks)
      pool->full_chunks = chunk->next;

    /* Link to the used list. */
    chunk->next = pool->used_chunks;
    chunk->prev = NULL;

    if (chunk->next)
      chunk->next->prev = chunk;
    pool->used_chunks = chunk;
  }
  else if (chunk->n_allocated == 1)
  {
    /* This was used and is about to be empty. */
    mp_pool_t *pool = chunk->pool;

    /* Unlink from the used list. */
    if (chunk->prev)
      chunk->prev->next = chunk->next;
    if (chunk->next)
      chunk->next->prev = chunk->prev;
    if (chunk == pool->used_chunks)
      pool->used_chunks = chunk->next;

    /* Link to the empty list. */
    chunk->next = pool->empty_chunks;
    chunk->prev = NULL;
    if (chunk->next)
      chunk->next->prev = chunk;
    pool->empty_chunks = chunk;

    /* Reset the guts of this chunk to defragment it, in case it gets
     * used again. */
    chunk->first_free = NULL;
    chunk->next_mem = chunk->mem;

    ++pool->n_empty_chunks;
  }

  --chunk->n_allocated;
}
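
/* Taken together, mp_pool_get() and mp_pool_release() move each chunk
 * through a small state machine: empty -> used -> full as items are handed
 * out, and full -> used -> empty as they come back. The list manipulation
 * above is the bookkeeping for those transitions. */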

/** Allocate a new memory pool to hold items of size <b>item_size</b>. We'll
 * try to fit about <b>chunk_capacity</b> bytes in each chunk. */
mp_pool_t *
mp_pool_new(size_t item_size, size_t chunk_capacity)
{
  mp_pool_t *pool;
  size_t alloc_size, new_chunk_cap;

/*
  assert(item_size < SIZE_T_CEILING);
  assert(chunk_capacity < SIZE_T_CEILING);
  assert(SIZE_T_CEILING / item_size > chunk_capacity);
*/
  pool = MyCalloc(sizeof(mp_pool_t));
  /*
   * First, we figure out how much space to allow per item. We'll want to
   * make sure we have enough for the overhead plus the item size.
   */
  alloc_size = (size_t)(offsetof(mp_allocated_t, u.mem) + item_size);
  /*
   * If the item_size is less than sizeof(next_free), we need to make
   * the allocation bigger.
   */
  if (alloc_size < sizeof(mp_allocated_t))
    alloc_size = sizeof(mp_allocated_t);

  /* If we're not an even multiple of ALIGNMENT, round up. */
  if (alloc_size % ALIGNMENT)
    alloc_size = alloc_size + ALIGNMENT - (alloc_size % ALIGNMENT);
  if (alloc_size < ALIGNMENT)
    alloc_size = ALIGNMENT;

  assert((alloc_size % ALIGNMENT) == 0);

  /*
   * Now we figure out how many items fit in each chunk. We need to fit at
   * least 2 items per chunk. No chunk can be more than MAX_CHUNK bytes long,
   * or less than MIN_CHUNK.
   */
  if (chunk_capacity > MAX_CHUNK)
    chunk_capacity = MAX_CHUNK;

  /*
   * Try to be around a power of 2 in size, since that's what allocators like
   * handing out. 512K-1 byte is a lot better than 512K+1 byte.
   */
  chunk_capacity = (size_t)round_to_power_of_2(chunk_capacity);

  while (chunk_capacity < alloc_size * 2 + CHUNK_OVERHEAD)
    chunk_capacity *= 2;
  if (chunk_capacity < MIN_CHUNK)
    chunk_capacity = MIN_CHUNK;

  new_chunk_cap = (chunk_capacity - CHUNK_OVERHEAD) / alloc_size;
  assert(new_chunk_cap < INT_MAX);
  pool->new_chunk_capacity = (int)new_chunk_cap;

  pool->item_size = item_size;
  pool->item_alloc_size = alloc_size;

  pool->next = mp_allocated_pools;
  mp_allocated_pools = pool;

  ilog(LOG_TYPE_DEBUG, "Capacity is %lu, item size is %lu, alloc size is %lu",
       (unsigned long)pool->new_chunk_capacity,
       (unsigned long)pool->item_alloc_size,
       (unsigned long)(pool->new_chunk_capacity * pool->item_alloc_size));

  return pool;
}
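
/* Worked example (hypothetical numbers, assuming a 64-bit build where
 * offsetof(mp_allocated_t, u.mem) == 8 and CHUNK_OVERHEAD == 64): for
 * mp_pool_new(24, 64 * 1024), alloc_size becomes 8 + 24 == 32, already a
 * multiple of ALIGNMENT; 65536 is already a power of 2; so each new chunk
 * holds (65536 - 64) / 32 == 2046 items. */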

/** Helper function for qsort: used to sort pointers to mp_chunk_t into
 * descending order of fullness. */
static int
mp_pool_sort_used_chunks_helper(const void *_a, const void *_b)
{
  mp_chunk_t *a = *(mp_chunk_t * const *)_a;
  mp_chunk_t *b = *(mp_chunk_t * const *)_b;
  return b->n_allocated - a->n_allocated;
}

/** Sort the used chunks in <b>pool</b> into descending order of fullness,
 * so that we preferentially fill up mostly full chunks before we make
 * nearly empty chunks less nearly empty. */
static void
mp_pool_sort_used_chunks(mp_pool_t *pool)
{
  int i, n = 0, inverted = 0;
  mp_chunk_t **chunks, *chunk;

  for (chunk = pool->used_chunks; chunk; chunk = chunk->next)
  {
    ++n;
    if (chunk->next && chunk->next->n_allocated > chunk->n_allocated)
      ++inverted;
  }

  if (!inverted)
    return;

  chunks = MyCalloc(sizeof(mp_chunk_t *) * n);

  for (i = 0, chunk = pool->used_chunks; chunk; chunk = chunk->next)
    chunks[i++] = chunk;

  qsort(chunks, n, sizeof(mp_chunk_t *), mp_pool_sort_used_chunks_helper);
  pool->used_chunks = chunks[0];
  chunks[0]->prev = NULL;

  for (i = 1; i < n; ++i)
  {
    chunks[i - 1]->next = chunks[i];
    chunks[i]->prev = chunks[i - 1];
  }

  chunks[n - 1]->next = NULL;
  MyFree(chunks);
  mp_pool_assert_ok(pool);
}

/** If there are more than <b>n_to_keep</b> empty chunks in <b>pool</b>, free
 * the excess ones that have been empty for the longest. If
 * <b>keep_recently_used</b> is true, do not free chunks unless they have been
 * empty since the last call to this function.
 **/
void
mp_pool_clean(mp_pool_t *pool, int n_to_keep, int keep_recently_used)
{
  mp_chunk_t *chunk, **first_to_free;

  mp_pool_sort_used_chunks(pool);
  assert(n_to_keep >= 0);

  if (keep_recently_used)
  {
    int n_recently_used = pool->n_empty_chunks - pool->min_empty_chunks;

    if (n_to_keep < n_recently_used)
      n_to_keep = n_recently_used;
  }

  assert(n_to_keep >= 0);

  first_to_free = &pool->empty_chunks;

  while (*first_to_free && n_to_keep > 0)
  {
    first_to_free = &(*first_to_free)->next;
    --n_to_keep;
  }

  if (!*first_to_free)
  {
    pool->min_empty_chunks = pool->n_empty_chunks;
    return;
  }

  chunk = *first_to_free;

  while (chunk)
  {
    mp_chunk_t *next = chunk->next;
    chunk->magic = 0xdeadbeef;
    MyFree(chunk);
#ifdef MEMPOOL_STATS
    ++pool->total_chunks_freed;
#endif
    --pool->n_empty_chunks;
    chunk = next;
  }

  pool->min_empty_chunks = pool->n_empty_chunks;
  *first_to_free = NULL;
}
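
/* For example, the periodic garbage collector below calls
 * mp_pool_clean(pool, 0, 1), which frees only chunks that have stayed empty
 * since the previous call, while mp_pool_clean(pool, 4, 0) would immediately
 * free all but the four most recently emptied chunks. */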

#if 0
/** Helper: Given a list of chunks, free all the chunks in the list. */
static void
destroy_chunks(mp_chunk_t *chunk)
{
  mp_chunk_t *next;

  while (chunk) {
    chunk->magic = 0xd3adb33f;
    next = chunk->next;
    MyFree(chunk);
    chunk = next;
  }
}
#endif

/** Helper: make sure that a given chunk list is not corrupt. */
static int
assert_chunks_ok(mp_pool_t *pool, mp_chunk_t *chunk, int empty, int full)
{
  mp_allocated_t *allocated;
  int n = 0;

  if (chunk)
    assert(chunk->prev == NULL);

  while (chunk)
  {
    n++;
    assert(chunk->magic == MP_CHUNK_MAGIC);
    assert(chunk->pool == pool);

    for (allocated = chunk->first_free; allocated;
         allocated = allocated->u.next_free)
      assert(allocated->in_chunk == chunk);

    if (empty)
      assert(chunk->n_allocated == 0);
    else if (full)
      assert(chunk->n_allocated == chunk->capacity);
    else
      assert(chunk->n_allocated > 0 && chunk->n_allocated < chunk->capacity);

    assert(chunk->capacity == pool->new_chunk_capacity);

    assert(chunk->mem_size ==
           pool->new_chunk_capacity * pool->item_alloc_size);

    assert(chunk->next_mem >= chunk->mem &&
           chunk->next_mem <= chunk->mem + chunk->mem_size);

    if (chunk->next)
      assert(chunk->next->prev == chunk);

    chunk = chunk->next;
  }

  return n;
}

/** Fail with an assertion if <b>pool</b> is not internally consistent. */
void
mp_pool_assert_ok(mp_pool_t *pool)
{
  int n_empty;

  n_empty = assert_chunks_ok(pool, pool->empty_chunks, 1, 0);
  assert_chunks_ok(pool, pool->full_chunks, 0, 1);
  assert_chunks_ok(pool, pool->used_chunks, 0, 0);

  assert(pool->n_empty_chunks == n_empty);
}

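/** Event handler: walk every registered pool and reclaim chunks that have
 * stayed empty since the previous sweep. */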
void
mp_pool_garbage_collect(void *unused)
{
  for (mp_pool_t *pool = mp_allocated_pools; pool; pool = pool->next)
    mp_pool_clean(pool, 0, 1);
}

/** Dump information about <b>pool</b>'s memory usage to the debug log. */
void
mp_pool_log_status(mp_pool_t *pool)
{
  uint64_t bytes_used = 0;
  uint64_t bytes_allocated = 0;
  uint64_t bu = 0, ba = 0;
  mp_chunk_t *chunk;
  int n_full = 0, n_used = 0;

  assert(pool);

  for (chunk = pool->empty_chunks; chunk; chunk = chunk->next)
    bytes_allocated += chunk->mem_size;

  ilog(LOG_TYPE_DEBUG, "%llu bytes in %d empty chunks",
       bytes_allocated, pool->n_empty_chunks);
  for (chunk = pool->used_chunks; chunk; chunk = chunk->next)
  {
    ++n_used;
    bu += chunk->n_allocated * pool->item_alloc_size;
    ba += chunk->mem_size;

    ilog(LOG_TYPE_DEBUG, " used chunk: %d items allocated",
         chunk->n_allocated);
  }

  ilog(LOG_TYPE_DEBUG, "%llu/%llu bytes in %d partially full chunks",
       bu, ba, n_used);
  bytes_used += bu;
  bytes_allocated += ba;
  bu = ba = 0;

  for (chunk = pool->full_chunks; chunk; chunk = chunk->next)
  {
    ++n_full;
    bu += chunk->n_allocated * pool->item_alloc_size;
    ba += chunk->mem_size;
  }

  ilog(LOG_TYPE_DEBUG, "%llu/%llu bytes in %d full chunks",
       bu, ba, n_full);
  bytes_used += bu;
  bytes_allocated += ba;

  ilog(LOG_TYPE_DEBUG, "Total: %llu/%llu bytes allocated "
       "for cell pools are full.",
       bytes_used, bytes_allocated);

#ifdef MEMPOOL_STATS
  ilog(LOG_TYPE_DEBUG, "%llu cell allocations ever; "
       "%llu chunk allocations ever; "
       "%llu chunk frees ever.",
       pool->total_items_allocated,
       pool->total_chunks_allocated,
       pool->total_chunks_freed);
#endif
}

Properties

Name            Value
svn:eol-style   native
svn:keywords    Id Revision