root/svn/ircd-hybrid/trunk/src/mempool.c
Revision: 2916
Committed: Sat Jan 25 21:09:18 2014 UTC by michael
Content type: text/x-csrc
File size: 21395 byte(s)
Log Message:
- Clean up all files in include/ (fixed indentation, removed whitespaces/tabs)
- Fixed copyright years

/*
 * Copyright (c) 2007-2012, The Tor Project, Inc.
 * Copyright (c) 2012-2014 ircd-hybrid development team
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following disclaimer
 *     in the documentation and/or other materials provided with the
 *     distribution.
 *
 *   * Neither the names of the copyright owners nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*! \file mempool.c
 * \brief A pooling allocator
 * \version $Id$
 */

#include "stdinc.h"
#include "memory.h"
#include "event.h"
#include "log.h"
#include "mempool.h"

/** Returns floor(log2(u64)). If u64 is 0, (incorrectly) returns 0. */
static int
tor_log2(uint64_t u64)
{
  int r = 0;

  if (u64 >= (1LLU << 32))
  {
    u64 >>= 32;
    r = 32;
  }
  if (u64 >= (1LLU << 16))
  {
    u64 >>= 16;
    r += 16;
  }
  if (u64 >= (1LLU << 8))
  {
    u64 >>= 8;
    r += 8;
  }
  if (u64 >= (1LLU << 4))
  {
    u64 >>= 4;
    r += 4;
  }
  if (u64 >= (1LLU << 2))
  {
    u64 >>= 2;
    r += 2;
  }
  if (u64 >= (1LLU << 1))
  {
    u64 >>= 1;
    r += 1;
  }

  return r;
}
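
/* Illustrative values (a sketch, not part of the build): tor_log2(1) == 0,
 * tor_log2(64) == 6, tor_log2(65) == 6, tor_log2(UINT64_MAX) == 63, and
 * tor_log2(0) == 0 per the documented edge case above. */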

/** Return the power of 2 in range [1,UINT64_MAX] closest to <b>u64</b>. If
 * there are two powers of 2 equally close, round down. */
static uint64_t
round_to_power_of_2(uint64_t u64)
{
  int lg2;
  uint64_t low;
  uint64_t high;

  if (u64 == 0)
    return 1;

  lg2 = tor_log2(u64);
  low = 1LLU << lg2;

  if (lg2 == 63)
    return low;

  high = 1LLU << (lg2 + 1);
  if (high - u64 < u64 - low)
    return high;
  else
    return low;
}
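
/* Illustrative values (a sketch, not part of the build):
 * round_to_power_of_2(0) == 1, round_to_power_of_2(1000) == 1024, and
 * round_to_power_of_2(48) == 32, since 48 is equally close to 32 and 64
 * and ties round down. */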

/* OVERVIEW:
 *
 * This is an implementation of memory pools for Tor cells. It may be
 * useful for you too.
 *
 * Generally, a memory pool is an allocation strategy optimized for large
 * numbers of identically-sized objects. Rather than the elaborate arena
 * and coalescing strategies you need to get good performance for a
 * general-purpose malloc(), pools use a series of large memory "chunks",
 * each of which is carved into a bunch of smaller "items" or
 * "allocations".
 *
 * To get decent performance, you need to:
 *   - Minimize the number of times you hit the underlying allocator.
 *   - Try to keep accesses as local in memory as possible.
 *   - Try to keep the common case fast.
 *
 * Our implementation uses three lists of chunks per pool. Each chunk can
 * be either "full" (no more room for items); "empty" (no items); or
 * "used" (not full, not empty). There are independent doubly-linked
 * lists for each state. A usage sketch follows this comment.
 *
 * CREDIT:
 *
 * I wrote this after looking at 3 or 4 other pooling allocators, but
 * without copying. The strategy this most resembles (which is funny,
 * since that's the one I looked at longest ago) is the pool allocator
 * underlying Python's obmalloc code. Major differences from obmalloc's
 * pools are:
 *   - We don't even try to be threadsafe.
 *   - We only handle objects of one size.
 *   - Our list of empty chunks is doubly-linked, not singly-linked.
 *     (This could change pretty easily; it's only doubly-linked for
 *     consistency.)
 *   - We keep a list of full chunks (so we can have a "nuke everything"
 *     function). Obmalloc's pools leave full chunks to float unanchored.
 *
 * LIMITATIONS:
 *   - Not even slightly threadsafe.
 *   - Likes to have lots of items per chunk.
 *   - One pointer overhead per allocated thing. (The alternative is
 *     something like glib's use of an RB-tree to keep track of what
 *     chunk any given piece of memory is in.)
 *   - Only aligns allocated things to void* level: redefine ALIGNMENT_TYPE
 *     if you need doubles.
 *   - Could probably be optimized a bit; the representation contains
 *     a bit more info than it really needs to have.
 */
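
/*
 * Usage sketch (illustrative only, not part of the build; `struct cell'
 * stands in for whatever fixed-size item type the caller pools):
 *
 *   mp_pool_t *pool = mp_pool_new(sizeof(struct cell), 64 * 1024);
 *   struct cell *c = mp_pool_get(pool);
 *   ... use c ...
 *   mp_pool_release(c);
 *
 * Note that mp_pool_release() takes only the item pointer: each item
 * carries a back-pointer to its chunk, and each chunk records its pool,
 * so the caller never has to pass the pool back in.
 */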

/* Tuning parameters */
/** Largest type that we need to ensure returned memory items are aligned to.
 * Change this to "double" if we need to be safe for structs with doubles. */
#define ALIGNMENT_TYPE void *
/** Increment to which we need to align allocated memory. */
#define ALIGNMENT sizeof(ALIGNMENT_TYPE)
/** Largest memory chunk that we should allocate. */
#define MAX_CHUNK (8 * (1L << 20))
/** Smallest memory chunk size that we should allocate. */
#define MIN_CHUNK 4096

typedef struct mp_allocated_t mp_allocated_t;
typedef struct mp_chunk_t mp_chunk_t;

/** Holds a single allocated item, allocated as part of a chunk. */
struct mp_allocated_t {
  /** The chunk that this item is allocated in. This adds overhead to each
   * allocated item, thus making this implementation inappropriate for
   * very small items. */
  mp_chunk_t *in_chunk;

  union {
    /** If this item is free, the next item on the free list. */
    mp_allocated_t *next_free;

    /** If this item is not free, the actual memory contents of this item.
     * (Not actual size.) */
    char mem[1];

    /** An extra element to the union to ensure correct alignment. */
    ALIGNMENT_TYPE dummy_;
  } u;
};

/** 'Magic' value used to detect memory corruption. */
#define MP_CHUNK_MAGIC 0x09870123

/** A chunk of memory. Chunks come from malloc; we carve them up into
 * items. */
struct mp_chunk_t {
  uint32_t magic; /**< Must be MP_CHUNK_MAGIC if this chunk is valid. */
  mp_chunk_t *next; /**< The next free, used, or full chunk in sequence. */
  mp_chunk_t *prev; /**< The previous free, used, or full chunk in sequence. */
  mp_pool_t *pool; /**< The pool that this chunk is part of. */

  /** First free item in the freelist for this chunk. Note that this may be
   * NULL even if this chunk is not at capacity: if so, the free memory at
   * next_mem has not yet been carved into items.
   */
  mp_allocated_t *first_free;
  int n_allocated; /**< Number of currently allocated items in this chunk. */
  int capacity; /**< Number of items that can be fit into this chunk. */
  size_t mem_size; /**< Number of usable bytes in mem. */
  char *next_mem; /**< Pointer into part of <b>mem</b> not yet carved up. */
  char mem[]; /**< Storage for this chunk. */
};

static mp_pool_t *mp_allocated_pools = NULL;

/** Number of extra bytes needed beyond mem_size to allocate a chunk. */
#define CHUNK_OVERHEAD offsetof(mp_chunk_t, mem[0])

/** Given a pointer to an mp_allocated_t, return a pointer to the memory
 * item it holds. */
#define A2M(a) (&(a)->u.mem)
/** Given a pointer to a memory item, return a pointer to its enclosing
 * mp_allocated_t. */
#define M2A(p) (((char *)p) - offsetof(mp_allocated_t, u.mem))
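
/* Layout sketch (illustrative): the pointer handed out by mp_pool_get()
 * is the address of u.mem inside an mp_allocated_t, so the in_chunk
 * back-pointer sits immediately before the caller's memory:
 *
 *   | in_chunk | item memory ...            |
 *   ^-- M2A(item)
 *              ^-- item == A2M(allocated)
 */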

/** Initialize the pooling allocator: schedule mp_pool_garbage_collect()
 * to run periodically and reclaim long-empty chunks. */
void
mp_pool_init(void)
{
  eventAdd("mp_pool_garbage_collect", &mp_pool_garbage_collect, NULL, 119);
}

/** Helper: Allocate and return a new memory chunk for <b>pool</b>. Does not
 * link the chunk into any list. */
static mp_chunk_t *
mp_chunk_new(mp_pool_t *pool)
{
  size_t sz = pool->new_chunk_capacity * pool->item_alloc_size;
  mp_chunk_t *chunk = MyMalloc(CHUNK_OVERHEAD + sz);

#ifdef MEMPOOL_STATS
  ++pool->total_chunks_allocated;
#endif
  chunk->magic = MP_CHUNK_MAGIC;
  chunk->capacity = pool->new_chunk_capacity;
  chunk->mem_size = sz;
  chunk->next_mem = chunk->mem;
  chunk->pool = pool;
  return chunk;
}

/** Take a <b>chunk</b> that has just been allocated or removed from
 * <b>pool</b>'s empty chunk list, and add it to the head of the used chunk
 * list. */
static void
add_newly_used_chunk_to_used_list(mp_pool_t *pool, mp_chunk_t *chunk)
{
  chunk->next = pool->used_chunks;
  if (chunk->next)
    chunk->next->prev = chunk;
  pool->used_chunks = chunk;
  assert(!chunk->prev);
}

/** Return a newly allocated item from <b>pool</b>. */
void *
mp_pool_get(mp_pool_t *pool)
{
  mp_chunk_t *chunk;
  mp_allocated_t *allocated;

  if (pool->used_chunks != NULL) {
    /*
     * Common case: there is some chunk that is neither full nor empty. Use
     * that one. (We can't use the full ones, obviously, and we should fill
     * up the used ones before we start on any empty ones.)
     */
    chunk = pool->used_chunks;
  } else if (pool->empty_chunks) {
    /*
     * We have no used chunks, but we have an empty chunk that we haven't
     * freed yet: use that. (We pull from the front of the list, which should
     * get us the most recently emptied chunk.)
     */
    chunk = pool->empty_chunks;

    /* Remove the chunk from the empty list. */
    pool->empty_chunks = chunk->next;
    if (chunk->next)
      chunk->next->prev = NULL;

    /* Put the chunk on the 'used' list. */
    add_newly_used_chunk_to_used_list(pool, chunk);

    assert(!chunk->prev);
    --pool->n_empty_chunks;
    if (pool->n_empty_chunks < pool->min_empty_chunks)
      pool->min_empty_chunks = pool->n_empty_chunks;
  } else {
    /* We have no used or empty chunks: allocate a new chunk. */
    chunk = mp_chunk_new(pool);

    /* Add the new chunk to the used list. */
    add_newly_used_chunk_to_used_list(pool, chunk);
  }

  assert(chunk->n_allocated < chunk->capacity);

  if (chunk->first_free) {
    /* If there's anything on the chunk's freelist, unlink it and use it. */
    allocated = chunk->first_free;
    chunk->first_free = allocated->u.next_free;
    allocated->u.next_free = NULL; /* For debugging; not really needed. */
    assert(allocated->in_chunk == chunk);
  } else {
    /* Otherwise, the chunk had better have some free space left on it. */
    assert(chunk->next_mem + pool->item_alloc_size <=
           chunk->mem + chunk->mem_size);

    /* Good, it did. Let's carve off a bit of that free space, and use
     * that. */
    allocated = (void *)chunk->next_mem;
    chunk->next_mem += pool->item_alloc_size;
    allocated->in_chunk = chunk;
    allocated->u.next_free = NULL; /* For debugging; not really needed. */
  }

  ++chunk->n_allocated;
#ifdef MEMPOOL_STATS
  ++pool->total_items_allocated;
#endif

  if (chunk->n_allocated == chunk->capacity) {
    /* This chunk just became full. */
    assert(chunk == pool->used_chunks);
    assert(chunk->prev == NULL);

    /* Take it off the used list. */
    pool->used_chunks = chunk->next;
    if (chunk->next)
      chunk->next->prev = NULL;

    /* Put it on the full list. */
    chunk->next = pool->full_chunks;
    if (chunk->next)
      chunk->next->prev = chunk;
    pool->full_chunks = chunk;
  }

  /* And return the memory portion of the mp_allocated_t. */
  return A2M(allocated);
}

/** Return an allocated memory item to its memory pool. */
void
mp_pool_release(void *item)
{
  mp_allocated_t *allocated = (void *)M2A(item);
  mp_chunk_t *chunk = allocated->in_chunk;

  assert(chunk);
  assert(chunk->magic == MP_CHUNK_MAGIC);
  assert(chunk->n_allocated > 0);

  allocated->u.next_free = chunk->first_free;
  chunk->first_free = allocated;

  if (chunk->n_allocated == chunk->capacity) {
    /* This chunk was full and is about to be used. */
    mp_pool_t *pool = chunk->pool;

    /* Unlink from the full list. */
    if (chunk->prev)
      chunk->prev->next = chunk->next;
    if (chunk->next)
      chunk->next->prev = chunk->prev;
    if (chunk == pool->full_chunks)
      pool->full_chunks = chunk->next;

    /* Link to the used list. */
    chunk->next = pool->used_chunks;
    chunk->prev = NULL;
    if (chunk->next)
      chunk->next->prev = chunk;
    pool->used_chunks = chunk;
  } else if (chunk->n_allocated == 1) {
    /* This was used and is about to be empty. */
    mp_pool_t *pool = chunk->pool;

    /* Unlink from the used list. */
    if (chunk->prev)
      chunk->prev->next = chunk->next;
    if (chunk->next)
      chunk->next->prev = chunk->prev;
    if (chunk == pool->used_chunks)
      pool->used_chunks = chunk->next;

    /* Link to the empty list. */
    chunk->next = pool->empty_chunks;
    chunk->prev = NULL;
    if (chunk->next)
      chunk->next->prev = chunk;
    pool->empty_chunks = chunk;

    /* Reset the guts of this chunk to defragment it, in case it gets
     * used again. */
    chunk->first_free = NULL;
    chunk->next_mem = chunk->mem;

    ++pool->n_empty_chunks;
  }

  --chunk->n_allocated;
}

/** Allocate a new memory pool to hold items of size <b>item_size</b>. We'll
 * try to fit about <b>chunk_capacity</b> bytes in each chunk. */
mp_pool_t *
mp_pool_new(size_t item_size, size_t chunk_capacity)
{
  mp_pool_t *pool;
  size_t alloc_size, new_chunk_cap;

  /* assert(item_size < SIZE_T_CEILING);
  assert(chunk_capacity < SIZE_T_CEILING);
  assert(SIZE_T_CEILING / item_size > chunk_capacity);
  */
  pool = MyMalloc(sizeof(mp_pool_t));

  /*
   * First, we figure out how much space to allow per item. We'll want to
   * make sure we have enough for the overhead plus the item size.
   */
  alloc_size = (size_t)(offsetof(mp_allocated_t, u.mem) + item_size);

  /*
   * If the item_size is less than sizeof(next_free), we need to make
   * the allocation bigger.
   */
  if (alloc_size < sizeof(mp_allocated_t))
    alloc_size = sizeof(mp_allocated_t);

  /* If we're not an even multiple of ALIGNMENT, round up. */
  if (alloc_size % ALIGNMENT) {
    alloc_size = alloc_size + ALIGNMENT - (alloc_size % ALIGNMENT);
  }
  if (alloc_size < ALIGNMENT)
    alloc_size = ALIGNMENT;
  assert((alloc_size % ALIGNMENT) == 0);

  /*
   * Now we figure out how many items fit in each chunk. We need to fit at
   * least 2 items per chunk. No chunk can be more than MAX_CHUNK bytes long,
   * or less than MIN_CHUNK.
   */
  if (chunk_capacity > MAX_CHUNK)
    chunk_capacity = MAX_CHUNK;

  /*
   * Try to be around a power of 2 in size, since that's what allocators like
   * handing out. 512K-1 byte is a lot better than 512K+1 byte.
   */
  chunk_capacity = (size_t)round_to_power_of_2(chunk_capacity);
  while (chunk_capacity < alloc_size * 2 + CHUNK_OVERHEAD)
    chunk_capacity *= 2;
  if (chunk_capacity < MIN_CHUNK)
    chunk_capacity = MIN_CHUNK;

  new_chunk_cap = (chunk_capacity - CHUNK_OVERHEAD) / alloc_size;
  assert(new_chunk_cap < INT_MAX);
  pool->new_chunk_capacity = (int)new_chunk_cap;

  pool->item_alloc_size = alloc_size;

  pool->next = mp_allocated_pools;
  mp_allocated_pools = pool;

  ilog(LOG_TYPE_DEBUG, "Capacity is %lu, item size is %lu, alloc size is %lu",
       (unsigned long)pool->new_chunk_capacity,
       (unsigned long)pool->item_alloc_size,
       (unsigned long)(pool->new_chunk_capacity * pool->item_alloc_size));

  return pool;
}
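
/* Worked example (an illustrative sketch; assumes 8-byte pointers and a
 * typical LP64 struct layout where CHUNK_OVERHEAD == 64):
 * mp_pool_new(512, 256 * 1024) gives alloc_size = 8 + 512 = 520, which is
 * already a multiple of ALIGNMENT; chunk_capacity stays at the power of
 * two 262144; and new_chunk_capacity = (262144 - 64) / 520 = 504 items,
 * so each chunk mallocs exactly 64 + 504 * 520 = 262144 bytes. */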

/** Helper function for qsort: used to sort pointers to mp_chunk_t into
 * descending order of fullness. */
static int
mp_pool_sort_used_chunks_helper(const void *_a, const void *_b)
{
  mp_chunk_t *a = *(mp_chunk_t * const *)_a;
  mp_chunk_t *b = *(mp_chunk_t * const *)_b;
  return b->n_allocated - a->n_allocated;
}

/** Sort the used chunks in <b>pool</b> into descending order of fullness,
 * so that we preferentially fill up mostly full chunks before we make
 * nearly empty chunks less nearly empty. */
static void
mp_pool_sort_used_chunks(mp_pool_t *pool)
{
  int i, n = 0, inverted = 0;
  mp_chunk_t **chunks, *chunk;

  for (chunk = pool->used_chunks; chunk; chunk = chunk->next) {
    ++n;
    if (chunk->next && chunk->next->n_allocated > chunk->n_allocated)
      ++inverted;
  }

  if (!inverted)
    return;

  chunks = MyMalloc(sizeof(mp_chunk_t *) * n);

  for (i = 0, chunk = pool->used_chunks; chunk; chunk = chunk->next)
    chunks[i++] = chunk;

  qsort(chunks, n, sizeof(mp_chunk_t *), mp_pool_sort_used_chunks_helper);
  pool->used_chunks = chunks[0];
  chunks[0]->prev = NULL;

  for (i = 1; i < n; ++i) {
    chunks[i - 1]->next = chunks[i];
    chunks[i]->prev = chunks[i - 1];
  }

  chunks[n - 1]->next = NULL;
  MyFree(chunks);
  mp_pool_assert_ok(pool);
}

/** If there are more than <b>n_to_keep</b> empty chunks in <b>pool</b>, free
 * the excess ones that have been empty for the longest. If
 * <b>keep_recently_used</b> is true, do not free chunks unless they have been
 * empty since the last call to this function.
 */
void
mp_pool_clean(mp_pool_t *pool, int n_to_keep, int keep_recently_used)
{
  mp_chunk_t *chunk, **first_to_free;

  mp_pool_sort_used_chunks(pool);
  assert(n_to_keep >= 0);

  if (keep_recently_used) {
    int n_recently_used = pool->n_empty_chunks - pool->min_empty_chunks;
    if (n_to_keep < n_recently_used)
      n_to_keep = n_recently_used;
  }

  assert(n_to_keep >= 0);

  first_to_free = &pool->empty_chunks;
  while (*first_to_free && n_to_keep > 0) {
    first_to_free = &(*first_to_free)->next;
    --n_to_keep;
  }
  if (!*first_to_free) {
    pool->min_empty_chunks = pool->n_empty_chunks;
    return;
  }

  chunk = *first_to_free;
  while (chunk) {
    mp_chunk_t *next = chunk->next;
    chunk->magic = 0xdeadbeef;
    MyFree(chunk);
#ifdef MEMPOOL_STATS
    ++pool->total_chunks_freed;
#endif
    --pool->n_empty_chunks;
    chunk = next;
  }

  pool->min_empty_chunks = pool->n_empty_chunks;
  *first_to_free = NULL;
}
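
/* Usage note (illustrative): the periodic garbage collector below calls
 * mp_pool_clean(pool, 0, 1), which frees only chunks that have been empty
 * since the previous sweep; chunks that became empty more recently are
 * kept, on the theory that they are likely to be reused soon. */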

/** Helper: Given a list of chunks, free all the chunks in the list. */
static void
destroy_chunks(mp_chunk_t *chunk)
{
  mp_chunk_t *next;

  while (chunk) {
    chunk->magic = 0xd3adb33f;
    next = chunk->next;
    MyFree(chunk);
    chunk = next;
  }
}

/** Helper: make sure that a given chunk list is not corrupt. */
static int
assert_chunks_ok(mp_pool_t *pool, mp_chunk_t *chunk, int empty, int full)
{
  mp_allocated_t *allocated;
  int n = 0;

  if (chunk)
    assert(chunk->prev == NULL);

  while (chunk) {
    n++;
    assert(chunk->magic == MP_CHUNK_MAGIC);
    assert(chunk->pool == pool);
    for (allocated = chunk->first_free; allocated;
         allocated = allocated->u.next_free) {
      assert(allocated->in_chunk == chunk);
    }
    if (empty)
      assert(chunk->n_allocated == 0);
    else if (full)
      assert(chunk->n_allocated == chunk->capacity);
    else
      assert(chunk->n_allocated > 0 && chunk->n_allocated < chunk->capacity);

    assert(chunk->capacity == pool->new_chunk_capacity);
    assert(chunk->mem_size ==
           pool->new_chunk_capacity * pool->item_alloc_size);
    assert(chunk->next_mem >= chunk->mem &&
           chunk->next_mem <= chunk->mem + chunk->mem_size);

    if (chunk->next)
      assert(chunk->next->prev == chunk);

    chunk = chunk->next;
  }

  return n;
}

/** Fail with an assertion if <b>pool</b> is not internally consistent. */
void
mp_pool_assert_ok(mp_pool_t *pool)
{
  int n_empty;

  n_empty = assert_chunks_ok(pool, pool->empty_chunks, 1, 0);
  assert_chunks_ok(pool, pool->full_chunks, 0, 1);
  assert_chunks_ok(pool, pool->used_chunks, 0, 0);

  assert(pool->n_empty_chunks == n_empty);
}

/** Scheduled event: walk every registered pool and free chunks that have
 * stayed empty since the last sweep. */
void
mp_pool_garbage_collect(void *arg)
{
  mp_pool_t *pool = mp_allocated_pools;

  for (; pool; pool = pool->next)
    mp_pool_clean(pool, 0, 1);
}

/** Dump information about <b>pool</b>'s memory usage to the debug log. */
void
mp_pool_log_status(mp_pool_t *pool)
{
  uint64_t bytes_used = 0;
  uint64_t bytes_allocated = 0;
  uint64_t bu = 0, ba = 0;
  mp_chunk_t *chunk;
  int n_full = 0, n_used = 0;

  assert(pool);

  for (chunk = pool->empty_chunks; chunk; chunk = chunk->next)
    bytes_allocated += chunk->mem_size;

  ilog(LOG_TYPE_DEBUG, "%llu bytes in %d empty chunks",
       bytes_allocated, pool->n_empty_chunks);

  for (chunk = pool->used_chunks; chunk; chunk = chunk->next) {
    ++n_used;
    bu += chunk->n_allocated * pool->item_alloc_size;
    ba += chunk->mem_size;

    ilog(LOG_TYPE_DEBUG, " used chunk: %d items allocated",
         chunk->n_allocated);
  }

  ilog(LOG_TYPE_DEBUG, "%llu/%llu bytes in %d partially full chunks",
       bu, ba, n_used);
  bytes_used += bu;
  bytes_allocated += ba;
  bu = ba = 0;

  for (chunk = pool->full_chunks; chunk; chunk = chunk->next) {
    ++n_full;
    bu += chunk->n_allocated * pool->item_alloc_size;
    ba += chunk->mem_size;
  }

  ilog(LOG_TYPE_DEBUG, "%llu/%llu bytes in %d full chunks",
       bu, ba, n_full);
  bytes_used += bu;
  bytes_allocated += ba;

  ilog(LOG_TYPE_DEBUG, "Total: %llu/%llu bytes in this pool are in use.",
       bytes_used, bytes_allocated);

#ifdef MEMPOOL_STATS
  ilog(LOG_TYPE_DEBUG, "%llu item allocations ever; "
       "%llu chunk allocations ever; "
       "%llu chunk frees ever.",
       pool->total_items_allocated,
       pool->total_chunks_allocated,
       pool->total_chunks_freed);
#endif
}

Properties

svn:eol-style = native
svn:keywords = Id Revision