/[svn]/branches/newio/src/mempool.c

Revision 2408 - Thu Jul 18 19:57:58 2013 UTC by michael
File MIME type: text/x-chdr
File size: 21412 byte(s)
- ioengine changes as of 18JUL13

/*
 * Copyright (c) 2007-2012, The Tor Project, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *    * Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *    * Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following disclaimer
 *      in the documentation and/or other materials provided with the
 *      distribution.
 *
 *    * Neither the names of the copyright owners nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*! \file mempool.c
 * \brief A pooling allocator
 * \version $Id$
 */

#include "stdinc.h"
#include "memory.h"
#include "ioengine.h"
#include "log.h"
#include "mempool.h"

/** Returns floor(log2(u64)). If u64 is 0, (incorrectly) returns 0. */
static int
tor_log2(uint64_t u64)
{
  int r = 0;

  if (u64 >= (1LLU << 32))
  {
    u64 >>= 32;
    r = 32;
  }
  if (u64 >= (1LLU << 16))
  {
    u64 >>= 16;
    r += 16;
  }
  if (u64 >= (1LLU << 8))
  {
    u64 >>= 8;
    r += 8;
  }
  if (u64 >= (1LLU << 4))
  {
    u64 >>= 4;
    r += 4;
  }
  if (u64 >= (1LLU << 2))
  {
    u64 >>= 2;
    r += 2;
  }
  if (u64 >= (1LLU << 1))
  {
    u64 >>= 1;
    r += 1;
  }

  return r;
}

/** Return the power of 2 in range [1,UINT64_MAX] closest to <b>u64</b>. If
 * there are two powers of 2 equally close, round down. */
static uint64_t
round_to_power_of_2(uint64_t u64)
{
  int lg2;
  uint64_t low;
  uint64_t high;

  if (u64 == 0)
    return 1;

  lg2 = tor_log2(u64);
  low = 1LLU << lg2;

  if (lg2 == 63)
    return low;

  high = 1LLU << (lg2 + 1);
  if (high - u64 < u64 - low)
    return high;
  else
    return low;
}
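
/* For example, round_to_power_of_2(65) is 64, since 64 is closer than 128;
 * round_to_power_of_2(768) is 512, since 512 and 1024 are equally close and
 * ties round down as documented above. */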

/* OVERVIEW:
 *
 * This is an implementation of memory pools for Tor cells. It may be
 * useful for you too.
 *
 * Generally, a memory pool is an allocation strategy optimized for large
 * numbers of identically-sized objects. Rather than the elaborate arena
 * and coalescing strategies you need to get good performance for a
 * general-purpose malloc(), pools use a series of large memory "chunks",
 * each of which is carved into a bunch of smaller "items" or
 * "allocations".
 *
 * To get decent performance, you need to:
 *   - Minimize the number of times you hit the underlying allocator.
 *   - Try to keep accesses as local in memory as possible.
 *   - Try to keep the common case fast.
 *
 * Our implementation uses three lists of chunks per pool. Each chunk can
 * be either "full" (no more room for items); "empty" (no items); or
 * "used" (not full, not empty). There are independent doubly-linked
 * lists for each state.
 *
 * CREDIT:
 *
 * I wrote this after looking at 3 or 4 other pooling allocators, but
 * without copying. The strategy this most resembles (which is funny,
 * since that's the one I looked at longest ago) is the pool allocator
 * underlying Python's obmalloc code. Major differences from obmalloc's
 * pools are:
 *   - We don't even try to be threadsafe.
 *   - We only handle objects of one size.
 *   - Our list of empty chunks is doubly-linked, not singly-linked.
 *     (This could change pretty easily; it's only doubly-linked for
 *     consistency.)
 *   - We keep a list of full chunks (so we can have a "nuke everything"
 *     function). Obmalloc's pools leave full chunks to float unanchored.
 *
 * LIMITATIONS:
 *   - Not even slightly threadsafe.
 *   - Likes to have lots of items per chunk.
 *   - One pointer overhead per allocated thing. (The alternative is
 *     something like glib's use of an RB-tree to keep track of what
 *     chunk any given piece of memory is in.)
 *   - Only aligns allocated things to void* level: redefine ALIGNMENT_TYPE
 *     if you need doubles.
 *   - Could probably be optimized a bit; the representation contains
 *     a bit more info than it really needs to have.
 */
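
/* Typical usage, as a rough sketch (struct cell here is only a hypothetical
 * caller-side type; any fixed-size object works the same way):
 *
 *   mp_pool_t *pool = mp_pool_new(sizeof(struct cell), 64 * 1024);
 *   struct cell *c = mp_pool_get(pool);   // take one item from the pool
 *   ...
 *   mp_pool_release(c);                   // hand the item back to its chunk
 *   mp_pool_clean(pool, 0, 1);            // free chunks that stayed empty
 */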

/* Tuning parameters */
/** Largest type that we need to ensure returned memory items are aligned to.
 * Change this to "double" if we need to be safe for structs with doubles. */
#define ALIGNMENT_TYPE void *
/** Increment to which allocated memory must be aligned. */
#define ALIGNMENT sizeof(ALIGNMENT_TYPE)
/** Largest memory chunk that we should allocate. */
#define MAX_CHUNK (8 * (1L << 20))
/** Smallest memory chunk size that we should allocate. */
#define MIN_CHUNK 4096

typedef struct mp_allocated_t mp_allocated_t;
typedef struct mp_chunk_t mp_chunk_t;

/** Holds a single allocated item, allocated as part of a chunk. */
struct mp_allocated_t {
  /** The chunk that this item is allocated in. This adds overhead to each
   * allocated item, thus making this implementation inappropriate for
   * very small items. */
  mp_chunk_t *in_chunk;

  union {
    /** If this item is free, the next item on the free list. */
    mp_allocated_t *next_free;

    /** If this item is not free, the actual memory contents of this item.
     * (Not actual size.) */
    char mem[1];

    /** An extra element to the union to ensure correct alignment. */
    ALIGNMENT_TYPE dummy_;
  } u;
};

/** 'Magic' value used to detect memory corruption. */
#define MP_CHUNK_MAGIC 0x09870123

/** A chunk of memory. Chunks come from malloc; we carve them up into items. */
struct mp_chunk_t {
  uint32_t magic;    /**< Must be MP_CHUNK_MAGIC if this chunk is valid. */
  mp_chunk_t *next;  /**< The next free, used, or full chunk in sequence. */
  mp_chunk_t *prev;  /**< The previous free, used, or full chunk in sequence. */
  mp_pool_t *pool;   /**< The pool that this chunk is part of. */

  /** First free item in the freelist for this chunk. Note that this may be
   * NULL even if this chunk is not at capacity: if so, the free memory at
   * next_mem has not yet been carved into items.
   */
  mp_allocated_t *first_free;
  int n_allocated;   /**< Number of currently allocated items in this chunk. */
  int capacity;      /**< Number of items that can be fit into this chunk. */
  size_t mem_size;   /**< Number of usable bytes in mem. */
  char *next_mem;    /**< Pointer into part of <b>mem</b> not yet carved up. */
  char mem[];        /**< Storage for this chunk. */
};


static struct Timer garbage_collect_timer;
static mp_pool_t *mp_allocated_pools = NULL;

/** Number of extra bytes needed beyond mem_size to allocate a chunk. */
#define CHUNK_OVERHEAD offsetof(mp_chunk_t, mem[0])

/** Given a pointer to a mp_allocated_t, return a pointer to the memory
 * item it holds. */
#define A2M(a) (&(a)->u.mem)
/** Given a pointer to a memory item, return a pointer to its enclosing
 * mp_allocated_t. */
#define M2A(p) (((char *)p) - offsetof(mp_allocated_t, u.mem))

void
mp_pool_init(void)
{
  timer_add(timer_init(&garbage_collect_timer), mp_pool_garbage_collect, 0, TT_PERIODIC, 120);
}

/** Helper: Allocate and return a new memory chunk for <b>pool</b>. Does not
 * link the chunk into any list. */
static mp_chunk_t *
mp_chunk_new(mp_pool_t *pool)
{
  size_t sz = pool->new_chunk_capacity * pool->item_alloc_size;
  mp_chunk_t *chunk = MyMalloc(CHUNK_OVERHEAD + sz);

#ifdef MEMPOOL_STATS
  ++pool->total_chunks_allocated;
#endif
  chunk->magic = MP_CHUNK_MAGIC;
  chunk->capacity = pool->new_chunk_capacity;
  chunk->mem_size = sz;
  chunk->next_mem = chunk->mem;
  chunk->pool = pool;
  return chunk;
}

/** Take a <b>chunk</b> that has just been allocated or removed from
 * <b>pool</b>'s empty chunk list, and add it to the head of the used chunk
 * list. */
static void
add_newly_used_chunk_to_used_list(mp_pool_t *pool, mp_chunk_t *chunk)
{
  chunk->next = pool->used_chunks;
  if (chunk->next)
    chunk->next->prev = chunk;
  pool->used_chunks = chunk;
  assert(!chunk->prev);
}

/** Return a newly allocated item from <b>pool</b>. */
void *
mp_pool_get(mp_pool_t *pool)
{
  mp_chunk_t *chunk;
  mp_allocated_t *allocated;

  if (pool->used_chunks != NULL) {
    /*
     * Common case: there is some chunk that is neither full nor empty. Use
     * that one. (We can't use the full ones, obviously, and we should fill
     * up the used ones before we start on any empty ones.)
     */
    chunk = pool->used_chunks;

  } else if (pool->empty_chunks) {
    /*
     * We have no used chunks, but we have an empty chunk that we haven't
     * freed yet: use that. (We pull from the front of the list, which should
     * get us the most recently emptied chunk.)
     */
    chunk = pool->empty_chunks;

    /* Remove the chunk from the empty list. */
    pool->empty_chunks = chunk->next;
    if (chunk->next)
      chunk->next->prev = NULL;

    /* Put the chunk on the 'used' list. */
    add_newly_used_chunk_to_used_list(pool, chunk);

    assert(!chunk->prev);
    --pool->n_empty_chunks;
    if (pool->n_empty_chunks < pool->min_empty_chunks)
      pool->min_empty_chunks = pool->n_empty_chunks;
  } else {
    /* We have no used or empty chunks: allocate a new chunk. */
    chunk = mp_chunk_new(pool);

    /* Add the new chunk to the used list. */
    add_newly_used_chunk_to_used_list(pool, chunk);
  }

  assert(chunk->n_allocated < chunk->capacity);

  if (chunk->first_free) {
    /* If there's anything on the chunk's freelist, unlink it and use it. */
    allocated = chunk->first_free;
    chunk->first_free = allocated->u.next_free;
    allocated->u.next_free = NULL; /* For debugging; not really needed. */
    assert(allocated->in_chunk == chunk);
  } else {
    /* Otherwise, the chunk had better have some free space left on it. */
    assert(chunk->next_mem + pool->item_alloc_size <=
           chunk->mem + chunk->mem_size);

    /* Good, it did. Let's carve off a bit of that free space, and use
     * that. */
    allocated = (void *)chunk->next_mem;
    chunk->next_mem += pool->item_alloc_size;
    allocated->in_chunk = chunk;
    allocated->u.next_free = NULL; /* For debugging; not really needed. */
  }

  ++chunk->n_allocated;
#ifdef MEMPOOL_STATS
  ++pool->total_items_allocated;
#endif

  if (chunk->n_allocated == chunk->capacity) {
    /* This chunk just became full. */
    assert(chunk == pool->used_chunks);
    assert(chunk->prev == NULL);

    /* Take it off the used list. */
    pool->used_chunks = chunk->next;
    if (chunk->next)
      chunk->next->prev = NULL;

    /* Put it on the full list. */
    chunk->next = pool->full_chunks;
    if (chunk->next)
      chunk->next->prev = chunk;
    pool->full_chunks = chunk;
  }
  /* And return the memory portion of the mp_allocated_t. */
  return A2M(allocated);
}

/** Return an allocated memory item to its memory pool. */
void
mp_pool_release(void *item)
{
  mp_allocated_t *allocated = (void *)M2A(item);
  mp_chunk_t *chunk = allocated->in_chunk;

  assert(chunk);
  assert(chunk->magic == MP_CHUNK_MAGIC);
  assert(chunk->n_allocated > 0);

  allocated->u.next_free = chunk->first_free;
  chunk->first_free = allocated;

  if (chunk->n_allocated == chunk->capacity) {
    /* This chunk was full and is about to be used. */
    mp_pool_t *pool = chunk->pool;
    /* unlink from the full list */
    if (chunk->prev)
      chunk->prev->next = chunk->next;
    if (chunk->next)
      chunk->next->prev = chunk->prev;
    if (chunk == pool->full_chunks)
      pool->full_chunks = chunk->next;

    /* link to the used list. */
    chunk->next = pool->used_chunks;
    chunk->prev = NULL;
    if (chunk->next)
      chunk->next->prev = chunk;
    pool->used_chunks = chunk;
  } else if (chunk->n_allocated == 1) {
    /* This was used and is about to be empty. */
    mp_pool_t *pool = chunk->pool;

    /* Unlink from the used list */
    if (chunk->prev)
      chunk->prev->next = chunk->next;
    if (chunk->next)
      chunk->next->prev = chunk->prev;
    if (chunk == pool->used_chunks)
      pool->used_chunks = chunk->next;

    /* Link to the empty list */
    chunk->next = pool->empty_chunks;
    chunk->prev = NULL;
    if (chunk->next)
      chunk->next->prev = chunk;
    pool->empty_chunks = chunk;

    /* Reset the guts of this chunk to defragment it, in case it gets
     * used again. */
    chunk->first_free = NULL;
    chunk->next_mem = chunk->mem;

    ++pool->n_empty_chunks;
  }

  --chunk->n_allocated;
}

/** Allocate a new memory pool to hold items of size <b>item_size</b>. We'll
 * try to fit about <b>chunk_capacity</b> bytes in each chunk. */
mp_pool_t *
mp_pool_new(size_t item_size, size_t chunk_capacity)
{
  mp_pool_t *pool;
  size_t alloc_size, new_chunk_cap;

  /* assert(item_size < SIZE_T_CEILING);
  assert(chunk_capacity < SIZE_T_CEILING);
  assert(SIZE_T_CEILING / item_size > chunk_capacity);
  */
  pool = MyMalloc(sizeof(mp_pool_t));
  /*
   * First, we figure out how much space to allow per item. We'll want to
   * make sure we have enough for the overhead plus the item size.
   */
  alloc_size = (size_t)(offsetof(mp_allocated_t, u.mem) + item_size);
  /*
   * If the item_size is less than sizeof(next_free), we need to make
   * the allocation bigger.
   */
  if (alloc_size < sizeof(mp_allocated_t))
    alloc_size = sizeof(mp_allocated_t);

  /* If we're not an even multiple of ALIGNMENT, round up. */
  if (alloc_size % ALIGNMENT) {
    alloc_size = alloc_size + ALIGNMENT - (alloc_size % ALIGNMENT);
  }
  if (alloc_size < ALIGNMENT)
    alloc_size = ALIGNMENT;
  assert((alloc_size % ALIGNMENT) == 0);

  /*
   * Now we figure out how many items fit in each chunk. We need to fit at
   * least 2 items per chunk. No chunk can be more than MAX_CHUNK bytes long,
   * or less than MIN_CHUNK.
   */
  if (chunk_capacity > MAX_CHUNK)
    chunk_capacity = MAX_CHUNK;

  /*
   * Try to be around a power of 2 in size, since that's what allocators like
   * handing out. 512K-1 byte is a lot better than 512K+1 byte.
   */
  chunk_capacity = (size_t) round_to_power_of_2(chunk_capacity);
  while (chunk_capacity < alloc_size * 2 + CHUNK_OVERHEAD)
    chunk_capacity *= 2;
  if (chunk_capacity < MIN_CHUNK)
    chunk_capacity = MIN_CHUNK;

  new_chunk_cap = (chunk_capacity - CHUNK_OVERHEAD) / alloc_size;
  assert(new_chunk_cap < INT_MAX);
  pool->new_chunk_capacity = (int)new_chunk_cap;

  pool->item_alloc_size = alloc_size;

  pool->next = mp_allocated_pools;
  mp_allocated_pools = pool;

  ilog(LOG_TYPE_DEBUG, "Capacity is %lu, item size is %lu, alloc size is %lu",
       (unsigned long)pool->new_chunk_capacity,
       (unsigned long)pool->item_alloc_size,
       (unsigned long)(pool->new_chunk_capacity * pool->item_alloc_size));

  return pool;
}
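
/* Rough worked example of the sizing above (illustrative numbers only,
 * assuming a typical 64-bit layout where the mp_allocated_t header is 8 bytes
 * and CHUNK_OVERHEAD is 64): item_size == 504 gives alloc_size == 512, which
 * is already ALIGNMENT-aligned; a requested chunk_capacity of 300000 rounds
 * down to the nearest power of two, 262144 (256 KiB), so each chunk holds
 * (262144 - 64) / 512 = 511 items. */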

/** Helper function for qsort: used to sort pointers to mp_chunk_t into
 * descending order of fullness. */
static int
mp_pool_sort_used_chunks_helper(const void *_a, const void *_b)
{
  mp_chunk_t *a = *(mp_chunk_t * const *)_a;
  mp_chunk_t *b = *(mp_chunk_t * const *)_b;
  return b->n_allocated - a->n_allocated;
}

/** Sort the used chunks in <b>pool</b> into descending order of fullness,
 * so that we preferentially fill up mostly full chunks before we make
 * nearly empty chunks less nearly empty. */
static void
mp_pool_sort_used_chunks(mp_pool_t *pool)
{
  int i, n = 0, inverted = 0;
  mp_chunk_t **chunks, *chunk;

  for (chunk = pool->used_chunks; chunk; chunk = chunk->next) {
    ++n;
    if (chunk->next && chunk->next->n_allocated > chunk->n_allocated)
      ++inverted;
  }

  if (!inverted)
    return;

  chunks = MyMalloc(sizeof(mp_chunk_t *) * n);

  for (i = 0, chunk = pool->used_chunks; chunk; chunk = chunk->next)
    chunks[i++] = chunk;

  qsort(chunks, n, sizeof(mp_chunk_t *), mp_pool_sort_used_chunks_helper);
  pool->used_chunks = chunks[0];
  chunks[0]->prev = NULL;

  for (i = 1; i < n; ++i) {
    chunks[i - 1]->next = chunks[i];
    chunks[i]->prev = chunks[i - 1];
  }

  chunks[n - 1]->next = NULL;
  MyFree(chunks);
  mp_pool_assert_ok(pool);
}

/** If there are more than <b>n_to_keep</b> empty chunks in <b>pool</b>, free
 * the excess ones that have been empty for the longest. If
 * <b>keep_recently_used</b> is true, do not free chunks unless they have been
 * empty since the last call to this function.
 **/
void
mp_pool_clean(mp_pool_t *pool, int n_to_keep, int keep_recently_used)
{
  mp_chunk_t *chunk, **first_to_free;

  mp_pool_sort_used_chunks(pool);
  assert(n_to_keep >= 0);

  if (keep_recently_used) {
    int n_recently_used = pool->n_empty_chunks - pool->min_empty_chunks;
    if (n_to_keep < n_recently_used)
      n_to_keep = n_recently_used;
  }

  assert(n_to_keep >= 0);

  first_to_free = &pool->empty_chunks;
  while (*first_to_free && n_to_keep > 0) {
    first_to_free = &(*first_to_free)->next;
    --n_to_keep;
  }
  if (!*first_to_free) {
    pool->min_empty_chunks = pool->n_empty_chunks;
    return;
  }

  chunk = *first_to_free;
  while (chunk) {
    mp_chunk_t *next = chunk->next;
    chunk->magic = 0xdeadbeef;
    MyFree(chunk);
#ifdef MEMPOOL_STATS
    ++pool->total_chunks_freed;
#endif
    --pool->n_empty_chunks;
    chunk = next;
  }

  pool->min_empty_chunks = pool->n_empty_chunks;
  *first_to_free = NULL;
}

/** Helper: Given a list of chunks, free all the chunks in the list. */
static void
destroy_chunks(mp_chunk_t *chunk)
{
  mp_chunk_t *next;

  while (chunk) {
    chunk->magic = 0xd3adb33f;
    next = chunk->next;
    MyFree(chunk);
    chunk = next;
  }
}

/** Helper: make sure that a given chunk list is not corrupt. */
static int
assert_chunks_ok(mp_pool_t *pool, mp_chunk_t *chunk, int empty, int full)
{
  mp_allocated_t *allocated;
  int n = 0;

  if (chunk)
    assert(chunk->prev == NULL);

  while (chunk) {
    n++;
    assert(chunk->magic == MP_CHUNK_MAGIC);
    assert(chunk->pool == pool);
    for (allocated = chunk->first_free; allocated;
         allocated = allocated->u.next_free) {
      assert(allocated->in_chunk == chunk);
    }
    if (empty)
      assert(chunk->n_allocated == 0);
    else if (full)
      assert(chunk->n_allocated == chunk->capacity);
    else
      assert(chunk->n_allocated > 0 && chunk->n_allocated < chunk->capacity);

    assert(chunk->capacity == pool->new_chunk_capacity);

    assert(chunk->mem_size ==
           pool->new_chunk_capacity * pool->item_alloc_size);

    assert(chunk->next_mem >= chunk->mem &&
           chunk->next_mem <= chunk->mem + chunk->mem_size);

    if (chunk->next)
      assert(chunk->next->prev == chunk);

    chunk = chunk->next;
  }

  return n;
}

/** Fail with an assertion if <b>pool</b> is not internally consistent. */
void
mp_pool_assert_ok(mp_pool_t *pool)
{
  int n_empty;

  n_empty = assert_chunks_ok(pool, pool->empty_chunks, 1, 0);
  assert_chunks_ok(pool, pool->full_chunks, 0, 1);
  assert_chunks_ok(pool, pool->used_chunks, 0, 0);

  assert(pool->n_empty_chunks == n_empty);
}

void
mp_pool_garbage_collect(struct Event *ev)
{
  mp_pool_t *pool = mp_allocated_pools;

  for (; pool; pool = pool->next)
    mp_pool_clean(pool, 0, 1);
}

/** Dump information about <b>pool</b>'s memory usage to the debug log. */
void
mp_pool_log_status(mp_pool_t *pool)
{
  uint64_t bytes_used = 0;
  uint64_t bytes_allocated = 0;
  uint64_t bu = 0, ba = 0;
  mp_chunk_t *chunk;
  int n_full = 0, n_used = 0;

  assert(pool);

  for (chunk = pool->empty_chunks; chunk; chunk = chunk->next)
    bytes_allocated += chunk->mem_size;

  ilog(LOG_TYPE_DEBUG, "%llu bytes in %d empty chunks",
       bytes_allocated, pool->n_empty_chunks);
  for (chunk = pool->used_chunks; chunk; chunk = chunk->next) {
    ++n_used;
    bu += chunk->n_allocated * pool->item_alloc_size;
    ba += chunk->mem_size;

    ilog(LOG_TYPE_DEBUG, "  used chunk: %d items allocated",
         chunk->n_allocated);
  }

  ilog(LOG_TYPE_DEBUG, "%llu/%llu bytes in %d partially full chunks",
       bu, ba, n_used);
  bytes_used += bu;
  bytes_allocated += ba;
  bu = ba = 0;

  for (chunk = pool->full_chunks; chunk; chunk = chunk->next) {
    ++n_full;
    bu += chunk->n_allocated * pool->item_alloc_size;
    ba += chunk->mem_size;
  }

  ilog(LOG_TYPE_DEBUG, "%llu/%llu bytes in %d full chunks",
       bu, ba, n_full);
  bytes_used += bu;
  bytes_allocated += ba;

  ilog(LOG_TYPE_DEBUG, "Total: %llu/%llu bytes allocated in this pool are in use.",
       bytes_used, bytes_allocated);

#ifdef MEMPOOL_STATS
  ilog(LOG_TYPE_DEBUG, "%llu cell allocations ever; "
       "%llu chunk allocations ever; "
       "%llu chunk frees ever.",
       pool->total_items_allocated,
       pool->total_chunks_allocated,
       pool->total_chunks_freed);
#endif
}

Properties

Name           Value
svn:eol-style  native
svn:keywords   Id Revision
