root/svn/ircd-hybrid/trunk/src/mempool.c
Revision: 1967
Committed: Wed May 8 14:33:22 2013 UTC by michael
Content type: text/x-csrc
File size: 21339 byte(s)
Log Message:
- Print chunk capacity to debug.log instead of ircd.log

File Contents

/*
 * Copyright (c) 2007-2012, The Tor Project, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following disclaimer
 *     in the documentation and/or other materials provided with the
 *     distribution.
 *
 *   * Neither the names of the copyright owners nor the names of its
 *     contributors may be used to endorse or promote products derived from
 *     this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*! \file mempool.c
 * \brief A pooling allocator
 * \version $Id$
 */

#include "stdinc.h"
#include "memory.h"
#include "event.h"
#include "log.h"
#include "mempool.h"

/** Returns floor(log2(u64)). If u64 is 0, (incorrectly) returns 0. */
static int
tor_log2(uint64_t u64)
{
  int r = 0;

  if (u64 >= (1LLU << 32))
  {
    u64 >>= 32;
    r = 32;
  }
  if (u64 >= (1LLU << 16))
  {
    u64 >>= 16;
    r += 16;
  }
  if (u64 >= (1LLU << 8))
  {
    u64 >>= 8;
    r += 8;
  }
  if (u64 >= (1LLU << 4))
  {
    u64 >>= 4;
    r += 4;
  }
  if (u64 >= (1LLU << 2))
  {
    u64 >>= 2;
    r += 2;
  }
  if (u64 >= (1LLU << 1))
  {
    u64 >>= 1;
    r += 1;
  }

  return r;
}
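
/*
 * For example, tor_log2() peels off successively smaller power-of-two
 * thresholds, so:
 *
 *   tor_log2(1)          == 0
 *   tor_log2(32)         == 5
 *   tor_log2(33)         == 5    (truncates; never rounds up)
 *   tor_log2(UINT64_MAX) == 63
 */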

/** Return the power of 2 in range [1,UINT64_MAX] closest to <b>u64</b>. If
 * there are two powers of 2 equally close, round down. */
static uint64_t
round_to_power_of_2(uint64_t u64)
{
  int lg2;
  uint64_t low;
  uint64_t high;

  if (u64 == 0)
    return 1;

  lg2 = tor_log2(u64);
  low = 1LLU << lg2;

  if (lg2 == 63)
    return low;

  high = 1LLU << (lg2 + 1);
  if (high - u64 < u64 - low)
    return high;
  else
    return low;
}
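
/*
 * For example (ties round down, as the comment above says):
 *
 *   round_to_power_of_2(0) == 1
 *   round_to_power_of_2(3) == 2    (3 is equidistant from 2 and 4)
 *   round_to_power_of_2(5) == 4
 *   round_to_power_of_2(7) == 8
 */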

/* OVERVIEW:
 *
 *     This is an implementation of memory pools for Tor cells. It may be
 *     useful for you too.
 *
 *     Generally, a memory pool is an allocation strategy optimized for large
 *     numbers of identically-sized objects. Rather than the elaborate arena
 *     and coalescing strategies you need to get good performance for a
 *     general-purpose malloc(), pools use a series of large memory "chunks",
 *     each of which is carved into a bunch of smaller "items" or
 *     "allocations".
 *
 *     To get decent performance, you need to:
 *          - Minimize the number of times you hit the underlying allocator.
 *          - Try to keep accesses as local in memory as possible.
 *          - Try to keep the common case fast.
 *
 *     Our implementation uses three lists of chunks per pool. Each chunk can
 *     be either "full" (no more room for items); "empty" (no items); or
 *     "used" (not full, not empty). There are independent doubly-linked
 *     lists for each state.
 *
 * CREDIT:
 *
 *     I wrote this after looking at 3 or 4 other pooling allocators, but
 *     without copying. The strategy this most resembles (which is funny,
 *     since that's the one I looked at longest ago) is the pool allocator
 *     underlying Python's obmalloc code. Major differences from obmalloc's
 *     pools are:
 *       - We don't even try to be threadsafe.
 *       - We only handle objects of one size.
 *       - Our list of empty chunks is doubly-linked, not singly-linked.
 *         (This could change pretty easily; it's only doubly-linked for
 *         consistency.)
 *       - We keep a list of full chunks (so we can have a "nuke everything"
 *         function). Obmalloc's pools leave full chunks to float unanchored.
 *
 * LIMITATIONS:
 *   - Not even slightly threadsafe.
 *   - Likes to have lots of items per chunk.
 *   - One pointer overhead per allocated thing. (The alternative is
 *     something like glib's use of an RB-tree to keep track of what
 *     chunk any given piece of memory is in.)
 *   - Only aligns allocated things to void* level: redefine ALIGNMENT_TYPE
 *     if you need doubles.
 *   - Could probably be optimized a bit; the representation contains
 *     a bit more info than it really needs to have.
 */
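
/*
 * Rough usage sketch (illustrative only; "struct my_item" is a hypothetical
 * caller type, but mp_pool_new(), mp_pool_get() and mp_pool_release() are
 * the public entry points defined in this file):
 *
 *   mp_pool_t *pool = mp_pool_new(sizeof(struct my_item), 64 * 1024);
 *   struct my_item *item = mp_pool_get(pool);
 *   ...use item...
 *   mp_pool_release(item);
 *
 * mp_pool_init() schedules mp_pool_garbage_collect(), which periodically
 * hands long-empty chunks back to the system via mp_pool_clean().
 */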

/* Tuning parameters */
/** Largest type that we need to ensure returned memory items are aligned to.
 * Change this to "double" if we need to be safe for structs with doubles. */
#define ALIGNMENT_TYPE void *
/** Increment to which we need to align allocated items. */
#define ALIGNMENT sizeof(ALIGNMENT_TYPE)
/** Largest memory chunk that we should allocate. */
#define MAX_CHUNK (8 * (1L << 20))
/** Smallest memory chunk size that we should allocate. */
#define MIN_CHUNK 4096

typedef struct mp_allocated_t mp_allocated_t;
typedef struct mp_chunk_t mp_chunk_t;

/** Holds a single allocated item, allocated as part of a chunk. */
struct mp_allocated_t {
  /** The chunk that this item is allocated in. This adds overhead to each
   * allocated item, thus making this implementation inappropriate for
   * very small items. */
  mp_chunk_t *in_chunk;

  union {
    /** If this item is free, the next item on the free list. */
    mp_allocated_t *next_free;

    /** If this item is not free, the actual memory contents of this item.
     * (Not actual size.) */
    char mem[1];

    /** An extra element to the union to ensure correct alignment. */
    ALIGNMENT_TYPE dummy_;
  } u;
};

/** 'Magic' value used to detect memory corruption. */
#define MP_CHUNK_MAGIC 0x09870123

/** A chunk of memory. Chunks come from malloc; we use them to store items. */
struct mp_chunk_t {
  uint32_t magic; /**< Must be MP_CHUNK_MAGIC if this chunk is valid. */
  mp_chunk_t *next; /**< The next free, used, or full chunk in sequence. */
  mp_chunk_t *prev; /**< The previous free, used, or full chunk in sequence. */
  mp_pool_t *pool; /**< The pool that this chunk is part of. */

  /** First free item in the freelist for this chunk. Note that this may be
   * NULL even if this chunk is not at capacity: if so, the free memory at
   * next_mem has not yet been carved into items.
   */
  mp_allocated_t *first_free;
  int n_allocated; /**< Number of currently allocated items in this chunk. */
  int capacity; /**< Number of items that can be fit into this chunk. */
  size_t mem_size; /**< Number of usable bytes in mem. */
  char *next_mem; /**< Pointer into part of <b>mem</b> not yet carved up. */
  char mem[]; /**< Storage for this chunk. */
};

static mp_pool_t *mp_allocated_pools = NULL;

/** Number of extra bytes needed beyond mem_size to allocate a chunk. */
#define CHUNK_OVERHEAD offsetof(mp_chunk_t, mem[0])

/** Given a pointer to a mp_allocated_t, return a pointer to the memory
 * item it holds. */
#define A2M(a) (&(a)->u.mem)
/** Given a pointer to a memory item, return a pointer to its enclosing
 * mp_allocated_t. */
#define M2A(p) (((char *)p) - offsetof(mp_allocated_t, u.mem))
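
/*
 * In other words, every item handed out by mp_pool_get() is the u.mem field
 * of an mp_allocated_t, preceded in memory by its in_chunk back-pointer, so
 * that for any such item pointer p:
 *
 *   M2A(p)       recovers the enclosing mp_allocated_t, and
 *   A2M(M2A(p))  points back at p.
 *
 * This is what lets mp_pool_release() find the owning chunk from nothing
 * but the item pointer.
 */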

void
mp_pool_init(void)
{
  eventAdd("mp_pool_garbage_collect", &mp_pool_garbage_collect, NULL, 119);
}

/** Helper: Allocate and return a new memory chunk for <b>pool</b>. Does not
 * link the chunk into any list. */
static mp_chunk_t *
mp_chunk_new(mp_pool_t *pool)
{
  size_t sz = pool->new_chunk_capacity * pool->item_alloc_size;
  mp_chunk_t *chunk = MyMalloc(CHUNK_OVERHEAD + sz);

#ifdef MEMPOOL_STATS
  ++pool->total_chunks_allocated;
#endif
  chunk->magic = MP_CHUNK_MAGIC;
  chunk->capacity = pool->new_chunk_capacity;
  chunk->mem_size = sz;
  chunk->next_mem = chunk->mem;
  chunk->pool = pool;
  return chunk;
}

/** Take a <b>chunk</b> that has just been allocated or removed from
 * <b>pool</b>'s empty chunk list, and add it to the head of the used chunk
 * list. */
static void
add_newly_used_chunk_to_used_list(mp_pool_t *pool, mp_chunk_t *chunk)
{
  chunk->next = pool->used_chunks;
  if (chunk->next)
    chunk->next->prev = chunk;
  pool->used_chunks = chunk;
  assert(!chunk->prev);
}

/** Return a newly allocated item from <b>pool</b>. */
void *
mp_pool_get(mp_pool_t *pool)
{
  mp_chunk_t *chunk;
  mp_allocated_t *allocated;

  if (pool->used_chunks != NULL) {
    /*
     * Common case: there is some chunk that is neither full nor empty. Use
     * that one. (We can't use the full ones, obviously, and we should fill
     * up the used ones before we start on any empty ones.)
     */
    chunk = pool->used_chunks;

  } else if (pool->empty_chunks) {
    /*
     * We have no used chunks, but we have an empty chunk that we haven't
     * freed yet: use that. (We pull from the front of the list, which should
     * get us the most recently emptied chunk.)
     */
    chunk = pool->empty_chunks;

    /* Remove the chunk from the empty list. */
    pool->empty_chunks = chunk->next;
    if (chunk->next)
      chunk->next->prev = NULL;

    /* Put the chunk on the 'used' list. */
    add_newly_used_chunk_to_used_list(pool, chunk);

    assert(!chunk->prev);
    --pool->n_empty_chunks;
    if (pool->n_empty_chunks < pool->min_empty_chunks)
      pool->min_empty_chunks = pool->n_empty_chunks;
  } else {
    /* We have no used or empty chunks: allocate a new chunk. */
    chunk = mp_chunk_new(pool);

    /* Add the new chunk to the used list. */
    add_newly_used_chunk_to_used_list(pool, chunk);
  }

  assert(chunk->n_allocated < chunk->capacity);

  if (chunk->first_free) {
    /* If there's anything on the chunk's freelist, unlink it and use it. */
    allocated = chunk->first_free;
    chunk->first_free = allocated->u.next_free;
    allocated->u.next_free = NULL; /* For debugging; not really needed. */
    assert(allocated->in_chunk == chunk);
  } else {
    /* Otherwise, the chunk had better have some free space left on it. */
    assert(chunk->next_mem + pool->item_alloc_size <=
           chunk->mem + chunk->mem_size);

    /* Good, it did. Let's carve off a bit of that free space, and use
     * that. */
    allocated = (void *)chunk->next_mem;
    chunk->next_mem += pool->item_alloc_size;
    allocated->in_chunk = chunk;
    allocated->u.next_free = NULL; /* For debugging; not really needed. */
  }

  ++chunk->n_allocated;
#ifdef MEMPOOL_STATS
  ++pool->total_items_allocated;
#endif

  if (chunk->n_allocated == chunk->capacity) {
    /* This chunk just became full. */
    assert(chunk == pool->used_chunks);
    assert(chunk->prev == NULL);

    /* Take it off the used list. */
    pool->used_chunks = chunk->next;
    if (chunk->next)
      chunk->next->prev = NULL;

    /* Put it on the full list. */
    chunk->next = pool->full_chunks;
    if (chunk->next)
      chunk->next->prev = chunk;
    pool->full_chunks = chunk;
  }
  /* And return the memory portion of the mp_allocated_t. */
  return A2M(allocated);
}

/** Return an allocated memory item to its memory pool. */
void
mp_pool_release(void *item)
{
  mp_allocated_t *allocated = (void *)M2A(item);
  mp_chunk_t *chunk = allocated->in_chunk;

  assert(chunk);
  assert(chunk->magic == MP_CHUNK_MAGIC);
  assert(chunk->n_allocated > 0);

  allocated->u.next_free = chunk->first_free;
  chunk->first_free = allocated;

  if (chunk->n_allocated == chunk->capacity) {
    /* This chunk was full and is about to become partially used. */
    mp_pool_t *pool = chunk->pool;

    /* Unlink from the full list. */
    if (chunk->prev)
      chunk->prev->next = chunk->next;
    if (chunk->next)
      chunk->next->prev = chunk->prev;
    if (chunk == pool->full_chunks)
      pool->full_chunks = chunk->next;

    /* Link to the used list. */
    chunk->next = pool->used_chunks;
    chunk->prev = NULL;
    if (chunk->next)
      chunk->next->prev = chunk;
    pool->used_chunks = chunk;
  } else if (chunk->n_allocated == 1) {
    /* This chunk was used and is about to become empty. */
    mp_pool_t *pool = chunk->pool;

    /* Unlink from the used list. */
    if (chunk->prev)
      chunk->prev->next = chunk->next;
    if (chunk->next)
      chunk->next->prev = chunk->prev;
    if (chunk == pool->used_chunks)
      pool->used_chunks = chunk->next;

    /* Link to the empty list. */
    chunk->next = pool->empty_chunks;
    chunk->prev = NULL;
    if (chunk->next)
      chunk->next->prev = chunk;
    pool->empty_chunks = chunk;

    /* Reset the guts of this chunk to defragment it, in case it gets
     * used again. */
    chunk->first_free = NULL;
    chunk->next_mem = chunk->mem;

    ++pool->n_empty_chunks;
  }

  --chunk->n_allocated;
}

/** Allocate a new memory pool to hold items of size <b>item_size</b>. We'll
 * try to fit about <b>chunk_capacity</b> bytes in each chunk. */
mp_pool_t *
mp_pool_new(size_t item_size, size_t chunk_capacity)
{
  mp_pool_t *pool;
  size_t alloc_size, new_chunk_cap;

  /* assert(item_size < SIZE_T_CEILING);
     assert(chunk_capacity < SIZE_T_CEILING);
     assert(SIZE_T_CEILING / item_size > chunk_capacity);
   */
  pool = MyMalloc(sizeof(mp_pool_t));
  /*
   * First, we figure out how much space to allow per item. We'll want to
   * make sure we have enough for the overhead plus the item size.
   */
  alloc_size = (size_t)(offsetof(mp_allocated_t, u.mem) + item_size);
  /*
   * If the item_size is less than sizeof(next_free), we need to make
   * the allocation bigger.
   */
  if (alloc_size < sizeof(mp_allocated_t))
    alloc_size = sizeof(mp_allocated_t);

  /* If we're not an even multiple of ALIGNMENT, round up. */
  if (alloc_size % ALIGNMENT) {
    alloc_size = alloc_size + ALIGNMENT - (alloc_size % ALIGNMENT);
  }
  if (alloc_size < ALIGNMENT)
    alloc_size = ALIGNMENT;
  assert((alloc_size % ALIGNMENT) == 0);

  /*
   * Now we figure out how many items fit in each chunk. We need to fit at
   * least 2 items per chunk. No chunk can be more than MAX_CHUNK bytes long,
   * or less than MIN_CHUNK.
   */
  if (chunk_capacity > MAX_CHUNK)
    chunk_capacity = MAX_CHUNK;

  /*
   * Try to be around a power of 2 in size, since that's what allocators like
   * handing out. 512K-1 byte is a lot better than 512K+1 byte.
   */
  chunk_capacity = (size_t)round_to_power_of_2(chunk_capacity);
  while (chunk_capacity < alloc_size * 2 + CHUNK_OVERHEAD)
    chunk_capacity *= 2;
  if (chunk_capacity < MIN_CHUNK)
    chunk_capacity = MIN_CHUNK;

  new_chunk_cap = (chunk_capacity - CHUNK_OVERHEAD) / alloc_size;
  assert(new_chunk_cap < INT_MAX);
  pool->new_chunk_capacity = (int)new_chunk_cap;

  pool->item_alloc_size = alloc_size;

  pool->next = mp_allocated_pools;
  mp_allocated_pools = pool;

  ilog(LOG_TYPE_DEBUG, "Capacity is %lu, item size is %lu, alloc size is %lu",
       (unsigned long)pool->new_chunk_capacity,
       (unsigned long)pool->item_alloc_size,
       (unsigned long)(pool->new_chunk_capacity * pool->item_alloc_size));

  return pool;
}
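
/*
 * Worked example of the sizing math above. The concrete numbers assume a
 * typical LP64 build where pointers are 8 bytes (so ALIGNMENT is 8),
 * offsetof(mp_allocated_t, u.mem) is 8, and CHUNK_OVERHEAD is 64; the real
 * values depend on the platform's struct layout.
 *
 *   mp_pool_new(512, 256 * 1024);
 *
 *   alloc_size     = 8 + 512 = 520            (already a multiple of 8)
 *   chunk_capacity = 262144                   (already a power of 2, > MIN_CHUNK)
 *   new_chunk_cap  = (262144 - 64) / 520 = 504 items per chunk
 */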

/** Helper function for qsort: used to sort pointers to mp_chunk_t into
 * descending order of fullness. */
static int
mp_pool_sort_used_chunks_helper(const void *_a, const void *_b)
{
  mp_chunk_t *a = *(mp_chunk_t * const *)_a;
  mp_chunk_t *b = *(mp_chunk_t * const *)_b;
  return b->n_allocated - a->n_allocated;
}

/** Sort the used chunks in <b>pool</b> into descending order of fullness,
 * so that we preferentially fill up mostly full chunks before we make
 * nearly empty chunks less nearly empty. */
static void
mp_pool_sort_used_chunks(mp_pool_t *pool)
{
  int i, n = 0, inverted = 0;
  mp_chunk_t **chunks, *chunk;

  for (chunk = pool->used_chunks; chunk; chunk = chunk->next) {
    ++n;
    if (chunk->next && chunk->next->n_allocated > chunk->n_allocated)
      ++inverted;
  }

  if (!inverted)
    return;

  chunks = MyMalloc(sizeof(mp_chunk_t *) * n);

  for (i = 0, chunk = pool->used_chunks; chunk; chunk = chunk->next)
    chunks[i++] = chunk;

  qsort(chunks, n, sizeof(mp_chunk_t *), mp_pool_sort_used_chunks_helper);
  pool->used_chunks = chunks[0];
  chunks[0]->prev = NULL;

  for (i = 1; i < n; ++i) {
    chunks[i - 1]->next = chunks[i];
    chunks[i]->prev = chunks[i - 1];
  }

  chunks[n - 1]->next = NULL;
  MyFree(chunks);
  mp_pool_assert_ok(pool);
}

/** If there are more than <b>n_to_keep</b> empty chunks in <b>pool</b>, free
 * the excess ones that have been empty for the longest. If
 * <b>keep_recently_used</b> is true, do not free chunks unless they have been
 * empty since the last call to this function.
 **/
void
mp_pool_clean(mp_pool_t *pool, int n_to_keep, int keep_recently_used)
{
  mp_chunk_t *chunk, **first_to_free;

  mp_pool_sort_used_chunks(pool);
  assert(n_to_keep >= 0);

  if (keep_recently_used) {
    int n_recently_used = pool->n_empty_chunks - pool->min_empty_chunks;
    if (n_to_keep < n_recently_used)
      n_to_keep = n_recently_used;
  }

  assert(n_to_keep >= 0);

  first_to_free = &pool->empty_chunks;
  while (*first_to_free && n_to_keep > 0) {
    first_to_free = &(*first_to_free)->next;
    --n_to_keep;
  }
  if (!*first_to_free) {
    pool->min_empty_chunks = pool->n_empty_chunks;
    return;
  }

  chunk = *first_to_free;
  while (chunk) {
    mp_chunk_t *next = chunk->next;
    chunk->magic = 0xdeadbeef;
    MyFree(chunk);
#ifdef MEMPOOL_STATS
    ++pool->total_chunks_freed;
#endif
    --pool->n_empty_chunks;
    chunk = next;
  }

  pool->min_empty_chunks = pool->n_empty_chunks;
  *first_to_free = NULL;
}
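
/*
 * For instance, the periodic garbage collector below calls
 *
 *   mp_pool_clean(pool, 0, 1);
 *
 * i.e. keep only as many empty chunks as were emptied since the previous
 * sweep, so a chunk is freed only after it has stayed empty for a full
 * interval.
 */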

/** Helper: Given a list of chunks, free all the chunks in the list. */
static void
destroy_chunks(mp_chunk_t *chunk)
{
  mp_chunk_t *next;

  while (chunk) {
    chunk->magic = 0xd3adb33f;
    next = chunk->next;
    MyFree(chunk);
    chunk = next;
  }
}

/** Helper: make sure that a given chunk list is not corrupt. */
static int
assert_chunks_ok(mp_pool_t *pool, mp_chunk_t *chunk, int empty, int full)
{
  mp_allocated_t *allocated;
  int n = 0;

  if (chunk)
    assert(chunk->prev == NULL);

  while (chunk) {
    n++;
    assert(chunk->magic == MP_CHUNK_MAGIC);
    assert(chunk->pool == pool);
    for (allocated = chunk->first_free; allocated;
         allocated = allocated->u.next_free) {
      assert(allocated->in_chunk == chunk);
    }
    if (empty)
      assert(chunk->n_allocated == 0);
    else if (full)
      assert(chunk->n_allocated == chunk->capacity);
    else
      assert(chunk->n_allocated > 0 && chunk->n_allocated < chunk->capacity);

    assert(chunk->capacity == pool->new_chunk_capacity);

    assert(chunk->mem_size ==
           pool->new_chunk_capacity * pool->item_alloc_size);

    assert(chunk->next_mem >= chunk->mem &&
           chunk->next_mem <= chunk->mem + chunk->mem_size);

    if (chunk->next)
      assert(chunk->next->prev == chunk);

    chunk = chunk->next;
  }

  return n;
}

/** Fail with an assertion if <b>pool</b> is not internally consistent. */
void
mp_pool_assert_ok(mp_pool_t *pool)
{
  int n_empty;

  n_empty = assert_chunks_ok(pool, pool->empty_chunks, 1, 0);
  assert_chunks_ok(pool, pool->full_chunks, 0, 1);
  assert_chunks_ok(pool, pool->used_chunks, 0, 0);

  assert(pool->n_empty_chunks == n_empty);
}

void
mp_pool_garbage_collect(void *arg)
{
  mp_pool_t *pool = mp_allocated_pools;

  for (; pool; pool = pool->next)
    mp_pool_clean(pool, 0, 1);
}

/** Dump information about <b>pool</b>'s memory usage to the debug log. */
void
mp_pool_log_status(mp_pool_t *pool)
{
  uint64_t bytes_used = 0;
  uint64_t bytes_allocated = 0;
  uint64_t bu = 0, ba = 0;
  mp_chunk_t *chunk;
  int n_full = 0, n_used = 0;

  assert(pool);

  for (chunk = pool->empty_chunks; chunk; chunk = chunk->next)
    bytes_allocated += chunk->mem_size;

  ilog(LOG_TYPE_DEBUG, "%llu bytes in %d empty chunks",
       bytes_allocated, pool->n_empty_chunks);
  for (chunk = pool->used_chunks; chunk; chunk = chunk->next) {
    ++n_used;
    bu += chunk->n_allocated * pool->item_alloc_size;
    ba += chunk->mem_size;

    ilog(LOG_TYPE_DEBUG, " used chunk: %d items allocated",
         chunk->n_allocated);
  }

  ilog(LOG_TYPE_DEBUG, "%llu/%llu bytes in %d partially full chunks",
       bu, ba, n_used);
  bytes_used += bu;
  bytes_allocated += ba;
  bu = ba = 0;

  for (chunk = pool->full_chunks; chunk; chunk = chunk->next) {
    ++n_full;
    bu += chunk->n_allocated * pool->item_alloc_size;
    ba += chunk->mem_size;
  }

  ilog(LOG_TYPE_DEBUG, "%llu/%llu bytes in %d full chunks",
       bu, ba, n_full);
  bytes_used += bu;
  bytes_allocated += ba;

  ilog(LOG_TYPE_DEBUG, "Total: %llu/%llu bytes allocated "
       "for cell pools are full.",
       bytes_used, bytes_allocated);

#ifdef MEMPOOL_STATS
  ilog(LOG_TYPE_DEBUG, "%llu cell allocations ever; "
       "%llu chunk allocations ever; "
       "%llu chunk frees ever.",
       pool->total_items_allocated,
       pool->total_chunks_allocated,
       pool->total_chunks_freed);
#endif
}

Properties

Name Value
svn:eol-style native
svn:keywords Id Revision