diff -rNu valgrind/coregrind/vg_malloc2.c valgrind-mempool/coregrind/vg_malloc2.c
--- valgrind/coregrind/vg_malloc2.c	2004-11-23 12:31:24.000000000 -0800
+++ valgrind-mempool/coregrind/vg_malloc2.c	2005-01-17 11:42:33.000000000 -0800
@@ -1384,10 +1384,9 @@
 }
 
 
-Bool VG_(addr_is_in_block)( Addr a, Addr start, SizeT size )
+Bool VG_(addr_is_in_block)( Addr a, Addr start, SizeT size, SizeT redzone )
 {
-   return (start - VG_(vg_malloc_redzone_szB) <= a
-           && a < start + size + VG_(vg_malloc_redzone_szB));
+   return (start - redzone) <= a && a < (start + size + redzone);
 }
 
 
diff -rNu valgrind/include/tool.h.base valgrind-mempool/include/tool.h.base
--- valgrind/include/tool.h.base	2005-02-01 15:17:10.000000000 -0800
+++ valgrind-mempool/include/tool.h.base	2005-02-01 15:18:20.592177254 -0800
@@ -1738,7 +1738,8 @@
 extern void  VG_(cli_free)   ( void* p );
 
 /* Check if an address is within a range, allowing for redzones at edges */
-extern Bool VG_(addr_is_in_block)( Addr a, Addr start, SizeT size );
+extern Bool VG_(addr_is_in_block)( Addr a, Addr start,
+                                   SizeT size, SizeT redzone );
 
 /* ------------------------------------------------------------------ */
 /* Some options that can be used by a tool if malloc() et al are replaced.
diff -rNu valgrind/memcheck/mac_leakcheck.c valgrind-mempool/memcheck/mac_leakcheck.c
--- valgrind/memcheck/mac_leakcheck.c	2005-01-14 18:13:57.000000000 -0800
+++ valgrind-mempool/memcheck/mac_leakcheck.c	2005-01-17 11:42:33.000000000 -0800
@@ -366,35 +366,32 @@
    return (mc1->data < mc2->data ? -1 : 1);
 }
 
-/* Top level entry point to leak detector.  Call here, passing in
-   suitable address-validating functions (see comment at top of
-   vg_scan_all_valid_memory above).  All this is to avoid duplication
-   of the leak-detection code for Memcheck and Addrcheck.
-   Also pass in a tool-specific function to extract the .where field
-   for allocated blocks, an indication of the resolution wanted for
-   distinguishing different allocation points, and whether or not
-   reachable blocks should be shown.
-*/
-void MAC_(do_detect_memory_leaks) (
-   Bool is_valid_64k_chunk ( UInt ),
-   Bool is_valid_address ( Addr )
+struct mempool_check {
+   Bool (*is_valid_64k_chunk) ( UInt );
+   Bool (*is_valid_address) ( Addr );
+   Int  blocks_leaked;
+   Int  blocks_dubious;
+   Int  blocks_reachable;
+   Int  blocks_suppressed;
+   Int  total_lossrecords;
+   Int  n_lossrecords;
+   LossRecord *errlist;
+   Int  offset;
+};
+
+static void do_detect_memory_leaks (
+   VgHashTable malloc_list,
+   struct mempool_check *mp
 )
 {
    Int    i;
-   Int    blocks_leaked;
-   Int    blocks_dubious;
-   Int    blocks_reachable;
-   Int    blocks_suppressed;
-   Int    n_lossrecords;
    UInt   bytes_notified;
-   Bool   is_suppressed;
+   static Bool already_notified = False;
 
-   LossRecord* errlist;
    LossRecord* p;
 
    /* VG_(HT_to_array) allocates storage for shadows */
-   lc_shadows = (MAC_Chunk**)VG_(HT_to_array)( MAC_(malloc_list),
-                                               &lc_n_shadows );
+   lc_shadows = (MAC_Chunk**)VG_(HT_to_array)( malloc_list, &lc_n_shadows );
 
    /* Sort the array. */
    VG_(ssort)((void*)lc_shadows, lc_n_shadows, sizeof(VgHashNode*), lc_compar);
@@ -407,23 +404,14 @@
    /* Sanity check -- make sure they don't overlap */
    for (i = 0; i < lc_n_shadows-1; i++) {
       sk_assert( lc_shadows[i]->data + lc_shadows[i]->size
-                 < lc_shadows[i+1]->data );
+                 <= lc_shadows[i+1]->data );
    }
 
    if (lc_n_shadows == 0) {
       sk_assert(lc_shadows == NULL);
-      if (VG_(clo_verbosity) >= 1) {
-         VG_(message)(Vg_UserMsg,
-                      "No malloc'd blocks -- no leaks are possible.");
-      }
       return;
    }
 
-   if (VG_(clo_verbosity) > 0)
-      VG_(message)(Vg_UserMsg,
-                   "searching for pointers to %d not-freed blocks.",
-                   lc_n_shadows );
-
    lc_min_mallocd_addr = lc_shadows[0]->data;
    lc_max_mallocd_addr = lc_shadows[lc_n_shadows-1]->data
                          + lc_shadows[lc_n_shadows-1]->size - 1;
@@ -436,55 +424,63 @@
 
    bytes_notified = sizeof(UWord)
      * vg_scan_all_valid_memory (
-         is_valid_64k_chunk,
-         is_valid_address,
+         mp->is_valid_64k_chunk,
+         mp->is_valid_address,
          &vg_detect_memory_leaks_notify_addr );
 
-   if (VG_(clo_verbosity) > 0)
+   if (!already_notified && VG_(clo_verbosity) > 0) {
+      already_notified = True;
       VG_(message)(Vg_UserMsg, "checked %d bytes.", bytes_notified);
+   }
 
   /* Common up the lost blocks so we can print sensible error messages. */
-   n_lossrecords = 0;
-   errlist       = NULL;
    for (i = 0; i < lc_n_shadows; i++) {
      ExeContext* where = lc_shadows[i]->where;
 
-     for (p = errlist; p != NULL; p = p->next) {
+     for (p = mp->errlist; p != NULL; p = p->next) {
        if (p->loss_mode == lc_reachedness[i]
           && VG_(eq_ExeContext) ( MAC_(clo_leak_resolution),
                                   p->allocated_at, where) ) {
          break;
-       }
+        }
     }
     if (p != NULL) {
       p->num_blocks  ++;
       p->total_bytes += lc_shadows[i]->size;
    } else {
-      n_lossrecords ++;
+      mp->n_lossrecords ++;
      p = VG_(malloc)(sizeof(LossRecord));
      p->loss_mode    = lc_reachedness[i];
      p->allocated_at = where;
      p->total_bytes  = lc_shadows[i]->size;
      p->num_blocks   = 1;
-     p->next         = errlist;
-     errlist         = p;
+     p->next         = mp->errlist;
+     mp->errlist     = p;
    }
  }
 
+   VG_(free) ( lc_shadows );
+   VG_(free) ( lc_reachedness );
+   mp->offset += mp->n_lossrecords;
+}
+
+static void print_memory_leaks (
+   struct mempool_check *mp
+)
+{
+   Int i;
+   Bool is_suppressed;
 
   /* Print out the commoned-up blocks and collect summary stats. */
-   blocks_leaked     = MAC_(bytes_leaked)     = 0;
-   blocks_dubious    = MAC_(bytes_dubious)    = 0;
-   blocks_reachable  = MAC_(bytes_reachable)  = 0;
-   blocks_suppressed = MAC_(bytes_suppressed) = 0;
-   for (i = 0; i < n_lossrecords; i++) {
+   for (i = 0; i < mp->n_lossrecords; i++) {
       Bool        print_record;
+      LossRecord* p;
       LossRecord* p_min = NULL;
       UInt        n_min = 0xFFFFFFFF;
-      for (p = errlist; p != NULL; p = p->next) {
+      for (p = mp->errlist; p != NULL; p = p->next) {
          if (p->num_blocks > 0 && p->total_bytes < n_min) {
             n_min = p->total_bytes;
             p_min = p;
@@ -500,25 +496,25 @@
       print_record = ( MAC_(clo_show_reachable) ||
                        Proper != p_min->loss_mode );
       is_suppressed =
-         VG_(unique_error) ( VG_(get_VCPU_tid)(), LeakErr, (UInt)i+1,
-                             (Char*)n_lossrecords, (void*) p_min,
-                             p_min->allocated_at, print_record,
+         VG_(unique_error) ( VG_(get_VCPU_tid)(), LeakErr,
+                             (UInt)i+1, (Char*)mp->n_lossrecords,
+                             (void*) p_min, p_min->allocated_at, print_record,
                              /*allow_GDB_attach*/False, /*count_error*/False );
 
       if (is_suppressed) {
-         blocks_suppressed      += p_min->num_blocks;
+         mp->blocks_suppressed  += p_min->num_blocks;
          MAC_(bytes_suppressed) += p_min->total_bytes;
 
       } else if (Unreached == p_min->loss_mode) {
-         blocks_leaked      += p_min->num_blocks;
+         mp->blocks_leaked  += p_min->num_blocks;
          MAC_(bytes_leaked) += p_min->total_bytes;
 
       } else if (Interior == p_min->loss_mode) {
-         blocks_dubious      += p_min->num_blocks;
+         mp->blocks_dubious  += p_min->num_blocks;
          MAC_(bytes_dubious) += p_min->total_bytes;
 
       } else if (Proper == p_min->loss_mode) {
-         blocks_reachable      += p_min->num_blocks;
+         mp->blocks_reachable  += p_min->num_blocks;
         MAC_(bytes_reachable) += p_min->total_bytes;
 
      } else {
@@ -526,18 +522,80 @@
      }
      p_min->num_blocks = 0;
   }
+}
+
+static void detect_mempool_leaks (VgHashNode *node, void *d)
+{
+   struct mempool_check *mp = (struct mempool_check *)d;
+   MAC_Mempool *m = (MAC_Mempool *)node;
+
+   do_detect_memory_leaks (m->chunks, mp);
+}
+
+static void count_mempools (VgHashNode *node, void *d)
+{
+   int *i = (int *)d;
+   MAC_Mempool *m = (MAC_Mempool *)node;
+
+   *i += VG_(HT_count_nodes)(m->chunks);
+}
+
+/* Top level entry point to leak detector.  Call here, passing in
+   suitable address-validating functions (see comment at top of
+   vg_scan_all_valid_memory above).  All this is to avoid duplication
+   of the leak-detection code for Memcheck and Addrcheck.
+   Also pass in a tool-specific function to extract the .where field
+   for allocated blocks, an indication of the resolution wanted for
+   distinguishing different allocation points, and whether or not
+   reachable blocks should be shown.
+*/
+void MAC_(do_detect_memory_leaks) (
+   Bool is_valid_64k_chunk ( UInt ),
+   Bool is_valid_address ( Addr )
+)
+{
+   struct mempool_check mp;
+
+   mp.is_valid_64k_chunk = is_valid_64k_chunk;
+   mp.is_valid_address   = is_valid_address;
+   mp.blocks_leaked      = MAC_(bytes_leaked)     = 0;
+   mp.blocks_dubious     = MAC_(bytes_dubious)    = 0;
+   mp.blocks_reachable   = MAC_(bytes_reachable)  = 0;
+   mp.blocks_suppressed  = MAC_(bytes_suppressed) = 0;
+   mp.offset             = 1;
+   mp.n_lossrecords      = 0;
+   mp.errlist            = NULL;
+
+   mp.total_lossrecords = VG_(HT_count_nodes)(MAC_(malloc_list));
+   VG_(HT_apply_to_all_nodes)(MAC_(mempool_list), count_mempools,
+                              &(mp.total_lossrecords));
+
+   if (mp.total_lossrecords == 0 && VG_(clo_verbosity) >= 1) {
+      VG_(message)(Vg_UserMsg,
+                   "No malloc'd blocks -- no leaks are possible.");
+      return;
+   }
+
+   if (VG_(clo_verbosity) > 0)
+      VG_(message)(Vg_UserMsg,
+                   "searching for pointers to %d not-freed blocks.",
+                   mp.total_lossrecords );
+
+   VG_(HT_apply_to_all_nodes)(MAC_(mempool_list), detect_mempool_leaks, &mp);
+   do_detect_memory_leaks (MAC_(malloc_list), &mp);
+   print_memory_leaks(&mp);
 
    if (VG_(clo_verbosity) > 0) {
       VG_(message)(Vg_UserMsg, "");
       VG_(message)(Vg_UserMsg, "LEAK SUMMARY:");
       VG_(message)(Vg_UserMsg, "   definitely lost: %d bytes in %d blocks.",
-                               MAC_(bytes_leaked), blocks_leaked );
+                               MAC_(bytes_leaked), mp.blocks_leaked );
       VG_(message)(Vg_UserMsg, "   possibly lost:   %d bytes in %d blocks.",
-                               MAC_(bytes_dubious), blocks_dubious );
+                               MAC_(bytes_dubious), mp.blocks_dubious );
       VG_(message)(Vg_UserMsg, "   still reachable: %d bytes in %d blocks.",
-                               MAC_(bytes_reachable), blocks_reachable );
+                               MAC_(bytes_reachable), mp.blocks_reachable );
       VG_(message)(Vg_UserMsg, "        suppressed: %d bytes in %d blocks.",
-                               MAC_(bytes_suppressed), blocks_suppressed );
+                               MAC_(bytes_suppressed), mp.blocks_suppressed );
       if (!MAC_(clo_show_reachable)) {
          VG_(message)(Vg_UserMsg,
            "Reachable blocks (those to which a pointer was found) are not shown.");
@@ -545,9 +603,6 @@
            "To see them, rerun with: --show-reachable=yes");
       }
    }
-
-   VG_(free) ( lc_shadows );
-   VG_(free) ( lc_reachedness );
 }
 
 /*--------------------------------------------------------------------*/
diff -rNu valgrind/memcheck/mac_needs.c valgrind-mempool/memcheck/mac_needs.c
--- valgrind/memcheck/mac_needs.c	2004-11-23 12:31:25.000000000 -0800
+++ valgrind-mempool/memcheck/mac_needs.c	2005-01-17 11:42:33.000000000 -0800
@@ -221,7 +221,7 @@
                      a);
          VG_(message)(Vg_UserMsg,
             "   v 2.96 or 3.0.X.  To suppress, use: --workaround-gcc296-bugs=yes");
-      } else {
+      } else {
         VG_(message)(Vg_UserMsg,
            "   Address 0x%x is not stack'd, malloc'd or (recently) free'd",a);
      }
@@ -361,11 +361,21 @@
 {
    Addr a = *(Addr *)ap;
 
-   return VG_(addr_is_in_block)( a, mc->data, mc->size );
+   return VG_(addr_is_in_block)( a, mc->data, mc->size,
+                                 VG_(vg_malloc_redzone_szB) );
+}
+
+/* Callback for searching free'd list */
+static Bool addr_is_in_MempoolHashNode(MAC_Chunk* mc, void *ap, SizeT redzone)
+{
+   Addr a = *(Addr *)ap;
+
+   return VG_(addr_is_in_block)( a, mc->data, mc->size, redzone);
 }
 
+
 /* Callback for searching malloc'd lists */
-static Bool addr_is_in_HashNode(VgHashNode* sh_ch, void *ap)
+static Bool addr_is_in_MallocHashNode(VgHashNode* sh_ch, void *ap)
 {
    return addr_is_in_MAC_Chunk( (MAC_Chunk*)sh_ch, ap );
 }
@@ -399,7 +409,8 @@
       return;
    }
    /* Search for a currently malloc'd block which might bracket it. */
-   sc = (MAC_Chunk*)VG_(HT_first_match)(MAC_(malloc_list), addr_is_in_HashNode, &a);
+   sc = (MAC_Chunk*)VG_(HT_first_match)(MAC_(malloc_list),
+                                        addr_is_in_MallocHashNode, &a);
    if (NULL != sc) {
       ai->akind = Mallocd;
       ai->blksize = sc->size;
diff -rNu valgrind/memcheck/mc_main.c valgrind-mempool/memcheck/mc_main.c
--- valgrind/memcheck/mc_main.c	2005-02-01 15:17:10.000000000 -0800
+++ valgrind-mempool/memcheck/mc_main.c	2005-02-01 15:18:21.185108750 -0800
@@ -1699,12 +1699,17 @@
    );
 }
 
+struct fai {
+   Addr a;
+   SizeT rzB;
+};
+
 static Bool find_addr(VgHashNode* sh_ch, void* ap)
 {
    MAC_Chunk *m = (MAC_Chunk*)sh_ch;
-   Addr a = *(Addr*)ap;
+   struct fai *a = (struct fai *)ap;
 
-   return VG_(addr_is_in_block)(a, m->data, m->size);
+   return VG_(addr_is_in_block)(a->a, m->data, m->size, a->rzB);
 }
 
 static Bool client_perm_maybe_describe( Addr a, AddrInfo* ai )
@@ -1716,7 +1721,8 @@
    for (i = 0; i < vg_cgb_used; i++) {
      if (vg_cgbs[i].kind == CG_NotInUse)
         continue;
-     if (VG_(addr_is_in_block)(a, vg_cgbs[i].start, vg_cgbs[i].size)) {
+     if (VG_(addr_is_in_block)(a, vg_cgbs[i].start,
+                               vg_cgbs[i].size, VG_(vg_malloc_redzone_szB))) {
        MAC_Mempool **d, *mp;
 
        /* OK - maybe it's a mempool, too? */
@@ -1726,8 +1732,12 @@
       if(mp != NULL) {
          if(mp->chunks != NULL) {
             MAC_Chunk *mc;
+            struct fai v;
+
+            v.a = a;
+            v.rzB = mp->rzB;
 
-            mc = (MAC_Chunk*)VG_(HT_first_match)(mp->chunks, find_addr, &a);
+            mc = (MAC_Chunk*)VG_(HT_first_match)(mp->chunks, find_addr, &v);
             if(mc != NULL) {
                ai->akind = UserG;
                ai->blksize = mc->size;
diff -rNu valgrind/memcheck/tests/mempool.c valgrind-mempool/memcheck/tests/mempool.c
--- valgrind/memcheck/tests/mempool.c	2004-11-23 12:31:25.000000000 -0800
+++ valgrind-mempool/memcheck/tests/mempool.c	2005-01-17 11:42:34.000000000 -0800
@@ -6,7 +6,7 @@
 #include "../memcheck.h"
 
 #define SUPERBLOCK_SIZE 100000
-#define REDZONE_SIZE 8
+#define REDZONE_SIZE 20
 
 static const int USE_MMAP = 0;
 
diff -rNu valgrind/memcheck/tests/mempool.stderr.exp valgrind-mempool/memcheck/tests/mempool.stderr.exp
--- valgrind/memcheck/tests/mempool.stderr.exp	2004-11-23 12:31:25.000000000 -0800
+++ valgrind-mempool/memcheck/tests/mempool.stderr.exp	2005-01-17 11:42:34.000000000 -0800
@@ -17,7 +17,7 @@
 Invalid write of size 1
    at 0x........: test (mempool.c:129)
    by 0x........: main (mempool.c:148)
- Address 0x........ is 70 bytes inside a mempool of size 100000 client-defined
+ Address 0x........ is 130 bytes inside a mempool of size 100000 client-defined
    at 0x........: make_pool (mempool.c:43)
    by 0x........: test (mempool.c:111)
    by 0x........: main (mempool.c:148)
@@ -25,14 +25,46 @@
 Invalid write of size 1
    at 0x........: test (mempool.c:130)
    by 0x........: main (mempool.c:148)
- Address 0x........ is 96 bytes inside a mempool of size 100000 client-defined
+ Address 0x........ is 180 bytes inside a mempool of size 100000 client-defined
    at 0x........: make_pool (mempool.c:43)
    by 0x........: test (mempool.c:111)
    by 0x........: main (mempool.c:148)
 
-20 bytes in 1 blocks are definitely lost in loss record 2 of 3
+8 bytes in 1 blocks are still reachable in loss record 1 of 6
+   at 0x........: malloc (vg_replace_malloc.c:...)
+   by 0x........: push (mempool.c:56)
+   by 0x........: test (mempool.c:113)
+   by 0x........: main (mempool.c:148)
+
+
+10 bytes in 1 blocks are definitely lost in loss record 2 of 6
+   at 0x........: allocate (mempool.c:99)
+   by 0x........: test (mempool.c:135)
+   by 0x........: main (mempool.c:148)
+
+
+10 bytes in 1 blocks are definitely lost in loss record 3 of 6
+   at 0x........: allocate (mempool.c:99)
+   by 0x........: test (mempool.c:115)
+   by 0x........: main (mempool.c:148)
+
+
+20 bytes in 1 blocks are definitely lost in loss record 4 of 6
    at 0x........: malloc (vg_replace_malloc.c:...)
    by 0x........: make_pool (mempool.c:37)
    by 0x........: test (mempool.c:111)
    by 0x........: main (mempool.c:148)
+
+
+20 bytes in 1 blocks are definitely lost in loss record 5 of 6
+   at 0x........: allocate (mempool.c:99)
+   by 0x........: test (mempool.c:116)
+   by 0x........: main (mempool.c:148)
+
+
+100000 bytes in 1 blocks are still reachable in loss record 6 of 6
+   at 0x........: malloc (vg_replace_malloc.c:...)
+   by 0x........: make_pool (mempool.c:38)
+   by 0x........: test (mempool.c:111)
+   by 0x........: main (mempool.c:148)
diff -rNu valgrind/memcheck/tests/mempool.vgtest valgrind-mempool/memcheck/tests/mempool.vgtest
--- valgrind/memcheck/tests/mempool.vgtest	2004-11-23 12:31:25.000000000 -0800
+++ valgrind-mempool/memcheck/tests/mempool.vgtest	2005-01-17 11:42:34.000000000 -0800
@@ -1,2 +1,2 @@
 prog: mempool
-vgopts: -q --leak-check=yes
+vgopts: -q --leak-check=yes --show-reachable=yes
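
For reference, below is a minimal client-side sketch of the mempool annotations whose chunks the patched leak checker now scans. It is not part of the patch; the pool structure and helper names are invented for illustration, and only the VALGRIND_CREATE_MEMPOOL / VALGRIND_MEMPOOL_ALLOC / VALGRIND_MEMPOOL_FREE client requests (the same ones memcheck/tests/mempool.c uses, from memcheck.h) are real. With the patch applied, a chunk registered through VALGRIND_MEMPOOL_ALLOC and never freed shows up in the leak summary as its own loss record (like the 10- and 20-byte allocate() records in the updated mempool.stderr.exp), and addresses near it are described using the pool's own redzone width rather than VG_(vg_malloc_redzone_szB).

#include <stdlib.h>
#include "memcheck.h"          /* client requests; the test uses "../memcheck.h" */

#define POOL_SIZE 100000
#define REDZONE   20           /* redzone the pool keeps around each chunk */

typedef struct {
   char   *base;               /* superblock backing the pool              */
   size_t  used;               /* bump-allocation cursor                   */
} pool_t;                      /* hypothetical allocator state             */

static pool_t *pool_create(void)
{
   pool_t *p = malloc(sizeof(pool_t));
   p->base = malloc(POOL_SIZE);
   p->used = 0;
   /* Anchor address, redzone width, and whether chunks come back zeroed. */
   VALGRIND_CREATE_MEMPOOL(p, REDZONE, 0);
   return p;
}

static void *pool_alloc(pool_t *p, size_t n)
{
   /* Naive bump allocator, no bounds checking -- illustration only. */
   void *a = p->base + p->used + REDZONE;
   p->used += n + 2 * REDZONE;
   /* Each registered chunk becomes a leak-check candidate of its own. */
   VALGRIND_MEMPOOL_ALLOC(p, a, n);
   return a;
}

static void pool_free(pool_t *p, void *a)
{
   VALGRIND_MEMPOOL_FREE(p, a);
}

int main(void)
{
   pool_t *p = pool_create();
   void *a = pool_alloc(p, 10);
   void *b = pool_alloc(p, 20);
   pool_free(p, a);
   (void)b;    /* never freed from the pool: reported in the leak summary */
   return 0;   /* pool left live at exit, so its chunks are visible to the scan */
}

Running this under the same options the updated .vgtest uses (valgrind -q --leak-check=yes --show-reachable=yes) should show the per-chunk pool records alongside the ordinary malloc'd ones.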