gecko-dev/build/valgrind/valgrind-epochs.patch


Index: Makefile.am
===================================================================
--- Makefile.am (revision 16465)
+++ Makefile.am (working copy)
@@ -16,6 +16,10 @@
exp-bbv \
exp-dhat
+#TOOLS = none memcheck
+#EXP_TOOLS =
+
+
# Put docs last because building the HTML is slow and we want to get
# everything else working before we try it.
SUBDIRS = \
Index: cachegrind/cg_main.c
===================================================================
--- cachegrind/cg_main.c (revision 16465)
+++ cachegrind/cg_main.c (working copy)
@@ -210,12 +210,14 @@
static void get_debug_info(Addr instr_addr, const HChar **dir,
const HChar **file, const HChar **fn, UInt* line)
{
+ DiEpoch ep = VG_(current_DiEpoch)();
Bool found_file_line = VG_(get_filename_linenum)(
+ ep,
instr_addr,
file, dir,
line
);
- Bool found_fn = VG_(get_fnname)(instr_addr, fn);
+ Bool found_fn = VG_(get_fnname)(ep, instr_addr, fn);
if (!found_file_line) {
*file = "???";
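
Illustrative sketch (not part of the patch): the cg_main.c hunk above shows the pattern this patch applies throughout -- every debuginfo query gains a leading DiEpoch argument, normally obtained once from VG_(current_DiEpoch)() and then threaded through all lookups for the same address. The hypothetical tool helper below is written against the post-patch signatures of VG_(get_fnname) and VG_(get_filename_linenum); it assumes the usual Valgrind tool headers declare DiEpoch and these prototypes, and is a sketch rather than code from the patch.

   /* Hypothetical helper; illustrates the epoch-aware call pattern only. */
   #include "pub_tool_basics.h"
   #include "pub_tool_debuginfo.h"
   #include "pub_tool_libcprint.h"

   static void describe_code_addr ( Addr a )
   {
      /* Snapshot the epoch once, so every query about 'a' refers to the
         same generation of loaded debuginfo. */
      DiEpoch ep = VG_(current_DiEpoch)();

      const HChar *fn, *file, *dir;
      UInt line = 0;

      Bool have_fn  = VG_(get_fnname)(ep, a, &fn);
      Bool have_loc = VG_(get_filename_linenum)(ep, a, &file, &dir, &line);

      VG_(printf)("%#lx: %s (%s:%u)\n",
                  a,
                  have_fn  ? fn   : "???",
                  have_loc ? file : "???",
                  line);
   }
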
Index: callgrind/bb.c
===================================================================
--- callgrind/bb.c (revision 16465)
+++ callgrind/bb.c (working copy)
@@ -199,7 +199,8 @@
DebugInfo* di;
PtrdiffT offset;
- di = VG_(find_DebugInfo)(addr);
+ DiEpoch ep = VG_(current_DiEpoch)();
+ di = VG_(find_DebugInfo)(ep, addr);
obj = CLG_(get_obj_node)( di );
/* Update symbol offset in object if remapped */
Index: callgrind/dump.c
===================================================================
--- callgrind/dump.c (revision 16465)
+++ callgrind/dump.c (working copy)
@@ -373,7 +373,8 @@
found_file_line = debug_cache_info[cachepos];
}
else {
- found_file_line = VG_(get_filename_linenum)(addr,
+ DiEpoch ep = VG_(current_DiEpoch)();
+ found_file_line = VG_(get_filename_linenum)(ep, addr,
&file,
&dir,
&(p->line));
Index: callgrind/fn.c
===================================================================
--- callgrind/fn.c (revision 16465)
+++ callgrind/fn.c (working copy)
@@ -434,17 +434,18 @@
CLG_DEBUG(6, " + get_debug_info(%#lx)\n", instr_addr);
+ DiEpoch ep = VG_(current_DiEpoch)();
if (pDebugInfo) {
- *pDebugInfo = VG_(find_DebugInfo)(instr_addr);
+ *pDebugInfo = VG_(find_DebugInfo)(ep, instr_addr);
// for generated code in anonymous space, pSegInfo is 0
}
- found_file_line = VG_(get_filename_linenum)(instr_addr,
+ found_file_line = VG_(get_filename_linenum)(ep, instr_addr,
file,
dir,
&line);
- found_fn = VG_(get_fnname)(instr_addr, fn_name);
+ found_fn = VG_(get_fnname)(ep, instr_addr, fn_name);
if (!found_file_line && !found_fn) {
CLG_(stat).no_debug_BBs++;
@@ -503,6 +504,7 @@
CLG_(get_debug_info)(bb_addr(bb),
&dirname, &filename, &fnname, &line_num, &di);
+ DiEpoch ep = VG_(current_DiEpoch)();
if (0 == VG_(strcmp)(fnname, "???")) {
int p;
static HChar buf[32]; // for sure large enough
@@ -521,7 +523,7 @@
fnname = buf;
}
else {
- if (VG_(get_fnname_if_entry)(bb_addr(bb), &fnname))
+ if (VG_(get_fnname_if_entry)(ep, bb_addr(bb), &fnname))
bb->is_entry = 1;
}
Index: coregrind/m_addrinfo.c
===================================================================
--- coregrind/m_addrinfo.c (revision 16465)
+++ coregrind/m_addrinfo.c (working copy)
@@ -86,7 +86,7 @@
return VG_INVALID_THREADID;
}
-void VG_(describe_addr) ( Addr a, /*OUT*/AddrInfo* ai )
+void VG_(describe_addr) ( DiEpoch ep, Addr a, /*OUT*/AddrInfo* ai )
{
VgSectKind sect;
@@ -99,7 +99,7 @@
VG_(free), sizeof(HChar) );
(void) VG_(get_data_description)( ai->Addr.Variable.descr1,
- ai->Addr.Variable.descr2, a );
+ ai->Addr.Variable.descr2, ep, a );
/* If there's nothing in descr1/2, free them. Why is it safe to
VG_(indexXA) at zero here? Because VG_(get_data_description)
guarantees to zero terminate descr1/2 regardless of the outcome
@@ -127,7 +127,7 @@
there. -- */
const HChar *name;
if (VG_(get_datasym_and_offset)(
- a, &name,
+ ep, a, &name,
&ai->Addr.DataSym.offset )) {
ai->Addr.DataSym.name = VG_(strdup)("mc.da.dsname", name);
ai->tag = Addr_DataSym;
@@ -148,6 +148,7 @@
ai->tag = Addr_Stack;
VG_(initThreadInfo)(&ai->Addr.Stack.tinfo);
ai->Addr.Stack.tinfo.tid = tid;
+ ai->Addr.Stack.epoch = ep;
ai->Addr.Stack.IP = 0;
ai->Addr.Stack.frameNo = -1;
ai->Addr.Stack.stackPos = StackPos_stacked;
@@ -196,9 +197,9 @@
ai->Addr.Block.block_desc = aai.name;
ai->Addr.Block.block_szB = aai.block_szB;
ai->Addr.Block.rwoffset = aai.rwoffset;
- ai->Addr.Block.allocated_at = VG_(null_ExeContext)();
+ ai->Addr.Block.allocated_at = VG_(null_ExeContextAndEpoch)();
VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
- ai->Addr.Block.freed_at = VG_(null_ExeContext)();
+ ai->Addr.Block.freed_at = VG_(null_ExeContextAndEpoch)();
return;
}
}
@@ -248,6 +249,7 @@
ai->tag = Addr_Stack;
VG_(initThreadInfo)(&ai->Addr.Stack.tinfo);
ai->Addr.Stack.tinfo.tid = tid;
+ ai->Addr.Stack.epoch = ep;
ai->Addr.Stack.IP = 0;
ai->Addr.Stack.frameNo = -1;
vg_assert (stackPos != StackPos_stacked);
@@ -447,20 +449,24 @@
Bool haslinenum;
PtrdiffT offset;
- if (VG_(get_inst_offset_in_function)( ai->Addr.Stack.IP,
+ if (VG_(get_inst_offset_in_function)( ai->Addr.Stack.epoch,
+ ai->Addr.Stack.IP,
&offset))
- haslinenum = VG_(get_linenum) (ai->Addr.Stack.IP - offset,
+ haslinenum = VG_(get_linenum) (ai->Addr.Stack.epoch,
+ ai->Addr.Stack.IP - offset,
&linenum);
else
haslinenum = False;
- hasfile = VG_(get_filename)(ai->Addr.Stack.IP, &file);
+ hasfile = VG_(get_filename)(ai->Addr.Stack.epoch,
+ ai->Addr.Stack.IP, &file);
HChar strlinenum[16] = ""; // large enough
if (hasfile && haslinenum)
VG_(sprintf)(strlinenum, "%u", linenum);
- hasfn = VG_(get_fnname)(ai->Addr.Stack.IP, &fn);
+ hasfn = VG_(get_fnname)(ai->Addr.Stack.epoch,
+ ai->Addr.Stack.IP, &fn);
if (hasfn || hasfile)
VG_(emit)( "%sin frame #%d, created by %ps (%ps:%s)%s\n",
@@ -533,32 +539,35 @@
xpost
);
if (ai->Addr.Block.block_kind==Block_Mallocd) {
- VG_(pp_ExeContext)(ai->Addr.Block.allocated_at);
- vg_assert (ai->Addr.Block.freed_at == VG_(null_ExeContext)());
+ VG_(pp_ExeContextAndEpoch)(ai->Addr.Block.allocated_at);
+ vg_assert(
+ VG_(is_null_ExeContextAndEpoch)(ai->Addr.Block.freed_at));
}
else if (ai->Addr.Block.block_kind==Block_Freed) {
- VG_(pp_ExeContext)(ai->Addr.Block.freed_at);
- if (ai->Addr.Block.allocated_at != VG_(null_ExeContext)()) {
+ VG_(pp_ExeContextAndEpoch)(ai->Addr.Block.freed_at);
+ if (!VG_(is_null_ExeContextAndEpoch)(ai->Addr.Block.allocated_at)) {
VG_(emit)(
"%sBlock was alloc'd at%s\n",
xpre,
xpost
);
- VG_(pp_ExeContext)(ai->Addr.Block.allocated_at);
+ VG_(pp_ExeContextAndEpoch)(ai->Addr.Block.allocated_at);
}
}
else if (ai->Addr.Block.block_kind==Block_MempoolChunk
|| ai->Addr.Block.block_kind==Block_UserG) {
// client-defined
- VG_(pp_ExeContext)(ai->Addr.Block.allocated_at);
- vg_assert (ai->Addr.Block.freed_at == VG_(null_ExeContext)());
+ VG_(pp_ExeContextAndEpoch)(ai->Addr.Block.allocated_at);
+ vg_assert(VG_(is_null_ExeContextAndEpoch)(ai->Addr.Block.freed_at));
/* Nb: cannot have a freed_at, as a freed client-defined block
has a Block_Freed block_kind. */
} else {
// Client or Valgrind arena. At least currently, we never
// have stacktraces for these.
- vg_assert (ai->Addr.Block.allocated_at == VG_(null_ExeContext)());
- vg_assert (ai->Addr.Block.freed_at == VG_(null_ExeContext)());
+ vg_assert(VG_(is_null_ExeContextAndEpoch)
+ (ai->Addr.Block.allocated_at));
+ vg_assert(VG_(is_null_ExeContextAndEpoch)
+ (ai->Addr.Block.freed_at));
}
if (ai->Addr.Block.alloc_tinfo.tnr || ai->Addr.Block.alloc_tinfo.tid)
VG_(emit)(
@@ -603,7 +612,7 @@
if (ai->Addr.SectKind.kind == Vg_SectText) {
/* To better describe the address in a text segment,
pp a dummy stacktrace made of this single address. */
- VG_(pp_StackTrace)( &a, 1 );
+ VG_(pp_StackTrace)( VG_(current_DiEpoch)(), &a, 1 );
}
break;
Index: coregrind/m_debuginfo/debuginfo.c
===================================================================
--- coregrind/m_debuginfo/debuginfo.c (revision 16465)
+++ coregrind/m_debuginfo/debuginfo.c (working copy)
@@ -70,7 +70,11 @@
should-we-load-debuginfo-now? finite state machine. */
#define DEBUG_FSM 0
+/* Set this to 1 to enable somewhat minimal debug printing for the
+ debuginfo-epoch machinery. */
+#define DEBUG_EPOCHS 0
+
/*------------------------------------------------------------*/
/*--- The _svma / _avma / _image / _bias naming scheme ---*/
/*------------------------------------------------------------*/
@@ -109,6 +113,116 @@
/*------------------------------------------------------------*/
+/*--- Epochs ---*/
+/*------------------------------------------------------------*/
+
+/* The DebugInfo epoch is incremented every time we either load debuginfo in
+ response to an object mapping, or an existing DebugInfo becomes
+ non-current (or will be discarded) due to an object unmap. By storing,
+ in each DebugInfo, the first and last epoch for which it is valid, we can
+ unambiguously identify the set of DebugInfos which should be used to
+ provide metadata for a code or data address, provided we know the epoch
+ to which that address pertains.
+
+ Note, this isn't the same as the "handle_counter" below. That only
+ advances when new DebugInfos are created. "current_epoch" advances both
+ at DebugInfo creation and destruction-or-making-non-current.
+*/
+
+// The value zero is reserved for indicating an invalid epoch number.
+static UInt current_epoch = 1;
+
+inline DiEpoch VG_(current_DiEpoch) ( void ) {
+ DiEpoch dep; dep.n = current_epoch; return dep;
+}
+
+static void advance_current_DiEpoch ( const HChar* msg ) {
+ current_epoch++;
+ if (DEBUG_EPOCHS)
+ VG_(printf)("Advancing current epoch to %u due to %s\n",
+ current_epoch, msg);
+}
+
+static inline Bool eq_DiEpoch ( DiEpoch dep1, DiEpoch dep2 ) {
+ return dep1.n == dep2.n && /*neither is invalid*/dep1.n != 0;
+}
+
+// Is this DebugInfo currently "allocated" (pre-use state, only FSM active) ?
+static inline Bool is_DebugInfo_allocated ( const DebugInfo* di )
+{
+ if (is_DiEpoch_INVALID(di->first_epoch)
+ && is_DiEpoch_INVALID(di->last_epoch)) {
+ return True;
+ } else {
+ return False;
+ }
+}
+
+// Is this DebugInfo currently "active" (valid for the current epoch) ?
+static inline Bool is_DebugInfo_active ( const DebugInfo* di )
+{
+ if (!is_DiEpoch_INVALID(di->first_epoch)
+ && is_DiEpoch_INVALID(di->last_epoch)) {
+ // Yes it is active. Sanity check ..
+ tl_assert(di->first_epoch.n <= current_epoch);
+ return True;
+ } else {
+ return False;
+ }
+}
+
+// Is this DebugInfo currently "archived" ?
+static inline Bool is_DebugInfo_archived ( const DebugInfo* di )
+{
+ if (!is_DiEpoch_INVALID(di->first_epoch)
+ && !is_DiEpoch_INVALID(di->last_epoch)) {
+ // Yes it is archived. Sanity checks ..
+ tl_assert(di->first_epoch.n <= di->last_epoch.n);
+ tl_assert(di->last_epoch.n <= current_epoch);
+ return True;
+ } else {
+ return False;
+ }
+}
+
+// Is this DebugInfo valid for the specified epoch?
+static inline Bool is_DI_valid_for_epoch ( const DebugInfo* di, DiEpoch ep )
+{
+ // Stay sane
+ vg_assert(ep.n > 0 && ep.n <= current_epoch);
+
+ Bool first_valid = !is_DiEpoch_INVALID(di->first_epoch);
+ Bool last_valid = !is_DiEpoch_INVALID(di->last_epoch);
+
+ if (first_valid) {
+ if (last_valid) {
+ // Both valid. di is in Archived state.
+ return di->first_epoch.n <= ep.n && ep.n <= di->last_epoch.n;
+ } else {
+ // First is valid, last is invalid. di is in Active state.
+ return di->first_epoch.n <= ep.n;
+ }
+ } else {
+ if (last_valid) {
+ // First is invalid, last is valid. This is an impossible state.
+ vg_assert(0);
+ /*NOTREACHED*/
+ return False;
+ } else {
+ // Neither is valid. di is in Allocated state.
+ return False;
+ }
+ }
+
+}
+
+static inline UInt ROL32 ( UInt x, UInt n )
+{
+ return (x << n) | (x >> (32-n));
+}
+
+
+/*------------------------------------------------------------*/
/*--- Root structure ---*/
/*------------------------------------------------------------*/
@@ -162,6 +276,23 @@
}
+// Debugging helper for epochs
+static void show_epochs ( const HChar* msg )
+{
+ if (DEBUG_EPOCHS) {
+ DebugInfo* di;
+ VG_(printf)("\nDebugInfo epoch display, requested by \"%s\"\n", msg);
+ VG_(printf)(" Current epoch (note: 0 means \"invalid epoch\") = %u\n",
+ current_epoch);
+ for (di = debugInfo_list; di; di = di->next) {
+ VG_(printf)(" [di=%p] first %u last %u %s\n",
+ di, di->first_epoch.n, di->last_epoch.n, di->fsm.filename);
+ }
+ VG_(printf)("\n");
+ }
+}
+
+
/*------------------------------------------------------------*/
/*--- Notification (acquire/discard) helpers ---*/
/*------------------------------------------------------------*/
@@ -182,6 +313,8 @@
di = ML_(dinfo_zalloc)("di.debuginfo.aDI.1", sizeof(DebugInfo));
di->handle = handle_counter++;
+ di->first_epoch = DiEpoch_INVALID();
+ di->last_epoch = DiEpoch_INVALID();
di->fsm.filename = ML_(dinfo_strdup)("di.debuginfo.aDI.2", filename);
di->fsm.maps = VG_(newXA)(
ML_(dinfo_zalloc), "di.debuginfo.aDI.3",
@@ -302,13 +435,20 @@
}
-/* 'si' is a member of debugInfo_list. Find it, remove it from the
- list, notify m_redir that this has happened, and free all storage
- reachable from it.
+/* 'di' is a member of debugInfo_list. Find it, and either (remove it from
+ the list and free all storage reachable from it) or archive it, and
+ notify m_redir that this has happened.
+
+ Note that 'di' can't already be archived. If a DebugInfo is archived then we
+ want to hold on to it forever. This is asserted for.
+
+ Note also, we don't advance the current epoch here. That's the
+ responsibility of some (non-immediate) caller.
*/
-static void discard_DebugInfo ( DebugInfo* di )
+static void discard_or_archive_DebugInfo ( DebugInfo* di )
{
- const HChar* reason = "munmap";
+ const HChar* reason = "munmap";
+ const Bool archive = VG_(clo_keep_debuginfo);
DebugInfo** prev_next_ptr = &debugInfo_list;
DebugInfo* curr = debugInfo_list;
@@ -315,11 +455,14 @@
while (curr) {
if (curr == di) {
- /* Found it; remove from list and free it. */
+ /* It must be active! */
+ vg_assert( is_DebugInfo_active(di));
+ /* Found it; (remove from list and free it), or archive it. */
if (curr->have_dinfo
&& (VG_(clo_verbosity) > 1 || VG_(clo_trace_redir)))
VG_(message)(Vg_DebugMsg,
- "Discarding syms at %#lx-%#lx in %s due to %s()\n",
+ "%s syms at %#lx-%#lx in %s due to %s()\n",
+ archive ? "Archiving" : "Discarding",
di->text_avma,
di->text_avma + di->text_size,
curr->fsm.filename ? curr->fsm.filename
@@ -326,10 +469,18 @@
: "???",
reason);
vg_assert(*prev_next_ptr == curr);
- *prev_next_ptr = curr->next;
- if (curr->have_dinfo)
+ if (!archive) {
+ *prev_next_ptr = curr->next;
+ }
+ if (curr->have_dinfo) {
VG_(redir_notify_delete_DebugInfo)( curr );
- free_DebugInfo(curr);
+ }
+ if (archive) {
+ /* Adjust the epoch markers appropriately. */
+ di->last_epoch = VG_(current_DiEpoch)();
+ } else {
+ free_DebugInfo(curr);
+ }
return;
}
prev_next_ptr = &curr->next;
@@ -358,10 +509,11 @@
while (True) {
if (curr == NULL)
break;
- if (curr->text_present
- && curr->text_size > 0
- && (start+length - 1 < curr->text_avma
- || curr->text_avma + curr->text_size - 1 < start)) {
+ if (is_DebugInfo_archived(curr)
+ || (curr->text_present
+ && curr->text_size > 0
+ && (start+length - 1 < curr->text_avma
+ || curr->text_avma + curr->text_size - 1 < start))) {
/* no overlap */
} else {
found = True;
@@ -372,7 +524,7 @@
if (!found) break;
anyFound = True;
- discard_DebugInfo( curr );
+ discard_or_archive_DebugInfo( curr );
}
return anyFound;
@@ -418,9 +570,9 @@
}
-/* Discard all elements of debugInfo_list whose .mark bit is set.
+/* Discard or archive all elements of debugInfo_list whose .mark bit is set.
*/
-static void discard_marked_DebugInfos ( void )
+static void discard_or_archive_marked_DebugInfos ( void )
{
DebugInfo* curr;
@@ -436,7 +588,7 @@
}
if (!curr) break;
- discard_DebugInfo( curr );
+ discard_or_archive_DebugInfo( curr );
}
}
@@ -446,6 +598,7 @@
Clearly diRef must have its mapping information set to something sane. */
static void discard_DebugInfos_which_overlap_with ( DebugInfo* diRef )
{
+ vg_assert(is_DebugInfo_allocated(diRef));
DebugInfo* di;
/* Mark all the DebugInfos in debugInfo_list that need to be
deleted. First, clear all the mark bits; then set them if they
@@ -452,6 +605,8 @@
overlap with siRef. Since siRef itself is in this list we at
least expect its own mark bit to be set. */
for (di = debugInfo_list; di; di = di->next) {
+ if (is_DebugInfo_archived(di))
+ continue;
di->mark = do_DebugInfos_overlap( di, diRef );
if (di == diRef) {
vg_assert(di->mark);
@@ -458,7 +613,7 @@
di->mark = False;
}
}
- discard_marked_DebugInfos();
+ discard_or_archive_marked_DebugInfos();
}
@@ -470,6 +625,8 @@
DebugInfo* di;
vg_assert(filename);
for (di = debugInfo_list; di; di = di->next) {
+ if (is_DebugInfo_archived(di))
+ continue;
vg_assert(di->fsm.filename);
if (0==VG_(strcmp)(di->fsm.filename, filename))
break;
@@ -480,6 +637,7 @@
di->next = debugInfo_list;
debugInfo_list = di;
}
+ vg_assert(!is_DebugInfo_archived(di));
return di;
}
@@ -723,6 +881,8 @@
ULong di_handle;
Bool ok;
+ advance_current_DiEpoch("di_notify_ACHIEVE_ACCEPT_STATE");
+
vg_assert(di->fsm.filename);
TRACE_SYMTAB("\n");
TRACE_SYMTAB("------ start ELF OBJECT "
@@ -734,7 +894,8 @@
/* We're going to read symbols and debug info for the avma
ranges specified in the _DebugInfoFsm mapping array. First
get rid of any other DebugInfos which overlap any of those
- ranges (to avoid total confusion). */
+ ranges (to avoid total confusion). But only those valid in
+ the current epoch. We don't want to discard archived DebugInfos. */
discard_DebugInfos_which_overlap_with( di );
/* The DebugInfoMappings that now exist in the FSM may involve
@@ -765,6 +926,15 @@
priv_storage.h. */
check_CFSI_related_invariants(di);
ML_(finish_CFSI_arrays)(di);
+
+ // Mark di's first epoch point as a valid epoch. Because its
+ // last_epoch value is still invalid, this changes di's state from
+ // "allocated" to "active".
+ vg_assert(is_DebugInfo_allocated(di));
+ di->first_epoch = VG_(current_DiEpoch)();
+ vg_assert(is_DebugInfo_active(di));
+ show_epochs("di_notify_ACHIEVE_ACCEPT_STATE success");
+
/* notify m_redir about it */
TRACE_SYMTAB("\n------ Notifying m_redir ------\n");
VG_(redir_notify_new_DebugInfo)( di );
@@ -1077,8 +1247,11 @@
Bool anyFound;
if (0) VG_(printf)("DISCARD %#lx %#lx\n", a, a+len);
anyFound = discard_syms_in_range(a, len);
- if (anyFound)
+ if (anyFound) {
caches__invalidate();
+ advance_current_DiEpoch("VG_(di_notify_munmap)");
+ show_epochs("VG_(di_notify_munmap)");
+ }
}
@@ -1094,8 +1267,10 @@
# endif
if (0 && !exe_ok) {
Bool anyFound = discard_syms_in_range(a, len);
- if (anyFound)
+ if (anyFound) {
caches__invalidate();
+ advance_current_DiEpoch("VG_(di_notify_mprotect)");
+ }
}
}
@@ -1395,6 +1570,7 @@
caches__invalidate();
/* dump old info for this range, if any */
discard_syms_in_range( avma_obj, total_size );
+ advance_current_DiEpoch("VG_(di_notify_pdb_debuginfo)");
{ DebugInfo* di = find_or_create_DebugInfo_for(exename);
@@ -1471,6 +1647,7 @@
/*------------------------------------------------------------*/
/*--- Types and functions for inlined IP cursor ---*/
/*------------------------------------------------------------*/
+
struct _InlIPCursor {
Addr eip; // Cursor used to describe calls at eip.
DebugInfo* di; // DebugInfo describing inlined calls at eip
@@ -1534,8 +1711,8 @@
}
/* Forward */
-static void search_all_loctabs ( Addr ptr, /*OUT*/DebugInfo** pdi,
- /*OUT*/Word* locno );
+static void search_all_loctabs ( DiEpoch ep, Addr ptr,
+ /*OUT*/DebugInfo** pdi, /*OUT*/Word* locno );
/* Returns the position after which eip would be inserted in inltab.
(-1 if eip should be inserted before position 0).
@@ -1565,7 +1742,7 @@
return lo - 1;
}
-InlIPCursor* VG_(new_IIPC)(Addr eip)
+InlIPCursor* VG_(new_IIPC)(DiEpoch ep, Addr eip)
{
DebugInfo* di;
Word locno;
@@ -1576,8 +1753,8 @@
if (!VG_(clo_read_inline_info))
return NULL; // No way we can find inlined calls.
- /* Search the DebugInfo for eip */
- search_all_loctabs ( eip, &di, &locno );
+ /* Search the DebugInfo for (ep, eip) */
+ search_all_loctabs ( ep, eip, &di, &locno );
if (di == NULL || di->inltab_used == 0)
return NULL; // No di (with inltab) containing eip.
@@ -1641,8 +1818,8 @@
If findText==True, only text symbols are searched for.
If findText==False, only data symbols are searched for.
*/
-static void search_all_symtabs ( Addr ptr, /*OUT*/DebugInfo** pdi,
- /*OUT*/Word* symno,
+static void search_all_symtabs ( DiEpoch ep, Addr ptr,
+ /*OUT*/DebugInfo** pdi, /*OUT*/Word* symno,
Bool findText )
{
Word sno;
@@ -1651,6 +1828,9 @@
for (di = debugInfo_list; di != NULL; di = di->next) {
+ if (!is_DI_valid_for_epoch(di, ep))
+ continue;
+
if (findText) {
/* Consider any symbol in the r-x mapped area to be text.
See Comment_Regarding_Text_Range_Checks in storage.c for
@@ -1698,15 +1878,17 @@
}
-/* Search all loctabs that we know about to locate ptr. If found, set
- *pdi to the relevant DebugInfo, and *locno to the loctab entry
+/* Search all loctabs that we know about to locate ptr at epoch ep. If
+ found, set *pdi to the relevant DebugInfo, and *locno to the loctab entry
*number within that. If not found, *pdi is set to NULL. */
-static void search_all_loctabs ( Addr ptr, /*OUT*/DebugInfo** pdi,
- /*OUT*/Word* locno )
+static void search_all_loctabs ( DiEpoch ep, Addr ptr,
+ /*OUT*/DebugInfo** pdi, /*OUT*/Word* locno )
{
Word lno;
DebugInfo* di;
for (di = debugInfo_list; di != NULL; di = di->next) {
+ if (!is_DI_valid_for_epoch(di, ep))
+ continue;
if (di->text_present
&& di->text_size > 0
&& di->text_avma <= ptr
@@ -1729,19 +1911,22 @@
typedef
struct {
- Addr sym_avma;
+ // (sym_epoch, sym_avma) are the hash table key.
+ DiEpoch sym_epoch;
+ Addr sym_avma;
+ // Fields below here are not part of the key.
const HChar* sym_name;
PtrdiffT offset : (sizeof(PtrdiffT)*8)-1;
Bool isText : 1;
}
Sym_Name_CacheEnt;
-/* Sym_Name_CacheEnt associates a queried address to the sym name found.
- By nature, if a sym name was found, it means the searched address
- stored in the cache is an avma (see e.g. search_all_symtabs).
- Note however that the caller is responsibe to work with 'avma'
- addresses e.g. when calling VG_(get_fnname) : m_debuginfo.c has
- no way to differentiate an 'svma a' from an 'avma a'. It is however
- unlikely that svma would percolate outside of this module. */
+/* Sym_Name_CacheEnt associates a queried (epoch, address) pair to the sym
+ name found. By nature, if a sym name was found, it means the searched
+ address stored in the cache is an avma (see e.g. search_all_symtabs).
+ Note however that the caller is responsibe to work with 'avma' addresses
+ e.g. when calling VG_(get_fnname) : m_debuginfo.c has no way to
+ differentiate an 'svma a' from an 'avma a'. It is however unlikely that
+ svma would percolate outside of this module. */
static Sym_Name_CacheEnt sym_name_cache[N_SYM_NAME_CACHE];
@@ -1757,13 +1942,15 @@
sym_name_cache[0].sym_name = no_sym_name;
}
-/* The whole point of this whole big deal: map a code address to a
- plausible symbol name. Returns False if no idea; otherwise True.
+/* The whole point of this whole big deal: map an (epoch, code address) pair
+ to a plausible symbol name. Returns False if no idea; otherwise True.
+
Caller supplies buf. If do_cxx_demangling is False, don't do
C++ demangling, regardless of VG_(clo_demangle) -- probably because the
call has come from VG_(get_fnname_raw)(). findText
indicates whether we're looking for a text symbol or a data symbol
-- caller must choose one kind or the other.
+
NOTE: See IMPORTANT COMMENT above about persistence and ownership
in pub_tool_debuginfo.h
get_sym_name and the fact it calls the demangler is the main reason
@@ -1772,22 +1959,32 @@
(1) the DebugInfo it belongs to is not discarded
(2) the demangler is not invoked again
Also, the returned string is owned by "somebody else". Callers must
- not free it or modify it.*/
+ not free it or modify it. */
static
Bool get_sym_name ( Bool do_cxx_demangling, Bool do_z_demangling,
Bool do_below_main_renaming,
- Addr a, const HChar** buf,
+ DiEpoch ep, Addr a, const HChar** buf,
Bool match_anywhere_in_sym, Bool show_offset,
Bool findText, /*OUT*/PtrdiffT* offsetP )
{
- UWord hash = a % N_SYM_NAME_CACHE;
- Sym_Name_CacheEnt* se = &sym_name_cache[hash];
+ // Compute the hash from 'ep' and 'a'. The latter contains lots of
+ // significant bits, but 'ep' is expected to be a small number, typically
+ // less than 500. So rotate it around a bit in the hope of spreading the
+ // bits out somewhat.
+ vg_assert(!is_DiEpoch_INVALID(ep));
+ UWord hash = a ^ (UWord)(ep.n ^ ROL32(ep.n, 5)
+ ^ ROL32(ep.n, 13) ^ ROL32(ep.n, 19));
+ hash %= N_SYM_NAME_CACHE;
- if (UNLIKELY(se->sym_avma != a || se->isText != findText)) {
+ Sym_Name_CacheEnt* se = &sym_name_cache[hash];
+
+ if (UNLIKELY(se->sym_epoch.n != ep.n || se->sym_avma != a
+ || se->isText != findText)) {
DebugInfo* di;
Word sno;
- search_all_symtabs ( a, &di, &sno, findText );
+ search_all_symtabs ( ep, a, &di, &sno, findText );
+ se->sym_epoch = ep;
se->sym_avma = a;
se->isText = findText;
if (di == NULL || a == 0)
@@ -1846,12 +2043,12 @@
/* ppc64be-linux only: find the TOC pointer (R2 value) that should be in
force at the entry point address of the function containing
guest_code_addr. Returns 0 if not known. */
-Addr VG_(get_tocptr) ( Addr guest_code_addr )
+Addr VG_(get_tocptr) ( DiEpoch ep, Addr guest_code_addr )
{
#if defined(VGA_ppc64be) || defined(VGA_ppc64le)
DebugInfo* si;
Word sno;
- search_all_symtabs ( guest_code_addr,
+ search_all_symtabs ( ep, guest_code_addr,
&si, &sno,
True/*consider text symbols only*/ );
if (si == NULL)
@@ -1867,11 +2064,11 @@
match anywhere in function, but don't show offsets.
NOTE: See IMPORTANT COMMENT above about persistence and ownership
in pub_tool_debuginfo.h */
-Bool VG_(get_fnname) ( Addr a, const HChar** buf )
+Bool VG_(get_fnname) ( DiEpoch ep, Addr a, const HChar** buf )
{
return get_sym_name ( /*C++-demangle*/True, /*Z-demangle*/True,
/*below-main-renaming*/True,
- a, buf,
+ ep, a, buf,
/*match_anywhere_in_fun*/True,
/*show offset?*/False,
/*text sym*/True,
@@ -1882,11 +2079,11 @@
match anywhere in function, and show offset if nonzero.
NOTE: See IMPORTANT COMMENT above about persistence and ownership
in pub_tool_debuginfo.h */
-Bool VG_(get_fnname_w_offset) ( Addr a, const HChar** buf )
+Bool VG_(get_fnname_w_offset) ( DiEpoch ep, Addr a, const HChar** buf )
{
return get_sym_name ( /*C++-demangle*/True, /*Z-demangle*/True,
/*below-main-renaming*/True,
- a, buf,
+ ep, a, buf,
/*match_anywhere_in_fun*/True,
/*show offset?*/True,
/*text sym*/True,
@@ -1898,7 +2095,7 @@
and don't show offsets.
NOTE: See IMPORTANT COMMENT above about persistence and ownership
in pub_tool_debuginfo.h */
-Bool VG_(get_fnname_if_entry) ( Addr a, const HChar** buf )
+Bool VG_(get_fnname_if_entry) ( DiEpoch ep, Addr a, const HChar** buf )
{
const HChar *tmp;
Bool res;
@@ -1905,7 +2102,7 @@
res = get_sym_name ( /*C++-demangle*/True, /*Z-demangle*/True,
/*below-main-renaming*/True,
- a, &tmp,
+ ep, a, &tmp,
/*match_anywhere_in_fun*/False,
/*show offset?*/False,
/*text sym*/True,
@@ -1920,11 +2117,11 @@
offsets.
NOTE: See IMPORTANT COMMENT above about persistence and ownership
in pub_tool_debuginfo.h */
-Bool VG_(get_fnname_raw) ( Addr a, const HChar** buf )
+Bool VG_(get_fnname_raw) ( DiEpoch ep, Addr a, const HChar** buf )
{
return get_sym_name ( /*C++-demangle*/False, /*Z-demangle*/False,
/*below-main-renaming*/False,
- a, buf,
+ ep, a, buf,
/*match_anywhere_in_fun*/True,
/*show offset?*/False,
/*text sym*/True,
@@ -1936,14 +2133,22 @@
don't show offsets.
NOTE: See IMPORTANT COMMENT above about persistence and ownership
in pub_tool_debuginfo.h */
-Bool VG_(get_fnname_no_cxx_demangle) ( Addr a, const HChar** buf,
+Bool VG_(get_fnname_no_cxx_demangle) ( DiEpoch ep, Addr a, const HChar** buf,
const InlIPCursor* iipc )
{
+ // FIXME JRS 28 June 2017: should we use 'iipc->di->first_epoch'
+ // instead of 'ep' in the call to get_sym_name? At least let's
+ // assert that the DebugInfo that 'iipc' mentions is valid for the
+ // specified epoch.
+ if (iipc) {
+ vg_assert(is_DI_valid_for_epoch(iipc->di, ep));
+ }
+
if (is_bottom(iipc)) {
// At the bottom (towards main), we describe the fn at eip.
return get_sym_name ( /*C++-demangle*/False, /*Z-demangle*/True,
/*below-main-renaming*/True,
- a, buf,
+ ep, a, buf,
/*match_anywhere_in_fun*/True,
/*show offset?*/False,
/*text sym*/True,
@@ -1962,13 +2167,13 @@
/* mips-linux only: find the offset of current address. This is needed for
stack unwinding for MIPS.
*/
-Bool VG_(get_inst_offset_in_function)( Addr a,
+Bool VG_(get_inst_offset_in_function)( DiEpoch ep, Addr a,
/*OUT*/PtrdiffT* offset )
{
const HChar *fnname;
return get_sym_name ( /*C++-demangle*/False, /*Z-demangle*/False,
/*below-main-renaming*/False,
- a, &fnname,
+ ep, a, &fnname,
/*match_anywhere_in_sym*/True,
/*show offset?*/False,
/*text sym*/True,
@@ -2000,13 +2205,13 @@
}
}
-Vg_FnNameKind VG_(get_fnname_kind_from_IP) ( Addr ip )
+Vg_FnNameKind VG_(get_fnname_kind_from_IP) ( DiEpoch ep, Addr ip )
{
const HChar *buf;
// We don't demangle, because it's faster not to, and the special names
// we're looking for won't be mangled.
- if (VG_(get_fnname_raw) ( ip, &buf )) {
+ if (VG_(get_fnname_raw) ( ep, ip, &buf )) {
return VG_(get_fnname_kind)(buf);
} else {
@@ -2019,13 +2224,13 @@
Also data_addr's offset from the symbol start is put into *offset.
NOTE: See IMPORTANT COMMENT above about persistence and ownership
in pub_tool_debuginfo.h */
-Bool VG_(get_datasym_and_offset)( Addr data_addr,
+Bool VG_(get_datasym_and_offset)( DiEpoch ep, Addr data_addr,
/*OUT*/const HChar** dname,
/*OUT*/PtrdiffT* offset )
{
return get_sym_name ( /*C++-demangle*/False, /*Z-demangle*/False,
/*below-main-renaming*/False,
- data_addr, dname,
+ ep, data_addr, dname,
/*match_anywhere_in_sym*/True,
/*show offset?*/False,
/*text sym*/False,
@@ -2038,7 +2243,7 @@
(1) the DebugInfo it belongs to is not discarded
(2) the segment containing the address is not merged with another segment
*/
-Bool VG_(get_objname) ( Addr a, const HChar** objname )
+Bool VG_(get_objname) ( DiEpoch ep, Addr a, const HChar** objname )
{
DebugInfo* di;
const NSegment *seg;
@@ -2047,6 +2252,8 @@
/* Look in the debugInfo_list to find the name. In most cases we
expect this to produce a result. */
for (di = debugInfo_list; di != NULL; di = di->next) {
+ if (!is_DI_valid_for_epoch(di, ep))
+ continue;
if (di->text_present
&& di->text_size > 0
&& di->text_avma <= a
@@ -2059,8 +2266,13 @@
the debugInfo_list, ask the address space manager whether it
knows the name of the file associated with this mapping. This
allows us to print the names of exe/dll files in the stack trace
- when running programs under wine. */
- if ( (seg = VG_(am_find_nsegment)(a)) != NULL
+ when running programs under wine.
+
+ Restrict this to the case where 'ep' is the current epoch, though, so
+ that we don't return information about this epoch when the caller was
+ enquiring about a different one. */
+ if ( eq_DiEpoch(ep, VG_(current_DiEpoch)())
+ && (seg = VG_(am_find_nsegment)(a)) != NULL
&& (filename = VG_(am_get_filename)(seg)) != NULL ) {
*objname = filename;
return True;
@@ -2070,12 +2282,14 @@
/* Map a code address to its DebugInfo. Returns NULL if not found. Doesn't
require debug info. */
-DebugInfo* VG_(find_DebugInfo) ( Addr a )
+DebugInfo* VG_(find_DebugInfo) ( DiEpoch ep, Addr a )
{
static UWord n_search = 0;
DebugInfo* di;
n_search++;
for (di = debugInfo_list; di != NULL; di = di->next) {
+ if (!is_DI_valid_for_epoch(di, ep))
+ continue;
if (di->text_present
&& di->text_size > 0
&& di->text_avma <= a
@@ -2091,13 +2305,13 @@
/* Map a code address to a filename. Returns True if successful. The
returned string is persistent as long as the DebugInfo to which it
belongs is not discarded. */
-Bool VG_(get_filename)( Addr a, const HChar** filename )
+Bool VG_(get_filename)( DiEpoch ep, Addr a, const HChar** filename )
{
DebugInfo* si;
Word locno;
UInt fndn_ix;
- search_all_loctabs ( a, &si, &locno );
+ search_all_loctabs ( ep, a, &si, &locno );
if (si == NULL)
return False;
fndn_ix = ML_(fndn_ix) (si, locno);
@@ -2106,11 +2320,11 @@
}
/* Map a code address to a line number. Returns True if successful. */
-Bool VG_(get_linenum)( Addr a, UInt* lineno )
+Bool VG_(get_linenum)( DiEpoch ep, Addr a, UInt* lineno )
{
DebugInfo* si;
Word locno;
- search_all_loctabs ( a, &si, &locno );
+ search_all_loctabs ( ep, a, &si, &locno );
if (si == NULL)
return False;
*lineno = si->loctab[locno].lineno;
@@ -2121,7 +2335,7 @@
/* Map a code address to a filename/line number/dir name info.
See prototype for detailed description of behaviour.
*/
-Bool VG_(get_filename_linenum) ( Addr a,
+Bool VG_(get_filename_linenum) ( DiEpoch ep, Addr a,
/*OUT*/const HChar** filename,
/*OUT*/const HChar** dirname,
/*OUT*/UInt* lineno )
@@ -2130,7 +2344,7 @@
Word locno;
UInt fndn_ix;
- search_all_loctabs ( a, &si, &locno );
+ search_all_loctabs ( ep, a, &si, &locno );
if (si == NULL) {
if (dirname) {
*dirname = "";
@@ -2159,7 +2373,8 @@
Therefore specify "*" to search all the objects. On TOC-afflicted
platforms, a symbol is deemed to be found only if it has a nonzero
TOC pointer. */
-Bool VG_(lookup_symbol_SLOW)(const HChar* sopatt, const HChar* name,
+Bool VG_(lookup_symbol_SLOW)(DiEpoch ep,
+ const HChar* sopatt, const HChar* name,
SymAVMAs* avmas)
{
Bool require_pToc = False;
@@ -2172,6 +2387,8 @@
for (si = debugInfo_list; si; si = si->next) {
if (debug)
VG_(printf)("lookup_symbol_SLOW: considering %s\n", si->soname);
+ if (!is_DI_valid_for_epoch(si, ep))
+ continue;
if (!VG_(string_match)(sopatt, si->soname)) {
if (debug)
VG_(printf)(" ... skip\n");
@@ -2254,7 +2471,7 @@
return n;
}
-const HChar* VG_(describe_IP)(Addr eip, const InlIPCursor *iipc)
+const HChar* VG_(describe_IP)(DiEpoch ep, Addr eip, const InlIPCursor *iipc)
{
static HChar *buf = NULL;
static SizeT bufsiz = 0;
@@ -2267,7 +2484,10 @@
HChar ibuf[50]; // large enough
SizeT n = 0;
- vg_assert (!iipc || iipc->eip == eip);
+ // An InlIPCursor is associated with one specific DebugInfo. So if
+ // it exists, make sure that it is valid for the specified DiEpoch.
+ vg_assert (!iipc
+ || (is_DI_valid_for_epoch(iipc->di, ep) && iipc->eip == eip));
const HChar *buf_fn;
const HChar *buf_obj;
@@ -2282,8 +2502,8 @@
if (is_bottom(iipc)) {
// At the bottom (towards main), we describe the fn at eip.
know_fnname = VG_(clo_sym_offsets)
- ? VG_(get_fnname_w_offset) (eip, &buf_fn)
- : VG_(get_fnname) (eip, &buf_fn);
+ ? VG_(get_fnname_w_offset) (ep, eip, &buf_fn)
+ : VG_(get_fnname) (ep, eip, &buf_fn);
} else {
const DiInlLoc *next_inl = iipc && iipc->next_inltab >= 0
? & iipc->di->inltab[iipc->next_inltab]
@@ -2301,12 +2521,12 @@
// ??? Currently never showing an offset.
}
- know_objname = VG_(get_objname)(eip, &buf_obj);
+ know_objname = VG_(get_objname)(ep, eip, &buf_obj);
if (is_top(iipc)) {
// The source for the highest level is in the loctab entry.
know_srcloc = VG_(get_filename_linenum)(
- eip,
+ ep, eip,
&buf_srcloc,
&buf_dirname,
&lineno
@@ -2459,6 +2679,20 @@
/*--- ---*/
/*--------------------------------------------------------------*/
+/* Note that the CFI machinery pertains to unwinding the stack "right now".
+ There is no support for unwinding stack images obtained from some time in
+ the past. That means that:
+
+ (1) We only deal with CFI from DebugInfos that are valid for the current
+ debuginfo epoch. Unlike in the rest of the file, there is no
+ epoch-awareness.
+
+ (2) We assume that the CFI cache will be invalidated every time the
+ epoch changes. This is done by ensuring (in the file above) that
+ every call to advance_current_DiEpoch has a call to
+ caches__invalidate alongside it.
+*/
+
/* Gather up all the constant pieces of info needed to evaluate
a CfiExpr into one convenient struct. */
typedef
@@ -2579,6 +2813,9 @@
*cfsi_mP to the cfsi_m pointer in that DebugInfo's cfsi_m_pool.
If not found, set *diP to (DebugInfo*)1 and *cfsi_mP to zero.
+
+ Per comments at the top of this section, we only look for CFI in
+ DebugInfos that are valid for the current epoch.
*/
__attribute__((noinline))
static void find_DiCfSI ( /*OUT*/DebugInfo** diP,
@@ -2594,10 +2831,15 @@
if (0) VG_(printf)("search for %#lx\n", ip);
+ DiEpoch curr_epoch = VG_(current_DiEpoch)();
+
for (di = debugInfo_list; di != NULL; di = di->next) {
Word j;
n_steps++;
+ if (!is_DI_valid_for_epoch(di, curr_epoch))
+ continue;
+
/* Use the per-DebugInfo summary address ranges to skip
inapplicable DebugInfos quickly. */
if (di->cfsi_used == 0)
@@ -2605,6 +2847,11 @@
if (ip < di->cfsi_minavma || ip > di->cfsi_maxavma)
continue;
+ // This di must be active (because we have explicitly chosen not to
+ // allow unwinding stacks that pertain to some past epoch). It can't
+ // be archived or not-yet-active.
+ vg_assert(is_DebugInfo_active(di));
+
/* It might be in this DebugInfo. Search it. */
j = ML_(search_one_cfitab)( di, ip );
vg_assert(j >= -1 && j < (Word)di->cfsi_used);
@@ -3032,6 +3279,7 @@
Bool VG_(use_FPO_info) ( /*MOD*/Addr* ipP,
/*MOD*/Addr* spP,
/*MOD*/Addr* fpP,
+ DiEpoch ep,
Addr min_accessible,
Addr max_accessible )
{
@@ -3049,6 +3297,9 @@
for (di = debugInfo_list; di != NULL; di = di->next) {
n_steps++;
+ if (!is_DI_valid_for_epoch(di, ep))
+ continue;
+
/* Use the per-DebugInfo summary address ranges to skip
inapplicable DebugInfos quickly. */
if (di->fpo == NULL)
@@ -3552,6 +3803,7 @@
static
Bool consider_vars_in_frame ( /*MOD*/XArray* /* of HChar */ dname1,
/*MOD*/XArray* /* of HChar */ dname2,
+ DiEpoch ep,
Addr data_addr,
Addr ip, Addr sp, Addr fp,
/* shown to user: */
@@ -3570,6 +3822,8 @@
/* first, find the DebugInfo that pertains to 'ip'. */
for (di = debugInfo_list; di; di = di->next) {
n_steps++;
+ if (!is_DI_valid_for_epoch(di, ep))
+ continue;
/* text segment missing? unlikely, but handle it .. */
if (!di->text_present || di->text_size == 0)
continue;
@@ -3687,7 +3941,7 @@
Bool VG_(get_data_description)(
/*MOD*/ XArray* /* of HChar */ dname1,
/*MOD*/ XArray* /* of HChar */ dname2,
- Addr data_addr
+ DiEpoch ep, Addr data_addr
)
{
# define N_FRAMES 8
@@ -3807,7 +4061,7 @@
vg_assert(n_frames >= 0 && n_frames <= N_FRAMES);
for (j = 0; j < n_frames; j++) {
if (consider_vars_in_frame( dname1, dname2,
- data_addr,
+ ep, data_addr,
ips[j],
sps[j], fps[j], tid, j )) {
zterm_XA( dname1 );
@@ -3834,7 +4088,7 @@
equivalent kludge. */
if (j > 0 /* this is a non-innermost frame */
&& consider_vars_in_frame( dname1, dname2,
- data_addr,
+ ep, data_addr,
ips[j] + 1,
sps[j], fps[j], tid, j )) {
zterm_XA( dname1 );
Index: coregrind/m_debuginfo/priv_storage.h
===================================================================
--- coregrind/m_debuginfo/priv_storage.h (revision 16465)
+++ coregrind/m_debuginfo/priv_storage.h (working copy)
@@ -588,6 +588,36 @@
structure is allocated. */
ULong handle;
+ /* The range of epochs for which this DebugInfo is valid. These also
+ divide the DebugInfo's lifetime into three parts:
+
+ (1) Allocated: but with only .fsm holding useful info -- in
+ particular, not yet holding any debug info.
+ .first_epoch == DebugInfoEpoch_INVALID
+ .last_epoch == DebugInfoEpoch_INVALID
+
+ (2) Active: containing debug info, and current.
+ .first_epoch != DebugInfoEpoch_INVALID
+ .last_epoch == DebugInfoEpoch_INVALID
+
+ (3) Archived: containing debug info, but no longer current.
+ .first_epoch != DebugInfoEpoch_INVALID
+ .last_epoch != DebugInfoEpoch_INVALID
+
+ State (2) corresponds to an object which is currently mapped. When
+ the object is unmapped, what happens depends on the setting of
+ --keep-debuginfo:
+
+ * when =no, the DebugInfo is removed from debugInfo_list and
+ deleted.
+
+ * when =yes, the DebugInfo is retained in debugInfo_list, but its
+ .last_epoch field is filled in, and current_epoch is advanced. This
+ effectively moves the DebugInfo into state (3).
+ */
+ DiEpoch first_epoch;
+ DiEpoch last_epoch;
+
/* Used for debugging only - indicate what stuff to dump whilst
reading stuff into the seginfo. Are computed as early in the
lifetime of the DebugInfo as possible -- at the point when it is
Index: coregrind/m_errormgr.c
===================================================================
--- coregrind/m_errormgr.c (revision 16465)
+++ coregrind/m_errormgr.c (working copy)
@@ -136,16 +136,16 @@
Int count;
// The tool-specific part
- ThreadId tid; // Initialised by core
- ExeContext* where; // Initialised by core
- ErrorKind ekind; // Used by ALL. Must be in the range (0..)
- Addr addr; // Used frequently
- const HChar* string; // Used frequently
- void* extra; // For any tool-specific extras
+ ThreadId tid; // Initialised by core
+ ExeContextAndEpoch where; // Initialised by core
+ ErrorKind ekind; // Used by ALL. Must be in the range (0..)
+ Addr addr; // Used frequently
+ const HChar* string; // Used frequently
+ void* extra; // For any tool-specific extras
};
-ExeContext* VG_(get_error_where) ( const Error* err )
+ExeContextAndEpoch VG_(get_error_where) ( const Error* err )
{
return err->where;
}
@@ -293,7 +293,10 @@
{
if (e1->ekind != e2->ekind)
return False;
- if (!VG_(eq_ExeContext)(res, e1->where, e2->where))
+ // This comparison ignores the debuginfo epoch. Result is that we
+ // could conclude this error is the same as one from some other epoch.
+ // I don't think that's a big deal in practice.
+ if (!VG_(eq_ExeContext)(res, e1->where.ec, e2->where.ec))
return False;
switch (e1->ekind) {
@@ -321,15 +324,15 @@
*/
#define ERRTXT_LEN 4096
-static void printSuppForIp_XML(UInt n, Addr ip, void* uu_opaque)
+static void printSuppForIp_XML(UInt n, DiEpoch ep, Addr ip, void* uu_opaque)
{
const HChar *buf;
- InlIPCursor* iipc = VG_(new_IIPC)(ip);
+ InlIPCursor* iipc = VG_(new_IIPC)(ep, ip);
do {
- if ( VG_(get_fnname_no_cxx_demangle) (ip, &buf, iipc) ) {
+ if ( VG_(get_fnname_no_cxx_demangle) (ep, ip, &buf, iipc) ) {
VG_(printf_xml)(" <sframe> <fun>%pS</fun> </sframe>\n", buf);
} else
- if ( VG_(get_objname)(ip, &buf) ) {
+ if ( VG_(get_objname)(ep, ip, &buf) ) {
VG_(printf_xml)(" <sframe> <obj>%pS</obj> </sframe>\n", buf);
} else {
VG_(printf_xml)(" <sframe> <obj>*</obj> </sframe>\n");
@@ -338,16 +341,16 @@
VG_(delete_IIPC)(iipc);
}
-static void printSuppForIp_nonXML(UInt n, Addr ip, void* textV)
+static void printSuppForIp_nonXML(UInt n, DiEpoch ep, Addr ip, void* textV)
{
const HChar *buf;
XArray* /* of HChar */ text = (XArray*)textV;
- InlIPCursor* iipc = VG_(new_IIPC)(ip);
+ InlIPCursor* iipc = VG_(new_IIPC)(ep, ip);
do {
- if ( VG_(get_fnname_no_cxx_demangle) (ip, &buf, iipc) ) {
+ if ( VG_(get_fnname_no_cxx_demangle) (ep, ip, &buf, iipc) ) {
VG_(xaprintf)(text, " fun:%s\n", buf);
} else
- if ( VG_(get_objname)(ip, &buf) ) {
+ if ( VG_(get_objname)(ep, ip, &buf) ) {
VG_(xaprintf)(text, " obj:%s\n", buf);
} else {
VG_(xaprintf)(text, " obj:*\n");
@@ -361,7 +364,7 @@
static void gen_suppression(const Error* err)
{
const HChar* name;
- ExeContext* ec;
+ ExeContextAndEpoch ece;
XArray* /* HChar */ text;
const HChar* dummy_name = "insert_a_suppression_name_here";
@@ -368,8 +371,8 @@
vg_assert(err);
- ec = VG_(get_error_where)(err);
- vg_assert(ec);
+ ece = VG_(get_error_where)(err);
+ vg_assert(ece.ec);
name = VG_TDICT_CALL(tool_get_error_name, err);
if (NULL == name) {
@@ -408,12 +411,12 @@
VG_(xaprintf)(text, " %s\n", xtra);
// Print stack trace elements
- UInt n_ips = VG_(get_ExeContext_n_ips)(ec);
+ UInt n_ips = VG_(get_ExeContext_n_ips)(ece.ec);
vg_assert(n_ips > 0);
vg_assert(n_ips <= VG_DEEPEST_BACKTRACE);
VG_(apply_StackTrace)(printSuppForIp_nonXML,
- text,
- VG_(get_ExeContext_StackTrace)(ec),
+ text, ece.epoch,
+ VG_(get_ExeContext_StackTrace)(ece.ec),
n_ips);
VG_(xaprintf)(text, "}\n");
@@ -441,9 +444,9 @@
// Print stack trace elements
VG_(apply_StackTrace)(printSuppForIp_XML,
- NULL,
- VG_(get_ExeContext_StackTrace)(ec),
- VG_(get_ExeContext_n_ips)(ec));
+ NULL, ece.epoch,
+ VG_(get_ExeContext_StackTrace)(ece.ec),
+ VG_(get_ExeContext_n_ips)(ece.ec));
// And now the cdata bit
// XXX FIXME! properly handle the case where the raw text
@@ -637,7 +640,7 @@
/* Construct an error */
static
void construct_error ( Error* err, ThreadId tid, ErrorKind ekind, Addr a,
- const HChar* s, void* extra, ExeContext* where )
+ const HChar* s, void* extra, ExeContextAndEpoch where )
{
/* DO NOT MAKE unique_counter NON-STATIC */
static UInt unique_counter = 0;
@@ -650,10 +653,7 @@
err->supp = NULL;
err->count = 1;
err->tid = tid;
- if (NULL == where)
- err->where = VG_(record_ExeContext)( tid, 0 );
- else
- err->where = where;
+ err->where = where;
/* Tool-relevant parts */
err->ekind = ekind;
@@ -744,7 +744,10 @@
}
/* Build ourselves the error */
- construct_error ( &err, tid, ekind, a, s, extra, NULL );
+ { ExeContextAndEpoch ece
+ = VG_(tag_EC_with_current_epoch)( VG_(record_ExeContext)( tid, 0 ) );
+ construct_error ( &err, tid, ekind, a, s, extra, ece );
+ }
/* First, see if we've got an error record matching this one. */
em_errlist_searches++;
@@ -853,7 +856,8 @@
Bool 'count_error' dictates whether to count the error in n_errs_found.
*/
Bool VG_(unique_error) ( ThreadId tid, ErrorKind ekind, Addr a, const HChar* s,
- void* extra, ExeContext* where, Bool print_error,
+ void* extra, ExeContextAndEpoch where,
+ Bool print_error,
Bool allow_db_attach, Bool count_error )
{
Error err;
@@ -1016,7 +1020,7 @@
vg_assert(! xml);
if ((i+1 == VG_(clo_dump_error))) {
- StackTrace ips = VG_(get_ExeContext_StackTrace)(p_min->where);
+ StackTrace ips = VG_(get_ExeContext_StackTrace)(p_min->where.ec);
VG_(translate) ( 0 /* dummy ThreadId; irrelevant due to debugging*/,
ips[0], /*debugging*/True, 0xFE/*verbosity*/,
/*bbs_done*/0,
@@ -1500,6 +1504,7 @@
allocations and the nr of debuginfo search. */
typedef
struct {
+ DiEpoch epoch; // used to interpret .ips
StackTrace ips; // stack trace we are lazily completing.
UWord n_ips; // nr of elements in ips.
@@ -1595,9 +1600,11 @@
su->sname,
filename,
su->sname_lineno);
- } else
+ } else {
VG_(dmsg)("errormgr matching end no suppression matched:\n");
- VG_(pp_StackTrace) (ip2fo->ips, ip2fo->n_ips);
+ }
+ // JRS 27 July 2017: is it OK to use the current epoch here?
+ VG_(pp_StackTrace) (ip2fo->epoch, ip2fo->ips, ip2fo->n_ips);
pp_ip2fo(ip2fo);
}
if (ip2fo->n_offsets_per_ip) VG_(free)(ip2fo->n_offsets_per_ip);
@@ -1660,7 +1667,8 @@
// up comparing "malloc" in the suppression against
// "_vgrZU_libcZdsoZa_malloc" in the backtrace, and the
// two of them need to be made to match.
- if (!VG_(get_fnname_no_cxx_demangle)(ip2fo->ips[ixInput],
+ if (!VG_(get_fnname_no_cxx_demangle)(ip2fo->epoch,
+ ip2fo->ips[ixInput],
&caller,
NULL))
caller = "???";
@@ -1681,7 +1689,7 @@
last_expand_pos_ips is the last offset in fun/obj where
ips[pos_ips] has been expanded. */
- if (!VG_(get_objname)(ip2fo->ips[pos_ips], &caller))
+ if (!VG_(get_objname)(ip2fo->epoch, ip2fo->ips[pos_ips], &caller))
caller = "???";
// Have all inlined calls pointing at this object name
@@ -1751,7 +1759,7 @@
const Addr IP = ip2fo->ips[ip2fo->n_ips_expanded];
InlIPCursor *iipc;
- iipc = VG_(new_IIPC)(IP);
+ iipc = VG_(new_IIPC)(ip2fo->epoch, IP);
// The only thing we really need is the nr of inlined fn calls
// corresponding to the IP we will expand.
// However, computing this is mostly the same as finding
@@ -1760,7 +1768,7 @@
const HChar *caller;
grow_offsets(ip2fo, ip2fo->n_expanded+1);
ip2fo->fun_offsets[ip2fo->n_expanded] = ip2fo->names_free;
- if (!VG_(get_fnname_no_cxx_demangle)(IP,
+ if (!VG_(get_fnname_no_cxx_demangle)(ip2fo->epoch, IP,
&caller,
iipc))
caller = "???";
@@ -1788,18 +1796,18 @@
}
}
-static Bool haveInputInpC (void* inputCompleter, UWord ixInput )
+static Bool haveInputInpC (void* inputCompleterV, UWord ixInput )
{
- IPtoFunOrObjCompleter* ip2fo = inputCompleter;
+ IPtoFunOrObjCompleter* ip2fo = (IPtoFunOrObjCompleter*)inputCompleterV;
expandInput(ip2fo, ixInput);
return ixInput < ip2fo->n_expanded;
}
static Bool supp_pattEQinp ( const void* supplocV, const void* addrV,
- void* inputCompleter, UWord ixInput )
+ void* inputCompleterV, UWord ixInput )
{
- const SuppLoc* supploc = supplocV; /* PATTERN */
- IPtoFunOrObjCompleter* ip2fo = inputCompleter;
+ const SuppLoc* supploc = (const SuppLoc*)supplocV; /* PATTERN */
+ IPtoFunOrObjCompleter* ip2fo = (IPtoFunOrObjCompleter*)inputCompleterV;
HChar* funobj_name; // Fun or Obj name.
Bool ret;
@@ -1926,8 +1934,9 @@
em_supplist_searches++;
/* Prepare the lazy input completer. */
- ip2fo.ips = VG_(get_ExeContext_StackTrace)(err->where);
- ip2fo.n_ips = VG_(get_ExeContext_n_ips)(err->where);
+ ip2fo.epoch = err->where.epoch;
+ ip2fo.ips = VG_(get_ExeContext_StackTrace)(err->where.ec);
+ ip2fo.n_ips = VG_(get_ExeContext_n_ips)(err->where.ec);
ip2fo.n_ips_expanded = 0;
ip2fo.n_expanded = 0;
ip2fo.sz_offsets = 0;
Index: coregrind/m_execontext.c
===================================================================
--- coregrind/m_execontext.c (revision 16465)
+++ coregrind/m_execontext.c (working copy)
@@ -116,7 +116,7 @@
/*------------------------------------------------------------*/
-/*--- Exported functions. ---*/
+/*--- ExeContext functions. ---*/
/*------------------------------------------------------------*/
static ExeContext* record_ExeContext_wrk2 ( const Addr* ips, UInt n_ips );
@@ -169,7 +169,9 @@
for (ec = ec_htab[i]; ec; ec = ec->chain) {
VG_(message)(Vg_DebugMsg, " exectx: stacktrace ecu %u n_ips %u\n",
ec->ecu, ec->n_ips);
- VG_(pp_StackTrace)( ec->ips, ec->n_ips );
+ // FIXME JRS 27 July 2017: is a fake epoch here OK?
+ DiEpoch ep = VG_(current_DiEpoch)();
+ VG_(pp_StackTrace)( ep, ec->ips, ec->n_ips );
}
}
VG_(message)(Vg_DebugMsg,
@@ -202,13 +204,6 @@
}
-/* Print an ExeContext. */
-void VG_(pp_ExeContext) ( ExeContext* ec )
-{
- VG_(pp_StackTrace)( ec->ips, ec->n_ips );
-}
-
-
/* Compare two ExeContexts. Number of callers considered depends on res. */
Bool VG_(eq_ExeContext) ( VgRes res, const ExeContext* e1,
const ExeContext* e2 )
@@ -544,12 +539,48 @@
return record_ExeContext_wrk2(ips, n_ips);
}
-ExeContext* VG_(null_ExeContext) (void)
+
+/*------------------------------------------------------------*/
+/*--- ExeContextAndEpoch functions. ---*/
+/*------------------------------------------------------------*/
+
+ExeContextAndEpoch VG_(tag_EC_with_current_epoch)( ExeContext* ec )
{
+ ExeContextAndEpoch ece;
+ ece.ec = ec;
+ ece.epoch = VG_(current_DiEpoch)();
+ return ece;
+}
+
+ExeContextAndEpoch VG_(invalid_ExeContextAndEpoch) ( void )
+{
+ ExeContextAndEpoch ece;
+ ece.ec = NULL/*invalid ExeContext*/;
+ ece.epoch.n = 0/*invalid epoch*/;
+ return ece;
+}
+
+
+void VG_(pp_ExeContextAndEpoch) ( ExeContextAndEpoch ece )
+{
+ VG_(pp_StackTrace)( ece.epoch, ece.ec->ips, ece.ec->n_ips );
+}
+
+ExeContextAndEpoch VG_(null_ExeContextAndEpoch) ( void )
+{
init_ExeContext_storage();
- return null_ExeContext;
+ ExeContextAndEpoch ece
+ = mk_ExeContextAndEpoch(null_ExeContext, VG_(current_DiEpoch)());
+ return ece;
}
+Bool VG_(is_null_ExeContextAndEpoch)( ExeContextAndEpoch ece )
+{
+ init_ExeContext_storage();
+ return ece.ec == null_ExeContext;
+}
+
+
/*--------------------------------------------------------------------*/
/*--- end m_execontext.c ---*/
/*--------------------------------------------------------------------*/
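
Illustrative sketch (not part of the patch): the new ExeContextAndEpoch functions above let a tool stamp a captured stack trace with the debuginfo epoch in force at capture time and print it later against the matching symbols. The fragment below is a hypothetical tool-side record; it assumes the post-patch tool headers declare ExeContextAndEpoch, VG_(tag_EC_with_current_epoch) and VG_(pp_ExeContextAndEpoch).

   #include "pub_tool_basics.h"
   #include "pub_tool_execontext.h"

   /* Hypothetical per-allocation record kept by a tool. */
   typedef struct {
      SizeT              szB;
      ExeContextAndEpoch where_allocated;
   } BlockInfo;

   static void remember_alloc ( BlockInfo* bi, ThreadId tid, SizeT szB )
   {
      bi->szB = szB;
      /* Capture the stack now and tag it with the current epoch, so it can
         still be symbolised after the code is later unmapped
         (given --keep-debuginfo=yes). */
      bi->where_allocated
         = VG_(tag_EC_with_current_epoch)( VG_(record_ExeContext)(tid, 0) );
   }

   static void report_alloc ( const BlockInfo* bi )
   {
      /* Re-symbolise against the epoch recorded at allocation time. */
      VG_(pp_ExeContextAndEpoch)( bi->where_allocated );
   }
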
Index: coregrind/m_gdbserver/m_gdbserver.c
===================================================================
--- coregrind/m_gdbserver/m_gdbserver.c (revision 16465)
+++ coregrind/m_gdbserver/m_gdbserver.c (working copy)
@@ -142,14 +142,17 @@
PtrdiffT offset;
if (w == 2) w = 0;
+ // FIXME JRS 28 July 2017: HACK! Is this correct?
+ const DiEpoch ep = VG_(current_DiEpoch)();
+
if (is_code) {
const HChar *name;
- name = VG_(describe_IP) (addr, NULL);
+ name = VG_(describe_IP) (ep, addr, NULL);
if (buf[w]) VG_(free)(buf[w]);
buf[w] = VG_(strdup)("gdbserver sym", name);
} else {
const HChar *name;
- VG_(get_datasym_and_offset) (addr, &name, &offset);
+ VG_(get_datasym_and_offset) (ep, addr, &name, &offset);
if (buf[w]) VG_(free)(buf[w]);
buf[w] = VG_(strdup)("gdbserver sym", name);
}
Index: coregrind/m_gdbserver/server.c
===================================================================
--- coregrind/m_gdbserver/server.c (revision 16465)
+++ coregrind/m_gdbserver/server.c (working copy)
@@ -195,6 +195,9 @@
int kwdid;
int int_value;
+ // FIXME JRS 28 July 2017: HACK! Is this correct?
+ const DiEpoch ep = VG_(current_DiEpoch)();
+
vg_assert (initial_valgrind_sink_saved);
strcpy (s, mon);
@@ -334,7 +337,7 @@
}
if (hostvisibility) {
const DebugInfo *tooldi
- = VG_(find_DebugInfo) ((Addr)handle_gdb_valgrind_command);
+ = VG_(find_DebugInfo) (ep, (Addr)handle_gdb_valgrind_command);
/* Normally, we should always find the tooldi. In case we
do not, suggest a 'likely somewhat working' address: */
const Addr tool_text_start
@@ -442,7 +445,7 @@
&dummy_sz, &ssaveptr)) {
// If tool provides location information, use that.
if (VG_(needs).info_location) {
- VG_TDICT_CALL(tool_info_location, address);
+ VG_TDICT_CALL(tool_info_location, ep, address);
}
// If tool does not provide location info, use the common one.
// Also use the common to compare with tool when debug log is set.
@@ -449,7 +452,7 @@
if (!VG_(needs).info_location || VG_(debugLog_getLevel)() > 0 ) {
AddrInfo ai;
ai.tag = Addr_Undescribed;
- VG_(describe_addr) (address, &ai);
+ VG_(describe_addr) (ep, address, &ai);
VG_(pp_addrinfo) (address, &ai);
VG_(clear_addrinfo) (&ai);
}
Index: coregrind/m_gdbserver/target.c
===================================================================
--- coregrind/m_gdbserver/target.c (revision 16465)
+++ coregrind/m_gdbserver/target.c (working copy)
@@ -209,7 +209,10 @@
static
const HChar* sym (Addr addr)
{
- return VG_(describe_IP) (addr, NULL);
+ // FIXME JRS 28 July 2017: HACK! Is this correct?
+ const DiEpoch ep = VG_(current_DiEpoch)();
+
+ return VG_(describe_IP) (ep, addr, NULL);
}
ThreadId vgdb_interrupted_tid = 0;
Index: coregrind/m_gdbserver/valgrind-low-arm.c
===================================================================
--- coregrind/m_gdbserver/valgrind-low-arm.c (revision 16465)
+++ coregrind/m_gdbserver/valgrind-low-arm.c (working copy)
@@ -149,8 +149,12 @@
// the debug info with the bit0 set
// (why can't debug info do that for us ???)
// (why if this is a 4 bytes thumb instruction ???)
- if (VG_(get_fnname_raw) (pc | 1, &fnname)) {
- if (VG_(lookup_symbol_SLOW)( "*", fnname, &avmas )) {
+
+ // FIXME JRS 28 July 2017: HACK! Is this correct?
+ const DiEpoch ep = VG_(current_DiEpoch)();
+
+ if (VG_(get_fnname_raw) (ep, pc | 1, &fnname)) {
+ if (VG_(lookup_symbol_SLOW)( ep, "*", fnname, &avmas )) {
dlog (1, "fnname %s lookupsym %p => %p %s.\n",
fnname, C2v(avmas.main), C2v(pc),
(avmas.main & 1 ? "thumb" : "arm"));
Index: coregrind/m_gdbserver/valgrind-low-mips32.c
===================================================================
--- coregrind/m_gdbserver/valgrind-low-mips32.c (revision 16465)
+++ coregrind/m_gdbserver/valgrind-low-mips32.c (working copy)
@@ -228,7 +228,11 @@
/* Make sure we don't scan back before the beginning of the current
function, since we may fetch constant data or insns that look like
a jump. */
- if (VG_(get_inst_offset_in_function) (bpaddr, &offset)) {
+
+ // FIXME JRS 28 July 2017: HACK! Is this correct?
+ const DiEpoch ep = VG_(current_DiEpoch)();
+
+ if (VG_(get_inst_offset_in_function) (ep, bpaddr, &offset)) {
func_addr = bpaddr - offset;
if (func_addr > boundary && func_addr <= bpaddr)
boundary = func_addr;
Index: coregrind/m_gdbserver/valgrind-low-mips64.c
===================================================================
--- coregrind/m_gdbserver/valgrind-low-mips64.c (revision 16465)
+++ coregrind/m_gdbserver/valgrind-low-mips64.c (working copy)
@@ -229,7 +229,11 @@
/* Make sure we don't scan back before the beginning of the current
function, since we may fetch constant data or insns that look like
a jump. */
- if (VG_(get_inst_offset_in_function) (bpaddr, &offset)) {
+
+ // FIXME JRS 28 July 2017: HACK! Is this correct?
+ const DiEpoch ep = VG_(current_DiEpoch)();
+
+ if (VG_(get_inst_offset_in_function) (ep, bpaddr, &offset)) {
func_addr = bpaddr - offset;
if (func_addr > boundary && func_addr <= bpaddr)
boundary = func_addr;
Index: coregrind/m_libcassert.c
===================================================================
--- coregrind/m_libcassert.c (revision 16465)
+++ coregrind/m_libcassert.c (working copy)
@@ -369,7 +369,7 @@
);
VG_(printf)("\nhost stacktrace:\n");
VG_(clo_xml) = False;
- VG_(pp_StackTrace) (ips, n_ips);
+ VG_(pp_StackTrace) (VG_(current_DiEpoch)(), ips, n_ips);
VG_(clo_xml) = save_clo_xml;
}
Index: coregrind/m_main.c
===================================================================
--- coregrind/m_main.c (revision 16465)
+++ coregrind/m_main.c (working copy)
@@ -128,6 +128,10 @@
" --error-exitcode=<number> exit code to return if errors found [0=disable]\n"
" --error-markers=<begin>,<end> add lines with begin/end markers before/after\n"
" each error output in plain text mode [none]\n"
+" --keep-debuginfo=no|yes Keep symbols etc for unloaded code [no]\n"
+" This allows stack traces for memory leaks to\n"
+" include file/line info for code that has been\n"
+" dlclose'd (or similar)\n"
" --show-below-main=no|yes continue stack traces below main() [no]\n"
" --default-suppressions=yes|no\n"
" load default suppressions [yes]\n"
@@ -626,6 +630,7 @@
else if VG_BOOL_CLO(arg, "--run-libc-freeres", VG_(clo_run_libc_freeres)) {}
else if VG_BOOL_CLO(arg, "--run-cxx-freeres", VG_(clo_run_cxx_freeres)) {}
else if VG_BOOL_CLO(arg, "--show-below-main", VG_(clo_show_below_main)) {}
+ else if VG_BOOL_CLO(arg, "--keep-debuginfo", VG_(clo_keep_debuginfo)) {}
else if VG_BOOL_CLO(arg, "--time-stamp", VG_(clo_time_stamp)) {}
else if VG_BOOL_CLO(arg, "--track-fds", VG_(clo_track_fds)) {}
else if VG_BOOL_CLO(arg, "--trace-children", VG_(clo_trace_children)) {}
Index: coregrind/m_options.c
===================================================================
--- coregrind/m_options.c (revision 16465)
+++ coregrind/m_options.c (working copy)
@@ -129,6 +129,7 @@
Bool VG_(clo_run_cxx_freeres) = True;
Bool VG_(clo_track_fds) = False;
Bool VG_(clo_show_below_main)= False;
+Bool VG_(clo_keep_debuginfo) = False;
Bool VG_(clo_show_emwarns) = False;
Word VG_(clo_max_stackframe) = 2000000;
UInt VG_(clo_max_threads) = MAX_THREADS_DEFAULT;
Index: coregrind/m_redir.c
===================================================================
--- coregrind/m_redir.c (revision 16465)
+++ coregrind/m_redir.c (working copy)
@@ -1859,8 +1859,9 @@
{
Bool ok;
const HChar *buf;
-
- ok = VG_(get_fnname_w_offset)(act->from_addr, &buf);
+
+ DiEpoch ep = VG_(current_DiEpoch)();
+ ok = VG_(get_fnname_w_offset)(ep, act->from_addr, &buf);
if (!ok) buf = "???";
// Stash away name1
HChar name1[VG_(strlen)(buf) + 1];
@@ -1867,7 +1868,7 @@
VG_(strcpy)(name1, buf);
const HChar *name2;
- ok = VG_(get_fnname_w_offset)(act->to_addr, &name2);
+ ok = VG_(get_fnname_w_offset)(ep, act->to_addr, &name2);
if (!ok) name2 = "???";
VG_(message)(Vg_DebugMsg, "%s0x%08lx (%-20s) %s-> (%04d.%d) 0x%08lx %s\n",
Index: coregrind/m_sbprofile.c
===================================================================
--- coregrind/m_sbprofile.c (revision 16465)
+++ coregrind/m_sbprofile.c (working copy)
@@ -74,6 +74,9 @@
VG_(printf)("Total score = %'llu\n\n", score_total);
+ // FIXME JRS 28 July 2017: this is probably not right in general
+ DiEpoch ep = VG_(current_DiEpoch)();
+
/* Print an initial per-block summary. */
VG_(printf)("rank ---cumulative--- -----self-----\n");
score_cumul = 0;
@@ -84,7 +87,7 @@
continue;
const HChar *name;
- VG_(get_fnname_w_offset)(tops[r].addr, &name);
+ VG_(get_fnname_w_offset)(ep, tops[r].addr, &name);
score_here = tops[r].score;
score_cumul += score_here;
@@ -123,7 +126,7 @@
continue;
const HChar *name;
- VG_(get_fnname_w_offset)(tops[r].addr, &name);
+ VG_(get_fnname_w_offset)(ep, tops[r].addr, &name);
score_here = tops[r].score;
score_cumul += score_here;
@@ -159,7 +162,7 @@
continue;
const HChar *name;
- VG_(get_fnname_w_offset)(tops[r].addr, &name);
+ VG_(get_fnname_w_offset)(ep, tops[r].addr, &name);
score_here = tops[r].score;
Index: coregrind/m_scheduler/scheduler.c
===================================================================
--- coregrind/m_scheduler/scheduler.c (revision 16465)
+++ coregrind/m_scheduler/scheduler.c (working copy)
@@ -2037,8 +2037,12 @@
VG_(memset)(buf64, 0, 64);
UInt linenum = 0;
+
+ // FIXME JRS 28 July 2017: HACK! Is this correct?
+ const DiEpoch ep = VG_(current_DiEpoch)();
+
Bool ok = VG_(get_filename_linenum)(
- ip, &buf, NULL, &linenum
+ ep, ip, &buf, NULL, &linenum
);
if (ok) {
/* For backward compatibility truncate the filename to
Index: coregrind/m_signals.c
===================================================================
--- coregrind/m_signals.c (revision 16465)
+++ coregrind/m_signals.c (working copy)
@@ -1878,7 +1878,7 @@
: VG_(record_depth_1_ExeContext)( tid,
first_ip_delta );
vg_assert(ec);
- VG_(pp_ExeContext)( ec );
+ VG_(pp_ExeContextAndEpoch)( VG_(tag_EC_with_current_epoch)(ec) );
}
if (sigNo == VKI_SIGSEGV
&& is_signal_from_kernel(tid, sigNo, info->si_code)
Index: coregrind/m_stacktrace.c
===================================================================
--- coregrind/m_stacktrace.c (revision 16465)
+++ coregrind/m_stacktrace.c (working copy)
@@ -446,6 +446,7 @@
/* And, similarly, try for MSVC FPO unwind info. */
if (FPO_info_present
&& VG_(use_FPO_info)( &uregs.xip, &uregs.xsp, &uregs.xbp,
+ VG_(current_DiEpoch)(),
fp_min, fp_max ) ) {
if (debug) unwind_case = "MS";
if (do_stats) stats.MS++;
@@ -1539,12 +1540,12 @@
stack_highest_byte);
}
-static void printIpDesc(UInt n, Addr ip, void* uu_opaque)
+static void printIpDesc(UInt n, DiEpoch ep, Addr ip, void* uu_opaque)
{
- InlIPCursor *iipc = VG_(new_IIPC)(ip);
+ InlIPCursor *iipc = VG_(new_IIPC)(ep, ip);
do {
- const HChar *buf = VG_(describe_IP)(ip, iipc);
+ const HChar *buf = VG_(describe_IP)(ep, ip, iipc);
if (VG_(clo_xml)) {
VG_(printf_xml)(" %s\n", buf);
} else {
@@ -1558,7 +1559,7 @@
}
/* Print a StackTrace. */
-void VG_(pp_StackTrace) ( StackTrace ips, UInt n_ips )
+void VG_(pp_StackTrace) ( DiEpoch ep, StackTrace ips, UInt n_ips )
{
vg_assert( n_ips > 0 );
@@ -1565,7 +1566,7 @@
if (VG_(clo_xml))
VG_(printf_xml)(" <stack>\n");
- VG_(apply_StackTrace)( printIpDesc, NULL, ips, n_ips );
+ VG_(apply_StackTrace)( printIpDesc, NULL, ep, ips, n_ips );
if (VG_(clo_xml))
VG_(printf_xml)(" </stack>\n");
@@ -1580,13 +1581,13 @@
NULL/*array to dump SP values in*/,
NULL/*array to dump FP values in*/,
0/*first_ip_delta*/);
- VG_(pp_StackTrace)(ips, n_ips);
+ VG_(pp_StackTrace)(VG_(current_DiEpoch)(), ips, n_ips);
}
void VG_(apply_StackTrace)(
- void(*action)(UInt n, Addr ip, void* opaque),
+ void(*action)(UInt n, DiEpoch ep, Addr ip, void* opaque),
void* opaque,
- StackTrace ips, UInt n_ips
+ DiEpoch ep, StackTrace ips, UInt n_ips
)
{
Int i;
@@ -1597,7 +1598,7 @@
// or the last appearance of a below main function.
// Then decrease n_ips so as to not call action for the below main
for (i = n_ips - 1; i >= 0; i--) {
- Vg_FnNameKind kind = VG_(get_fnname_kind_from_IP)(ips[i]);
+ Vg_FnNameKind kind = VG_(get_fnname_kind_from_IP)(ep, ips[i]);
if (Vg_FnNameMain == kind || Vg_FnNameBelowMain == kind)
n_ips = i + 1;
if (Vg_FnNameMain == kind)
@@ -1607,7 +1608,7 @@
for (i = 0; i < n_ips; i++)
// Act on the ip
- action(i, ips[i], opaque);
+ action(i, ep, ips[i], opaque);
}
Index: coregrind/m_syswrap/syswrap-generic.c
===================================================================
--- coregrind/m_syswrap/syswrap-generic.c (revision 16465)
+++ coregrind/m_syswrap/syswrap-generic.c (working copy)
@@ -540,7 +540,8 @@
{
Int fd; /* The file descriptor */
HChar *pathname; /* NULL if not a regular file or unknown */
- ExeContext *where; /* NULL if inherited from parent */
+ ExeContextAndEpoch where; /* VG_(null_ExeContextAndEpoch)
+ if inherited from parent */
struct OpenFd *next, *prev;
} OpenFd;
@@ -614,7 +615,10 @@
i->fd = fd;
i->pathname = VG_(strdup)("syswrap.rfdowgn.2", pathname);
- i->where = (tid == -1) ? NULL : VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
+ i->where = (tid == -1)
+ ? VG_(null_ExeContextAndEpoch)()
+ : VG_(tag_EC_with_current_epoch)(
+ VG_(record_ExeContext)(tid, 0/*first_ip_delta*/));
}
// Record opening of an fd, and find its name.
@@ -846,8 +850,8 @@
}
}
- if(i->where) {
- VG_(pp_ExeContext)(i->where);
+ if (!VG_(is_null_ExeContextAndEpoch)(i->where)) {
+ VG_(pp_ExeContextAndEpoch)(i->where);
VG_(message)(Vg_UserMsg, "\n");
} else {
VG_(message)(Vg_UserMsg, " <inherited from parent>\n");
Index: coregrind/m_tooliface.c
===================================================================
--- coregrind/m_tooliface.c (revision 16465)
+++ coregrind/m_tooliface.c (working copy)
@@ -323,7 +323,7 @@
}
void VG_(needs_info_location) (
- void (*info_location)(Addr)
+ void (*info_location)(DiEpoch, Addr)
)
{
VG_(needs).info_location = True;
Index: coregrind/m_translate.c
===================================================================
--- coregrind/m_translate.c (revision 16465)
+++ coregrind/m_translate.c (working copy)
@@ -1527,12 +1527,13 @@
Bool ok;
const HChar *buf;
const HChar *name2;
+ const DiEpoch ep = VG_(current_DiEpoch)();
/* Try also to get the soname (not the filename) of the "from"
object. This makes it much easier to debug redirection
problems. */
const HChar* nraddr_soname = "???";
- DebugInfo* nraddr_di = VG_(find_DebugInfo)(nraddr);
+ DebugInfo* nraddr_di = VG_(find_DebugInfo)(ep, nraddr);
if (nraddr_di) {
const HChar* t = VG_(DebugInfo_get_soname)(nraddr_di);
if (t)
@@ -1539,12 +1540,12 @@
nraddr_soname = t;
}
- ok = VG_(get_fnname_w_offset)(nraddr, &buf);
+ ok = VG_(get_fnname_w_offset)(ep, nraddr, &buf);
if (!ok) buf = "???";
// Stash away name1
HChar name1[VG_(strlen)(buf) + 1];
VG_(strcpy)(name1, buf);
- ok = VG_(get_fnname_w_offset)(addr, &name2);
+ ok = VG_(get_fnname_w_offset)(ep, addr, &name2);
if (!ok) name2 = "???";
VG_(message)(Vg_DebugMsg,
@@ -1561,7 +1562,8 @@
if (VG_(clo_trace_flags) || debugging_translation) {
const HChar* objname = "UNKNOWN_OBJECT";
OffT objoff = 0;
- DebugInfo* di = VG_(find_DebugInfo)( addr );
+ const DiEpoch ep = VG_(current_DiEpoch)();
+ DebugInfo* di = VG_(find_DebugInfo)( ep, addr );
if (di) {
objname = VG_(DebugInfo_get_filename)(di);
objoff = addr - VG_(DebugInfo_get_text_bias)(di);
@@ -1569,7 +1571,7 @@
vg_assert(objname);
const HChar *fnname;
- Bool ok = VG_(get_fnname_w_offset)(addr, &fnname);
+ Bool ok = VG_(get_fnname_w_offset)(ep, addr, &fnname);
if (!ok) fnname = "UNKNOWN_FUNCTION";
VG_(printf)(
"==== SB %u (evchecks %llu) [tid %u] 0x%lx %s %s%c0x%lx\n",
Index: coregrind/m_xtree.c
===================================================================
--- coregrind/m_xtree.c (revision 16465)
+++ coregrind/m_xtree.c (working copy)
@@ -438,6 +438,9 @@
const HChar* filename_dir;
const HChar* filename_name;
+ // FIXME JRS 28 July 2017: HACK! Is this correct?
+ const DiEpoch ep = VG_(current_DiEpoch)();
+
if (fp == NULL)
return;
@@ -501,7 +504,7 @@
// the strings called_filename/called_fnname.
#define CALLED_FLF(n) \
if ((n) < 0 \
- || !VG_(get_filename_linenum)(ips[(n)], \
+ || !VG_(get_filename_linenum)(ep, ips[(n)], \
&filename_name, \
&filename_dir, \
&called_linenum)) { \
@@ -509,7 +512,7 @@
called_linenum = 0; \
} \
if ((n) < 0 \
- || !VG_(get_fnname)(ips[(n)], &called_fnname)) { \
+ || !VG_(get_fnname)(ep, ips[(n)], &called_fnname)) { \
called_fnname = "UnknownFn???"; \
} \
{ \
@@ -554,7 +557,9 @@
if (0) {
VG_(printf)("entry img %s\n", img);
- VG_(pp_ExeContext)(xe->ec);
+ // JRS 27 July 2017: it may be a hack to use the current epoch
+ // here. I don't know.
+ VG_(pp_ExeContextAndEpoch)(VG_(tag_EC_with_current_epoch)(xe->ec));
VG_(printf)("\n");
}
xt->add_data_fn(xt->tmp_data, VG_(indexXA)(xt->data, xecu));
@@ -762,11 +767,14 @@
ms_make_groups(depth+1, group->ms_ec, group->n_ec, sig_sz,
&n_groups, &groups);
+ // FIXME JRS 28 July 2017: HACK! Is this correct?
+ const DiEpoch ep = VG_(current_DiEpoch)();
+
FP("%*s" "n%u: %ld %s\n",
depth + 1, "",
n_groups,
group->total,
- VG_(describe_IP)(group->ms_ec->ips[depth] - 1, NULL));
+ VG_(describe_IP)(ep, group->ms_ec->ips[depth] - 1, NULL));
/* XTREE??? Massif original code removes 1 to get the IP description. I am
wondering if this is not something that predates revision r8818,
which introduced a -1 in the stack unwind (see m_stacktrace.c)
@@ -963,6 +971,10 @@
from there.
If no main is found, we will then do a search for main or
below main function till the top. */
+
+ // FIXME JRS 28 July 2017: HACK! Is this correct?
+ const DiEpoch ep = VG_(current_DiEpoch)();
+
static Int deepest_main = 0;
Vg_FnNameKind kind = Vg_FnNameNormal;
Int mbm = n_ips - 1; // Position of deepest main or below main.
@@ -972,7 +984,7 @@
for (i = n_ips - 1 - deepest_main;
i < n_ips;
i++) {
- mbmkind = VG_(get_fnname_kind_from_IP)(ips[i]);
+ mbmkind = VG_(get_fnname_kind_from_IP)(ep, ips[i]);
if (mbmkind != Vg_FnNameNormal) {
mbm = i;
break;
@@ -983,7 +995,7 @@
for (i = mbm - 1;
i >= 0 && mbmkind != Vg_FnNameMain;
i--) {
- kind = VG_(get_fnname_kind_from_IP)(ips[i]);
+ kind = VG_(get_fnname_kind_from_IP)(ep, ips[i]);
if (kind != Vg_FnNameNormal) {
mbm = i;
mbmkind = kind;
Index: coregrind/pub_core_debuginfo.h
===================================================================
--- coregrind/pub_core_debuginfo.h (revision 16465)
+++ coregrind/pub_core_debuginfo.h (working copy)
@@ -86,13 +86,13 @@
* It should only be used in cases where the names of interest will have
* particular (ie. non-mangled) forms, or the mangled form is acceptable. */
extern
-Bool VG_(get_fnname_raw) ( Addr a, const HChar** buf );
+Bool VG_(get_fnname_raw) ( DiEpoch ep, Addr a, const HChar** buf );
/* Like VG_(get_fnname), but without C++ demangling. (But it does
Z-demangling and below-main renaming.)
iipc argument: same usage as in VG_(describe_IP) in pub_tool_debuginfo.h. */
extern
-Bool VG_(get_fnname_no_cxx_demangle) ( Addr a, const HChar** buf,
+Bool VG_(get_fnname_no_cxx_demangle) ( DiEpoch ep, Addr a, const HChar** buf,
const InlIPCursor* iipc );
/* mips-linux only: find the offset of current address. This is needed for
@@ -99,7 +99,8 @@
stack unwinding for MIPS.
*/
extern
-Bool VG_(get_inst_offset_in_function)( Addr a, /*OUT*/PtrdiffT* offset );
+Bool VG_(get_inst_offset_in_function)( DiEpoch ep, Addr a,
+ /*OUT*/PtrdiffT* offset );
/* Use DWARF2/3 CFA information to do one step of stack unwinding.
@@ -158,6 +159,7 @@
extern Bool VG_(use_FPO_info) ( /*MOD*/Addr* ipP,
/*MOD*/Addr* spP,
/*MOD*/Addr* fpP,
+ DiEpoch ep,
Addr min_accessible,
Addr max_accessible );
@@ -217,7 +219,7 @@
/* ppc64-linux only: find the TOC pointer (R2 value) that should be in
force at the entry point address of the function containing
guest_code_addr. Returns 0 if not known. */
-extern Addr VG_(get_tocptr) ( Addr guest_code_addr );
+extern Addr VG_(get_tocptr) ( DiEpoch ep, Addr guest_code_addr );
/* Map a function name to its SymAVMAs. Is done by
sequential search of all symbol tables, so is very slow. To
@@ -227,7 +229,8 @@
platforms, a symbol is deemed to be found only if it has a nonzero
TOC pointer. */
extern
-Bool VG_(lookup_symbol_SLOW)(const HChar* sopatt, const HChar* name,
+Bool VG_(lookup_symbol_SLOW)(DiEpoch ep,
+ const HChar* sopatt, const HChar* name,
SymAVMAs* avmas);
#endif // __PUB_CORE_DEBUGINFO_H
Index: coregrind/pub_core_tooliface.h
===================================================================
--- coregrind/pub_core_tooliface.h (revision 16465)
+++ coregrind/pub_core_tooliface.h (working copy)
@@ -156,7 +156,7 @@
void (*tool_print_stats)(void);
// VG_(needs).info_location
- void (*tool_info_location)(Addr a);
+ void (*tool_info_location)(DiEpoch ep, Addr a);
// VG_(needs).malloc_replacement
void* (*tool_malloc) (ThreadId, SizeT);
Index: docs/xml/manual-core.xml
===================================================================
--- docs/xml/manual-core.xml (revision 16465)
+++ docs/xml/manual-core.xml (working copy)
@@ -1219,6 +1219,19 @@
</listitem>
</varlistentry>
+ <varlistentry id="opt.keep-debuginfo" xreflabel="--keep-debuginfo">
+ <term>
+ <option><![CDATA[--keep-debuginfo=<yes|no> [default: no] ]]></option>
+ </term>
+ <listitem>
+ <para>When enabled, keep symbols and all other debuginfo for unloaded
+ code. This allows stack traces for memory leaks to include file/line
+ info for code that has been dlclose'd (or similar). Be careful with
+ this, since it can lead to unbounded memory use for programs which
+       repeatedly load and unload shared objects.</para>
+ </listitem>
+ </varlistentry>
+
<varlistentry id="opt.show-below-main" xreflabel="--show-below-main">
<term>
<option><![CDATA[--show-below-main=<yes|no> [default: no] ]]></option>
Index: drd/drd_error.c
===================================================================
--- drd/drd_error.c (revision 16465)
+++ drd/drd_error.c (working copy)
@@ -139,12 +139,14 @@
" <what>%pS</what>\n"
" <address>0x%lx</address>\n",
DRD_(clientobj_type_name)(cl->any.type), obj);
- VG_(pp_ExeContext)(cl->any.first_observed_at);
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(cl->any.first_observed_at));
print_err_detail(" </first_observed_at>\n");
} else {
print_err_detail("%s 0x%lx was first observed at:\n",
DRD_(clientobj_type_name)(cl->any.type), obj);
- VG_(pp_ExeContext)(cl->any.first_observed_at);
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(cl->any.first_observed_at));
}
}
}
@@ -161,6 +163,7 @@
const HChar* const indent = xml ? " " : "";
AddrInfo ai;
+ DiEpoch ep = VG_(current_DiEpoch)();
XArray* /* of HChar */ descr1
= VG_(newXA)( VG_(malloc), "drd.error.drdr2.1",
VG_(free), sizeof(HChar) );
@@ -172,7 +175,7 @@
tl_assert(dri->addr);
tl_assert(dri->size > 0);
- (void) VG_(get_data_description)(descr1, descr2, dri->addr);
+ (void) VG_(get_data_description)(descr1, descr2, ep, dri->addr);
/* If there's nothing in descr1/2, free them. Why is it safe to
VG_(indexXA) at zero here? Because VG_(get_data_description)
guarantees to zero terminate descr1/2 regardless of the outcome
@@ -202,7 +205,7 @@
what_prefix, dri->access_type == eStore ? "store" : "load",
dri->tid, dri->addr, dri->size, what_suffix);
- VG_(pp_ExeContext)(VG_(get_error_where)(err));
+ VG_(pp_ExeContextAndEpoch)(VG_(get_error_where)(err));
if (descr1 != NULL) {
print_err_detail("%s%s\n", indent, (HChar*)VG_(indexXA)(descr1, 0));
if (descr2 != NULL)
@@ -216,7 +219,7 @@
print_err_detail(" <allocation_context>\n");
else
print_err_detail(" Allocation context:\n");
- VG_(pp_ExeContext)(ai.lastchange);
+ VG_(pp_ExeContextAndEpoch)(mk_ExeContextAndEpoch(ai.lastchange, ep));
if (xml)
print_err_detail(" </allocation_context>\n");
} else {
@@ -322,7 +325,7 @@
print_err_detail("%sThe object at address 0x%lx is not a mutex.%s\n",
what_prefix, p->mutex, what_suffix);
}
- VG_(pp_ExeContext)(VG_(get_error_where)(e));
+ VG_(pp_ExeContextAndEpoch)(VG_(get_error_where)(e));
first_observed(p->mutex);
break;
}
@@ -330,7 +333,7 @@
CondErrInfo* cdei =(CondErrInfo*)(VG_(get_error_extra)(e));
print_err_detail("%s%s: cond 0x%lx%s\n", what_prefix,
VG_(get_error_string)(e), cdei->cond, what_suffix);
- VG_(pp_ExeContext)(VG_(get_error_where)(e));
+ VG_(pp_ExeContextAndEpoch)(VG_(get_error_where)(e));
first_observed(cdei->cond);
break;
}
@@ -339,7 +342,7 @@
print_err_detail("%s%s: cond 0x%lx, mutex 0x%lx locked by thread %u%s\n",
what_prefix, VG_(get_error_string)(e), cdi->cond,
cdi->mutex, cdi->owner, what_suffix);
- VG_(pp_ExeContext)(VG_(get_error_where)(e));
+ VG_(pp_ExeContextAndEpoch)(VG_(get_error_where)(e));
first_observed(cdi->mutex);
break;
}
@@ -349,7 +352,7 @@
" has been signaled but the associated mutex 0x%lx is"
" not locked by the signalling thread.%s\n",
what_prefix, cei->cond, cei->mutex, what_suffix);
- VG_(pp_ExeContext)(VG_(get_error_where)(e));
+ VG_(pp_ExeContextAndEpoch)(VG_(get_error_where)(e));
first_observed(cei->cond);
first_observed(cei->mutex);
break;
@@ -359,7 +362,7 @@
print_err_detail("%s%s: condition variable 0x%lx, mutexes 0x%lx and"
" 0x%lx%s\n", what_prefix, VG_(get_error_string)(e),
cwei->cond, cwei->mutex1, cwei->mutex2, what_suffix);
- VG_(pp_ExeContext)(VG_(get_error_where)(e));
+ VG_(pp_ExeContextAndEpoch)(VG_(get_error_where)(e));
first_observed(cwei->cond);
first_observed(cwei->mutex1);
first_observed(cwei->mutex2);
@@ -370,7 +373,7 @@
tl_assert(sei);
print_err_detail("%s%s: semaphore 0x%lx%s\n", what_prefix,
VG_(get_error_string)(e), sei->semaphore, what_suffix);
- VG_(pp_ExeContext)(VG_(get_error_where)(e));
+ VG_(pp_ExeContextAndEpoch)(VG_(get_error_where)(e));
first_observed(sei->semaphore);
break;
}
@@ -379,13 +382,14 @@
tl_assert(bei);
print_err_detail("%s%s: barrier 0x%lx%s\n", what_prefix,
VG_(get_error_string)(e), bei->barrier, what_suffix);
- VG_(pp_ExeContext)(VG_(get_error_where)(e));
+ VG_(pp_ExeContextAndEpoch)(VG_(get_error_where)(e));
if (bei->other_context) {
if (xml)
print_err_detail(" <confl_wait_call>\n");
print_err_detail("%sConflicting wait call by thread %u:%s\n",
what_prefix, bei->other_tid, what_suffix);
- VG_(pp_ExeContext)(bei->other_context);
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(bei->other_context));
if (xml)
print_err_detail(" </confl_wait_call>\n");
}
@@ -397,7 +401,7 @@
tl_assert(p);
print_err_detail("%s%s: rwlock 0x%lx.%s\n", what_prefix,
VG_(get_error_string)(e), p->rwlock, what_suffix);
- VG_(pp_ExeContext)(VG_(get_error_where)(e));
+ VG_(pp_ExeContextAndEpoch)(VG_(get_error_where)(e));
first_observed(p->rwlock);
break;
}
@@ -409,7 +413,8 @@
print_err_detail(" <acquired_at>\n");
else
print_err_detail("Acquired at:\n");
- VG_(pp_ExeContext)(p->acquired_at);
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(p->acquired_at));
if (xml)
print_err_detail(" </acquired_at>\n");
print_err_detail("%sLock on %s 0x%lx was held during %u ms"
@@ -416,7 +421,7 @@
" (threshold: %u ms).%s\n", what_prefix,
VG_(get_error_string)(e), p->synchronization_object,
p->hold_time_ms, p->threshold_ms, what_suffix);
- VG_(pp_ExeContext)(VG_(get_error_where)(e));
+ VG_(pp_ExeContextAndEpoch)(VG_(get_error_where)(e));
first_observed(p->synchronization_object);
break;
}
@@ -424,7 +429,7 @@
GenericErrInfo* gei = (GenericErrInfo*)(VG_(get_error_extra)(e));
print_err_detail("%s%s%s\n", what_prefix, VG_(get_error_string)(e),
what_suffix);
- VG_(pp_ExeContext)(VG_(get_error_where)(e));
+ VG_(pp_ExeContextAndEpoch)(VG_(get_error_where)(e));
if (gei->addr)
first_observed(gei->addr);
break;
@@ -433,7 +438,7 @@
InvalidThreadIdInfo* iti =(InvalidThreadIdInfo*)(VG_(get_error_extra)(e));
print_err_detail("%s%s 0x%llx%s\n", what_prefix, VG_(get_error_string)(e),
iti->ptid, what_suffix);
- VG_(pp_ExeContext)(VG_(get_error_where)(e));
+ VG_(pp_ExeContextAndEpoch)(VG_(get_error_where)(e));
break;
}
case UnimpHgClReq: {
@@ -441,7 +446,7 @@
print_err_detail("%sThe annotation macro %s has not yet been implemented"
" in %ps%s\n", what_prefix, uicr->descr,
"<valgrind/helgrind.h>", what_suffix);
- VG_(pp_ExeContext)(VG_(get_error_where)(e));
+ VG_(pp_ExeContextAndEpoch)(VG_(get_error_where)(e));
break;
}
case UnimpDrdClReq: {
@@ -449,13 +454,13 @@
print_err_detail("%sThe annotation macro %s has not yet been implemented"
" in %ps%s\n", what_prefix, uicr->descr,
"<valgrind/drd.h>", what_suffix);
- VG_(pp_ExeContext)(VG_(get_error_where)(e));
+ VG_(pp_ExeContextAndEpoch)(VG_(get_error_where)(e));
break;
}
default:
print_err_detail("%s%s%s\n", what_prefix, VG_(get_error_string)(e),
what_suffix);
- VG_(pp_ExeContext)(VG_(get_error_where)(e));
+ VG_(pp_ExeContextAndEpoch)(VG_(get_error_where)(e));
break;
}
}
Index: drd/drd_thread.c
===================================================================
--- drd/drd_thread.c (revision 16465)
+++ drd/drd_thread.c (working copy)
@@ -1338,7 +1338,8 @@
if (vg_tid != VG_INVALID_THREADID) {
if (callstack)
- VG_(pp_ExeContext)(callstack);
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(callstack));
else
VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
} else {
Index: exp-bbv/bbv_main.c
===================================================================
--- exp-bbv/bbv_main.c (revision 16465)
+++ exp-bbv/bbv_main.c (working copy)
@@ -346,6 +346,7 @@
IRDirty *di;
IRExpr **argv, *arg1;
Int regparms,opcode_type;
+ DiEpoch ep = VG_(current_DiEpoch)();
/* We don't handle a host/guest word size mismatch */
if (gWordTy != hWordTy) {
@@ -392,8 +393,8 @@
block_num++;
/* get function name and entry point information */
const HChar *fn_name;
- VG_(get_fnname)(origAddr, &fn_name);
- bbInfo->is_entry=VG_(get_fnname_if_entry)(origAddr, &fn_name);
+ VG_(get_fnname)(ep, origAddr, &fn_name);
+ bbInfo->is_entry=VG_(get_fnname_if_entry)(ep, origAddr, &fn_name);
bbInfo->fn_name =VG_(strdup)("bbv_strings", fn_name);
/* insert structure into table */
VG_(OSetGen_Insert)( instr_info_table, bbInfo );
Index: exp-dhat/dh_main.c
===================================================================
--- exp-dhat/dh_main.c (revision 16465)
+++ exp-dhat/dh_main.c (working copy)
@@ -1146,7 +1146,7 @@
bufR, bufW,
api->n_reads, api->n_writes);
- VG_(pp_ExeContext)(api->ap);
+ VG_(pp_ExeContextAndEpoch)(VG_(tag_EC_with_current_epoch)(api->ap));
if (api->histo && api->xsize_tag == Exactly) {
VG_(umsg)("\nAggregated access counts by offset:\n");
Index: exp-sgcheck/pc_common.c
===================================================================
--- exp-sgcheck/pc_common.c (revision 16465)
+++ exp-sgcheck/pc_common.c (working copy)
@@ -322,7 +322,7 @@
emit( " <what>Invalid %s of size %ld</what>\n",
xe->XE.SorG.sszB < 0 ? "write" : "read",
Word__abs(xe->XE.SorG.sszB) );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
emit( " <auxwhat>Address %#lx expected vs actual:</auxwhat>\n",
xe->XE.SorG.addr );
@@ -336,7 +336,7 @@
emit( "Invalid %s of size %ld\n",
xe->XE.SorG.sszB < 0 ? "write" : "read",
Word__abs(xe->XE.SorG.sszB) );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
emit( " Address %#lx expected vs actual:\n", xe->XE.SorG.addr );
emit( " Expected: %s\n", &xe->XE.SorG.expect[0] );
@@ -362,7 +362,7 @@
emit( " <what>Invalid %s of size %ld</what>\n",
readwrite(xe->XE.Heap.sszB),
Word__abs(xe->XE.Heap.sszB) );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
emit( " <auxwhat>Address %#lx is not derived from "
"any known block</auxwhat>\n", a );
@@ -372,7 +372,7 @@
emit( "Invalid %s of size %ld\n",
readwrite(xe->XE.Heap.sszB),
Word__abs(xe->XE.Heap.sszB) );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
emit( " Address %#lx is not derived from "
"any known block\n", a );
@@ -397,7 +397,7 @@
how_invalid,
readwrite(xe->XE.Heap.sszB),
Word__abs(xe->XE.Heap.sszB) );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
emit( " <auxwhat>Address %#lx is %lu bytes %s "
"the accessing pointer's</auxwhat>\n",
@@ -406,7 +406,8 @@
"a block of size %lu %s</auxwhat>\n",
legit, Seg__size(vseg),
Seg__is_freed(vseg) ? "free'd" : "alloc'd" );
- VG_(pp_ExeContext)(Seg__where(vseg));
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(Seg__where(vseg)));
} else {
@@ -414,7 +415,7 @@
how_invalid,
readwrite(xe->XE.Heap.sszB),
Word__abs(xe->XE.Heap.sszB) );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
emit( " Address %#lx is %lu bytes %s the accessing pointer's\n",
a, miss_size, place );
@@ -421,7 +422,8 @@
emit( " %slegitimate range, a block of size %lu %s\n",
legit, Seg__size(vseg),
Seg__is_freed(vseg) ? "free'd" : "alloc'd" );
- VG_(pp_ExeContext)(Seg__where(vseg));
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(Seg__where(vseg)));
}
}
@@ -477,7 +479,7 @@
emit( " <what>Invalid arguments to %s</what>\n",
xe->XE.Arith.opname );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
if (seg1 != seg2) {
if (NONPTR == seg1) {
@@ -488,7 +490,8 @@
emit( " <auxwhat>First arg derived from address %#lx of "
"%lu-byte block alloc'd</auxwhat>\n",
Seg__addr(seg1), Seg__size(seg1) );
- VG_(pp_ExeContext)(Seg__where(seg1));
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(Seg__where(seg1)));
}
which = "Second arg";
} else {
@@ -500,7 +503,8 @@
emit( " <auxwhat>%s derived from address %#lx of "
"%lu-byte block alloc'd</auxwhat>\n",
which, Seg__addr(seg2), Seg__size(seg2) );
- VG_(pp_ExeContext)(Seg__where(seg2));
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(Seg__where(seg2)));
}
} else {
@@ -507,7 +511,7 @@
emit( "Invalid arguments to %s\n",
xe->XE.Arith.opname );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
if (seg1 != seg2) {
if (NONPTR == seg1) {
@@ -518,7 +522,8 @@
emit( " First arg derived from address %#lx of "
"%lu-byte block alloc'd\n",
Seg__addr(seg1), Seg__size(seg1) );
- VG_(pp_ExeContext)(Seg__where(seg1));
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(Seg__where(seg1)));
}
which = "Second arg";
} else {
@@ -530,7 +535,8 @@
emit( " %s derived from address %#lx of "
"%lu-byte block alloc'd\n",
which, Seg__addr(seg2), Seg__size(seg2) );
- VG_(pp_ExeContext)(Seg__where(seg2));
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(Seg__where(seg2)));
}
}
@@ -562,23 +568,25 @@
emit( " <what>%s%s contains unaddressable byte(s)</what>\n",
what, s );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
emit( " <auxwhat>Address %#lx is %lu bytes inside a "
"%lu-byte block free'd</auxwhat>\n",
lo, lo-Seg__addr(seglo), Seg__size(seglo) );
- VG_(pp_ExeContext)(Seg__where(seglo));
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(Seg__where(seglo)));
} else {
emit( " %s%s contains unaddressable byte(s)\n",
what, s );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
emit( " Address %#lx is %lu bytes inside a "
"%lu-byte block free'd\n",
lo, lo-Seg__addr(seglo), Seg__size(seglo) );
- VG_(pp_ExeContext)(Seg__where(seglo));
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(Seg__where(seglo)));
}
@@ -589,7 +597,7 @@
emit( " <what>%s%s is non-contiguous</what>\n",
what, s );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
if (UNKNOWN == seglo) {
emit( " <auxwhat>First byte is "
@@ -598,7 +606,8 @@
emit( " <auxwhat>First byte (%#lx) is %lu bytes inside a "
"%lu-byte block alloc'd</auxwhat>\n",
lo, lo-Seg__addr(seglo), Seg__size(seglo) );
- VG_(pp_ExeContext)(Seg__where(seglo));
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(Seg__where(seglo)));
}
if (UNKNOWN == seghi) {
@@ -608,7 +617,8 @@
emit( " <auxwhat>Last byte (%#lx) is %lu bytes inside a "
"%lu-byte block alloc'd</auxwhat>\n",
hi, hi-Seg__addr(seghi), Seg__size(seghi) );
- VG_(pp_ExeContext)(Seg__where(seghi));
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(Seg__where(seghi)));
}
} else {
@@ -615,7 +625,7 @@
emit( "%s%s is non-contiguous\n",
what, s );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
if (UNKNOWN == seglo) {
emit( " First byte is not inside a known block\n" );
@@ -623,7 +633,8 @@
emit( " First byte (%#lx) is %lu bytes inside a "
"%lu-byte block alloc'd\n",
lo, lo-Seg__addr(seglo), Seg__size(seglo) );
- VG_(pp_ExeContext)(Seg__where(seglo));
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(Seg__where(seglo)));
}
if (UNKNOWN == seghi) {
@@ -632,7 +643,8 @@
emit( " Last byte (%#lx) is %lu bytes inside a "
"%lu-byte block alloc'd\n",
hi, hi-Seg__addr(seghi), Seg__size(seghi) );
- VG_(pp_ExeContext)(Seg__where(seghi));
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(Seg__where(seghi)));
}
}
@@ -650,6 +662,8 @@
UInt pc_update_Error_extra ( const Error* err )
{
XError *xe = (XError*)VG_(get_error_extra)(err);
+ DiEpoch ep = VG_(get_error_where)(err).epoch;
+
tl_assert(xe);
switch (xe->tag) {
case XE_SorG:
@@ -675,7 +689,7 @@
have_descr
= VG_(get_data_description)( xe->XE.Heap.descr1,
xe->XE.Heap.descr2,
- xe->XE.Heap.addr );
+ ep, xe->XE.Heap.addr );
/* If there's nothing in descr1/2, free it. Why is it safe to
to VG_(indexXA) at zero here? Because
@@ -699,7 +713,7 @@
if (!have_descr) {
const HChar *name;
if (VG_(get_datasym_and_offset)(
- xe->XE.Heap.addr, &name,
+ ep, xe->XE.Heap.addr, &name,
&xe->XE.Heap.datasymoff )
) {
xe->XE.Heap.datasym =
Index: exp-sgcheck/sg_main.c
===================================================================
--- exp-sgcheck/sg_main.c (revision 16465)
+++ exp-sgcheck/sg_main.c (working copy)
@@ -1936,7 +1936,8 @@
const HChar *fnname;
Bool ok;
Addr ip = ip_post_call_insn;
- ok = VG_(get_fnname_w_offset)( ip, &fnname );
+ DiEpoch ep = VG_(current_DiEpoch)();
+ ok = VG_(get_fnname_w_offset)( ep, ip, &fnname );
while (d > 0) {
VG_(printf)(" ");
d--;
Index: helgrind/hg_addrdescr.c
===================================================================
--- helgrind/hg_addrdescr.c (revision 16465)
+++ helgrind/hg_addrdescr.c (working copy)
@@ -45,7 +45,7 @@
#include "hg_lock_n_thread.h"
#include "hg_addrdescr.h" /* self */
-void HG_(describe_addr) ( Addr a, /*OUT*/AddrInfo* ai )
+void HG_(describe_addr) ( DiEpoch ep, Addr a, /*OUT*/AddrInfo* ai )
{
tl_assert(ai->tag == Addr_Undescribed);
@@ -75,13 +75,14 @@
ai->Addr.Block.block_desc = "block";
ai->Addr.Block.block_szB = hszB;
ai->Addr.Block.rwoffset = (Word)(a) - (Word)(haddr);
- ai->Addr.Block.allocated_at = hctxt;
+ ai->Addr.Block.allocated_at.ec = hctxt;
+ ai->Addr.Block.allocated_at.epoch = ep;
VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
ai->Addr.Block.alloc_tinfo.tnr = tnr;
- ai->Addr.Block.freed_at = VG_(null_ExeContext)();;
+ ai->Addr.Block.freed_at = VG_(null_ExeContextAndEpoch)();
} else {
/* No block found. Search a non-heap block description. */
- VG_(describe_addr) (a, ai);
+ VG_(describe_addr) (ep, a, ai);
/* In case ai contains a tid, set tnr to the corresponding helgrind
thread number. */
@@ -100,7 +101,7 @@
}
}
-Bool HG_(get_and_pp_addrdescr) (Addr addr)
+Bool HG_(get_and_pp_addrdescr) (DiEpoch ep, Addr addr)
{
Bool ret;
@@ -107,7 +108,7 @@
AddrInfo glai;
glai.tag = Addr_Undescribed;
- HG_(describe_addr) (addr, &glai);
+ HG_(describe_addr) (ep, addr, &glai);
VG_(pp_addrinfo) (addr, &glai);
ret = glai.tag != Addr_Unknown;
Index: helgrind/hg_addrdescr.h
===================================================================
--- helgrind/hg_addrdescr.h (revision 16465)
+++ helgrind/hg_addrdescr.h (working copy)
@@ -37,12 +37,12 @@
lock description, putting the result in ai.
This might allocate some memory in ai, to be cleared with
VG_(clear_addrinfo). */
-extern void HG_(describe_addr) ( Addr a, /*OUT*/AddrInfo* ai );
+extern void HG_(describe_addr) ( DiEpoch ep, Addr a, /*OUT*/AddrInfo* ai );
/* Get a readable description of addr, then print it using HG_(pp_addrdescr)
using xml False and VG_(printf) to emit the characters.
Returns True if a description was found/printed, False otherwise. */
-extern Bool HG_(get_and_pp_addrdescr) (Addr a);
+extern Bool HG_(get_and_pp_addrdescr) (DiEpoch ep, Addr a);
/* For error creation/address description:
map 'data_addr' to a malloc'd chunk, if any.
Index: helgrind/hg_errors.c
===================================================================
--- helgrind/hg_errors.c (revision 16465)
+++ helgrind/hg_errors.c (working copy)
@@ -421,7 +421,8 @@
VG_(printf)("HG_(update_extra): "
"%d conflicting-event queries\n", xxx);
- HG_(describe_addr) (xe->XE.Race.data_addr, &xe->XE.Race.data_addrinfo);
+ HG_(describe_addr) (VG_(get_error_where)(err).epoch,
+ xe->XE.Race.data_addr, &xe->XE.Race.data_addrinfo);
/* And poke around in the conflicting-event map, to see if we
can rustle up a plausible-looking conflicting memory access
@@ -748,7 +749,8 @@
VG_(printf_xml)(" <isrootthread></isrootthread>\n");
} else {
tl_assert(thr->created_at != NULL);
- VG_(pp_ExeContext)( thr->created_at );
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)( thr->created_at ) );
}
VG_(printf_xml)("</announcethread>\n\n");
@@ -767,7 +769,8 @@
tl_assert(thr->created_at != NULL);
VG_(message)(Vg_UserMsg, "Thread #%d was created\n",
thr->errmsg_index);
- VG_(pp_ExeContext)( thr->created_at );
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)( thr->created_at ) );
}
VG_(message)(Vg_UserMsg, "\n");
@@ -789,7 +792,8 @@
if (lk->appeared_at) {
emit( " <auxwhat>Lock at %p was first observed</auxwhat>\n",
(void*)lk );
- VG_(pp_ExeContext)( lk->appeared_at );
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)( lk->appeared_at ) );
}
} else {
@@ -796,12 +800,13 @@
if (lk->appeared_at) {
VG_(umsg)( " Lock at %p was first observed\n",
(void*)lk->guestaddr );
- VG_(pp_ExeContext)( lk->appeared_at );
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)( lk->appeared_at ) );
} else {
VG_(umsg)( " Lock at %p : no stacktrace for first observation\n",
(void*)lk->guestaddr );
}
- HG_(get_and_pp_addrdescr) (lk->guestaddr);
+ HG_(get_and_pp_addrdescr) (VG_(current_DiEpoch)(), lk->guestaddr);
VG_(umsg)("\n");
}
}
@@ -941,11 +946,12 @@
emit( " <hthreadid>%d</hthreadid>\n",
(Int)xe->XE.Misc.thr->errmsg_index );
emit( " </xwhat>\n" );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
if (xe->XE.Misc.auxstr) {
emit(" <auxwhat>%s</auxwhat>\n", xe->XE.Misc.auxstr);
if (xe->XE.Misc.auxctx)
- VG_(pp_ExeContext)( xe->XE.Misc.auxctx );
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)( xe->XE.Misc.auxctx ));
}
} else {
@@ -953,11 +959,12 @@
emit( "Thread #%d: %s\n",
(Int)xe->XE.Misc.thr->errmsg_index,
xe->XE.Misc.errstr );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
if (xe->XE.Misc.auxstr) {
emit(" %s\n", xe->XE.Misc.auxstr);
if (xe->XE.Misc.auxctx)
- VG_(pp_ExeContext)( xe->XE.Misc.auxctx );
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)( xe->XE.Misc.auxctx ));
}
}
@@ -978,17 +985,21 @@
emit( " <hthreadid>%d</hthreadid>\n",
(Int)xe->XE.LockOrder.thr->errmsg_index );
emit( " </xwhat>\n" );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
if (xe->XE.LockOrder.shouldbe_earlier_ec
&& xe->XE.LockOrder.shouldbe_later_ec) {
emit( " <auxwhat>Required order was established by "
"acquisition of lock at %p</auxwhat>\n",
(void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr );
- VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_earlier_ec );
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(
+ xe->XE.LockOrder.shouldbe_earlier_ec ));
emit( " <auxwhat>followed by a later acquisition "
"of lock at %p</auxwhat>\n",
(void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
- VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_later_ec );
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(
+ xe->XE.LockOrder.shouldbe_later_ec ));
}
announce_LockP ( xe->XE.LockOrder.shouldbe_earlier_lk );
announce_LockP ( xe->XE.LockOrder.shouldbe_later_lk );
@@ -1004,7 +1015,9 @@
"acquisition of lock at %p\n",
(void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr);
if (xe->XE.LockOrder.actual_earlier_ec) {
- VG_(pp_ExeContext)(xe->XE.LockOrder.actual_earlier_ec);
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(
+ xe->XE.LockOrder.actual_earlier_ec));
} else {
emit(" (stack unavailable)\n");
}
@@ -1011,7 +1024,7 @@
emit( "\n" );
emit(" followed by a later acquisition of lock at %p\n",
(void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr);
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
if (xe->XE.LockOrder.shouldbe_earlier_ec
&& xe->XE.LockOrder.shouldbe_later_ec) {
emit("\n");
@@ -1018,11 +1031,15 @@
emit( "Required order was established by "
"acquisition of lock at %p\n",
(void*)xe->XE.LockOrder.shouldbe_earlier_lk->guestaddr );
- VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_earlier_ec );
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(
+ xe->XE.LockOrder.shouldbe_earlier_ec ));
emit( "\n" );
emit( " followed by a later acquisition of lock at %p\n",
(void*)xe->XE.LockOrder.shouldbe_later_lk->guestaddr );
- VG_(pp_ExeContext)( xe->XE.LockOrder.shouldbe_later_ec );
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(
+ xe->XE.LockOrder.shouldbe_later_ec ));
}
emit("\n");
announce_LockP ( xe->XE.LockOrder.shouldbe_earlier_lk );
@@ -1048,7 +1065,7 @@
emit( " </xwhat>\n" );
emit( " <what>with error code %ld (%s)</what>\n",
xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
} else {
@@ -1057,7 +1074,7 @@
xe->XE.PthAPIerror.fnname );
emit( " with error code %ld (%s)\n",
xe->XE.PthAPIerror.err, xe->XE.PthAPIerror.errstr );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
}
@@ -1077,7 +1094,7 @@
emit( " <hthreadid>%d</hthreadid>\n",
(Int)xe->XE.UnlockBogus.thr->errmsg_index );
emit( " </xwhat>\n" );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
} else {
@@ -1084,7 +1101,7 @@
emit( "Thread #%d unlocked an invalid lock at %p\n",
(Int)xe->XE.UnlockBogus.thr->errmsg_index,
(void*)xe->XE.UnlockBogus.lock_ga );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
}
@@ -1109,7 +1126,7 @@
emit( " <hthreadid>%d</hthreadid>\n",
(Int)xe->XE.UnlockForeign.owner->errmsg_index );
emit( " </xwhat>\n" );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
announce_LockP ( xe->XE.UnlockForeign.lock );
} else {
@@ -1119,7 +1136,7 @@
(Int)xe->XE.UnlockForeign.thr->errmsg_index,
(void*)xe->XE.UnlockForeign.lock->guestaddr,
(Int)xe->XE.UnlockForeign.owner->errmsg_index );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
announce_LockP ( xe->XE.UnlockForeign.lock );
}
@@ -1141,7 +1158,7 @@
emit( " <hthreadid>%d</hthreadid>\n",
(Int)xe->XE.UnlockUnlocked.thr->errmsg_index );
emit( " </xwhat>\n" );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
announce_LockP ( xe->XE.UnlockUnlocked.lock);
} else {
@@ -1149,7 +1166,7 @@
emit( "Thread #%d unlocked a not-locked lock at %p\n",
(Int)xe->XE.UnlockUnlocked.thr->errmsg_index,
(void*)xe->XE.UnlockUnlocked.lock->guestaddr );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
announce_LockP ( xe->XE.UnlockUnlocked.lock);
}
@@ -1179,7 +1196,7 @@
emit( " <hthreadid>%d</hthreadid>\n",
(Int)xe->XE.Race.thr->errmsg_index );
emit( " </xwhat>\n" );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
if (xe->XE.Race.h2_ct) {
tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
@@ -1192,7 +1209,8 @@
emit( " <hthreadid>%d</hthreadid>\n",
xe->XE.Race.h2_ct->errmsg_index);
emit(" </xauxwhat>\n");
- VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)( xe->XE.Race.h2_ct_accEC ));
}
if (xe->XE.Race.h1_ct) {
@@ -1204,13 +1222,17 @@
xe->XE.Race.h1_ct->errmsg_index );
emit(" </xauxwhat>\n");
if (xe->XE.Race.h1_ct_mbsegstartEC) {
- VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(
+ xe->XE.Race.h1_ct_mbsegstartEC ));
} else {
emit( " <auxwhat>(the start of the thread)</auxwhat>\n" );
}
emit( " <auxwhat>but before</auxwhat>\n" );
if (xe->XE.Race.h1_ct_mbsegendEC) {
- VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(
+ xe->XE.Race.h1_ct_mbsegendEC ));
} else {
emit( " <auxwhat>(the end of the thread)</auxwhat>\n" );
}
@@ -1228,7 +1250,7 @@
tl_assert(xe->XE.Race.locksHeldW);
show_LockP_summary_textmode( xe->XE.Race.locksHeldW, "" );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
if (xe->XE.Race.h2_ct) {
tl_assert(xe->XE.Race.h2_ct_accEC); // assured by update_extra
@@ -1240,7 +1262,8 @@
xe->XE.Race.h2_ct_accSzB,
xe->XE.Race.h2_ct->errmsg_index );
show_LockP_summary_textmode( xe->XE.Race.h2_ct_locksHeldW, "" );
- VG_(pp_ExeContext)( xe->XE.Race.h2_ct_accEC );
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)( xe->XE.Race.h2_ct_accEC ));
}
if (xe->XE.Race.h1_ct) {
@@ -1248,13 +1271,17 @@
"after\n",
xe->XE.Race.h1_ct->errmsg_index );
if (xe->XE.Race.h1_ct_mbsegstartEC) {
- VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegstartEC );
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(
+ xe->XE.Race.h1_ct_mbsegstartEC ));
} else {
emit( " (the start of the thread)\n" );
}
emit( " but before\n" );
if (xe->XE.Race.h1_ct_mbsegendEC) {
- VG_(pp_ExeContext)( xe->XE.Race.h1_ct_mbsegendEC );
+ VG_(pp_ExeContextAndEpoch)(
+ VG_(tag_EC_with_current_epoch)(
+ xe->XE.Race.h1_ct_mbsegendEC ));
} else {
emit( " (the end of the thread)\n" );
}
@@ -1307,7 +1334,7 @@
show_LockP_summary_textmode( locksHeldW_P, "" );
HG_(free) (locksHeldW_P);
}
- VG_(pp_StackTrace) (ips, n_ips);
+ VG_(pp_StackTrace)( VG_(current_DiEpoch)(), ips, n_ips );
VG_(printf) ("\n");
}
Index: helgrind/hg_main.c
===================================================================
--- helgrind/hg_main.c (revision 16465)
+++ helgrind/hg_main.c (working copy)
@@ -483,6 +483,7 @@
Bool show_lock_addrdescr,
Bool show_internal_data)
{
+ DiEpoch ep = VG_(current_DiEpoch)();
space(d+0);
if (show_internal_data)
VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
@@ -489,7 +490,7 @@
else
VG_(printf)("Lock ga %#lx {\n", lk->guestaddr);
if (!show_lock_addrdescr
- || !HG_(get_and_pp_addrdescr) ((Addr) lk->guestaddr))
+ || !HG_(get_and_pp_addrdescr) (ep, (Addr) lk->guestaddr))
VG_(printf)("\n");
if (sHOW_ADMIN) {
@@ -4598,7 +4599,7 @@
DebugInfo* dinfo;
const HChar* soname;
- dinfo = VG_(find_DebugInfo)( ga );
+ dinfo = VG_(find_DebugInfo)( VG_(current_DiEpoch)(), ga );
if (!dinfo) return False;
soname = VG_(DebugInfo_get_soname)(dinfo);
@@ -5817,9 +5818,9 @@
VG_(XTMemory_Full_init)(VG_(XT_filter_1top_and_maybe_below_main));
}
-static void hg_info_location (Addr a)
+static void hg_info_location (DiEpoch ep, Addr a)
{
- (void) HG_(get_and_pp_addrdescr) (a);
+ (void) HG_(get_and_pp_addrdescr) (ep, a);
}
static void hg_pre_clo_init ( void )
Index: helgrind/libhb_core.c
===================================================================
--- helgrind/libhb_core.c (revision 16465)
+++ helgrind/libhb_core.c (working copy)
@@ -4095,7 +4095,7 @@
VG_(printf)("LOCAL Kw: thr %p, Kw %llu, ec %p\n",
thr, pair.ull, pair.ec );
if (0)
- VG_(pp_ExeContext)(pair.ec);
+ VG_(pp_ExeContextAndEpoch)(VG_(tag_EC_with_current_epoch)(pair.ec));
}
static Int cmp__ULong_n_EC__by_ULong ( const ULong_n_EC* pair1,
Index: include/pub_tool_addrinfo.h
===================================================================
--- include/pub_tool_addrinfo.h (revision 16465)
+++ include/pub_tool_addrinfo.h (working copy)
@@ -135,6 +135,7 @@
// (spoffset will be negative, as stacks are assumed growing down).
struct {
ThreadInfo tinfo;
+ DiEpoch epoch;
Addr IP;
Int frameNo;
StackPos stackPos;
@@ -151,9 +152,9 @@
const HChar* block_desc; // "block","mempool","user-defined",arena
SizeT block_szB;
PtrdiffT rwoffset;
- ExeContext* allocated_at; // might be null_ExeContext.
- ThreadInfo alloc_tinfo; // which thread did alloc this block.
- ExeContext* freed_at; // might be null_ExeContext.
+ ExeContextAndEpoch allocated_at; // might contain null_ExeContext.
+ ThreadInfo alloc_tinfo; // which thread alloc'd this block.
+ ExeContextAndEpoch freed_at; // might contain null_ExeContext.
} Block;
// In a global .data symbol. This holds
@@ -204,7 +205,7 @@
On entry, ai->tag must be equal to Addr_Undescribed.
This might allocate some memory, that can be cleared with
VG_(clear_addrinfo). */
-extern void VG_(describe_addr) ( Addr a, /*OUT*/AddrInfo* ai );
+extern void VG_(describe_addr) ( DiEpoch ep, Addr a, /*OUT*/AddrInfo* ai );
extern void VG_(clear_addrinfo) ( AddrInfo* ai);
Index: include/pub_tool_basics.h
===================================================================
--- include/pub_tool_basics.h (revision 16465)
+++ include/pub_tool_basics.h (working copy)
@@ -129,6 +129,24 @@
/* ThreadIds are simply indices into the VG_(threads)[] array. */
typedef UInt ThreadId;
+
+/* You need a debuginfo epoch in order to convert an address into any source
+ level entity, since that conversion depends on what objects were mapped
+ in at the time. An epoch is simply a monotonically increasing counter,
+ which we wrap up in a struct so as to enable the C type system to
+ distinguish it from other kinds of numbers. m_debuginfo holds and
+ maintains the current epoch number. */
+typedef struct { UInt n; } DiEpoch;
+
+static inline DiEpoch DiEpoch_INVALID ( void ) {
+ DiEpoch dep; dep.n = 0; return dep;
+}
+
+static inline Bool is_DiEpoch_INVALID ( DiEpoch dep ) {
+ return dep.n == 0;
+}
+
+
/* Many data structures need to allocate and release memory.
The allocation/release functions must be provided by the caller.
The Alloc_Fn_t function must allocate a chunk of memory of size szB.
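
A minimal sketch (not part of the patch itself) of how tool code is expected to use the new epoch type: fetch the current epoch once, then pass it to the address-to-name queries declared in pub_tool_debuginfo.h. The helper name print_fn_at is made up for illustration.

#include "pub_tool_basics.h"      // Addr, DiEpoch, HChar, Bool
#include "pub_tool_debuginfo.h"   // VG_(current_DiEpoch), VG_(get_fnname)
#include "pub_tool_libcprint.h"   // VG_(printf)

/* Hypothetical helper: print the name of the function containing 'a',
   resolved against the debuginfo mappings in force right now. */
static void print_fn_at ( Addr a )
{
   const DiEpoch ep = VG_(current_DiEpoch)();
   const HChar*  fnname;
   if (VG_(get_fnname)( ep, a, &fnname ))
      VG_(printf)( "%#lx is in %s\n", a, fnname );
   else
      VG_(printf)( "%#lx: no function name found\n", a );
}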
Index: include/pub_tool_debuginfo.h
===================================================================
--- include/pub_tool_debuginfo.h (revision 16465)
+++ include/pub_tool_debuginfo.h (working copy)
@@ -31,13 +31,22 @@
#ifndef __PUB_TOOL_DEBUGINFO_H
#define __PUB_TOOL_DEBUGINFO_H
-#include "pub_tool_basics.h" // VG_ macro
+#include "pub_tool_basics.h" // VG_ macro, DiEpoch
#include "pub_tool_xarray.h" // XArray
+
/*====================================================================*/
-/*=== Obtaining debug information ===*/
+/*=== Debuginfo epochs. ===*/
/*====================================================================*/
+// This returns the current epoch.
+DiEpoch VG_(current_DiEpoch)(void);
+
+
+/*====================================================================*/
+/*=== Obtaining information pertaining to source artefacts. ===*/
+/*====================================================================*/
+
/* IMPORTANT COMMENT about memory persistence and ownership.
Many functions below are returning a string in a HChar** argument.
@@ -76,11 +85,11 @@
demangles C++ function names. VG_(get_fnname_w_offset) is the
same, except it appends "+N" to symbol names to indicate offsets.
NOTE: See IMPORTANT COMMENT above about persistence and ownership. */
-extern Bool VG_(get_filename) ( Addr a, const HChar** filename );
-extern Bool VG_(get_fnname) ( Addr a, const HChar** fnname );
-extern Bool VG_(get_linenum) ( Addr a, UInt* linenum );
+extern Bool VG_(get_filename) ( DiEpoch ep, Addr a, const HChar** filename );
+extern Bool VG_(get_fnname) ( DiEpoch ep, Addr a, const HChar** fnname );
+extern Bool VG_(get_linenum) ( DiEpoch ep, Addr a, UInt* linenum );
extern Bool VG_(get_fnname_w_offset)
- ( Addr a, const HChar** fnname );
+ ( DiEpoch ep, Addr a, const HChar** fnname );
/* This one is the most general. It gives filename, line number and
optionally directory name. filename and linenum may not be NULL.
@@ -95,7 +104,7 @@
Returned value indicates whether any filename/line info could be
found. */
extern Bool VG_(get_filename_linenum)
- ( Addr a,
+ ( DiEpoch ep, Addr a,
/*OUT*/const HChar** filename,
/*OUT*/const HChar** dirname,
/*OUT*/UInt* linenum );
@@ -108,7 +117,8 @@
of its symbols, this function will not be able to recognise function
entry points within it.
NOTE: See IMPORTANT COMMENT above about persistence and ownership. */
-extern Bool VG_(get_fnname_if_entry) ( Addr a, const HChar** fnname );
+extern Bool VG_(get_fnname_if_entry) ( DiEpoch ep, Addr a,
+ const HChar** fnname );
typedef
enum {
@@ -121,13 +131,13 @@
extern Vg_FnNameKind VG_(get_fnname_kind) ( const HChar* name );
/* Like VG_(get_fnname_kind), but takes a code address. */
-extern Vg_FnNameKind VG_(get_fnname_kind_from_IP) ( Addr ip );
+extern Vg_FnNameKind VG_(get_fnname_kind_from_IP) ( DiEpoch ep, Addr ip );
/* Looks up data_addr in the collection of data symbols, and if found
puts its name (or as much as will fit) into dname[0 .. n_dname-1],
which is guaranteed to be zero terminated. Also data_addr's offset
from the symbol start is put into *offset. */
-extern Bool VG_(get_datasym_and_offset)( Addr data_addr,
+extern Bool VG_(get_datasym_and_offset)( DiEpoch ep, Addr data_addr,
/*OUT*/const HChar** dname,
/*OUT*/PtrdiffT* offset );
@@ -147,7 +157,7 @@
Bool VG_(get_data_description)(
/*MOD*/ XArray* /* of HChar */ dname1v,
/*MOD*/ XArray* /* of HChar */ dname2v,
- Addr data_addr
+ DiEpoch ep, Addr data_addr
);
/* Succeeds if the address is within a shared object or the main executable.
@@ -154,7 +164,7 @@
It first searches if Addr a belongs to the text segment of debug info.
If not found, it asks the address space manager whether it
knows the name of the file associated with this mapping. */
-extern Bool VG_(get_objname) ( Addr a, const HChar** objname );
+extern Bool VG_(get_objname) ( DiEpoch ep, Addr a, const HChar** objname );
/* Cursor allowing to describe inlined function calls at an IP,
@@ -169,7 +179,7 @@
eip can possibly corresponds to inlined function call(s).
To describe eip and the inlined function calls, the following must
be done:
- InlIPCursor *iipc = VG_(new_IIPC)(eip);
+ InlIPCursor *iipc = VG_(new_IIPC)(ep, eip);
do {
buf = VG_(describe_IP)(eip, iipc);
... use buf ...
@@ -182,12 +192,16 @@
Note, that the returned string is allocated in a static buffer local to
VG_(describe_IP). That buffer will be overwritten with every invocation.
Therefore, callers need to possibly stash away the string.
+
+ Since this maps a code location to a source artefact (function names),
+ new_IIPC requires a DiEpoch argument (ep) too.
*/
-extern const HChar* VG_(describe_IP)(Addr eip, const InlIPCursor* iipc);
+extern const HChar* VG_(describe_IP)(DiEpoch ep, Addr eip,
+ const InlIPCursor* iipc);
/* Builds a IIPC (Inlined IP Cursor) to describe eip and all the inlined calls
at eip. Such a cursor must be deleted after use using VG_(delete_IIPC). */
-extern InlIPCursor* VG_(new_IIPC)(Addr eip);
+extern InlIPCursor* VG_(new_IIPC)(DiEpoch ep, Addr eip);
/* Move the cursor to the next call to describe.
Returns True if there are still calls to describe.
False if nothing to describe anymore. */
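
A minimal sketch of the updated cursor loop, assuming the address of interest should be resolved against the current epoch; print_ip_and_inlined_calls is a made-up name for illustration.

#include "pub_tool_basics.h"      // Addr, DiEpoch, HChar
#include "pub_tool_debuginfo.h"   // VG_(new_IIPC), VG_(describe_IP), ...
#include "pub_tool_libcprint.h"   // VG_(printf)

static void print_ip_and_inlined_calls ( Addr eip )
{
   const DiEpoch ep   = VG_(current_DiEpoch)();
   InlIPCursor*  iipc = VG_(new_IIPC)( ep, eip );
   do {
      // describe_IP returns a pointer into a static buffer, so use (or
      // copy) the string before the next call.
      const HChar* buf = VG_(describe_IP)( ep, eip, iipc );
      VG_(printf)( "%s\n", buf );
   } while (VG_(next_IIPC)( iipc ));
   VG_(delete_IIPC)( iipc );
}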
@@ -239,7 +253,7 @@
/*====================================================================*/
-/*=== Obtaining debug information ===*/
+/*=== Obtaining information pertaining to shared objects. ===*/
/*====================================================================*/
/* A way to make limited debuginfo queries on a per-mapped-object
@@ -248,7 +262,7 @@
/* Returns NULL if the DebugInfo isn't found. It doesn't matter if
debug info is present or not. */
-DebugInfo* VG_(find_DebugInfo) ( Addr a );
+DebugInfo* VG_(find_DebugInfo) ( DiEpoch ep, Addr a );
/* Fish bits out of DebugInfos. */
Addr VG_(DebugInfo_get_text_avma) ( const DebugInfo *di );
Index: include/pub_tool_errormgr.h
===================================================================
--- include/pub_tool_errormgr.h (revision 16465)
+++ include/pub_tool_errormgr.h (working copy)
@@ -56,11 +56,11 @@
/* Useful in VG_(tdict).tool_error_matches_suppression(),
* VG_(tdict).tool_pp_Error(), etc */
-ExeContext* VG_(get_error_where) ( const Error* err );
-ErrorKind VG_(get_error_kind) ( const Error* err );
-Addr VG_(get_error_address) ( const Error* err );
-const HChar* VG_(get_error_string) ( const Error* err );
-void* VG_(get_error_extra) ( const Error* err );
+ExeContextAndEpoch VG_(get_error_where) ( const Error* err );
+ErrorKind VG_(get_error_kind) ( const Error* err );
+Addr VG_(get_error_address) ( const Error* err );
+const HChar* VG_(get_error_string) ( const Error* err );
+void* VG_(get_error_extra) ( const Error* err );
/* Call this when an error occurs. It will be recorded if it hasn't been
seen before. If it has, the existing error record will have its count
@@ -90,7 +90,7 @@
whether to add the error in the error total count (another mild hack). */
extern Bool VG_(unique_error) ( ThreadId tid, ErrorKind ekind,
Addr a, const HChar* s, void* extra,
- ExeContext* where, Bool print_error,
+ ExeContextAndEpoch where, Bool print_error,
Bool allow_GDB_attach, Bool count_error );
/* Gets from fd (an opened suppression file) a non-blank, non-comment
Index: include/pub_tool_execontext.h
===================================================================
--- include/pub_tool_execontext.h (revision 16465)
+++ include/pub_tool_execontext.h (working copy)
@@ -30,8 +30,14 @@
#ifndef __PUB_TOOL_EXECONTEXT_H
#define __PUB_TOOL_EXECONTEXT_H
-#include "pub_tool_basics.h" // ThreadID
+#include "pub_tool_basics.h" // ThreadID
+#include "pub_tool_debuginfo.h" // DiEpoch
+
+/*====================================================================*/
+/*=== ExeContext ===*/
+/*====================================================================*/
+
// It's an abstract type.
typedef
struct _ExeContext
@@ -84,9 +90,6 @@
extern Bool VG_(eq_ExeContext) ( VgRes res, const ExeContext* e1,
const ExeContext* e2 );
-// Print an ExeContext.
-extern void VG_(pp_ExeContext) ( ExeContext* ec );
-
// Get the 32-bit unique reference number for this ExeContext
// (the "ExeContext Unique"). Guaranteed to be nonzero and to be a
// multiple of four (iow, the lowest two bits are guaranteed to
@@ -113,11 +116,53 @@
// Make an ExeContext containing exactly the specified stack frames.
ExeContext* VG_(make_ExeContext_from_StackTrace)( const Addr* ips, UInt n_ips );
-// Returns the "null" exe context. The null execontext is an artificial
-// exe context, with a stack trace made of one Addr (the NULL address).
-extern
-ExeContext* VG_(null_ExeContext) (void);
+/*====================================================================*/
+/*=== ExeContextAndEpoch ===*/
+/*====================================================================*/
+
+/* A plain ExeContext is not generally symbolisable, since we also need to
+ know which DebugInfo epoch it pertains to. ExeContextAndEpoch pairs them
+ up. Note this is just two words, so passing it around by value is
+ fine. */
+typedef
+ struct {
+ ExeContext* ec;
+ DiEpoch epoch;
+ }
+ ExeContextAndEpoch;
+
+// A trivial constructor.
+static inline ExeContextAndEpoch mk_ExeContextAndEpoch ( ExeContext* ec,
+ DiEpoch ep ) {
+ ExeContextAndEpoch ece;
+ ece.ec = ec;
+ ece.epoch = ep;
+ return ece;
+}
+
+// Generates a completely invalid ExeContextAndEpoch, with NULL for .ec and
+// zero for .epoch. Both values are invalid.
+ExeContextAndEpoch VG_(invalid_ExeContextAndEpoch) ( void );
+
+// Takes an ExeContext and tags it with the current epoch, which is
+// generally what we want to do.
+ExeContextAndEpoch VG_(tag_EC_with_current_epoch)( ExeContext* ec );
+
+// Print an ExeContextAndEpoch. We can't print a plain ExeContext
+// because we can't symbolise it without knowing which debuginfo
+// epoch it pertains to.
+void VG_(pp_ExeContextAndEpoch) ( ExeContextAndEpoch ece );
+
+// Returns the "null" exe context tagged with the current debuginfo
+// epoch. The null execontext is an artificial exe context, with a stack
+// trace made of one Addr (the NULL address), and the current epoch.
+ExeContextAndEpoch VG_(null_ExeContextAndEpoch) ( void );
+
+// Is this a value obtained from VG_(null_ExeContextAndEpoch) ?
+Bool VG_(is_null_ExeContextAndEpoch)( ExeContextAndEpoch ece );
+
+
#endif // __PUB_TOOL_EXECONTEXT_H
/*--------------------------------------------------------------------*/
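To make the intended use concrete, a minimal sketch (illustrative only, mirroring what this patch does in memcheck's malloc wrappers; 'tid' is assumed to be the current ThreadId): a freshly recorded ExeContext is paired with the current epoch, and "not recorded" is now expressed with the invalid pair rather than a NULL ExeContext*.

   ExeContext*        ec   = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   ExeContextAndEpoch ece  = VG_(tag_EC_with_current_epoch)(ec);

   ExeContextAndEpoch none = VG_(invalid_ExeContextAndEpoch)();
   tl_assert(none.ec == NULL && none.epoch.n == 0);  /* both fields invalid */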
Index: include/pub_tool_options.h
===================================================================
--- include/pub_tool_options.h (revision 16465)
+++ include/pub_tool_options.h (working copy)
@@ -249,7 +249,13 @@
/* Continue stack traces below main()? Default: NO */
extern Bool VG_(clo_show_below_main);
+/* Keep symbols (and all other debuginfo) for code that is unloaded (dlclose
+   or similar), so that previously captured stack traces can still give
+   line/file info, e.g. when showing where a block was allocated in leak
+   reports or for accesses just outside a block.  Default: NO */
+extern Bool VG_(clo_keep_debuginfo);
+
/* Used to expand file names. "option_name" is the option name, eg.
"--log-file". 'format' is what follows, eg. "cachegrind.out.%p". In
'format':
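In practice the new option is exercised by the dlclose_leak tests added below; the manual equivalent of the --keep-debuginfo=yes run is roughly

   valgrind --leak-check=yes --keep-debuginfo=yes ./dlclose_leak

With --keep-debuginfo=no (the default) the same leak is still reported, but frames inside the dlclose'd object can no longer be symbolised, which is why the -no-keep expected output below shows "..." where the keep=yes output has dlclose_leak_so.c file/line info.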
Index: include/pub_tool_stacktrace.h
===================================================================
--- include/pub_tool_stacktrace.h (revision 16465)
+++ include/pub_tool_stacktrace.h (working copy)
@@ -31,7 +31,7 @@
#ifndef __PUB_TOOL_STACKTRACE_H
#define __PUB_TOOL_STACKTRACE_H
-#include "pub_tool_basics.h" // Addr
+#include "pub_tool_basics.h" // Addr, DiEpoch
// The basic stack trace type: just an array of code addresses.
typedef Addr* StackTrace;
@@ -64,19 +64,19 @@
/*OUT*/StackTrace fps,
Word first_ip_delta );
-// Apply a function to every element in the StackTrace. The parameter
-// 'n' gives the index of the passed ip. 'opaque' is an arbitrary
-// pointer provided to each invocation of 'action' (a poor man's
-// closure). Doesn't go below main() unless --show-below-main=yes is
-// set.
+// Apply a function to every element in the StackTrace. The parameter 'n'
+// gives the index of the passed ip. 'opaque' is an arbitrary pointer
+// provided to each invocation of 'action' (a poor man's closure). 'ep' is
+// the debuginfo epoch assumed to apply to all code addresses in the stack
+// trace. Doesn't go below main() unless --show-below-main=yes is set.
extern void VG_(apply_StackTrace)(
- void(*action)(UInt n, Addr ip, void* opaque),
+ void(*action)(UInt n, DiEpoch ep, Addr ip, void* opaque),
void* opaque,
- StackTrace ips, UInt n_ips
+ DiEpoch ep, StackTrace ips, UInt n_ips
);
// Print a StackTrace.
-extern void VG_(pp_StackTrace) ( StackTrace ips, UInt n_ips );
+extern void VG_(pp_StackTrace) ( DiEpoch ep, StackTrace ips, UInt n_ips );
// Gets and immediately prints a StackTrace. Just a bit simpler than
// calling VG_(get_StackTrace)() then VG_(pp_StackTrace)().
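A sketch of a tool-side callback adjusted to the new signature (print_one_frame and the surrounding call are assumed, not part of the patch); VG_(get_fnname) with a leading epoch argument is the form this patch introduces elsewhere.

   static void print_one_frame ( UInt n, DiEpoch ep, Addr ip, void* opaque )
   {
      const HChar* fnname;
      if (VG_(get_fnname)(ep, ip, &fnname))   /* epoch-aware symbol lookup */
         VG_(umsg)("frame %u: %s\n", n, fnname);
      else
         VG_(umsg)("frame %u: %#lx\n", n, ip);
   }

   /* given ips/n_ips from VG_(get_StackTrace): */
   VG_(apply_StackTrace)(print_one_frame, NULL,
                         VG_(current_DiEpoch)(), ips, n_ips);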
Index: include/pub_tool_tooliface.h
===================================================================
--- include/pub_tool_tooliface.h (revision 16465)
+++ include/pub_tool_tooliface.h (working copy)
@@ -463,7 +463,7 @@
of an address ? */
extern void VG_(needs_info_location) (
// Get and pp information about Addr
- void (*info_location)(Addr)
+ void (*info_location)(DiEpoch, Addr)
);
/* Do we need to see variable type and location information? */
Index: lackey/lk_main.c
===================================================================
--- lackey/lk_main.c (revision 16465)
+++ lackey/lk_main.c (working copy)
@@ -664,6 +664,7 @@
Addr iaddr = 0, dst;
UInt ilen = 0;
Bool condition_inverted = False;
+ DiEpoch ep = VG_(current_DiEpoch)();
if (gWordTy != hWordTy) {
/* We don't currently support this case. */
@@ -750,7 +751,7 @@
tl_assert(clo_fnname);
tl_assert(clo_fnname[0]);
const HChar *fnname;
- if (VG_(get_fnname_if_entry)(st->Ist.IMark.addr,
+ if (VG_(get_fnname_if_entry)(ep, st->Ist.IMark.addr,
&fnname)
&& 0 == VG_(strcmp)(fnname, clo_fnname)) {
di = unsafeIRDirty_0_N(
Index: massif/ms_main.c
===================================================================
--- massif/ms_main.c (revision 16465)
+++ massif/ms_main.c (working copy)
@@ -520,8 +520,9 @@
// alloc function 'inside' a stacktrace e.g.
// 0x1 0x2 0x3 alloc func1 main
// becomes 0x1 0x2 0x3 func1 main
+ DiEpoch ep = VG_(current_DiEpoch)();
for (i = *top; i < n_ips; i++) {
- top_has_fnname = VG_(get_fnname)(ips[*top], &fnname);
+ top_has_fnname = VG_(get_fnname)(ep, ips[*top], &fnname);
if (top_has_fnname && VG_(strIsMemberXA)(alloc_fns, fnname)) {
VERB(4, "filtering alloc fn %s\n", fnname);
(*top)++;
@@ -576,7 +577,8 @@
if (exclude_first_entry && n_ips > 0) {
const HChar *fnname;
VERB(4, "removing top fn %s from stacktrace\n",
- VG_(get_fnname)(ips[0], &fnname) ? fnname : "???");
+ VG_(get_fnname)(VG_(current_DiEpoch)(), ips[0], &fnname)
+ ? fnname : "???");
return VG_(make_ExeContext_from_StackTrace)(ips+1, n_ips-1);
} else
return VG_(make_ExeContext_from_StackTrace)(ips, n_ips);
Index: memcheck/mc_errors.c
===================================================================
--- memcheck/mc_errors.c (revision 16465)
+++ memcheck/mc_errors.c (working copy)
@@ -94,15 +94,15 @@
struct {
SizeT szB; // size of value in bytes
// Origin info
- UInt otag; // origin tag
- ExeContext* origin_ec; // filled in later
+ UInt otag; // origin tag
+ ExeContextAndEpoch origin_ece; // filled in later
} Value;
// Use of an undefined value in a conditional branch or move.
struct {
// Origin info
- UInt otag; // origin tag
- ExeContext* origin_ec; // filled in later
+ UInt otag; // origin tag
+ ExeContextAndEpoch origin_ece; // filled in later
} Cond;
// Addressability error in core (signal-handling) operation.
@@ -127,8 +127,8 @@
// System call register input contains undefined bytes.
struct {
// Origin info
- UInt otag; // origin tag
- ExeContext* origin_ec; // filled in later
+ UInt otag; // origin tag
+ ExeContextAndEpoch origin_ece; // filled in later
} RegParam;
// System call memory input contains undefined/unaddressable bytes
@@ -136,8 +136,8 @@
Bool isAddrErr; // Addressability or definedness error?
AddrInfo ai;
// Origin info
- UInt otag; // origin tag
- ExeContext* origin_ec; // filled in later
+ UInt otag; // origin tag
+ ExeContextAndEpoch origin_ece; // filled in later
} MemParam;
// Problem found from a client request like CHECK_MEM_IS_ADDRESSABLE.
@@ -145,8 +145,8 @@
Bool isAddrErr; // Addressability or definedness error?
AddrInfo ai;
// Origin info
- UInt otag; // origin tag
- ExeContext* origin_ec; // filled in later
+ UInt otag; // origin tag
+ ExeContextAndEpoch origin_ece; // filled in later
} User;
// Program tried to free() something that's not a heap block (this
@@ -279,10 +279,10 @@
}
}
-static void mc_pp_origin ( ExeContext* ec, UInt okind )
+static void mc_pp_origin ( ExeContextAndEpoch ece, UInt okind )
{
const HChar* src = NULL;
- tl_assert(ec);
+ tl_assert(ece.ec);
switch (okind) {
case MC_OKIND_STACK: src = " by a stack allocation"; break;
@@ -295,10 +295,10 @@
if (VG_(clo_xml)) {
emit( " <auxwhat>Uninitialised value was created%s</auxwhat>\n",
src);
- VG_(pp_ExeContext)( ec );
+ VG_(pp_ExeContextAndEpoch)( ece );
} else {
emit( " Uninitialised value was created%s\n", src);
- VG_(pp_ExeContext)( ec );
+ VG_(pp_ExeContextAndEpoch)( ece );
}
}
@@ -379,7 +379,7 @@
emit( " <leakedblocks>%u</leakedblocks>\n", lr->num_blocks);
emit( " </xwhat>\n" );
}
- VG_(pp_ExeContext)(lr->key.allocated_at);
+ VG_(pp_ExeContextAndEpoch)(lr->key.allocated_at);
} else { /* ! if (xml) */
if (lr->indirect_szB > 0) {
emit(
@@ -401,7 +401,7 @@
n_this_record, n_total_records
);
}
- VG_(pp_ExeContext)(lr->key.allocated_at);
+ VG_(pp_ExeContextAndEpoch)(lr->key.allocated_at);
} /* if (xml) */
}
@@ -427,11 +427,11 @@
emit( " <kind>CoreMemError</kind>\n" );
emit( " <what>%pS contains unaddressable byte(s)</what>\n",
VG_(get_error_string)(err));
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
} else {
emit( "%s contains unaddressable byte(s)\n",
VG_(get_error_string)(err));
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
}
break;
@@ -441,19 +441,19 @@
emit( " <kind>UninitValue</kind>\n" );
emit( " <what>Use of uninitialised value of size %lu</what>\n",
extra->Err.Value.szB );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
- if (extra->Err.Value.origin_ec)
- mc_pp_origin( extra->Err.Value.origin_ec,
- extra->Err.Value.otag & 3 );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
+ if (extra->Err.Value.origin_ece.ec)
+ mc_pp_origin( extra->Err.Value.origin_ece,
+ extra->Err.Value.otag & 3 );
} else {
/* Could also show extra->Err.Cond.otag if debugging origin
tracking */
emit( "Use of uninitialised value of size %lu\n",
extra->Err.Value.szB );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
- if (extra->Err.Value.origin_ec)
- mc_pp_origin( extra->Err.Value.origin_ec,
- extra->Err.Value.otag & 3 );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
+ if (extra->Err.Value.origin_ece.ec)
+ mc_pp_origin( extra->Err.Value.origin_ece,
+ extra->Err.Value.otag & 3 );
}
break;
@@ -463,9 +463,9 @@
emit( " <kind>UninitCondition</kind>\n" );
emit( " <what>Conditional jump or move depends"
" on uninitialised value(s)</what>\n" );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
- if (extra->Err.Cond.origin_ec)
- mc_pp_origin( extra->Err.Cond.origin_ec,
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
+ if (extra->Err.Cond.origin_ece.ec)
+ mc_pp_origin( extra->Err.Cond.origin_ece,
extra->Err.Cond.otag & 3 );
} else {
/* Could also show extra->Err.Cond.otag if debugging origin
@@ -472,9 +472,9 @@
tracking */
emit( "Conditional jump or move depends"
" on uninitialised value(s)\n" );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
- if (extra->Err.Cond.origin_ec)
- mc_pp_origin( extra->Err.Cond.origin_ec,
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
+ if (extra->Err.Cond.origin_ece.ec)
+ mc_pp_origin( extra->Err.Cond.origin_ece,
extra->Err.Cond.otag & 3 );
}
break;
@@ -486,16 +486,16 @@
emit( " <what>Syscall param %pS contains "
"uninitialised byte(s)</what>\n",
VG_(get_error_string)(err) );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
- if (extra->Err.RegParam.origin_ec)
- mc_pp_origin( extra->Err.RegParam.origin_ec,
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
+ if (extra->Err.RegParam.origin_ece.ec)
+ mc_pp_origin( extra->Err.RegParam.origin_ece,
extra->Err.RegParam.otag & 3 );
} else {
emit( "Syscall param %s contains uninitialised byte(s)\n",
VG_(get_error_string)(err) );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
- if (extra->Err.RegParam.origin_ec)
- mc_pp_origin( extra->Err.RegParam.origin_ec,
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
+ if (extra->Err.RegParam.origin_ece.ec)
+ mc_pp_origin( extra->Err.RegParam.origin_ece,
extra->Err.RegParam.otag & 3 );
}
break;
@@ -509,12 +509,12 @@
VG_(get_error_string)(err),
extra->Err.MemParam.isAddrErr
? "unaddressable" : "uninitialised" );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
&extra->Err.MemParam.ai, False);
- if (extra->Err.MemParam.origin_ec
+ if (extra->Err.MemParam.origin_ece.ec
&& !extra->Err.MemParam.isAddrErr)
- mc_pp_origin( extra->Err.MemParam.origin_ec,
+ mc_pp_origin( extra->Err.MemParam.origin_ece,
extra->Err.MemParam.otag & 3 );
} else {
emit( "Syscall param %s points to %s byte(s)\n",
@@ -521,12 +521,12 @@
VG_(get_error_string)(err),
extra->Err.MemParam.isAddrErr
? "unaddressable" : "uninitialised" );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
&extra->Err.MemParam.ai, False);
- if (extra->Err.MemParam.origin_ec
+ if (extra->Err.MemParam.origin_ece.ec
&& !extra->Err.MemParam.isAddrErr)
- mc_pp_origin( extra->Err.MemParam.origin_ec,
+ mc_pp_origin( extra->Err.MemParam.origin_ece,
extra->Err.MemParam.otag & 3 );
}
break;
@@ -540,21 +540,21 @@
"during client check request</what>\n",
extra->Err.User.isAddrErr
? "Unaddressable" : "Uninitialised" );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
VG_(pp_addrinfo_mc)(VG_(get_error_address)(err), &extra->Err.User.ai,
False);
- if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
- mc_pp_origin( extra->Err.User.origin_ec,
+ if (extra->Err.User.origin_ece.ec && !extra->Err.User.isAddrErr)
+ mc_pp_origin( extra->Err.User.origin_ece,
extra->Err.User.otag & 3 );
} else {
emit( "%s byte(s) found during client check request\n",
extra->Err.User.isAddrErr
? "Unaddressable" : "Uninitialised" );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
VG_(pp_addrinfo_mc)(VG_(get_error_address)(err), &extra->Err.User.ai,
False);
- if (extra->Err.User.origin_ec && !extra->Err.User.isAddrErr)
- mc_pp_origin( extra->Err.User.origin_ec,
+ if (extra->Err.User.origin_ece.ec && !extra->Err.User.isAddrErr)
+ mc_pp_origin( extra->Err.User.origin_ece,
extra->Err.User.otag & 3 );
}
break;
@@ -564,12 +564,12 @@
emit( " <kind>InvalidFree</kind>\n" );
emit( " <what>Invalid free() / delete / delete[]"
" / realloc()</what>\n" );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
&extra->Err.Free.ai, False );
} else {
emit( "Invalid free() / delete / delete[] / realloc()\n" );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
&extra->Err.Free.ai, False );
}
@@ -579,12 +579,12 @@
if (xml) {
emit( " <kind>MismatchedFree</kind>\n" );
emit( " <what>Mismatched free() / delete / delete []</what>\n" );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
&extra->Err.FreeMismatch.ai, False);
} else {
emit( "Mismatched free() / delete / delete []\n" );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
VG_(pp_addrinfo_mc)(VG_(get_error_address)(err),
&extra->Err.FreeMismatch.ai, False);
}
@@ -597,7 +597,7 @@
emit( " <what>Invalid %s of size %lu</what>\n",
extra->Err.Addr.isWrite ? "write" : "read",
extra->Err.Addr.szB );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
&extra->Err.Addr.ai,
extra->Err.Addr.maybe_gcc );
@@ -605,7 +605,7 @@
emit( "Invalid %s of size %lu\n",
extra->Err.Addr.isWrite ? "write" : "read",
extra->Err.Addr.szB );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
&extra->Err.Addr.ai,
@@ -618,12 +618,12 @@
emit( " <kind>InvalidJump</kind>\n" );
emit( " <what>Jump to the invalid address stated "
"on the next line</what>\n" );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
VG_(pp_addrinfo_mc)( VG_(get_error_address)(err), &extra->Err.Jump.ai,
False );
} else {
emit( "Jump to the invalid address stated on the next line\n" );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
VG_(pp_addrinfo_mc)( VG_(get_error_address)(err), &extra->Err.Jump.ai,
False );
}
@@ -644,7 +644,7 @@
extra->Err.Overlap.dst, extra->Err.Overlap.src,
extra->Err.Overlap.szB );
}
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
} else {
if (extra->Err.Overlap.szB == 0) {
emit( "Source and destination overlap in %s(%#lx, %#lx)\n",
@@ -656,7 +656,7 @@
extra->Err.Overlap.dst, extra->Err.Overlap.src,
extra->Err.Overlap.szB );
}
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
}
break;
@@ -666,12 +666,12 @@
if (xml) {
emit( " <kind>InvalidMemPool</kind>\n" );
emit( " <what>Illegal memory pool address</what>\n" );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
&extra->Err.IllegalMempool.ai, False );
} else {
emit( "Illegal memory pool address\n" );
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
VG_(pp_addrinfo_mc)( VG_(get_error_address)(err),
&extra->Err.IllegalMempool.ai, False );
}
@@ -695,7 +695,7 @@
extra->Err.FishyValue.function_name,
(SSizeT)extra->Err.FishyValue.value);
emit( "</what>");
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
} else {
emit( "Argument '%s' of function %s has a fishy "
"(possibly negative) value: %ld\n",
@@ -702,7 +702,7 @@
extra->Err.FishyValue.argument_name,
extra->Err.FishyValue.function_name,
(SSizeT)extra->Err.FishyValue.value);
- VG_(pp_ExeContext)( VG_(get_error_where)(err) );
+ VG_(pp_ExeContextAndEpoch)( VG_(get_error_where)(err) );
}
break;
@@ -773,9 +773,10 @@
tl_assert( MC_(clo_mc_level) >= 2 );
if (otag > 0)
tl_assert( MC_(clo_mc_level) == 3 );
- extra.Err.Value.szB = szB;
- extra.Err.Value.otag = otag;
- extra.Err.Value.origin_ec = NULL; /* Filled in later */
+ extra.Err.Value.szB = szB;
+ extra.Err.Value.otag = otag;
+ extra.Err.Value.origin_ece = VG_(invalid_ExeContextAndEpoch)();
+ /* Filled in later */
VG_(maybe_record_error)( tid, Err_Value, /*addr*/0, /*s*/NULL, &extra );
}
@@ -785,8 +786,9 @@
tl_assert( MC_(clo_mc_level) >= 2 );
if (otag > 0)
tl_assert( MC_(clo_mc_level) == 3 );
- extra.Err.Cond.otag = otag;
- extra.Err.Cond.origin_ec = NULL; /* Filled in later */
+ extra.Err.Cond.otag = otag;
+ extra.Err.Cond.origin_ece = VG_(invalid_ExeContextAndEpoch)();
+ /* Filled in later */
VG_(maybe_record_error)( tid, Err_Cond, /*addr*/0, /*s*/NULL, &extra );
}
@@ -804,8 +806,9 @@
tl_assert(VG_INVALID_THREADID != tid);
if (otag > 0)
tl_assert( MC_(clo_mc_level) == 3 );
- extra.Err.RegParam.otag = otag;
- extra.Err.RegParam.origin_ec = NULL; /* Filled in later */
+ extra.Err.RegParam.otag = otag;
+ extra.Err.RegParam.origin_ece = VG_(invalid_ExeContextAndEpoch)();
+ /* Filled in later */
VG_(maybe_record_error)( tid, Err_RegParam, /*addr*/0, msg, &extra );
}
@@ -820,10 +823,11 @@
tl_assert( MC_(clo_mc_level) == 3 );
tl_assert( !isAddrErr );
}
- extra.Err.MemParam.isAddrErr = isAddrErr;
- extra.Err.MemParam.ai.tag = Addr_Undescribed;
- extra.Err.MemParam.otag = otag;
- extra.Err.MemParam.origin_ec = NULL; /* Filled in later */
+ extra.Err.MemParam.isAddrErr = isAddrErr;
+ extra.Err.MemParam.ai.tag = Addr_Undescribed;
+ extra.Err.MemParam.otag = otag;
+ extra.Err.MemParam.origin_ece = VG_(invalid_ExeContextAndEpoch)();
+ /* Filled in later */
VG_(maybe_record_error)( tid, Err_MemParam, a, msg, &extra );
}
@@ -925,10 +929,11 @@
tl_assert( MC_(clo_mc_level) >= 2 );
}
tl_assert(VG_INVALID_THREADID != tid);
- extra.Err.User.isAddrErr = isAddrErr;
- extra.Err.User.ai.tag = Addr_Undescribed;
- extra.Err.User.otag = otag;
- extra.Err.User.origin_ec = NULL; /* Filled in later */
+ extra.Err.User.isAddrErr = isAddrErr;
+ extra.Err.User.ai.tag = Addr_Undescribed;
+ extra.Err.User.otag = otag;
+ extra.Err.User.origin_ece = VG_(invalid_ExeContextAndEpoch)();
+ /* Filled in later */
VG_(maybe_record_error)( tid, Err_User, a, /*s*/NULL, &extra );
}
@@ -1053,7 +1058,7 @@
/* Describe an address as best you can, for error messages,
putting the result in ai. */
-static void describe_addr ( Addr a, /*OUT*/AddrInfo* ai )
+static void describe_addr ( DiEpoch ep, Addr a, /*OUT*/AddrInfo* ai )
{
MC_Chunk* mc;
@@ -1121,28 +1126,30 @@
}
/* No block found. Search a non-heap block description. */
- VG_(describe_addr) (a, ai);
+ VG_(describe_addr) (ep, a, ai);
}
-void MC_(pp_describe_addr) ( Addr a )
+void MC_(pp_describe_addr) ( DiEpoch ep, Addr a )
{
AddrInfo ai;
ai.tag = Addr_Undescribed;
- describe_addr (a, &ai);
+ describe_addr (ep, a, &ai);
VG_(pp_addrinfo_mc) (a, &ai, /* maybe_gcc */ False);
VG_(clear_addrinfo) (&ai);
}
-/* Fill in *origin_ec as specified by otag, or NULL it out if otag
+/* Fill in *origin_ece as specified by otag, or set it to the invalid pair if otag
does not refer to a known origin. */
-static void update_origin ( /*OUT*/ExeContext** origin_ec,
+static void update_origin ( /*OUT*/ExeContextAndEpoch* origin_ece,
UInt otag )
{
UInt ecu = otag & ~3;
- *origin_ec = NULL;
if (VG_(is_plausible_ECU)(ecu)) {
- *origin_ec = VG_(get_ExeContext_from_ECU)( ecu );
+ *origin_ece
+ = VG_(tag_EC_with_current_epoch)(VG_(get_ExeContext_from_ECU)( ecu ));
+ } else {
+ *origin_ece = VG_(invalid_ExeContextAndEpoch)();
}
}
@@ -1150,6 +1157,7 @@
UInt MC_(update_Error_extra)( const Error* err )
{
MC_Error* extra = VG_(get_error_extra)(err);
+ DiEpoch ep = VG_(get_error_where)(err).epoch;
switch (VG_(get_error_kind)(err)) {
// These ones don't have addresses associated with them, and so don't
@@ -1169,45 +1177,45 @@
// origin tag. Note that it is a kludge to assume that
// a length-1 trace indicates a stack origin. FIXME.
case Err_Value:
- update_origin( &extra->Err.Value.origin_ec,
+ update_origin( &extra->Err.Value.origin_ece,
extra->Err.Value.otag );
return sizeof(MC_Error);
case Err_Cond:
- update_origin( &extra->Err.Cond.origin_ec,
+ update_origin( &extra->Err.Cond.origin_ece,
extra->Err.Cond.otag );
return sizeof(MC_Error);
case Err_RegParam:
- update_origin( &extra->Err.RegParam.origin_ec,
+ update_origin( &extra->Err.RegParam.origin_ece,
extra->Err.RegParam.otag );
return sizeof(MC_Error);
// These ones always involve a memory address.
case Err_Addr:
- describe_addr ( VG_(get_error_address)(err),
+ describe_addr ( ep, VG_(get_error_address)(err),
&extra->Err.Addr.ai );
return sizeof(MC_Error);
case Err_MemParam:
- describe_addr ( VG_(get_error_address)(err),
+ describe_addr ( ep, VG_(get_error_address)(err),
&extra->Err.MemParam.ai );
- update_origin( &extra->Err.MemParam.origin_ec,
+ update_origin( &extra->Err.MemParam.origin_ece,
extra->Err.MemParam.otag );
return sizeof(MC_Error);
case Err_Jump:
- describe_addr ( VG_(get_error_address)(err),
+ describe_addr ( ep, VG_(get_error_address)(err),
&extra->Err.Jump.ai );
return sizeof(MC_Error);
case Err_User:
- describe_addr ( VG_(get_error_address)(err),
+ describe_addr ( ep, VG_(get_error_address)(err),
&extra->Err.User.ai );
- update_origin( &extra->Err.User.origin_ec,
+ update_origin( &extra->Err.User.origin_ece,
extra->Err.User.otag );
return sizeof(MC_Error);
case Err_Free:
- describe_addr ( VG_(get_error_address)(err),
+ describe_addr ( ep, VG_(get_error_address)(err),
&extra->Err.Free.ai );
return sizeof(MC_Error);
case Err_IllegalMempool:
- describe_addr ( VG_(get_error_address)(err),
+ describe_addr ( ep, VG_(get_error_address)(err),
&extra->Err.IllegalMempool.ai );
return sizeof(MC_Error);
@@ -1252,7 +1260,7 @@
ai->Addr.Block.rwoffset = (Word)(a) - (Word)(cgbs[i].start);
ai->Addr.Block.allocated_at = cgbs[i].where;
VG_(initThreadInfo) (&ai->Addr.Block.alloc_tinfo);
- ai->Addr.Block.freed_at = VG_(null_ExeContext)();;
+ ai->Addr.Block.freed_at = VG_(null_ExeContextAndEpoch)();
return True;
}
}
Index: memcheck/mc_include.h
===================================================================
--- memcheck/mc_include.h (revision 16465)
+++ memcheck/mc_include.h (working copy)
@@ -67,17 +67,17 @@
Addr data; // Address of the actual block.
SizeT szB : (sizeof(SizeT)*8)-2; // Size requested; 30 or 62 bits.
MC_AllocKind allockind : 2; // Which operation did the allocation.
- ExeContext* where[0];
+ ExeContextAndEpoch where[0];
/* Variable-length array. The size depends on MC_(clo_keep_stacktraces).
This array optionally stores the alloc and/or free stack trace. */
}
MC_Chunk;
-/* Returns the execontext where the MC_Chunk was allocated/freed.
+/* Returns the execontext and epoch where the MC_Chunk was allocated/freed.
Returns VG_(null_ExeContext)() if the execontext has not been recorded (due
to MC_(clo_keep_stacktraces) and/or because block not yet freed). */
-ExeContext* MC_(allocated_at) (MC_Chunk*);
-ExeContext* MC_(freed_at) (MC_Chunk*);
+ExeContextAndEpoch MC_(allocated_at) (MC_Chunk*);
+ExeContextAndEpoch MC_(freed_at) (MC_Chunk*);
/* Records and sets execontext according to MC_(clo_keep_stacktraces) */
void MC_(set_allocated_at) (ThreadId, MC_Chunk*);
@@ -432,8 +432,8 @@
/* When a LossRecord is put into an OSet, these elements represent the key. */
typedef
struct _LossRecordKey {
- Reachedness state; // LC_Extra.state value shared by all blocks.
- ExeContext* allocated_at; // Where they were allocated.
+ Reachedness state; // LC_Extra.state value shared by all blocks.
+ ExeContextAndEpoch allocated_at; // Where they were allocated.
}
LossRecordKey;
@@ -569,8 +569,8 @@
/* Leak kinds tokens to call VG_(parse_enum_set). */
extern const HChar* MC_(parse_leak_kinds_tokens);
-/* prints a description of address a */
-void MC_(pp_describe_addr) (Addr a);
+/* prints a description of address a in the specified debuginfo epoch */
+void MC_(pp_describe_addr) ( DiEpoch ep, Addr a );
/* Is this address in a user-specified "ignored range" ? */
Bool MC_(in_ignored_range) ( Addr a );
@@ -588,10 +588,10 @@
start == size == 0. */
typedef
struct {
- Addr start;
- SizeT size;
- ExeContext* where;
- HChar* desc;
+ Addr start;
+ SizeT size;
+ ExeContextAndEpoch where;
+ HChar* desc;
}
CGenBlock;
Index: memcheck/mc_leakcheck.c
===================================================================
--- memcheck/mc_leakcheck.c (revision 16465)
+++ memcheck/mc_leakcheck.c (working copy)
@@ -1060,6 +1060,8 @@
const Addr end = VG_ROUNDDN(start+len, sizeof(Addr));
fault_catcher_t prev_catcher;
+ const DiEpoch ep = VG_(current_DiEpoch)();
+
if (VG_DEBUG_LEAKCHECK)
VG_(printf)("scan %#lx-%#lx (%lu)\n", start, end, len);
@@ -1139,7 +1141,7 @@
if (addr >= searched && addr < searched + szB) {
if (addr == searched) {
VG_(umsg)("*%#lx points at %#lx\n", ptr, searched);
- MC_(pp_describe_addr) (ptr);
+ MC_(pp_describe_addr) (ep, ptr); // FIXME JRS: ep correct?
} else {
Int ch_no;
MC_Chunk *ch;
@@ -1146,7 +1148,7 @@
LC_Extra *ex;
VG_(umsg)("*%#lx interior points at %lu bytes inside %#lx\n",
ptr, (long unsigned) addr - searched, searched);
- MC_(pp_describe_addr) (ptr);
+ MC_(pp_describe_addr) (ep, ptr); // FIXME JRS: ep correct?
if (lc_is_a_chunk_ptr(addr, &ch_no, &ch, &ex) ) {
Int h;
for (h = LchStdString; h < N_LEAK_CHECK_HEURISTICS; h++) {
@@ -1203,13 +1205,17 @@
// Compare on states first because that's fast.
if (a->state < b->state) return -1;
if (a->state > b->state) return 1;
- // Ok, the states are equal. Now compare the locations, which is slower.
+ // Also on epochs, for the same reason.
+ if (a->allocated_at.epoch.n < b->allocated_at.epoch.n) return -1;
+ if (a->allocated_at.epoch.n > b->allocated_at.epoch.n) return 1;
+ // Ok, the states and epochs are equal. Now compare the locations, which
+ // is slower.
if (VG_(eq_ExeContext)(
- MC_(clo_leak_resolution), a->allocated_at, b->allocated_at))
+ MC_(clo_leak_resolution), a->allocated_at.ec, b->allocated_at.ec))
return 0;
// Different locations. Ordering is arbitrary, just use the ec pointer.
- if (a->allocated_at < b->allocated_at) return -1;
- if (a->allocated_at > b->allocated_at) return 1;
+ if (a->allocated_at.ec < b->allocated_at.ec) return -1;
+ if (a->allocated_at.ec > b->allocated_at.ec) return 1;
VG_(tool_panic)("bad LossRecord comparison");
}
@@ -1231,10 +1237,15 @@
// possible. So: compare num_blocks.
if (lr_a->num_blocks < lr_b->num_blocks) return -1;
if (lr_a->num_blocks > lr_b->num_blocks) return 1;
+ // Then epochs.
+ if (lr_a->key.allocated_at.epoch.n < lr_b->key.allocated_at.epoch.n)
+ return -1;
+ if (lr_a->key.allocated_at.epoch.n > lr_b->key.allocated_at.epoch.n)
+ return 1;
// Finally, compare ExeContext addresses... older ones are likely to have
// lower addresses.
- if (lr_a->key.allocated_at < lr_b->key.allocated_at) return -1;
- if (lr_a->key.allocated_at > lr_b->key.allocated_at) return 1;
+ if (lr_a->key.allocated_at.ec < lr_b->key.allocated_at.ec) return -1;
+ if (lr_a->key.allocated_at.ec > lr_b->key.allocated_at.ec) return 1;
return 0;
}
@@ -1381,7 +1392,7 @@
xtl.xt_lr[i].vid[XT_Decrease].num_blocks
= lr->old_num_blocks - lr->num_blocks;
- VG_(XT_add_to_ec)(leak_xt, lr->key.allocated_at, &xtl);
+ VG_(XT_add_to_ec)(leak_xt, lr->key.allocated_at.ec, &xtl);
}
static void MC_(XT_Leak_sub) (void* from, const void* xtleak)
@@ -2133,9 +2144,9 @@
VG_(umsg)("Block 0x%lx..0x%lx overlaps with block 0x%lx..0x%lx\n",
start1, end1, start2, end2);
VG_(umsg)("Blocks allocation contexts:\n"),
- VG_(pp_ExeContext)( MC_(allocated_at)(ch1));
+ VG_(pp_ExeContextAndEpoch)( MC_(allocated_at)(ch1));
VG_(umsg)("\n"),
- VG_(pp_ExeContext)( MC_(allocated_at)(ch2));
+ VG_(pp_ExeContextAndEpoch)( MC_(allocated_at)(ch2));
VG_(umsg)("This is usually caused by using ");
VG_(umsg)("VALGRIND_MALLOCLIKE_BLOCK in an inappropriate way.\n");
tl_assert (0);
Index: memcheck/mc_main.c
===================================================================
--- memcheck/mc_main.c (revision 16465)
+++ memcheck/mc_main.c (working copy)
@@ -6719,9 +6719,11 @@
const HChar* src;
UInt otag;
UInt ecu;
- ExeContext* origin_ec;
+ ExeContextAndEpoch origin_ece;
MC_ReadResult res;
+ const DiEpoch ep = VG_(current_DiEpoch)();
+
Int kwdid = VG_(keyword_id)
("addressable defined",
VG_(strtok_r) (NULL, " ", &ssaveptr), kwd_report_all);
@@ -6738,7 +6740,8 @@
VG_(printf)
("Address %p len %lu not addressable:\nbad address %p\n",
(void *)address, szB, (void *) bad_addr);
- MC_(pp_describe_addr) (address);
+ // FIXME JRS epoch ok?
+ MC_(pp_describe_addr) (ep, address);
break;
case 1: /* defined */
res = is_mem_defined ( address, szB, &bad_addr, &otag );
@@ -6765,14 +6768,16 @@
(void *)address, szB, (void *) bad_addr, src);
ecu = otag & ~3;
if (VG_(is_plausible_ECU)(ecu)) {
- origin_ec = VG_(get_ExeContext_from_ECU)( ecu );
- VG_(pp_ExeContext)( origin_ec );
+ origin_ece = VG_(tag_EC_with_current_epoch)(
+ VG_(get_ExeContext_from_ECU)( ecu ));
+ VG_(pp_ExeContextAndEpoch)( origin_ece );
}
}
else
VG_(printf) ("Address %p len %lu defined\n",
(void *)address, szB);
- MC_(pp_describe_addr) (address);
+ // FIXME JRS epoch ok?
+ MC_(pp_describe_addr) (ep, address);
break;
default: tl_assert(0);
}
@@ -7049,7 +7054,9 @@
cgbs[i].start = arg[1];
cgbs[i].size = arg[2];
cgbs[i].desc = VG_(strdup)("mc.mhcr.1", (HChar *)arg[3]);
- cgbs[i].where = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
+ cgbs[i].where = VG_(tag_EC_with_current_epoch)(
+ VG_(record_ExeContext) ( tid,
+ 0/*first_ip_delta*/ ));
*ret = i;
} else
*ret = -1;
@@ -7917,7 +7924,7 @@
}
MC_(chunk_poolalloc) = VG_(newPA)
- (sizeof(MC_Chunk) + MC_(n_where_pointers)() * sizeof(ExeContext*),
+ (sizeof(MC_Chunk) + MC_(n_where_pointers)() * sizeof(ExeContextAndEpoch),
1000,
VG_(malloc),
"mc.cMC.1 (MC_Chunk pools)",
Index: memcheck/mc_malloc_wrappers.c
===================================================================
--- memcheck/mc_malloc_wrappers.c (revision 16465)
+++ memcheck/mc_malloc_wrappers.c (working copy)
@@ -199,8 +199,8 @@
mc->szB = szB;
mc->allockind = kind;
switch ( MC_(n_where_pointers)() ) {
- case 2: mc->where[1] = 0; // fallback to 1
- case 1: mc->where[0] = 0; // fallback to 0
+ case 2: mc->where[1] = VG_(invalid_ExeContextAndEpoch)(); // fall thru
+ case 1: mc->where[0] = VG_(invalid_ExeContextAndEpoch)(); // fall thru
case 0: break;
default: tl_assert(0);
}
@@ -268,30 +268,34 @@
return in_block_list ( MC_(malloc_list), mc );
}
-ExeContext* MC_(allocated_at) (MC_Chunk* mc)
+ExeContextAndEpoch MC_(allocated_at) (MC_Chunk* mc)
{
switch (MC_(clo_keep_stacktraces)) {
- case KS_none: return VG_(null_ExeContext) ();
+ case KS_none: return VG_(null_ExeContextAndEpoch) ();
case KS_alloc: return mc->where[0];
- case KS_free: return VG_(null_ExeContext) ();
- case KS_alloc_then_free: return (live_block(mc) ?
- mc->where[0] : VG_(null_ExeContext) ());
+ case KS_free: return VG_(null_ExeContextAndEpoch) ();
+ case KS_alloc_then_free: return live_block(mc)
+ ? mc->where[0]
+ : VG_(null_ExeContextAndEpoch) ();
case KS_alloc_and_free: return mc->where[0];
default: tl_assert (0);
}
}
-ExeContext* MC_(freed_at) (MC_Chunk* mc)
+ExeContextAndEpoch MC_(freed_at) (MC_Chunk* mc)
{
switch (MC_(clo_keep_stacktraces)) {
- case KS_none: return VG_(null_ExeContext) ();
- case KS_alloc: return VG_(null_ExeContext) ();
- case KS_free: return (mc->where[0] ?
- mc->where[0] : VG_(null_ExeContext) ());
- case KS_alloc_then_free: return (live_block(mc) ?
- VG_(null_ExeContext) () : mc->where[0]);
- case KS_alloc_and_free: return (mc->where[1] ?
- mc->where[1] : VG_(null_ExeContext) ());
+ case KS_none: return VG_(null_ExeContextAndEpoch) ();
+ case KS_alloc: return VG_(null_ExeContextAndEpoch) ();
+ case KS_free: return mc->where[0].ec
+ ? mc->where[0]
+ : VG_(null_ExeContextAndEpoch) ();
+ case KS_alloc_then_free: return live_block(mc)
+ ? VG_(null_ExeContextAndEpoch) ()
+ : mc->where[0];
+ case KS_alloc_and_free: return mc->where[1].ec
+ ? mc->where[1]
+ : VG_(null_ExeContextAndEpoch) ();
default: tl_assert (0);
}
}
@@ -306,15 +310,16 @@
case KS_alloc_and_free: break;
default: tl_assert (0);
}
- mc->where[0] = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
+ mc->where[0] = VG_(tag_EC_with_current_epoch)(
+ VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ ));
if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
- VG_(XTMemory_Full_alloc)(mc->szB, mc->where[0]);
+ VG_(XTMemory_Full_alloc)(mc->szB, mc->where[0].ec);
}
void MC_(set_freed_at) (ThreadId tid, MC_Chunk* mc)
{
Int pos;
- ExeContext* ec_free;
+ ExeContextAndEpoch ec_free;
switch (MC_(clo_keep_stacktraces)) {
case KS_none: return;
@@ -333,9 +338,10 @@
Note: we are guaranteed to find the ec_alloc in mc->where[0], as
mc_post_clo_init verifies the consistency of --xtree-memory and
--keep-stacktraces. */
- ec_free = VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ );
+ ec_free = VG_(tag_EC_with_current_epoch)(
+ VG_(record_ExeContext) ( tid, 0/*first_ip_delta*/ ));
if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
- VG_(XTMemory_Full_free)(mc->szB, mc->where[0], ec_free);
+ VG_(XTMemory_Full_free)(mc->szB, mc->where[0].ec, ec_free.ec);
if (LIKELY(pos >= 0))
mc->where[pos] = ec_free;
}
@@ -391,7 +397,7 @@
if (is_zeroed)
MC_(make_mem_defined)( p, szB );
else {
- UInt ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(mc));
+ UInt ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(mc).ec);
tl_assert(VG_(is_plausible_ECU)(ecu));
MC_(make_mem_undefined_w_otag)( p, szB, ecu | MC_OKIND_HEAP );
}
@@ -605,7 +611,7 @@
// If the block has grown, we mark the grown area as undefined.
// We have to do that after VG_(HT_add_node) to ensure the ecu
// execontext is for a fully allocated block.
- ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(new_mc));
+ ecu = VG_(get_ECU_from_ExeContext)(MC_(allocated_at)(new_mc).ec);
tl_assert(VG_(is_plausible_ECU)(ecu));
MC_(make_mem_undefined_w_otag)( a_new+old_szB,
new_szB-old_szB,
@@ -673,7 +679,7 @@
return;
if (UNLIKELY(VG_(clo_xtree_memory) == Vg_XTMemory_Full))
- VG_(XTMemory_Full_resize_in_place)(oldSizeB, newSizeB, mc->where[0]);
+ VG_(XTMemory_Full_resize_in_place)(oldSizeB, newSizeB, mc->where[0].ec);
mc->szB = newSizeB;
if (newSizeB < oldSizeB) {
@@ -887,7 +893,7 @@
chunks[i]->data,
chunks[i]->data + chunks[i]->szB);
- VG_(pp_ExeContext)(MC_(allocated_at)(chunks[i]));
+ VG_(pp_ExeContextAndEpoch)(MC_(allocated_at)(chunks[i]));
}
}
VG_(free)(chunks);
@@ -1147,7 +1153,7 @@
if (mc) {
xta->nbytes = mc->szB;
xta->nblocks = 1;
- *ec_alloc = MC_(allocated_at)(mc);
+ *ec_alloc = MC_(allocated_at)(mc).ec;
} else
xta->nblocks = 0;
}
Index: memcheck/tests/linux/Makefile.am
===================================================================
--- memcheck/tests/linux/Makefile.am (revision 16465)
+++ memcheck/tests/linux/Makefile.am (working copy)
@@ -6,6 +6,10 @@
EXTRA_DIST = \
brk.stderr.exp brk.vgtest \
capget.vgtest capget.stderr.exp capget.stderr.exp2 \
+ dlclose_leak-no-keep.stderr.exp dlclose_leak-no-keep.stdout.exp \
+ dlclose_leak-no-keep.vgtest \
+ dlclose_leak.stderr.exp dlclose_leak.stdout.exp \
+ dlclose_leak.vgtest \
ioctl-tiocsig.vgtest ioctl-tiocsig.stderr.exp \
lsframe1.vgtest lsframe1.stdout.exp lsframe1.stderr.exp \
lsframe2.vgtest lsframe2.stdout.exp lsframe2.stderr.exp \
@@ -25,6 +29,7 @@
check_PROGRAMS = \
brk \
capget \
+ dlclose_leak dlclose_leak_so.so \
ioctl-tiocsig \
getregset \
lsframe1 \
@@ -48,3 +53,15 @@
stack_switch_LDADD = -lpthread
timerfd_syscall_LDADD = -lrt
+# Build shared object for dlclose_leak
+dlclose_leak_so_so_SOURCES = dlclose_leak_so.c
+dlclose_leak_so_so_CFLAGS = $(AM_CFLAGS) -fpic -g -O0
+dlclose_leak_so_so_LDFLAGS = -fpic $(AM_FLAG_M3264_PRI) -shared -Wl,-soname \
+ -Wl,dlclose_leak_so.so
+
+dlclose_leak_SOURCES = dlclose_leak.c
+dlclose_leak_DEPENDENCIES = dlclose_leak_so.so
+dlclose_leak_LDADD = dlclose_leak_so.so
+dlclose_leak_LDFLAGS = $(AM_FLAG_M3264_PRI) \
+ -ldl \
+ -Wl,-rpath,$(top_builddir)/memcheck/tests/linux
Index: memcheck/tests/linux/dlclose_leak-no-keep.stderr.exp
===================================================================
--- memcheck/tests/linux/dlclose_leak-no-keep.stderr.exp (nonexistent)
+++ memcheck/tests/linux/dlclose_leak-no-keep.stderr.exp (working copy)
@@ -0,0 +1,30 @@
+
+Conditional jump or move depends on uninitialised value(s)
+ ...
+
+Invalid read of size 1
+ ...
+ Address 0x........ is 1 bytes before a block of size 1 alloc'd
+ at 0x........: malloc (vg_replace_malloc.c:...)
+ ...
+
+done!
+
+HEAP SUMMARY:
+ in use at exit: 1 bytes in 1 blocks
+ total heap usage: 4 allocs, 3 frees, 123 bytes allocated
+
+1 bytes in 1 blocks are definitely lost in loss record ... of ...
+ at 0x........: malloc (vg_replace_malloc.c:...)
+ ...
+
+LEAK SUMMARY:
+ definitely lost: 1 bytes in 1 blocks
+ indirectly lost: 0 bytes in 0 blocks
+ possibly lost: 0 bytes in 0 blocks
+ still reachable: 0 bytes in 0 blocks
+ suppressed: 0 bytes in 0 blocks
+
+For counts of detected and suppressed errors, rerun with: -v
+Use --track-origins=yes to see where uninitialised values come from
+ERROR SUMMARY: 3 errors from 3 contexts (suppressed: 0 from 0)
Index: memcheck/tests/linux/dlclose_leak-no-keep.stdout.exp
===================================================================
Index: memcheck/tests/linux/dlclose_leak-no-keep.vgtest
===================================================================
--- memcheck/tests/linux/dlclose_leak-no-keep.vgtest (nonexistent)
+++ memcheck/tests/linux/dlclose_leak-no-keep.vgtest (working copy)
@@ -0,0 +1,3 @@
+prog: dlclose_leak
+stderr_filter: ../filter_stderr
+vgopts: --leak-check=yes --keep-debuginfo=no
Index: memcheck/tests/linux/dlclose_leak.c
===================================================================
--- memcheck/tests/linux/dlclose_leak.c (nonexistent)
+++ memcheck/tests/linux/dlclose_leak.c (working copy)
@@ -0,0 +1,32 @@
+/* Test reporting of memory leaks in objects that have been dlopen'ed.
+ * File: dlclose_leak.c */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <dlfcn.h>
+#include <assert.h>
+
+int (*jmp_on_uninit)(void);
+char* (*alloc_1_byte)(void);
+
+int main(int argc, char** argv) {
+ char* memToLeak;
+ char x;
+ void* handle = dlopen("./dlclose_leak_so.so", RTLD_NOW);
+ if(!handle) {
+ printf("FAILURE to dlopen dlclose_leak_so.so\n");
+ return EXIT_FAILURE;
+ }
+ jmp_on_uninit = dlsym(handle,"jmp_on_uninit");
+ //fprintf(stderr, "jmp_on_uninit: %p\n", jmp_on_uninit);
+ assert(jmp_on_uninit);
+ alloc_1_byte = dlsym(handle,"alloc_1_byte");
+ //fprintf(stderr, "alloc_1_byte: %p\n", alloc_1_byte);
+ assert(alloc_1_byte);
+ (void)jmp_on_uninit();
+ memToLeak = alloc_1_byte();
+ dlclose(handle);
+ x = memToLeak[-1];
+ fprintf(stderr, "done!\n");
+ return (EXIT_SUCCESS);
+}
Index: memcheck/tests/linux/dlclose_leak.stderr.exp
===================================================================
--- memcheck/tests/linux/dlclose_leak.stderr.exp (nonexistent)
+++ memcheck/tests/linux/dlclose_leak.stderr.exp (working copy)
@@ -0,0 +1,33 @@
+
+Conditional jump or move depends on uninitialised value(s)
+ at 0x........: jmp_on_uninit (dlclose_leak_so.c:10)
+ by 0x........: main (dlclose_leak.c:26)
+
+Invalid read of size 1
+ at 0x........: main (dlclose_leak.c:29)
+ Address 0x........ is 1 bytes before a block of size 1 alloc'd
+ at 0x........: malloc (vg_replace_malloc.c:...)
+ by 0x........: alloc_1_byte (dlclose_leak_so.c:20)
+ by 0x........: main (dlclose_leak.c:27)
+
+done!
+
+HEAP SUMMARY:
+ in use at exit: 1 bytes in 1 blocks
+ total heap usage: 4 allocs, 3 frees, 123 bytes allocated
+
+1 bytes in 1 blocks are definitely lost in loss record ... of ...
+ at 0x........: malloc (vg_replace_malloc.c:...)
+ by 0x........: alloc_1_byte (dlclose_leak_so.c:20)
+ by 0x........: main (dlclose_leak.c:27)
+
+LEAK SUMMARY:
+ definitely lost: 1 bytes in 1 blocks
+ indirectly lost: 0 bytes in 0 blocks
+ possibly lost: 0 bytes in 0 blocks
+ still reachable: 0 bytes in 0 blocks
+ suppressed: 0 bytes in 0 blocks
+
+For counts of detected and suppressed errors, rerun with: -v
+Use --track-origins=yes to see where uninitialised values come from
+ERROR SUMMARY: 3 errors from 3 contexts (suppressed: 0 from 0)
Index: memcheck/tests/linux/dlclose_leak.stdout.exp
===================================================================
Index: memcheck/tests/linux/dlclose_leak.vgtest
===================================================================
--- memcheck/tests/linux/dlclose_leak.vgtest (nonexistent)
+++ memcheck/tests/linux/dlclose_leak.vgtest (working copy)
@@ -0,0 +1,3 @@
+prog: dlclose_leak
+stderr_filter: ../filter_stderr
+vgopts: --leak-check=yes --keep-debuginfo=yes
Index: memcheck/tests/linux/dlclose_leak_so.c
===================================================================
--- memcheck/tests/linux/dlclose_leak_so.c (nonexistent)
+++ memcheck/tests/linux/dlclose_leak_so.c (working copy)
@@ -0,0 +1,21 @@
+/* dlclose_leak_so.c */
+
+#include <stdlib.h>
+
+/** Makes a jump based on an uninitialized variable, to check that errors
+ * reported while the dlopen'ed object is still loaded are handled correctly. */
+int jmp_on_uninit(void) {
+ int uninit[27];
+ __asm__ __volatile("":::"cc","memory");
+ if(uninit[13]) {
+ return 1;
+ } else {
+ return 0;
+ }
+}
+
+/** Leak 1 byte of memory. This is to test the stack trace reported after the
+ * object has been dlclose'd. */
+char* alloc_1_byte(void) {
+ return (char*)malloc(1);
+}
Index: none/tests/cmdline1.stdout.exp
===================================================================
--- none/tests/cmdline1.stdout.exp (revision 16465)
+++ none/tests/cmdline1.stdout.exp (working copy)
@@ -42,6 +42,10 @@
--error-exitcode=<number> exit code to return if errors found [0=disable]
--error-markers=<begin>,<end> add lines with begin/end markers before/after
each error output in plain text mode [none]
+ --keep-debuginfo=no|yes Keep symbols etc for unloaded code [no]
+ This allows stack traces for memory leaks to
+ include file/line info for code that has been
+ dlclose'd (or similar)
--show-below-main=no|yes continue stack traces below main() [no]
--default-suppressions=yes|no
load default suppressions [yes]
Index: none/tests/cmdline2.stdout.exp
===================================================================
--- none/tests/cmdline2.stdout.exp (revision 16465)
+++ none/tests/cmdline2.stdout.exp (working copy)
@@ -42,6 +42,10 @@
--error-exitcode=<number> exit code to return if errors found [0=disable]
--error-markers=<begin>,<end> add lines with begin/end markers before/after
each error output in plain text mode [none]
+ --keep-debuginfo=no|yes Keep symbols etc for unloaded code [no]
+ This allows stack traces for memory leaks to
+ include file/line info for code that has been
+ dlclose'd (or similar)
--show-below-main=no|yes continue stack traces below main() [no]
--default-suppressions=yes|no
load default suppressions [yes]