gdb/gdb-6.3-ia64-gcore-speedup-20050714.patch

2005-07-14  Jeff Johnston  <jjohnstn@redhat.com>

	* linux-nat.c (linux_nat_xfer_memory): Incorporate Fujitsu
	work-around to use /proc/<pid>/mem for memory transfers, but fall
	back to ptrace for the ia64 RSE register areas.
	* ia64-linux-nat.c (ia64_rse_slot_num): New static function.
	(ia64_rse_skip_regs): Ditto.
	(ia64_linux_check_stack_region): New function.
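
The strategy the linux-nat.c hunk below implements is: do the bulk of the
transfer through the fast /proc/<pid>/mem path, and fall back to the slow
ptrace path only for the sub-range that overlaps a thread's dirty register
stack (RSE) backing store, where /proc/<pid>/mem can return stale data.
A minimal sketch of that flow, assuming hypothetical helpers proc_mem_read,
ptrace_read and rse_overlaps (the real patch uses linux_proc_xfer_memory,
child_xfer_memory and ia64_linux_check_stack_region instead):

/* Hypothetical names, not GDB APIs: proc_mem_read is a bulk read through
   /proc/<pid>/mem, ptrace_read a word-at-a-time ptrace read, and
   rse_overlaps a stand-in for ia64_linux_check_stack_region.  */
#include <stddef.h>

struct mem_region { unsigned long lo, hi; };

extern int proc_mem_read (unsigned long addr, char *buf, size_t len);
extern int ptrace_read (unsigned long addr, char *buf, size_t len);
/* Returns nonzero and narrows *range to a thread's dirty RSE backing
   store if that area overlaps the requested range.  */
extern int rse_overlaps (struct mem_region *range);

static int
read_inferior_memory (unsigned long addr, char *buf, size_t len)
{
  struct mem_region range = { addr, addr + len };
  int xfer;

  /* Fast path: one bulk read through /proc/<pid>/mem.  */
  xfer = proc_mem_read (addr, buf, len);
  if (xfer == 0)
    return ptrace_read (addr, buf, len);   /* /proc failed entirely.  */

  /* /proc/<pid>/mem may hold stale data for a thread's dirty register
     stack, so re-read just that window through ptrace.  */
  if (rse_overlaps (&range)
      && ptrace_read (range.lo, buf + (range.lo - addr),
                      range.hi - range.lo) == 0)
    return 0;

  return xfer;
}
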
--- gdb-6.3/gdb/linux-nat.c.fix 2005-07-14 17:53:13.000000000 -0400
+++ gdb-6.3/gdb/linux-nat.c 2005-07-14 19:31:00.000000000 -0400
@@ -2438,7 +2438,9 @@ linux_nat_mourn_inferior (void)
deprecated_child_ops.to_mourn_inferior ();
}
-
+#ifdef NATIVE_XFER_UNWIND_TABLE
+extern int ia64_linux_check_stack_region(struct lwp_info *lwp, void *range);
+#endif
static int
linux_nat_xfer_memory (CORE_ADDR memaddr, char *myaddr, int len, int write,
struct mem_attrib *attrib, struct target_ops *target)
@@ -2446,14 +2448,29 @@ linux_nat_xfer_memory (CORE_ADDR memaddr
struct cleanup *old_chain = save_inferior_ptid ();
int xfer;
-#ifdef NATIVE_XFER_UNWIND_TABLE
- /* FIXME: For ia64, we cannot currently use linux_proc_xfer_memory
- for accessing thread storage. Revert when Bugzilla 147436
+#ifdef NATIVE_XFER_UNWIND_TABLE
+ struct mem_region range;
+ range.lo = memaddr;
+ range.hi = memaddr + len;
+
+ /* FIXME: For ia64, we cannot currently trust linux_proc_xfer_memory
+ for accessing rse register storage. Revert when Bugzilla 147436
is fixed. */
if (is_lwp (inferior_ptid))
inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
- xfer = child_xfer_memory (memaddr, myaddr, len, write, attrib, target);
+ xfer = linux_proc_xfer_memory (memaddr, myaddr, len, write, attrib, target);
+ if (xfer == 0)
+ xfer = child_xfer_memory (memaddr, myaddr, len, write, attrib, target);
+ else if (iterate_over_lwps (ia64_linux_check_stack_region, &range) != NULL)
+ { /* This region contains ia64 rse registers, we have to re-read. */
+ int xxfer;
+ /* Re-read register stack area. */
+ xxfer = child_xfer_memory (range.lo, myaddr + (range.lo - memaddr),
+ range.hi - range.lo, write, attrib, target);
+ if (xxfer == 0)
+ xfer = 0;
+ }
#else
if (is_lwp (inferior_ptid))
inferior_ptid = pid_to_ptid (GET_LWP (inferior_ptid));
--- gdb-6.3/gdb/ia64-linux-nat.c.fix 2005-07-14 17:54:43.000000000 -0400
+++ gdb-6.3/gdb/ia64-linux-nat.c 2005-07-14 17:55:19.000000000 -0400
@@ -784,6 +784,64 @@ ia64_linux_save_sigtrap_info (void *queu
lp->saved_trap_data);
}
+/*
+ * Note: taken from ia64_tdep.c
+ *
+ */
+
+static __inline__ unsigned long
+ia64_rse_slot_num (unsigned long addr)
+{
+ return (addr >> 3) & 0x3f;
+}
+
+/* Skip over a designated number of registers in the backing
+ store, remembering every 64th position is for NAT. */
+static __inline__ unsigned long
+ia64_rse_skip_regs (unsigned long addr, long num_regs)
+{
+ long delta = ia64_rse_slot_num(addr) + num_regs;
+
+ if (num_regs < 0)
+ delta -= 0x3e;
+ return addr + ((num_regs + delta/0x3f) << 3);
+}
+
+/*
+ * Check mem_region is stack or not. If stack, /proc/<pid>/mem cannot return
+ * expected value.
+ */
+int ia64_linux_check_stack_region(struct lwp_info *ti, struct mem_region *range)
+{
+ CORE_ADDR addr;
+ int error;
+ unsigned long bsp, cfm, bspstore;
+ long sof;
+ pid_t pid = ptid_get_lwp(ti->ptid);
+ bsp = ptrace(PTRACE_PEEKUSER, pid, PT_AR_BSP ,NULL);
+ if (bsp == (unsigned long)-1) {
+ return 1;
+ }
+ /* stack is allocated by one-segment, not separated into several segments.
+ So, we only have to check whether bsp is in *range* or not. */
+ if((range->lo <= bsp) && (bsp <= range->hi)) {
+ bspstore = ptrace(PTRACE_PEEKUSER, pid, PT_AR_BSPSTORE, NULL);
+ cfm = ptrace(PTRACE_PEEKUSER, pid, PT_CFM, NULL);
+ sof = cfm & 0x3f;
+ bsp = ia64_rse_skip_regs(bsp, -sof);
+ range->lo = bspstore;
+ range->hi = bsp;
+ /* we have to check the size of dirty register stack area */
+ /*
+ fprintf_unfiltered(gdb_stdlog, "<%d> <%p> <%lx> <%p> <%p>\n",
+ pid, bsp, sof, range->lo, range->hi);
+ */
+ return 1;
+ }
+
+ return 0;
+}
+
void
_initialize_ia64_linux_nat (void)
{
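
The helpers added above encode one subtlety of the ia64 register backing
store: the last slot of every 64-slot (512-byte) group holds a NAT
collection rather than a register, so skipping N registers may cover more
than N slots.  A small stand-alone check of that arithmetic, duplicating
the two helpers from the hunk above (the bsp and sof values are made up
for the example):

#include <stdio.h>

static unsigned long
ia64_rse_slot_num (unsigned long addr)
{
  return (addr >> 3) & 0x3f;
}

static unsigned long
ia64_rse_skip_regs (unsigned long addr, long num_regs)
{
  long delta = ia64_rse_slot_num (addr) + num_regs;

  if (num_regs < 0)
    delta -= 0x3e;
  return addr + ((num_regs + delta / 0x3f) << 3);
}

int
main (void)
{
  unsigned long bsp = 0xa000000000010040UL;  /* slot 8 of its group */
  long sof = 10;                             /* size of current frame */

  /* Walking back 10 registers crosses one NAT slot, so the result is
     11 slots (0x58 bytes) below bsp, not 10 slots (0x50).  */
  printf ("frame start: 0x%lx (bsp - 0x%lx)\n",
          ia64_rse_skip_regs (bsp, -sof),
          bsp - ia64_rse_skip_regs (bsp, -sof));
  return 0;
}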