diff --git a/gdb-bz515434-qsort_cmp.patch b/gdb-bz515434-qsort_cmp.patch
index a2153f0..20974f0 100644
--- a/gdb-bz515434-qsort_cmp.patch
+++ b/gdb-bz515434-qsort_cmp.patch
@@ -1,46 +1,232 @@
---- gdb-6.8.50.20090818/gdb/objfiles.c-orig	2009-08-25 14:19:04.000000000 +0200
-+++ gdb-6.8.50.20090818/gdb/objfiles.c	2009-08-25 14:28:07.000000000 +0200
-@@ -820,12 +820,16 @@ qsort_cmp (const void *a, const void *b)
+Index: objfiles.c
+===================================================================
+RCS file: /cvs/src/src/gdb/objfiles.c,v
+retrieving revision 1.93
+diff -p -u -r1.93 objfiles.c
+--- ./gdb/objfiles.c	21 Aug 2009 17:57:17 -0000	1.93
++++ ./gdb/objfiles.c	26 Aug 2009 07:07:04 -0000
+@@ -790,15 +790,9 @@ qsort_cmp (const void *a, const void *b)
+   const CORE_ADDR sect2_addr = obj_section_addr (sect2);
 
   if (sect1_addr < sect2_addr)
-    {
-+#if 0 /* qsort_cmp ICE */
-      gdb_assert (obj_section_endaddr (sect1) <= sect2_addr);
-+#endif /* qsort_cmp ICE */
-      return -1;
-    }
+-    {
+-      gdb_assert (obj_section_endaddr (sect1) <= sect2_addr);
+-      return -1;
+-    }
++    return -1;
   else if (sect1_addr > sect2_addr)
-    {
-+#if 0 /* qsort_cmp ICE */
-      gdb_assert (sect1_addr >= obj_section_endaddr (sect2));
-+#endif /* qsort_cmp ICE */
-      return 1;
-    }
+-    {
+-      gdb_assert (sect1_addr >= obj_section_endaddr (sect2));
+-      return 1;
+-    }
++    return 1;
-@@ -841,11 +845,13 @@ qsort_cmp (const void *a, const void *b)
- static struct obj_section *
- preferred_obj_section (struct obj_section *a, struct obj_section *b)
+
+   return 0;
+ }
+@@ -823,12 +817,133 @@ preferred_obj_sectio
+   return b;
+ }
+
++/* Return 1 if SECTION should be inserted into the section map.
++   We want to insert only non-overlay and non-TLS section.  */
++
++static int
++insert_section_p (const struct bfd *abfd,
++		  const struct bfd_section *section)
++{
++  const bfd_vma lma = bfd_section_lma (abfd, section);
++
++  if (lma != 0 && lma != bfd_section_vma (abfd, section)
++      && (bfd_get_file_flags (abfd) & BFD_IN_MEMORY) == 0)
++    /* This is an overlay section.  IN_MEMORY check is needed to avoid
++       discarding sections from the "system supplied DSO" (aka vdso)
++       on Linux.  */
++    return 0;
++  if ((bfd_get_section_flags (abfd, section) & SEC_THREAD_LOCAL) != 0)
++    /* This is a TLS section.  */
++    return 0;
++
++  return 1;
++}
++
++/* Filter out overlapping sections where one section came from the real
++   objfile, and the other from a separate debuginfo file.
++   Return the size of table after redundant sections have been eliminated.  */
++
++static int
++filter_debuginfo_sections (struct obj_section **map, int map_size)
++{
++  int i, j;
++
++  for (i = 0, j = 0; i < map_size - 1; i++)
++    {
++      struct obj_section *const sect1 = map[i];
++      struct obj_section *const sect2 = map[i + 1];
++      const struct objfile *const objfile1 = sect1->objfile;
++      const struct objfile *const objfile2 = sect2->objfile;
++      const CORE_ADDR sect1_addr = obj_section_addr (sect1);
++      const CORE_ADDR sect2_addr = obj_section_addr (sect2);
++
++      if (sect1_addr == sect2_addr
++	  && (objfile1->separate_debug_objfile == objfile2
++	      || objfile2->separate_debug_objfile == objfile1))
++	{
++	  map[j++] = preferred_obj_section (sect1, sect2);
++	  ++i;
++	}
++      else
++	map[j++] = sect1;
++    }
++
++  if (i < map_size)
++    map[j++] = map[i];
++
++  /* The map should not have shrunk to less than half the original size.  */
++  gdb_assert (map_size / 2 <= j);
++
++  return j;
++}
++
++/* Filter out overlapping sections, issuing a warning if any are found.
++   Overlapping sections could really be overlay sections which we didn't
++   classify as such in insert_section_p, or we could be dealing with a
++   corrupt binary.  */
++
++static int
++filter_overlapping_sections (struct obj_section **map, int map_size)
++{
++  int i, j;
++
++  for (i = 0, j = 0; i < map_size - 1; )
++    {
++      int k;
++
++      map[j++] = map[i];
++      for (k = i + 1; k < map_size; k++)
++	{
++	  struct obj_section *const sect1 = map[i];
++	  struct obj_section *const sect2 = map[k];
++	  const CORE_ADDR sect1_addr = obj_section_addr (sect1);
++	  const CORE_ADDR sect2_addr = obj_section_addr (sect2);
++	  const CORE_ADDR sect1_endaddr = obj_section_endaddr (sect1);
++
++	  gdb_assert (sect1_addr <= sect2_addr);
++
++	  if (sect1_endaddr <= sect2_addr)
++	    break;
++	  else
++	    {
++	      /* We have an overlap.  Report it.  */
++
++	      struct objfile *const objf1 = sect1->objfile;
++	      struct objfile *const objf2 = sect2->objfile;
++
++	      const struct bfd *const abfd1 = objf1->obfd;
++	      const struct bfd *const abfd2 = objf2->obfd;
++
++	      const struct bfd_section *const bfds1 = sect1->the_bfd_section;
++	      const struct bfd_section *const bfds2 = sect2->the_bfd_section;
++
++	      const CORE_ADDR sect2_endaddr = obj_section_endaddr (sect2);
++
++	      struct gdbarch *const gdbarch = get_objfile_arch (objf1);
++
++	      warning (_("Unexpected overlap between "
++			 "section `%s' from `%s' [%s, %s) and "
++			 "section `%s' from `%s' [%s, %s)"),
++		       bfd_section_name (abfd1, bfds1), objf1->name,
++		       paddress (gdbarch, sect1_addr),
++		       paddress (gdbarch, sect1_endaddr),
++		       bfd_section_name (abfd2, bfds2), objf2->name,
++		       paddress (gdbarch, sect2_addr),
++		       paddress (gdbarch, sect2_endaddr));
++	    }
++	}
++      i = k;
++    }
++  return map_size;
++}
++
++
+ /* Update PMAP, PMAP_SIZE with non-TLS sections from all objfiles.  */
+
+ static void
+ update_section_map (struct obj_section ***pmap, int *pmap_size)
  {
-+#if 0 /* qsort_cmp ICE */
-  gdb_assert (obj_section_addr (a) == obj_section_addr (b));
-  gdb_assert ((a->objfile->separate_debug_objfile == b->objfile)
-	      || (b->objfile->separate_debug_objfile == a->objfile));
-  gdb_assert ((a->objfile->separate_debug_objfile_backlink == b->objfile)
-	      || (b->objfile->separate_debug_objfile_backlink == a->objfile));
-+#endif /* qsort_cmp ICE */
+-  int map_size, i, j;
++  int alloc_size, map_size, i;
+   struct obj_section *s, **map;
+   struct objfile *objfile;
-  if (a->objfile->separate_debug_objfile != NULL)
-    return a;
-@@ -908,7 +914,9 @@ update_section_map (struct obj_section *
-     {
-       /* Some duplicates were eliminated.
-	  The new size shouldn't be less than half of the original.  */
-+#if 0 /* qsort_cmp ICE */
-       gdb_assert (map_size / 2 <= j);
-+#endif /* qsort_cmp ICE */
-       map_size = j;
+@@ -837,55 +952,27 @@ update_section_map (struct obj_section *
+   map = *pmap;
+   xfree (map);
-      map = xrealloc (map, map_size * sizeof (*map));  /* Trim excess space.  */
+-#define insert_p(objf, sec) \
+-  ((bfd_get_section_flags ((objf)->obfd, (sec)->the_bfd_section) \
+-    & SEC_THREAD_LOCAL) == 0)
+-
+-  map_size = 0;
++  alloc_size = 0;
+   ALL_OBJSECTIONS (objfile, s)
+-    if (insert_p (objfile, s))
+-      map_size += 1;
++    if (insert_section_p (objfile->obfd, s->the_bfd_section))
++      alloc_size += 1;
+
+-  map = xmalloc (map_size * sizeof (*map));
++  map = xmalloc (alloc_size * sizeof (*map));
+
+   i = 0;
+   ALL_OBJSECTIONS (objfile, s)
+-    if (insert_p (objfile, s))
++    if (insert_section_p (objfile->obfd, s->the_bfd_section))
+       map[i++] = s;
+
+-#undef insert_p
+-
+-  qsort (map, map_size, sizeof (*map), qsort_cmp);
+-
+-  /* With separate debuginfo files, we may have up to two (almost)
+-     identical copies of some obj_sections in the map.
+-     Filter out duplicates.  */
+-  for (i = 0, j = 0; i < map_size; ++i)
+-    {
+-      struct obj_section *sect1 = map[i];
+-      struct obj_section *sect2 = (i + 1 < map_size) ? map[i + 1] : NULL;
+-
+-      if (sect2 == NULL
+-	  || obj_section_addr (sect1) != obj_section_addr (sect2))
+-	map[j++] = sect1;
+-      else
+-	{
+-	  map[j++] = preferred_obj_section (sect1, sect2);
+-	  ++i;
+-	}
+-    }
+-
+-  if (j < map_size)
+-    {
+-      /* Some duplicates were eliminated.
+-	 The new size shouldn't be less than half of the original.  */
+-      gdb_assert (map_size / 2 <= j);
+-      map_size = j;
+-
+-      map = xrealloc (map, map_size * sizeof (*map));  /* Trim excess space.  */
+-    }
++  qsort (map, alloc_size, sizeof (*map), qsort_cmp);
++  map_size = filter_debuginfo_sections(map, alloc_size);
++  map_size = filter_overlapping_sections(map, map_size);
++
++  if (map_size < alloc_size)
++    /* Some sections were eliminated.  Trim excess space.  */
++    map = xrealloc (map, map_size * sizeof (*map));
+   else
+-    gdb_assert (j == map_size);
++    gdb_assert (alloc_size == map_size);
+
+   *pmap = map;
+   *pmap_size = map_size;
2009-08-25  Jan Kratochvil
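
[ Aside for reviewers, not part of any patch: the upstream fix above replaces
the old inline duplicate-elimination loop with filter_debuginfo_sections, a
single pass over the qsort-ed map that collapses address-equal neighbour
pairs.  The standalone C sketch below demonstrates the same pass in
isolation.  struct sect, the from_debuginfo flag, and the prefer helper are
simplified stand-ins invented for illustration; GDB's real types are
struct obj_section and struct objfile. ]

#include <assert.h>
#include <stdio.h>

struct sect { unsigned long addr; int from_debuginfo; };

static struct sect *
prefer (struct sect *a, struct sect *b)
{
  /* Mirror preferred_obj_section's intent: keep the section whose
     objfile is the real one, not the separate debuginfo file.  */
  return a->from_debuginfo ? b : a;
}

static int
filter_dups (struct sect **map, int map_size)
{
  int i, j;

  for (i = 0, j = 0; i < map_size - 1; i++)
    {
      if (map[i]->addr == map[i + 1]->addr)
	{
	  map[j++] = prefer (map[i], map[i + 1]);
	  ++i;			/* Skip the partner we just merged.  */
	}
      else
	map[j++] = map[i];
    }

  if (i < map_size)		/* The last element had no merge partner.  */
    map[j++] = map[i];

  assert (map_size / 2 <= j);	/* Merging is at most pairwise.  */
  return j;
}

int
main (void)
{
  struct sect s[] = { { 0x1000, 0 }, { 0x1000, 1 },
		      { 0x2000, 0 }, { 0x3000, 1 } };
  struct sect *map[] = { &s[0], &s[1], &s[2], &s[3] };
  int n = filter_dups (map, 4);
  int k;

  for (k = 0; k < n; k++)	/* Prints 0x1000, 0x2000, 0x3000.  */
    printf ("%#lx debuginfo=%d\n", map[k]->addr, map[k]->from_debuginfo);
  return 0;
}

[ Splitting the merge into its own function also lets the overlap-warning
pass, filter_overlapping_sections, run afterwards on an already
deduplicated map. ]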
diff --git a/gdb-bz520129-drow-bitfields.patch b/gdb-bz520129-drow-bitfields.patch
new file mode 100644
index 0000000..81ecf1b
--- /dev/null
+++ b/gdb-bz520129-drow-bitfields.patch
@@ -0,0 +1,245 @@
+http://sourceware.org/ml/gdb-cvs/2009-07/msg00143.html
+
+2009-07-21  Daniel Jacobowitz
+	    Vladimir Prus
+
+	* valops.c (value_fetch_lazy): Handle bitfields explicitly.
+	(value_assign): Remove unnecessary FIXME.  Honor the container
+	type of bitfields if possible.
+	* value.c (struct value): Add parent field.
+	(value_parent): New function.
+	(value_free): Free the parent also.
+	(value_copy): Copy the parent also.
+	(value_primitive_field): Do not read the contents of a lazy
+	value to create a child bitfield value.  Set bitpos and offset
+	according to the container type if possible.
+	(unpack_bits_as_long): Rename from unpack_field_as_long.  Take
+	field_type, bitpos, and bitsize instead of type and fieldno.
+	(unpack_field_as_long): Use unpack_bits_as_long.
+	* value.h (value_parent, unpack_bits_as_long): New prototypes.
+
+[ Reverted, backported for Fedora. ]
+
+--- ./gdb/valops.c	2009-08-28 19:27:30.000000000 +0200
++++ ./gdb/valops.c	2009-08-28 19:27:59.000000000 +0200
+@@ -689,25 +689,7 @@ value_fetch_lazy (struct value *val)
+ {
+   gdb_assert (value_lazy (val));
+   allocate_value_contents (val);
+-  if (value_bitsize (val))
+-    {
+-      /* To read a lazy bitfield, read the entire enclosing value.  This
+-	 prevents reading the same block of (possibly volatile) memory once
+-	 per bitfield.  It would be even better to read only the containing
+-	 word, but we have no way to record that just specific bits of a
+-	 value have been fetched.  */
+-      struct type *type = check_typedef (value_type (val));
+-      enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
+-      struct value *parent = value_parent (val);
+-      LONGEST offset = value_offset (val);
+-      LONGEST num = unpack_bits_as_long (value_type (val),
+-					 value_contents (parent) + offset,
+-					 value_bitpos (val),
+-					 value_bitsize (val));
+-      int length = TYPE_LENGTH (type);
+-      store_signed_integer (value_contents_raw (val), length, byte_order, num);
+-    }
+-  else if (VALUE_LVAL (val) == lval_memory)
++  if (VALUE_LVAL (val) == lval_memory)
+     {
+       CORE_ADDR addr = value_raw_address (val);
+
+@@ -883,20 +865,13 @@ value_assign (struct value *toval, struc
+
+       if (value_bitsize (toval))
+ 	{
++	  /* We assume that the argument to read_memory is in units
++	     of host chars.  FIXME: Is that correct?  */
+ 	  changed_len = (value_bitpos (toval)
+ 			 + value_bitsize (toval)
+ 			 + HOST_CHAR_BIT - 1)
+ 			/ HOST_CHAR_BIT;
+
+-	  /* If we can read-modify-write exactly the size of the
+-	     containing type (e.g. short or int) then do so.  This
+-	     is safer for volatile bitfields mapped to hardware
+-	     registers.  */
+-	  if (changed_len < TYPE_LENGTH (type)
+-	      && TYPE_LENGTH (type) <= (int) sizeof (LONGEST)
+-	      && ((LONGEST) value_address (toval) % TYPE_LENGTH (type)) == 0)
+-	    changed_len = TYPE_LENGTH (type);
+-
+ 	  if (changed_len > (int) sizeof (LONGEST))
+ 	    error (_("Can't handle bitfields which don't fit in a %d bit word."),
+ 		   (int) sizeof (LONGEST) * HOST_CHAR_BIT);
+--- ./gdb/value.c	2009-08-28 19:27:29.000000000 +0200
++++ ./gdb/value.c	2009-08-28 19:28:34.000000000 +0200
+@@ -110,11 +110,6 @@ struct value
+      gdbarch_bits_big_endian=1 targets, it is the position of the MSB.  */
+   int bitpos;
+
+-  /* Only used for bitfields; the containing value.  This allows a
+-     single read from the target when displaying multiple
+-     bitfields.  */
+-  struct value *parent;
+-
+   /* Frame register value is relative to.  This will be described in
+      the lval enum above as "lval_register".  */
+   struct frame_id frame_id;
+@@ -392,12 +387,6 @@ set_value_bitsize (struct value *value,
+   value->bitsize = bit;
+ }
+
+-struct value *
+-value_parent (struct value *value)
+-{
+-  return value->parent;
+-}
+-
+ gdb_byte *
+ value_contents_raw (struct value *value)
+ {
+@@ -617,11 +606,6 @@ value_free (struct value *val)
+   if (val->reference_count > 0)
+     return;
+
+-  /* If there's an associated parent value, drop our reference to
+-     it.  */
+-  if (val->parent != NULL)
+-    value_free (val->parent);
+-
+   type_decref (val->type);
+   type_decref (val->enclosing_type);
+
+@@ -750,9 +734,6 @@ value_copy (struct value *arg)
+ 	      TYPE_LENGTH (value_enclosing_type (arg)));
+
+     }
+-  val->parent = arg->parent;
+-  if (val->parent)
+-    value_incref (val->parent);
+   if (VALUE_LVAL (val) == lval_computed)
+     {
+       struct lval_funcs *funcs = val->location.computed.funcs;
+@@ -1946,28 +1927,15 @@ value_primitive_field (struct value *arg
+
+   if (TYPE_FIELD_BITSIZE (arg_type, fieldno))
+     {
+-      /* Create a new value for the bitfield, with bitpos and bitsize
+-	 set.  If possible, arrange offset and bitpos so that we can
+-	 do a single aligned read of the size of the containing type.
+-	 Otherwise, adjust offset to the byte containing the first
+-	 bit.  Assume that the address, offset, and embedded offset
+-	 are sufficiently aligned.  */
+-      int bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno);
+-      int container_bitsize = TYPE_LENGTH (type) * 8;
+-
+-      v = allocate_value_lazy (type);
++      v = value_from_longest (type,
++			      unpack_field_as_long (arg_type,
++						    value_contents (arg1)
++						      + offset,
++						    fieldno));
++      v->bitpos = TYPE_FIELD_BITPOS (arg_type, fieldno) % 8;
+       v->bitsize = TYPE_FIELD_BITSIZE (arg_type, fieldno);
+-      if ((bitpos % container_bitsize) + v->bitsize <= container_bitsize
+-	  && TYPE_LENGTH (type) <= (int) sizeof (LONGEST))
+-	v->bitpos = bitpos % container_bitsize;
+-      else
+-	v->bitpos = bitpos % 8;
+-      v->offset = value_offset (arg1) + value_embedded_offset (arg1)
+-	+ (bitpos - v->bitpos) / 8;
+-      v->parent = arg1;
+-      value_incref (v->parent);
+-      if (!value_lazy (arg1))
+-	value_fetch_lazy (v);
++      v->offset = value_offset (arg1) + offset
++	+ TYPE_FIELD_BITPOS (arg_type, fieldno) / 8;
+     }
+   else if (fieldno < TYPE_N_BASECLASSES (arg_type))
+     {
+@@ -2094,9 +2062,8 @@ value_fn_field (struct value **arg1p, st
+ }
+
+
+-/* Unpack a bitfield of the specified FIELD_TYPE, from the anonymous
+-   object at VALADDR.  The bitfield starts at BITPOS bits and contains
+-   BITSIZE bits.
++/* Unpack a field FIELDNO of the specified TYPE, from the anonymous object at
++   VALADDR.
+
+    Extracting bits depends on endianness of the machine.  Compute the
+    number of least significant bits to discard.  For big endian machines,
+@@ -2110,21 +2077,24 @@ value_fn_field (struct value **arg1p, st
+    If the field is signed, we also do sign extension.  */
+
+ LONGEST
+-unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
+-		     int bitpos, int bitsize)
++unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
+ {
+-  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (field_type));
++  enum bfd_endian byte_order = gdbarch_byte_order (get_type_arch (type));
+   ULONGEST val;
+   ULONGEST valmask;
++  int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
++  int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
+   int lsbcount;
++  struct type *field_type;
+
+   val = extract_unsigned_integer (valaddr + bitpos / 8,
+ 				  sizeof (val), byte_order);
++  field_type = TYPE_FIELD_TYPE (type, fieldno);
+   CHECK_TYPEDEF (field_type);
+
+   /* Extract bits.  See comment above.  */
+
+-  if (gdbarch_bits_big_endian (get_type_arch (field_type)))
++  if (gdbarch_bits_big_endian (get_type_arch (type)))
+     lsbcount = (sizeof val * 8 - bitpos % 8 - bitsize);
+   else
+     lsbcount = (bitpos % 8);
+@@ -2148,19 +2118,6 @@ unpack_bits_as_long (struct type *field_
+   return (val);
+ }
+
+-/* Unpack a field FIELDNO of the specified TYPE, from the anonymous object at
+-   VALADDR.  See unpack_bits_as_long for more details.  */
+-
+-LONGEST
+-unpack_field_as_long (struct type *type, const gdb_byte *valaddr, int fieldno)
+-{
+-  int bitpos = TYPE_FIELD_BITPOS (type, fieldno);
+-  int bitsize = TYPE_FIELD_BITSIZE (type, fieldno);
+-  struct type *field_type = TYPE_FIELD_TYPE (type, fieldno);
+-
+-  return unpack_bits_as_long (field_type, valaddr, bitpos, bitsize);
+-}
+-
+ /* Modify the value of a bitfield.  ADDR points to a block of memory in
+    target byte order; the bitfield starts in the byte pointed to.  FIELDVAL
+    is the desired value of the field, in host byte order.  BITPOS and BITSIZE
+--- ./gdb/value.h	2009-08-28 19:27:29.000000000 +0200
++++ ./gdb/value.h	2009-08-28 19:27:59.000000000 +0200
+@@ -71,12 +71,6 @@ extern void set_value_bitsize (struct va
+ extern int value_bitpos (struct value *);
+ extern void set_value_bitpos (struct value *, int bit);
+
+-/* Only used for bitfields; the containing value.  This allows a
+-   single read from the target when displaying multiple
+-   bitfields.  */
+-
+-struct value *value_parent (struct value *);
+-
+ /* Describes offset of a value within lval of a structure in bytes.
+    If lval == lval_memory, this is an offset to the address.  If lval
+    == lval_register, this is a further offset from location.address
+@@ -330,8 +324,6 @@ extern LONGEST unpack_long (struct type
+ extern DOUBLEST unpack_double (struct type *type, const gdb_byte *valaddr,
+ 			       int *invp);
+ extern CORE_ADDR unpack_pointer (struct type *type, const gdb_byte *valaddr);
+-LONGEST unpack_bits_as_long (struct type *field_type, const gdb_byte *valaddr,
+-		     int bitpos, int bitsize);
+ extern LONGEST unpack_field_as_long (struct type *type,
+ 				     const gdb_byte *valaddr,
+ 				     int fieldno);
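
[ Aside for reviewers, not part of any patch: the revert reinstates
unpack_field_as_long's open-coded extraction.  As a sanity check of the
mask-and-sign-extend sequence it restores, here is a minimal self-contained
C sketch covering little-endian bit numbering only; unpack_bits, its
parameters, and the example values are illustrative stand-ins, not GDB code.
The real function also handles big-endian bit ordering and consults the
field's type for signedness. ]

#include <stdio.h>

/* Extract a BITSIZE-wide field starting BITPOS bits into WORD,
   optionally sign-extending it.  Assumes 0 < bitsize < 64.  */
static long long
unpack_bits (unsigned long long word, int bitpos, int bitsize, int is_signed)
{
  unsigned long long val = word >> bitpos;	/* Discard the low bits.  */
  unsigned long long valmask = (1ULL << bitsize) - 1;

  val &= valmask;				/* Keep BITSIZE bits.  */
  if (is_signed && (val & (valmask ^ (valmask >> 1))))
    val |= ~valmask;				/* Sign-extend.  */
  return (long long) val;
}

int
main (void)
{
  /* A 3-bit signed field at bit offset 4 holding the pattern 0b110.  */
  unsigned long long word = 0x6ULL << 4;

  printf ("%lld\n", unpack_bits (word, 4, 3, 1));  /* Prints -2.  */
  return 0;
}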
diff --git a/gdb.spec b/gdb.spec
index e7b05f7..3c2026c 100644
--- a/gdb.spec
+++ b/gdb.spec
@@ -14,7 +14,7 @@ Version: 6.8.50.20090818
 
 # The release always contains a leading reserved number, start it at 1.
 # `upstream' is not a part of `name' to stay fully rpm dependencies compatible for the testing.
-Release: 5%{?_with_upstream:.upstream}%{?dist}
+Release: 6%{?_with_upstream:.upstream}%{?dist}
 
 License: GPLv3+
 Group: Development/Debuggers
@@ -363,6 +363,9 @@ Patch375: gdb-readline-6.0.patch
 # Temporarily disable assertion checks crashing in qsort_cmp (BZ 515434).
 Patch378: gdb-bz515434-qsort_cmp.patch
 
+# Revert bitfields regression (BZ 520129).
+Patch380: gdb-bz520129-drow-bitfields.patch
+
 BuildRequires: ncurses-devel texinfo gettext flex bison expat-devel
 Requires: readline
 BuildRequires: readline-devel
@@ -552,6 +555,7 @@ rm -f gdb/jv-exp.c gdb/m2-exp.c gdb/objc-exp.c gdb/p-exp.c
 %patch360 -p1
 %patch375 -p1
 %patch378 -p1
+%patch380 -p1
 %patch124 -p1
 find -name "*.orig" | xargs rm -f
 
@@ -825,6 +829,10 @@ fi
 %endif
 
 %changelog
+* Fri Aug 28 2009 Jan Kratochvil - 6.8.50.20090818-6
+- Real upstream fixup of qsort_cmp (BZ 515434).
+- Revert bitfields regression (BZ 520129).
+
 * Tue Aug 25 2009 Jan Kratochvil - 6.8.50.20090818-5
 - Temporarily disable assertion checks crashing in qsort_cmp (BZ 515434).
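
[ Closing aside, not part of any patch: the modify_field comment quoted in
the value.c hunk above describes the write-side counterpart of unpacking —
a read-modify-write under the same mask.  The sketch below is standalone
illustrative C under the assumption 0 < bitsize < 64 and little-endian bit
numbering; store_bits is an invented name, not the patched GDB routine. ]

#include <stdio.h>

/* Read-modify-write a BITSIZE-wide field at BITPOS inside WORD.  */
static unsigned long long
store_bits (unsigned long long word, unsigned long long fieldval,
	    int bitpos, int bitsize)
{
  unsigned long long mask = ((1ULL << bitsize) - 1) << bitpos;

  word &= ~mask;			 /* Clear the old field.  */
  word |= (fieldval << bitpos) & mask;	 /* Insert the new one.  */
  return word;
}

int
main (void)
{
  /* Store 0b010 into bits 4..6 of 0xff: 0xff -> 0xaf.  */
  printf ("%#llx\n", store_bits (0xffULL, 0x2, 4, 3));
  return 0;
}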