GNU Binutils with patches for OS216


Commit MetaInfo

Revision: 757186093b40ad3b37962c34294d032b0d88739f
Time: 2017-02-08 00:25:54
Author: Philipp Rudo <prudo@linu...>
Committer: Andreas Arnez

Log Message

Add basic Linux kernel support

This patch implements a basic target_ops for Linux kernel support. In
particular, it models Linux tasks as GDB threads, so you are able to switch
to a given thread, get backtraces, disassemble the current frame, etc.
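
For example, once the target is pushed (the "linux-kernel" shortname is
defined by this patch; the session below is only illustrative):

    (gdb) target linux-kernel
    (gdb) info threads
    (gdb) thread 3
    (gdb) bt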

Currently the target_ops is designed to work only with static targets, i.e.
dumps. Thus it lacks implementations for hooks like to_wait, to_resume, or
to_store_registers. Furthermore, the mapping between a CPU and the
task_struct of the running task is only done once at initialization. See
the cover letter for a detailed discussion.

Nevertheless, I made some design decisions that differ from Peter's [1] and
are worth discussing. In particular, storing the private data in a htab (or
std::unordered_map if I had the time...) instead of global variables makes
the code much nicer and consumes less memory.

[1] https://sourceware.org/ml/gdb-patches/2016-12/msg00382.html
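
As a minimal sketch of the htab-backed approach, using the LK_DECLARE_ADDR /
LK_ADDR macros that lk-low.h below introduces:

    /* Declare the symbol once; this creates the hashtab entry...  */
    LK_DECLARE_ADDR (init_task);

    /* ...later lookups go through the hashtab instead of a global variable.  */
    CORE_ADDR init_task_addr = LK_ADDR (init_task);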

gdb/ChangeLog:

    • gdbarch.sh (lk_init_private): New hook.
    • gdbarch.h: Regenerated.
    • gdbarch.c: Regenerated.
    • lk-low.h: New file.
    • lk-low.c: New file.
    • lk-lists.h: New file.
    • lk-lists.c: New file.
    • Makefile.in (SFILES, ALLDEPFILES): Add lk-low.c and lk-lists.c.
      (HFILES_NO_SRCDIR): Add lk-low.h and lk-lists.h.
      (ALL_TARGET_OBS): Add lk-low.o.
      (COMMON_OBS): Add lk-lists.o.

Change Summary

Incremental Difference

--- a/gdb/Makefile.in
+++ b/gdb/Makefile.in
@@ -805,6 +805,7 @@ ALL_TARGET_OBS = \
805805 iq2000-tdep.o \
806806 linux-record.o \
807807 linux-tdep.o \
808+ lk-low.o \
808809 lm32-tdep.o \
809810 m32c-tdep.o \
810811 m32r-linux-tdep.o \
@@ -1092,6 +1093,8 @@ SFILES = \
10921093 jit.c \
10931094 language.c \
10941095 linespec.c \
1096+ lk-lists.c \
1097+ lk-low.c \
10951098 location.c \
10961099 m2-exp.y \
10971100 m2-lang.c \
@@ -1339,6 +1342,8 @@ HFILES_NO_SRCDIR = \
13391342 linux-nat.h \
13401343 linux-record.h \
13411344 linux-tdep.h \
1345+ lk-lists.h \
1346+ lk-low.h \
13421347 location.h \
13431348 m2-lang.h \
13441349 m32r-tdep.h \
@@ -1699,6 +1704,7 @@ COMMON_OBS = $(DEPFILES) $(CONFIG_OBS) $(YYOBJ) \
16991704 jit.o \
17001705 language.o \
17011706 linespec.o \
1707+ lk-lists.o \
17021708 location.o \
17031709 m2-lang.o \
17041710 m2-typeprint.o \
@@ -2531,6 +2537,8 @@ ALLDEPFILES = \
25312537 linux-fork.c \
25322538 linux-record.c \
25332539 linux-tdep.c \
2540+ lk-lists.c \
2541+ lk-low.c \
25342542 lm32-tdep.c \
25352543 m32r-linux-nat.c \
25362544 m32r-linux-tdep.c \
--- a/gdb/gdbarch.c
+++ b/gdb/gdbarch.c
@@ -339,6 +339,7 @@ struct gdbarch
339339 gdbarch_gcc_target_options_ftype *gcc_target_options;
340340 gdbarch_gnu_triplet_regexp_ftype *gnu_triplet_regexp;
341341 gdbarch_addressable_memory_unit_size_ftype *addressable_memory_unit_size;
342+ gdbarch_lk_init_private_ftype *lk_init_private;
342343 };
343344
344345 /* Create a new ``struct gdbarch'' based on information provided by
@@ -1124,6 +1125,12 @@ gdbarch_dump (struct gdbarch *gdbarch, struct ui_file *file)
11241125 "gdbarch_dump: iterate_over_regset_sections = <%s>\n",
11251126 host_address_to_string (gdbarch->iterate_over_regset_sections));
11261127 fprintf_unfiltered (file,
1128+ "gdbarch_dump: gdbarch_lk_init_private_p() = %d\n",
1129+ gdbarch_lk_init_private_p (gdbarch));
1130+ fprintf_unfiltered (file,
1131+ "gdbarch_dump: lk_init_private = <%s>\n",
1132+ host_address_to_string (gdbarch->lk_init_private));
1133+ fprintf_unfiltered (file,
11271134 "gdbarch_dump: long_bit = %s\n",
11281135 plongest (gdbarch->long_bit));
11291136 fprintf_unfiltered (file,
@@ -4956,6 +4963,30 @@ set_gdbarch_addressable_memory_unit_size (struct gdbarch *gdbarch,
49564963 gdbarch->addressable_memory_unit_size = addressable_memory_unit_size;
49574964 }
49584965
4966+int
4967+gdbarch_lk_init_private_p (struct gdbarch *gdbarch)
4968+{
4969+ gdb_assert (gdbarch != NULL);
4970+ return gdbarch->lk_init_private != NULL;
4971+}
4972+
4973+void
4974+gdbarch_lk_init_private (struct gdbarch *gdbarch)
4975+{
4976+ gdb_assert (gdbarch != NULL);
4977+ gdb_assert (gdbarch->lk_init_private != NULL);
4978+ if (gdbarch_debug >= 2)
4979+ fprintf_unfiltered (gdb_stdlog, "gdbarch_lk_init_private called\n");
4980+ gdbarch->lk_init_private (gdbarch);
4981+}
4982+
4983+void
4984+set_gdbarch_lk_init_private (struct gdbarch *gdbarch,
4985+ gdbarch_lk_init_private_ftype lk_init_private)
4986+{
4987+ gdbarch->lk_init_private = lk_init_private;
4988+}
4989+
49594990
49604991 /* Keep a registry of per-architecture data-pointers required by GDB
49614992 modules. */
--- a/gdb/gdbarch.h
+++ b/gdb/gdbarch.h
@@ -1545,6 +1545,14 @@ typedef int (gdbarch_addressable_memory_unit_size_ftype) (struct gdbarch *gdbarc
15451545 extern int gdbarch_addressable_memory_unit_size (struct gdbarch *gdbarch);
15461546 extern void set_gdbarch_addressable_memory_unit_size (struct gdbarch *gdbarch, gdbarch_addressable_memory_unit_size_ftype *addressable_memory_unit_size);
15471547
1548+/* Initialize architecture dependent private data for the linux-kernel target. */
1549+
1550+extern int gdbarch_lk_init_private_p (struct gdbarch *gdbarch);
1551+
1552+typedef void (gdbarch_lk_init_private_ftype) (struct gdbarch *gdbarch);
1553+extern void gdbarch_lk_init_private (struct gdbarch *gdbarch);
1554+extern void set_gdbarch_lk_init_private (struct gdbarch *gdbarch, gdbarch_lk_init_private_ftype *lk_init_private);
1555+
15481556 /* Definition for an unknown syscall, used basically in error-cases. */
15491557 #define UNKNOWN_SYSCALL (-1)
15501558
--- a/gdb/gdbarch.sh
+++ b/gdb/gdbarch.sh
@@ -1163,6 +1163,10 @@ m:const char *:gnu_triplet_regexp:void:::default_gnu_triplet_regexp::0
11631163 # each address in memory.
11641164 m:int:addressable_memory_unit_size:void:::default_addressable_memory_unit_size::0
11651165
1166+# Initialize architecture dependent private data for the linux-kernel
1167+# target.
1168+M:void:lk_init_private:void:
1169+
11661170 EOF
11671171 }
11681172
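
An architecture opts in by registering an implementation of this hook from
its tdep code. A minimal, purely illustrative sketch (the example_* names
are hypothetical; no architecture implementation is part of this patch):

    /* Fill in the required LK_HOOK callbacks (see lk-low.h below).  */
    static void
    example_lk_init_private (struct gdbarch *gdbarch)
    {
      LK_HOOK->get_registers = example_get_registers;  /* required hook */
    }

    /* ...and register the hook from the architecture's gdbarch init.  */
    set_gdbarch_lk_init_private (gdbarch, example_lk_init_private);
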
--- /dev/null
+++ b/gdb/lk-lists.c
@@ -0,0 +1,47 @@
1+/* Iterators for internal data structures of the Linux kernel.
2+
3+ Copyright (C) 2016 Free Software Foundation, Inc.
4+
5+ This file is part of GDB.
6+
7+ This program is free software; you can redistribute it and/or modify
8+ it under the terms of the GNU General Public License as published by
9+ the Free Software Foundation; either version 3 of the License, or
10+ (at your option) any later version.
11+
12+ This program is distributed in the hope that it will be useful,
13+ but WITHOUT ANY WARRANTY; without even the implied warranty of
14+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15+ GNU General Public License for more details.
16+
17+ You should have received a copy of the GNU General Public License
18+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
19+
20+#include "defs.h"
21+
22+#include "inferior.h"
23+#include "lk-lists.h"
24+#include "lk-low.h"
25+
26+/* Returns next entry from struct list_head CURR while iterating field
27+ SNAME->FNAME. */
28+
29+CORE_ADDR
30+lk_list_head_next (CORE_ADDR curr, const char *sname, const char *fname)
31+{
32+ CORE_ADDR next, next_prev;
33+
34+ /* We must always assume that the data we handle is corrupted. Thus use
35+ curr->next->prev == curr as sanity check. */
36+ next = lk_read_addr (curr + LK_OFFSET (list_head, next));
37+ next_prev = lk_read_addr (next + LK_OFFSET (list_head, prev));
38+
39+ if (!curr || curr != next_prev)
40+ {
41+ error (_("Memory corruption detected while iterating list_head at "\
42+ "0x%s belonging to list %s->%s."),
43+ phex (curr, lk_builtin_type_size (unsigned_long)) , sname, fname);
44+ }
45+
46+ return next;
47+}
--- /dev/null
+++ b/gdb/lk-lists.h
@@ -0,0 +1,56 @@
1+/* Iterators for internal data structures of the Linux kernel.
2+
3+ Copyright (C) 2016 Free Software Foundation, Inc.
4+
5+ This file is part of GDB.
6+
7+ This program is free software; you can redistribute it and/or modify
8+ it under the terms of the GNU General Public License as published by
9+ the Free Software Foundation; either version 3 of the License, or
10+ (at your option) any later version.
11+
12+ This program is distributed in the hope that it will be useful,
13+ but WITHOUT ANY WARRANTY; without even the implied warranty of
14+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15+ GNU General Public License for more details.
16+
17+ You should have received a copy of the GNU General Public License
18+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
19+
20+#ifndef __LK_LISTS_H__
21+#define __LK_LISTS_H__
22+
23+extern CORE_ADDR lk_list_head_next (CORE_ADDR curr, const char *sname,
24+ const char *fname);
25+
26+/* Iterator over field SNAME->FNAME of type struct list_head starting at
27+ address START of type struct list_head. This iterator is intended to be
28+ used for lists initialized with the macro LIST_HEAD (include/linux/list.h)
29+ in the kernel, i.e. lists where START is a global variable of type struct
30+ list_head and _not_ of type struct SNAME like the rest of the list. Thus
31+ START will not be iterated over but only be used to start/terminate the
32+ iteration. */
33+
34+#define lk_list_for_each(next, start, sname, fname) \
35+ for ((next) = lk_list_head_next ((start), #sname, #fname); \
36+ (next) != (start); \
37+ (next) = lk_list_head_next ((next), #sname, #fname))
38+
39+/* Iterator over struct SNAME linked together via field SNAME->FNAME of type
40+ struct list_head starting at address START of type struct SNAME. In
41+ contrast to the iterator above, START is a "full" member of the list and
42+ thus will be iterated over. */
43+
44+#define lk_list_for_each_container(cont, start, sname, fname) \
45+ CORE_ADDR _next; \
46+ bool _first_loop = true; \
47+ for ((cont) = (start), \
48+ _next = (start) + LK_OFFSET (sname, fname); \
49+ \
50+ (cont) != (start) || _first_loop; \
51+ \
52+ _next = lk_list_head_next (_next, #sname, #fname), \
53+ (cont) = LK_CONTAINER_OF (_next, sname, fname), \
54+ _first_loop = false)
55+
56+#endif /* __LK_LISTS_H__ */
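
To illustrate the iterators above, a minimal sketch of walking the kernel
task list, mirroring lk_update_sleeping_tasks in lk-low.c below (assumes the
private data has already been initialized):

    CORE_ADDR init_task, task;

    init_task = LK_ADDR (init_task);

    /* TASK successively holds the address of each task_struct linked into
       init_task->tasks; init_task itself only anchors the list.  */
    lk_list_for_each_container (task, init_task, task_struct, tasks)
      {
        int pid = lk_read_int (task + LK_OFFSET (task_struct, pid));

        printf_unfiltered ("task_struct at 0x%s has pid %d\n",
                           phex (task, lk_builtin_type_size (unsigned_long)),
                           pid);
      }
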
--- /dev/null
+++ b/gdb/lk-low.c
@@ -0,0 +1,860 @@
1+/* Basic Linux kernel support, architecture independent.
2+
3+ Copyright (C) 2016 Free Software Foundation, Inc.
4+
5+ This file is part of GDB.
6+
7+ This program is free software; you can redistribute it and/or modify
8+ it under the terms of the GNU General Public License as published by
9+ the Free Software Foundation; either version 3 of the License, or
10+ (at your option) any later version.
11+
12+ This program is distributed in the hope that it will be useful,
13+ but WITHOUT ANY WARRANTY; without even the implied warranty of
14+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15+ GNU General Public License for more details.
16+
17+ You should have received a copy of the GNU General Public License
18+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
19+
20+#include "defs.h"
21+
22+#include "block.h"
23+#include "exceptions.h"
24+#include "frame.h"
25+#include "gdbarch.h"
26+#include "gdbcore.h"
27+#include "gdbthread.h"
28+#include "gdbtypes.h"
29+#include "inferior.h"
30+#include "lk-lists.h"
31+#include "lk-low.h"
32+#include "objfiles.h"
33+#include "observer.h"
34+#include "solib.h"
35+#include "target.h"
36+#include "value.h"
37+
38+#include <algorithm>
39+
40+struct target_ops *linux_kernel_ops = NULL;
41+
42+/* Initialize a private data entry for an address, where NAME is the name
43+ of the symbol, i.e. variable name in Linux, ALIAS the name used to
44+ retrieve the entry from hashtab, and SILENT a flag to determine if
45+ errors should be ignored.
46+
47+ Returns a pointer to the new entry. In case of an error, either returns
48+ NULL (SILENT = TRUE) or throws an error (SILENT = FALSE). If SILENT = TRUE
49+ the caller is responsible for checking for errors.
50+
51+ Do not use directly, use LK_DECLARE_* macros defined in lk-low.h instead. */
52+
53+struct lk_private_data *
54+lk_init_addr (const char *name, const char *alias, int silent)
55+{
56+ /* Initialize to NULL to silence gcc. */
57+ struct value *val = NULL;
58+ struct lk_private_data *data;
59+ void **new_slot;
60+ void *old_slot;
61+
62+ if ((old_slot = lk_find (alias)) != NULL)
63+ return (struct lk_private_data *) old_slot;
64+
65+ TRY
66+ {
67+ /* Choose global block for search, in case the variable was redefined
68+ in the current context. */
69+ const struct block *global = block_global_block (get_selected_block (0));
70+ const char *tmp = name;
71+ expression_up expr = parse_exp_1 (&tmp, 0, global, 0);
72+
73+ gdb_assert (*tmp == '\0');
74+ val = evaluate_expression (expr.get ());
75+ }
76+ CATCH (except, RETURN_MASK_ALL)
77+ {
78+ if (!silent)
79+ error (_("Could not find address %s. Aborting."), alias);
80+
81+ return NULL;
82+ }
83+ END_CATCH
84+
85+ data = XCNEW (struct lk_private_data);
86+ data->alias = alias;
87+ data->data.addr = value_address (val);
88+
89+ new_slot = lk_find_slot (alias);
90+ *new_slot = data;
91+
92+ return data;
93+}
94+
95+/* Same as lk_init_addr but for structs. */
96+
97+struct lk_private_data *
98+lk_init_struct (const char *name, const char *alias, int silent)
99+{
100+ /* Initialize to NULL to silence GCC. */
101+ struct value *val = NULL;
102+ struct lk_private_data *data;
103+ void **new_slot;
104+ void *old_slot;
105+
106+ if ((old_slot = lk_find (alias)) != NULL)
107+ return (struct lk_private_data *) old_slot;
108+
109+ /* There are two ways to define structs
110+ o struct name { ... };
111+ o typedef struct { ... } name;
112+ Both are used in the linux kernel. Thus we have to check for both ways.
113+ We do this by first searching for "struct name" (the "struct " is added
114+ by macro LK_STRUCT_NAME in lk-low.h) and if not found search for "name".
115+
116+ Note: The alias will always keep its "struct "-prefix, even when
117+ given explicitly. Besides some weird error messages this has no effect.
118+ */
119+retry:
120+ TRY
121+ {
122+ /* Choose global block for search, in case the struct was redefined
123+ in the current context. */
124+ const struct block *global = block_global_block(get_selected_block (0));
125+ const char *tmp = name;
126+ expression_up expr = parse_exp_1 (&tmp, 0, global, 0);
127+
128+ gdb_assert (*tmp == '\0');
129+ /* parsing just for 'name' can cause name clashes. Thus also check for
130+ OP_TYPE. */
131+ if (expr->elts[0].opcode != OP_TYPE)
132+ error ("We just need to get to the catch block");
133+
134+ val = evaluate_type (expr.get ());
135+ }
136+ CATCH (except, RETURN_MASK_ALL)
137+ {
138+ /* 7 = strlen ("struct "). */
139+ if (strncmp (name, "struct ", 7) == 0)
140+ {
141+ name += 7;
142+ goto retry;
143+ }
144+
145+ if (!silent)
146+ error (_("Could not find %s. Aborting."), alias);
147+
148+ return NULL;
149+ }
150+ END_CATCH
151+
152+ data = XCNEW (struct lk_private_data);
153+ data->alias = alias;
154+ data->data.type = value_type (val);
155+
156+ new_slot = lk_find_slot (alias);
157+ *new_slot = data;
158+
159+ return data;
160+}
161+
162+/* Nearly the same as lk_init_addr, with the difference that two names are
163+ needed, i.e. the struct name S_NAME containing the field with name
164+ F_NAME. */
165+
166+struct lk_private_data *
167+lk_init_field (const char *s_name, const char *f_name, const char *alias,
168+ int silent)
169+{
170+ struct lk_private_data *data;
171+ struct lk_private_data *parent;
172+ struct field *first, *last, *field;
173+ void **new_slot;
174+ void *old_slot;
175+
176+ if ((old_slot = lk_find (alias)) != NULL)
177+ return (struct lk_private_data *) old_slot;
178+
179+ parent = lk_find (s_name);
180+ if (parent == NULL)
181+ {
182+ parent = lk_init_struct (s_name, s_name, silent);
183+
184+ /* Only SILENT == true needed, as otherwise lk_init_struct would throw
185+ an error. */
186+ if (parent == NULL)
187+ return NULL;
188+ }
189+
190+ first = TYPE_FIELDS (parent->data.type);
191+ last = first + TYPE_NFIELDS (parent->data.type);
192+ for (field = first; field < last; field ++)
193+ {
194+ if (streq (field->name, f_name))
195+ break;
196+ }
197+
198+ if (field == last)
199+ {
200+ if (!silent)
201+ error (_("Could not find field %s->%s. Aborting."), s_name, f_name);
202+ return NULL;
203+ }
204+
205+ data = XCNEW (struct lk_private_data);
206+ data->alias = alias;
207+ data->data.field = field;
208+
209+ new_slot = lk_find_slot (alias);
210+ *new_slot = data;
211+
212+ return data;
213+}
214+
215+/* Map cpu number CPU to the original PTID from target beneath. */
216+
217+static ptid_t
218+lk_cpu_to_old_ptid (const int cpu)
219+{
220+ struct lk_ptid_map *ptid_map;
221+
222+ for (ptid_map = LK_PRIVATE->old_ptid; ptid_map;
223+ ptid_map = ptid_map->next)
224+ {
225+ if (ptid_map->cpu == cpu)
226+ return ptid_map->old_ptid;
227+ }
228+
229+ error (_("Could not map CPU %d to original PTID. Aborting."), cpu);
230+}
231+
232+/* Helper functions to read and return basic types at a given ADDRess. */
233+
234+/* Read and return the integer value at address ADDR. */
235+
236+int
237+lk_read_int (CORE_ADDR addr)
238+{
239+ size_t int_size = lk_builtin_type_size (int);
240+ enum bfd_endian endian = gdbarch_byte_order (current_inferior ()->gdbarch);
241+ return read_memory_integer (addr, int_size, endian);
242+}
243+
244+/* Read and return the unsigned integer value at address ADDR. */
245+
246+unsigned int
247+lk_read_uint (CORE_ADDR addr)
248+{
249+ size_t uint_size = lk_builtin_type_size (unsigned_int);
250+ enum bfd_endian endian = gdbarch_byte_order (current_inferior ()->gdbarch);
251+ return read_memory_integer (addr, uint_size, endian);
252+}
253+
254+/* Read and return the long integer value at address ADDR. */
255+
256+LONGEST
257+lk_read_long (CORE_ADDR addr)
258+{
259+ size_t long_size = lk_builtin_type_size (long);
260+ enum bfd_endian endian = gdbarch_byte_order (current_inferior ()->gdbarch);
261+ return read_memory_integer (addr, long_size, endian);
262+}
263+
264+/* Read and return the unsigned long integer value at address ADDR. */
265+
266+ULONGEST
267+lk_read_ulong (CORE_ADDR addr)
268+{
269+ size_t ulong_size = lk_builtin_type_size (unsigned_long);
270+ enum bfd_endian endian = gdbarch_byte_order (current_inferior ()->gdbarch);
271+ return read_memory_unsigned_integer (addr, ulong_size, endian);
272+}
273+
274+/* Read and return the address value at address ADDR. */
275+
276+CORE_ADDR
277+lk_read_addr (CORE_ADDR addr)
278+{
279+ return (CORE_ADDR) lk_read_ulong (addr);
280+}
281+
282+/* Reads a bitmap at a given ADDRess of size SIZE (in bits). Allocates and
283+ returns an array of ulongs. The caller is responsible for freeing the
284+ array when it is no longer needed. */
285+
286+ULONGEST *
287+lk_read_bitmap (CORE_ADDR addr, size_t size)
288+{
289+ ULONGEST *bitmap;
290+ size_t ulong_size, len;
291+
292+ ulong_size = lk_builtin_type_size (unsigned_long);
293+ len = LK_DIV_ROUND_UP (size, ulong_size * LK_BITS_PER_BYTE);
294+ bitmap = XNEWVEC (ULONGEST, len);
295+
296+ for (size_t i = 0; i < len; i++)
297+ bitmap[i] = lk_read_ulong (addr + i * ulong_size);
298+
299+ return bitmap;
300+}
301+
302+/* Return the next set bit in bitmap BITMAP of size SIZE (in bits)
303+ starting from bit (index) BIT. Return SIZE when the end of the bitmap
304+ was reached. To iterate over all set bits use macro
305+ LK_BITMAP_FOR_EACH_SET_BIT defined in lk-low.h. */
306+
307+size_t
308+lk_bitmap_find_next_bit (ULONGEST *bitmap, size_t size, size_t bit)
309+{
310+ size_t ulong_size, bits_per_ulong, elt;
311+
312+ ulong_size = lk_builtin_type_size (unsigned_long);
313+ bits_per_ulong = ulong_size * LK_BITS_PER_BYTE;
314+ elt = bit / bits_per_ulong;
315+
316+ while (bit < size)
317+ {
318+ /* FIXME: Explain why using lsb0 bit order. */
319+ if (bitmap[elt] & (1UL << (bit % bits_per_ulong)))
320+ return bit;
321+
322+ bit++;
323+ if (bit % bits_per_ulong == 0)
324+ elt++;
325+ }
326+
327+ return size;
328+}
329+
330+/* Returns the Hamming weight, i.e. number of set bits, of bitmap BITMAP
331+ with size SIZE (in bits). */
332+
333+size_t
334+lk_bitmap_hweight (ULONGEST *bitmap, size_t size)
335+{
336+ size_t ulong_size, bit, bits_per_ulong, elt, retval;
337+
338+ ulong_size = lk_builtin_type_size (unsigned_long);
339+ bits_per_ulong = ulong_size * LK_BITS_PER_BYTE;
340+ elt = bit = 0;
341+ retval = 0;
342+
343+ while (bit < size)
344+ {
345+ if (bitmap[elt] & (1 << bit % bits_per_ulong))
346+ retval++;
347+
348+ bit++;
349+ if (bit % bits_per_ulong == 0)
350+ elt++;
351+ }
352+
353+ return retval;
354+}
355+
356+/* Provide the per_cpu_offset of cpu CPU. See comment in lk-low.h for
357+ details. */
358+
359+CORE_ADDR
360+lk_get_percpu_offset (unsigned int cpu)
361+{
362+ size_t ulong_size = lk_builtin_type_size (unsigned_long);
363+ CORE_ADDR percpu_elt;
364+
365+ /* Give the architecture a chance to overwrite default behaviour. */
366+ if (LK_HOOK->get_percpu_offset)
367+ return LK_HOOK->get_percpu_offset (cpu);
368+
369+ percpu_elt = LK_ADDR (__per_cpu_offset) + (ulong_size * cpu);
370+ return lk_read_addr (percpu_elt);
371+}
372+
373+
374+/* Test if a given task TASK is running. See comment in lk-low.h for
375+ details. */
376+
377+unsigned int
378+lk_task_running (CORE_ADDR task)
379+{
380+ ULONGEST *cpu_online_mask;
381+ size_t size;
382+ unsigned int cpu;
383+ struct cleanup *old_chain;
384+
385+ size = LK_BITMAP_SIZE (cpumask);
386+ cpu_online_mask = lk_read_bitmap (LK_ADDR (cpu_online_mask), size);
387+ old_chain = make_cleanup (xfree, cpu_online_mask);
388+
389+ LK_BITMAP_FOR_EACH_SET_BIT (cpu_online_mask, size, cpu)
390+ {
391+ CORE_ADDR rq;
392+ CORE_ADDR curr;
393+
394+ rq = LK_ADDR (runqueues) + lk_get_percpu_offset (cpu);
395+ curr = lk_read_addr (rq + LK_OFFSET (rq, curr));
396+
397+ if (curr == task)
398+ break;
399+ }
400+
401+ if (cpu == size)
402+ cpu = LK_CPU_INVAL;
403+
404+ do_cleanups (old_chain);
405+ return cpu;
406+}
407+
408+/* Update running tasks with information from struct rq->curr. */
409+
410+static void
411+lk_update_running_tasks ()
412+{
413+ ULONGEST *cpu_online_mask;
414+ size_t size;
415+ unsigned int cpu;
416+ struct cleanup *old_chain;
417+
418+ size = LK_BITMAP_SIZE (cpumask);
419+ cpu_online_mask = lk_read_bitmap (LK_ADDR (cpu_online_mask), size);
420+ old_chain = make_cleanup (xfree, cpu_online_mask);
421+
422+ LK_BITMAP_FOR_EACH_SET_BIT (cpu_online_mask, size, cpu)
423+ {
424+ struct thread_info *tp;
425+ CORE_ADDR rq, curr;
426+ LONGEST pid, inf_pid;
427+ ptid_t new_ptid, old_ptid;
428+
429+ rq = LK_ADDR (runqueues) + lk_get_percpu_offset (cpu);
430+ curr = lk_read_addr (rq + LK_OFFSET (rq, curr));
431+ pid = lk_read_int (curr + LK_OFFSET (task_struct, pid));
432+ inf_pid = current_inferior ()->pid;
433+
434+ new_ptid = ptid_build (inf_pid, pid, curr);
435+ old_ptid = lk_cpu_to_old_ptid (cpu); /* FIXME not suitable for
436+ running targets? */
437+
438+ tp = find_thread_ptid (old_ptid);
439+ if (tp && tp->state != THREAD_EXITED)
440+ thread_change_ptid (old_ptid, new_ptid);
441+ }
442+ do_cleanups (old_chain);
443+}
444+
445+/* Update sleeping tasks by walking the task_structs starting from
446+ init_task. */
447+
448+static void
449+lk_update_sleeping_tasks ()
450+{
451+ CORE_ADDR init_task, task, thread;
452+ int inf_pid;
453+
454+ inf_pid = current_inferior ()->pid;
455+ init_task = LK_ADDR (init_task);
456+
457+ lk_list_for_each_container (task, init_task, task_struct, tasks)
458+ {
459+ lk_list_for_each_container (thread, task, task_struct, thread_group)
460+ {
461+ int pid;
462+ ptid_t ptid;
463+ struct thread_info *tp;
464+
465+ pid = lk_read_int (thread + LK_OFFSET (task_struct, pid));
466+ ptid = ptid_build (inf_pid, pid, thread);
467+
468+ tp = find_thread_ptid (ptid);
469+ if (tp == NULL || tp->state == THREAD_EXITED)
470+ add_thread (ptid);
471+ }
472+ }
473+}
474+
475+/* Function for targets to_update_thread_list hook. */
476+
477+static void
478+lk_update_thread_list (struct target_ops *target)
479+{
480+ prune_threads ();
481+ lk_update_running_tasks ();
482+ lk_update_sleeping_tasks ();
483+}
484+
485+/* Function for targets to_fetch_registers hook. */
486+
487+static void
488+lk_fetch_registers (struct target_ops *target,
489+ struct regcache *regcache, int regnum)
490+{
491+ CORE_ADDR task;
492+ unsigned int cpu;
493+
494+ task = (CORE_ADDR) ptid_get_tid (inferior_ptid);
495+ cpu = lk_task_running (task);
496+
497+ /* Let the target beneath fetch registers of running tasks. */
498+ if (cpu != LK_CPU_INVAL)
499+ {
500+ struct cleanup *old_inferior_ptid;
501+
502+ old_inferior_ptid = save_inferior_ptid ();
503+ inferior_ptid = lk_cpu_to_old_ptid (cpu);
504+ linux_kernel_ops->beneath->to_fetch_registers (target, regcache, regnum);
505+ do_cleanups (old_inferior_ptid);
506+ }
507+ else
508+ {
509+ struct gdbarch *gdbarch;
510+ unsigned int i;
511+
512+ LK_HOOK->get_registers (task, target, regcache, regnum);
513+
514+ /* Mark all registers not found as unavailable. */
515+ gdbarch = get_regcache_arch (regcache);
516+ for (i = 0; i < gdbarch_num_regs (gdbarch); i++)
517+ {
518+ if (regcache_register_status (regcache, i) == REG_UNKNOWN)
519+ regcache_raw_supply (regcache, i, NULL);
520+ }
521+ }
522+}
523+
524+/* Function for targets to_pid_to_str hook. Marks running tasks with an
525+ asterisk "*". */
526+
527+static char *
528+lk_pid_to_str (struct target_ops *target, ptid_t ptid)
529+{
530+ static char buf[64];
531+ long pid;
532+ CORE_ADDR task;
533+
534+ pid = ptid_get_lwp (ptid);
535+ task = (CORE_ADDR) ptid_get_tid (ptid);
536+
537+ xsnprintf (buf, sizeof (buf), "PID: %5li%s, 0x%s",
538+ pid, ((lk_task_running (task) != LK_CPU_INVAL) ? "*" : ""),
539+ phex (task, lk_builtin_type_size (unsigned_long)));
540+
541+ return buf;
542+}
543+
544+/* Function for targets to_thread_name hook. */
545+
546+static const char *
547+lk_thread_name (struct target_ops *target, struct thread_info *ti)
548+{
549+ static char buf[LK_TASK_COMM_LEN + 1];
550+ char tmp[LK_TASK_COMM_LEN + 1];
551+ CORE_ADDR task, comm;
552+ size_t size;
553+
554+ size = std::min ((unsigned int) LK_TASK_COMM_LEN,
555+ LK_ARRAY_LEN(LK_FIELD (task_struct, comm)));
556+
557+ task = (CORE_ADDR) ptid_get_tid (ti->ptid);
558+ comm = task + LK_OFFSET (task_struct, comm);
559+ read_memory (comm, (gdb_byte *) tmp, size);
560+
561+ xsnprintf (buf, sizeof (buf), "%-16s", tmp);
562+
563+ return buf;
564+}
565+
566+/* Functions to initialize and free target_ops and its private data, as well
567+ as functions for the target's to_open/close/detach hooks. */
568+
569+/* Check if OBJFILE is a Linux kernel. */
570+
571+static int
572+lk_is_linux_kernel (struct objfile *objfile)
573+{
574+ int ok = 0;
575+
576+ if (objfile == NULL || !(objfile->flags & OBJF_MAINLINE))
577+ return 0;
578+
579+ ok += lookup_minimal_symbol ("linux_banner", NULL, objfile).minsym != NULL;
580+ ok += lookup_minimal_symbol ("_stext", NULL, objfile).minsym != NULL;
581+ ok += lookup_minimal_symbol ("_etext", NULL, objfile).minsym != NULL;
582+
583+ return (ok > 2);
584+}
585+
586+/* Initialize struct lk_private. */
587+
588+static void
589+lk_init_private ()
590+{
591+ linux_kernel_ops->to_data = XCNEW (struct lk_private);
592+ LK_PRIVATE->hooks = XCNEW (struct lk_private_hooks);
593+ LK_PRIVATE->data = htab_create_alloc (31, (htab_hash) lk_hash_private_data,
594+ (htab_eq) lk_private_data_eq, NULL,
595+ xcalloc, xfree);
596+}
597+
598+/* Initialize architecture independent private data. Must be called
599+ _after_ symbol tables were initialized. */
600+
601+static void
602+lk_init_private_data ()
603+{
604+ if (LK_PRIVATE->data != NULL)
605+ htab_empty (LK_PRIVATE->data);
606+
607+ LK_DECLARE_FIELD (task_struct, tasks);
608+ LK_DECLARE_FIELD (task_struct, pid);
609+ LK_DECLARE_FIELD (task_struct, tgid);
610+ LK_DECLARE_FIELD (task_struct, thread_group);
611+ LK_DECLARE_FIELD (task_struct, comm);
612+ LK_DECLARE_FIELD (task_struct, thread);
613+
614+ LK_DECLARE_FIELD (list_head, next);
615+ LK_DECLARE_FIELD (list_head, prev);
616+
617+ LK_DECLARE_FIELD (rq, curr);
618+
619+ LK_DECLARE_FIELD (cpumask, bits);
620+
621+ LK_DECLARE_ADDR (init_task);
622+ LK_DECLARE_ADDR (runqueues);
623+ LK_DECLARE_ADDR (__per_cpu_offset);
624+ LK_DECLARE_ADDR (init_mm);
625+
626+ LK_DECLARE_ADDR_ALIAS (__cpu_online_mask, cpu_online_mask); /* linux 4.5+ */
627+ LK_DECLARE_ADDR_ALIAS (cpu_online_bits, cpu_online_mask); /* linux -4.4 */
628+ if (LK_ADDR (cpu_online_mask) == -1)
629+ error (_("Could not find address cpu_online_mask. Aborting."));
630+}
631+
632+/* Frees the cpu to old ptid map. */
633+
634+static void
635+lk_free_ptid_map ()
636+{
637+ while (LK_PRIVATE->old_ptid)
638+ {
639+ struct lk_ptid_map *tmp;
640+
641+ tmp = LK_PRIVATE->old_ptid;
642+ LK_PRIVATE->old_ptid = tmp->next;
643+ XDELETE (tmp);
644+ }
645+}
646+
647+/* Initialize the cpu to old ptid map. Prefer the arch dependent
648+ map_running_task_to_cpu hook if provided, else assume that the PID used
649+ by the target beneath is the same as the PID in task_struct. See
650+ comment on lk_ptid_map in lk-low.h for details. */
651+
652+static void
653+lk_init_ptid_map ()
654+{
655+ struct thread_info *ti;
656+ ULONGEST *cpu_online_mask;
657+ size_t size;
658+ unsigned int cpu;
659+ struct cleanup *old_chain;
660+
661+ if (LK_PRIVATE->old_ptid != NULL)
662+ lk_free_ptid_map ();
663+
664+ size = LK_BITMAP_SIZE (cpumask);
665+ cpu_online_mask = lk_read_bitmap (LK_ADDR (cpu_online_mask), size);
666+ old_chain = make_cleanup (xfree, cpu_online_mask);
667+
668+ ALL_THREADS (ti)
669+ {
670+ struct lk_ptid_map *ptid_map = XCNEW (struct lk_ptid_map);
671+ CORE_ADDR rq, curr;
672+ int pid;
673+
674+ /* Give the architecture a chance to overwrite default behaviour. */
675+ if (LK_HOOK->map_running_task_to_cpu)
676+ {
677+ ptid_map->cpu = LK_HOOK->map_running_task_to_cpu (ti);
678+ }
679+ else
680+ {
681+ LK_BITMAP_FOR_EACH_SET_BIT (cpu_online_mask, size, cpu)
682+ {
683+ rq = LK_ADDR (runqueues) + lk_get_percpu_offset (cpu);
684+ curr = lk_read_addr (rq + LK_OFFSET (rq, curr));
685+ pid = lk_read_int (curr + LK_OFFSET (task_struct, pid));
686+
687+ if (pid == ptid_get_lwp (ti->ptid))
688+ {
689+ ptid_map->cpu = cpu;
690+ break;
691+ }
692+ }
693+ if (cpu == size)
694+ error (_("Could not map thread with pid %d, lwp %lu to a cpu."),
695+ ti->ptid.pid, ti->ptid.lwp);
696+ }
697+ ptid_map->old_ptid = ti->ptid;
698+ ptid_map->next = LK_PRIVATE->old_ptid;
699+ LK_PRIVATE->old_ptid = ptid_map;
700+ }
701+
702+ do_cleanups (old_chain);
703+}
704+
705+/* Initializes all private data and pushes the linux kernel target, if not
706+ already done. */
707+
708+static void
709+lk_try_push_target ()
710+{
711+ struct gdbarch *gdbarch;
712+
713+ gdbarch = current_inferior ()->gdbarch;
714+ if (!(gdbarch && gdbarch_lk_init_private_p (gdbarch)))
715+ error (_("Linux kernel debugging not supported on %s."),
716+ gdbarch_bfd_arch_info (gdbarch)->printable_name);
717+
718+ lk_init_private ();
719+ lk_init_private_data ();
720+ gdbarch_lk_init_private (gdbarch);
721+ /* Check for required arch hooks. */
722+ gdb_assert (LK_HOOK->get_registers);
723+
724+ lk_init_ptid_map ();
725+ lk_update_thread_list (linux_kernel_ops);
726+
727+ if (!target_is_pushed (linux_kernel_ops))
728+ push_target (linux_kernel_ops);
729+}
730+
731+/* Function for targets to_open hook. */
732+
733+static void
734+lk_open (const char *args, int from_tty)
735+{
736+ struct objfile *objfile;
737+
738+ if (target_is_pushed (linux_kernel_ops))
739+ {
740+ printf_unfiltered (_("Linux kernel target already pushed. Aborting\n"));
741+ return;
742+ }
743+
744+ for (objfile = current_program_space->objfiles; objfile;
745+ objfile = objfile->next)
746+ {
747+ if (lk_is_linux_kernel (objfile)
748+ && ptid_get_pid (inferior_ptid) != 0)
749+ {
750+ lk_try_push_target ();
751+ return;
752+ }
753+ }
754+ printf_unfiltered (_("Could not find a valid Linux kernel object file. "
755+ "Aborting.\n"));
756+}
757+
758+/* Function for targets to_close hook. Deletes all private data. */
759+
760+static void
761+lk_close (struct target_ops *ops)
762+{
763+ htab_delete (LK_PRIVATE->data);
764+ lk_free_ptid_map ();
765+ XDELETE (LK_PRIVATE->hooks);
766+
767+ XDELETE (LK_PRIVATE);
768+ linux_kernel_ops->to_data = NULL;
769+}
770+
771+/* Function for targets to_detach hook. */
772+
773+static void
774+lk_detach (struct target_ops *t, const char *args, int from_tty)
775+{
776+ struct target_ops *beneath = linux_kernel_ops->beneath;
777+
778+ unpush_target (linux_kernel_ops);
779+ reinit_frame_cache ();
780+ if (from_tty)
781+ printf_filtered (_("Linux kernel target detached.\n"));
782+
783+ beneath->to_detach (beneath, args, from_tty);
784+}
785+
786+/* Function for new objfile observer. */
787+
788+static void
789+lk_observer_new_objfile (struct objfile *objfile)
790+{
791+ if (lk_is_linux_kernel (objfile)
792+ && ptid_get_pid (inferior_ptid) != 0)
793+ lk_try_push_target ();
794+}
795+
796+/* Function for inferior created observer. */
797+
798+static void
799+lk_observer_inferior_created (struct target_ops *ops, int from_tty)
800+{
801+ struct objfile *objfile;
802+
803+ if (ptid_get_pid (inferior_ptid) == 0)
804+ return;
805+
806+ for (objfile = current_inferior ()->pspace->objfiles; objfile;
807+ objfile = objfile->next)
808+ {
809+ if (lk_is_linux_kernel (objfile))
810+ {
811+ lk_try_push_target ();
812+ return;
813+ }
814+ }
815+}
816+
817+/* Initialize linux kernel target. */
818+
819+static void
820+init_linux_kernel_ops (void)
821+{
822+ struct target_ops *t;
823+
824+ if (linux_kernel_ops != NULL)
825+ return;
826+
827+ t = XCNEW (struct target_ops);
828+ t->to_shortname = "linux-kernel";
829+ t->to_longname = "linux kernel support";
830+ t->to_doc = "Adds support to debug the Linux kernel";
831+
832+ /* set t->to_data = struct lk_private in lk_init_private. */
833+
834+ t->to_open = lk_open;
835+ t->to_close = lk_close;
836+ t->to_detach = lk_detach;
837+ t->to_fetch_registers = lk_fetch_registers;
838+ t->to_update_thread_list = lk_update_thread_list;
839+ t->to_pid_to_str = lk_pid_to_str;
840+ t->to_thread_name = lk_thread_name;
841+
842+ t->to_stratum = thread_stratum;
843+ t->to_magic = OPS_MAGIC;
844+
845+ linux_kernel_ops = t;
846+
847+ add_target (t);
848+}
849+
850+/* Provide a prototype to silence -Wmissing-prototypes. */
851+extern initialize_file_ftype _initialize_linux_kernel;
852+
853+void
854+_initialize_linux_kernel (void)
855+{
856+ init_linux_kernel_ops ();
857+
858+ observer_attach_new_objfile (lk_observer_new_objfile);
859+ observer_attach_inferior_created (lk_observer_inferior_created);
860+}
--- /dev/null
+++ b/gdb/lk-low.h
@@ -0,0 +1,309 @@
1+/* Basic Linux kernel support, architecture independent.
2+
3+ Copyright (C) 2016 Free Software Foundation, Inc.
4+
5+ This file is part of GDB.
6+
7+ This program is free software; you can redistribute it and/or modify
8+ it under the terms of the GNU General Public License as published by
9+ the Free Software Foundation; either version 3 of the License, or
10+ (at your option) any later version.
11+
12+ This program is distributed in the hope that it will be useful,
13+ but WITHOUT ANY WARRANTY; without even the implied warranty of
14+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15+ GNU General Public License for more details.
16+
17+ You should have received a copy of the GNU General Public License
18+ along with this program. If not, see <http://www.gnu.org/licenses/>. */
19+
20+#ifndef __LK_LOW_H__
21+#define __LK_LOW_H__
22+
23+#include "target.h"
24+
25+extern struct target_ops *linux_kernel_ops;
26+
27+/* Copy constants defined in Linux kernel. */
28+#define LK_TASK_COMM_LEN 16
29+#define LK_BITS_PER_BYTE 8
30+
31+/* Definitions used in linux kernel target. */
32+#define LK_CPU_INVAL -1U
33+
34+/* Private data structs for this target. */
35+/* Forward declarations. */
36+struct lk_private_hooks;
37+struct lk_ptid_map;
38+
39+/* Short hand access to private data. */
40+#define LK_PRIVATE ((struct lk_private *) linux_kernel_ops->to_data)
41+#define LK_HOOK (LK_PRIVATE->hooks)
42+
43+struct lk_private
44+{
45+ /* Hashtab for needed addresses, structs and fields. */
46+ htab_t data;
47+
48+ /* Linked list to map between cpu number and original ptid from target
49+ beneath. */
50+ struct lk_ptid_map *old_ptid;
51+
52+ /* Hooks for architecture dependent functions. */
53+ struct lk_private_hooks *hooks;
54+};
55+
56+/* We use the following convention for PTIDs:
57+
58+ ptid->pid = inferior's PID
59+ ptid->lwp = PID from task_struct
60+ ptid->tid = address of task_struct
61+
62+ Using the task_struct's address as TID has two reasons. First, we need it
63+ often and there is no other reasonable way to pass it down. Second, it
64+ helps us to distinguish swapper tasks as they all have PID = 0.
65+
66+ Furthermore we cannot rely on the target beneath to use the same PID as the
67+ task_struct. Thus we need a mapping between our PTID and the PTID of the
68+ target beneath. Otherwise it is impossible to pass jobs, e.g. fetching
69+ registers of running tasks, to the target beneath. */
70+
71+/* Private data struct to map between our and the target beneath PTID. */
72+
73+struct lk_ptid_map
74+{
75+ struct lk_ptid_map *next;
76+ unsigned int cpu;
77+ ptid_t old_ptid;
78+};
79+
80+/* Private data struct to be stored in hashtab. */
81+
82+struct lk_private_data
83+{
84+ const char *alias;
85+
86+ union
87+ {
88+ CORE_ADDR addr;
89+ struct type *type;
90+ struct field *field;
91+ } data;
92+};
93+
94+/* Wrapper for htab_hash_string to work with our private data. */
95+
96+static inline hashval_t
97+lk_hash_private_data (const struct lk_private_data *entry)
98+{
99+ return htab_hash_string (entry->alias);
100+}
101+
102+/* Function for htab_eq to work with our private data. */
103+
104+static inline int
105+lk_private_data_eq (const struct lk_private_data *entry,
106+ const struct lk_private_data *element)
107+{
108+ return streq (entry->alias, element->alias);
109+}
110+
111+/* Wrapper for htab_find_slot to work with our private data. Do not use
112+ directly, use the macros below instead. */
113+
114+static inline void **
115+lk_find_slot (const char *alias)
116+{
117+ const struct lk_private_data dummy = { alias };
118+ return htab_find_slot (LK_PRIVATE->data, &dummy, INSERT);
119+}
120+
121+/* Wrapper for htab_find to work with our private data. Do not use
122+ directly, use the macros below instead. */
123+
124+static inline struct lk_private_data *
125+lk_find (const char *alias)
126+{
127+ const struct lk_private_data dummy = { alias };
128+ return (struct lk_private_data *) htab_find (LK_PRIVATE->data, &dummy);
129+}
130+
131+/* Functions to initialize private data. Do not use directly, use the
132+ macros below instead. */
133+
134+extern struct lk_private_data *lk_init_addr (const char *name,
135+ const char *alias, int silent);
136+extern struct lk_private_data *lk_init_struct (const char *name,
137+ const char *alias, int silent);
138+extern struct lk_private_data *lk_init_field (const char *s_name,
139+ const char *f_name,
140+ const char *alias, int silent);
141+
142+/* The names we use to store our private data in the hashtab. */
143+
144+#define LK_STRUCT_NAME(s_name) ("struct " #s_name)
145+#define LK_FIELD_NAME(s_name, f_name) (#s_name " " #f_name)
146+
147+/* Macros to initialize addresses and fields, where (S_/F_)NAME is the
148+ variable's name as used in Linux. LK_DECLARE_FIELD also initializes the
149+ corresponding struct entry. Throws an error if no symbol with the given
150+ name is found. */
151+
152+#define LK_DECLARE_ADDR(name) \
153+ lk_init_addr (#name, #name, 0)
154+#define LK_DECLARE_FIELD(s_name, f_name) \
155+ lk_init_field (LK_STRUCT_NAME (s_name), #f_name,\
156+ LK_FIELD_NAME (s_name, f_name), 0)
157+
158+/* Same as LK_DECLARE_*, but returns NULL instead of throwing an error if no
159+ symbol was found. The caller is responsible for checking for possible errors.
160+ */
161+
162+#define LK_DECLARE_ADDR_SILENT(name) \
163+ lk_init_addr (#name, #name, 1)
164+#define LK_DECLARE_FIELD_SILENT(s_name, f_name) \
165+ lk_init_field (LK_STRUCT_NAME (s_name), #f_name,\
166+ LK_FIELD_NAME (s_name, f_name), 1)
167+
168+/* Same as LK_DECLARE_*_SILENT, but allows you to give an ALIAS name. If used
169+ for a struct, the struct has to be declared explicitly _before_ any of its
170+ fields. They are meant to be used when a variable in the kernel was simply
171+ renamed (at least from our point of view). The caller is responsible for
172+ checking for possible errors. */
173+
174+#define LK_DECLARE_ADDR_ALIAS(name, alias) \
175+ lk_init_addr (#name, #alias, 1)
176+#define LK_DECLARE_STRUCT_ALIAS(s_name, alias) \
177+ lk_init_struct (LK_STRUCT_NAME(s_name), LK_STRUCT_NAME (alias), 1)
178+#define LK_DECLARE_FIELD_ALIAS(s_alias, f_name, f_alias) \
179+ lk_init_field (LK_STRUCT_NAME (s_alias), #f_name, \
180+ LK_FIELD_NAME (s_alias, f_alias), 1)
181+
182+/* Macros to retrieve private data from hashtab. Returns NULL (-1) if no entry
183+ with the given ALIAS exists. The caller only needs to check for possible
184+ errors if not done so at initialization. */
185+
186+#define LK_ADDR(alias) \
187+ (lk_find (#alias) ? (lk_find (#alias))->data.addr : -1)
188+#define LK_STRUCT(alias) \
189+ (lk_find (LK_STRUCT_NAME (alias)) \
190+ ? (lk_find (LK_STRUCT_NAME (alias)))->data.type \
191+ : NULL)
192+#define LK_FIELD(s_alias, f_alias) \
193+ (lk_find (LK_FIELD_NAME (s_alias, f_alias)) \
194+ ? (lk_find (LK_FIELD_NAME (s_alias, f_alias)))->data.field \
195+ : NULL)
196+
197+
198+/* Definitions for architecture dependent hooks. */
199+/* Hook to read registers from the target and supply their content
200+ to the regcache. */
201+typedef void (*lk_hook_get_registers) (CORE_ADDR task,
202+ struct target_ops *target,
203+ struct regcache *regcache,
204+ int regnum);
205+
206+/* Hook to return the per_cpu_offset of cpu CPU. Only architectures that
207+ do not use the __per_cpu_offset array to determine the offset have to
208+ supply this hook. */
209+typedef CORE_ADDR (*lk_hook_get_percpu_offset) (unsigned int cpu);
210+
211+/* Hook to map a running task to a logical CPU. Required if the target
212+ beneath uses a different PID than the one in struct rq->curr. */
213+typedef unsigned int (*lk_hook_map_running_task_to_cpu) (struct thread_info *ti);
214+
215+struct lk_private_hooks
216+{
217+ /* required */
218+ lk_hook_get_registers get_registers;
219+
220+ /* optional, required if __per_cpu_offset array is not used to determine
221+ offset. */
222+ lk_hook_get_percpu_offset get_percpu_offset;
223+
224+ /* optional, required if the target beneath uses a different PID than the
225+ one in struct rq->curr. */
226+ lk_hook_map_running_task_to_cpu map_running_task_to_cpu;
227+};
228+
229+/* Helper functions to read and return a value at a given ADDRess. */
230+extern int lk_read_int (CORE_ADDR addr);
231+extern unsigned int lk_read_uint (CORE_ADDR addr);
232+extern LONGEST lk_read_long (CORE_ADDR addr);
233+extern ULONGEST lk_read_ulong (CORE_ADDR addr);
234+extern CORE_ADDR lk_read_addr (CORE_ADDR addr);
235+
236+/* Reads a bitmap at a given ADDRess of size SIZE (in bits). Allocates and
237+ returns an array of ulongs. The caller is responsible for freeing the
238+ array when it is no longer needed. */
239+extern ULONGEST *lk_read_bitmap (CORE_ADDR addr, size_t size);
240+
241+/* Walks the bitmap BITMAP of size SIZE from bit (index) BIT.
242+ Returns the index of the next set bit or SIZE, when the end of the bitmap
243+ was reached. To iterate over all set bits use macro
244+ LK_BITMAP_FOR_EACH_SET_BIT defined below. */
245+extern size_t lk_bitmap_find_next_bit (ULONGEST *bitmap, size_t size,
246+ size_t bit);
247+#define LK_BITMAP_FOR_EACH_SET_BIT(bitmap, size, bit) \
248+ for ((bit) = lk_bitmap_find_next_bit ((bitmap), (size), 0); \
249+ (bit) < (size); \
250+ (bit) = lk_bitmap_find_next_bit ((bitmap), (size), (bit) + 1))
251+
252+/* Returns the size of BITMAP in bits. */
253+#define LK_BITMAP_SIZE(bitmap) \
254+ (FIELD_SIZE (LK_FIELD (bitmap, bits)) * LK_BITS_PER_BYTE)
255+
256+/* Returns the Hamming weight, i.e. number of set bits, of bitmap BITMAP with
257+ size SIZE (in bits). */
258+extern size_t lk_bitmap_hweight (ULONGEST *bitmap, size_t size);
259+
260+
261+/* Short hand access to current gdbarchs builtin types and their
262+ size (in byte). For TYPE replace spaces " " by underscore "_", e.g.
263+ "unsigned int" => "unsigned_int". */
264+#define lk_builtin_type(type) \
265+ (builtin_type (current_inferior ()->gdbarch)->builtin_##type)
266+#define lk_builtin_type_size(type) \
267+ (lk_builtin_type (type)->length)
268+
269+/* If field FIELD is an array returns its length (in #elements). */
270+#define LK_ARRAY_LEN(field) \
271+ (FIELD_SIZE (field) / FIELD_TARGET_SIZE (field))
272+
273+/* Short hand access to the offset of field F_NAME in struct S_NAME. */
274+#define LK_OFFSET(s_name, f_name) \
275+ (FIELD_OFFSET (LK_FIELD (s_name, f_name)))
276+
277+/* Returns the container of field FNAME of struct SNAME located at address
278+ ADDR. */
279+#define LK_CONTAINER_OF(addr, sname, fname) \
280+ ((addr) - LK_OFFSET (sname, fname))
281+
282+/* Divides numerator N by denominator D and rounds up the result. */
283+#define LK_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
284+
285+
286+/* Additional access macros to fields in the style of gdbtypes.h */
287+/* Returns the size of field FIELD (in bytes). If FIELD is an array returns
288+ the size of the whole array. */
289+#define FIELD_SIZE(field) \
290+ TYPE_LENGTH (check_typedef (FIELD_TYPE (*field)))
291+
292+/* Returns the size of the target type of field FIELD (in bytes). If FIELD is
293+ an array returns the size of its elements. */
294+#define FIELD_TARGET_SIZE(field) \
295+ TYPE_LENGTH (check_typedef (TYPE_TARGET_TYPE (FIELD_TYPE (*field))))
296+
297+/* Returns the offset of field FIELD (in bytes). */
298+#define FIELD_OFFSET(field) \
299+ (FIELD_BITPOS (*field) / TARGET_CHAR_BIT)
300+
301+/* Provides the per_cpu_offset of cpu CPU. If the architecture
302+ provides a get_percpu_offset hook, the call is passed to it. Otherwise
303+ returns the __per_cpu_offset[CPU] element. */
304+extern CORE_ADDR lk_get_percpu_offset (unsigned int cpu);
305+
306+/* Tests if a given task TASK is running. Returns either the cpu-id
307+ if running or LK_CPU_INVAL if not. */
308+extern unsigned int lk_task_running (CORE_ADDR task);
309+#endif /* __LK_LOW_H__ */
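
For reference, a small sketch of how the offset/container helpers at the end
of lk-low.h compose (TASK is assumed to hold the address of a task_struct;
purely illustrative):

    /* Address of the embedded list_head TASK->tasks...  */
    CORE_ADDR tasks_addr = task + LK_OFFSET (task_struct, tasks);

    /* ...and back from the list_head to the enclosing task_struct.  */
    CORE_ADDR same_task = LK_CONTAINER_OF (tasks_addr, task_struct, tasks);

    gdb_assert (same_task == task);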