Upgrade GDB from 7.4.1 to 7.6.1 on the vendor branch
[dragonfly.git] / contrib / gdb-7 / gdb / target.c
1/* Select target systems and architectures at runtime for GDB.
2
3 Copyright (C) 1990-2013 Free Software Foundation, Inc.
4
5 Contributed by Cygnus Support.
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include <errno.h>
24#include "gdb_string.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "symtab.h"
28#include "inferior.h"
29#include "bfd.h"
30#include "symfile.h"
31#include "objfiles.h"
32#include "dcache.h"
33#include <signal.h>
34#include "regcache.h"
35#include "gdb_assert.h"
36#include "gdbcore.h"
37#include "exceptions.h"
38#include "target-descriptions.h"
39#include "gdbthread.h"
40#include "solib.h"
41#include "exec.h"
42#include "inline-frame.h"
43#include "tracepoint.h"
44#include "gdb/fileio.h"
45#include "agent.h"
46
47static void target_info (char *, int);
48
49static void default_terminal_info (char *, int);
50
51static int default_watchpoint_addr_within_range (struct target_ops *,
52 CORE_ADDR, CORE_ADDR, int);
53
54static int default_region_ok_for_hw_watchpoint (CORE_ADDR, int);
55
56static void tcomplain (void) ATTRIBUTE_NORETURN;
57
58static int nomemory (CORE_ADDR, char *, int, int, struct target_ops *);
59
60static int return_zero (void);
61
62static int return_one (void);
63
64static int return_minus_one (void);
65
66void target_ignore (void);
67
68static void target_command (char *, int);
69
70static struct target_ops *find_default_run_target (char *);
71
72static LONGEST default_xfer_partial (struct target_ops *ops,
73 enum target_object object,
74 const char *annex, gdb_byte *readbuf,
75 const gdb_byte *writebuf,
76 ULONGEST offset, LONGEST len);
77
78static LONGEST current_xfer_partial (struct target_ops *ops,
79 enum target_object object,
80 const char *annex, gdb_byte *readbuf,
81 const gdb_byte *writebuf,
82 ULONGEST offset, LONGEST len);
83
84static LONGEST target_xfer_partial (struct target_ops *ops,
85 enum target_object object,
86 const char *annex,
87 void *readbuf, const void *writebuf,
88 ULONGEST offset, LONGEST len);
89
90static struct gdbarch *default_thread_architecture (struct target_ops *ops,
91 ptid_t ptid);
92
93static void init_dummy_target (void);
94
95static struct target_ops debug_target;
96
97static void debug_to_open (char *, int);
98
99static void debug_to_prepare_to_store (struct regcache *);
100
101static void debug_to_files_info (struct target_ops *);
102
103static int debug_to_insert_breakpoint (struct gdbarch *,
104 struct bp_target_info *);
105
106static int debug_to_remove_breakpoint (struct gdbarch *,
107 struct bp_target_info *);
108
109static int debug_to_can_use_hw_breakpoint (int, int, int);
110
111static int debug_to_insert_hw_breakpoint (struct gdbarch *,
112 struct bp_target_info *);
113
114static int debug_to_remove_hw_breakpoint (struct gdbarch *,
115 struct bp_target_info *);
116
117static int debug_to_insert_watchpoint (CORE_ADDR, int, int,
118 struct expression *);
119
120static int debug_to_remove_watchpoint (CORE_ADDR, int, int,
121 struct expression *);
122
123static int debug_to_stopped_by_watchpoint (void);
124
125static int debug_to_stopped_data_address (struct target_ops *, CORE_ADDR *);
126
127static int debug_to_watchpoint_addr_within_range (struct target_ops *,
128 CORE_ADDR, CORE_ADDR, int);
129
130static int debug_to_region_ok_for_hw_watchpoint (CORE_ADDR, int);
131
132static int debug_to_can_accel_watchpoint_condition (CORE_ADDR, int, int,
133 struct expression *);
134
135static void debug_to_terminal_init (void);
136
137static void debug_to_terminal_inferior (void);
138
139static void debug_to_terminal_ours_for_output (void);
140
141static void debug_to_terminal_save_ours (void);
142
143static void debug_to_terminal_ours (void);
144
145static void debug_to_terminal_info (char *, int);
146
147static void debug_to_load (char *, int);
148
149static int debug_to_can_run (void);
150
151static void debug_to_stop (ptid_t);
152
153/* Pointer to array of target architecture structures; the size of the
154 array; the current index into the array; the allocated size of the
155 array. */
156struct target_ops **target_structs;
157unsigned target_struct_size;
158unsigned target_struct_index;
159unsigned target_struct_allocsize;
160#define DEFAULT_ALLOCSIZE 10
161
162/* The initial current target, so that there is always a semi-valid
163 current target. */
164
165static struct target_ops dummy_target;
166
167/* Top of target stack. */
168
169static struct target_ops *target_stack;
170
171/* The target structure we are currently using to talk to a process
172 or file or whatever "inferior" we have. */
173
174struct target_ops current_target;
175
176/* Command list for target. */
177
178static struct cmd_list_element *targetlist = NULL;
179
180/* Nonzero if we should trust readonly sections from the
181 executable when reading memory. */
182
183static int trust_readonly = 0;
184
185/* Nonzero if we should show true memory content including
186 memory breakpoint inserted by gdb. */
187
188static int show_memory_breakpoints = 0;
189
190/* These globals control whether GDB attempts to perform these
191 operations; they are useful for targets that need to prevent
192 inadvertent disruption, such as in non-stop mode. */
193
194int may_write_registers = 1;
195
196int may_write_memory = 1;
197
198int may_insert_breakpoints = 1;
199
200int may_insert_tracepoints = 1;
201
202int may_insert_fast_tracepoints = 1;
203
204int may_stop = 1;
205
206/* Non-zero if we want to see trace of target level stuff. */
207
208static unsigned int targetdebug = 0;
209static void
210show_targetdebug (struct ui_file *file, int from_tty,
211 struct cmd_list_element *c, const char *value)
212{
213 fprintf_filtered (file, _("Target debugging is %s.\n"), value);
214}
215
216static void setup_target_debug (void);
217
218/* The option sets this. */
219static int stack_cache_enabled_p_1 = 1;
220/* And set_stack_cache_enabled_p updates this.
221 The reason for the separation is so that we don't flush the cache for
222 on->on transitions. */
223static int stack_cache_enabled_p = 1;
224
225/* This is called *after* the stack-cache has been set.
226 Flush the cache for off->on and on->off transitions.
227 There's no real need to flush the cache for on->off transitions,
228 except cleanliness. */
229
230static void
231set_stack_cache_enabled_p (char *args, int from_tty,
232 struct cmd_list_element *c)
233{
234 if (stack_cache_enabled_p != stack_cache_enabled_p_1)
235 target_dcache_invalidate ();
236
237 stack_cache_enabled_p = stack_cache_enabled_p_1;
238}
239
240static void
241show_stack_cache_enabled_p (struct ui_file *file, int from_tty,
242 struct cmd_list_element *c, const char *value)
243{
244 fprintf_filtered (file, _("Cache use for stack accesses is %s.\n"), value);
245}
246
247/* Cache of memory operations, to speed up remote access. */
248static DCACHE *target_dcache;
249
250/* Invalidate the target dcache. */
251
252void
253target_dcache_invalidate (void)
254{
255 dcache_invalidate (target_dcache);
256}
257
258/* The user just typed 'target' without the name of a target. */
259
260static void
261target_command (char *arg, int from_tty)
262{
263 fputs_filtered ("Argument required (target name). Try `help target'\n",
264 gdb_stdout);
265}
266
267/* Default target_has_* methods for process_stratum targets. */
268
269int
270default_child_has_all_memory (struct target_ops *ops)
271{
272 /* If no inferior selected, then we can't read memory here. */
273 if (ptid_equal (inferior_ptid, null_ptid))
274 return 0;
275
276 return 1;
277}
278
279int
280default_child_has_memory (struct target_ops *ops)
281{
282 /* If no inferior selected, then we can't read memory here. */
283 if (ptid_equal (inferior_ptid, null_ptid))
284 return 0;
285
286 return 1;
287}
288
289int
290default_child_has_stack (struct target_ops *ops)
291{
292 /* If no inferior selected, there's no stack. */
293 if (ptid_equal (inferior_ptid, null_ptid))
294 return 0;
295
296 return 1;
297}
298
299int
300default_child_has_registers (struct target_ops *ops)
301{
302 /* Can't read registers from no inferior. */
303 if (ptid_equal (inferior_ptid, null_ptid))
304 return 0;
305
306 return 1;
307}
308
309int
310default_child_has_execution (struct target_ops *ops, ptid_t the_ptid)
311{
312 /* If there's no thread selected, then we can't make it run through
313 hoops. */
314 if (ptid_equal (the_ptid, null_ptid))
315 return 0;
316
317 return 1;
318}
319
320
321int
322target_has_all_memory_1 (void)
323{
324 struct target_ops *t;
325
326 for (t = current_target.beneath; t != NULL; t = t->beneath)
327 if (t->to_has_all_memory (t))
328 return 1;
329
330 return 0;
331}
332
333int
334target_has_memory_1 (void)
335{
336 struct target_ops *t;
337
338 for (t = current_target.beneath; t != NULL; t = t->beneath)
339 if (t->to_has_memory (t))
340 return 1;
341
342 return 0;
343}
344
345int
346target_has_stack_1 (void)
347{
348 struct target_ops *t;
349
350 for (t = current_target.beneath; t != NULL; t = t->beneath)
351 if (t->to_has_stack (t))
352 return 1;
353
354 return 0;
355}
356
357int
358target_has_registers_1 (void)
359{
360 struct target_ops *t;
361
362 for (t = current_target.beneath; t != NULL; t = t->beneath)
363 if (t->to_has_registers (t))
364 return 1;
365
366 return 0;
367}
368
369int
370target_has_execution_1 (ptid_t the_ptid)
371{
372 struct target_ops *t;
373
374 for (t = current_target.beneath; t != NULL; t = t->beneath)
375 if (t->to_has_execution (t, the_ptid))
376 return 1;
377
378 return 0;
379}
380
381int
382target_has_execution_current (void)
383{
384 return target_has_execution_1 (inferior_ptid);
385}
386
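/* Illustrative sketch, not part of this file: code elsewhere in GDB
   normally reaches the *_1 helpers above through the target_has_*
   macros in target.h.  A hypothetical guard before touching the
   inferior's state could look like this.  */
#if 0
static void
example_require_running_inferior (void)
{
  if (!target_has_execution_current ())
    error (_("The program is not being run."));

  if (!target_has_registers_1 ())
    error (_("No registers."));
}
#endif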
387/* Add a possible target architecture to the list. */
388
389void
390add_target (struct target_ops *t)
391{
392 /* Provide default values for all "must have" methods. */
393 if (t->to_xfer_partial == NULL)
394 t->to_xfer_partial = default_xfer_partial;
395
396 if (t->to_has_all_memory == NULL)
397 t->to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
398
399 if (t->to_has_memory == NULL)
400 t->to_has_memory = (int (*) (struct target_ops *)) return_zero;
401
402 if (t->to_has_stack == NULL)
403 t->to_has_stack = (int (*) (struct target_ops *)) return_zero;
404
405 if (t->to_has_registers == NULL)
406 t->to_has_registers = (int (*) (struct target_ops *)) return_zero;
407
408 if (t->to_has_execution == NULL)
409 t->to_has_execution = (int (*) (struct target_ops *, ptid_t)) return_zero;
410
411 if (!target_structs)
412 {
413 target_struct_allocsize = DEFAULT_ALLOCSIZE;
414 target_structs = (struct target_ops **) xmalloc
415 (target_struct_allocsize * sizeof (*target_structs));
416 }
417 if (target_struct_size >= target_struct_allocsize)
418 {
419 target_struct_allocsize *= 2;
420 target_structs = (struct target_ops **)
421 xrealloc ((char *) target_structs,
422 target_struct_allocsize * sizeof (*target_structs));
423 }
424 target_structs[target_struct_size++] = t;
425
426 if (targetlist == NULL)
427 add_prefix_cmd ("target", class_run, target_command, _("\
428Connect to a target machine or process.\n\
429The first argument is the type or protocol of the target machine.\n\
430Remaining arguments are interpreted by the target protocol. For more\n\
431information on the arguments for a particular protocol, type\n\
432`help target ' followed by the protocol name."),
433 &targetlist, "target ", 0, &cmdlist);
434 add_cmd (t->to_shortname, no_class, t->to_open, t->to_doc, &targetlist);
435}
436
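/* Illustrative sketch, not part of this file: a backend typically
   fills in a statically allocated target_ops and hands it to
   add_target from its _initialize_* function.  The "example" names
   below are hypothetical; any method left NULL is given a default by
   add_target and update_current_target.  */
#if 0
static struct target_ops example_ops;

static void
example_open (char *args, int from_tty)
{
  push_target (&example_ops);
}

static void
init_example_ops (void)
{
  example_ops.to_shortname = "example";
  example_ops.to_longname = "Example target backend";
  example_ops.to_doc = "Hypothetical backend used only for illustration.";
  example_ops.to_open = example_open;
  example_ops.to_stratum = process_stratum;
  example_ops.to_magic = OPS_MAGIC;
  add_target (&example_ops);
}
#endif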
437/* See target.h. */
438
439void
440add_deprecated_target_alias (struct target_ops *t, char *alias)
441{
442 struct cmd_list_element *c;
443 char *alt;
444
445 /* If we use add_alias_cmd, here, we do not get the deprecated warning,
446 see PR cli/15104. */
447 c = add_cmd (alias, no_class, t->to_open, t->to_doc, &targetlist);
448 alt = xstrprintf ("target %s", t->to_shortname);
449 deprecate_cmd (c, alt);
450}
451
452/* Stub functions */
453
454void
455target_ignore (void)
456{
457}
458
459void
460target_kill (void)
461{
462 struct target_ops *t;
463
464 for (t = current_target.beneath; t != NULL; t = t->beneath)
465 if (t->to_kill != NULL)
466 {
467 if (targetdebug)
468 fprintf_unfiltered (gdb_stdlog, "target_kill ()\n");
469
470 t->to_kill (t);
471 return;
472 }
473
474 noprocess ();
475}
476
477void
478target_load (char *arg, int from_tty)
479{
480 target_dcache_invalidate ();
481 (*current_target.to_load) (arg, from_tty);
482}
483
484void
485target_create_inferior (char *exec_file, char *args,
486 char **env, int from_tty)
487{
488 struct target_ops *t;
489
490 for (t = current_target.beneath; t != NULL; t = t->beneath)
491 {
492 if (t->to_create_inferior != NULL)
493 {
494 t->to_create_inferior (t, exec_file, args, env, from_tty);
495 if (targetdebug)
496 fprintf_unfiltered (gdb_stdlog,
497 "target_create_inferior (%s, %s, xxx, %d)\n",
498 exec_file, args, from_tty);
499 return;
500 }
501 }
502
503 internal_error (__FILE__, __LINE__,
504 _("could not find a target to create inferior"));
505}
506
507void
508target_terminal_inferior (void)
509{
510 /* A background resume (``run&'') should leave GDB in control of the
511 terminal. Use target_can_async_p, not target_is_async_p, since at
512 this point the target is not async yet. However, if sync_execution
513 is not set, we know it will become async prior to resume. */
514 if (target_can_async_p () && !sync_execution)
515 return;
516
517 /* If GDB is resuming the inferior in the foreground, install
518 inferior's terminal modes. */
519 (*current_target.to_terminal_inferior) ();
520}
521
522static int
523nomemory (CORE_ADDR memaddr, char *myaddr, int len, int write,
524 struct target_ops *t)
525{
526 errno = EIO; /* Can't read/write this location. */
527 return 0; /* No bytes handled. */
528}
529
530static void
531tcomplain (void)
532{
533 error (_("You can't do that when your target is `%s'"),
534 current_target.to_shortname);
535}
536
537void
538noprocess (void)
539{
540 error (_("You can't do that without a process to debug."));
541}
542
543static void
544default_terminal_info (char *args, int from_tty)
545{
546 printf_unfiltered (_("No saved terminal information.\n"));
547}
548
549/* A default implementation for the to_get_ada_task_ptid target method.
550
551 This function builds the PTID by using both LWP and TID as part of
552 the PTID lwp and tid elements. The pid used is the pid of the
553 inferior_ptid. */
554
555static ptid_t
556default_get_ada_task_ptid (long lwp, long tid)
557{
558 return ptid_build (ptid_get_pid (inferior_ptid), lwp, tid);
559}
560
561static enum exec_direction_kind
562default_execution_direction (void)
563{
564 if (!target_can_execute_reverse)
565 return EXEC_FORWARD;
566 else if (!target_can_async_p ())
567 return EXEC_FORWARD;
568 else
569 gdb_assert_not_reached ("\
570to_execution_direction must be implemented for reverse async");
571}
572
573/* Go through the target stack from top to bottom, copying over zero
574 entries in current_target, then filling in still empty entries. In
575 effect, we are doing class inheritance through the pushed target
576 vectors.
577
578 NOTE: cagney/2003-10-17: The problem with this inheritance, as it
579 is currently implemented, is that it discards any knowledge of
580 which target an inherited method originally belonged to.
581 Consequently, new target methods should instead explicitly and
582 locally search the target stack for the target that can handle the
583 request. */
584
585static void
586update_current_target (void)
587{
588 struct target_ops *t;
589
590 /* First, reset current's contents. */
591 memset (&current_target, 0, sizeof (current_target));
592
593#define INHERIT(FIELD, TARGET) \
594 if (!current_target.FIELD) \
595 current_target.FIELD = (TARGET)->FIELD
596
597 for (t = target_stack; t; t = t->beneath)
598 {
599 INHERIT (to_shortname, t);
600 INHERIT (to_longname, t);
601 INHERIT (to_doc, t);
602 /* Do not inherit to_open. */
603 /* Do not inherit to_close. */
604 /* Do not inherit to_attach. */
605 INHERIT (to_post_attach, t);
606 INHERIT (to_attach_no_wait, t);
607 /* Do not inherit to_detach. */
608 /* Do not inherit to_disconnect. */
609 /* Do not inherit to_resume. */
610 /* Do not inherit to_wait. */
611 /* Do not inherit to_fetch_registers. */
612 /* Do not inherit to_store_registers. */
613 INHERIT (to_prepare_to_store, t);
614 INHERIT (deprecated_xfer_memory, t);
615 INHERIT (to_files_info, t);
616 INHERIT (to_insert_breakpoint, t);
617 INHERIT (to_remove_breakpoint, t);
618 INHERIT (to_can_use_hw_breakpoint, t);
619 INHERIT (to_insert_hw_breakpoint, t);
620 INHERIT (to_remove_hw_breakpoint, t);
621 /* Do not inherit to_ranged_break_num_registers. */
622 INHERIT (to_insert_watchpoint, t);
623 INHERIT (to_remove_watchpoint, t);
624 /* Do not inherit to_insert_mask_watchpoint. */
625 /* Do not inherit to_remove_mask_watchpoint. */
626 INHERIT (to_stopped_data_address, t);
627 INHERIT (to_have_steppable_watchpoint, t);
628 INHERIT (to_have_continuable_watchpoint, t);
629 INHERIT (to_stopped_by_watchpoint, t);
630 INHERIT (to_watchpoint_addr_within_range, t);
631 INHERIT (to_region_ok_for_hw_watchpoint, t);
632 INHERIT (to_can_accel_watchpoint_condition, t);
633 /* Do not inherit to_masked_watch_num_registers. */
634 INHERIT (to_terminal_init, t);
635 INHERIT (to_terminal_inferior, t);
636 INHERIT (to_terminal_ours_for_output, t);
637 INHERIT (to_terminal_ours, t);
638 INHERIT (to_terminal_save_ours, t);
639 INHERIT (to_terminal_info, t);
640 /* Do not inherit to_kill. */
641 INHERIT (to_load, t);
642 /* Do not inherit to_create_inferior. */
643 INHERIT (to_post_startup_inferior, t);
644 INHERIT (to_insert_fork_catchpoint, t);
645 INHERIT (to_remove_fork_catchpoint, t);
646 INHERIT (to_insert_vfork_catchpoint, t);
647 INHERIT (to_remove_vfork_catchpoint, t);
648 /* Do not inherit to_follow_fork. */
649 INHERIT (to_insert_exec_catchpoint, t);
650 INHERIT (to_remove_exec_catchpoint, t);
651 INHERIT (to_set_syscall_catchpoint, t);
652 INHERIT (to_has_exited, t);
653 /* Do not inherit to_mourn_inferior. */
654 INHERIT (to_can_run, t);
655 /* Do not inherit to_pass_signals. */
656 /* Do not inherit to_program_signals. */
657 /* Do not inherit to_thread_alive. */
658 /* Do not inherit to_find_new_threads. */
659 /* Do not inherit to_pid_to_str. */
660 INHERIT (to_extra_thread_info, t);
661 INHERIT (to_thread_name, t);
662 INHERIT (to_stop, t);
663 /* Do not inherit to_xfer_partial. */
664 INHERIT (to_rcmd, t);
665 INHERIT (to_pid_to_exec_file, t);
666 INHERIT (to_log_command, t);
667 INHERIT (to_stratum, t);
668 /* Do not inherit to_has_all_memory. */
669 /* Do not inherit to_has_memory. */
670 /* Do not inherit to_has_stack. */
671 /* Do not inherit to_has_registers. */
672 /* Do not inherit to_has_execution. */
673 INHERIT (to_has_thread_control, t);
674 INHERIT (to_can_async_p, t);
675 INHERIT (to_is_async_p, t);
676 INHERIT (to_async, t);
677 INHERIT (to_find_memory_regions, t);
678 INHERIT (to_make_corefile_notes, t);
679 INHERIT (to_get_bookmark, t);
680 INHERIT (to_goto_bookmark, t);
681 /* Do not inherit to_get_thread_local_address. */
682 INHERIT (to_can_execute_reverse, t);
683 INHERIT (to_execution_direction, t);
684 INHERIT (to_thread_architecture, t);
685 /* Do not inherit to_read_description. */
686 INHERIT (to_get_ada_task_ptid, t);
687 /* Do not inherit to_search_memory. */
688 INHERIT (to_supports_multi_process, t);
689 INHERIT (to_supports_enable_disable_tracepoint, t);
690 INHERIT (to_supports_string_tracing, t);
691 INHERIT (to_trace_init, t);
692 INHERIT (to_download_tracepoint, t);
693 INHERIT (to_can_download_tracepoint, t);
694 INHERIT (to_download_trace_state_variable, t);
695 INHERIT (to_enable_tracepoint, t);
696 INHERIT (to_disable_tracepoint, t);
697 INHERIT (to_trace_set_readonly_regions, t);
698 INHERIT (to_trace_start, t);
699 INHERIT (to_get_trace_status, t);
700 INHERIT (to_get_tracepoint_status, t);
701 INHERIT (to_trace_stop, t);
702 INHERIT (to_trace_find, t);
703 INHERIT (to_get_trace_state_variable_value, t);
704 INHERIT (to_save_trace_data, t);
705 INHERIT (to_upload_tracepoints, t);
706 INHERIT (to_upload_trace_state_variables, t);
707 INHERIT (to_get_raw_trace_data, t);
708 INHERIT (to_get_min_fast_tracepoint_insn_len, t);
709 INHERIT (to_set_disconnected_tracing, t);
710 INHERIT (to_set_circular_trace_buffer, t);
711 INHERIT (to_set_trace_buffer_size, t);
712 INHERIT (to_set_trace_notes, t);
713 INHERIT (to_get_tib_address, t);
714 INHERIT (to_set_permissions, t);
715 INHERIT (to_static_tracepoint_marker_at, t);
716 INHERIT (to_static_tracepoint_markers_by_strid, t);
717 INHERIT (to_traceframe_info, t);
718 INHERIT (to_use_agent, t);
719 INHERIT (to_can_use_agent, t);
720 INHERIT (to_magic, t);
721 INHERIT (to_supports_evaluation_of_breakpoint_conditions, t);
722 INHERIT (to_can_run_breakpoint_commands, t);
723 /* Do not inherit to_memory_map. */
724 /* Do not inherit to_flash_erase. */
725 /* Do not inherit to_flash_done. */
726 }
727#undef INHERIT
728
729 /* Clean up a target struct so it no longer has any zero pointers in
730 it. Some entries are defaulted to a method that print an error,
731 others are hard-wired to a standard recursive default. */
732
733#define de_fault(field, value) \
734 if (!current_target.field) \
735 current_target.field = value
736
737 de_fault (to_open,
738 (void (*) (char *, int))
739 tcomplain);
740 de_fault (to_close,
741 (void (*) (int))
742 target_ignore);
743 de_fault (to_post_attach,
744 (void (*) (int))
745 target_ignore);
746 de_fault (to_prepare_to_store,
747 (void (*) (struct regcache *))
748 noprocess);
749 de_fault (deprecated_xfer_memory,
750 (int (*) (CORE_ADDR, gdb_byte *, int, int,
751 struct mem_attrib *, struct target_ops *))
752 nomemory);
753 de_fault (to_files_info,
754 (void (*) (struct target_ops *))
755 target_ignore);
756 de_fault (to_insert_breakpoint,
757 memory_insert_breakpoint);
758 de_fault (to_remove_breakpoint,
759 memory_remove_breakpoint);
760 de_fault (to_can_use_hw_breakpoint,
761 (int (*) (int, int, int))
762 return_zero);
763 de_fault (to_insert_hw_breakpoint,
764 (int (*) (struct gdbarch *, struct bp_target_info *))
765 return_minus_one);
766 de_fault (to_remove_hw_breakpoint,
767 (int (*) (struct gdbarch *, struct bp_target_info *))
768 return_minus_one);
769 de_fault (to_insert_watchpoint,
770 (int (*) (CORE_ADDR, int, int, struct expression *))
771 return_minus_one);
772 de_fault (to_remove_watchpoint,
773 (int (*) (CORE_ADDR, int, int, struct expression *))
774 return_minus_one);
775 de_fault (to_stopped_by_watchpoint,
776 (int (*) (void))
777 return_zero);
778 de_fault (to_stopped_data_address,
779 (int (*) (struct target_ops *, CORE_ADDR *))
780 return_zero);
781 de_fault (to_watchpoint_addr_within_range,
782 default_watchpoint_addr_within_range);
783 de_fault (to_region_ok_for_hw_watchpoint,
784 default_region_ok_for_hw_watchpoint);
785 de_fault (to_can_accel_watchpoint_condition,
786 (int (*) (CORE_ADDR, int, int, struct expression *))
787 return_zero);
788 de_fault (to_terminal_init,
789 (void (*) (void))
790 target_ignore);
791 de_fault (to_terminal_inferior,
792 (void (*) (void))
793 target_ignore);
794 de_fault (to_terminal_ours_for_output,
795 (void (*) (void))
796 target_ignore);
797 de_fault (to_terminal_ours,
798 (void (*) (void))
799 target_ignore);
800 de_fault (to_terminal_save_ours,
801 (void (*) (void))
802 target_ignore);
803 de_fault (to_terminal_info,
804 default_terminal_info);
805 de_fault (to_load,
806 (void (*) (char *, int))
807 tcomplain);
808 de_fault (to_post_startup_inferior,
809 (void (*) (ptid_t))
810 target_ignore);
811 de_fault (to_insert_fork_catchpoint,
812 (int (*) (int))
813 return_one);
814 de_fault (to_remove_fork_catchpoint,
815 (int (*) (int))
816 return_one);
817 de_fault (to_insert_vfork_catchpoint,
818 (int (*) (int))
819 return_one);
820 de_fault (to_remove_vfork_catchpoint,
821 (int (*) (int))
822 return_one);
823 de_fault (to_insert_exec_catchpoint,
824 (int (*) (int))
825 return_one);
826 de_fault (to_remove_exec_catchpoint,
827 (int (*) (int))
828 return_one);
829 de_fault (to_set_syscall_catchpoint,
830 (int (*) (int, int, int, int, int *))
831 return_one);
832 de_fault (to_has_exited,
833 (int (*) (int, int, int *))
834 return_zero);
835 de_fault (to_can_run,
836 return_zero);
837 de_fault (to_extra_thread_info,
838 (char *(*) (struct thread_info *))
839 return_zero);
840 de_fault (to_thread_name,
841 (char *(*) (struct thread_info *))
842 return_zero);
843 de_fault (to_stop,
844 (void (*) (ptid_t))
845 target_ignore);
846 current_target.to_xfer_partial = current_xfer_partial;
847 de_fault (to_rcmd,
848 (void (*) (char *, struct ui_file *))
849 tcomplain);
850 de_fault (to_pid_to_exec_file,
851 (char *(*) (int))
852 return_zero);
853 de_fault (to_async,
854 (void (*) (void (*) (enum inferior_event_type, void*), void*))
855 tcomplain);
856 de_fault (to_thread_architecture,
857 default_thread_architecture);
858 current_target.to_read_description = NULL;
859 de_fault (to_get_ada_task_ptid,
860 (ptid_t (*) (long, long))
861 default_get_ada_task_ptid);
862 de_fault (to_supports_multi_process,
863 (int (*) (void))
864 return_zero);
865 de_fault (to_supports_enable_disable_tracepoint,
866 (int (*) (void))
867 return_zero);
868 de_fault (to_supports_string_tracing,
869 (int (*) (void))
870 return_zero);
871 de_fault (to_trace_init,
872 (void (*) (void))
873 tcomplain);
874 de_fault (to_download_tracepoint,
875 (void (*) (struct bp_location *))
876 tcomplain);
877 de_fault (to_can_download_tracepoint,
878 (int (*) (void))
879 return_zero);
880 de_fault (to_download_trace_state_variable,
881 (void (*) (struct trace_state_variable *))
882 tcomplain);
883 de_fault (to_enable_tracepoint,
884 (void (*) (struct bp_location *))
885 tcomplain);
886 de_fault (to_disable_tracepoint,
887 (void (*) (struct bp_location *))
888 tcomplain);
889 de_fault (to_trace_set_readonly_regions,
890 (void (*) (void))
891 tcomplain);
892 de_fault (to_trace_start,
893 (void (*) (void))
894 tcomplain);
895 de_fault (to_get_trace_status,
896 (int (*) (struct trace_status *))
897 return_minus_one);
898 de_fault (to_get_tracepoint_status,
899 (void (*) (struct breakpoint *, struct uploaded_tp *))
900 tcomplain);
901 de_fault (to_trace_stop,
902 (void (*) (void))
903 tcomplain);
904 de_fault (to_trace_find,
905 (int (*) (enum trace_find_type, int, ULONGEST, ULONGEST, int *))
906 return_minus_one);
907 de_fault (to_get_trace_state_variable_value,
908 (int (*) (int, LONGEST *))
909 return_zero);
910 de_fault (to_save_trace_data,
911 (int (*) (const char *))
912 tcomplain);
913 de_fault (to_upload_tracepoints,
914 (int (*) (struct uploaded_tp **))
915 return_zero);
916 de_fault (to_upload_trace_state_variables,
917 (int (*) (struct uploaded_tsv **))
918 return_zero);
919 de_fault (to_get_raw_trace_data,
920 (LONGEST (*) (gdb_byte *, ULONGEST, LONGEST))
921 tcomplain);
922 de_fault (to_get_min_fast_tracepoint_insn_len,
923 (int (*) (void))
924 return_minus_one);
925 de_fault (to_set_disconnected_tracing,
926 (void (*) (int))
927 target_ignore);
928 de_fault (to_set_circular_trace_buffer,
929 (void (*) (int))
930 target_ignore);
931 de_fault (to_set_trace_buffer_size,
932 (void (*) (LONGEST))
933 target_ignore);
934 de_fault (to_set_trace_notes,
935 (int (*) (char *, char *, char *))
936 return_zero);
937 de_fault (to_get_tib_address,
938 (int (*) (ptid_t, CORE_ADDR *))
939 tcomplain);
940 de_fault (to_set_permissions,
941 (void (*) (void))
942 target_ignore);
943 de_fault (to_static_tracepoint_marker_at,
944 (int (*) (CORE_ADDR, struct static_tracepoint_marker *))
945 return_zero);
946 de_fault (to_static_tracepoint_markers_by_strid,
947 (VEC(static_tracepoint_marker_p) * (*) (const char *))
948 tcomplain);
949 de_fault (to_traceframe_info,
950 (struct traceframe_info * (*) (void))
951 tcomplain);
952 de_fault (to_supports_evaluation_of_breakpoint_conditions,
953 (int (*) (void))
954 return_zero);
955 de_fault (to_can_run_breakpoint_commands,
956 (int (*) (void))
957 return_zero);
958 de_fault (to_use_agent,
959 (int (*) (int))
960 tcomplain);
961 de_fault (to_can_use_agent,
962 (int (*) (void))
963 return_zero);
964 de_fault (to_execution_direction, default_execution_direction);
965
966#undef de_fault
967
968 /* Finally, position the target-stack beneath the squashed
969 "current_target". That way code looking for a non-inherited
970 target method can quickly and simply find it. */
971 current_target.beneath = target_stack;
972
973 if (targetdebug)
974 setup_target_debug ();
975}
976
977/* Push a new target type into the stack of the existing target accessors,
978 possibly superseding some of the existing accessors.
979
980 Rather than allow an empty stack, we always have the dummy target at
981 the bottom stratum, so we can call the function vectors without
982 checking them. */
983
984void
985push_target (struct target_ops *t)
986{
987 struct target_ops **cur;
988
989 /* Check magic number. If wrong, it probably means someone changed
990 the struct definition, but not all the places that initialize one. */
991 if (t->to_magic != OPS_MAGIC)
992 {
993 fprintf_unfiltered (gdb_stderr,
994 "Magic number of %s target struct wrong\n",
995 t->to_shortname);
996 internal_error (__FILE__, __LINE__,
997 _("failed internal consistency check"));
998 }
999
1000 /* Find the proper stratum to install this target in. */
1001 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1002 {
1003 if ((int) (t->to_stratum) >= (int) (*cur)->to_stratum)
1004 break;
1005 }
1006
1007 /* If there are already targets at this stratum, remove them. */
1008 /* FIXME: cagney/2003-10-15: I think this should be popping all
1009 targets to CUR, and not just those at this stratum level. */
1010 while ((*cur) != NULL && t->to_stratum == (*cur)->to_stratum)
1011 {
1012 /* There's already something at this stratum level. Close it,
1013 and un-hook it from the stack. */
1014 struct target_ops *tmp = (*cur);
1015
1016 (*cur) = (*cur)->beneath;
1017 tmp->beneath = NULL;
1018 target_close (tmp, 0);
1019 }
1020
1021 /* We have removed all targets in our stratum, now add the new one. */
1022 t->beneath = (*cur);
1023 (*cur) = t;
1024
1025 update_current_target ();
1026}
1027
1028/* Remove a target_ops vector from the stack, wherever it may be.
1029 Return how many times it was removed (0 or 1). */
1030
1031int
1032unpush_target (struct target_ops *t)
1033{
1034 struct target_ops **cur;
1035 struct target_ops *tmp;
1036
1037 if (t->to_stratum == dummy_stratum)
1038 internal_error (__FILE__, __LINE__,
1039 _("Attempt to unpush the dummy target"));
1040
1041 /* Look for the specified target. Note that we assume that a target
1042 can only occur once in the target stack. */
1043
1044 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1045 {
1046 if ((*cur) == t)
1047 break;
1048 }
1049
1050 /* If we don't find target_ops, quit. Only open targets should be
1051 closed. */
1052 if ((*cur) == NULL)
1053 return 0;
1054
1055 /* Unchain the target. */
1056 tmp = (*cur);
1057 (*cur) = (*cur)->beneath;
1058 tmp->beneath = NULL;
1059
1060 update_current_target ();
1061
1062 /* Finally close the target. Note we do this after unchaining, so
1063 any target method calls from within the target_close
1064 implementation don't end up in T anymore. */
1065 target_close (t, 0);
1066
1067 return 1;
1068}
1069
1070void
1071pop_target (void)
1072{
1073 target_close (target_stack, 0); /* Let it clean up. */
1074 if (unpush_target (target_stack) == 1)
1075 return;
1076
1077 fprintf_unfiltered (gdb_stderr,
1078 "pop_target couldn't find target %s\n",
1079 current_target.to_shortname);
1080 internal_error (__FILE__, __LINE__,
1081 _("failed internal consistency check"));
1082}
1083
1084void
1085pop_all_targets_above (enum strata above_stratum, int quitting)
1086{
1087 while ((int) (current_target.to_stratum) > (int) above_stratum)
1088 {
1089 target_close (target_stack, quitting);
1090 if (!unpush_target (target_stack))
1091 {
1092 fprintf_unfiltered (gdb_stderr,
1093 "pop_all_targets couldn't find target %s\n",
1094 target_stack->to_shortname);
1095 internal_error (__FILE__, __LINE__,
1096 _("failed internal consistency check"));
1097 break;
1098 }
1099 }
1100}
1101
1102void
1103pop_all_targets (int quitting)
1104{
1105 pop_all_targets_above (dummy_stratum, quitting);
1106}
1107
1108/* Return 1 if T is now pushed in the target stack. Return 0 otherwise. */
1109
1110int
1111target_is_pushed (struct target_ops *t)
1112{
1113 struct target_ops **cur;
1114
1115 /* Check magic number. If wrong, it probably means someone changed
1116 the struct definition, but not all the places that initialize one. */
1117 if (t->to_magic != OPS_MAGIC)
1118 {
1119 fprintf_unfiltered (gdb_stderr,
1120 "Magic number of %s target struct wrong\n",
1121 t->to_shortname);
1122 internal_error (__FILE__, __LINE__,
1123 _("failed internal consistency check"));
1124 }
1125
1126 for (cur = &target_stack; (*cur) != NULL; cur = &(*cur)->beneath)
1127 if (*cur == t)
1128 return 1;
1129
1130 return 0;
1131}
1132
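/* Illustrative sketch, not part of this file: push_target keeps the
   stack ordered by stratum (dummy_stratum at the bottom), so pushing a
   process_stratum target over a file_stratum target stacks them rather
   than replacing one another.  The ops pointers are hypothetical.  */
#if 0
static void
example_stack_order (struct target_ops *exec_ops,  /* file_stratum */
		     struct target_ops *native_ops /* process_stratum */)
{
  push_target (exec_ops);	/* stack: exec_ops, dummy */
  push_target (native_ops);	/* stack: native_ops, exec_ops, dummy */

  gdb_assert (target_is_pushed (exec_ops));
  gdb_assert (target_is_pushed (native_ops));

  unpush_target (native_ops);	/* stack: exec_ops, dummy */
}
#endif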
1133/* Using the objfile specified in OBJFILE, find the address for the
1134 current thread's thread-local storage with offset OFFSET. */
1135CORE_ADDR
1136target_translate_tls_address (struct objfile *objfile, CORE_ADDR offset)
1137{
1138 volatile CORE_ADDR addr = 0;
1139 struct target_ops *target;
1140
1141 for (target = current_target.beneath;
1142 target != NULL;
1143 target = target->beneath)
1144 {
1145 if (target->to_get_thread_local_address != NULL)
1146 break;
1147 }
1148
1149 if (target != NULL
1150 && gdbarch_fetch_tls_load_module_address_p (target_gdbarch ()))
1151 {
1152 ptid_t ptid = inferior_ptid;
1153 volatile struct gdb_exception ex;
1154
1155 TRY_CATCH (ex, RETURN_MASK_ALL)
1156 {
1157 CORE_ADDR lm_addr;
1158
1159 /* Fetch the load module address for this objfile. */
1160 lm_addr = gdbarch_fetch_tls_load_module_address (target_gdbarch (),
1161 objfile);
1162 /* If it's 0, throw the appropriate exception. */
1163 if (lm_addr == 0)
1164 throw_error (TLS_LOAD_MODULE_NOT_FOUND_ERROR,
1165 _("TLS load module not found"));
1166
1167 addr = target->to_get_thread_local_address (target, ptid,
1168 lm_addr, offset);
1169 }
1170 /* If an error occurred, print TLS related messages here. Otherwise,
1171 throw the error to some higher catcher. */
1172 if (ex.reason < 0)
1173 {
1174 int objfile_is_library = (objfile->flags & OBJF_SHARED);
1175
1176 switch (ex.error)
1177 {
1178 case TLS_NO_LIBRARY_SUPPORT_ERROR:
1179 error (_("Cannot find thread-local variables "
1180 "in this thread library."));
1181 break;
1182 case TLS_LOAD_MODULE_NOT_FOUND_ERROR:
1183 if (objfile_is_library)
1184 error (_("Cannot find shared library `%s' in dynamic"
1185 " linker's load module list"), objfile->name);
1186 else
1187 error (_("Cannot find executable file `%s' in dynamic"
1188 " linker's load module list"), objfile->name);
1189 break;
1190 case TLS_NOT_ALLOCATED_YET_ERROR:
1191 if (objfile_is_library)
1192 error (_("The inferior has not yet allocated storage for"
1193 " thread-local variables in\n"
1194 "the shared library `%s'\n"
1195 "for %s"),
1196 objfile->name, target_pid_to_str (ptid));
1197 else
1198 error (_("The inferior has not yet allocated storage for"
1199 " thread-local variables in\n"
1200 "the executable `%s'\n"
1201 "for %s"),
1202 objfile->name, target_pid_to_str (ptid));
1203 break;
1204 case TLS_GENERIC_ERROR:
1205 if (objfile_is_library)
1206 error (_("Cannot find thread-local storage for %s, "
1207 "shared library %s:\n%s"),
1208 target_pid_to_str (ptid),
1209 objfile->name, ex.message);
1210 else
1211 error (_("Cannot find thread-local storage for %s, "
1212 "executable file %s:\n%s"),
1213 target_pid_to_str (ptid),
1214 objfile->name, ex.message);
1215 break;
1216 default:
1217 throw_exception (ex);
1218 break;
1219 }
1220 }
1221 }
1222 /* It wouldn't be wrong here to try a gdbarch method, too; finding
1223 TLS is an ABI-specific thing. But we don't do that yet. */
1224 else
1225 error (_("Cannot find thread-local variables on this target"));
1226
1227 return addr;
1228}
1229
1230#undef MIN
1231#define MIN(A, B) (((A) <= (B)) ? (A) : (B))
1232
1233/* target_read_string -- read a null terminated string, up to LEN bytes,
1234 from MEMADDR in target. Set *ERRNOP to the errno code, or 0 if successful.
1235 Set *STRING to a pointer to malloc'd memory containing the data; the caller
1236 is responsible for freeing it. Return the number of bytes successfully
1237 read. */
1238
1239int
1240target_read_string (CORE_ADDR memaddr, char **string, int len, int *errnop)
1241{
1242 int tlen, offset, i;
1243 gdb_byte buf[4];
1244 int errcode = 0;
1245 char *buffer;
1246 int buffer_allocated;
1247 char *bufptr;
1248 unsigned int nbytes_read = 0;
1249
1250 gdb_assert (string);
1251
1252 /* Small for testing. */
1253 buffer_allocated = 4;
1254 buffer = xmalloc (buffer_allocated);
1255 bufptr = buffer;
1256
1257 while (len > 0)
1258 {
1259 tlen = MIN (len, 4 - (memaddr & 3));
1260 offset = memaddr & 3;
1261
1262 errcode = target_read_memory (memaddr & ~3, buf, sizeof buf);
1263 if (errcode != 0)
1264 {
1265 /* The transfer request might have crossed the boundary to an
1266 unallocated region of memory. Retry the transfer, requesting
1267 a single byte. */
1268 tlen = 1;
1269 offset = 0;
1270 errcode = target_read_memory (memaddr, buf, 1);
1271 if (errcode != 0)
1272 goto done;
1273 }
1274
1275 if (bufptr - buffer + tlen > buffer_allocated)
1276 {
1277 unsigned int bytes;
1278
1279 bytes = bufptr - buffer;
1280 buffer_allocated *= 2;
1281 buffer = xrealloc (buffer, buffer_allocated);
1282 bufptr = buffer + bytes;
1283 }
1284
1285 for (i = 0; i < tlen; i++)
1286 {
1287 *bufptr++ = buf[i + offset];
1288 if (buf[i + offset] == '\000')
1289 {
1290 nbytes_read += i + 1;
1291 goto done;
1292 }
1293 }
1294
1295 memaddr += tlen;
1296 len -= tlen;
1297 nbytes_read += tlen;
1298 }
1299done:
1300 *string = buffer;
1301 if (errnop != NULL)
1302 *errnop = errcode;
1303 return nbytes_read;
1304}
1305
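/* Illustrative sketch, not part of this file: typical use of
   target_read_string above.  ADDR is a hypothetical address of a
   NUL-terminated string in the inferior; the caller owns the returned
   buffer and must xfree it even on error.  The buffer is only
   NUL-terminated if a NUL was found within the length limit.  */
#if 0
static void
example_print_inferior_string (CORE_ADDR addr)
{
  char *str;
  int errcode;
  int nread = target_read_string (addr, &str, 200, &errcode);

  if (errcode == 0 && nread > 0 && str[nread - 1] == '\0')
    printf_unfiltered ("%s\n", str);

  xfree (str);
}
#endif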
1306struct target_section_table *
1307target_get_section_table (struct target_ops *target)
1308{
1309 struct target_ops *t;
1310
1311 if (targetdebug)
1312 fprintf_unfiltered (gdb_stdlog, "target_get_section_table ()\n");
1313
1314 for (t = target; t != NULL; t = t->beneath)
1315 if (t->to_get_section_table != NULL)
1316 return (*t->to_get_section_table) (t);
1317
1318 return NULL;
1319}
1320
1321/* Find a section containing ADDR. */
1322
1323struct target_section *
1324target_section_by_addr (struct target_ops *target, CORE_ADDR addr)
1325{
1326 struct target_section_table *table = target_get_section_table (target);
1327 struct target_section *secp;
1328
1329 if (table == NULL)
1330 return NULL;
1331
1332 for (secp = table->sections; secp < table->sections_end; secp++)
1333 {
1334 if (addr >= secp->addr && addr < secp->endaddr)
1335 return secp;
1336 }
1337 return NULL;
1338}
1339
1340/* Read memory from the live target, even if currently inspecting a
1341 traceframe. The return is the same as that of target_read. */
1342
1343static LONGEST
1344target_read_live_memory (enum target_object object,
1345 ULONGEST memaddr, gdb_byte *myaddr, LONGEST len)
1346{
1347 int ret;
1348 struct cleanup *cleanup;
1349
1350 /* Switch momentarily out of tfind mode so to access live memory.
1351 Note that this must not clear global state, such as the frame
1352 cache, which must still remain valid for the previous traceframe.
1353 We may be _building_ the frame cache at this point. */
1354 cleanup = make_cleanup_restore_traceframe_number ();
1355 set_traceframe_number (-1);
1356
1357 ret = target_read (current_target.beneath, object, NULL,
1358 myaddr, memaddr, len);
1359
1360 do_cleanups (cleanup);
1361 return ret;
1362}
1363
1364/* Using the set of read-only target sections of OPS, read live
1365 read-only memory. Note that the actual reads start from the
1366 top-most target again.
1367
1368 For interface/parameters/return description see target.h,
1369 to_xfer_partial. */
1370
1371static LONGEST
1372memory_xfer_live_readonly_partial (struct target_ops *ops,
1373 enum target_object object,
1374 gdb_byte *readbuf, ULONGEST memaddr,
1375 LONGEST len)
1376{
1377 struct target_section *secp;
1378 struct target_section_table *table;
1379
1380 secp = target_section_by_addr (ops, memaddr);
1381 if (secp != NULL
1382 && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
1383 & SEC_READONLY))
1384 {
1385 struct target_section *p;
1386 ULONGEST memend = memaddr + len;
1387
1388 table = target_get_section_table (ops);
1389
1390 for (p = table->sections; p < table->sections_end; p++)
1391 {
1392 if (memaddr >= p->addr)
1393 {
1394 if (memend <= p->endaddr)
1395 {
1396 /* Entire transfer is within this section. */
1397 return target_read_live_memory (object, memaddr,
1398 readbuf, len);
1399 }
1400 else if (memaddr >= p->endaddr)
1401 {
1402 /* This section ends before the transfer starts. */
1403 continue;
1404 }
1405 else
1406 {
1407 /* This section overlaps the transfer. Just do half. */
1408 len = p->endaddr - memaddr;
1409 return target_read_live_memory (object, memaddr,
1410 readbuf, len);
1411 }
1412 }
1413 }
1414 }
1415
1416 return 0;
1417}
1418
1419/* Perform a partial memory transfer.
1420 For docs see target.h, to_xfer_partial. */
1421
1422static LONGEST
1423memory_xfer_partial_1 (struct target_ops *ops, enum target_object object,
1424 void *readbuf, const void *writebuf, ULONGEST memaddr,
1425 LONGEST len)
1426{
1427 LONGEST res;
1428 int reg_len;
1429 struct mem_region *region;
1430 struct inferior *inf;
1431
1432 /* For accesses to unmapped overlay sections, read directly from
1433 files. Must do this first, as MEMADDR may need adjustment. */
1434 if (readbuf != NULL && overlay_debugging)
1435 {
1436 struct obj_section *section = find_pc_overlay (memaddr);
1437
1438 if (pc_in_unmapped_range (memaddr, section))
1439 {
1440 struct target_section_table *table
1441 = target_get_section_table (ops);
1442 const char *section_name = section->the_bfd_section->name;
1443
1444 memaddr = overlay_mapped_address (memaddr, section);
1445 return section_table_xfer_memory_partial (readbuf, writebuf,
1446 memaddr, len,
1447 table->sections,
1448 table->sections_end,
1449 section_name);
1450 }
1451 }
1452
1453 /* Try the executable files, if "trust-readonly-sections" is set. */
1454 if (readbuf != NULL && trust_readonly)
1455 {
1456 struct target_section *secp;
1457 struct target_section_table *table;
1458
1459 secp = target_section_by_addr (ops, memaddr);
1460 if (secp != NULL
1461 && (bfd_get_section_flags (secp->bfd, secp->the_bfd_section)
1462 & SEC_READONLY))
1463 {
1464 table = target_get_section_table (ops);
1465 return section_table_xfer_memory_partial (readbuf, writebuf,
1466 memaddr, len,
1467 table->sections,
1468 table->sections_end,
1469 NULL);
1470 }
1471 }
1472
1473 /* If reading unavailable memory in the context of traceframes, and
1474 this address falls within a read-only section, fallback to
1475 reading from live memory. */
1476 if (readbuf != NULL && get_traceframe_number () != -1)
1477 {
1478 VEC(mem_range_s) *available;
1479
1480 /* If we fail to get the set of available memory, then the
1481 target does not support querying traceframe info, and so we
1482 attempt reading from the traceframe anyway (assuming the
1483 target implements the old QTro packet then). */
1484 if (traceframe_available_memory (&available, memaddr, len))
1485 {
1486 struct cleanup *old_chain;
1487
1488 old_chain = make_cleanup (VEC_cleanup(mem_range_s), &available);
1489
1490 if (VEC_empty (mem_range_s, available)
1491 || VEC_index (mem_range_s, available, 0)->start != memaddr)
1492 {
1493 /* Don't read into the traceframe's available
1494 memory. */
1495 if (!VEC_empty (mem_range_s, available))
1496 {
1497 LONGEST oldlen = len;
1498
1499 len = VEC_index (mem_range_s, available, 0)->start - memaddr;
1500 gdb_assert (len <= oldlen);
1501 }
1502
1503 do_cleanups (old_chain);
1504
1505 /* This goes through the topmost target again. */
1506 res = memory_xfer_live_readonly_partial (ops, object,
1507 readbuf, memaddr, len);
1508 if (res > 0)
1509 return res;
1510
1511 /* No use trying further, we know some memory starting
1512 at MEMADDR isn't available. */
1513 return -1;
1514 }
1515
1516 /* Don't try to read more than how much is available, in
1517 case the target implements the deprecated QTro packet to
1518 cater for older GDBs (the target's knowledge of read-only
1519 sections may be outdated by now). */
1520 len = VEC_index (mem_range_s, available, 0)->length;
1521
1522 do_cleanups (old_chain);
1523 }
1524 }
1525
1526 /* Try GDB's internal data cache. */
1527 region = lookup_mem_region (memaddr);
1528 /* region->hi == 0 means there's no upper bound. */
1529 if (memaddr + len < region->hi || region->hi == 0)
1530 reg_len = len;
1531 else
1532 reg_len = region->hi - memaddr;
1533
1534 switch (region->attrib.mode)
1535 {
1536 case MEM_RO:
1537 if (writebuf != NULL)
1538 return -1;
1539 break;
1540
1541 case MEM_WO:
1542 if (readbuf != NULL)
1543 return -1;
1544 break;
1545
1546 case MEM_FLASH:
1547 /* We only support writing to flash during "load" for now. */
1548 if (writebuf != NULL)
1549 error (_("Writing to flash memory forbidden in this context"));
1550 break;
1551
1552 case MEM_NONE:
1553 return -1;
1554 }
1555
1556 if (!ptid_equal (inferior_ptid, null_ptid))
1557 inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
1558 else
1559 inf = NULL;
1560
1561 if (inf != NULL
1562 /* The dcache reads whole cache lines; that doesn't play well
1563 with reading from a trace buffer, because reading outside of
1564 the collected memory range fails. */
1565 && get_traceframe_number () == -1
1566 && (region->attrib.cache
1567 || (stack_cache_enabled_p && object == TARGET_OBJECT_STACK_MEMORY)))
1568 {
1569 if (readbuf != NULL)
1570 res = dcache_xfer_memory (ops, target_dcache, memaddr, readbuf,
1571 reg_len, 0);
1572 else
1573 /* FIXME drow/2006-08-09: If we're going to preserve const
1574 correctness dcache_xfer_memory should take readbuf and
1575 writebuf. */
1576 res = dcache_xfer_memory (ops, target_dcache, memaddr,
1577 (void *) writebuf,
1578 reg_len, 1);
1579 if (res <= 0)
1580 return -1;
1581 else
1582 return res;
1583 }
1584
1585 /* If none of those methods found the memory we wanted, fall back
1586 to a target partial transfer. Normally a single call to
1587 to_xfer_partial is enough; if it doesn't recognize an object
1588 it will call the to_xfer_partial of the next target down.
1589 But for memory this won't do. Memory is the only target
1590 object which can be read from more than one valid target.
1591 A core file, for instance, could have some of memory but
1592 delegate other bits to the target below it. So, we must
1593 manually try all targets. */
1594
1595 do
1596 {
1597 res = ops->to_xfer_partial (ops, TARGET_OBJECT_MEMORY, NULL,
1598 readbuf, writebuf, memaddr, reg_len);
1599 if (res > 0)
1600 break;
1601
1602 /* We want to continue past core files to executables, but not
1603 past a running target's memory. */
1604 if (ops->to_has_all_memory (ops))
1605 break;
1606
1607 ops = ops->beneath;
1608 }
1609 while (ops != NULL);
1610
1611 /* Make sure the cache gets updated no matter what - if we are writing
1612 to the stack. Even if this write is not tagged as such, we still need
1613 to update the cache. */
1614
1615 if (res > 0
1616 && inf != NULL
1617 && writebuf != NULL
1618 && !region->attrib.cache
1619 && stack_cache_enabled_p
1620 && object != TARGET_OBJECT_STACK_MEMORY)
1621 {
1622 dcache_update (target_dcache, memaddr, (void *) writebuf, res);
1623 }
1624
1625 /* If we still haven't got anything, return the last error. We
1626 give up. */
1627 return res;
1628}
1629
1630/* Perform a partial memory transfer. For docs see target.h,
1631 to_xfer_partial. */
1632
1633static LONGEST
1634memory_xfer_partial (struct target_ops *ops, enum target_object object,
1635 void *readbuf, const void *writebuf, ULONGEST memaddr,
1636 LONGEST len)
1637{
1638 int res;
1639
1640 /* Zero length requests are ok and require no work. */
1641 if (len == 0)
1642 return 0;
1643
1644 /* Fill in READBUF with breakpoint shadows, or WRITEBUF with
1645 breakpoint insns, thus hiding out from higher layers whether
1646 there are software breakpoints inserted in the code stream. */
1647 if (readbuf != NULL)
1648 {
1649 res = memory_xfer_partial_1 (ops, object, readbuf, NULL, memaddr, len);
1650
1651 if (res > 0 && !show_memory_breakpoints)
1652 breakpoint_xfer_memory (readbuf, NULL, NULL, memaddr, res);
1653 }
1654 else
1655 {
1656 void *buf;
1657 struct cleanup *old_chain;
1658
1659 buf = xmalloc (len);
1660 old_chain = make_cleanup (xfree, buf);
1661 memcpy (buf, writebuf, len);
1662
1663 breakpoint_xfer_memory (NULL, buf, writebuf, memaddr, len);
1664 res = memory_xfer_partial_1 (ops, object, NULL, buf, memaddr, len);
1665
1666 do_cleanups (old_chain);
1667 }
1668
1669 return res;
1670}
1671
1672static void
1673restore_show_memory_breakpoints (void *arg)
1674{
1675 show_memory_breakpoints = (uintptr_t) arg;
1676}
1677
1678struct cleanup *
1679make_show_memory_breakpoints_cleanup (int show)
1680{
1681 int current = show_memory_breakpoints;
1682
1683 show_memory_breakpoints = show;
1684 return make_cleanup (restore_show_memory_breakpoints,
1685 (void *) (uintptr_t) current);
1686}
1687
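/* Illustrative sketch, not part of this file: reading memory with the
   breakpoint instructions GDB inserted left visible, by flipping
   show_memory_breakpoints around the access via the cleanup above.
   The parameters are hypothetical.  */
#if 0
static int
example_read_unfiltered (CORE_ADDR addr, gdb_byte *buf, ssize_t len)
{
  struct cleanup *old_chain = make_show_memory_breakpoints_cleanup (1);
  int status = target_read_memory (addr, buf, len);

  do_cleanups (old_chain);
  return status;
}
#endif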
1688/* For docs see target.h, to_xfer_partial. */
1689
1690static LONGEST
1691target_xfer_partial (struct target_ops *ops,
1692 enum target_object object, const char *annex,
1693 void *readbuf, const void *writebuf,
1694 ULONGEST offset, LONGEST len)
1695{
1696 LONGEST retval;
1697
1698 gdb_assert (ops->to_xfer_partial != NULL);
1699
1700 if (writebuf && !may_write_memory)
1701 error (_("Writing to memory is not allowed (addr %s, len %s)"),
1702 core_addr_to_string_nz (offset), plongest (len));
1703
1704 /* If this is a memory transfer, let the memory-specific code
1705 have a look at it instead. Memory transfers are more
1706 complicated. */
1707 if (object == TARGET_OBJECT_MEMORY || object == TARGET_OBJECT_STACK_MEMORY)
1708 retval = memory_xfer_partial (ops, object, readbuf,
1709 writebuf, offset, len);
1710 else
1711 {
1712 enum target_object raw_object = object;
1713
1714 /* If this is a raw memory transfer, request the normal
1715 memory object from other layers. */
1716 if (raw_object == TARGET_OBJECT_RAW_MEMORY)
1717 raw_object = TARGET_OBJECT_MEMORY;
1718
1719 retval = ops->to_xfer_partial (ops, raw_object, annex, readbuf,
1720 writebuf, offset, len);
1721 }
1722
1723 if (targetdebug)
1724 {
1725 const unsigned char *myaddr = NULL;
1726
1727 fprintf_unfiltered (gdb_stdlog,
1728 "%s:target_xfer_partial "
1729 "(%d, %s, %s, %s, %s, %s) = %s",
1730 ops->to_shortname,
1731 (int) object,
1732 (annex ? annex : "(null)"),
1733 host_address_to_string (readbuf),
1734 host_address_to_string (writebuf),
1735 core_addr_to_string_nz (offset),
1736 plongest (len), plongest (retval));
1737
1738 if (readbuf)
1739 myaddr = readbuf;
1740 if (writebuf)
1741 myaddr = writebuf;
1742 if (retval > 0 && myaddr != NULL)
1743 {
1744 int i;
1745
1746 fputs_unfiltered (", bytes =", gdb_stdlog);
1747 for (i = 0; i < retval; i++)
1748 {
1749 if ((((intptr_t) &(myaddr[i])) & 0xf) == 0)
1750 {
1751 if (targetdebug < 2 && i > 0)
1752 {
1753 fprintf_unfiltered (gdb_stdlog, " ...");
1754 break;
1755 }
1756 fprintf_unfiltered (gdb_stdlog, "\n");
1757 }
1758
1759 fprintf_unfiltered (gdb_stdlog, " %02x", myaddr[i] & 0xff);
1760 }
1761 }
1762
1763 fputc_unfiltered ('\n', gdb_stdlog);
1764 }
1765 return retval;
1766}
1767
1768/* Read LEN bytes of target memory at address MEMADDR, placing the results in
1769 GDB's memory at MYADDR. Returns either 0 for success or an errno value
1770 if any error occurs.
1771
1772 If an error occurs, no guarantee is made about the contents of the data at
1773 MYADDR. In particular, the caller should not depend upon partial reads
1774 filling the buffer with good data. There is no way for the caller to know
1775 how much good data might have been transfered anyway. Callers that can
1776 deal with partial reads should call target_read (which will retry until
1777 it makes no progress, and then return how much was transferred). */
1778
1779int
1780target_read_memory (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1781{
1782 /* Dispatch to the topmost target, not the flattened current_target.
1783 Memory accesses check target->to_has_(all_)memory, and the
1784 flattened target doesn't inherit those. */
1785 if (target_read (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1786 myaddr, memaddr, len) == len)
1787 return 0;
1788 else
1789 return EIO;
1790}
1791
1792/* Like target_read_memory, but specify explicitly that this is a read from
1793 the target's stack. This may trigger different cache behavior. */
1794
1795int
1796target_read_stack (CORE_ADDR memaddr, gdb_byte *myaddr, ssize_t len)
1797{
1798 /* Dispatch to the topmost target, not the flattened current_target.
1799 Memory accesses check target->to_has_(all_)memory, and the
1800 flattened target doesn't inherit those. */
1801
1802 if (target_read (current_target.beneath, TARGET_OBJECT_STACK_MEMORY, NULL,
1803 myaddr, memaddr, len) == len)
1804 return 0;
1805 else
1806 return EIO;
1807}
1808
1809/* Write LEN bytes from MYADDR to target memory at address MEMADDR.
1810 Returns either 0 for success or an errno value if any error occurs.
1811 If an error occurs, no guarantee is made about how much data got written.
1812 Callers that can deal with partial writes should call target_write. */
1813
5796c8dc 1814int
ef5ccd6c 1815target_write_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1816{
1817 /* Dispatch to the topmost target, not the flattened current_target.
1818 Memory accesses check target->to_has_(all_)memory, and the
1819 flattened target doesn't inherit those. */
1820 if (target_write (current_target.beneath, TARGET_OBJECT_MEMORY, NULL,
1821 myaddr, memaddr, len) == len)
1822 return 0;
1823 else
1824 return EIO;
1825}
1826
1827/* Write LEN bytes from MYADDR to target raw memory at address
1828 MEMADDR. Returns either 0 for success or an errno value if any
1829 error occurs. If an error occurs, no guarantee is made about how
1830 much data got written. Callers that can deal with partial writes
1831 should call target_write. */
1832
1833int
ef5ccd6c 1834target_write_raw_memory (CORE_ADDR memaddr, const gdb_byte *myaddr, ssize_t len)
1835{
1836 /* Dispatch to the topmost target, not the flattened current_target.
1837 Memory accesses check target->to_has_(all_)memory, and the
1838 flattened target doesn't inherit those. */
1839 if (target_write (current_target.beneath, TARGET_OBJECT_RAW_MEMORY, NULL,
1840 myaddr, memaddr, len) == len)
1841 return 0;
1842 else
1843 return EIO;
1844}
1845
1846/* Fetch the target's memory map. */
1847
1848VEC(mem_region_s) *
1849target_memory_map (void)
1850{
1851 VEC(mem_region_s) *result;
1852 struct mem_region *last_one, *this_one;
1853 int ix;
1854 struct target_ops *t;
1855
1856 if (targetdebug)
1857 fprintf_unfiltered (gdb_stdlog, "target_memory_map ()\n");
1858
1859 for (t = current_target.beneath; t != NULL; t = t->beneath)
1860 if (t->to_memory_map != NULL)
1861 break;
1862
1863 if (t == NULL)
1864 return NULL;
1865
1866 result = t->to_memory_map (t);
1867 if (result == NULL)
1868 return NULL;
1869
1870 qsort (VEC_address (mem_region_s, result),
1871 VEC_length (mem_region_s, result),
1872 sizeof (struct mem_region), mem_region_cmp);
1873
1874 /* Check that regions do not overlap. Simultaneously assign
1875 a numbering for the "mem" commands to use to refer to
1876 each region. */
1877 last_one = NULL;
1878 for (ix = 0; VEC_iterate (mem_region_s, result, ix, this_one); ix++)
1879 {
1880 this_one->number = ix;
1881
1882 if (last_one && last_one->hi > this_one->lo)
1883 {
1884 warning (_("Overlapping regions in memory map: ignoring"));
1885 VEC_free (mem_region_s, result);
1886 return NULL;
1887 }
1888 last_one = this_one;
1889 }
1890
1891 return result;
1892}
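
/* Example: a minimal sketch that walks the vector returned by
   target_memory_map.  The regions are already sorted and numbered by the
   code above; the sketch assumes the caller frees the vector when done.  */

static void
example_print_memory_map (void)
{
  VEC(mem_region_s) *map = target_memory_map ();
  struct mem_region *r;
  int ix;

  if (map == NULL)
    return;

  for (ix = 0; VEC_iterate (mem_region_s, map, ix, r); ix++)
    fprintf_unfiltered (gdb_stdlog, "region %d: %s..%s\n",
			r->number, hex_string (r->lo), hex_string (r->hi));

  VEC_free (mem_region_s, map);
}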
1893
1894void
1895target_flash_erase (ULONGEST address, LONGEST length)
1896{
1897 struct target_ops *t;
1898
1899 for (t = current_target.beneath; t != NULL; t = t->beneath)
1900 if (t->to_flash_erase != NULL)
1901 {
1902 if (targetdebug)
1903 fprintf_unfiltered (gdb_stdlog, "target_flash_erase (%s, %s)\n",
1904 hex_string (address), phex (length, 0));
1905 t->to_flash_erase (t, address, length);
1906 return;
1907 }
1908
1909 tcomplain ();
1910}
1911
1912void
1913target_flash_done (void)
1914{
1915 struct target_ops *t;
1916
1917 for (t = current_target.beneath; t != NULL; t = t->beneath)
1918 if (t->to_flash_done != NULL)
1919 {
1920 if (targetdebug)
1921 fprintf_unfiltered (gdb_stdlog, "target_flash_done\n");
1922 t->to_flash_done (t);
1923 return;
1924 }
1925
1926 tcomplain ();
1927}
1928
1929static void
1930show_trust_readonly (struct ui_file *file, int from_tty,
1931 struct cmd_list_element *c, const char *value)
1932{
1933 fprintf_filtered (file,
1934 _("Mode for reading from readonly sections is %s.\n"),
1935 value);
1936}
1937
1938/* More generic transfers. */
1939
1940static LONGEST
1941default_xfer_partial (struct target_ops *ops, enum target_object object,
1942 const char *annex, gdb_byte *readbuf,
1943 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1944{
1945 if (object == TARGET_OBJECT_MEMORY
1946 && ops->deprecated_xfer_memory != NULL)
1947 /* If available, fall back to the target's
1948 "deprecated_xfer_memory" method. */
1949 {
1950 int xfered = -1;
cf7f2e2d 1951
1952 errno = 0;
1953 if (writebuf != NULL)
1954 {
1955 void *buffer = xmalloc (len);
1956 struct cleanup *cleanup = make_cleanup (xfree, buffer);
cf7f2e2d 1957
1958 memcpy (buffer, writebuf, len);
1959 xfered = ops->deprecated_xfer_memory (offset, buffer, len,
1960 1/*write*/, NULL, ops);
1961 do_cleanups (cleanup);
1962 }
1963 if (readbuf != NULL)
1964 xfered = ops->deprecated_xfer_memory (offset, readbuf, len,
1965 0/*read*/, NULL, ops);
1966 if (xfered > 0)
1967 return xfered;
1968 else if (xfered == 0 && errno == 0)
1969 /* "deprecated_xfer_memory" uses 0, cross checked against
1970 ERRNO as one indication of an error. */
1971 return 0;
1972 else
1973 return -1;
1974 }
1975 else if (ops->beneath != NULL)
1976 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1977 readbuf, writebuf, offset, len);
1978 else
1979 return -1;
1980}
1981
1982/* The xfer_partial handler for the topmost target. Unlike the default,
1983 it does not need to handle memory specially; it just passes all
1984 requests down the stack. */
1985
1986static LONGEST
1987current_xfer_partial (struct target_ops *ops, enum target_object object,
1988 const char *annex, gdb_byte *readbuf,
1989 const gdb_byte *writebuf, ULONGEST offset, LONGEST len)
1990{
1991 if (ops->beneath != NULL)
1992 return ops->beneath->to_xfer_partial (ops->beneath, object, annex,
1993 readbuf, writebuf, offset, len);
1994 else
1995 return -1;
1996}
1997
cf7f2e2d 1998/* Target vector read/write partial wrapper functions. */
1999
2000static LONGEST
2001target_read_partial (struct target_ops *ops,
2002 enum target_object object,
2003 const char *annex, gdb_byte *buf,
2004 ULONGEST offset, LONGEST len)
2005{
2006 return target_xfer_partial (ops, object, annex, buf, NULL, offset, len);
2007}
2008
2009static LONGEST
2010target_write_partial (struct target_ops *ops,
2011 enum target_object object,
2012 const char *annex, const gdb_byte *buf,
2013 ULONGEST offset, LONGEST len)
2014{
2015 return target_xfer_partial (ops, object, annex, NULL, buf, offset, len);
2016}
2017
2018/* Wrappers to perform the full transfer. */
2019
2020/* For docs on target_read see target.h. */
2021
2022LONGEST
2023target_read (struct target_ops *ops,
2024 enum target_object object,
2025 const char *annex, gdb_byte *buf,
2026 ULONGEST offset, LONGEST len)
2027{
2028 LONGEST xfered = 0;
cf7f2e2d 2029
2030 while (xfered < len)
2031 {
2032 LONGEST xfer = target_read_partial (ops, object, annex,
2033 (gdb_byte *) buf + xfered,
2034 offset + xfered, len - xfered);
cf7f2e2d 2035
2036 /* Call an observer, notifying them of the xfer progress? */
2037 if (xfer == 0)
2038 return xfered;
2039 if (xfer < 0)
2040 return -1;
2041 xfered += xfer;
2042 QUIT;
2043 }
2044 return len;
2045}
2046
2047/* Assuming that the entire [begin, end) range of memory cannot be
2048 read, try to read whatever subrange is possible to read.
2049
2050 The function returns, in RESULT, either zero or one memory block.
2051 If there's a readable subrange at the beginning, it is completely
2052 read and returned. Any further readable subrange will not be read.
2053 Otherwise, if there's a readable subrange at the end, it will be
2054 completely read and returned. Any readable subranges before it
2055 (obviously, not starting at the beginning) will be ignored. In
2056 other cases -- either no readable subrange, or readable subrange(s)
2057 that are neither at the beginning nor at the end -- nothing is returned.
2058
2059 The purpose of this function is to handle a read across a boundary
2060 of accessible memory in the case when a memory map is not available.
2061 The above restrictions are fine for this case, but will give
2062 incorrect results if the memory is 'patchy'. However, supporting
2063 'patchy' memory would require trying to read every single byte,
2064 and that seems an unacceptable solution. An explicit memory map is
2065 recommended for this case -- and read_memory_robust will
2066 take care of reading multiple ranges then. */
2067
2068static void
2069read_whatever_is_readable (struct target_ops *ops,
2070 ULONGEST begin, ULONGEST end,
2071 VEC(memory_read_result_s) **result)
5796c8dc 2072{
2073 gdb_byte *buf = xmalloc (end - begin);
2074 ULONGEST current_begin = begin;
2075 ULONGEST current_end = end;
2076 int forward;
2077 memory_read_result_s r;
2078
2079 /* If we previously failed to read 1 byte, nothing can be done here. */
2080 if (end - begin <= 1)
2081 {
2082 xfree (buf);
2083 return;
2084 }
2085
2086 /* Check that either the first or the last byte is readable, and give up
2087 if not. This heuristic is meant to permit reading accessible memory
2088 at the boundary of an accessible region. */
2089 if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2090 buf, begin, 1) == 1)
2091 {
2092 forward = 1;
2093 ++current_begin;
2094 }
2095 else if (target_read_partial (ops, TARGET_OBJECT_MEMORY, NULL,
2096 buf + (end-begin) - 1, end - 1, 1) == 1)
2097 {
2098 forward = 0;
2099 --current_end;
2100 }
2101 else
2102 {
2103 xfree (buf);
2104 return;
2105 }
2106
2107 /* Loop invariant is that the range [current_begin, current_end) was previously
2108 found to be not readable as a whole.
2109
2110 Note loop condition -- if the range has 1 byte, we can't divide the range
2111 so there's no point trying further. */
2112 while (current_end - current_begin > 1)
2113 {
2114 ULONGEST first_half_begin, first_half_end;
2115 ULONGEST second_half_begin, second_half_end;
2116 LONGEST xfer;
2117 ULONGEST middle = current_begin + (current_end - current_begin)/2;
2118
2119 if (forward)
2120 {
2121 first_half_begin = current_begin;
2122 first_half_end = middle;
2123 second_half_begin = middle;
2124 second_half_end = current_end;
2125 }
2126 else
2127 {
2128 first_half_begin = middle;
2129 first_half_end = current_end;
2130 second_half_begin = current_begin;
2131 second_half_end = middle;
2132 }
2133
2134 xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2135 buf + (first_half_begin - begin),
2136 first_half_begin,
2137 first_half_end - first_half_begin);
2138
2139 if (xfer == first_half_end - first_half_begin)
2140 {
2141 /* This half reads up fine. So, the error must be in the
2142 other half. */
2143 current_begin = second_half_begin;
2144 current_end = second_half_end;
2145 }
2146 else
2147 {
2148 /* This half is not readable. Because we've tried one byte, we
2149 know some part of this half is actually readable. Go to the next
2150 iteration to divide again and try to read.
2151
2152 We don't handle the other half, because this function only tries
2153 to read a single readable subrange. */
2154 current_begin = first_half_begin;
2155 current_end = first_half_end;
2156 }
2157 }
2158
2159 if (forward)
2160 {
2161 /* The [begin, current_begin) range has been read. */
2162 r.begin = begin;
2163 r.end = current_begin;
2164 r.data = buf;
2165 }
2166 else
2167 {
2168 /* The [current_end, end) range has been read. */
2169 LONGEST rlen = end - current_end;
2170
2171 r.data = xmalloc (rlen);
2172 memcpy (r.data, buf + current_end - begin, rlen);
2173 r.begin = current_end;
2174 r.end = end;
2175 xfree (buf);
2176 }
2177 VEC_safe_push(memory_read_result_s, (*result), &r);
2178}
cf7f2e2d 2179
2180void
2181free_memory_read_result_vector (void *x)
2182{
2183 VEC(memory_read_result_s) *v = x;
2184 memory_read_result_s *current;
2185 int ix;
2186
2187 for (ix = 0; VEC_iterate (memory_read_result_s, v, ix, current); ++ix)
2188 {
2189 xfree (current->data);
2190 }
2191 VEC_free (memory_read_result_s, v);
2192}
2193
2194VEC(memory_read_result_s) *
2195read_memory_robust (struct target_ops *ops, ULONGEST offset, LONGEST len)
2196{
2197 VEC(memory_read_result_s) *result = 0;
2198
2199 LONGEST xfered = 0;
2200 while (xfered < len)
2201 {
2202 struct mem_region *region = lookup_mem_region (offset + xfered);
2203 LONGEST rlen;
cf7f2e2d 2204
2205 /* If there is no explicit region, a fake one should be created. */
2206 gdb_assert (region);
2207
2208 if (region->hi == 0)
2209 rlen = len - xfered;
2210 else
2211 rlen = region->hi - offset;
2212
2213 if (region->attrib.mode == MEM_NONE || region->attrib.mode == MEM_WO)
5796c8dc 2214 {
2215 /* Cannot read this region. Note that we can end up here only
2216 if the region is explicitly marked inaccessible, or
2217 'inaccessible-by-default' is in effect. */
2218 xfered += rlen;
2219 }
2220 else
2221 {
2222 LONGEST to_read = min (len - xfered, rlen);
2223 gdb_byte *buffer = (gdb_byte *)xmalloc (to_read);
2224
2225 LONGEST xfer = target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2226 (gdb_byte *) buffer,
2227 offset + xfered, to_read);
2228 /* Call an observer, notifying them of the xfer progress? */
5796c8dc 2229 if (xfer <= 0)
5796c8dc 2230 {
2231 /* Got an error reading full chunk. See if maybe we can read
2232 some subrange. */
2233 xfree (buffer);
2234 read_whatever_is_readable (ops, offset + xfered,
2235 offset + xfered + to_read, &result);
2236 xfered += to_read;
5796c8dc 2237 }
2238 else
2239 {
2240 struct memory_read_result r;
2241 r.data = buffer;
2242 r.begin = offset + xfered;
2243 r.end = r.begin + xfer;
2244 VEC_safe_push (memory_read_result_s, result, &r);
2245 xfered += xfer;
2246 }
2247 QUIT;
5796c8dc 2248 }
5796c8dc 2249 }
c50c785c 2250 return result;
2251}
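
/* Example: a minimal sketch of consuming the vector produced by
   read_memory_robust.  Each element describes one contiguous block that
   could actually be read; free_memory_read_result_vector (above) releases
   both the vector and the data buffers via a cleanup.  */

static void
example_dump_readable_blocks (struct target_ops *ops,
			      ULONGEST addr, LONGEST len)
{
  VEC(memory_read_result_s) *blocks = read_memory_robust (ops, addr, len);
  struct cleanup *cleanup
    = make_cleanup (free_memory_read_result_vector, blocks);
  memory_read_result_s *block;
  int ix;

  for (ix = 0; VEC_iterate (memory_read_result_s, blocks, ix, block); ix++)
    fprintf_unfiltered (gdb_stdlog, "readable: %s..%s\n",
			hex_string (block->begin), hex_string (block->end));

  do_cleanups (cleanup);
}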
2252
c50c785c 2253
2254/* An alternative to target_write with progress callbacks. */
2255
2256LONGEST
2257target_write_with_progress (struct target_ops *ops,
2258 enum target_object object,
2259 const char *annex, const gdb_byte *buf,
2260 ULONGEST offset, LONGEST len,
2261 void (*progress) (ULONGEST, void *), void *baton)
2262{
2263 LONGEST xfered = 0;
2264
2265 /* Give the progress callback a chance to set up. */
2266 if (progress)
2267 (*progress) (0, baton);
2268
2269 while (xfered < len)
2270 {
2271 LONGEST xfer = target_write_partial (ops, object, annex,
2272 (gdb_byte *) buf + xfered,
2273 offset + xfered, len - xfered);
2274
2275 if (xfer == 0)
2276 return xfered;
2277 if (xfer < 0)
2278 return -1;
2279
2280 if (progress)
2281 (*progress) (xfer, baton);
2282
2283 xfered += xfer;
2284 QUIT;
2285 }
2286 return len;
2287}
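
/* Example: a minimal sketch of a progress callback suitable for
   target_write_with_progress.  The first invocation reports 0 bytes (the
   setup call made above); later invocations report the size of each
   completed chunk.  Keeping a running total in the baton is an
   illustrative choice.  */

static void
example_write_progress (ULONGEST bytes_written, void *baton)
{
  ULONGEST *total = baton;

  *total += bytes_written;
  fprintf_unfiltered (gdb_stdlog, "written so far: %s bytes\n",
		      pulongest (*total));
}

/* Typical call:
     ULONGEST total = 0;
     target_write_with_progress (ops, TARGET_OBJECT_MEMORY, NULL, buf,
				 addr, len, example_write_progress, &total);  */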
2288
2289/* For docs on target_write see target.h. */
2290
2291LONGEST
2292target_write (struct target_ops *ops,
2293 enum target_object object,
2294 const char *annex, const gdb_byte *buf,
2295 ULONGEST offset, LONGEST len)
2296{
2297 return target_write_with_progress (ops, object, annex, buf, offset, len,
2298 NULL, NULL);
2299}
2300
2301/* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2302 the size of the transferred data. PADDING additional bytes are
2303 available in *BUF_P. This is a helper function for
2304 target_read_alloc; see the declaration of that function for more
2305 information. */
2306
2307static LONGEST
2308target_read_alloc_1 (struct target_ops *ops, enum target_object object,
2309 const char *annex, gdb_byte **buf_p, int padding)
2310{
2311 size_t buf_alloc, buf_pos;
2312 gdb_byte *buf;
2313 LONGEST n;
2314
2315 /* This function does not have a length parameter; it reads the
2316 entire OBJECT. Also, it doesn't support objects fetched partly
2317 from one target and partly from another (in a different stratum,
2318 e.g. a core file and an executable). Both reasons make it
2319 unsuitable for reading memory. */
2320 gdb_assert (object != TARGET_OBJECT_MEMORY);
2321
2322 /* Start by reading up to 4K at a time. The target will throttle
2323 this number down if necessary. */
2324 buf_alloc = 4096;
2325 buf = xmalloc (buf_alloc);
2326 buf_pos = 0;
2327 while (1)
2328 {
2329 n = target_read_partial (ops, object, annex, &buf[buf_pos],
2330 buf_pos, buf_alloc - buf_pos - padding);
2331 if (n < 0)
2332 {
2333 /* An error occurred. */
2334 xfree (buf);
2335 return -1;
2336 }
2337 else if (n == 0)
2338 {
2339 /* Read all there was. */
2340 if (buf_pos == 0)
2341 xfree (buf);
2342 else
2343 *buf_p = buf;
2344 return buf_pos;
2345 }
2346
2347 buf_pos += n;
2348
2349 /* If the buffer is filling up, expand it. */
2350 if (buf_alloc < buf_pos * 2)
2351 {
2352 buf_alloc *= 2;
2353 buf = xrealloc (buf, buf_alloc);
2354 }
2355
2356 QUIT;
2357 }
2358}
2359
2360/* Read OBJECT/ANNEX using OPS. Store the result in *BUF_P and return
2361 the size of the transferred data. See the declaration of this
2362 function in "target.h" for more information about the return value. */
2363
2364LONGEST
2365target_read_alloc (struct target_ops *ops, enum target_object object,
2366 const char *annex, gdb_byte **buf_p)
2367{
2368 return target_read_alloc_1 (ops, object, annex, buf_p, 0);
2369}
2370
2371/* Read OBJECT/ANNEX using OPS. The result is NUL-terminated and
2372 returned as a string, allocated using xmalloc. If an error occurs
2373 or the transfer is unsupported, NULL is returned. Empty objects
2374 are returned as allocated but empty strings. A warning is issued
2375 if the result contains any embedded NUL bytes. */
2376
2377char *
2378target_read_stralloc (struct target_ops *ops, enum target_object object,
2379 const char *annex)
2380{
2381 gdb_byte *buffer;
2382 char *bufstr;
2383 LONGEST i, transferred;
2384
2385 transferred = target_read_alloc_1 (ops, object, annex, &buffer, 1);
ef5ccd6c 2386 bufstr = (char *) buffer;
2387
2388 if (transferred < 0)
2389 return NULL;
2390
2391 if (transferred == 0)
2392 return xstrdup ("");
2393
2394 bufstr[transferred] = 0;
2395
2396 /* Check for embedded NUL bytes; but allow trailing NULs. */
2397 for (i = strlen (bufstr); i < transferred; i++)
2398 if (bufstr[i] != 0)
2399 {
2400 warning (_("target object %d, annex %s, "
2401 "contained unexpected null characters"),
2402 (int) object, annex ? annex : "(none)");
2403 break;
2404 }
5796c8dc 2405
ef5ccd6c 2406 return bufstr;
2407}
2408
2409/* Memory transfer methods. */
2410
2411void
2412get_target_memory (struct target_ops *ops, CORE_ADDR addr, gdb_byte *buf,
2413 LONGEST len)
2414{
2415 /* This method is used to read from an alternate, non-current
2416 target. This read must bypass the overlay support (as symbols
2417 don't match this target), and GDB's internal cache (wrong cache
2418 for this target). */
2419 if (target_read (ops, TARGET_OBJECT_RAW_MEMORY, NULL, buf, addr, len)
2420 != len)
2421 memory_error (EIO, addr);
2422}
2423
2424ULONGEST
2425get_target_memory_unsigned (struct target_ops *ops, CORE_ADDR addr,
2426 int len, enum bfd_endian byte_order)
2427{
2428 gdb_byte buf[sizeof (ULONGEST)];
2429
2430 gdb_assert (len <= sizeof (buf));
2431 get_target_memory (ops, addr, buf, len);
2432 return extract_unsigned_integer (buf, len, byte_order);
2433}
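
/* Example: a minimal sketch reading a 4-byte unsigned value from an
   alternate target, using the byte order of the current architecture.
   Read errors are reported through memory_error by get_target_memory.  */

static ULONGEST
example_read_u32 (struct target_ops *ops, CORE_ADDR addr)
{
  enum bfd_endian byte_order = gdbarch_byte_order (target_gdbarch ());

  return get_target_memory_unsigned (ops, addr, 4, byte_order);
}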
2434
2435int
2436target_insert_breakpoint (struct gdbarch *gdbarch,
2437 struct bp_target_info *bp_tgt)
2438{
2439 if (!may_insert_breakpoints)
2440 {
2441 warning (_("May not insert breakpoints"));
2442 return 1;
2443 }
2444
2445 return (*current_target.to_insert_breakpoint) (gdbarch, bp_tgt);
2446}
2447
2448int
2449target_remove_breakpoint (struct gdbarch *gdbarch,
2450 struct bp_target_info *bp_tgt)
2451{
2452 /* This is kind of a weird case to handle, but the permission might
2453 have been changed after breakpoints were inserted - in which case
2454 we should just take the user literally and assume that any
2455 breakpoints should be left in place. */
2456 if (!may_insert_breakpoints)
2457 {
2458 warning (_("May not remove breakpoints"));
2459 return 1;
2460 }
2461
2462 return (*current_target.to_remove_breakpoint) (gdbarch, bp_tgt);
2463}
2464
2465static void
2466target_info (char *args, int from_tty)
2467{
2468 struct target_ops *t;
2469 int has_all_mem = 0;
2470
2471 if (symfile_objfile != NULL)
2472 printf_unfiltered (_("Symbols from \"%s\".\n"), symfile_objfile->name);
2473
2474 for (t = target_stack; t != NULL; t = t->beneath)
2475 {
2476 if (!(*t->to_has_memory) (t))
2477 continue;
2478
2479 if ((int) (t->to_stratum) <= (int) dummy_stratum)
2480 continue;
2481 if (has_all_mem)
2482 printf_unfiltered (_("\tWhile running this, "
2483 "GDB does not access memory from...\n"));
2484 printf_unfiltered ("%s:\n", t->to_longname);
2485 (t->to_files_info) (t);
2486 has_all_mem = (*t->to_has_all_memory) (t);
2487 }
2488}
2489
2490/* This function is called before any new inferior is created, e.g.
2491 by running a program, attaching, or connecting to a target.
2492 It cleans up any state from previous invocations which might
2493 change between runs. This is a subset of what target_preopen
2494 resets (things which might change between targets). */
2495
2496void
2497target_pre_inferior (int from_tty)
2498{
c50c785c 2499 /* Clear out solib state. Otherwise the solib state of the previous
5796c8dc 2500 inferior might have survived and is entirely wrong for the new
c50c785c 2501 target. This has been observed on GNU/Linux using glibc 2.3. How
2502 to reproduce:
2503
2504 bash$ ./foo&
2505 [1] 4711
2506 bash$ ./foo&
2507 [1] 4712
2508 bash$ gdb ./foo
2509 [...]
2510 (gdb) attach 4711
2511 (gdb) detach
2512 (gdb) attach 4712
2513 Cannot access memory at address 0xdeadbeef
2514 */
2515
2516 /* In some OSs, the shared library list is the same/global/shared
2517 across inferiors. If code is shared between processes, so are
2518 memory regions and features. */
ef5ccd6c 2519 if (!gdbarch_has_global_solist (target_gdbarch ()))
2520 {
2521 no_shared_libraries (NULL, from_tty);
2522
2523 invalidate_target_mem_regions ();
2524
2525 target_clear_description ();
2526 }
2527
2528 agent_capability_invalidate ();
2529}
2530
2531/* Callback for iterate_over_inferiors. Gets rid of the given
2532 inferior. */
2533
2534static int
2535dispose_inferior (struct inferior *inf, void *args)
2536{
2537 struct thread_info *thread;
2538
2539 thread = any_thread_of_process (inf->pid);
2540 if (thread)
2541 {
2542 switch_to_thread (thread->ptid);
2543
2544 /* Core inferiors actually should be detached, not killed. */
2545 if (target_has_execution)
2546 target_kill ();
2547 else
2548 target_detach (NULL, 0);
2549 }
2550
2551 return 0;
2552}
2553
2554/* This is to be called by the open routine before it does
2555 anything. */
2556
2557void
2558target_preopen (int from_tty)
2559{
2560 dont_repeat ();
2561
2562 if (have_inferiors ())
2563 {
2564 if (!from_tty
2565 || !have_live_inferiors ()
2566 || query (_("A program is being debugged already. Kill it? ")))
2567 iterate_over_inferiors (dispose_inferior, NULL);
2568 else
2569 error (_("Program not killed."));
2570 }
2571
2572 /* Calling target_kill may remove the target from the stack. But if
2573 it doesn't (which seems like a win for UDI), remove it now. */
2574 /* Leave the exec target, though. The user may be switching from a
2575 live process to a core of the same program. */
2576 pop_all_targets_above (file_stratum, 0);
2577
2578 target_pre_inferior (from_tty);
2579}
2580
2581/* Detach a target after doing deferred register stores. */
2582
2583void
2584target_detach (char *args, int from_tty)
2585{
2586 struct target_ops* t;
2587
ef5ccd6c 2588 if (gdbarch_has_global_breakpoints (target_gdbarch ()))
2589 /* Don't remove global breakpoints here. They're removed on
2590 disconnection from the target. */
2591 ;
2592 else
2593 /* If we're in breakpoints-always-inserted mode, have to remove
2594 them before detaching. */
2595 remove_breakpoints_pid (PIDGET (inferior_ptid));
2596
2597 prepare_for_detach ();
2598
2599 for (t = current_target.beneath; t != NULL; t = t->beneath)
2600 {
2601 if (t->to_detach != NULL)
2602 {
2603 t->to_detach (t, args, from_tty);
2604 if (targetdebug)
2605 fprintf_unfiltered (gdb_stdlog, "target_detach (%s, %d)\n",
2606 args, from_tty);
2607 return;
2608 }
2609 }
2610
c50c785c 2611 internal_error (__FILE__, __LINE__, _("could not find a target to detach"));
2612}
2613
2614void
2615target_disconnect (char *args, int from_tty)
2616{
2617 struct target_ops *t;
2618
2619 /* If we're in breakpoints-always-inserted mode or if breakpoints
2620 are global across processes, we have to remove them before
2621 disconnecting. */
2622 remove_breakpoints ();
2623
2624 for (t = current_target.beneath; t != NULL; t = t->beneath)
2625 if (t->to_disconnect != NULL)
2626 {
2627 if (targetdebug)
2628 fprintf_unfiltered (gdb_stdlog, "target_disconnect (%s, %d)\n",
2629 args, from_tty);
2630 t->to_disconnect (t, args, from_tty);
2631 return;
2632 }
2633
2634 tcomplain ();
2635}
2636
2637ptid_t
2638target_wait (ptid_t ptid, struct target_waitstatus *status, int options)
2639{
2640 struct target_ops *t;
2641
2642 for (t = current_target.beneath; t != NULL; t = t->beneath)
2643 {
2644 if (t->to_wait != NULL)
2645 {
2646 ptid_t retval = (*t->to_wait) (t, ptid, status, options);
2647
2648 if (targetdebug)
2649 {
2650 char *status_string;
ef5ccd6c 2651 char *options_string;
2652
2653 status_string = target_waitstatus_to_string (status);
ef5ccd6c 2654 options_string = target_options_to_string (options);
5796c8dc 2655 fprintf_unfiltered (gdb_stdlog,
2656 "target_wait (%d, status, options={%s})"
2657 " = %d, %s\n",
2658 PIDGET (ptid), options_string,
2659 PIDGET (retval), status_string);
5796c8dc 2660 xfree (status_string);
ef5ccd6c 2661 xfree (options_string);
2662 }
2663
2664 return retval;
2665 }
2666 }
2667
2668 noprocess ();
2669}
2670
2671char *
2672target_pid_to_str (ptid_t ptid)
2673{
2674 struct target_ops *t;
2675
2676 for (t = current_target.beneath; t != NULL; t = t->beneath)
2677 {
2678 if (t->to_pid_to_str != NULL)
2679 return (*t->to_pid_to_str) (t, ptid);
2680 }
2681
2682 return normal_pid_to_str (ptid);
2683}
2684
2685char *
2686target_thread_name (struct thread_info *info)
2687{
2688 struct target_ops *t;
2689
2690 for (t = current_target.beneath; t != NULL; t = t->beneath)
2691 {
2692 if (t->to_thread_name != NULL)
2693 return (*t->to_thread_name) (info);
2694 }
2695
2696 return NULL;
2697}
2698
5796c8dc 2699void
ef5ccd6c 2700target_resume (ptid_t ptid, int step, enum gdb_signal signal)
2701{
2702 struct target_ops *t;
2703
2704 target_dcache_invalidate ();
2705
2706 for (t = current_target.beneath; t != NULL; t = t->beneath)
2707 {
2708 if (t->to_resume != NULL)
2709 {
2710 t->to_resume (t, ptid, step, signal);
2711 if (targetdebug)
2712 fprintf_unfiltered (gdb_stdlog, "target_resume (%d, %s, %s)\n",
2713 PIDGET (ptid),
2714 step ? "step" : "continue",
ef5ccd6c 2715 gdb_signal_to_name (signal));
5796c8dc 2716
cf7f2e2d 2717 registers_changed_ptid (ptid);
2718 set_executing (ptid, 1);
2719 set_running (ptid, 1);
2720 clear_inline_frame_state (ptid);
2721 return;
2722 }
2723 }
2724
2725 noprocess ();
2726}
2727
2728void
2729target_pass_signals (int numsigs, unsigned char *pass_signals)
2730{
2731 struct target_ops *t;
2732
2733 for (t = current_target.beneath; t != NULL; t = t->beneath)
2734 {
2735 if (t->to_pass_signals != NULL)
2736 {
2737 if (targetdebug)
2738 {
2739 int i;
2740
2741 fprintf_unfiltered (gdb_stdlog, "target_pass_signals (%d, {",
2742 numsigs);
2743
2744 for (i = 0; i < numsigs; i++)
2745 if (pass_signals[i])
2746 fprintf_unfiltered (gdb_stdlog, " %s",
ef5ccd6c 2747 gdb_signal_to_name (i));
2748
2749 fprintf_unfiltered (gdb_stdlog, " })\n");
2750 }
2751
2752 (*t->to_pass_signals) (numsigs, pass_signals);
2753 return;
2754 }
2755 }
2756}
2757
2758void
2759target_program_signals (int numsigs, unsigned char *program_signals)
2760{
2761 struct target_ops *t;
2762
2763 for (t = current_target.beneath; t != NULL; t = t->beneath)
2764 {
2765 if (t->to_program_signals != NULL)
2766 {
2767 if (targetdebug)
2768 {
2769 int i;
2770
2771 fprintf_unfiltered (gdb_stdlog, "target_program_signals (%d, {",
2772 numsigs);
2773
2774 for (i = 0; i < numsigs; i++)
2775 if (program_signals[i])
2776 fprintf_unfiltered (gdb_stdlog, " %s",
2777 gdb_signal_to_name (i));
2778
2779 fprintf_unfiltered (gdb_stdlog, " })\n");
2780 }
2781
2782 (*t->to_program_signals) (numsigs, program_signals);
2783 return;
2784 }
2785 }
2786}
2787
2788/* Look through the list of possible targets for a target that can
2789 follow forks. */
2790
2791int
2792target_follow_fork (int follow_child)
2793{
2794 struct target_ops *t;
2795
2796 for (t = current_target.beneath; t != NULL; t = t->beneath)
2797 {
2798 if (t->to_follow_fork != NULL)
2799 {
2800 int retval = t->to_follow_fork (t, follow_child);
cf7f2e2d 2801
2802 if (targetdebug)
2803 fprintf_unfiltered (gdb_stdlog, "target_follow_fork (%d) = %d\n",
2804 follow_child, retval);
2805 return retval;
2806 }
2807 }
2808
2809 /* Some target returned a fork event, but did not know how to follow it. */
2810 internal_error (__FILE__, __LINE__,
c50c785c 2811 _("could not find a target to follow fork"));
2812}
2813
2814void
2815target_mourn_inferior (void)
2816{
2817 struct target_ops *t;
cf7f2e2d 2818
2819 for (t = current_target.beneath; t != NULL; t = t->beneath)
2820 {
2821 if (t->to_mourn_inferior != NULL)
2822 {
2823 t->to_mourn_inferior (t);
2824 if (targetdebug)
2825 fprintf_unfiltered (gdb_stdlog, "target_mourn_inferior ()\n");
2826
2827 /* We no longer need to keep handles on any of the object files.
2828 Make sure to release them to avoid unnecessarily locking any
2829 of them while we're not actually debugging. */
2830 bfd_cache_close_all ();
2831
2832 return;
2833 }
2834 }
2835
2836 internal_error (__FILE__, __LINE__,
c50c785c 2837 _("could not find a target to mourn inferior"));
2838}
2839
2840/* Look for a target which can describe architectural features, starting
2841 from TARGET. If we find one, return its description. */
2842
2843const struct target_desc *
2844target_read_description (struct target_ops *target)
2845{
2846 struct target_ops *t;
2847
2848 for (t = target; t != NULL; t = t->beneath)
2849 if (t->to_read_description != NULL)
2850 {
2851 const struct target_desc *tdesc;
2852
2853 tdesc = t->to_read_description (t);
2854 if (tdesc)
2855 return tdesc;
2856 }
2857
2858 return NULL;
2859}
2860
2861/* The default implementation of to_search_memory.
2862 This implements a basic search of memory, reading target memory and
2863 performing the search here (as opposed to performing the search on the
2864 target side with, for example, gdbserver). */
2865
2866int
2867simple_search_memory (struct target_ops *ops,
2868 CORE_ADDR start_addr, ULONGEST search_space_len,
2869 const gdb_byte *pattern, ULONGEST pattern_len,
2870 CORE_ADDR *found_addrp)
2871{
2872 /* NOTE: also defined in find.c testcase. */
2873#define SEARCH_CHUNK_SIZE 16000
2874 const unsigned chunk_size = SEARCH_CHUNK_SIZE;
2875 /* Buffer to hold memory contents for searching. */
2876 gdb_byte *search_buf;
2877 unsigned search_buf_size;
2878 struct cleanup *old_cleanups;
2879
2880 search_buf_size = chunk_size + pattern_len - 1;
2881
2882 /* No point in trying to allocate a buffer larger than the search space. */
2883 if (search_space_len < search_buf_size)
2884 search_buf_size = search_space_len;
2885
2886 search_buf = malloc (search_buf_size);
2887 if (search_buf == NULL)
2888 error (_("Unable to allocate memory to perform the search."));
2889 old_cleanups = make_cleanup (free_current_contents, &search_buf);
2890
2891 /* Prime the search buffer. */
2892
2893 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2894 search_buf, start_addr, search_buf_size) != search_buf_size)
2895 {
2896 warning (_("Unable to access %s bytes of target "
2897 "memory at %s, halting search."),
2898 pulongest (search_buf_size), hex_string (start_addr));
2899 do_cleanups (old_cleanups);
2900 return -1;
2901 }
2902
2903 /* Perform the search.
2904
2905 The loop is kept simple by allocating [N + pattern-length - 1] bytes.
2906 When we've scanned N bytes we copy the trailing bytes to the start and
2907 read in another N bytes. */
2908
2909 while (search_space_len >= pattern_len)
2910 {
2911 gdb_byte *found_ptr;
2912 unsigned nr_search_bytes = min (search_space_len, search_buf_size);
2913
2914 found_ptr = memmem (search_buf, nr_search_bytes,
2915 pattern, pattern_len);
2916
2917 if (found_ptr != NULL)
2918 {
2919 CORE_ADDR found_addr = start_addr + (found_ptr - search_buf);
cf7f2e2d 2920
2921 *found_addrp = found_addr;
2922 do_cleanups (old_cleanups);
2923 return 1;
2924 }
2925
2926 /* Not found in this chunk, skip to next chunk. */
2927
2928 /* Don't let search_space_len wrap here, it's unsigned. */
2929 if (search_space_len >= chunk_size)
2930 search_space_len -= chunk_size;
2931 else
2932 search_space_len = 0;
2933
2934 if (search_space_len >= pattern_len)
2935 {
2936 unsigned keep_len = search_buf_size - chunk_size;
cf7f2e2d 2937 CORE_ADDR read_addr = start_addr + chunk_size + keep_len;
2938 int nr_to_read;
2939
2940 /* Copy the trailing part of the previous iteration to the front
2941 of the buffer for the next iteration. */
2942 gdb_assert (keep_len == pattern_len - 1);
2943 memcpy (search_buf, search_buf + chunk_size, keep_len);
2944
2945 nr_to_read = min (search_space_len - keep_len, chunk_size);
2946
2947 if (target_read (ops, TARGET_OBJECT_MEMORY, NULL,
2948 search_buf + keep_len, read_addr,
2949 nr_to_read) != nr_to_read)
2950 {
ef5ccd6c 2951 warning (_("Unable to access %s bytes of target "
c50c785c 2952 "memory at %s, halting search."),
ef5ccd6c 2953 plongest (nr_to_read),
2954 hex_string (read_addr));
2955 do_cleanups (old_cleanups);
2956 return -1;
2957 }
2958
2959 start_addr += chunk_size;
2960 }
2961 }
2962
2963 /* Not found. */
2964
2965 do_cleanups (old_cleanups);
2966 return 0;
2967}
2968
2969/* Search SEARCH_SPACE_LEN bytes beginning at START_ADDR for the
2970 sequence of bytes in PATTERN with length PATTERN_LEN.
2971
2972 The result is 1 if found, 0 if not found, and -1 if there was an error
2973 requiring halting of the search (e.g. memory read error).
2974 If the pattern is found the address is recorded in FOUND_ADDRP. */
2975
2976int
2977target_search_memory (CORE_ADDR start_addr, ULONGEST search_space_len,
2978 const gdb_byte *pattern, ULONGEST pattern_len,
2979 CORE_ADDR *found_addrp)
2980{
2981 struct target_ops *t;
2982 int found;
2983
2984 /* We don't use INHERIT to set current_target.to_search_memory,
2985 so we have to scan the target stack and handle targetdebug
2986 ourselves. */
2987
2988 if (targetdebug)
2989 fprintf_unfiltered (gdb_stdlog, "target_search_memory (%s, ...)\n",
2990 hex_string (start_addr));
2991
2992 for (t = current_target.beneath; t != NULL; t = t->beneath)
2993 if (t->to_search_memory != NULL)
2994 break;
2995
2996 if (t != NULL)
2997 {
2998 found = t->to_search_memory (t, start_addr, search_space_len,
2999 pattern, pattern_len, found_addrp);
3000 }
3001 else
3002 {
3003 /* If a special version of to_search_memory isn't available, use the
3004 simple version. */
3005 found = simple_search_memory (current_target.beneath,
3006 start_addr, search_space_len,
3007 pattern, pattern_len, found_addrp);
3008 }
3009
3010 if (targetdebug)
3011 fprintf_unfiltered (gdb_stdlog, " = %d\n", found);
3012
3013 return found;
3014}
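
/* Example: a minimal sketch of a target_search_memory caller, looking for
   a fixed byte pattern in an arbitrary address range.  The pattern and
   range are purely illustrative.  */

static void
example_find_pattern (CORE_ADDR start, ULONGEST space_len)
{
  static const gdb_byte pattern[] = { 0xde, 0xad, 0xbe, 0xef };
  CORE_ADDR found;
  int rc = target_search_memory (start, space_len,
				 pattern, sizeof (pattern), &found);

  if (rc == 1)
    printf_filtered (_("pattern found at %s\n"), hex_string (found));
  else if (rc == 0)
    printf_filtered (_("pattern not found\n"));
  else
    warning (_("error while searching target memory"));
}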
3015
3016/* Look through the currently pushed targets. If none of them will
3017 be able to restart the currently running process, issue an error
3018 message. */
3019
3020void
3021target_require_runnable (void)
3022{
3023 struct target_ops *t;
3024
3025 for (t = target_stack; t != NULL; t = t->beneath)
3026 {
3027 /* If this target knows how to create a new program, then
3028 assume we will still be able to after killing the current
3029 one. Either killing and mourning will not pop T, or else
3030 find_default_run_target will find it again. */
3031 if (t->to_create_inferior != NULL)
3032 return;
3033
3034 /* Do not worry about thread_stratum targets that can not
3035 create inferiors. Assume they will be pushed again if
3036 necessary, and continue to the process_stratum. */
3037 if (t->to_stratum == thread_stratum
3038 || t->to_stratum == arch_stratum)
3039 continue;
3040
3041 error (_("The \"%s\" target does not support \"run\". "
3042 "Try \"help target\" or \"continue\"."),
3043 t->to_shortname);
3044 }
3045
3046 /* This function is only called if the target is running. In that
3047 case there should have been a process_stratum target and it
3048 should either know how to create inferiors, or not... */
3049 internal_error (__FILE__, __LINE__, _("No targets found"));
3050}
3051
3052/* Look through the list of possible targets for a target that can
3053 execute a run or attach command without any other data. This is
3054 used to locate the default process stratum.
3055
3056 If DO_MESG is not NULL, the result is always valid (error() is
3057 called for errors); else, return NULL on error. */
3058
3059static struct target_ops *
3060find_default_run_target (char *do_mesg)
3061{
3062 struct target_ops **t;
3063 struct target_ops *runable = NULL;
3064 int count;
3065
3066 count = 0;
3067
3068 for (t = target_structs; t < target_structs + target_struct_size;
3069 ++t)
3070 {
3071 if ((*t)->to_can_run && target_can_run (*t))
3072 {
3073 runable = *t;
3074 ++count;
3075 }
3076 }
3077
3078 if (count != 1)
3079 {
3080 if (do_mesg)
3081 error (_("Don't know how to %s. Try \"help target\"."), do_mesg);
3082 else
3083 return NULL;
3084 }
3085
3086 return runable;
3087}
3088
3089void
3090find_default_attach (struct target_ops *ops, char *args, int from_tty)
3091{
3092 struct target_ops *t;
3093
3094 t = find_default_run_target ("attach");
3095 (t->to_attach) (t, args, from_tty);
3096 return;
3097}
3098
3099void
3100find_default_create_inferior (struct target_ops *ops,
3101 char *exec_file, char *allargs, char **env,
3102 int from_tty)
3103{
3104 struct target_ops *t;
3105
3106 t = find_default_run_target ("run");
3107 (t->to_create_inferior) (t, exec_file, allargs, env, from_tty);
3108 return;
3109}
3110
3111static int
3112find_default_can_async_p (void)
3113{
3114 struct target_ops *t;
3115
3116 /* This may be called before the target is pushed on the stack;
3117 look for the default process stratum. If there's none, gdb isn't
3118 configured with a native debugger, and target remote isn't
3119 connected yet. */
3120 t = find_default_run_target (NULL);
3121 if (t && t->to_can_async_p)
3122 return (t->to_can_async_p) ();
3123 return 0;
3124}
3125
3126static int
3127find_default_is_async_p (void)
3128{
3129 struct target_ops *t;
3130
3131 /* This may be called before the target is pushed on the stack;
3132 look for the default process stratum. If there's none, gdb isn't
3133 configured with a native debugger, and target remote isn't
3134 connected yet. */
3135 t = find_default_run_target (NULL);
3136 if (t && t->to_is_async_p)
3137 return (t->to_is_async_p) ();
3138 return 0;
3139}
3140
3141static int
3142find_default_supports_non_stop (void)
3143{
3144 struct target_ops *t;
3145
3146 t = find_default_run_target (NULL);
3147 if (t && t->to_supports_non_stop)
3148 return (t->to_supports_non_stop) ();
3149 return 0;
3150}
3151
3152int
3153target_supports_non_stop (void)
3154{
3155 struct target_ops *t;
cf7f2e2d 3156
3157 for (t = &current_target; t != NULL; t = t->beneath)
3158 if (t->to_supports_non_stop)
3159 return t->to_supports_non_stop ();
3160
3161 return 0;
3162}
3163
3164/* Implement the "info proc" command. */
3165
3166int
3167target_info_proc (char *args, enum info_proc_what what)
3168{
3169 struct target_ops *t;
3170
3171 /* If we're already connected to something that can get us OS
3172 related data, use it. Otherwise, try using the native
3173 target. */
3174 if (current_target.to_stratum >= process_stratum)
3175 t = current_target.beneath;
3176 else
3177 t = find_default_run_target (NULL);
3178
3179 for (; t != NULL; t = t->beneath)
3180 {
3181 if (t->to_info_proc != NULL)
3182 {
3183 t->to_info_proc (t, args, what);
3184
3185 if (targetdebug)
3186 fprintf_unfiltered (gdb_stdlog,
3187 "target_info_proc (\"%s\", %d)\n", args, what);
3188
3189 return 1;
3190 }
3191 }
3192
3193 return 0;
3194}
3195
3196static int
3197find_default_supports_disable_randomization (void)
3198{
3199 struct target_ops *t;
3200
3201 t = find_default_run_target (NULL);
3202 if (t && t->to_supports_disable_randomization)
3203 return (t->to_supports_disable_randomization) ();
3204 return 0;
3205}
3206
3207int
3208target_supports_disable_randomization (void)
3209{
3210 struct target_ops *t;
3211
3212 for (t = &current_target; t != NULL; t = t->beneath)
3213 if (t->to_supports_disable_randomization)
3214 return t->to_supports_disable_randomization ();
3215
3216 return 0;
3217}
3218
3219char *
3220target_get_osdata (const char *type)
3221{
3222 struct target_ops *t;
3223
3224 /* If we're already connected to something that can get us OS
3225 related data, use it. Otherwise, try using the native
3226 target. */
3227 if (current_target.to_stratum >= process_stratum)
3228 t = current_target.beneath;
3229 else
3230 t = find_default_run_target ("get OS data");
3231
3232 if (!t)
3233 return NULL;
3234
3235 return target_read_stralloc (t, TARGET_OBJECT_OSDATA, type);
3236}
3237
3238/* Determine the current address space of thread PTID. */
3239
3240struct address_space *
3241target_thread_address_space (ptid_t ptid)
3242{
3243 struct address_space *aspace;
3244 struct inferior *inf;
3245 struct target_ops *t;
3246
3247 for (t = current_target.beneath; t != NULL; t = t->beneath)
3248 {
3249 if (t->to_thread_address_space != NULL)
3250 {
3251 aspace = t->to_thread_address_space (t, ptid);
3252 gdb_assert (aspace);
3253
3254 if (targetdebug)
3255 fprintf_unfiltered (gdb_stdlog,
3256 "target_thread_address_space (%s) = %d\n",
3257 target_pid_to_str (ptid),
3258 address_space_num (aspace));
3259 return aspace;
3260 }
3261 }
3262
3263 /* Fall-back to the "main" address space of the inferior. */
3264 inf = find_inferior_pid (ptid_get_pid (ptid));
3265
3266 if (inf == NULL || inf->aspace == NULL)
3267 internal_error (__FILE__, __LINE__,
3268 _("Can't determine the current "
3269 "address space of thread %s\n"),
3270 target_pid_to_str (ptid));
3271
3272 return inf->aspace;
3273}
3274
3275
3276/* Target file operations. */
3277
3278static struct target_ops *
3279default_fileio_target (void)
3280{
3281 /* If we're already connected to something that can perform
3282 file I/O, use it. Otherwise, try using the native target. */
3283 if (current_target.to_stratum >= process_stratum)
3284 return current_target.beneath;
3285 else
3286 return find_default_run_target ("file I/O");
3287}
3288
3289/* Open FILENAME on the target, using FLAGS and MODE. Return a
3290 target file descriptor, or -1 if an error occurs (and set
3291 *TARGET_ERRNO). */
3292int
3293target_fileio_open (const char *filename, int flags, int mode,
3294 int *target_errno)
3295{
3296 struct target_ops *t;
3297
3298 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3299 {
3300 if (t->to_fileio_open != NULL)
3301 {
3302 int fd = t->to_fileio_open (filename, flags, mode, target_errno);
3303
3304 if (targetdebug)
3305 fprintf_unfiltered (gdb_stdlog,
3306 "target_fileio_open (%s,0x%x,0%o) = %d (%d)\n",
3307 filename, flags, mode,
3308 fd, fd != -1 ? 0 : *target_errno);
3309 return fd;
3310 }
3311 }
3312
3313 *target_errno = FILEIO_ENOSYS;
3314 return -1;
3315}
3316
3317/* Write up to LEN bytes from WRITE_BUF to FD on the target.
3318 Return the number of bytes written, or -1 if an error occurs
3319 (and set *TARGET_ERRNO). */
3320int
3321target_fileio_pwrite (int fd, const gdb_byte *write_buf, int len,
3322 ULONGEST offset, int *target_errno)
3323{
3324 struct target_ops *t;
3325
3326 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3327 {
3328 if (t->to_fileio_pwrite != NULL)
3329 {
3330 int ret = t->to_fileio_pwrite (fd, write_buf, len, offset,
3331 target_errno);
3332
3333 if (targetdebug)
3334 fprintf_unfiltered (gdb_stdlog,
3335 "target_fileio_pwrite (%d,...,%d,%s) "
3336 "= %d (%d)\n",
3337 fd, len, pulongest (offset),
3338 ret, ret != -1 ? 0 : *target_errno);
3339 return ret;
3340 }
3341 }
3342
3343 *target_errno = FILEIO_ENOSYS;
3344 return -1;
3345}
3346
3347/* Read up to LEN bytes from FD on the target into READ_BUF.
3348 Return the number of bytes read, or -1 if an error occurs
3349 (and set *TARGET_ERRNO). */
3350int
3351target_fileio_pread (int fd, gdb_byte *read_buf, int len,
3352 ULONGEST offset, int *target_errno)
3353{
3354 struct target_ops *t;
3355
3356 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3357 {
3358 if (t->to_fileio_pread != NULL)
3359 {
3360 int ret = t->to_fileio_pread (fd, read_buf, len, offset,
3361 target_errno);
3362
3363 if (targetdebug)
3364 fprintf_unfiltered (gdb_stdlog,
3365 "target_fileio_pread (%d,...,%d,%s) "
3366 "= %d (%d)\n",
3367 fd, len, pulongest (offset),
3368 ret, ret != -1 ? 0 : *target_errno);
3369 return ret;
3370 }
3371 }
3372
3373 *target_errno = FILEIO_ENOSYS;
3374 return -1;
3375}
3376
3377/* Close FD on the target. Return 0, or -1 if an error occurs
3378 (and set *TARGET_ERRNO). */
3379int
3380target_fileio_close (int fd, int *target_errno)
3381{
3382 struct target_ops *t;
3383
3384 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3385 {
3386 if (t->to_fileio_close != NULL)
3387 {
3388 int ret = t->to_fileio_close (fd, target_errno);
3389
3390 if (targetdebug)
3391 fprintf_unfiltered (gdb_stdlog,
3392 "target_fileio_close (%d) = %d (%d)\n",
3393 fd, ret, ret != -1 ? 0 : *target_errno);
3394 return ret;
3395 }
3396 }
3397
3398 *target_errno = FILEIO_ENOSYS;
3399 return -1;
3400}
3401
3402/* Unlink FILENAME on the target. Return 0, or -1 if an error
3403 occurs (and set *TARGET_ERRNO). */
3404int
3405target_fileio_unlink (const char *filename, int *target_errno)
3406{
3407 struct target_ops *t;
3408
3409 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3410 {
3411 if (t->to_fileio_unlink != NULL)
3412 {
3413 int ret = t->to_fileio_unlink (filename, target_errno);
3414
3415 if (targetdebug)
3416 fprintf_unfiltered (gdb_stdlog,
3417 "target_fileio_unlink (%s) = %d (%d)\n",
3418 filename, ret, ret != -1 ? 0 : *target_errno);
3419 return ret;
3420 }
3421 }
3422
3423 *target_errno = FILEIO_ENOSYS;
3424 return -1;
3425}
3426
3427/* Read value of symbolic link FILENAME on the target. Return a
3428 null-terminated string allocated via xmalloc, or NULL if an error
3429 occurs (and set *TARGET_ERRNO). */
3430char *
3431target_fileio_readlink (const char *filename, int *target_errno)
3432{
3433 struct target_ops *t;
3434
3435 for (t = default_fileio_target (); t != NULL; t = t->beneath)
3436 {
3437 if (t->to_fileio_readlink != NULL)
3438 {
3439 char *ret = t->to_fileio_readlink (filename, target_errno);
3440
3441 if (targetdebug)
3442 fprintf_unfiltered (gdb_stdlog,
3443 "target_fileio_readlink (%s) = %s (%d)\n",
3444 filename, ret? ret : "(nil)",
3445 ret? 0 : *target_errno);
3446 return ret;
3447 }
3448 }
3449
3450 *target_errno = FILEIO_ENOSYS;
3451 return NULL;
3452}
3453
3454static void
3455target_fileio_close_cleanup (void *opaque)
3456{
3457 int fd = *(int *) opaque;
3458 int target_errno;
3459
3460 target_fileio_close (fd, &target_errno);
3461}
3462
3463/* Read target file FILENAME. Store the result in *BUF_P and
3464 return the size of the transferred data. PADDING additional bytes are
3465 available in *BUF_P. This is a helper function for
3466 target_fileio_read_alloc; see the declaration of that function for more
3467 information. */
3468
3469static LONGEST
3470target_fileio_read_alloc_1 (const char *filename,
3471 gdb_byte **buf_p, int padding)
3472{
3473 struct cleanup *close_cleanup;
3474 size_t buf_alloc, buf_pos;
3475 gdb_byte *buf;
3476 LONGEST n;
3477 int fd;
3478 int target_errno;
3479
3480 fd = target_fileio_open (filename, FILEIO_O_RDONLY, 0700, &target_errno);
3481 if (fd == -1)
3482 return -1;
3483
3484 close_cleanup = make_cleanup (target_fileio_close_cleanup, &fd);
3485
3486 /* Start by reading up to 4K at a time. The target will throttle
3487 this number down if necessary. */
3488 buf_alloc = 4096;
3489 buf = xmalloc (buf_alloc);
3490 buf_pos = 0;
3491 while (1)
3492 {
3493 n = target_fileio_pread (fd, &buf[buf_pos],
3494 buf_alloc - buf_pos - padding, buf_pos,
3495 &target_errno);
3496 if (n < 0)
3497 {
3498 /* An error occurred. */
3499 do_cleanups (close_cleanup);
3500 xfree (buf);
3501 return -1;
3502 }
3503 else if (n == 0)
3504 {
3505 /* Read all there was. */
3506 do_cleanups (close_cleanup);
3507 if (buf_pos == 0)
3508 xfree (buf);
3509 else
3510 *buf_p = buf;
3511 return buf_pos;
3512 }
3513
3514 buf_pos += n;
3515
3516 /* If the buffer is filling up, expand it. */
3517 if (buf_alloc < buf_pos * 2)
3518 {
3519 buf_alloc *= 2;
3520 buf = xrealloc (buf, buf_alloc);
3521 }
3522
3523 QUIT;
3524 }
3525}
3526
3527/* Read target file FILENAME. Store the result in *BUF_P and return
3528 the size of the transferred data. See the declaration of this
3529 function in "target.h" for more information about the return value. */
3530
3531LONGEST
3532target_fileio_read_alloc (const char *filename, gdb_byte **buf_p)
3533{
3534 return target_fileio_read_alloc_1 (filename, buf_p, 0);
3535}
3536
3537/* Read target file FILENAME. The result is NUL-terminated and
3538 returned as a string, allocated using xmalloc. If an error occurs
3539 or the transfer is unsupported, NULL is returned. Empty objects
3540 are returned as allocated but empty strings. A warning is issued
3541 if the result contains any embedded NUL bytes. */
3542
3543char *
3544target_fileio_read_stralloc (const char *filename)
3545{
3546 gdb_byte *buffer;
3547 char *bufstr;
3548 LONGEST i, transferred;
3549
3550 transferred = target_fileio_read_alloc_1 (filename, &buffer, 1);
3551 bufstr = (char *) buffer;
3552
3553 if (transferred < 0)
3554 return NULL;
3555
3556 if (transferred == 0)
3557 return xstrdup ("");
3558
3559 bufstr[transferred] = 0;
3560
3561 /* Check for embedded NUL bytes; but allow trailing NULs. */
3562 for (i = strlen (bufstr); i < transferred; i++)
3563 if (bufstr[i] != 0)
3564 {
3565 warning (_("target file %s "
3566 "contained unexpected null characters"),
3567 filename);
3568 break;
3569 }
3570
3571 return bufstr;
3572}
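
/* Example: a minimal sketch using the File-I/O helpers above to slurp a
   text file from the target.  The path is a hypothetical illustration;
   any file the target-side implementation can open would do.  */

static void
example_show_target_file (void)
{
  char *text = target_fileio_read_stralloc ("/proc/version");

  if (text == NULL)
    {
      warning (_("unable to read file from the target"));
      return;
    }

  fputs_unfiltered (text, gdb_stdlog);
  xfree (text);
}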
3573
3574
3575static int
3576default_region_ok_for_hw_watchpoint (CORE_ADDR addr, int len)
3577{
ef5ccd6c 3578 return (len <= gdbarch_ptr_bit (target_gdbarch ()) / TARGET_CHAR_BIT);
3579}
3580
3581static int
3582default_watchpoint_addr_within_range (struct target_ops *target,
3583 CORE_ADDR addr,
3584 CORE_ADDR start, int length)
3585{
3586 return addr >= start && addr < start + length;
3587}
3588
3589static struct gdbarch *
3590default_thread_architecture (struct target_ops *ops, ptid_t ptid)
3591{
ef5ccd6c 3592 return target_gdbarch ();
3593}
3594
3595static int
3596return_zero (void)
3597{
3598 return 0;
3599}
3600
3601static int
3602return_one (void)
3603{
3604 return 1;
3605}
3606
3607static int
3608return_minus_one (void)
3609{
3610 return -1;
3611}
3612
3613/* Find a single runnable target in the stack and return it. If for
3614 some reason there is more than one, return NULL. */
3615
3616struct target_ops *
3617find_run_target (void)
3618{
3619 struct target_ops **t;
3620 struct target_ops *runable = NULL;
3621 int count;
3622
3623 count = 0;
3624
3625 for (t = target_structs; t < target_structs + target_struct_size; ++t)
3626 {
3627 if ((*t)->to_can_run && target_can_run (*t))
3628 {
3629 runable = *t;
3630 ++count;
3631 }
3632 }
3633
3634 return (count == 1 ? runable : NULL);
3635}
3636
3637/*
3638 * Find the next target down the stack from the specified target.
3639 */
3640
3641struct target_ops *
3642find_target_beneath (struct target_ops *t)
3643{
3644 return t->beneath;
3645}
3646
3647\f
3648/* The inferior process has died. Long live the inferior! */
3649
3650void
3651generic_mourn_inferior (void)
3652{
3653 ptid_t ptid;
3654
3655 ptid = inferior_ptid;
3656 inferior_ptid = null_ptid;
3657
3658 /* Mark breakpoints uninserted in case something tries to delete a
3659 breakpoint while we delete the inferior's threads (which would
3660 fail, since the inferior is long gone). */
3661 mark_breakpoints_out ();
3662
3663 if (!ptid_equal (ptid, null_ptid))
3664 {
3665 int pid = ptid_get_pid (ptid);
cf7f2e2d 3666 exit_inferior (pid);
3667 }
3668
3669 /* Note this wipes step-resume breakpoints, so needs to be done
3670 after exit_inferior, which ends up referencing the step-resume
3671 breakpoints through clear_thread_inferior_resources. */
5796c8dc 3672 breakpoint_init_inferior (inf_exited);
ef5ccd6c 3673
3674 registers_changed ();
3675
3676 reopen_exec_file ();
3677 reinit_frame_cache ();
3678
3679 if (deprecated_detach_hook)
3680 deprecated_detach_hook ();
3681}
3682\f
3683/* Convert a normal process ID to a string. Returns the string in a
3684 static buffer. */
3685
3686char *
3687normal_pid_to_str (ptid_t ptid)
3688{
3689 static char buf[32];
3690
3691 xsnprintf (buf, sizeof buf, "process %d", ptid_get_pid (ptid));
3692 return buf;
3693}
3694
3695static char *
3696dummy_pid_to_str (struct target_ops *ops, ptid_t ptid)
3697{
3698 return normal_pid_to_str (ptid);
3699}
3700
3701/* Error-catcher for target_find_memory_regions. */
3702static int
c50c785c 3703dummy_find_memory_regions (find_memory_region_ftype ignore1, void *ignore2)
5796c8dc 3704{
cf7f2e2d 3705 error (_("Command not implemented for this target."));
3706 return 0;
3707}
3708
3709/* Error-catcher for target_make_corefile_notes. */
3710static char *
3711dummy_make_corefile_notes (bfd *ignore1, int *ignore2)
3712{
3713 error (_("Command not implemented for this target."));
3714 return NULL;
3715}
3716
3717/* Error-catcher for target_get_bookmark. */
3718static gdb_byte *
3719dummy_get_bookmark (char *ignore1, int ignore2)
5796c8dc 3720{
cf7f2e2d 3721 tcomplain ();
3722 return NULL;
3723}
3724
3725/* Error-catcher for target_goto_bookmark. */
3726static void
3727dummy_goto_bookmark (gdb_byte *ignore, int from_tty)
3728{
3729 tcomplain ();
3730}
3731
3732/* Set up the handful of non-empty slots needed by the dummy target
3733 vector. */
3734
3735static void
3736init_dummy_target (void)
3737{
3738 dummy_target.to_shortname = "None";
3739 dummy_target.to_longname = "None";
3740 dummy_target.to_doc = "";
3741 dummy_target.to_attach = find_default_attach;
3742 dummy_target.to_detach =
3743 (void (*)(struct target_ops *, char *, int))target_ignore;
3744 dummy_target.to_create_inferior = find_default_create_inferior;
3745 dummy_target.to_can_async_p = find_default_can_async_p;
3746 dummy_target.to_is_async_p = find_default_is_async_p;
3747 dummy_target.to_supports_non_stop = find_default_supports_non_stop;
3748 dummy_target.to_supports_disable_randomization
3749 = find_default_supports_disable_randomization;
3750 dummy_target.to_pid_to_str = dummy_pid_to_str;
3751 dummy_target.to_stratum = dummy_stratum;
3752 dummy_target.to_find_memory_regions = dummy_find_memory_regions;
3753 dummy_target.to_make_corefile_notes = dummy_make_corefile_notes;
3754 dummy_target.to_get_bookmark = dummy_get_bookmark;
3755 dummy_target.to_goto_bookmark = dummy_goto_bookmark;
3756 dummy_target.to_xfer_partial = default_xfer_partial;
3757 dummy_target.to_has_all_memory = (int (*) (struct target_ops *)) return_zero;
3758 dummy_target.to_has_memory = (int (*) (struct target_ops *)) return_zero;
3759 dummy_target.to_has_stack = (int (*) (struct target_ops *)) return_zero;
3760 dummy_target.to_has_registers = (int (*) (struct target_ops *)) return_zero;
3761 dummy_target.to_has_execution
3762 = (int (*) (struct target_ops *, ptid_t)) return_zero;
3763 dummy_target.to_stopped_by_watchpoint = return_zero;
3764 dummy_target.to_stopped_data_address =
3765 (int (*) (struct target_ops *, CORE_ADDR *)) return_zero;
3766 dummy_target.to_magic = OPS_MAGIC;
3767}
3768\f
3769static void
3770debug_to_open (char *args, int from_tty)
3771{
3772 debug_target.to_open (args, from_tty);
3773
3774 fprintf_unfiltered (gdb_stdlog, "target_open (%s, %d)\n", args, from_tty);
3775}
3776
3777void
3778target_close (struct target_ops *targ, int quitting)
3779{
3780 if (targ->to_xclose != NULL)
3781 targ->to_xclose (targ, quitting);
3782 else if (targ->to_close != NULL)
3783 targ->to_close (quitting);
3784
3785 if (targetdebug)
3786 fprintf_unfiltered (gdb_stdlog, "target_close (%d)\n", quitting);
3787}
3788
3789void
3790target_attach (char *args, int from_tty)
3791{
3792 struct target_ops *t;
cf7f2e2d 3793
3794 for (t = current_target.beneath; t != NULL; t = t->beneath)
3795 {
3796 if (t->to_attach != NULL)
3797 {
3798 t->to_attach (t, args, from_tty);
3799 if (targetdebug)
3800 fprintf_unfiltered (gdb_stdlog, "target_attach (%s, %d)\n",
3801 args, from_tty);
3802 return;
3803 }
3804 }