GDB (xrefs)
Loading...
Searching...
No Matches
/tmp/gdb-13.1/gdb/record-btrace.c
Go to the documentation of this file.
1/* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2023 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "record-btrace.h"
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
29#include "observable.h"
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
35#include "regcache.h"
36#include "frame-unwind.h"
37#include "hashtab.h"
38#include "infrun.h"
39#include "gdbsupport/event-loop.h"
40#include "inf-loop.h"
41#include "inferior.h"
42#include <algorithm>
43#include "gdbarch.h"
44#include "cli/cli-style.h"
45#include "async-event.h"
46#include <forward_list>
47
49 "record-btrace",
50 N_("Branch tracing target"),
51 N_("Collect control-flow trace and provide the execution history.")
52};
53
54/* The target_ops of record-btrace. */
55
56class record_btrace_target final : public target_ops
57{
58public:
59 const target_info &info () const override
61
62 strata stratum () const override { return record_stratum; }
63
64 void close () override;
65 void async (bool) override;
66
67 void detach (inferior *inf, int from_tty) override
68 { record_detach (this, inf, from_tty); }
69
70 void disconnect (const char *, int) override;
71
72 void mourn_inferior () override
73 { record_mourn_inferior (this); }
74
75 void kill () override
76 { record_kill (this); }
77
78 enum record_method record_method (ptid_t ptid) override;
79
80 void stop_recording () override;
81 void info_record () override;
82
83 void insn_history (int size, gdb_disassembly_flags flags) override;
84 void insn_history_from (ULONGEST from, int size,
85 gdb_disassembly_flags flags) override;
86 void insn_history_range (ULONGEST begin, ULONGEST end,
87 gdb_disassembly_flags flags) override;
88 void call_history (int size, record_print_flags flags) override;
89 void call_history_from (ULONGEST begin, int size, record_print_flags flags)
90 override;
91 void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
92 override;
93
94 bool record_is_replaying (ptid_t ptid) override;
95 bool record_will_replay (ptid_t ptid, int dir) override;
96 void record_stop_replaying () override;
97
99 const char *annex,
100 gdb_byte *readbuf,
101 const gdb_byte *writebuf,
102 ULONGEST offset, ULONGEST len,
103 ULONGEST *xfered_len) override;
104
105 int insert_breakpoint (struct gdbarch *,
106 struct bp_target_info *) override;
107 int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
108 enum remove_bp_reason) override;
109
110 void fetch_registers (struct regcache *, int) override;
111
112 void store_registers (struct regcache *, int) override;
113 void prepare_to_store (struct regcache *) override;
114
115 const struct frame_unwind *get_unwinder () override;
116
117 const struct frame_unwind *get_tailcall_unwinder () override;
118
119 void resume (ptid_t, int, enum gdb_signal) override;
120 ptid_t wait (ptid_t, struct target_waitstatus *, target_wait_flags) override;
121
122 void stop (ptid_t) override;
123 void update_thread_list () override;
124 bool thread_alive (ptid_t ptid) override;
125 void goto_record_begin () override;
126 void goto_record_end () override;
127 void goto_record (ULONGEST insn) override;
128
129 bool can_execute_reverse () override;
130
131 bool stopped_by_sw_breakpoint () override;
132 bool supports_stopped_by_sw_breakpoint () override;
133
134 bool stopped_by_hw_breakpoint () override;
135 bool supports_stopped_by_hw_breakpoint () override;
136
138 void prepare_to_generate_core () override;
139 void done_generating_core () override;
140};
141
143
144/* Initialize the record-btrace target ops. */
145
146/* Token associated with a new-thread observer enabling branch tracing
147 for the new thread. */
148static const gdb::observers::token record_btrace_thread_observer_token {};
149
150/* Memory access types used in set/show record btrace replay-memory-access. */
151static const char replay_memory_access_read_only[] = "read-only";
152static const char replay_memory_access_read_write[] = "read-write";
153static const char *const replay_memory_access_types[] =
154{
157 NULL
158};
159
160/* The currently allowed replay memory access type. */
162
163/* The cpu state kinds. */
165{
168 CS_CPU
170
171/* The current cpu state. */
173
174/* The current cpu for trace decode. */
175static struct btrace_cpu record_btrace_cpu;
176
177/* Command lists for "set/show record btrace". */
180
181/* The execution direction of the last resume we got. See record-full.c. */
183
184/* The async event handler for reverse/replay execution. */
186
187/* A flag indicating that we are currently generating a core file. */
189
190/* The current branch trace configuration. */
191static struct btrace_config record_btrace_conf;
192
193/* Command list for "record btrace". */
195
196/* Command lists for "set/show record btrace bts". */
199
200/* Command lists for "set/show record btrace pt". */
203
204/* Command list for "set record btrace cpu". */
206
207/* Print a record-btrace debug message. Use do ... while (0) to avoid
208 ambiguities when used in if statements. */
209
210#define DEBUG(msg, args...) \
211 do \
212 { \
213 if (record_debug != 0) \
214 gdb_printf (gdb_stdlog, \
215 "[record-btrace] " msg "\n", ##args); \
216 } \
217 while (0)
218
219
220/* Return the cpu configured by the user. Returns NULL if the cpu was
221 configured as auto. */
222const struct btrace_cpu *
224{
226 {
227 case CS_AUTO:
228 return nullptr;
229
230 case CS_NONE:
231 record_btrace_cpu.vendor = CV_UNKNOWN;
232 /* Fall through. */
233 case CS_CPU:
234 return &record_btrace_cpu;
235 }
236
237 error (_("Internal error: bad record btrace cpu state."));
238}
239
240/* Update the branch trace for the current thread and return a pointer to its
241 thread_info.
242
243 Throws an error if there is no thread or no trace. This function never
244 returns NULL. */
245
246static struct thread_info *
248{
249 DEBUG ("require");
250
251 if (inferior_ptid == null_ptid)
252 error (_("No thread."));
253
255
257
259
260 if (btrace_is_empty (tp))
261 error (_("No trace."));
262
263 return tp;
264}
265
266/* Update the branch trace for the current thread and return a pointer to its
267 branch trace information struct.
268
269 Throws an error if there is no thread or no trace. This function never
270 returns NULL. */
271
272static struct btrace_thread_info *
274{
275 struct thread_info *tp;
276
277 tp = require_btrace_thread ();
278
279 return &tp->btrace;
280}
281
282/* The new thread observer. */
283
284static void
286{
287 /* Ignore this thread if its inferior is not recorded by us. */
289 if (rec != &record_btrace_ops)
290 return;
291
292 try
293 {
295 }
296 catch (const gdb_exception_error &error)
297 {
298 warning ("%s", error.what ());
299 }
300}
301
302/* Enable automatic tracing of new threads. */
303
304static void
306{
307 DEBUG ("attach thread observer");
308
311 "record-btrace");
312}
313
314/* Disable automatic tracing of new threads. */
315
316static void
318{
319 DEBUG ("detach thread observer");
320
322}
323
324/* The record-btrace async event handler function. */
325
326static void
328{
330}
331
332/* See record-btrace.h. */
333
334void
336{
337 const char *format;
338
340
342
345 NULL, "record-btrace");
347
348 format = btrace_format_short_string (record_btrace_conf.format);
349 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
350}
351
352/* Disable btrace on a set of threads on scope exit. */
353
355{
357
359
361 {
362 for (thread_info *tp : m_threads)
363 btrace_disable (tp);
364 }
365
366 void add_thread (thread_info *thread)
367 {
368 m_threads.push_front (thread);
369 }
370
371 void discard ()
372 {
373 m_threads.clear ();
374 }
375
376private:
377 std::forward_list<thread_info *> m_threads;
378};
379
380/* Open target record-btrace. */
381
382static void
383record_btrace_target_open (const char *args, int from_tty)
384{
385 /* If we fail to enable btrace for one thread, disable it for the threads for
386 which it was successfully enabled. */
388
389 DEBUG ("open");
390
392
393 if (!target_has_execution ())
394 error (_("The program is not being run."));
395
396 for (thread_info *tp : current_inferior ()->non_exited_threads ())
397 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
398 {
400
401 btrace_disable.add_thread (tp);
402 }
403
405
406 btrace_disable.discard ();
407}
408
409/* The stop_recording method of target record-btrace. */
410
411void
413{
414 DEBUG ("stop recording");
415
417
418 for (thread_info *tp : current_inferior ()->non_exited_threads ())
419 if (tp->btrace.target != NULL)
420 btrace_disable (tp);
421}
422
423/* The disconnect method of target record-btrace. */
424
425void
427 int from_tty)
428{
429 struct target_ops *beneath = this->beneath ();
430
431 /* Do not stop recording, just clean up GDB side. */
433
434 /* Forward disconnect. */
435 beneath->disconnect (args, from_tty);
436}
437
438/* The close method of target record-btrace. */
439
440void
442{
445
446 /* Make sure automatic recording gets disabled even if we did not stop
447 recording before closing the record-btrace target. */
449
450 /* We should have already stopped recording.
451 Tear down btrace in case we have not. */
452 for (thread_info *tp : current_inferior ()->non_exited_threads ())
453 btrace_teardown (tp);
454}
455
456/* The async method of target record-btrace. */
457
458void
460{
461 if (enable)
463 else
465
466 this->beneath ()->async (enable);
467}
468
469/* Adjusts the size and returns a human readable size suffix. */
470
static const char *
record_btrace_adjust_size (unsigned int *size)
{
  /* Scale *SIZE down to the largest binary unit that divides it evenly and
     return the matching suffix; *SIZE is left untouched (and "" returned)
     when it is not a multiple of 1 kB.  */
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
496
497/* Print a BTS configuration. */
498
499static void
500record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
501{
502 const char *suffix;
503 unsigned int size;
504
505 size = conf->size;
506 if (size > 0)
507 {
509 gdb_printf (_("Buffer size: %u%s.\n"), size, suffix);
510 }
511}
512
513/* Print an Intel Processor Trace configuration. */
514
515static void
516record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
517{
518 const char *suffix;
519 unsigned int size;
520
521 size = conf->size;
522 if (size > 0)
523 {
525 gdb_printf (_("Buffer size: %u%s.\n"), size, suffix);
526 }
527}
528
529/* Print a branch tracing configuration. */
530
531static void
532record_btrace_print_conf (const struct btrace_config *conf)
533{
534 gdb_printf (_("Recording format: %s.\n"),
535 btrace_format_string (conf->format));
536
537 switch (conf->format)
538 {
539 case BTRACE_FORMAT_NONE:
540 return;
541
542 case BTRACE_FORMAT_BTS:
543 record_btrace_print_bts_conf (&conf->bts);
544 return;
545
546 case BTRACE_FORMAT_PT:
547 record_btrace_print_pt_conf (&conf->pt);
548 return;
549 }
550
551 internal_error (_("Unknown branch trace format."));
552}
553
554/* The info_record method of target record-btrace. */
555
556void
558{
559 struct btrace_thread_info *btinfo;
560 const struct btrace_config *conf;
561 struct thread_info *tp;
562 unsigned int insns, calls, gaps;
563
564 DEBUG ("info");
565
566 if (inferior_ptid == null_ptid)
567 error (_("No thread."));
568
569 tp = inferior_thread ();
570
572
573 btinfo = &tp->btrace;
574
575 conf = ::btrace_conf (btinfo);
576 if (conf != NULL)
578
580
581 insns = 0;
582 calls = 0;
583 gaps = 0;
584
585 if (!btrace_is_empty (tp))
586 {
587 struct btrace_call_iterator call;
588 struct btrace_insn_iterator insn;
589
590 btrace_call_end (&call, btinfo);
591 btrace_call_prev (&call, 1);
592 calls = btrace_call_number (&call);
593
594 btrace_insn_end (&insn, btinfo);
595 insns = btrace_insn_number (&insn);
596
597 /* If the last instruction is not a gap, it is the current instruction
598 that is not actually part of the record. */
599 if (btrace_insn_get (&insn) != NULL)
600 insns -= 1;
601
602 gaps = btinfo->ngaps;
603 }
604
605 gdb_printf (_("Recorded %u instructions in %u functions (%u gaps) "
606 "for thread %s (%s).\n"), insns, calls, gaps,
607 print_thread_id (tp),
608 target_pid_to_str (tp->ptid).c_str ());
609
610 if (btrace_is_replaying (tp))
611 gdb_printf (_("Replay in progress. At instruction %u.\n"),
612 btrace_insn_number (btinfo->replay));
613}
614
615/* Print a decode error. */
616
617static void
618btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
619 enum btrace_format format)
620{
621 const char *errstr = btrace_decode_error (format, errcode);
622
623 uiout->text (_("["));
624 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
625 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
626 {
627 uiout->text (_("decode error ("));
628 uiout->field_signed ("errcode", errcode);
629 uiout->text (_("): "));
630 }
631 uiout->text (errstr);
632 uiout->text (_("]\n"));
633}
634
635/* A range of source lines. */
636
struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};
648
649/* Construct a line range. */
650
651static struct btrace_line_range
653{
655
656 range.symtab = symtab;
657 range.begin = begin;
658 range.end = end;
659
660 return range;
661}
662
663/* Add a line to a line range. */
664
665static struct btrace_line_range
667{
668 if (range.end <= range.begin)
669 {
670 /* This is the first entry. */
671 range.begin = line;
672 range.end = line + 1;
673 }
674 else if (line < range.begin)
675 range.begin = line;
676 else if (range.end < line)
677 range.end = line;
678
679 return range;
680}
681
682/* Return non-zero if RANGE is empty, zero otherwise. */
683
684static int
686{
687 return range.end <= range.begin;
688}
689
690/* Return non-zero if LHS contains RHS, zero otherwise. */
691
692static int
694 struct btrace_line_range rhs)
695{
696 return ((lhs.symtab == rhs.symtab)
697 && (lhs.begin <= rhs.begin)
698 && (rhs.end <= lhs.end));
699}
700
701/* Find the line range associated with PC. */
702
703static struct btrace_line_range
705{
707 struct linetable_entry *lines;
708 struct linetable *ltable;
709 struct symtab *symtab;
710 int nlines, i;
711
713 if (symtab == NULL)
714 return btrace_mk_line_range (NULL, 0, 0);
715
716 ltable = symtab->linetable ();
717 if (ltable == NULL)
718 return btrace_mk_line_range (symtab, 0, 0);
719
720 nlines = ltable->nitems;
721 lines = ltable->item;
722 if (nlines <= 0)
723 return btrace_mk_line_range (symtab, 0, 0);
724
726 for (i = 0; i < nlines - 1; i++)
727 {
728 /* The test of is_stmt here was added when the is_stmt field was
729 introduced to the 'struct linetable_entry' structure. This
730 ensured that this loop maintained the same behaviour as before we
731 introduced is_stmt. That said, it might be that we would be
732 better off not checking is_stmt here, this would lead to us
733 possibly adding more line numbers to the range. At the time this
734 change was made I was unsure how to test this so chose to go with
735 maintaining the existing experience. */
736 if ((lines[i].pc == pc) && (lines[i].line != 0)
737 && (lines[i].is_stmt == 1))
738 range = btrace_line_range_add (range, lines[i].line);
739 }
740
741 return range;
742}
743
744/* Print source lines in LINES to UIOUT.
745
746 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
747 instructions corresponding to that source line. When printing a new source
748 line, we do the cleanups for the open chain and open a new cleanup chain for
749 the new source line. If the source line range in LINES is not empty, this
750 function will leave the cleanup chain for the last printed source line open
751 so instructions can be added to it. */
752
753static void
754btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
755 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
756 gdb::optional<ui_out_emit_list> *asm_list,
757 gdb_disassembly_flags flags)
758{
759 print_source_lines_flags psl_flags;
760
762 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
763
764 for (int line = lines.begin; line < lines.end; ++line)
765 {
766 asm_list->reset ();
767
768 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
769
770 print_source_lines (lines.symtab, line, line + 1, psl_flags);
771
772 asm_list->emplace (uiout, "line_asm_insn");
773 }
774}
775
776/* Disassemble a section of the recorded instruction trace. */
777
778static void
780 const struct btrace_thread_info *btinfo,
781 const struct btrace_insn_iterator *begin,
782 const struct btrace_insn_iterator *end,
783 gdb_disassembly_flags flags)
784{
785 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
787
789
790 struct gdbarch *gdbarch = target_gdbarch ();
791 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
792
793 ui_out_emit_list list_emitter (uiout, "asm_insns");
794
795 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
796 gdb::optional<ui_out_emit_list> asm_list;
797
799
800 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
801 btrace_insn_next (&it, 1))
802 {
803 const struct btrace_insn *insn;
804
805 insn = btrace_insn_get (&it);
806
807 /* A NULL instruction indicates a gap in the trace. */
808 if (insn == NULL)
809 {
810 const struct btrace_config *conf;
811
812 conf = btrace_conf (btinfo);
813
814 /* We have trace so we must have a configuration. */
815 gdb_assert (conf != NULL);
816
817 uiout->field_fmt ("insn-number", "%u",
818 btrace_insn_number (&it));
819 uiout->text ("\t");
820
822 conf->format);
823 }
824 else
825 {
826 struct disasm_insn dinsn;
827
828 if ((flags & DISASSEMBLY_SOURCE) != 0)
829 {
830 struct btrace_line_range lines;
831
832 lines = btrace_find_line_range (insn->pc);
833 if (!btrace_line_range_is_empty (lines)
834 && !btrace_line_range_contains_range (last_lines, lines))
835 {
836 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
837 flags);
838 last_lines = lines;
839 }
840 else if (!src_and_asm_tuple.has_value ())
841 {
842 gdb_assert (!asm_list.has_value ());
843
844 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
845
846 /* No source information. */
847 asm_list.emplace (uiout, "line_asm_insn");
848 }
849
850 gdb_assert (src_and_asm_tuple.has_value ());
851 gdb_assert (asm_list.has_value ());
852 }
853
854 memset (&dinsn, 0, sizeof (dinsn));
855 dinsn.number = btrace_insn_number (&it);
856 dinsn.addr = insn->pc;
857
858 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
859 dinsn.is_speculative = 1;
860
861 disasm.pretty_print_insn (&dinsn, flags);
862 }
863 }
864}
865
866/* The insn_history method of target record-btrace. */
867
868void
870{
871 struct btrace_thread_info *btinfo;
872 struct btrace_insn_history *history;
873 struct btrace_insn_iterator begin, end;
874 struct ui_out *uiout;
875 unsigned int context, covered;
876
877 uiout = current_uiout;
878 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
879 context = abs (size);
880 if (context == 0)
881 error (_("Bad record instruction-history-size."));
882
883 btinfo = require_btrace ();
884 history = btinfo->insn_history;
885 if (history == NULL)
886 {
887 struct btrace_insn_iterator *replay;
888
889 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
890
891 /* If we're replaying, we start at the replay position. Otherwise, we
892 start at the tail of the trace. */
893 replay = btinfo->replay;
894 if (replay != NULL)
895 begin = *replay;
896 else
897 btrace_insn_end (&begin, btinfo);
898
899 /* We start from here and expand in the requested direction. Then we
900 expand in the other direction, as well, to fill up any remaining
901 context. */
902 end = begin;
903 if (size < 0)
904 {
905 /* We want the current position covered, as well. */
906 covered = btrace_insn_next (&end, 1);
907 covered += btrace_insn_prev (&begin, context - covered);
908 covered += btrace_insn_next (&end, context - covered);
909 }
910 else
911 {
912 covered = btrace_insn_next (&end, context);
913 covered += btrace_insn_prev (&begin, context - covered);
914 }
915 }
916 else
917 {
918 begin = history->begin;
919 end = history->end;
920
921 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
922 btrace_insn_number (&begin), btrace_insn_number (&end));
923
924 if (size < 0)
925 {
926 end = begin;
927 covered = btrace_insn_prev (&begin, context);
928 }
929 else
930 {
931 begin = end;
932 covered = btrace_insn_next (&end, context);
933 }
934 }
935
936 if (covered > 0)
937 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
938 else
939 {
940 if (size < 0)
941 gdb_printf (_("At the start of the branch trace record.\n"));
942 else
943 gdb_printf (_("At the end of the branch trace record.\n"));
944 }
945
946 btrace_set_insn_history (btinfo, &begin, &end);
947}
948
949/* The insn_history_range method of target record-btrace. */
950
951void
953 gdb_disassembly_flags flags)
954{
955 struct btrace_thread_info *btinfo;
956 struct btrace_insn_iterator begin, end;
957 struct ui_out *uiout;
958 unsigned int low, high;
959 int found;
960
961 uiout = current_uiout;
962 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
963 low = from;
964 high = to;
965
966 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
967
968 /* Check for wrap-arounds. */
969 if (low != from || high != to)
970 error (_("Bad range."));
971
972 if (high < low)
973 error (_("Bad range."));
974
975 btinfo = require_btrace ();
976
977 found = btrace_find_insn_by_number (&begin, btinfo, low);
978 if (found == 0)
979 error (_("Range out of bounds."));
980
981 found = btrace_find_insn_by_number (&end, btinfo, high);
982 if (found == 0)
983 {
984 /* Silently truncate the range. */
985 btrace_insn_end (&end, btinfo);
986 }
987 else
988 {
989 /* We want both begin and end to be inclusive. */
990 btrace_insn_next (&end, 1);
991 }
992
993 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
994 btrace_set_insn_history (btinfo, &begin, &end);
995}
996
997/* The insn_history_from method of target record-btrace. */
998
999void
1001 gdb_disassembly_flags flags)
1002{
1003 ULONGEST begin, end, context;
1004
1005 context = abs (size);
1006 if (context == 0)
1007 error (_("Bad record instruction-history-size."));
1008
1009 if (size < 0)
1010 {
1011 end = from;
1012
1013 if (from < context)
1014 begin = 0;
1015 else
1016 begin = from - context + 1;
1017 }
1018 else
1019 {
1020 begin = from;
1021 end = from + context - 1;
1022
1023 /* Check for wrap-around. */
1024 if (end < begin)
1025 end = ULONGEST_MAX;
1026 }
1027
1028 insn_history_range (begin, end, flags);
1029}
1030
1031/* Print the instruction number range for a function call history line. */
1032
1033static void
1035 const struct btrace_function *bfun)
1036{
1037 unsigned int begin, end, size;
1038
1039 size = bfun->insn.size ();
1040 gdb_assert (size > 0);
1041
1042 begin = bfun->insn_offset;
1043 end = begin + size - 1;
1044
1045 uiout->field_unsigned ("insn begin", begin);
1046 uiout->text (",");
1047 uiout->field_unsigned ("insn end", end);
1048}
1049
1050/* Compute the lowest and highest source line for the instructions in BFUN
1051 and return them in PBEGIN and PEND.
1052 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1053 result from inlining or macro expansion. */
1054
1055static void
1057 int *pbegin, int *pend)
1058{
1059 struct symtab *symtab;
1060 struct symbol *sym;
1061 int begin, end;
1062
1063 begin = INT_MAX;
1064 end = INT_MIN;
1065
1066 sym = bfun->sym;
1067 if (sym == NULL)
1068 goto out;
1069
1070 symtab = sym->symtab ();
1071
1072 for (const btrace_insn &insn : bfun->insn)
1073 {
1074 struct symtab_and_line sal;
1075
1076 sal = find_pc_line (insn.pc, 0);
1077 if (sal.symtab != symtab || sal.line == 0)
1078 continue;
1079
1080 begin = std::min (begin, sal.line);
1081 end = std::max (end, sal.line);
1082 }
1083
1084 out:
1085 *pbegin = begin;
1086 *pend = end;
1087}
1088
1089/* Print the source line information for a function call history line. */
1090
1091static void
1093 const struct btrace_function *bfun)
1094{
1095 struct symbol *sym;
1096 int begin, end;
1097
1098 sym = bfun->sym;
1099 if (sym == NULL)
1100 return;
1101
1102 uiout->field_string ("file",
1105
1106 btrace_compute_src_line_range (bfun, &begin, &end);
1107 if (end < begin)
1108 return;
1109
1110 uiout->text (":");
1111 uiout->field_signed ("min line", begin);
1112
1113 if (end == begin)
1114 return;
1115
1116 uiout->text (",");
1117 uiout->field_signed ("max line", end);
1118}
1119
1120/* Get the name of a branch trace function. */
1121
1122static const char *
1124{
1125 struct minimal_symbol *msym;
1126 struct symbol *sym;
1127
1128 if (bfun == NULL)
1129 return "??";
1130
1131 msym = bfun->msym;
1132 sym = bfun->sym;
1133
1134 if (sym != NULL)
1135 return sym->print_name ();
1136 else if (msym != NULL)
1137 return msym->print_name ();
1138 else
1139 return "??";
1140}
1141
1142/* Disassemble a section of the recorded function trace. */
1143
1144static void
1146 const struct btrace_thread_info *btinfo,
1147 const struct btrace_call_iterator *begin,
1148 const struct btrace_call_iterator *end,
1149 int int_flags)
1150{
1151 struct btrace_call_iterator it;
1152 record_print_flags flags = (enum record_print_flag) int_flags;
1153
1154 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1155 btrace_call_number (end));
1156
1157 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1158 {
1159 const struct btrace_function *bfun;
1160 struct minimal_symbol *msym;
1161 struct symbol *sym;
1162
1163 bfun = btrace_call_get (&it);
1164 sym = bfun->sym;
1165 msym = bfun->msym;
1166
1167 /* Print the function index. */
1168 uiout->field_unsigned ("index", bfun->number);
1169 uiout->text ("\t");
1170
1171 /* Indicate gaps in the trace. */
1172 if (bfun->errcode != 0)
1173 {
1174 const struct btrace_config *conf;
1175
1176 conf = btrace_conf (btinfo);
1177
1178 /* We have trace so we must have a configuration. */
1179 gdb_assert (conf != NULL);
1180
1181 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1182
1183 continue;
1184 }
1185
1186 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1187 {
1188 int level = bfun->level + btinfo->level, i;
1189
1190 for (i = 0; i < level; ++i)
1191 uiout->text (" ");
1192 }
1193
1194 if (sym != NULL)
1195 uiout->field_string ("function", sym->print_name (),
1197 else if (msym != NULL)
1198 uiout->field_string ("function", msym->print_name (),
1200 else if (!uiout->is_mi_like_p ())
1201 uiout->field_string ("function", "??",
1203
1204 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1205 {
1206 uiout->text (_("\tinst "));
1207 btrace_call_history_insn_range (uiout, bfun);
1208 }
1209
1210 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1211 {
1212 uiout->text (_("\tat "));
1213 btrace_call_history_src_line (uiout, bfun);
1214 }
1215
1216 uiout->text ("\n");
1217 }
1218}
1219
1220/* The call_history method of target record-btrace. */
1221
1222void
1224{
1225 struct btrace_thread_info *btinfo;
1226 struct btrace_call_history *history;
1227 struct btrace_call_iterator begin, end;
1228 struct ui_out *uiout;
1229 unsigned int context, covered;
1230
1231 uiout = current_uiout;
1232 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
1233 context = abs (size);
1234 if (context == 0)
1235 error (_("Bad record function-call-history-size."));
1236
1237 btinfo = require_btrace ();
1238 history = btinfo->call_history;
1239 if (history == NULL)
1240 {
1241 struct btrace_insn_iterator *replay;
1242
1243 DEBUG ("call-history (0x%x): %d", (int) flags, size);
1244
1245 /* If we're replaying, we start at the replay position. Otherwise, we
1246 start at the tail of the trace. */
1247 replay = btinfo->replay;
1248 if (replay != NULL)
1249 {
1250 begin.btinfo = btinfo;
1251 begin.index = replay->call_index;
1252 }
1253 else
1254 btrace_call_end (&begin, btinfo);
1255
1256 /* We start from here and expand in the requested direction. Then we
1257 expand in the other direction, as well, to fill up any remaining
1258 context. */
1259 end = begin;
1260 if (size < 0)
1261 {
1262 /* We want the current position covered, as well. */
1263 covered = btrace_call_next (&end, 1);
1264 covered += btrace_call_prev (&begin, context - covered);
1265 covered += btrace_call_next (&end, context - covered);
1266 }
1267 else
1268 {
1269 covered = btrace_call_next (&end, context);
1270 covered += btrace_call_prev (&begin, context- covered);
1271 }
1272 }
1273 else
1274 {
1275 begin = history->begin;
1276 end = history->end;
1277
1278 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
1279 btrace_call_number (&begin), btrace_call_number (&end));
1280
1281 if (size < 0)
1282 {
1283 end = begin;
1284 covered = btrace_call_prev (&begin, context);
1285 }
1286 else
1287 {
1288 begin = end;
1289 covered = btrace_call_next (&end, context);
1290 }
1291 }
1292
1293 if (covered > 0)
1294 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1295 else
1296 {
1297 if (size < 0)
1298 gdb_printf (_("At the start of the branch trace record.\n"));
1299 else
1300 gdb_printf (_("At the end of the branch trace record.\n"));
1301 }
1302
1303 btrace_set_call_history (btinfo, &begin, &end);
1304}
1305
1306/* The call_history_range method of target record-btrace. */
1307
1308void
1310 record_print_flags flags)
1311{
1312 struct btrace_thread_info *btinfo;
1313 struct btrace_call_iterator begin, end;
1314 struct ui_out *uiout;
1315 unsigned int low, high;
1316 int found;
1317
1318 uiout = current_uiout;
1319 ui_out_emit_tuple tuple_emitter (uiout, "func history");
1320 low = from;
1321 high = to;
1322
1323 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
1324
1325 /* Check for wrap-arounds. */
1326 if (low != from || high != to)
1327 error (_("Bad range."));
1328
1329 if (high < low)
1330 error (_("Bad range."));
1331
1332 btinfo = require_btrace ();
1333
1334 found = btrace_find_call_by_number (&begin, btinfo, low);
1335 if (found == 0)
1336 error (_("Range out of bounds."));
1337
1338 found = btrace_find_call_by_number (&end, btinfo, high);
1339 if (found == 0)
1340 {
1341 /* Silently truncate the range. */
1342 btrace_call_end (&end, btinfo);
1343 }
1344 else
1345 {
1346 /* We want both begin and end to be inclusive. */
1347 btrace_call_next (&end, 1);
1348 }
1349
1350 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1351 btrace_set_call_history (btinfo, &begin, &end);
1352}
1353
1354/* The call_history_from method of target record-btrace. */
1355
1356 void
/* NOTE(review): the definition line (record_btrace_target::
   call_history_from with its leading parameters, presumably
   ULONGEST from / int size) is missing from this copy.  */
1358 record_print_flags flags)
1359 {
1360 ULONGEST begin, end, context;
1361 
/* SIZE's magnitude is the number of functions to print; its sign is the
   direction (negative = backwards from FROM).  */
1362 context = abs (size);
1363 if (context == 0)
1364 error (_("Bad record function-call-history-size."));
1365 
1366 if (size < 0)
1367 {
1368 end = from;
1369 
/* Clamp at the start of the trace instead of wrapping below zero.  */
1370 if (from < context)
1371 begin = 0;
1372 else
1373 begin = from - context + 1;
1374 }
1375 else
1376 {
1377 begin = from;
1378 end = from + context - 1;
1379 
1380 /* Check for wrap-around. */
1381 if (end < begin)
1382 end = ULONGEST_MAX;
1383 }
1384 
/* Delegate to the inclusive-range variant.  */
1385 call_history_range ( begin, end, flags);
1386 }
1387
1388/* The record_method method of target record-btrace. */
1389
1390 enum record_method
/* NOTE(review): the definition line (record_btrace_target::record_method
   (ptid_t ptid)) and the declaration of proc_target used below are
   missing from this copy.  */
1392 {
1394 thread_info *const tp = find_thread_ptid (proc_target, ptid);
1395 
1396 if (tp == NULL)
1397 error (_("No thread."));
1398 
/* A thread records via btrace iff it has a btrace target attached.  */
1399 if (tp->btrace.target == NULL)
1400 return RECORD_METHOD_NONE;
1401 
1402 return RECORD_METHOD_BTRACE;
1403 }
1404 
1405 /* The record_is_replaying method of target record-btrace. */
1406 
1407 bool
/* NOTE(review): definition line (record_btrace_target::record_is_replaying
   (ptid_t ptid)) and proc_target declaration stripped by extraction.
   Returns true if any matching non-exited thread is replaying.  */
1409 {
1411 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
1412 if (btrace_is_replaying (tp))
1413 return true;
1414 
1415 return false;
1416 }
1417 
1418 /* The record_will_replay method of target record-btrace. */
1419 
1420 bool
/* NOTE(review): definition line (record_btrace_target::record_will_replay
   (ptid_t ptid, int dir)) stripped by extraction.  Reverse execution
   always implies replaying.  */
1422 {
1423 return dir == EXEC_REVERSE || record_is_replaying (ptid);
1424 }
1425
1426/* The xfer_partial method of target record-btrace. */
1427
1430 const char *annex, gdb_byte *readbuf,
1431 const gdb_byte *writebuf, ULONGEST offset,
1432 ULONGEST len, ULONGEST *xfered_len)
1433{
1434 /* Filter out requests that don't make sense during replay. */
1438 {
1439 switch (object)
1440 {
1442 {
1443 const struct target_section *section;
1444
1445 /* We do not allow writing memory in general. */
1446 if (writebuf != NULL)
1447 {
1448 *xfered_len = len;
1450 }
1451
1452 /* We allow reading readonly memory. */
1453 section = target_section_by_addr (this, offset);
1454 if (section != NULL)
1455 {
1456 /* Check if the section we found is readonly. */
1457 if ((bfd_section_flags (section->the_bfd_section)
1458 & SEC_READONLY) != 0)
1459 {
1460 /* Truncate the request to fit into this section. */
1461 len = std::min (len, section->endaddr - offset);
1462 break;
1463 }
1464 }
1465
1466 *xfered_len = len;
1468 }
1469 }
1470 }
1471
1472 /* Forward the request. */
1473 return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
1474 offset, len, xfered_len);
1475}
1476
1477/* The insert_breakpoint method of target record-btrace. */
1478
1479 int
/* NOTE(review): the definition line (record_btrace_target::
   insert_breakpoint (struct gdbarch *gdbarch, ...)) was stripped.  */
1481 struct bp_target_info *bp_tgt)
1482 {
1483 const char *old;
1484 int ret;
1485 
1486 /* Inserting breakpoints requires accessing memory. Allow it for the
1487 duration of this function. */
/* NOTE(review): the lines that save and override the memory-access
   permission (replay_memory_access) were stripped here; OLD holds the
   saved value restored below.  */
1490 
1491 ret = 0;
1492 try
1493 {
1494 ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
1495 }
1496 catch (const gdb_exception &except)
1497 {
/* Restore the saved permission before propagating the error.  */
1499 throw;
1500 }
/* NOTE(review): the non-throwing restore of the saved permission was
   stripped here.  */
1502 
1503 return ret;
1504 }
1505 
1506 /* The remove_breakpoint method of target record-btrace. */
1507 
1508 int
/* NOTE(review): the definition line (record_btrace_target::
   remove_breakpoint (struct gdbarch *gdbarch, ...)) was stripped.  */
1510 struct bp_target_info *bp_tgt,
1511 enum remove_bp_reason reason)
1512 {
1513 const char *old;
1514 int ret;
1515 
1516 /* Removing breakpoints requires accessing memory. Allow it for the
1517 duration of this function. */
/* NOTE(review): the save/override of the memory-access permission was
   stripped here, mirroring insert_breakpoint above.  */
1520 
1521 ret = 0;
1522 try
1523 {
1524 ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
1525 }
1526 catch (const gdb_exception &except)
1527 {
/* Restore the saved permission before propagating the error.  */
1529 throw;
1530 }
/* NOTE(review): the non-throwing restore was stripped here.  */
1532 
1533 return ret;
1534 }
1535
1536/* The fetch_registers method of target record-btrace. */
1537
1538 void
/* NOTE(review): the definition line (record_btrace_target::fetch_registers
   (struct regcache *regcache, int regno)) was stripped.  */
1540 {
1541 btrace_insn_iterator *replay = nullptr;
1542 
1543 /* Thread-db may ask for a thread's registers before GDB knows about the
1544 thread. We forward the request to the target beneath in this
1545 case. */
/* NOTE(review): the lookup of TP (find_thread_ptid on the regcache's
   ptid) was stripped here.  */
1547 if (tp != nullptr)
1548 replay = tp->btrace.replay;
1549 
1550 if (replay != nullptr && !record_btrace_generating_corefile)
1551 {
1552 const struct btrace_insn *insn;
1553 struct gdbarch *gdbarch;
1554 int pcreg;
1555 
1556 gdbarch = regcache->arch ();
1557 pcreg = gdbarch_pc_regnum (gdbarch);
1558 if (pcreg < 0)
1559 return;
1560 
1561 /* We can only provide the PC register. */
1562 if (regno >= 0 && regno != pcreg)
1563 return;
1564 
/* The replay iterator's current instruction gives the historical PC.  */
1565 insn = btrace_insn_get (replay);
1566 gdb_assert (insn != NULL);
1567 
1568 regcache->raw_supply (regno, &insn->pc);
1569 }
1570 else
1571 this->beneath ()->fetch_registers (regcache, regno);
1572 }
1573
1574/* The store_registers method of target record-btrace. */
1575
1576 void
/* NOTE(review): the definition line (record_btrace_target::store_registers)
   and the replay-check condition guarding the error below were stripped.  */
1578 {
1581 error (_("Cannot write registers while replaying."));
1582 
1583 gdb_assert (may_write_registers);
1584 
1585 this->beneath ()->store_registers (regcache, regno);
1586 }
1587 
1588 /* The prepare_to_store method of target record-btrace. */
1589 
1590 void
/* NOTE(review): the definition line (record_btrace_target::prepare_to_store)
   and the replay-check condition for the early return were stripped.
   While replaying, stores are not forwarded beneath.  */
1592 {
1595 return;
1596 
1597 this->beneath ()->prepare_to_store (regcache);
1598 }
1599
1600/* The branch trace frame cache. */
1601
1603{
1604 /* The thread. */
1606
1607 /* The frame info. */
1609
1610 /* The branch trace function segment. */
1611 const struct btrace_function *bfun;
1612};
1613
1614/* A struct btrace_frame_cache hash table indexed by NEXT. */
1615
1616static htab_t bfcache;
1617
1618/* hash_f for htab_create_alloc of bfcache. */
1619
1620static hashval_t
1621bfcache_hash (const void *arg)
1622{
1623 const struct btrace_frame_cache *cache
1624 = (const struct btrace_frame_cache *) arg;
1625
1626 return htab_hash_pointer (cache->frame);
1627}
1628
1629/* eq_f for htab_create_alloc of bfcache. */
1630
1631static int
1632bfcache_eq (const void *arg1, const void *arg2)
1633{
1634 const struct btrace_frame_cache *cache1
1635 = (const struct btrace_frame_cache *) arg1;
1636 const struct btrace_frame_cache *cache2
1637 = (const struct btrace_frame_cache *) arg2;
1638
1639 return cache1->frame == cache2->frame;
1640}
1641
1642/* Create a new btrace frame cache. */
1643
1644 static struct btrace_frame_cache *
/* NOTE(review): the definition line (bfcache_new (frame_info_ptr frame))
   was stripped by extraction.  */
1646 {
1647 struct btrace_frame_cache *cache;
1648 void **slot;
1649 
/* NOTE(review): the allocation of CACHE (frame-obstack/XCNEW style) was
   stripped here; it must precede the assignment below.  */
1651 cache->frame = frame.get ();
1652 
/* Register the new entry; a frame must not be cached twice.  */
1653 slot = htab_find_slot (bfcache, cache, INSERT);
1654 gdb_assert (*slot == NULL);
1655 *slot = cache;
1656 
1657 return cache;
1658 }
1659 
1660 /* Extract the branch trace function from a branch trace frame. */
1661 
1662 static const struct btrace_function *
/* NOTE(review): the definition line (btrace_get_frame_function
   (frame_info_ptr frame)) was stripped by extraction.  */
1664 {
1665 const struct btrace_frame_cache *cache;
1666 struct btrace_frame_cache pattern;
1667 void **slot;
1668 
/* Look up by frame pointer only; bfcache_eq ignores other members.  */
1669 pattern.frame = frame.get ();
1670 
1671 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1672 if (slot == NULL)
1673 return NULL;
1674 
1675 cache = (const struct btrace_frame_cache *) *slot;
1676 return cache->bfun;
1677 }
1678
1679/* Implement stop_reason method for record_btrace_frame_unwind. */
1680
1681 static enum unwind_stop_reason
/* NOTE(review): the definition line (record_btrace_frame_unwind_stop_reason
   (frame_info_ptr this_frame, ...)) was stripped by extraction.  */
1683 void **this_cache)
1684 {
1685 const struct btrace_frame_cache *cache;
1686 const struct btrace_function *bfun;
1687 
1688 cache = (const struct btrace_frame_cache *) *this_cache;
1689 bfun = cache->bfun;
1690 gdb_assert (bfun != NULL);
1691 
/* No recorded caller (up == 0) means we cannot unwind further.  */
1692 if (bfun->up == 0)
1693 return UNWIND_UNAVAILABLE;
1694 
1695 return UNWIND_NO_REASON;
1696 }
1697
1698/* Implement this_id method for record_btrace_frame_unwind. */
1699
1700 static void
1701 record_btrace_frame_this_id (frame_info_ptr this_frame, void **this_cache,
1702 struct frame_id *this_id)
1703 {
1704 const struct btrace_frame_cache *cache;
1705 const struct btrace_function *bfun;
1706 struct btrace_call_iterator it;
1707 CORE_ADDR code, special;
1708 
1709 cache = (const struct btrace_frame_cache *) *this_cache;
1710 
1711 bfun = cache->bfun;
1712 gdb_assert (bfun != NULL);
1713 
/* Walk to the first segment of this function (following PREV links) so
   all segments of one function share the same frame id.  */
1714 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1715 bfun = btrace_call_get (&it);
1716 
/* Use the function start and the segment number to disambiguate
   recursive invocations; the stack itself is unavailable.  */
1717 code = get_frame_func (this_frame);
1718 special = bfun->number;
1719 
1720 *this_id = frame_id_build_unavailable_stack_special (code, special);
1721 
1722 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1723 btrace_get_bfun_name (cache->bfun),
1724 core_addr_to_string_nz (this_id->code_addr),
1725 core_addr_to_string_nz (this_id->special_addr));
1726 }
1727
1728/* Implement prev_register method for record_btrace_frame_unwind. */
1729
1730 static struct value *
/* NOTE(review): the definition line (record_btrace_frame_prev_register
   (frame_info_ptr this_frame, ...)) was stripped by extraction.  */
1732 void **this_cache,
1733 int regnum)
1734 {
1735 const struct btrace_frame_cache *cache;
1736 const struct btrace_function *bfun, *caller;
1737 struct btrace_call_iterator it;
1738 struct gdbarch *gdbarch;
1739 CORE_ADDR pc;
1740 int pcreg;
1741 
/* The branch trace only lets us reconstruct the PC; every other
   register is unavailable in history.  */
1742 gdbarch = get_frame_arch (this_frame);
1743 pcreg = gdbarch_pc_regnum (gdbarch);
1744 if (pcreg < 0 || regnum != pcreg)
1745 throw_error (NOT_AVAILABLE_ERROR,
1746 _("Registers are not available in btrace record history"));
1747 
1748 cache = (const struct btrace_frame_cache *) *this_cache;
1749 bfun = cache->bfun;
1750 gdb_assert (bfun != NULL);
1751 
1752 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
1753 throw_error (NOT_AVAILABLE_ERROR,
1754 _("No caller in btrace record history"));
1755 
1756 caller = btrace_call_get (&it);
1757 
/* If we returned to the caller, it resumes at its first recorded
   instruction; otherwise after its last recorded (call) instruction.  */
1758 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1759 pc = caller->insn.front ().pc;
1760 else
1761 {
1762 pc = caller->insn.back ().pc;
1763 pc += gdb_insn_length (gdbarch, pc);
1764 }
1765 
1766 DEBUG ("[frame] unwound PC in %s on level %d: %s",
/* NOTE(review): the first DEBUG arguments (bfun name/level) were
   stripped from this copy.  */
1768 core_addr_to_string_nz (pc));
1769 
1770 return frame_unwind_got_address (this_frame, regnum, pc);
1771 }
1772
1773/* Implement sniffer method for record_btrace_frame_unwind. */
1774
1775 static int
/* NOTE(review): the definition line (record_btrace_frame_sniffer
   (const struct frame_unwind *self, ...)) was stripped by extraction.  */
1777 frame_info_ptr this_frame,
1778 void **this_cache)
1779 {
1780 const struct btrace_function *bfun;
1781 struct btrace_frame_cache *cache;
1782 struct thread_info *tp;
1783 frame_info_ptr next;
1784 
1785 /* THIS_FRAME does not contain a reference to its thread. */
1786 tp = inferior_thread ();
1787 
1788 bfun = NULL;
1789 next = get_next_frame (this_frame);
1790 if (next == NULL)
1791 {
/* Innermost frame: take the function segment at the replay position.  */
1792 const struct btrace_insn_iterator *replay;
1793 
1794 replay = tp->btrace.replay;
1795 if (replay != NULL)
1796 bfun = &replay->btinfo->functions[replay->call_index];
1797 }
1798 else
1799 {
/* Outer frame: follow the callee's UP link, unless it is a tail call
   (those are handled by the tailcall sniffer below).  */
1800 const struct btrace_function *callee;
1801 struct btrace_call_iterator it;
1802 
1803 callee = btrace_get_frame_function (next);
1804 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1805 return 0;
1806 
1807 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1808 return 0;
1809 
1810 bfun = btrace_call_get (&it);
1811 }
1812 
1813 if (bfun == NULL)
1814 return 0;
1815 
1816 DEBUG ("[frame] sniffed frame for %s on level %d",
1817 btrace_get_bfun_name (bfun), bfun->level);
1818 
1819 /* This is our frame. Initialize the frame cache. */
1820 cache = bfcache_new (this_frame);
1821 cache->tp = tp;
1822 cache->bfun = bfun;
1823 
1824 *this_cache = cache;
1825 return 1;
1826 }
1827
1828/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1829
1830 static int
/* NOTE(review): the definition line (record_btrace_tailcall_frame_sniffer
   (const struct frame_unwind *self, ...)) was stripped by extraction.  */
1832 frame_info_ptr this_frame,
1833 void **this_cache)
1834 {
1835 const struct btrace_function *bfun, *callee;
1836 struct btrace_frame_cache *cache;
1837 struct btrace_call_iterator it;
1838 frame_info_ptr next;
1839 struct thread_info *tinfo;
1840 
/* Only applies to non-innermost frames whose callee was entered via a
   tail call.  */
1841 next = get_next_frame (this_frame);
1842 if (next == NULL)
1843 return 0;
1844 
1845 callee = btrace_get_frame_function (next);
1846 if (callee == NULL)
1847 return 0;
1848 
1849 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1850 return 0;
1851 
1852 tinfo = inferior_thread ();
1853 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
1854 return 0;
1855 
1856 bfun = btrace_call_get (&it);
1857 
1858 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1859 btrace_get_bfun_name (bfun), bfun->level);
1860 
1861 /* This is our frame. Initialize the frame cache. */
1862 cache = bfcache_new (this_frame);
1863 cache->tp = tinfo;
1864 cache->bfun = bfun;
1865 
1866 *this_cache = cache;
1867 return 1;
1868 }
1869
1870 static void
/* NOTE(review): the definition line (record_btrace_frame_dealloc_cache
   (frame_info *self, void *this_cache)) was stripped by extraction.
   Removes the cache entry when its frame is deallocated.  */
1872 {
1873 struct btrace_frame_cache *cache;
1874 void **slot;
1875 
1876 cache = (struct btrace_frame_cache *) this_cache;
1877 
1878 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1879 gdb_assert (slot != NULL);
1880 
1881 htab_remove_elt (bfcache, cache);
1882 }
1883 
1884 /* btrace recording does not store previous memory content, neither the stack
1885 frames content. Any unwinding would return erroneous results as the stack
1886 contents no longer matches the changed PC value restored from history.
1887 Therefore this unwinder reports any possibly unwound registers as
1888 <unavailable>. */
1889 
/* NOTE(review): the aggregate declarations below (const struct frame_unwind
   record_btrace_frame_unwind / record_btrace_tailcall_frame_unwind) lost
   their declarator lines and most initializer fields (type, stop_reason,
   this_id, prev_register, sniffer, dealloc_cache) to extraction.  */
1891 {
1892 "record-btrace",
1897 NULL,
1900 };
1901 
1903 {
1904 "record-btrace tailcall",
1909 NULL,
1912 };
1913
1914/* Implement the get_unwinder method. */
1915
1916 const struct frame_unwind *
/* NOTE(review): the definition line (record_btrace_target::get_unwinder)
   and its return statement (&record_btrace_frame_unwind) were stripped.  */
1918 {
1920 }
1921 
1922 /* Implement the get_tailcall_unwinder method. */
1923 
1924 const struct frame_unwind *
/* NOTE(review): the definition line (record_btrace_target::
   get_tailcall_unwinder) and its return statement
   (&record_btrace_tailcall_frame_unwind) were stripped.  */
1926 {
1928 }
1929
1930/* Return a human-readable string for FLAG. */
1931
1932static const char *
1933btrace_thread_flag_to_str (btrace_thread_flags flag)
1934{
1935 switch (flag)
1936 {
1937 case BTHR_STEP:
1938 return "step";
1939
1940 case BTHR_RSTEP:
1941 return "reverse-step";
1942
1943 case BTHR_CONT:
1944 return "cont";
1945
1946 case BTHR_RCONT:
1947 return "reverse-cont";
1948
1949 case BTHR_STOP:
1950 return "stop";
1951 }
1952
1953 return "<invalid>";
1954}
1955
1956/* Indicate that TP should be resumed according to FLAG. */
1957
1958 static void
/* NOTE(review): the definition line (record_btrace_resume_thread
   (struct thread_info *tp, ...)) was stripped by extraction.  */
1960 enum btrace_thread_flag flag)
1961 {
1962 struct btrace_thread_info *btinfo;
1963 
1964 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1965 tp->ptid.to_string ().c_str (), flag,
/* NOTE(review): the final DEBUG argument (btrace_thread_flag_to_str)
   was stripped from this copy.  */
1967 
1968 btinfo = &tp->btrace;
1969 
1970 /* Fetch the latest branch trace. */
/* NOTE(review): the btrace_fetch call was stripped here.  */
1972 
1973 /* A resume request overwrites a preceding resume or stop request. */
1974 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1975 btinfo->flags |= flag;
1976 }
1977 
1978 /* Get the current frame for TP. */
1979 
1980 static struct frame_id
/* NOTE(review): the definition line (get_thread_current_frame_id
   (struct thread_info *tp)) was stripped by extraction.  */
1982 {
1983 /* Set current thread, which is implicitly used by
1984 get_current_frame. */
1985 scoped_restore_current_thread restore_thread;
1986 
1987 switch_to_thread (tp);
1988 
1989 process_stratum_target *proc_target = tp->inf->process_target ();
1990 
1991 /* Clear the executing flag to allow changes to the current frame.
1992 We are not actually running, yet. We just started a reverse execution
1993 command or a record goto command.
1994 For the latter, EXECUTING is false and this has no effect.
1995 For the former, EXECUTING is true and we're in wait, about to
1996 move the thread. Since we need to recompute the stack, we temporarily
1997 set EXECUTING to false. */
1998 bool executing = tp->executing ();
1999 set_executing (proc_target, inferior_ptid, false);
2000 SCOPE_EXIT
2001 {
2002 set_executing (proc_target, inferior_ptid, executing);
2003 };
2004 return get_frame_id (get_current_frame ());
2005 }
2006
2007/* Start replaying a thread. */
2008
2009 static struct btrace_insn_iterator *
/* NOTE(review): the definition line (record_btrace_start_replaying
   (struct thread_info *tp)) was stripped by extraction.  */
2011 {
2012 struct btrace_insn_iterator *replay;
2013 struct btrace_thread_info *btinfo;
2014 
2015 btinfo = &tp->btrace;
2016 replay = NULL;
2017 
2018 /* We can't start replaying without trace. */
2019 if (btinfo->functions.empty ())
2020 error (_("No trace."));
2021 
2022 /* GDB stores the current frame_id when stepping in order to detects steps
2023 into subroutines.
2024 Since frames are computed differently when we're replaying, we need to
2025 recompute those stored frames and fix them up so we can still detect
2026 subroutines after we started replaying. */
2027 try
2028 {
2029 struct frame_id frame_id;
2030 int upd_step_frame_id, upd_step_stack_frame_id;
2031 
2032 /* The current frame without replaying - computed via normal unwind. */
/* NOTE(review): the assignment (frame_id = get_thread_current_frame_id
   (tp)) was stripped here.  */
2034 
2035 /* Check if we need to update any stepping-related frame id's. */
2036 upd_step_frame_id = (frame_id == tp->control.step_frame_id);
2037 upd_step_stack_frame_id = (frame_id == tp->control.step_stack_frame_id);
2038 
2039 /* We start replaying at the end of the branch trace. This corresponds
2040 to the current instruction. */
2041 replay = XNEW (struct btrace_insn_iterator);
2042 btrace_insn_end (replay, btinfo);
2043 
2044 /* Skip gaps at the end of the trace. */
2045 while (btrace_insn_get (replay) == NULL)
2046 {
2047 unsigned int steps;
2048 
2049 steps = btrace_insn_prev (replay, 1);
2050 if (steps == 0)
2051 error (_("No trace."));
2052 }
2053 
2054 /* We're not replaying, yet. */
2055 gdb_assert (btinfo->replay == NULL);
2056 btinfo->replay = replay;
2057 
2058 /* Make sure we're not using any stale registers. */
/* NOTE(review): the registers_changed_thread (tp) call was stripped.  */
2060 
2061 /* The current frame with replaying - computed via btrace unwind. */
/* NOTE(review): the re-computation of frame_id was stripped here.  */
2063 
2064 /* Replace stepping related frames where necessary. */
2065 if (upd_step_frame_id)
/* NOTE(review): the assignments to tp->control.step_frame_id /
   step_stack_frame_id were stripped from the two branches below.  */
2067 if (upd_step_stack_frame_id)
2069 }
2070 catch (const gdb_exception &except)
2071 {
/* Roll back: free the iterator and detach it from the thread before
   propagating the error.  */
2072 xfree (btinfo->replay);
2073 btinfo->replay = NULL;
2074 
2076 
2077 throw;
2078 }
2079 
2080 return replay;
2081 }
2082
2083/* Stop replaying a thread. */
2084
2085 static void
/* NOTE(review): the definition line (record_btrace_stop_replaying
   (struct thread_info *tp)) was stripped by extraction.  */
2087 {
2088 struct btrace_thread_info *btinfo;
2089 
2090 btinfo = &tp->btrace;
2091 
2092 xfree (btinfo->replay);
2093 btinfo->replay = NULL;
2094 
2095 /* Make sure we're not leaving any stale registers. */
/* NOTE(review): the registers_changed_thread (tp) call was stripped.  */
2097 }
2098 
2099 /* Stop replaying TP if it is at the end of its execution history. */
2100 
2101 static void
/* NOTE(review): the definition line (record_btrace_stop_replaying_at_end
   (struct thread_info *tp)) was stripped by extraction.  */
2103 {
2104 struct btrace_insn_iterator *replay, end;
2105 struct btrace_thread_info *btinfo;
2106 
2107 btinfo = &tp->btrace;
2108 replay = btinfo->replay;
2109 
2110 if (replay == NULL)
2111 return;
2112 
2113 btrace_insn_end (&end, btinfo);
2114 
2115 if (btrace_insn_cmp (replay, &end) == 0)
/* NOTE(review): the record_btrace_stop_replaying (tp) call was
   stripped here.  */
2117 }
2118
2119/* The resume method of target record-btrace. */
2120
2121 void
2122 record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
2123 {
2124 enum btrace_thread_flag flag, cflag;
2125 
2126 DEBUG ("resume %s: %s%s", ptid.to_string ().c_str (),
2127 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
2128 step ? "step" : "cont");
2129 
2130 /* Store the execution direction of the last resume.
2131 
2132 If there is more than one resume call, we have to rely on infrun
2133 to not change the execution direction in-between. */
/* NOTE(review): the assignment caching ::execution_direction was
   stripped here.  */
2135 
2136 /* As long as we're not replaying, just forward the request.
2137 
2138 For non-stop targets this means that no thread is replaying. In order to
2139 make progress, we may need to explicitly move replaying threads to the end
2140 of their execution history. */
/* NOTE(review): the first half of the condition (execution direction not
   reverse) was stripped from the if below.  */
2142 && !record_is_replaying (minus_one_ptid))
2143 {
2144 this->beneath ()->resume (ptid, step, signal);
2145 return;
2146 }
2147 
2148 /* Compute the btrace thread flag for the requested move. */
/* NOTE(review): the reverse-direction condition was stripped here.  */
2150 {
2151 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2152 cflag = BTHR_RCONT;
2153 }
2154 else
2155 {
2156 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2157 cflag = BTHR_CONT;
2158 }
2159 
2160 /* We just indicate the resume intent here. The actual stepping happens in
2161 record_btrace_wait below.
2162 
2163 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2164 
/* NOTE(review): the declaration of proc_target was stripped here.  */
2166 
2167 if (!target_is_non_stop_p ())
2168 {
2169 gdb_assert (inferior_ptid.matches (ptid));
2170 
2171 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2172 {
2173 if (tp->ptid.matches (inferior_ptid))
2174 record_btrace_resume_thread (tp, flag);
2175 else
2176 record_btrace_resume_thread (tp, cflag);
2177 }
2178 }
2179 else
2180 {
2181 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2182 record_btrace_resume_thread (tp, flag);
2183 }
2184 
2185 /* Async support. */
2186 if (target_can_async_p ())
2187 {
2188 target_async (true);
/* NOTE(review): the mark_async_event_handler call was stripped here.  */
2190 }
2191 }
2192
2193/* Cancel resuming TP. */
2194
2195 static void
/* NOTE(review): the definition line (record_btrace_cancel_resume
   (struct thread_info *tp)) was stripped by extraction.  */
2197 {
2198 btrace_thread_flags flags;
2199 
2200 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2201 if (flags == 0)
2202 return;
2203 
2204 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2205 print_thread_id (tp),
2206 tp->ptid.to_string ().c_str (), flags.raw (),
/* NOTE(review): the final DEBUG argument (btrace_thread_flag_to_str)
   was stripped from this copy.  */
2208 
2209 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
/* NOTE(review): the record_btrace_stop_replaying_at_end (tp) call was
   stripped here.  */
2211 }
2212
2213/* Return a target_waitstatus indicating that we ran out of history. */
2214
/* NOTE(review): in each of the six small factories below, the extraction
   stripped the function name line (btrace_step_no_history, _stopped,
   _stopped_on_request, _spurious, _no_resumed, _again) and the local
   declaration of STATUS (struct target_waitstatus status;).  */
2215 static struct target_waitstatus
2217 {
2219 
2220 status.set_no_history ();
2221 
2222 return status;
2223 }
2224 
2225 /* Return a target_waitstatus indicating that a step finished. */
2226 
2227 static struct target_waitstatus
2229 {
2231 
2232 status.set_stopped (GDB_SIGNAL_TRAP);
2233 
2234 return status;
2235 }
2236 
2237 /* Return a target_waitstatus indicating that a thread was stopped as
2238 requested. */
2239 
2240 static struct target_waitstatus
2242 {
2244 
2245 status.set_stopped (GDB_SIGNAL_0);
2246 
2247 return status;
2248 }
2249 
2250 /* Return a target_waitstatus indicating a spurious stop. */
2251 
2252 static struct target_waitstatus
2254 {
2256 
2257 status.set_spurious ();
2258 
2259 return status;
2260 }
2261 
2262 /* Return a target_waitstatus indicating that the thread was not resumed. */
2263 
2264 static struct target_waitstatus
2266 {
2268 
2269 status.set_no_resumed ();
2270 
2271 return status;
2272 }
2273 
2274 /* Return a target_waitstatus indicating that we should wait again. */
2275 
2276 static struct target_waitstatus
2278 {
2280 
2281 status.set_ignore ();
2282 
2283 return status;
2284 }
2285
2286/* Clear the record histories. */
2287
2288 static void
/* NOTE(review): the definition line (record_btrace_clear_histories
   (struct btrace_thread_info *btinfo)) was stripped by extraction.  */
2290 {
2291 xfree (btinfo->insn_history);
2292 xfree (btinfo->call_history);
2293 
/* Null the pointers so a later clear/free is safe.  */
2294 btinfo->insn_history = NULL;
2295 btinfo->call_history = NULL;
2296 }
2297 
2298 /* Check whether TP's current replay position is at a breakpoint. */
2299 
2300 static int
/* NOTE(review): the definition line (record_btrace_replay_at_breakpoint
   (struct thread_info *tp)) was stripped by extraction.  */
2302 {
2303 struct btrace_insn_iterator *replay;
2304 struct btrace_thread_info *btinfo;
2305 const struct btrace_insn *insn;
2306 
2307 btinfo = &tp->btrace;
2308 replay = btinfo->replay;
2309 
2310 if (replay == NULL)
2311 return 0;
2312 
2313 insn = btrace_insn_get (replay);
2314 if (insn == NULL)
2315 return 0;
2316 
/* Also records the stop reason (sw/hw breakpoint) in BTINFO.  */
2317 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
2318 &btinfo->stop_reason);
2319 }
2320
2321/* Step one instruction in forward direction. */
2322
2323 static struct target_waitstatus
/* NOTE(review): the definition line (record_btrace_single_step_forward
   (struct thread_info *tp)) was stripped by extraction.  */
2325 {
2326 struct btrace_insn_iterator *replay, end, start;
2327 struct btrace_thread_info *btinfo;
2328 
2329 btinfo = &tp->btrace;
2330 replay = btinfo->replay;
2331 
2332 /* We're done if we're not replaying. */
2333 if (replay == NULL)
2334 return btrace_step_no_history ();
2335 
2336 /* Check if we're stepping a breakpoint. */
/* NOTE(review): the condition (record_btrace_replay_at_breakpoint (tp))
   was stripped from the line above the return below.  */
2338 return btrace_step_stopped ();
2339 
2340 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2341 jump back to the instruction at which we started. */
2342 start = *replay;
2343 do
2344 {
2345 unsigned int steps;
2346 
2347 /* We will bail out here if we continue stepping after reaching the end
2348 of the execution history. */
2349 steps = btrace_insn_next (replay, 1);
2350 if (steps == 0)
2351 {
2352 *replay = start;
2353 return btrace_step_no_history ();
2354 }
2355 }
2356 while (btrace_insn_get (replay) == NULL);
2357 
2358 /* Determine the end of the instruction trace. */
2359 btrace_insn_end (&end, btinfo);
2360 
2361 /* The execution trace contains (and ends with) the current instruction.
2362 This instruction has not been executed, yet, so the trace really ends
2363 one instruction earlier. */
2364 if (btrace_insn_cmp (replay, &end) == 0)
2365 return btrace_step_no_history ();
2366 
2367 return btrace_step_spurious ();
2368 }
2369
2370/* Step one instruction in backward direction. */
2371
2372 static struct target_waitstatus
/* NOTE(review): the definition line (record_btrace_single_step_backward
   (struct thread_info *tp)) was stripped by extraction.  */
2374 {
2375 struct btrace_insn_iterator *replay, start;
2376 struct btrace_thread_info *btinfo;
2377 
2378 btinfo = &tp->btrace;
2379 replay = btinfo->replay;
2380 
2381 /* Start replaying if we're not already doing so. */
2382 if (replay == NULL)
/* NOTE(review): the call (replay = record_btrace_start_replaying (tp))
   was stripped here.  */
2384 
2385 /* If we can't step any further, we reached the end of the history.
2386 Skip gaps during replay. If we end up at a gap (at the beginning of
2387 the trace), jump back to the instruction at which we started. */
2388 start = *replay;
2389 do
2390 {
2391 unsigned int steps;
2392 
2393 steps = btrace_insn_prev (replay, 1);
2394 if (steps == 0)
2395 {
2396 *replay = start;
2397 return btrace_step_no_history ();
2398 }
2399 }
2400 while (btrace_insn_get (replay) == NULL);
2401 
2402 /* Check if we're stepping a breakpoint.
2403 
2404 For reverse-stepping, this check is after the step. There is logic in
2405 infrun.c that handles reverse-stepping separately. See, for example,
2406 proceed and adjust_pc_after_break.
2407 
2408 This code assumes that for reverse-stepping, PC points to the last
2409 de-executed instruction, whereas for forward-stepping PC points to the
2410 next to-be-executed instruction. */
/* NOTE(review): the condition (record_btrace_replay_at_breakpoint (tp))
   was stripped from above the return below.  */
2412 return btrace_step_stopped ();
2413 
2414 return btrace_step_spurious ();
2415 }
2416
2417/* Step a single thread. */
2418
2419 static struct target_waitstatus
/* NOTE(review): the definition line (record_btrace_step_thread
   (struct thread_info *tp)) and the declaration of STATUS were
   stripped by extraction.  */
2421 {
2422 struct btrace_thread_info *btinfo;
2424 btrace_thread_flags flags;
2425 
2426 btinfo = &tp->btrace;
2427 
/* Consume the pending move/stop request for this step.  */
2428 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2429 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2430 
2431 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2432 tp->ptid.to_string ().c_str (), flags.raw (),
/* NOTE(review): the final DEBUG argument (btrace_thread_flag_to_str)
   was stripped from this copy.  */
2434 
2435 /* We can't step without an execution history. */
2436 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2437 return btrace_step_no_history ();
2438 
2439 switch (flags)
2440 {
2441 default:
2442 internal_error (_("invalid stepping type."));
2443 
2444 case BTHR_STOP:
/* NOTE(review): the return (btrace_step_stopped_on_request ()) was
   stripped here.  */
2446 
2447 case BTHR_STEP:
/* NOTE(review): the assignment (status = record_btrace_single_step_forward
   (tp)) was stripped here, and similarly in the cases below.  */
2449 if (status.kind () != TARGET_WAITKIND_SPURIOUS)
2450 break;
2451 
2452 return btrace_step_stopped ();
2453 
2454 case BTHR_RSTEP:
2456 if (status.kind () != TARGET_WAITKIND_SPURIOUS)
2457 break;
2458 
2459 return btrace_step_stopped ();
2460 
2461 case BTHR_CONT:
2463 if (status.kind () != TARGET_WAITKIND_SPURIOUS)
2464 break;
2465 
/* Spurious stop while continuing: re-arm and ask to be stepped again.  */
2466 btinfo->flags |= flags;
2467 return btrace_step_again ();
2468 
2469 case BTHR_RCONT:
2471 if (status.kind () != TARGET_WAITKIND_SPURIOUS)
2472 break;
2473 
2474 btinfo->flags |= flags;
2475 return btrace_step_again ();
2476 }
2477 
2478 /* We keep threads moving at the end of their execution history. The wait
2479 method will stop the thread for whom the event is reported. */
2480 if (status.kind () == TARGET_WAITKIND_NO_HISTORY)
2481 btinfo->flags |= flags;
2482 
2483 return status;
2484 }
2485
2486/* Announce further events if necessary. */
2487
2488 static void
/* NOTE(review): the function name line (record_btrace_maybe_mark_async_event)
   was stripped by extraction.  */
2490 (const std::vector<thread_info *> &moving,
2491 const std::vector<thread_info *> &no_history)
2492 {
2493 bool more_moving = !moving.empty ();
/* NOTE(review): stray double semicolon on the next line — harmless (the
   second is an empty statement) but should be cleaned up upstream.  */
2494 bool more_no_history = !no_history.empty ();;
2495 
2496 if (!more_moving && !more_no_history)
2497 return;
2498 
2499 if (more_moving)
2500 DEBUG ("movers pending");
2501 
2502 if (more_no_history)
2503 DEBUG ("no-history pending");
2504 
/* NOTE(review): the mark_async_event_handler call was stripped here.  */
2506 }
2507
2508/* The wait method of target record-btrace. */
2509
2510 ptid_t
/* NOTE(review): the definition line (record_btrace_target::wait (ptid_t
   ptid, struct target_waitstatus *status, ...)) was stripped.  */
2512 target_wait_flags options)
2513 {
2514 std::vector<thread_info *> moving;
2515 std::vector<thread_info *> no_history;
2516 
2517 /* Clear this, if needed we'll re-mark it below. */
/* NOTE(review): the clear_async_event_handler call was stripped here.  */
2519 
2520 DEBUG ("wait %s (0x%x)", ptid.to_string ().c_str (),
2521 (unsigned) options);
2522 
2523 /* As long as we're not replaying, just forward the request. */
/* NOTE(review): the first half of the condition (execution direction not
   reverse) was stripped from the if below.  */
2525 && !record_is_replaying (minus_one_ptid))
2526 {
2527 return this->beneath ()->wait (ptid, status, options);
2528 }
2529 
2530 /* Keep a work list of moving threads. */
/* NOTE(review): the declaration of proc_target was stripped here.  */
2532 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2533 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2534 moving.push_back (tp);
2535 
2536 if (moving.empty ())
2537 {
/* NOTE(review): the assignment (*status = btrace_step_no_resumed ())
   was stripped here.  */
2539 
2540 DEBUG ("wait ended by %s: %s", null_ptid.to_string ().c_str (),
2541 status->to_string ().c_str ());
2542 
2543 return null_ptid;
2544 }
2545 
2546 /* Step moving threads one by one, one step each, until either one thread
2547 reports an event or we run out of threads to step.
2548 
2549 When stepping more than one thread, chances are that some threads reach
2550 the end of their execution history earlier than others. If we reported
2551 this immediately, all-stop on top of non-stop would stop all threads and
2552 resume the same threads next time. And we would report the same thread
2553 having reached the end of its execution history again.
2554 
2555 In the worst case, this would starve the other threads. But even if other
2556 threads would be allowed to make progress, this would result in far too
2557 many intermediate stops.
2558 
2559 We therefore delay the reporting of "no execution history" until we have
2560 nothing else to report. By this time, all threads should have moved to
2561 either the beginning or the end of their execution history. There will
2562 be a single user-visible stop. */
2563 struct thread_info *eventing = NULL;
2564 while ((eventing == NULL) && !moving.empty ())
2565 {
2566 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2567 {
2568 thread_info *tp = moving[ix];
2569 
/* NOTE(review): the call (*status = record_btrace_step_thread (tp))
   was stripped here.  */
2571 
2572 switch (status->kind ())
2573 {
/* NOTE(review): the case labels (TARGET_WAITKIND_IGNORE and
   TARGET_WAITKIND_NO_HISTORY) were stripped from the two arms below.  */
2575 ix++;
2576 break;
2577 
2579 no_history.push_back (ordered_remove (moving, ix));
2580 break;
2581 
2582 default:
2583 eventing = unordered_remove (moving, ix);
2584 break;
2585 }
2586 }
2587 }
2588 
2589 if (eventing == NULL)
2590 {
2591 /* We started with at least one moving thread. This thread must have
2592 either stopped or reached the end of its execution history.
2593 
2594 In the former case, EVENTING must not be NULL.
2595 In the latter case, NO_HISTORY must not be empty. */
2596 gdb_assert (!no_history.empty ());
2597 
2598 /* We kept threads moving at the end of their execution history. Stop
2599 EVENTING now that we are going to report its stop. */
2600 eventing = unordered_remove (no_history, 0);
2601 eventing->btrace.flags &= ~BTHR_MOVE;
2602 
/* NOTE(review): the assignment (*status = btrace_step_no_history ())
   was stripped here.  */
2604 }
2605 
2606 gdb_assert (eventing != NULL);
2607 
2608 /* We kept threads replaying at the end of their execution history. Stop
2609 replaying EVENTING now that we are going to report its stop. */
/* NOTE(review): the record_btrace_stop_replaying_at_end call was
   stripped here.  */
2611 
2612 /* Stop all other threads. */
2613 if (!target_is_non_stop_p ())
2614 {
2615 for (thread_info *tp : current_inferior ()->non_exited_threads ())
/* NOTE(review): the record_btrace_cancel_resume (tp) call was
   stripped here.  */
2617 }
2618 
2619 /* In async mode, we need to announce further events. */
2620 if (target_is_async_p ())
2621 record_btrace_maybe_mark_async_event (moving, no_history)
2622 
2623 /* Start record histories anew from the current position. */
/* NOTE(review): the record_btrace_clear_histories call was stripped.  */
2625 
2626 /* We moved the replay position but did not update registers. */
2627 registers_changed_thread (eventing);
2628 
2629 DEBUG ("wait ended by thread %s (%s): %s",
2630 print_thread_id (eventing),
2631 eventing->ptid.to_string ().c_str (),
2632 status->to_string ().c_str ());
2633 
2634 return eventing->ptid;
2635 }
2636
2637/* The stop method of target record-btrace. */
2638
2639 void
/* NOTE(review): the definition line (record_btrace_target::stop
   (ptid_t ptid)) was stripped by extraction.  */
2641 {
2642 DEBUG ("stop %s", ptid.to_string ().c_str ());
2643 
2644 /* As long as we're not replaying, just forward the request. */
/* NOTE(review): the first half of the condition (execution direction not
   reverse) was stripped from the if below.  */
2646 && !record_is_replaying (minus_one_ptid))
2647 {
2648 this->beneath ()->stop (ptid);
2649 }
2650 else
2651 {
2652 process_stratum_target *proc_target
/* NOTE(review): the initializer (current_inferior ()->process_target ())
   was stripped from the continuation line below.  */
2654 
2655 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2656 {
/* Convert any pending move request into a stop request; the wait
   method reports the stop.  */
2657 tp->btrace.flags &= ~BTHR_MOVE;
2658 tp->btrace.flags |= BTHR_STOP;
2659 }
2660 }
2661 }
2662
2663/* The can_execute_reverse method of target record-btrace. */
2664
/* NOTE(review): in each of the six short methods below the extraction
   stripped the qualified definition line (record_btrace_target::
   can_execute_reverse, ::stopped_by_sw_breakpoint,
   ::supports_stopped_by_sw_breakpoint, ::stopped_by_hw_breakpoint,
   ::supports_stopped_by_hw_breakpoint, ::update_thread_list,
   ::thread_alive).  */
2665 bool
2667 {
2668 return true;
2669 }
2670 
2671 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2672 
2673 bool
2675 {
/* While replaying, answer from the recorded stop reason; otherwise
   delegate to the target beneath.  */
2676 if (record_is_replaying (minus_one_ptid))
2677 {
2678 struct thread_info *tp = inferior_thread ();
2679 
2680 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2681 }
2682 
2683 return this->beneath ()->stopped_by_sw_breakpoint ();
2684 }
2685 
2686 /* The supports_stopped_by_sw_breakpoint method of target
2687 record-btrace. */
2688 
2689 bool
2691 {
2692 if (record_is_replaying (minus_one_ptid))
2693 return true;
2694 
2695 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
2696 }
2697 
2698 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2699 
2700 bool
2702 {
2703 if (record_is_replaying (minus_one_ptid))
2704 {
2705 struct thread_info *tp = inferior_thread ();
2706 
2707 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2708 }
2709 
2710 return this->beneath ()->stopped_by_hw_breakpoint ();
2711 }
2712 
2713 /* The supports_stopped_by_hw_breakpoint method of target
2714 record-btrace. */
2715 
2716 bool
2718 {
2719 if (record_is_replaying (minus_one_ptid))
2720 return true;
2721 
2722 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2723 }
2724 
2725 /* The update_thread_list method of target record-btrace. */
2726 
2727 void
2729 {
2730 /* We don't add or remove threads during replay. */
2731 if (record_is_replaying (minus_one_ptid))
2732 return;
2733 
2734 /* Forward the request. */
2735 this->beneath ()->update_thread_list ();
2736 }
2737 
2738 /* The thread_alive method of target record-btrace. */
2739 
2740 bool
2742 {
2743 /* We don't add or remove threads during replay. */
2744 if (record_is_replaying (minus_one_ptid))
2745 return true;
2746 
2747 /* Forward the request. */
2748 return this->beneath ()->thread_alive (ptid);
2749 }
2750
2751/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2752 is stopped. */
2753
2754static void
2756 const struct btrace_insn_iterator *it)
2757{
2758 struct btrace_thread_info *btinfo;
2759
2760 btinfo = &tp->btrace;
2761
2762 if (it == NULL)
2764 else
2765 {
2766 if (btinfo->replay == NULL)
2768 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2769 return;
2770
2771 *btinfo->replay = *it;
2773 }
2774
2775 /* Start anew from the new replay position. */
2777
2780}
2781
2782/* The goto_record_begin method of target record-btrace. */
2783
2784void
2786{
2787 struct thread_info *tp;
2788 struct btrace_insn_iterator begin;
2789
2790 tp = require_btrace_thread ();
2791
2792 btrace_insn_begin (&begin, &tp->btrace);
2793
2794 /* Skip gaps at the beginning of the trace. */
2795 while (btrace_insn_get (&begin) == NULL)
2796 {
2797 unsigned int steps;
2798
2799 steps = btrace_insn_next (&begin, 1);
2800 if (steps == 0)
2801 error (_("No trace."));
2802 }
2803
2804 record_btrace_set_replay (tp, &begin);
2805}
2806
2807/* The goto_record_end method of target record-btrace. */
2808
2809void
2811{
2812 struct thread_info *tp;
2813
2814 tp = require_btrace_thread ();
2815
2816 record_btrace_set_replay (tp, NULL);
2817}
2818
2819/* The goto_record method of target record-btrace. */
2820
2821void
2823{
2824 struct thread_info *tp;
2825 struct btrace_insn_iterator it;
2826 unsigned int number;
2827 int found;
2828
2829 number = insn;
2830
2831 /* Check for wrap-arounds. */
2832 if (number != insn)
2833 error (_("Instruction number out of range."));
2834
2835 tp = require_btrace_thread ();
2836
2837 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2838
2839 /* Check if the instruction could not be found or is a gap. */
2840 if (found == 0 || btrace_insn_get (&it) == NULL)
2841 error (_("No such instruction."));
2842
2843 record_btrace_set_replay (tp, &it);
2844}
2845
2846/* The record_stop_replaying method of target record-btrace. */
2847
2848void
2850{
2851 for (thread_info *tp : current_inferior ()->non_exited_threads ())
2853}
2854
2855/* The execution_direction target method. */
2856
2859{
2861}
2862
2863/* The prepare_to_generate_core target method. */
2864
2865void
2867{
2869}
2870
2871/* The done_generating_core target method. */
2872
2873void
2875{
2877}
2878
2879/* Start recording in BTS format. */
2880
2881static void
2882cmd_record_btrace_bts_start (const char *args, int from_tty)
2883{
2884 if (args != NULL && *args != 0)
2885 error (_("Invalid argument."));
2886
2887 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2888
2889 try
2890 {
2891 execute_command ("target record-btrace", from_tty);
2892 }
2893 catch (const gdb_exception &exception)
2894 {
2895 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2896 throw;
2897 }
2898}
2899
2900/* Start recording in Intel Processor Trace format. */
2901
2902static void
2903cmd_record_btrace_pt_start (const char *args, int from_tty)
2904{
2905 if (args != NULL && *args != 0)
2906 error (_("Invalid argument."));
2907
2908 record_btrace_conf.format = BTRACE_FORMAT_PT;
2909
2910 try
2911 {
2912 execute_command ("target record-btrace", from_tty);
2913 }
2914 catch (const gdb_exception &exception)
2915 {
2916 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2917 throw;
2918 }
2919}
2920
2921/* Alias for "target record". */
2922
2923static void
2924cmd_record_btrace_start (const char *args, int from_tty)
2925{
2926 if (args != NULL && *args != 0)
2927 error (_("Invalid argument."));
2928
2929 record_btrace_conf.format = BTRACE_FORMAT_PT;
2930
2931 try
2932 {
2933 execute_command ("target record-btrace", from_tty);
2934 }
2935 catch (const gdb_exception &exception)
2936 {
2937 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2938
2939 try
2940 {
2941 execute_command ("target record-btrace", from_tty);
2942 }
2943 catch (const gdb_exception &ex)
2944 {
2945 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2946 throw;
2947 }
2948 }
2949}
2950
2951/* The "show record btrace replay-memory-access" command. */
2952
2953static void
2954cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2955 struct cmd_list_element *c, const char *value)
2956{
2957 gdb_printf (file, _("Replay memory access is %s.\n"),
2959}
2960
2961/* The "set record btrace cpu none" command. */
2962
2963static void
2964cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2965{
2966 if (args != nullptr && *args != 0)
2967 error (_("Trailing junk: '%s'."), args);
2968
2970}
2971
2972/* The "set record btrace cpu auto" command. */
2973
2974static void
2975cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
2976{
2977 if (args != nullptr && *args != 0)
2978 error (_("Trailing junk: '%s'."), args);
2979
2981}
2982
2983/* The "set record btrace cpu" command. */
2984
2985static void
2986cmd_set_record_btrace_cpu (const char *args, int from_tty)
2987{
2988 if (args == nullptr)
2989 args = "";
2990
2991 /* We use a hard-coded vendor string for now. */
2992 unsigned int family, model, stepping;
2993 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
2994 &model, &l1, &stepping, &l2);
2995 if (matches == 3)
2996 {
2997 if (strlen (args) != l2)
2998 error (_("Trailing junk: '%s'."), args + l2);
2999 }
3000 else if (matches == 2)
3001 {
3002 if (strlen (args) != l1)
3003 error (_("Trailing junk: '%s'."), args + l1);
3004
3005 stepping = 0;
3006 }
3007 else
3008 error (_("Bad format. See \"help set record btrace cpu\"."));
3009
3010 if (USHRT_MAX < family)
3011 error (_("Cpu family too big."));
3012
3013 if (UCHAR_MAX < model)
3014 error (_("Cpu model too big."));
3015
3016 if (UCHAR_MAX < stepping)
3017 error (_("Cpu stepping too big."));
3018
3019 record_btrace_cpu.vendor = CV_INTEL;
3020 record_btrace_cpu.family = family;
3021 record_btrace_cpu.model = model;
3022 record_btrace_cpu.stepping = stepping;
3023
3025}
3026
3027/* The "show record btrace cpu" command. */
3028
3029static void
3030cmd_show_record_btrace_cpu (const char *args, int from_tty)
3031{
3032 if (args != nullptr && *args != 0)
3033 error (_("Trailing junk: '%s'."), args);
3034
3036 {
3037 case CS_AUTO:
3038 gdb_printf (_("btrace cpu is 'auto'.\n"));
3039 return;
3040
3041 case CS_NONE:
3042 gdb_printf (_("btrace cpu is 'none'.\n"));
3043 return;
3044
3045 case CS_CPU:
3046 switch (record_btrace_cpu.vendor)
3047 {
3048 case CV_INTEL:
3049 if (record_btrace_cpu.stepping == 0)
3050 gdb_printf (_("btrace cpu is 'intel: %u/%u'.\n"),
3051 record_btrace_cpu.family,
3052 record_btrace_cpu.model);
3053 else
3054 gdb_printf (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3055 record_btrace_cpu.family,
3056 record_btrace_cpu.model,
3057 record_btrace_cpu.stepping);
3058 return;
3059 }
3060 }
3061
3062 error (_("Internal error: bad cpu state."));
3063}
3064
3065/* The "record bts buffer-size" show value function. */
3066
3067static void
3068show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3069 struct cmd_list_element *c,
3070 const char *value)
3071{
3072 gdb_printf (file, _("The record/replay bts buffer size is %s.\n"),
3073 value);
3074}
3075
3076/* The "record pt buffer-size" show value function. */
3077
3078static void
3079show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3080 struct cmd_list_element *c,
3081 const char *value)
3082{
3083 gdb_printf (file, _("The record/replay pt buffer size is %s.\n"),
3084 value);
3085}
3086
3087/* Initialize btrace commands. */
3088
3090void
3092{
3093 cmd_list_element *record_btrace_cmd
3095 _("Start branch trace recording."),
3097 add_alias_cmd ("b", record_btrace_cmd, class_obscure, 1, &record_cmdlist);
3098
3099 cmd_list_element *record_btrace_bts_cmd
3101 _("\
3102Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3103The processor stores a from/to record for each branch into a cyclic buffer.\n\
3104This format may not be available on all processors."),
3106 add_alias_cmd ("bts", record_btrace_bts_cmd, class_obscure, 1,
3108
3109 cmd_list_element *record_btrace_pt_cmd
3111 _("\
3112Start branch trace recording in Intel Processor Trace format.\n\n\
3113This format may not be available on all processors."),
3115 add_alias_cmd ("pt", record_btrace_pt_cmd, class_obscure, 1, &record_cmdlist);
3116
3118 _("Set record options."),
3119 _("Show record options."),
3123
3124 add_setshow_enum_cmd ("replay-memory-access", no_class,
3126Set what memory accesses are allowed during replay."), _("\
3127Show what memory accesses are allowed during replay."),
3128 _("Default is READ-ONLY.\n\n\
3129The btrace record target does not trace data.\n\
3130The memory therefore corresponds to the live target and not \
3131to the current replay position.\n\n\
3132When READ-ONLY, allow accesses to read-only memory during replay.\n\
3133When READ-WRITE, allow accesses to read-only and read-write memory during \
3134replay."),
3138
3140 _("\
3141Set the cpu to be used for trace decode.\n\n\
3142The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3143For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3144When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3145The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3146When GDB does not support that cpu, this option can be used to enable\n\
3147workarounds for a similar cpu that GDB supports.\n\n\
3148When set to \"none\", errata workarounds are disabled."),
3150 1,
3152
3154Automatically determine the cpu to be used for trace decode."),
3156
3158Do not enable errata workarounds for trace decode."),
3160
3162Show the cpu to be used for trace decode."),
3164
3166 _("Set record btrace bts options."),
3167 _("Show record btrace bts options."),
3172
3173 add_setshow_uinteger_cmd ("buffer-size", no_class,
3174 &record_btrace_conf.bts.size,
3175 _("Set the record/replay bts buffer size."),
3176 _("Show the record/replay bts buffer size."), _("\
3177When starting recording request a trace buffer of this size. \
3178The actual buffer size may differ from the requested size. \
3179Use \"info record\" to see the actual buffer size.\n\n\
3180Bigger buffers allow longer recording but also take more time to process \
3181the recorded execution trace.\n\n\
3182The trace buffer size may not be changed while recording."), NULL,
3186
3188 _("Set record btrace pt options."),
3189 _("Show record btrace pt options."),
3194
3195 add_setshow_uinteger_cmd ("buffer-size", no_class,
3196 &record_btrace_conf.pt.size,
3197 _("Set the record/replay pt buffer size."),
3198 _("Show the record/replay pt buffer size."), _("\
3199Bigger buffers allow longer recording but also take more time to process \
3200the recorded execution.\n\
3201The actual buffer size may differ from the requested size. Use \"info record\" \
3202to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3205
3207
3208 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3209 xcalloc, xfree);
3210
3211 record_btrace_conf.bts.size = 64 * 1024;
3212 record_btrace_conf.pt.size = 16 * 1024;
3213}
int regnum
Definition: aarch64-tdep.c:68
void xfree(void *)
int code
Definition: ada-lex.l:688
void * xcalloc(size_t number, size_t size)
Definition: alloc.c:85
struct gdbarch * target_gdbarch(void)
Definition: arch-utils.c:1453
void mark_async_event_handler(async_event_handler *async_handler_ptr)
Definition: async-event.c:294
async_event_handler * create_async_event_handler(async_event_handler_func *proc, gdb_client_data client_data, const char *name)
Definition: async-event.c:269
void clear_async_event_handler(async_event_handler *async_handler_ptr)
Definition: async-event.c:306
void delete_async_event_handler(async_event_handler **async_handler_ptr)
Definition: async-event.c:348
remove_bp_reason
Definition: breakpoint.h:64
void btrace_enable(struct thread_info *tp, const struct btrace_config *conf)
Definition: btrace.c:1607
const struct btrace_function * btrace_call_get(const struct btrace_call_iterator *it)
Definition: btrace.c:2642
unsigned int btrace_call_prev(struct btrace_call_iterator *it, unsigned int stride)
Definition: btrace.c:2730
unsigned int btrace_call_next(struct btrace_call_iterator *it, unsigned int stride)
Definition: btrace.c:2694
unsigned int btrace_call_number(const struct btrace_call_iterator *it)
Definition: btrace.c:2653
void btrace_insn_end(struct btrace_insn_iterator *it, const struct btrace_thread_info *btinfo)
Definition: btrace.c:2393
int btrace_insn_cmp(const struct btrace_insn_iterator *lhs, const struct btrace_insn_iterator *rhs)
Definition: btrace.c:2559
int btrace_find_insn_by_number(struct btrace_insn_iterator *it, const struct btrace_thread_info *btinfo, unsigned int number)
Definition: btrace.c:2573
unsigned int btrace_insn_prev(struct btrace_insn_iterator *it, unsigned int stride)
Definition: btrace.c:2501
const char * btrace_decode_error(enum btrace_format format, int errcode)
Definition: btrace.c:1858
int btrace_find_call_by_number(struct btrace_call_iterator *it, const struct btrace_thread_info *btinfo, unsigned int number)
Definition: btrace.c:2774
void btrace_set_insn_history(struct btrace_thread_info *btinfo, const struct btrace_insn_iterator *begin, const struct btrace_insn_iterator *end)
Definition: btrace.c:2791
void btrace_disable(struct thread_info *tp)
Definition: btrace.c:1665
void btrace_fetch(struct thread_info *tp, const struct btrace_cpu *cpu)
Definition: btrace.c:1907
void btrace_set_call_history(struct btrace_thread_info *btinfo, const struct btrace_call_iterator *begin, const struct btrace_call_iterator *end)
Definition: btrace.c:2805
const struct btrace_config * btrace_conf(const struct btrace_thread_info *btinfo)
Definition: btrace.c:1654
unsigned int btrace_insn_next(struct btrace_insn_iterator *it, unsigned int stride)
Definition: btrace.c:2419
const struct btrace_insn * btrace_insn_get(const struct btrace_insn_iterator *it)
Definition: btrace.c:2340
void btrace_teardown(struct thread_info *tp)
Definition: btrace.c:1685
unsigned int btrace_insn_number(const struct btrace_insn_iterator *it)
Definition: btrace.c:2371
int btrace_insn_get_error(const struct btrace_insn_iterator *it)
Definition: btrace.c:2363
int btrace_is_empty(struct thread_info *tp)
Definition: btrace.c:2829
void btrace_insn_begin(struct btrace_insn_iterator *it, const struct btrace_thread_info *btinfo)
Definition: btrace.c:2379
void btrace_call_end(struct btrace_call_iterator *it, const struct btrace_thread_info *btinfo)
Definition: btrace.c:2681
int btrace_is_replaying(struct thread_info *tp)
Definition: btrace.c:2821
int btrace_call_cmp(const struct btrace_call_iterator *lhs, const struct btrace_call_iterator *rhs)
Definition: btrace.c:2764
@ BFUN_UP_LINKS_TO_RET
Definition: btrace.h:90
@ BFUN_UP_LINKS_TO_TAILCALL
Definition: btrace.h:94
@ BTRACE_INSN_FLAG_SPECULATIVE
Definition: btrace.h:62
btrace_thread_flag
Definition: btrace.h:232
@ BTHR_CONT
Definition: btrace.h:240
@ BTHR_RSTEP
Definition: btrace.h:237
@ BTHR_STEP
Definition: btrace.h:234
@ BTHR_MOVE
Definition: btrace.h:246
@ BTHR_RCONT
Definition: btrace.h:243
@ BTHR_STOP
Definition: btrace.h:249
ui_file_style style() const
Definition: cli-style.c:169
int pretty_print_insn(const struct disasm_insn *insn, gdb_disassembly_flags flags)
Definition: disasm.c:361
int unpush_target(struct target_ops *t)
Definition: inferior.c:98
void push_target(struct target_ops *t)
Definition: inferior.h:376
struct process_stratum_target * process_target()
Definition: inferior.h:419
struct address_space * aspace
Definition: inferior.h:544
target_ops * target_at(enum strata stratum)
Definition: inferior.h:423
void stop_recording() override
void call_history_range(ULONGEST begin, ULONGEST end, record_print_flags flags) override
void async(bool) override
void fetch_registers(struct regcache *, int) override
enum record_method record_method(ptid_t ptid) override
void record_stop_replaying() override
void info_record() override
ptid_t wait(ptid_t, struct target_waitstatus *, target_wait_flags) override
enum exec_direction_kind execution_direction() override
bool stopped_by_hw_breakpoint() override
void insn_history(int size, gdb_disassembly_flags flags) override
bool stopped_by_sw_breakpoint() override
void close() override
void goto_record_end() override
void prepare_to_store(struct regcache *) override
void mourn_inferior() override
Definition: record-btrace.c:72
void call_history_from(ULONGEST begin, int size, record_print_flags flags) override
void detach(inferior *inf, int from_tty) override
Definition: record-btrace.c:67
void update_thread_list() override
bool supports_stopped_by_hw_breakpoint() override
void goto_record_begin() override
void store_registers(struct regcache *, int) override
bool supports_stopped_by_sw_breakpoint() override
void insn_history_from(ULONGEST from, int size, gdb_disassembly_flags flags) override
bool can_execute_reverse() override
int insert_breakpoint(struct gdbarch *, struct bp_target_info *) override
bool record_is_replaying(ptid_t ptid) override
const struct frame_unwind * get_tailcall_unwinder() override
enum target_xfer_status xfer_partial(enum target_object object, const char *annex, gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST offset, ULONGEST len, ULONGEST *xfered_len) override
void kill() override
Definition: record-btrace.c:75
bool record_will_replay(ptid_t ptid, int dir) override
strata stratum() const override
Definition: record-btrace.c:62
const struct frame_unwind * get_unwinder() override
bool thread_alive(ptid_t ptid) override
int remove_breakpoint(struct gdbarch *, struct bp_target_info *, enum remove_bp_reason) override
void prepare_to_generate_core() override
void goto_record(ULONGEST insn) override
void call_history(int size, record_print_flags flags) override
void resume(ptid_t, int, enum gdb_signal) override
void insn_history_range(ULONGEST begin, ULONGEST end, gdb_disassembly_flags flags) override
void done_generating_core() override
const target_info & info() const override
Definition: record-btrace.c:59
void disconnect(const char *, int) override
gdbarch * arch() const
Definition: regcache.c:230
void raw_supply(int regnum, const void *buf) override
Definition: regcache.c:1053
ptid_t ptid() const
Definition: regcache.h:407
process_stratum_target * target() const
Definition: regcache.h:419
ptid_t ptid
Definition: gdbthread.h:256
struct inferior * inf
Definition: gdbthread.h:298
void set_stop_pc(CORE_ADDR stop_pc)
Definition: gdbthread.h:369
thread_control_state control
Definition: gdbthread.h:340
Definition: ui-out.h:160
void begin(ui_out_type type, const char *id)
Definition: ui-out.c:399
void field_string(const char *fldname, const char *string, const ui_file_style &style=ui_file_style())
Definition: ui-out.c:511
void field_fmt(const char *fldname, const char *format,...) ATTRIBUTE_PRINTF(3
Definition: ui-out.c:525
void field_signed(const char *fldname, LONGEST value)
Definition: ui-out.c:437
void text(const char *string)
Definition: ui-out.c:566
bool is_mi_like_p() const
Definition: ui-out.c:810
void field_unsigned(const char *fldname, ULONGEST value)
Definition: ui-out.c:464
void end(ui_out_type type)
Definition: ui-out.c:429
set_show_commands add_setshow_uinteger_cmd(const char *name, enum command_class theclass, unsigned int *var, const char *set_doc, const char *show_doc, const char *help_doc, cmd_func_ftype *set_func, show_value_ftype *show_func, struct cmd_list_element **set_list, struct cmd_list_element **show_list)
Definition: cli-decode.c:1053
struct cmd_list_element * add_alias_cmd(const char *name, cmd_list_element *target, enum command_class theclass, int abbrev_flag, struct cmd_list_element **list)
Definition: cli-decode.c:294
struct cmd_list_element * add_cmd(const char *name, enum command_class theclass, const char *doc, struct cmd_list_element **list)
Definition: cli-decode.c:233
set_show_commands add_setshow_enum_cmd(const char *name, enum command_class theclass, const char *const *enumlist, const char **var, const char *set_doc, const char *show_doc, const char *help_doc, cmd_func_ftype *set_func, show_value_ftype *show_func, struct cmd_list_element **set_list, struct cmd_list_element **show_list)
Definition: cli-decode.c:618
set_show_commands add_setshow_prefix_cmd(const char *name, command_class theclass, const char *set_doc, const char *show_doc, cmd_list_element **set_subcommands_list, cmd_list_element **show_subcommands_list, cmd_list_element **set_list, cmd_list_element **show_list)
Definition: cli-decode.c:428
struct cmd_list_element * add_prefix_cmd(const char *name, enum command_class theclass, cmd_simple_func_ftype *fun, const char *doc, struct cmd_list_element **subcommands, int allow_unknown, struct cmd_list_element **list)
Definition: cli-decode.c:357
cli_style_option function_name_style
cli_style_option file_name_style
int number_is_in_list(const char *list, int number)
Definition: cli-utils.c:348
@ class_obscure
Definition: command.h:64
@ class_support
Definition: command.h:58
@ no_class
Definition: command.h:53
#define ULONGEST_MAX
Definition: defs.h:473
#define INT_MIN
Definition: defs.h:461
#define INT_MAX
Definition: defs.h:457
@ DISASSEMBLY_SOURCE
Definition: disasm-flags.h:34
@ DISASSEMBLY_FILENAME
Definition: disasm-flags.h:32
@ DISASSEMBLY_SPECULATIVE
Definition: disasm-flags.h:35
int gdb_insn_length(struct gdbarch *gdbarch, CORE_ADDR addr)
Definition: disasm.c:1215
struct value * frame_unwind_got_address(frame_info_ptr frame, int regnum, CORE_ADDR addr)
Definition: frame-unwind.c:329
frame_info_ptr get_next_frame(frame_info_ptr this_frame)
Definition: frame.c:1968
struct frame_id frame_id_build_unavailable_stack_special(CORE_ADDR code_addr, CORE_ADDR special_addr)
Definition: frame.c:699
struct gdbarch * get_frame_arch(frame_info_ptr this_frame)
Definition: frame.c:2907
CORE_ADDR get_frame_func(frame_info_ptr this_frame)
Definition: frame.c:1050
frame_info_ptr get_selected_frame(const char *message)
Definition: frame.c:1813
frame_info_ptr get_current_frame(void)
Definition: frame.c:1615
struct frame_id get_frame_id(frame_info_ptr fi)
Definition: frame.c:607
@ SRC_AND_LOC
Definition: frame.h:594
@ TAILCALL_FRAME
Definition: frame.h:187
@ NORMAL_FRAME
Definition: frame.h:179
void print_stack_frame(frame_info_ptr, int print_level, enum print_what print_what, int set_current_sal)
Definition: stack.c:356
unwind_stop_reason
Definition: frame.h:436
#define FRAME_OBSTACK_ZALLOC(TYPE)
Definition: frame.h:608
int gdbarch_pc_regnum(struct gdbarch *gdbarch)
Definition: gdbarch.c:2023
void execute_command(const char *, int)
Definition: top.c:574
thread_info * find_thread_ptid(inferior *inf, ptid_t ptid)
Definition: thread.c:528
all_non_exited_threads_range all_non_exited_threads(process_stratum_target *proc_target=nullptr, ptid_t filter_ptid=minus_one_ptid)
Definition: gdbthread.h:743
void validate_registers_access(void)
Definition: thread.c:930
void set_executing(process_stratum_target *targ, ptid_t ptid, bool executing)
Definition: thread.c:880
struct thread_info * inferior_thread(void)
Definition: thread.c:83
void switch_to_thread(struct thread_info *thr)
Definition: thread.c:1335
const char * print_thread_id(struct thread_info *thr)
Definition: thread.c:1431
mach_port_t kern_return_t mach_port_t mach_msg_type_name_t msgportsPoly mach_port_t kern_return_t pid_t pid mach_port_t kern_return_t mach_port_t task mach_port_t kern_return_t int flags
Definition: gnu-nat.c:1862
mach_port_t mach_port_t name mach_port_t mach_port_t name kern_return_t int status
Definition: gnu-nat.c:1791
size_t size
Definition: go32-nat.c:241
void inferior_event_handler(enum inferior_event_type event_type)
Definition: inf-loop.c:36
ptid_t inferior_ptid
Definition: infcmd.c:91
struct inferior * current_inferior(void)
Definition: inferior.c:54
exec_direction_kind
Definition: infrun.h:112
@ EXEC_REVERSE
Definition: infrun.h:114
@ EXEC_FORWARD
Definition: infrun.h:113
observable< struct thread_info * > new_thread
observable< struct inferior *, int, const char *, const char * > record_changed
void _initialize_record_btrace()
static void record_btrace_print_conf(const struct btrace_config *conf)
static struct btrace_line_range btrace_line_range_add(struct btrace_line_range range, int line)
static struct cmd_list_element * set_record_btrace_pt_cmdlist
static struct cmd_list_element * show_record_btrace_pt_cmdlist
static struct btrace_line_range btrace_find_line_range(CORE_ADDR pc)
static const char replay_memory_access_read_only[]
static const char * btrace_thread_flag_to_str(btrace_thread_flags flag)
static struct target_waitstatus btrace_step_again(void)
static void btrace_compute_src_line_range(const struct btrace_function *bfun, int *pbegin, int *pend)
static int bfcache_eq(const void *arg1, const void *arg2)
static void record_btrace_maybe_mark_async_event(const std::vector< thread_info * > &moving, const std::vector< thread_info * > &no_history)
static void record_btrace_stop_replaying_at_end(struct thread_info *tp)
static void record_btrace_set_replay(struct thread_info *tp, const struct btrace_insn_iterator *it)
static struct thread_info * require_btrace_thread(void)
static void cmd_record_btrace_pt_start(const char *args, int from_tty)
static void cmd_show_record_btrace_cpu(const char *args, int from_tty)
static struct async_event_handler * record_btrace_async_inferior_event_handler
static void cmd_set_record_btrace_cpu_auto(const char *args, int from_tty)
static void record_btrace_cancel_resume(struct thread_info *tp)
static void record_btrace_auto_disable(void)
static void record_btrace_stop_replaying(struct thread_info *tp)
static const gdb::observers::token record_btrace_thread_observer_token
static void btrace_print_lines(struct btrace_line_range lines, struct ui_out *uiout, gdb::optional< ui_out_emit_tuple > *src_and_asm_tuple, gdb::optional< ui_out_emit_list > *asm_list, gdb_disassembly_flags flags)
static hashval_t bfcache_hash(const void *arg)
static record_btrace_target record_btrace_ops
static struct btrace_cpu record_btrace_cpu
static enum record_btrace_cpu_state_kind record_btrace_cpu_state
record_btrace_cpu_state_kind
@ CS_CPU
@ CS_AUTO
@ CS_NONE
const struct frame_unwind record_btrace_frame_unwind
static void cmd_show_replay_memory_access(struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value)
static void record_btrace_clear_histories(struct btrace_thread_info *btinfo)
static struct cmd_list_element * show_record_btrace_cmdlist
static void cmd_set_record_btrace_cpu(const char *args, int from_tty)
static struct btrace_insn_iterator * record_btrace_start_replaying(struct thread_info *tp)
static const char * replay_memory_access
static struct btrace_line_range btrace_mk_line_range(struct symtab *symtab, int begin, int end)
#define DEBUG(msg, args...)
static void btrace_call_history_insn_range(struct ui_out *uiout, const struct btrace_function *bfun)
static void record_btrace_print_bts_conf(const struct btrace_config_bts *conf)
static struct target_waitstatus btrace_step_spurious(void)
static struct value * record_btrace_frame_prev_register(frame_info_ptr this_frame, void **this_cache, int regnum)
static void cmd_set_record_btrace_cpu_none(const char *args, int from_tty)
static const char replay_memory_access_read_write[]
static void cmd_record_btrace_bts_start(const char *args, int from_tty)
static struct target_waitstatus btrace_step_stopped(void)
static struct target_waitstatus record_btrace_single_step_backward(struct thread_info *tp)
const struct btrace_cpu * record_btrace_get_cpu(void)
static void cmd_record_btrace_start(const char *args, int from_tty)
static struct target_waitstatus record_btrace_single_step_forward(struct thread_info *tp)
static struct target_waitstatus btrace_step_stopped_on_request(void)
static void record_btrace_print_pt_conf(const struct btrace_config_pt *conf)
static void btrace_call_history_src_line(struct ui_out *uiout, const struct btrace_function *bfun)
static void btrace_ui_out_decode_error(struct ui_out *uiout, int errcode, enum btrace_format format)
static enum exec_direction_kind record_btrace_resume_exec_dir
static struct cmd_list_element * show_record_btrace_bts_cmdlist
static struct frame_id get_thread_current_frame_id(struct thread_info *tp)
static int record_btrace_generating_corefile
static void record_btrace_handle_async_inferior_event(gdb_client_data data)
const struct frame_unwind record_btrace_tailcall_frame_unwind
void record_btrace_push_target(void)
static struct target_waitstatus record_btrace_step_thread(struct thread_info *tp)
static int record_btrace_frame_sniffer(const struct frame_unwind *self, frame_info_ptr this_frame, void **this_cache)
static struct cmd_list_element * set_record_btrace_cmdlist
static void record_btrace_resume_thread(struct thread_info *tp, enum btrace_thread_flag flag)
static void record_btrace_target_open(const char *args, int from_tty)
static int btrace_line_range_contains_range(struct btrace_line_range lhs, struct btrace_line_range rhs)
static int record_btrace_tailcall_frame_sniffer(const struct frame_unwind *self, frame_info_ptr this_frame, void **this_cache)
static struct btrace_thread_info * require_btrace(void)
static struct btrace_config record_btrace_conf
static void record_btrace_frame_this_id(frame_info_ptr this_frame, void **this_cache, struct frame_id *this_id)
static struct target_waitstatus btrace_step_no_resumed(void)
static int btrace_line_range_is_empty(struct btrace_line_range range)
static void record_btrace_on_new_thread(struct thread_info *tp)
static void show_record_pt_buffer_size_value(struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value)
static const char * btrace_get_bfun_name(const struct btrace_function *bfun)
static struct target_waitstatus btrace_step_no_history(void)
static void record_btrace_frame_dealloc_cache(frame_info *self, void *this_cache)
static void show_record_bts_buffer_size_value(struct ui_file *file, int from_tty, struct cmd_list_element *c, const char *value)
static htab_t bfcache
static const char *const replay_memory_access_types[]
static struct cmd_list_element * record_btrace_cmdlist
static const char * record_btrace_adjust_size(unsigned int *size)
static struct cmd_list_element * set_record_btrace_cpu_cmdlist
static void record_btrace_auto_enable(void)
static enum unwind_stop_reason record_btrace_frame_unwind_stop_reason(frame_info_ptr this_frame, void **this_cache)
static const target_info record_btrace_target_info
Definition: record-btrace.c:48
static struct btrace_frame_cache * bfcache_new(frame_info_ptr frame)
static const struct btrace_function * btrace_get_frame_function(frame_info_ptr frame)
static struct cmd_list_element * set_record_btrace_bts_cmdlist
static int record_btrace_replay_at_breakpoint(struct thread_info *tp)
void record_detach(struct target_ops *t, inferior *inf, int from_tty)
Definition: record.c:190
struct cmd_list_element * set_record_cmdlist
Definition: record.c:52
struct cmd_list_element * show_record_cmdlist
Definition: record.c:53
struct cmd_list_element * record_cmdlist
Definition: record.c:50
void record_preopen(void)
Definition: record.c:86
void record_mourn_inferior(struct target_ops *t)
Definition: record.c:205
void record_kill(struct target_ops *t)
Definition: record.c:221
int record_check_stopped_by_breakpoint(const address_space *aspace, CORE_ADDR pc, enum target_stop_reason *reason)
Definition: record.c:237
record_print_flag
Definition: record.h:57
@ RECORD_PRINT_SRC_LINE
Definition: record.h:59
@ RECORD_PRINT_INSN_RANGE
Definition: record.h:62
@ RECORD_PRINT_INDENT_CALLS
Definition: record.h:65
record_method
Definition: record.h:44
@ RECORD_METHOD_BTRACE
Definition: record.h:52
@ RECORD_METHOD_NONE
Definition: record.h:46
CORE_ADDR regcache_read_pc(struct regcache *regcache)
Definition: regcache.c:1324
void registers_changed_thread(thread_info *thread)
Definition: regcache.c:571
struct regcache * get_current_regcache(void)
Definition: regcache.c:426
#define enable()
Definition: ser-go32.c:239
const char * symtab_to_filename_for_display(struct symtab *symtab)
Definition: source.c:1287
void print_source_lines(struct symtab *s, int line, int stopline, print_source_lines_flags flags)
Definition: source.c:1481
@ PRINT_SOURCE_LINES_FILENAME
Definition: source.h:140
struct btrace_call_iterator begin
Definition: btrace.h:226
struct btrace_call_iterator end
Definition: btrace.h:227
unsigned int index
Definition: btrace.h:209
const struct btrace_thread_info * btinfo
Definition: btrace.h:206
const struct btrace_function * bfun
struct thread_info * tp
frame_info * frame
btrace_function_flags flags
Definition: btrace.h:186
unsigned int up
Definition: btrace.h:155
struct minimal_symbol * msym
Definition: btrace.h:142
unsigned int prev
Definition: btrace.h:149
unsigned int number
Definition: btrace.h:175
std::vector< btrace_insn > insn
Definition: btrace.h:160
struct symbol * sym
Definition: btrace.h:143
unsigned int insn_offset
Definition: btrace.h:170
struct btrace_insn_iterator begin
Definition: btrace.h:217
struct btrace_insn_iterator end
Definition: btrace.h:218
unsigned int call_index
Definition: btrace.h:196
const struct btrace_thread_info * btinfo
Definition: btrace.h:193
btrace_insn_flags flags
Definition: btrace.h:81
CORE_ADDR pc
Definition: btrace.h:72
struct symtab * symtab
unsigned int ngaps
Definition: btrace.h:339
std::vector< btrace_function > functions
Definition: btrace.h:331
btrace_thread_flags flags
Definition: btrace.h:342
struct btrace_insn_iterator * replay
Definition: btrace.h:353
struct btrace_call_history * call_history
Definition: btrace.h:348
enum target_stop_reason stop_reason
Definition: btrace.h:356
struct btrace_insn_history * insn_history
Definition: btrace.h:345
unsigned int is_speculative
Definition: disasm.h:337
unsigned int number
Definition: disasm.h:334
CORE_ADDR addr
Definition: disasm.h:331
CORE_ADDR code_addr
Definition: frame-id.h:83
CORE_ADDR special_addr
Definition: frame-id.h:95
const char * print_name() const
Definition: symtab.h:474
Definition: gnu-nat.c:154
Definition: symtab.h:1548
int nitems
Definition: symtab.h:1582
struct linetable_entry item[1]
Definition: symtab.h:1587
Definition: value.c:72
void add_thread(thread_info *thread)
DISABLE_COPY_AND_ASSIGN(scoped_btrace_disable)
scoped_btrace_disable()=default
std::forward_list< thread_info * > m_threads
struct symtab * symtab
Definition: symtab.h:1414
struct symtab * symtab
Definition: symtab.h:2263
CORE_ADDR end
Definition: symtab.h:2273
struct linetable * linetable() const
Definition: symtab.h:1613
virtual ptid_t wait(ptid_t, struct target_waitstatus *, target_wait_flags options) TARGET_DEFAULT_FUNC(default_target_wait)
virtual int remove_breakpoint(struct gdbarch *, struct bp_target_info *, enum remove_bp_reason) TARGET_DEFAULT_NORETURN(noprocess())
virtual void fetch_registers(struct regcache *, int) TARGET_DEFAULT_IGNORE()
virtual bool stopped_by_sw_breakpoint() TARGET_DEFAULT_RETURN(false)
target_ops * beneath() const
Definition: target.c:3020
virtual void store_registers(struct regcache *, int) TARGET_DEFAULT_NORETURN(noprocess())
virtual enum target_xfer_status xfer_partial(enum target_object object, const char *annex, gdb_byte *readbuf, const gdb_byte *writebuf, ULONGEST offset, ULONGEST len, ULONGEST *xfered_len) TARGET_DEFAULT_RETURN(TARGET_XFER_E_IO)
virtual gdb::byte_vector virtual thread_info_to_thread_handle(struct thread_info *) TARGET_DEFAULT_RETURN(gdb voi stop)(ptid_t) TARGET_DEFAULT_IGNORE()
Definition: target.h:684
virtual bool supports_stopped_by_sw_breakpoint() TARGET_DEFAULT_RETURN(false)
virtual bool stopped_by_hw_breakpoint() TARGET_DEFAULT_RETURN(false)
virtual void async(bool) TARGET_DEFAULT_NORETURN(tcomplain())
virtual bool supports_stopped_by_hw_breakpoint() TARGET_DEFAULT_RETURN(false)
virtual void resume(ptid_t, int TARGET_DEBUG_PRINTER(target_debug_print_step), enum gdb_signal) TARGET_DEFAULT_NORETURN(noprocess())
virtual void update_thread_list() TARGET_DEFAULT_IGNORE()
virtual void prepare_to_store(struct regcache *) TARGET_DEFAULT_NORETURN(noprocess())
virtual bool thread_alive(ptid_t ptid) TARGET_DEFAULT_RETURN(false)
virtual void disconnect(const char *, int) TARGET_DEFAULT_NORETURN(tcomplain())
virtual int insert_breakpoint(struct gdbarch *, struct bp_target_info *) TARGET_DEFAULT_NORETURN(noprocess())
virtual const struct btrace_config * btrace_conf(const struct btrace_target_info *) TARGET_DEFAULT_RETURN(NULL)
CORE_ADDR endaddr
struct bfd_section * the_bfd_section
Definition: value.c:181
struct symtab * find_pc_line_symtab(CORE_ADDR pc)
Definition: symtab.c:3319
struct symtab_and_line find_pc_line(CORE_ADDR pc, int notcurrent)
Definition: symtab.c:3297
bool target_is_async_p()
Definition: target.c:403
void target_async(bool enable)
Definition: target.c:4330
const struct target_section * target_section_by_addr(struct target_ops *target, CORE_ADDR addr)
Definition: target.c:1379
bool target_can_async_p()
Definition: target.c:385
bool target_has_execution(inferior *inf)
Definition: target.c:202
void add_target(const target_info &t, target_open_ftype *func, completer_ftype *completer)
Definition: target.c:863
bool target_is_non_stop_p()
Definition: target.c:4387
std::string target_pid_to_str(ptid_t ptid)
Definition: target.c:2602
@ INF_REG_EVENT
Definition: target.h:129
target_xfer_status
Definition: target.h:214
@ TARGET_XFER_UNAVAILABLE
Definition: target.h:222
bool may_write_registers
target_object
Definition: target.h:138
@ TARGET_OBJECT_MEMORY
Definition: target.h:142
strata
Definition: target.h:89
@ record_stratum
Definition: target.h:94
#define current_uiout
Definition: ui-out.h:40
void gdb_printf(struct ui_file *stream, const char *format,...)
Definition: utils.c:1865
@ TARGET_WAITKIND_SPURIOUS
Definition: waitstatus.h:78
@ TARGET_WAITKIND_NO_HISTORY
Definition: waitstatus.h:93
@ TARGET_WAITKIND_IGNORE
Definition: waitstatus.h:89
@ TARGET_STOPPED_BY_SW_BREAKPOINT
Definition: waitstatus.h:434
@ TARGET_STOPPED_BY_HW_BREAKPOINT
Definition: waitstatus.h:437