Initial commit of OpenSPARC T2 architecture model.
[OpenSPARC-T2-SAM] / sam-t2 / sam / analyzers / rstracer / rstracer.cc
CommitLineData
920dae64
AT
1// ========== Copyright Header Begin ==========================================
2//
3// OpenSPARC T2 Processor File: rstracer.cc
4// Copyright (c) 2006 Sun Microsystems, Inc. All Rights Reserved.
5// DO NOT ALTER OR REMOVE COPYRIGHT NOTICES.
6//
7// The above named program is free software; you can redistribute it and/or
8// modify it under the terms of the GNU General Public
9// License version 2 as published by the Free Software Foundation.
10//
11// The above named program is distributed in the hope that it will be
12// useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14// General Public License for more details.
15//
16// You should have received a copy of the GNU General Public
17// License along with this work; if not, write to the Free Software
18// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
19//
20// ========== Copyright Header End ============================================
21/* rstracer.cc -- rstrace for SAM v5 */
22
23#include <stdio.h>
24#include <stdlib.h>
25#include <sys/types.h>
26#include <limits.h>
27#include <string.h>
28#include <unistd.h>
29#include <time.h>
30#include <assert.h>
31
32#include "rstf/rstf.h"
33
34#include "spix_sparc.h"
35
36#define USE_RZ3
37
38#ifdef USE_RZ3
39#include "rstzip/Rstzip.H"
40#endif
41
42#include "system.h"
43#include "dev_registry.h"
44
45#include "vtracer.h"
46
47#include "rstracer.h"
48
49static class rstracer * thetracer = NULL;
50
51// non-reentrant function, called from the UI thread
52// when the module is loaded
53
54extern "C" void vtracer_fini();
55
56extern "C" void * vtracer_init(const char *tmp_modname)
57{
58 atexit(vtracer_fini);
59
60 thetracer = new rstracer(tmp_modname);
61 return (void *) thetracer;
62}
63
64
65//non-reentrant function, called from the UI thread
66int rstrace_cmd_action(void * /* usrdata */, int argc, char **tmp_argv)
67{
68 return thetracer->process_ui_cmd(argc, tmp_argv);
69}
70
// SAM UI hooks used to register/remove the "rstrace" command.
extern void UI_register_cmd_2 (char * name, char *help, int (*efn)(void *, int, char **), int (*hfn)());
extern void UI_invalidate_cmd (char * name);

// One-line summary shown by the UI for the registered command.
char help_str[] = "rstrace -o <file> -n <icount>";

// Address mask derived from PSTATE.AM (bit 3): when AM is set, virtual
// addresses are truncated to 32 bits ((uint64_t)(~0u)); otherwise the
// mask is all ones (~0ull) and leaves the address unchanged.
#define MASK_PSTATE_AM(SA_pstate) ( (((SA_pstate)>>3)&1) ? ((uint64_t)(~0u)) : (~0ull) )
77
78
79// non-reentrant function, called from the UI thread
80// (from vtracer_init())
// Construct the singleton tracer (non-reentrant; runs on the UI thread
// from vtracer_init()). Captures cpu topology from globals, allocates
// one per-cpu trace context per possible vcpu id, and registers the
// "rstrace" UI command.
rstracer::rstracer(const char * tmp_modname)
{
    modname = strdup(tmp_modname);

    SAM_intf = NULL; // filled in later by attach()

    // FIXME: get ncpus from the VTracer_SAM_intf
    ncpus = g_nvcpu;
    first_vcpu_id = -1; // resolved lazily in trace_on()
    last_vcpu_id = g_vcpu_id_max;

    tracefilename[0] = 0; // empty => default /tmp name chosen in trace_on()

    // One slot per possible vcpu id; absent ids are marked unused
    // (state_NIL) by trace_on().
    pcs = new rst_pct[last_vcpu_id + 1];

    tracing = false;

    // Guards sync_count and cross-cpu state transitions in instr().
    mutex_init(&mu, USYNC_THREAD, NULL);
    sync_count = 0;

    // FIXME: this is stricly temporary. to be replaced by ui_cmd method
    UI_register_cmd_2(strdup("rstrace"), help_str, rstrace_cmd_action, NULL);

}
105
106
107// non-reentrant function, called when module is loaded
// Record the SAM tracer interface handle; called once when the module is
// loaded (non-reentrant). Always returns 0 (success).
int rstracer::attach(VTracer_SAM_intf * sam_intf) {
    SAM_intf = sam_intf;
    return 0;
}
112
113
// Usage text printed on any malformed "rstrace" command; documents both
// the v5 flag-based form and the legacy v4 positional form.
const char usage[] =
    "rstrace # print rstracer status\n"
    "rstrace off # turn off tracing and close trace file\n"
    "rstrace [-o <file>] [-n <insts-per-cpu>] [-d <initial-delay>] [-x <ntraces> [-p <period>]]\n"
    "Alternative rstrace command format:\n"
    "rstrace <file> [+<delay>] [<totalinsts>] [<ntraces> [<period>]]\n"
    " <file> should be base name to which cpu<n>.rz3.gz is appended.\n"
    " <file> can be - for default /tmp/rstracer<pid>_<date>.cpu<n>.rz3.gz\n"
    " +<delay> can be +0 or + for immediate; must be specified\n"
    " period is the interval between starting pts of periodic traces\n"
    " period assumed to be equal to trace size unless larger value specified\n"
    " totalinsts, if unspecified, is indefinite (until next rstrace off cmd)\n";
// FIXME: add sampling options
127
128
129// non-reentrant function, called from UI thread
130int rstracer::process_ui_cmd(int argc, char **tmp_argv)
131{
132 if (argc == 1) {
133 print_status();
134 return 0;
135 }
136
137 insts_per_cpu = LLONG_MAX;
138
139 ntraces = 1;
140
141 initial_delay = 0;
142 trace_period = 0;
143
144 // argv[1] can be "off", -, filename or -flag
145 if (strcmp(tmp_argv[1], "off") == 0) {
146 if (!tracing) {
147 fprintf(stderr, "%s: tracing is already off\n", id);
148 return 0;
149 }
150 // FIXME: blaze must be stopped
151 ntraces = 0; // reset target number of traces
152 trace_off();
153 return 0;
154 }
155
156 // tracing must be off for any other command
157 if (tracing) {
158 fprintf(stderr, "%s: ERROR: tracing is currently on. Usage: %s", id, usage);
159 return 0;
160 }
161
162 if ((strcmp(tmp_argv[1], "-") == 0) || (tmp_argv[1][0] != '-')) {
163 // v4 format
164 if (parse_args_v4(argc, (const char **) tmp_argv) != 0) {
165 return 0;
166 }
167 } else {
168 if (parse_args_v5(argc, (const char **) tmp_argv) != 0) {
169 return 0;
170 }
171 }
172
173 // if we are still here, this was a trace start command
174
175 tracing = true;
176
177 traces_done = 0;
178
179 delay = initial_delay;
180
181 trace_on();
182
183 return 0;
184
185} // int rstracer::process_ui_cmd(int argc, char **tmp_argv)
186
187// non-reentrant function - called from process_ui_cmd()
188int rstracer::parse_args_v4(int argc, const char * tmp_argv[])
189{
190 if (argc < 3) {
191 fprintf(stderr, "%s: ERROR: insufficient number of arguments. Usage: \n%s", id, usage);
192 return 1;
193 }
194
195 if (strcmp(tmp_argv[1], "-") == 0) {
196 // use default trace file name
197 } else {
198 // FIXME: the legacy format expects to append the time & date to the filename?
199 strcpy(tracefilename, tmp_argv[1]);
200 }
201
202 if (tmp_argv[2][0] != '+') {
203 fprintf(stderr, "%s: ERROR: second argument must be +[<delay>]. Usage: \n%s", id, usage);
204 return 1;
205 }
206 if (tmp_argv[2][1] == 0) {
207 // use default
208 } else {
209 initial_delay = strtoll(tmp_argv[2] + 1, NULL, 0);
210 }
211
212 // trace size
213 if (argc >= 4) {
214 int64_t trsize = strtoll(tmp_argv[3], NULL, 0);
215 insts_per_cpu = (trsize + ncpus - 1) / ncpus;
216 } else {
217 // use default
218 }
219
220 // number of traces
221 if (argc >= 5) {
222 ntraces = (int) strtol(tmp_argv[4], NULL, 0);
223 }
224
225 // trace period
226 if (argc >= 6) {
227 int64_t psize = (int) strtoll(tmp_argv[4], NULL, 0);
228 trace_period = (psize + ncpus - 1)/ncpus;
229 if (trace_period < insts_per_cpu) {
230 if (trace_period != 0) {
231 fprintf(stderr, "%s: WARNING: period (%lld insts/cpu) < trace size (%lld insts/cpu). ignoring.\n",
232 id, trace_period, insts_per_cpu);
233 }
234 trace_period = insts_per_cpu;
235 }
236 }
237
238 if (argc >= 7) {
239 fprintf(stderr, "%s: ERROR: too many arguments. Usage: \n%s", id, usage);
240 return 1;
241 }
242
243 return 0;
244} // void rstracer::parse_args_v4(int argc, const char * tmp_argv[])
245
246
247// non-reentrant function - called from process_ui_cmd()
248int rstracer::parse_args_v5(int argc, const char * tmp_argv[])
249{
250 // preferred command format
251 int i = 1;
252 while(i < argc) {
253 const char * arg = tmp_argv[i++];
254 if (strcmp(arg, "-o") == 0) {
255 if (i == argc) {
256 fprintf(stderr, "%s: ERROR: -o requires an argument. Usage: \n%s", id, usage);
257 return 1;
258 }
259 strcpy(tracefilename, tmp_argv[i++]);
260 } else if (strcmp(arg, "-n") == 0) {
261 if (i == argc) {
262 fprintf(stderr, "%s: ERROR: -n requires an argument. Usage: \n%s", id, usage);
263 return 1;
264 }
265 insts_per_cpu = strtoll(tmp_argv[i++], NULL, 0);
266 } else if (strcmp(arg, "-d") == 0) {
267 if (i == argc) {
268 fprintf(stderr, "%s: ERROR: -d requires an argument. Usage: \n%s", id, usage);
269 return 1;
270 }
271 initial_delay = strtoll(tmp_argv[i++], NULL, 0);
272 } else if(strcmp(arg, "-x") == 0) {
273 if (i == argc) {
274 fprintf(stderr, "%s: ERROR: -x requires an argument. Usage: \n%s", id, usage);
275 return 1;
276 }
277 ntraces = (int) strtol(tmp_argv[i++], NULL, 0);
278 } else if (strcmp(arg, "-p") == 0) {
279 if (i == argc) {
280 fprintf(stderr, "%s: ERROR: -x requires an argument. Usage: \n%s", id, usage);
281 return 1;
282 }
283 trace_period = strtoll(tmp_argv[i++], NULL, 0);
284 } else {
285 fprintf(stderr, "%s: ERROR: invalid argument %s. Usage: \n%s", id, arg, usage);
286 return 1;
287 }
288 }
289
290 // check args
291 if (trace_period < insts_per_cpu) {
292 if (trace_period != 0) {
293 fprintf(stderr, "%s: WARNING: period (%lld insts/cpu) < trace size (%lld insts/cpu). ignoring.\n",
294 id, trace_period, insts_per_cpu);
295 }
296 trace_period = insts_per_cpu;
297 }
298
299 return 0;
300} // void rstracer::parse_args_v5(int argc, const char * tmp_argv[])
301
302
303// NON-REENTRANT function
304// called from parse_args() (while blaze is stopped) from the UI thread.
305// also called from trace_off() (while all cpus are done tracing) in case
306// one more trace is needed. In that case, trace_off() should not modify
307// state
308void rstracer::trace_on()
309{
310
311 char fname[PATH_MAX];
312
313 if (tracefilename[0] == 0) {
314 // default trace file name
315 time_t curlocaltime = time(NULL);
316 struct tm * localtm = localtime(&curlocaltime);
317 sprintf(fname, "/tmp/rstrace%d_%04d%02d%02d_%02d%02d%02d",
318 getpid(), localtm->tm_year+1900, localtm->tm_mon, localtm->tm_mday,
319 localtm->tm_hour, localtm->tm_min, localtm->tm_sec);
320 } else {
321 strcpy(fname, tracefilename);
322 }
323
324 if (ntraces > 1) {
325 char str[16];
326 sprintf(str, ".trace%03d", traces_done);
327 strcat(fname, str);
328 }
329
330 int i;
331 for (i=0; i<=last_vcpu_id; i++) {
332 if (get_vcpu(i))
333 {
334 if (first_vcpu_id<0)
335 first_vcpu_id = i;
336
337 pcs[i].init(i, fname);
338 // if there is no delay to the trace, do not synchronize
339 pcs[i].state = delay ? rst_pct::state_DELAY : rst_pct::state_TRACE_START;
340 } else {
341 pcs[i].init(-1, NULL);
342 }
343
344 } // for each cpuid
345
346} // void rstracer::trace_on()
347
348
349// NON-REENTRANT function - called from trace_on()
350void rst_pct::init(int arg_cpuid, const char * tmp_tracefilename)
351{
352 cpuid = arg_cpuid;
353 icontext = dcontext = ~0u;
354 pc_pavadiff = ea_pavadiff = ~0ull;
355 ninsts = 0;
356 nrecs = 0;
357 dinsts = 0;
358
359 if (!tmp_tracefilename)
360 {
361 state = rst_pct::state_NIL;
362 return;
363 }
364
365 regval.rtype = REGVAL_T;
366 regval.postInstr = 1;
367 rstf_regvalT_set_cpuid(&regval, cpuid);
368 regval.regtype[0] = regval.regtype[1] = RSTREG_UNUSED_RT;
369
370 hpr = pr = 0;
371
372 memcache = new uint64_t [RSTF_MEMVAL_CACHE_LINES];
373 int i;
374 for (i=0; i<RSTF_MEMVAL_CACHE_LINES; i++) {
375 memcache[i] = ~0ull;
376 }
377
378 memset(&mv64, 0, sizeof(mv64));
379 mv64.rtype = MEMVAL_T;
380 rstf_memval64T_set_cpuid(&mv64, cpuid);
381 mv64.size = 8;
382
383 memset(&mv128, 0, sizeof(mv64));
384 mv128.rtype = MEMVAL_T;
385 mv128.ismemval128 = 1;
386 mv128.isContRec = 1;
387 rstf_memval128T_set_cpuid(&mv128, cpuid);
388
389#ifdef USE_RZ3
390 sprintf(fname, "%s.cpu%d.rz3.gz", tmp_tracefilename, cpuid);
391 rz = new Rstzip;
392 int rzerr = rz->open(fname, "w", "verbose=0");
393 if (rzerr != RSTZIP_OK) {
394 perror(fname);
395 exit(1);
396 }
397#else
398 sprintf(fname, "%s.cpu%d.rst", tmp_tracefilename, cpuid);
399
400 trf = fopen(fname, "w");
401 if (trf == NULL) {
402 perror(fname);
403 exit(1);
404 }
405#endif
406
407} // void rst_pct::init(int arg_cpuid, const char * tmp_tracefilename)
408
409
// ASI numbers used to read the MMU primary/secondary context registers.
static const uint8_t CH_MMU_CONTEXTREG_ASI = 0x58; // Cheetah-family (blaze)
static const uint8_t UA_MMU_CONTEXTREG_ASI = 0x21; // ultrasparc arch 2005 and newer
412
413// REENTRANT function - called from per cpu instr callback
// Write this cpu's trace preamble: RST header and traceinfo records,
// descriptor strings (tool/date/host/simulator version), device-id
// records, a snapshot of the TLBs, and regval records capturing the
// starting register state (hpriv, priv, per-trap-level, ASR, integer,
// FP double and MMU context registers). Also caches hpr/pr/pcontext/
// scontext and the PSTATE.AM mask for use by instr().
// REENTRANT function - called from per cpu instr callback
void rst_pct::emit_trace_preamble()
{
    printf("%s: starting trace for cpu%d\n", id, cpuid);

    rstf_unionT ru;

    // RST file header record
    ru.proto.rtype = RSTHEADER_T;
    ru.header.majorVer = RSTF_MAJOR_VERSION;
    ru.header.minorVer = RSTF_MINOR_VERSION;
    ru.header.percent = '%';
    sprintf(ru.header.header_str, "%s v%s", RSTF_MAGIC, RSTF_VERSION_STR);
    addrec(&ru);

    // Traceinfo
    ru.tlevel.rtype = TRACEINFO_T;
    ru.tlevel.rtype2 = RSTT2_NLEVEL_T;
    ru.tlevel.level = 0;
    ru.tlevel.val32 = 0;
    time_t curtime = (uint64_t) time(NULL);
    ru.tlevel.time64 = curtime;
    addrec(&ru);

    memset(&ru, 0, sizeof(ru));
    ru.cpuinfo.rtype = TRACEINFO_T;
    ru.cpuinfo.rtype2 = RSTT2_CPUINFO_T;
    ru.cpuinfo.numcpus = 1; // in this cpu's trace
    ru.cpuinfo.min_cpu_id = ru.cpuinfo.max_cpu_id = cpuid;
    addrec(&ru);

    memset(&ru, 0, sizeof(ru));
    ru.cpuidinfo.rtype = TRACEINFO_T;
    ru.cpuidinfo.rtype2 = RSTT2_CPUIDINFO_T;
    ru.cpuidinfo.cpuids[0] = cpuid;
    addrec(&ru);

    char desc[512];

    // descriptor string records
    sprintf(desc, "SAM [rstracer.so]");
    string2rst(desc);

    struct tm localtm;
    localtime_r((const time_t *)&curtime, &localtm);
    sprintf(desc, "date=%04d-%02d-%02d_%02d:%02d:%02d",
            localtm.tm_year+1900, localtm.tm_mon+1, localtm.tm_mday,
            localtm.tm_hour, localtm.tm_min, localtm.tm_sec);
    string2rst(desc);

    sprintf(desc, "host:");
    string2rst(desc);

    gethostname(desc, 512);
    string2rst(desc);

    sprintf(desc, "<SAMinfo>");
    string2rst(desc);

    sprintf(desc, "blz::version=%s", SYSTEM_get_infostr());
    string2rst(desc);

    // get device ids/names
    // FIXME: in the next putback, replace this direct access
    // to sam internal structures with the system abstraction
    extern devRegistry * samDevs;
    int devid=1;
    while(1) {
        const char * devname = samDevs->getName(devid);
        if (strcmp(devname, "unknown device") == 0) {
            break; // first unknown id ends the registry walk
        } else {
            int namelen = strlen(devname);
            if (namelen > 18) namelen = 18; // truncate to the record's str field
            // output a record for this device
            rstf_devidstrT devidstr = {0};
            devidstr.rtype = DEVIDSTR_T;
            devidstr.id = devid;
            strncpy(devidstr.str, devname, namelen);
            addrec((rstf_unionT*)&devidstr);
        }
        devid++;
    }

    sprintf(desc, "blz::ncpus=%d", g_nvcpu);
    string2rst(desc);

    Vcpu * my_vcpu = g_vcpu[cpuid];
    VCPU_TLB * tlb_entries = NULL;
    int n_entries = my_vcpu->get_tlb_entries(tlb_entries);
    int i;

    // snapshot of all TLB entries, one TLB_T record each
    if(n_entries > 0) {
        rstf_tlbT tlbrec = {0};
        rstf_tlbT_set_cpuid(&tlbrec, cpuid);
        tlbrec.rtype = TLB_T;
        for (i=0; i<n_entries; i++) {
            tlbrec.tlb_type = tlb_entries[i].tlb_type;
            tlbrec.tlb_index = tlb_entries[i].tlb_index;
            tlbrec.tlb_no = tlb_entries[i].tlb_no;
            tlbrec.tte_tag = tlb_entries[i].tte_tag;
            tlbrec.tte_data = tlb_entries[i].tte_data;

            // FIXME: this needs changes in rst, rstzip etc.
            // for sun4v, we use the field currently named "unused16" for context
            // and bit 0 of the field "unused" for "is_real"
            if (tlb_entries[i].format == 1) { // sun4v
                tlbrec.unused16 = tlb_entries[i].tte_context;
                tlbrec.unused = tlb_entries[i].is_real;
            }
            addrec((rstf_unionT *)&tlbrec);
        }
        free(tlb_entries); // get_tlb_entries() transfers ownership to us
    }

    uint64_t v64;

    // hpriv bit (bit 2 of HPSTATE); 0 if the cpu has no HPSTATE register
    int rv = my_vcpu->get_reg(VCPU_HPR_HPSTATE, &v64);
    if (rv == 0) {
        hpr = (v64>>2) & 1;
    } else {
        hpr = 0;
    }

    // regvals: hpriv regs
    for (i=0; i<=32; i++) {
        if (my_vcpu->get_reg(VCPU_HPR_0 + i, &v64) == 0) {
            add_regval(RSTREG_HPRIV_RT, i, v64);
        }
    }
    flush_regval();

    // regvals: priv regs (also caches pr and the PSTATE.AM mask)
    for (i=0; i<32; i++) {
        int regid = VCPU_PR_0 + i;
        if (my_vcpu->get_reg(regid, &v64)==0) {
            add_regval(RSTREG_PRIV_RT, i, v64);
            if (regid == VCPU_PR_PSTATE) {
                pr = (v64>>2) & 1;
                mask_pstate_am = MASK_PSTATE_AM(v64);
            }
        }
    }
    flush_regval();

    // regvals: trap-level regs. TL is temporarily rewritten to address
    // each trap level's TPC/TNPC/TSTATE/TT, then restored.
    uint64_t curtl;
    my_vcpu->get_reg(VCPU_PR_TL, &curtl);
    int tl;
    for (tl=1; tl<=curtl; tl++) {
        my_vcpu->set_reg(VCPU_PR_TL, tl);

        my_vcpu->get_reg(VCPU_PR_TPC, &v64);
        add_regval(RSTREG_PRIV_RT, RSTREG_TPC_RBASE + 8*0 + tl, v64);

        my_vcpu->get_reg(VCPU_PR_TNPC, &v64);
        add_regval(RSTREG_PRIV_RT, RSTREG_TPC_RBASE + 8*1 + tl, v64);

        my_vcpu->get_reg(VCPU_PR_TSTATE, &v64);
        add_regval(RSTREG_PRIV_RT, RSTREG_TPC_RBASE + 8*2 + tl, v64);

        my_vcpu->get_reg(VCPU_PR_TT, &v64);
        add_regval(RSTREG_PRIV_RT, RSTREG_TPC_RBASE + 8*3 + tl, v64);
    }
    my_vcpu->set_reg(VCPU_PR_TL, curtl);
    flush_regval();

    // regvals: asr regs
    for (i=0; i<32; i++) {
        if (my_vcpu->get_reg(VCPU_ASR_0 + i, &v64)==0) {
            add_regval(RSTREG_OTHER_RT, i, v64);
        }
    }
    flush_regval();

    // regvals: cur int regs
    for (i=0; i<32; i++) {
        my_vcpu->get_reg(VCPU_IRF_0 + i, &v64);
        add_regval(RSTREG_INT_RT, i, v64);
    }
    flush_regval();

    // all globals

    // regvals: win int regs

    // regvals: fp regs
    // for dregs, the regnum encoding is same as in sparcv9
    // regid is EVEN, and up to 6 bits (0, 2, .. 62).
    // regnum = {regid[4:1],regid[5]}
    unsigned regid;
    for (regid=0; regid<64; regid+=2) {
        // NOTE(review): 'regnum' implements the sparcv9 encoding described
        // above but is never used; the record carries 32+(regid/2) instead.
        int regnum = (regid & 0x1e) | (regid >> 5);
        my_vcpu->get_reg(VCPU_DRF_0 + regid/2, &v64);
        add_regval(RSTREG_FLOAT_RT, 32+(regid/2), v64);
    }
    flush_regval();

    // icontext and dcontext regs
    // FIXME: using magic numbers from UltraSPARC architecture for now
    uint64_t reg64;
    uint8_t mmu_asi;
    if ((my_vcpu->config.cpu_type & VCPU_IMPL_SIM_MASK) == VCPU_IMPL_SIM_BLAZE) {
        mmu_asi = CH_MMU_CONTEXTREG_ASI;
    } else {
        mmu_asi = UA_MMU_CONTEXTREG_ASI;
    }
    my_vcpu->get_asi(mmu_asi, RSTREG_MMU_PCONTEXT, reg64); pcontext = (uint32_t) reg64;
    my_vcpu->get_asi(mmu_asi, RSTREG_MMU_SCONTEXT, reg64); scontext = (uint32_t) reg64;

    add_regval(RSTREG_MMU_RT, RSTREG_MMU_PCONTEXT, pcontext);
    add_regval(RSTREG_MMU_RT, RSTREG_MMU_SCONTEXT, scontext);
    flush_regval();
} // void rst_pct::emit_trace_preamble()
627
628
629void rst_pct::addrec(rstf_unionT * ru)
630{
631#ifdef USE_RZ3
632 rz->compress(ru, 1);
633#else
634 rv = fwrite(ru, sizeof(rstf_unionT), 1, trf);
635 if (rv != 1) perror(fname);
636#endif
637 nrecs++;
638} // void rst_pct::addrec(rstf_unionT * ru)
639
640
641void rst_pct::string2rst(const char * str)
642{
643 int n = (int) strlen(str);
644 // the last strdesc record contains 22 bytes (and a zero byte to terminate)
645 // each preceding strcont record contains 23 bytes
646 // thus total number of records is: strlen/23 + 1
647 int nsr = 1 + n/23;
648 rstf_unionT ru;
649 int i;
650 for (i=0; i<nsr-1; i++) {
651 ru.string.rtype = STRCONT_T;
652 strncpy(ru.string.string, str, 23);
653 str += 23;
654 addrec(&ru);
655 }
656 ru.string.rtype = STRDESC_T;
657 strcpy(ru.string.string, str);
658 addrec(&ru);
659} // void rst_pct::string2rst(const char * str)
660
661
662void rst_pct::add_regval(int rstregtype, int rstregid, uint64_t v64) {
663 if (regval.regtype[0] == RSTREG_UNUSED_RT) {
664 regval.regtype[0] = rstregtype;
665 regval.regid[0] = rstregid;
666 regval.reg64[0] = v64;
667 } else {
668 regval.regtype[1] = rstregtype;
669 regval.regid[1] = rstregid;
670 regval.reg64[1] = v64;
671 flush_regval();
672 }
673} // void rst_pct::add_regval() {
674
675void rst_pct::flush_regval()
676{
677 if(regval.regtype[0] != RSTREG_UNUSED_RT) {
678 addrec((rstf_unionT *) &regval);
679 regval.regtype[0] = regval.regtype[1] = RSTREG_UNUSED_RT;
680 }
681} // void rst_pct::flush_regval()
682
// Address bits above the memval cache block offset.
const uint64_t MEMCACHE_TAGMASK = ~((uint64_t)(RSTF_MEMVAL_CACHE_BLOCKSIZE-1));
// Look up physical address 'pa' in this cpu's memval filter cache.
// Returns 1 on a hit. On a miss, installs the block's tag and dumps the
// block's memory contents into the trace as 2 memval64 records followed
// by 7 memval128 records.
int rst_pct::memcache_ref(uint64_t pa)
{
    uint64_t tag = pa & MEMCACHE_TAGMASK;
    // NOTE(review): the index is taken from the tag without shifting out
    // the block-offset bits, so only the tag bits below
    // RSTF_MEMVAL_CACHE_LINES can select a line — verify this mapping is
    // intended (it only affects filter efficiency, not trace validity).
    uint64_t idx = tag & (RSTF_MEMVAL_CACHE_LINES-1);
    int rv = (tag == memcache[idx]);

    if (rv == 0) {
        // process miss
        memcache[idx] = tag;

        // output memvals: 2x memval64, 7x memval128


        uint64_t addr = tag;
        mv64.addr = addr;
        mv64.val = memread64u(mm1, addr);
        addrec((rstf_unionT*) &mv64);
        addr += 8;

        mv64.addr = addr;
        mv64.val = memread64u(mm1, addr);
        addrec((rstf_unionT*) &mv64);
        addr += 8;

        int i;
        for (i=0; i<7; i++) {
            rstf_memval128T_set_addr(&mv128, addr);
            mv128.val[0] = memread64u(mm1, addr);
            addr += 8;
            mv128.val[1] = memread64u(mm1, addr);
            addrec((rstf_unionT*)&mv128);
            addr += 8;
        }

    } // hit or miss?
    return rv;

} // int rst_pct::memcache_ref(uint64_t pa)
722
723
724
725// this function is called from a cpu after all cpus are done
726// tracing, or the rstrace off ui command. In either case,
727// it is NON-REENTRANT
728void rstracer::trace_off()
729{
730 int i;
731
732 if (! tracing) {
733 fprintf(stderr, "%s: tracing is already off\n", id);
734 } else {
735 printf("%s: finalizing trace(s)...\n", id);
736
737 // close files
738 for (i=0; i<=last_vcpu_id; i++) {
739 pcs[i].fini();
740 }
741
742 traces_done++;
743
744 if (traces_done >= ntraces) {
745 tracing = false;
746 } else {
747 // start next trace
748 delay = trace_period - insts_per_cpu;
749 trace_on();
750 }
751 } // tracing?
752} // void rstracer::trace_off()
753
754
755
756
757void rst_pct::fini()
758{
759 if (state == rst_pct::state_NIL)
760 return;
761
762#ifdef USE_RZ3
763 rz->close();
764 delete rz;
765 rz = NULL;
766#else
767 fclose(trf);
768 trf = NULL;
769#endif
770 printf("%s: cpu%d: trace written to %s - %lld insts, %lld records\n", id, cpuid, fname, ninsts, nrecs);
771} // void vtrace_per_cpu_tracer::fini()
772
773
774void rstracer::print_status()
775{
776 // use cpu0 to identify status
777
778 if (!tracing) {
779 printf("%s: idle\n", id);
780 } else {
781 enum rst_pct::state_e state = pcs[first_vcpu_id].state;
782 int64_t dleft;
783 switch(state) {
784 case rst_pct::state_DELAY:
785 dleft = (delay-pcs[first_vcpu_id].dinsts);
786 if (dleft < 0) dleft = 0;
787 printf("%s: in delayed tracing mode. remaining delay is approx %lld insts/cpu (%lld total)\n",
788 id, dleft, ncpus*dleft);
789 if (ntraces > 1) {
790 printf(" %d traces out of %d done\n", traces_done, ntraces);
791 }
792 break;
793 case rst_pct::state_TRACING:
794 case rst_pct::state_TRACE_START:
795 case rst_pct::state_WAIT_SYNC_START:
796 case rst_pct::state_WAIT_START:
797 case rst_pct::state_WAIT_SYNC_STOP:
798 case rst_pct::state_WAIT_STOP:
799 printf("%s: tracing: approx %lld insts/cpu out of %lld done\n",
800 id, pcs[first_vcpu_id].ninsts, insts_per_cpu);
801 if (ntraces > 1) {
802 printf(" trace number %d of 0..%d in progress\n", traces_done, ntraces-1);
803 }
804 break;
805 default:
806 fprintf(stderr, "%s: ERROR: in invalid state (%d)\n", id, state);
807 }
808 } // tracing?
809} // void rstracer::print_status()
810
811
// Map VCPU register-type codes (used as the array index in instr()) to
// the corresponding RST regval record types. Single- and double-
// precision FP both map to RSTREG_FLOAT_RT; instr() adjusts the
// register id for doubles.
static const int vcpu_rtype_to_rst[] = {
    RSTREG_UNUSED_RT, // unused in vcpu
    RSTREG_PRIV_RT,
    RSTREG_OTHER_RT,
    RSTREG_INT_RT,
    RSTREG_FLOAT_RT, // single
    RSTREG_FLOAT_RT, // double
    RSTREG_HPRIV_RT, // hyperprivileged registers
};


// True when the decoded opcode is the SPARC FLUSH or FLUSHA instruction.
#define RSTRACER_IOP_IS_FLUSH(_IOP_) (((_IOP_)==SPIX_SPARC_IOP_FLUSH)||((_IOP_)==SPIX_SPARC_IOP_FLUSHA))
824// REENTRANT function
825int rstracer::instr(VCPU_Instruction * ii)
826{
827
828 if (!tracing) {
829 return 0;
830 }
831
832 int cpuid = ii->cpuid;
833
834 if (pcs[cpuid].state != rst_pct::state_TRACING) { // rule out common case quickly
835
836 int i;
837
838 switch(pcs[cpuid].state) {
839 case rst_pct::state_DELAY:
840 if (pcs[cpuid].dinsts >= delay) {
841 pcs[cpuid].state = rst_pct::state_WAIT_SYNC_START;
842 return instr(ii); // recursive call (with changed state)
843 }
844 pcs[cpuid].dinsts++;
845 return 0;
846
847 case rst_pct::state_WAIT_SYNC_START:
848 mutex_lock(&mu);
849 // increment sync_count. if ==ncpu, start everyone
850 sync_count++;
851 if (sync_count == ncpus) {
852 for (i=0; i<ncpus; i++) {
853 pcs[i].state = rst_pct::state_TRACE_START;
854 }
855 sync_count = 0;
856 } else {
857 pcs[cpuid].state = rst_pct::state_WAIT_START;
858 }
859 mutex_unlock(&mu);
860 return instr(ii); // recursive call (with changed state)
861
862 case rst_pct::state_WAIT_START:
863 return 0;
864
865 case rst_pct::state_WAIT_SYNC_STOP:
866 mutex_lock(&mu);
867 // increment sync_count. if ==ncpu, start everyone
868 sync_count++;
869 if (sync_count == ncpus) {
870 trace_off();
871 sync_count = 0;
872 } else {
873 pcs[cpuid].state = rst_pct::state_WAIT_STOP;
874 }
875 mutex_unlock(&mu);
876 return instr(ii); // recursive call (with changed state)
877
878 case rst_pct::state_WAIT_STOP:
879 return 0;
880
881 case rst_pct::state_TRACE_START:
882 pcs[cpuid].emit_trace_preamble();
883 pcs[cpuid].state = rst_pct::state_TRACING;
884 // include this instruction in trace
885 return instr(ii); // recursive call (with changed state)
886
887 default:
888 fprintf(stderr, "%s: ERROR: rstracer::instr() - invalid state (%d)\n",
889 id, (int) pcs[cpuid].state);
890 assert(0);
891
892 } // switch(state)
893 } // if not state_TRACING
894
895 // in state_TRACING: generate trace records for current instruction
896
897 rstf_unionT ru;
898 rstf_pavadiffT pd;
899
900 memset(&ru, 0, sizeof(ru));
901 memset(&pd, 0, sizeof(pd));
902
903 uint64_t pstate_v;
904 // vcpu:pr may already have changed; instr.pr should reflect pr before instr retired
905 ru.instr.hpriv = pcs[cpuid].hpr;
906 ru.instr.pr = pcs[cpuid].pr;
907
908 // ifetch trap?
909 if (ii->pc_pa == 0) {
910 rstf_trapping_instrT ti = {0};
911 ti.rtype = TRAPPING_INSTR_T;
912 rstf_trapping_instrT_set_cpuid(&ti, ii->cpuid);
913 ti.hpriv = ru.instr.hpriv;
914 ti.priv = ru.instr.pr;
915 ti.iftrap = 1;
916 ti.pc_va = ii->pc_va;
917 if (!ti.hpriv) {
918 ti.pc_va &= pcs[cpuid].mask_pstate_am;
919 }
920 pcs[cpuid].addrec((rstf_unionT *)&ti);
921 return 0;
922 }
923
924 pcs[cpuid].memcache_ref(ii->pc_pa);
925
926 bool need_pavadiff = false;
927
928 // generate instr, pavadiff records
929 ru.proto.rtype = INSTR_T;
930 rstf_instrT_set_cpuid(&ru.instr, cpuid);
931
932 pd.icontext = ii->icontext;
933 if (ii->icontext != pcs[cpuid].icontext) {
934 pcs[cpuid].icontext = ii->icontext;
935 need_pavadiff = true;
936 }
937
938 ru.instr.pc_va = ii->pc_va;
939 if (!ru.instr.hpriv) {
940 ru.instr.pc_va &= pcs[cpuid].mask_pstate_am;
941 }
942
943 pd.pc_pa_va = ii->pc_pa - ru.instr.pc_va;
944 if (pd.pc_pa_va != pcs[cpuid].pc_pavadiff) {
945 need_pavadiff = true;
946 pcs[cpuid].pc_pavadiff = pd.pc_pa_va;
947 }
948
949 ru.instr.instr = ii->opcode;
950 spix_sparc_iop_t iop = spix_sparc_iop(SPIX_SPARC_V9, &(ii->opcode));
951
952 uint64_t pstate64;
953 uint64_t tl64;
954 bool is_done_retry = false;
955
956 if (spix_sparc_iop_isload(iop) || spix_sparc_iop_iscstore(iop) || spix_sparc_iop_isustore(iop) ||
957 RSTRACER_IOP_IS_FLUSH(iop)) {
958
959 uint64_t ea_va = ii->ea_va;
960 if (!ru.instr.hpriv) {
961 ea_va &= pcs[cpuid].mask_pstate_am;
962 }
963
964 uint64_t ea_pa = ii->ea_pa;
965
966 if (ea_pa != 0x0) {
967 ru.instr.ea_valid = 1;
968 ru.instr.ea_va = ea_va;
969 pd.ea_valid = 1;
970 pd.ea_pa_va = ea_pa - ea_va;
971 pd.dcontext = ii->dcontext;
972
973 if (ii->itype & (VCPU_LOAD_ITYPE|VCPU_STORE_ITYPE)) {
974 pcs[cpuid].memcache_ref(ea_pa);
975 }
976 } else if (ii->exception) {
977 // output trapping instr record
978 rstf_trapping_instrT ti = {0};
979 rstf_trapping_instrT_set_cpuid(&ti, cpuid);
980 ti.rtype = TRAPPING_INSTR_T;
981 ti.hpriv = ru.instr.hpriv;
982 ti.priv = ru.instr.pr;
983 ti.iftrap = 0;
984 ti.ea_va_valid = 1;
985 ti.ea_pa_valid = 0;
986 ti.instr = ru.instr.instr;
987 ti.pc_va = ii->pc_va;
988 ti.ea_va = ea_va;
989 pcs[cpuid].addrec((rstf_unionT *)&ti);
990 } else if (ii->itype & (VCPU_ASI_LOAD_ITYPE|VCPU_ASI_STORE_ITYPE)) {
991 //ld/st to internal asi - PA not relevant
992 ru.instr.ea_valid = 1;
993 ru.instr.ea_va = ea_va;
994 pd.ea_valid = 0;
995 need_pavadiff = true;
996 } else { // ea_pa is 0x0 but there is no exception. should be a prefetch instr
997#if 0
998 if (! spix_sparc_iop_isprefetch(iop) && ! RSTRACER_IOP_IS_FLUSH(iop)) {
999 fprintf(stderr, "WARNING: rstracer: cpu%d rec%lld: ea_pa==0 and exception==0???",
1000 cpuid, pcs[cpuid].nrecs);
1001 }
1002#endif
1003 }
1004
1005 } else if (spix_sparc_iop_isdcti(iop)) {
1006 ru.instr.ea_valid = 1;
1007 g_vcpu[cpuid]->get_reg(ii->annul? VCPU_ASR_PC:VCPU_ASR_NPC, &ru.instr.ea_va);
1008 if (!ru.instr.hpriv) {
1009 ru.instr.ea_va &= pcs[cpuid].mask_pstate_am;
1010 }
1011 } else if (iop == SPIX_SPARC_IOP_RETRY) {
1012 uint64_t v64;
1013 ru.instr.ea_valid = 1;
1014 g_vcpu[cpuid]->get_reg(VCPU_ASR_PC, &v64);
1015 if (!ru.instr.hpriv) {
1016 ru.instr.ea_va = v64 & pcs[cpuid].mask_pstate_am;
1017 }
1018 // get hpstate
1019 int rv = g_vcpu[cpuid]->get_reg(VCPU_HPR_HPSTATE, &v64);
1020 if (rv == 0) {
1021 pcs[cpuid].hpr = (v64>>2) & 1;
1022 }
1023 // get pstate
1024 g_vcpu[cpuid]->get_reg(VCPU_PR_PSTATE, &pstate64);
1025 if (pcs[cpuid].hpr == 0) {
1026 pcs[cpuid].pr = (pstate64 >> 2) & 1;
1027 } else {
1028 pcs[cpuid].pr = 0;
1029 }
1030 g_vcpu[cpuid]->get_reg(VCPU_PR_TL, &tl64);
1031 pcs[cpuid].mask_pstate_am = MASK_PSTATE_AM(pstate64);
1032 is_done_retry = true;
1033 } else if (iop == SPIX_SPARC_IOP_DONE) {
1034 ru.instr.ea_valid = 1;
1035 uint64_t v64;
1036 g_vcpu[cpuid]->get_reg(VCPU_ASR_NPC, &v64);
1037 ru.instr.ea_va = v64;
1038 if (!ru.instr.hpriv) {
1039 ru.instr.ea_va &= pcs[cpuid].mask_pstate_am;
1040 }
1041 // get hpstate
1042 int rv = g_vcpu[cpuid]->get_reg(VCPU_HPR_HPSTATE, &v64);
1043 if (rv == 0) {
1044 pcs[cpuid].hpr = (v64>>2) & 1;
1045 }
1046 // get pstate
1047 g_vcpu[cpuid]->get_reg(VCPU_PR_PSTATE, &pstate64);
1048 if (pcs[cpuid].hpr == 0) {
1049 pcs[cpuid].pr = (pstate64 >> 2) & 1;
1050 } else {
1051 pcs[cpuid].pr = 0;
1052 }
1053 pcs[cpuid].mask_pstate_am = MASK_PSTATE_AM(pstate64);
1054 // get TL
1055 g_vcpu[cpuid]->get_reg(VCPU_PR_TL, &tl64);
1056 is_done_retry = true;
1057 }
1058
1059 if (pd.ea_valid) {
1060 if (pd.ea_pa_va != pcs[cpuid].ea_pavadiff) {
1061 need_pavadiff = true;
1062 pcs[cpuid].ea_pavadiff = pd.ea_pa_va;
1063 }
1064 if (pd.dcontext != pcs[cpuid].dcontext) {
1065 need_pavadiff = true;
1066 pcs[cpuid].dcontext = pd.dcontext;
1067 }
1068 }
1069
1070 // ru.instr.tr = ii->dmmu_trap||ii->exception;
1071 if (ii->exception) {
1072 ru.instr.tr = 1;
1073 }
1074 ru.instr.bt = ii->taken; // FIXME - only if cti or cmov
1075
1076 if (need_pavadiff) {
1077
1078 // ALSO check if pcontext/scontext have changed
1079 uint64_t reg64;
1080 uint32_t newpcontext, newscontext;
1081
1082 uint8_t mmu_asi;
1083 if ((g_vcpu[cpuid]->config.cpu_type & VCPU_IMPL_SIM_MASK) == VCPU_IMPL_SIM_BLAZE) {
1084 mmu_asi = CH_MMU_CONTEXTREG_ASI;
1085 } else {
1086 mmu_asi = UA_MMU_CONTEXTREG_ASI;
1087 }
1088
1089 g_vcpu[cpuid]->get_asi(mmu_asi, RSTREG_MMU_PCONTEXT, reg64);
1090 newpcontext = (uint32_t) reg64;
1091 g_vcpu[cpuid]->get_asi(mmu_asi, RSTREG_MMU_SCONTEXT, reg64);
1092 newscontext = (uint32_t) reg64;
1093
1094 if ((newpcontext != pcs[cpuid].pcontext) || (newscontext != pcs[cpuid].scontext)) {
1095 pcs[cpuid].pcontext = newpcontext;
1096 pcs[cpuid].scontext = newscontext;
1097 pcs[cpuid].add_regval(RSTREG_MMU_RT, RSTREG_MMU_PCONTEXT, newpcontext);
1098 pcs[cpuid].add_regval(RSTREG_MMU_RT, RSTREG_MMU_SCONTEXT, newscontext);
1099 pcs[cpuid].flush_regval();
1100 }
1101
1102 pd.rtype = PAVADIFF_T;
1103 rstf_pavadiffT_set_cpuid(&pd, cpuid);
1104 pcs[cpuid].addrec((rstf_unionT *)&pd);
1105 memset(&pd, 0, sizeof(pd));
1106 }
1107
1108 pcs[cpuid].addrec(&ru);
1109
1110 // regvals
1111 int i;
1112 for (i=0; i<ii->nregs; i++) {
1113 // need to map vtracer regtypes to rst regtypes:
1114 int regtype = ii->dreg[i].r.type;
1115 int regid = ii->dreg[i].r.id;
1116 if (regtype == VCPU_FP_DOUBLE_RTYPE) {
1117 regid = (regid/2) + 32;
1118 } else if (regtype == VCPU_PR_RTYPE && regid == VCPU_PR_PSTATE) {
1119 pstate64 = ii->dval[i];
1120 pcs[cpuid].pr = (pstate64>>2) & 1;
1121 if (pcs[cpuid].hpr) pcs[cpuid].pr = 0;
1122 pcs[cpuid].mask_pstate_am = MASK_PSTATE_AM(pstate64);
1123 } else if (regtype == VCPU_PR_RTYPE && regid == VCPU_PR_TL) {
1124 tl64 = ii->dval[i];
1125 } else if (regtype == VCPU_HPR_RTYPE) {
1126 if (regid == VCPU_HPR_HPSTATE) {
1127 uint64_t v64 = ii->dval[i];
1128 pcs[cpuid].hpr = (v64 >> 2) & 1;
1129 if (pcs[cpuid].hpr) pcs[cpuid].pr = 0;
1130 }
1131 }
1132 pcs[cpuid].add_regval(vcpu_rtype_to_rst[regtype], regid, ii->dval[i]);
1133 }
1134 pcs[cpuid].flush_regval();
1135
1136 if (is_done_retry) {
1137 pcs[cpuid].add_regval(RSTREG_PRIV_RT, RSTREG_PSTATE_R, pstate64);
1138 pcs[cpuid].add_regval(RSTREG_PRIV_RT, RSTREG_TL_R, tl64);
1139 pcs[cpuid].flush_regval();
1140 }
1141
1142 if (ii->annul) {
1143 ru.instr.an = 1;
1144 ru.instr.pc_va = ii->pc_va + 4;
1145 ru.instr.instr = 0x0;
1146 ru.instr.ea_valid = 0;
1147 pcs[cpuid].addrec(&ru);
1148 }
1149
1150
1151 pcs[cpuid].ninsts++;
1152 if (cpuid == 0) {
1153 if ((pcs[cpuid].ninsts & 0xffffffull) == 0) {
1154 printf("%s: approx %lld insts/cpu traced (out of %lld max)\n",
1155 id, pcs[cpuid].ninsts, insts_per_cpu);
1156 }
1157 }
1158
1159 if (pcs[cpuid].ninsts >= insts_per_cpu) {
1160 pcs[cpuid].state = rst_pct::state_WAIT_SYNC_STOP;
1161 return instr(ii);
1162 // trace_off();
1163 }
1164
1165 return 0;
1166} // rstracer:instr()
1167
1168
1169// in SAM v5, the trap call happens AFTER the trapping instruction call
1170
1171int rstracer::trap ( VCPU_Trap * ti)
1172{
1173 int cpuid = ti->cpuid;
1174
1175 if (!tracing || (pcs[cpuid].state != rst_pct::state_TRACING)) return 0;
1176
1177 rstf_trapT tr = {0};
1178 tr.rtype = TRAP_T;
1179 rstf_trapT_set_cpuid(&tr, cpuid);
1180 tr.is_async = ti->is_async;
1181 tr.ttype = ti->tno;
1182
1183 tr.pc = ti->pc_va;
1184 tr.npc = ti->npc_va;
1185
1186 if (ti->tno == 0x108 || ti->tno == 0x140) { /* syscalls */
1187 tr.syscall = ti->syscallno;
1188 } else if (ti->tno == 0x060) { /* mondo-intrs */
1189 tr.syscall = ti->intrino;
1190 }
1191
1192
1193 // FIXME: the VCPU_Trap structure does not contain regiater values.
1194
1195 // get pstate and tl values
1196 uint64_t tl64, pstate64, v64;
1197 // get hpstate
1198 int rv = g_vcpu[cpuid]->get_reg(VCPU_HPR_HPSTATE, &v64);
1199 if (rv == 0) {
1200 pcs[cpuid].hpr = (v64>>2) & 1;
1201 }
1202
1203 g_vcpu[cpuid]->get_reg(VCPU_PR_TL, &tl64);
1204 g_vcpu[cpuid]->get_reg(VCPU_PR_PSTATE, &pstate64);
1205 pcs[cpuid].pr = (pstate64>>2) & 1;
1206 if (pcs[cpuid].hpr) pcs[cpuid].pr = 0;
1207 pcs[cpuid].mask_pstate_am = MASK_PSTATE_AM(pstate64);
1208
1209 tr.tl = (unsigned) tl64;
1210 tr.pstate = (uint16_t) pstate64;
1211
1212 pcs[cpuid].addrec((rstf_unionT *) &tr);
1213
1214 // in addition, emit regvals for pstate and tl
1215 pcs[cpuid].add_regval(RSTREG_PRIV_RT, RSTREG_TL_R, tl64);
1216 pcs[cpuid].add_regval(RSTREG_PRIV_RT, RSTREG_PSTATE_R, pstate64);
1217
1218 return 0;
1219} //int rstracer::trap ( VCPU_Trap * ti)
1220
1221
1222int rstracer::tlb ( VCPU_TLB * ti)
1223{
1224 if (!tracing || (pcs[ti->cpuid].state != rst_pct::state_TRACING)) return 0;
1225
1226 rstf_tlbT tr = {0};
1227 tr.rtype = TLB_T;
1228 rstf_tlbT_set_cpuid(&tr, ti->cpuid);
1229 tr.demap = ti->demap;
1230 tr.tlb_type = ti->tlb_type;
1231 tr.tlb_index = ti->tlb_index;
1232 tr.tlb_no = ti->tlb_no;
1233 tr.tte_tag = ti->tte_tag;
1234 tr.tte_data = ti->tte_data;
1235
1236 if (ti->format == 1) { // sun4v
1237 tr.unused16 = ti->tte_context;
1238 tr.unused = ti->is_real;
1239 }
1240
1241 pcs[ti->cpuid].addrec((rstf_unionT *)&tr);
1242
1243 return 0;
1244} // int rstracer::tlb ( VCPU_TLB * ti)
1245
1246
1247int rstracer::async(VCPU_AsyncData * di)
1248{
1249 if (!tracing || (pcs[first_vcpu_id].state != rst_pct::state_TRACING)) return 0;
1250
1251 if (di->dma.rtype == DMA_T) {
1252 rstf_unionT dr;
1253 dr.dma.rtype = DMA_T;
1254 dr.dma.iswrite = di->dma.iswrite;
1255 dr.dma.nbytes = (int) di->dma.nbytes;
1256 dr.dma.start_pa = di->dma.pa;
1257 dr.dma.devid = di->dma.devid;
1258 pcs[first_vcpu_id].addrec(&dr);
1259 } else if (di->strdata.rtype == STRDESC_T) {
1260 di->strdata.s[22] = 0; // even if it is already 0
1261 pcs[first_vcpu_id].string2rst(di->strdata.s);
1262 } else {
1263 fprintf(stderr, "%s: ERROR: invalid asyncdata rtype (%d)\n",
1264 id, (int) di->strdata.rtype);
1265 }
1266 return 0;
1267} // int rstracer::async(VCPU_AsyncData * di)
1268
1269int rstracer::sync (VCPU_Sync * si)
1270{
1271 if (!tracing || (pcs[si->cpuid].state != rst_pct::state_TRACING)) return 0;
1272
1273 // output sync record
1274 rstf_timesyncT ts = {0};
1275 ts.rtype = TIMESYNC_T;
1276 ts.subtype = si->synctype;
1277 ts.cpuid = si->cpuid;
1278 ts.data = si->data;
1279 ts.sequence_number = si->syncid;
1280 pcs[si->cpuid].addrec((rstf_unionT*) &ts);
1281 return 0;
1282}
1283
1284
1285int rstracer::hwop(VCPU_HwOp * hi)
1286{
1287 if (!tracing || (pcs[hi->cpuid].state != rst_pct::state_TRACING)) return 0;
1288
1289 rstf_tsb_accessT tr;
1290 memset(&tr, 0, sizeof(tr));
1291 tr.rtype = TSB_ACCESS_T;
1292 tr.isdata = hi->op_type;
1293 rstf_tsb_accessT_set_cpuid(&tr, hi->cpuid);
1294 tr.pa = hi->addr;
1295 pcs[hi->cpuid].addrec((rstf_unionT *) &tr);
1296} // int rstracer::hwop(VCPU_HwOp * hi)
1297
1298
1299rstracer::~rstracer()
1300{
1301 // unregister UI cmds
1302 char * cmd = strdup("rstrace");
1303
1304 if (tracing) {
1305 trace_off();
1306 }
1307
1308 UI_invalidate_cmd(cmd);
1309 free(cmd);
1310} // rstracer::~rstracer()
1311
1312void vtracer_fini()
1313{
1314 if (thetracer != NULL) {
1315 delete thetracer;
1316 }
1317} // void vtracer_fini()